#####Ogawa et al. Quiz-style online training tool helps to learn birdsong identification and support citizen science.
#####Supplemental Code S1
#####This code was executed using R version 4.1.0 (https://www.R-project.org/).

####Calculating the mean value of each test score####
##Data loading
test_scores_total <- read.csv("test_scores_total.csv", header = TRUE)
test_scores_total[,c("adaptive")] <- as.factor(test_scores_total[,c("adaptive")])

#pretest(=0)
#all
mean(test_scores_total$score[test_scores_total$test == 0])
#adaptive group(=1)
mean(test_scores_total$score[test_scores_total$adaptive == 1 & test_scores_total$test == 0])
#baseline group(=0)
mean(test_scores_total$score[test_scores_total$adaptive == 0 & test_scores_total$test == 0])

#midterm test(=3)
mean(test_scores_total$score[test_scores_total$test == 3])
#group means mma/mmc, mpa/mpc, and mda/mdc are stored for the annotations in Fig. 4 below
mma <- mean(test_scores_total$score[test_scores_total$adaptive == 1 & test_scores_total$test == 3])
mmc <- mean(test_scores_total$score[test_scores_total$adaptive == 0 & test_scores_total$test == 3])

#posttest(=6)
mean(test_scores_total$score[test_scores_total$test == 6])
mpa <- mean(test_scores_total$score[test_scores_total$adaptive == 1 & test_scores_total$test == 6])
mpc <- mean(test_scores_total$score[test_scores_total$adaptive == 0 & test_scores_total$test == 6])

#delayed test(=20)
mean(test_scores_total$score[test_scores_total$test == 20])
mda <- mean(test_scores_total$score[test_scores_total$adaptive == 1 & test_scores_total$test == 20])
mdc <- mean(test_scores_total$score[test_scores_total$adaptive == 0 & test_scores_total$test == 20])

####Calculating the standard error of each test score####
#Create a function
std_mean <- function(x){
  sd(x)/sqrt(length(x))
}
std_mean(test_scores_total$score[test_scores_total$adaptive == 1 & test_scores_total$test == 0])
std_mean(test_scores_total$score[test_scores_total$adaptive == 0 & test_scores_total$test == 0])
std_mean(test_scores_total$score[test_scores_total$adaptive == 1 & test_scores_total$test == 3])
std_mean(test_scores_total$score[test_scores_total$adaptive == 0 & test_scores_total$test == 3])
std_mean(test_scores_total$score[test_scores_total$adaptive == 1 & test_scores_total$test == 6])
std_mean(test_scores_total$score[test_scores_total$adaptive == 0 & test_scores_total$test == 6])
std_mean(test_scores_total$score[test_scores_total$adaptive == 1 & test_scores_total$test == 20])
std_mean(test_scores_total$score[test_scores_total$adaptive == 0 & test_scores_total$test == 20])
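#Optional cross-check (not part of the original analysis): the same group means
#and standard errors can be obtained in a single call each with base R's aggregate()
aggregate(score ~ adaptive + test, data = test_scores_total, FUN = mean)
aggregate(score ~ adaptive + test, data = test_scores_total, FUN = std_mean)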
####Table S4####
##Data loading
test_scores <- read.csv("test_scores.csv", header = TRUE)
test_scores[,c("item")] <- as.factor(test_scores[,c("item")])
test_scores[,c("subject")] <- as.factor(test_scores[,c("subject")])
#midterm test
test_scores.m <- test_scores[test_scores$test == "m", ]
#posttest
test_scores.post <- test_scores[test_scores$test == "post", ]
#delayed test
test_scores.d <- test_scores[test_scores$test == "d", ]

##In advance, the package glmmTMB should be installed from https://cran.r-project.org/
##Package loading
library(glmmTMB)

##GLMM
#Whether receiving adaptive training affects scores on the midterm test
m1 <- glmmTMB(cbind(score,1-score) ~ m.adaptive + # fixed effect
                ((0+m.adaptive)|item) + ((0+m.adaptive)|subject) + # random effect (random slope)
                (1 | item ) + (1 | subject), # random effect (random intercept)
              family=binomial, data=test_scores.m)
summary(m1)
outm1 <- summary(m1)
m1p <- outm1$coefficients$cond[2,4] #Store the p-value of m1

#Whether receiving adaptive training affects scores on the posttest
m2 <- glmmTMB(cbind(score,1-score) ~ post.adaptive + # fixed effect
                ((0+post.adaptive)|item) + # random effect (random slope)
                (1 | item ) + (1 | subject), # random effect (random intercept)
              family=binomial, data=test_scores.post)
summary(m2)
outm2 <- summary(m2)
m2p <- outm2$coefficients$cond[2,4] #Store the p-value of m2

#Whether receiving adaptive training affects scores on the delayed test
m3 <- glmmTMB(cbind(score,1-score) ~ d.adaptive + # fixed effect
                ((0+d.adaptive)|item) + ((0+d.adaptive)|subject) + # random effect (random slope)
                (1 | item ) + (1 | subject), # random effect (random intercept)
              family=binomial, data=test_scores.d)
summary(m3)
outm3 <- summary(m3)
m3p <- outm3$coefficients$cond[2,4] #Store the p-value of m3

####Table S3####
##GLMM
#Whether the training method (adaptive training vs. baseline training) affects scores on the midterm, post-, and delayed tests
m4 <- glmmTMB(cbind(score,1-score) ~ m.adaptive + m.baseline + post.adaptive + post.baseline + d.adaptive + d.baseline + #fixed effect
                ((0+m.adaptive)|item) + ((0+m.baseline)|item) + #random effect (random slope)
                ((0+post.adaptive)|item) + ((0+post.baseline)|item) +
                ((0+d.adaptive)|item) + ((0+d.baseline)|item) +
                ((0+m.adaptive)|subject) + ((0+m.baseline)|subject) +
                ((0+post.adaptive)|subject) + ((0+post.baseline)|subject) +
                ((0+d.adaptive)|subject) + ((0+d.baseline)|subject) +
                (1 | item ) + (1 | subject), # random effect (random intercept)
              family=binomial, data=test_scores)
summary(m4)
outm4 <- summary(m4)
m4p_ma <- outm4$coefficients$cond[2,4] #Store the p-value of m4 (m.adaptive)
m4p_mc <- outm4$coefficients$cond[3,4] #Store the p-value of m4 (m.baseline)
m4p_pa <- outm4$coefficients$cond[4,4] #Store the p-value of m4 (post.adaptive)
m4p_pc <- outm4$coefficients$cond[5,4] #Store the p-value of m4 (post.baseline)
m4p_da <- outm4$coefficients$cond[6,4] #Store the p-value of m4 (d.adaptive)
m4p_dc <- outm4$coefficients$cond[7,4] #Store the p-value of m4 (d.baseline)
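#Optional helper (not part of the original analysis): extract the Wald p-value of a
#named fixed effect from a glmmTMB summary, as an alternative to the numeric indexing
#used above (e.g. outm1$coefficients$cond[2,4])
p_of <- function(model, term) {
  summary(model)$coefficients$cond[term, "Pr(>|z|)"]
}
#p_of(m1, "m.adaptive") should reproduce m1p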
####Fig. 4####
#A function that returns the appropriate significance symbol:
#p > 0.1 "", 0.1 >= p > 0.05 ".", 0.05 >= p > 0.01 "*", 0.01 >= p > 0.001 "**", p <= 0.001 "***"
sig <- function(a) {
  if (a > 0.1) {
    return("")
  } else if (a > 0.05) {
    return(".")
  } else if (a > 0.01) {
    return("*")
  } else if (a > 0.001) {
    return("**")
  } else {
    return("***")
  }
}
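#Cross-check (not part of the original analysis): base R's symnum(), as used by
#printCoefmat(), produces the same labels, with " " rather than "" for p > 0.1
as.character(symnum(0.03, corr = FALSE, na = FALSE,
                    cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1),
                    symbols = c("***", "**", "*", ".", " "))) #returns "*", matching sig(0.03)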
##In advance, the package ggplot2 should be installed from https://cran.r-project.org/
##Package loading
library(ggplot2)
ggplot(
  test_scores_total,
  aes(
    x = test,
    y = score,
    color = adaptive,
    group = adaptive
  )
) +
  geom_rect(aes(xmin=1, xmax=2, ymin=0, ymax=26), fill="gray", color="NA") + #training periods
  geom_rect(aes(xmin=4, xmax=5, ymin=0, ymax=26), fill="gray", color="NA") + #training periods
  scale_x_continuous(breaks=c(0, 3, 6, 20)) +
  stat_summary( fun = "mean", geom = "line", position = position_dodge( width = 0 ) ) +
  stat_summary( fun.data = mean_cl_normal, geom = "errorbar", width = 0.5, position = position_dodge( width = 0 ) ) + #mean_cl_normal requires the Hmisc package
  stat_summary( fun = "mean", geom = "point", size = 1, position = position_dodge( width = 0 ) ) +
  theme(panel.background = element_blank(),
        panel.border=element_blank(), panel.grid=element_blank(),
        axis.ticks.y=element_blank(), text = element_text(size = 30),
        axis.line.x=element_line(colour="black"), axis.line.y=element_line(colour="black")) +
  xlab("Time elapsed (days) from the pretest") +
  scale_colour_manual(name = "legend",
                      labels = c("1" = "adaptive group", "0" = "baseline group"),
                      values = c("1" = "blue", "0" = "red")) +
  annotate("text", x=10, y=25, label=print("training periods"), size=7) +
  annotate("segment", x=6, xend = 4.5, y=23.5, yend = 22) + #line of training periods
  annotate("segment", x=6, xend = 1.5, y=23.5, yend = 22) + #line of training periods
  annotate("segment", x=c(0,3-0.1,3+0.1,6-0.1,6+0.1,20), xend = c(0,3-0.1,3+0.1,6-0.1,6+0.1,20),
           y=1, yend = 0, colour="blue") + #blue vertical line
  annotate("segment", x=c(0,3+0.1,6+0.1), xend = c(3-0.1,6-0.1,20),
           y=0, yend = 0, colour="blue") + #blue horizontal line
  annotate("segment", x=c(0,3-0.1,3+0.1,6-0.1,6+0.1,20), xend = c(0,3-0.1,3+0.1,6-0.1,6+0.1,20),
           y=18, yend = 19, colour="red") + #red vertical line
  annotate("segment", x=c(0,3+0.1,6+0.1), xend = c(3-0.1,6-0.1,20),
           y=19, yend = 19, colour="red") + #red horizontal line
  annotate("segment", x=c(3-0.9,6-0.9,20-0.9), xend = c(3-0.9,6-0.9,20-0.9),
           y=c(mmc, mpc, mdc), yend = c(mma, mpa, mda)) + #black vertical line
  annotate("segment", x=c(3-0.9,3-0.9,6-0.9,6-0.9,20-0.9,20-0.9), xend = c(3-0.3,3-0.3,6-0.3,6-0.3,20-0.3,20-0.3),
           y=c(mma,mmc,mpa,mpc,mda,mdc), yend = c(mma,mmc,mpa,mpc,mda,mdc)) + #black horizontal line
  annotate("text", x=c(1.4,4.6,18.5), y=c((mma+mmc)/2, (mpa+mpc)/2, (mda+mdc)/2), #adaptive vs. baseline
           label=print(c(sig(m1p), sig(m2p), sig(m3p))), size=10) +
  annotate("text", x=c(1.6,4.5,13), y=c(1, 1, 1), #adaptive
           label=print(c(sig(m4p_ma), sig(m4p_pa), sig(m4p_da))), colour="blue", size=10) +
  annotate("text", x=c(1.6,4.5,13), y=c(20, 20, 20), #baseline
           label=print(c(sig(m4p_mc), sig(m4p_pc), sig(m4p_dc))), colour="red", size=10)
ggsave(filename = "Fig.4.png")

####Fig. 5####
test_scores[,c("post.adaptive")] <- as.factor(test_scores[,c("post.adaptive")])
ggplot(data=test_scores[test_scores$test == "post", ],
       mapping = aes(x = reorder(x = item, X = -score), y = score,
                     group = post.adaptive, color = post.adaptive)) +
  xlab("species") +
  ylab("Accuracy rate") +
  stat_summary( fun.data = mean_se, geom = "errorbar", width = 0.5, position = position_dodge( width = 0.5 ) ) +
  stat_summary( fun = "mean", geom = "point", size = 3, position = position_dodge( width = 0.5 ) ) +
  theme(panel.background = element_blank(),
        panel.border=element_blank(), panel.grid=element_blank(),
        legend.position = c(1,1), legend.justification = c(1,2),
        axis.ticks.y=element_blank(), text=element_text(size=24),
        axis.line.x=element_line(colour="black"), axis.line.y=element_line(colour="black"),
        axis.text.x = element_text(face="italic", size = 15)) +
  scale_colour_manual(name = "legend",
                      labels = c("1" = "adaptive group", "0" = "baseline group"),
                      values = c("1" = "blue", "0" = "red"))
ggsave("Fig.5.png", width = 17, height = 6)

####Table S5####
##Package loading
library(dplyr)
#Wald test comparing post-test accuracy between the adaptive group (post.adaptive == 1)
#and the baseline group (post.control == 1) for a single species, identified by its
#four-letter code in the "item" column
wald_species <- function(sp) {
  mean_a <- test_scores %>% filter( test == "post" & item == sp & post.adaptive == 1) %>% summarise(m = mean(score))
  mean_b <- test_scores %>% filter( test == "post" & item == sp & post.control == 1) %>% summarise(m = mean(score))
  se_a <- test_scores %>% filter( test == "post" & item == sp & post.adaptive == 1) %>% summarise(se = sd(score) / sqrt(length(score)))
  se_b <- test_scores %>% filter( test == "post" & item == sp & post.control == 1) %>% summarise(se = sd(score) / sqrt(length(score)))
  Wald <- (mean_a$m - mean_b$m)^2 / (se_a$se^2 + se_b$se^2)
  c(Wald = Wald, p.value = 1 - pchisq(Wald, 1))
}
#Run the Wald test for each of the 26 species
spp_table_s5 <- c("Cuca", "Stor", "Phco", "Cedi", "Coco", "Coma", "Cupo", "Spci",
                  "Acor", "Chsi", "Pamo", "Ursq", "Piaw", "Hiru", "Moci", "Pami",
                  "Alar", "Deki", "Hyam", "Gaca", "Emci", "Pova", "Fina", "Zoja",
                  "Moal", "Mogr")
for (sp in spp_table_s5) {
  cat(sp, "\n")
  print(wald_species(sp))
}
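#Worked example with made-up numbers (illustration only, not study data):
#an adaptive-group accuracy of 0.80 (SE 0.08) versus a baseline accuracy of 0.55 (SE 0.10)
#gives Wald = (0.80 - 0.55)^2 / (0.08^2 + 0.10^2), roughly 3.81, and p of about 0.051 on 1 df
W_demo <- (0.80 - 0.55)^2 / (0.08^2 + 0.10^2)
W_demo
1 - pchisq(W_demo, 1)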
####Attitude in the post-questionnaire####
##In advance, the package ordinal should be installed from https://cran.r-project.org/
##Package loading
library(ordinal)
##Data loading
##Loading each subject's post-questionnaire response data
postq <- read.csv("postq.csv", header = TRUE)
postq[,c("adaptive")] <- as.factor(postq[,c("adaptive")])
#Convert the 22 question responses (Q1-Q22) to ordered factors
postq[paste0("Q", 1:22)] <- lapply(postq[paste0("Q", 1:22)], as.ordered)

##CLM
#Whether receiving adaptive training affects post-questionnaire responses
m5 <- clm(Q1 ~ adaptive, data=postq)
summary(m5)
m6 <- clm(Q2 ~ adaptive, data=postq)
summary(m6)
m7 <- clm(Q3 ~ adaptive, data=postq)
summary(m7)
m8 <- clm(Q4 ~ adaptive, data=postq)
summary(m8)
m9 <- clm(Q5 ~ adaptive, data=postq)
summary(m9)
m10 <- clm(Q6 ~ adaptive, data=postq)
summary(m10)
m11 <- clm(Q7 ~ adaptive, data=postq)
summary(m11)
m12 <- clm(Q8 ~ adaptive, data=postq)
summary(m12)
m13 <- clm(Q9 ~ adaptive, data=postq)
summary(m13)
m14 <- clm(Q10 ~ adaptive, data=postq)
summary(m14)
m15 <- clm(Q11 ~ adaptive, data=postq)
summary(m15)
m16 <- clm(Q12 ~ adaptive, data=postq)
summary(m16)
m17 <- clm(Q13 ~ adaptive, data=postq)
summary(m17)
m18 <- clm(Q14 ~ adaptive, data=postq)
summary(m18)
m19 <- clm(Q15 ~ adaptive, data=postq)
summary(m19)
m20 <- clm(Q16 ~ adaptive, data=postq)
summary(m20)
m21 <- clm(Q17 ~ adaptive, data=postq)
summary(m21)
m22 <- clm(Q18 ~ adaptive, data=postq)
summary(m22)
m23 <- clm(Q19 ~ adaptive, data=postq)
summary(m23)
m24 <- clm(Q20 ~ adaptive, data=postq)
summary(m24)
m25 <- clm(Q21 ~ adaptive, data=postq)
summary(m25)
m26 <- clm(Q22 ~ adaptive, data=postq)
summary(m26)
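#Optional summary step (not part of the original analysis): collect the "adaptive"
#coefficient from the 22 fits above (m5-m26) into a single table; this assumes the
#adaptive column is coded 0/1, so the coefficient row is named "adaptive1"
postq_effects <- t(sapply(1:22, function(i) {
  summary(get(paste0("m", i + 4)))$coefficients["adaptive1", c("Estimate", "Pr(>|z|)")]
}))
rownames(postq_effects) <- paste0("Q", 1:22)
postq_effects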
####Attitude in the pre- and post-questionnaires####
##Data loading
##Loading each subject's pre-questionnaire response data
preq <- read.csv("preq.csv", header = TRUE)
preq[,c("adaptive")] <- as.factor(preq[,c("adaptive")])
preq[,c("Q1")] <- as.ordered(preq[,c("Q1")])
preq[,c("Q2")] <- as.ordered(preq[,c("Q2")])
preq[,c("Q3")] <- as.ordered(preq[,c("Q3")])
preq.interest <- preq
postq.interest <- postq
#Delete all columns except the relevant ones (the interest questions)
x <- 0
preq.interest <- preq.interest[,-1]
while(x <= 15){
  preq.interest <- preq.interest[,-5]
  x <- x + 1
}
postq.interest <- postq.interest[,c(-1,-3,-4,-5)]
x <- 0
while(x <= 16){
  postq.interest <- postq.interest[,-5]
  x <- x + 1
}
#change colnames
colnames(preq.interest) <- c("adaptive", "bird", "birdwatching", "birdsong")
colnames(postq.interest) <- c("adaptive", "bird", "birdwatching", "birdsong")
#combine preq.interest and postq.interest
interest <- rbind(preq.interest, postq.interest)
#Create a vector where pre-questionnaire rows are 0 and post-questionnaire rows are 1
questionnaire <- c(rep("0",66), rep("1",66))
interest <- cbind(interest, questionnaire)
interest.a <- interest[interest$adaptive==1,]
interest.c <- interest[interest$adaptive==0,]
#Pre-questionnaire: 0; adaptive group in post-questionnaire: 1; baseline group in post-questionnaire: 0
adaptive2 <- c(rep("0",66), rep("1",27), rep("0",39))
interest <- cbind(interest, adaptive2)
#Pre-questionnaire: 0; adaptive group in post-questionnaire: 0; baseline group in post-questionnaire: 1
baseline2 <- c(rep("0",66), rep("0",27), rep("1",39))
interest <- cbind(interest, baseline2)

##CLM
#Whether the training method affects post-questionnaire responses, taking the pre-questionnaire responses into account
m27 <- clm(bird ~ adaptive2 + baseline2, data = interest)
summary(m27)
m28 <- clm(birdwatching ~ adaptive2 + baseline2, data = interest)
summary(m28)
m29 <- clm(birdsong ~ adaptive2 + baseline2, data = interest)
summary(m29)

####Table S6####
##Data loading
spp_binomial <- read.csv("binomial.csv", header = FALSE)
##In advance, the package MANOVA.RM should be installed from https://cran.r-project.org/
##Package loading
library(MANOVA.RM)

#number of quiz training questions
n1 <- list()
n2 <- NULL
#for each of the 26 species
for (i in 1:26) {
  n2 <- subset(test_scores.post, item==spp_binomial$V1[i], c(number.of.quiz.training.questions))
  n1 <- append(n1, n2)
}
n3 <- NULL
for (i in 1:26) {
  n3 <- cbind(n3, n1[[i]]) #number of quiz training questions for each of the 26 species combined into n3
}
n4 <- cbind(n3, subset(test_scores.post, item==spp_binomial$V1[1], c(post.adaptive))) #Add each subject's group membership (adaptive or not) to n3
colnames(n4) <- spp_binomial$V1
names(n4)[27] <- "post.adaptive"
##MANOVA for the case of multicollinearity (MANOVA.wide from the MANOVA.RM package)
#Whether receiving adaptive training affects the number of quiz training questions for each species
m30 <- MANOVA.wide(cbind(Acor, Alar, Cedi, Chsi, Coco, Coma, Cuca, Cupo, Deki, Emci, Fina, Gaca, Hiru, Hyam, Moal, Moci, Mogr, Pami, Pamo, Phco, Piaw, Pova, Spci, Stor, Ursq, Zoja) ~ post.adaptive, data=n4)
summary(m30)

#inverse lag time
test_scores$lag.time.days <- test_scores$lag.time/(60*60*24)
test_scores$inverse.lag.time <- 1/test_scores$lag.time.days
test_scores$inverse.lag.time[test_scores$inverse.lag.time == "Inf"] <- 0 #lag times that were 0 from the start (giving Inf after inversion) are treated as 0
test_scores.post <- test_scores[test_scores$test == "post", ]
test_scores.post[,c("post.adaptive")] <- as.factor(test_scores.post[,c("post.adaptive")])
n1 <- list()
n2 <- NULL
for (i in 1:26) {
  n2 <- subset(test_scores.post, item==spp_binomial$V1[i], c(inverse.lag.time))
  n1 <- append(n1, n2)
}
n3 <- NULL
for (i in 1:26) {
  n3 <- cbind(n3, n1[[i]])
}
n4 <- cbind(n3, subset(test_scores.post, item==spp_binomial$V1[1], c(post.adaptive)))
n5 <- as.matrix(n4[,1:26])
##MANOVA
#Whether receiving adaptive training affects the inverse lag time for each species
m31 <- manova(n5 ~ post.adaptive, data=n4)
summary(m31)

#median question interval
n1 <- list()
n2 <- NULL
for (i in 1:26) {
  n2 <- subset(test_scores.post, item==spp_binomial$V1[i], c(median.question.interval))
  n1 <- append(n1, n2)
}
n3 <- NULL
for (i in 1:26) {
  n3 <- cbind(n3, n1[[i]])
}
n4 <- cbind(n3, subset(test_scores.post, item==spp_binomial$V1[1], c(post.adaptive)))
n5 <- as.matrix(n4[,1:26])
##MANOVA
#Whether receiving adaptive training affects the median question interval for each species
m32 <- manova(n5 ~ post.adaptive, data=n4)
summary(m32)
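#Optional alternative (not part of the original analysis): the item-by-item reshaping
#loops above can be written as a single reshape; this sketch assumes the tidyr package
#is installed, which the original script does not require
if (requireNamespace("tidyr", quietly = TRUE)) {
  wide_nq <- tidyr::pivot_wider(test_scores.post,
                                id_cols = c(subject, post.adaptive),
                                names_from = item,
                                values_from = number.of.quiz.training.questions)
  print(head(wide_nq)) #column order may differ from spp_binomial$V1
}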
####Table S7####
##GLMM
#Whether the number of quiz training questions, the inverse lag time, and the median question interval affect scores on the midterm, post-, and delayed tests
m33 <- glmmTMB(cbind(score,1-score) ~ number.of.quiz.training.questions + inverse.lag.time + median.question.interval + # fixed effect
                 ((0+number.of.quiz.training.questions)|item) + # random effect (random slope)
                 ((0+inverse.lag.time)|item) +
                 ((0+median.question.interval)|item) +
                 (1 | item ) + (1 | subject), # random effect (random intercept)
               family=binomial, data=test_scores)
summary(m33)
rr <- ranef(m33)
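#Optional quick look (not part of the original analysis) at the per-item random effects
#of m33; these are the values read below as rr[[1]]$item[i, 1:4]
head(rr[[1]]$item)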
####Fig. 6-8####
##Data loading
accuracy_rate <- read.csv("accuracy_rate.csv", header = TRUE)
accuracy_rate.post <- subset(accuracy_rate, test=="post")

#Per-species means of the three explanatory variables, first for the adaptive group (rows 1-26), then for the baseline group (rows 27-52)
number.of.quiz.training.questions <- c()
inverse.lag.time <- c()
median.question.interval <- c()
adaptive <- c()
#adaptive group
for (i in 1:26) {
  nnn <- test_scores.post[test_scores.post$post.adaptive == "1" & test_scores.post$item == spp_binomial$V1[i], ]
  mn <- mean(nnn$number.of.quiz.training.questions)
  mi <- mean(nnn$inverse.lag.time)
  mii <- mean(nnn$median.question.interval)
  number.of.quiz.training.questions <- append(number.of.quiz.training.questions, mn)
  inverse.lag.time <- append(inverse.lag.time, mi)
  median.question.interval <- append(median.question.interval, mii)
  adaptive <- append(adaptive, 1)
}
#baseline group
for (i in 1:26) {
  nnn <- test_scores.post[test_scores.post$post.adaptive == "0" & test_scores.post$item == spp_binomial$V1[i], ]
  mn <- mean(nnn$number.of.quiz.training.questions)
  mi <- mean(nnn$inverse.lag.time)
  mii <- mean(nnn$median.question.interval)
  number.of.quiz.training.questions <- append(number.of.quiz.training.questions, mn)
  inverse.lag.time <- append(inverse.lag.time, mi)
  median.question.interval <- append(median.question.interval, mii)
  adaptive <- append(adaptive, 0)
}

#Item-level random effects of m33; the same values are repeated for the adaptive rows (1-26) and the baseline rows (27-52)
random.slope.1 <- c() #number.of.quiz.training.questions
random.slope.2 <- c() #inverse.lag.time
random.slope.3 <- c() #median.question.interval
intercept <- c()
for (i in 1:26) {
  random.slope.1 <- append(random.slope.1, rr[[1]]$item[i,1])
  random.slope.2 <- append(random.slope.2, rr[[1]]$item[i,2])
  random.slope.3 <- append(random.slope.3, rr[[1]]$item[i,3])
  intercept <- append(intercept, rr[[1]]$item[i,4])
}
for (i in 1:26) {
  random.slope.1 <- append(random.slope.1, rr[[1]]$item[i,1])
  random.slope.2 <- append(random.slope.2, rr[[1]]$item[i,2])
  random.slope.3 <- append(random.slope.3, rr[[1]]$item[i,3])
  intercept <- append(intercept, rr[[1]]$item[i,4])
}

#Fixed-effect estimates of m33
out1 <- summary(m33)
incept <- out1$coefficients$cond[1,1]
estimates.1 <- out1$coefficients$cond[2,1]
estimates.2 <- out1$coefficients$cond[3,1]
estimates.3 <- out1$coefficients$cond[4,1]

spp_binomial2 <- rbind(spp_binomial, spp_binomial)
colnames(spp_binomial2) <- c("spp_binomial2")
d3 <- data.frame(spp_binomial2 = spp_binomial2,
                 number.of.quiz.training.questions = number.of.quiz.training.questions,
                 inverse.lag.time = inverse.lag.time,
                 median.question.interval = median.question.interval,
                 adaptive = adaptive,
                 rate = accuracy_rate.post$rate,
                 random.slope.1 = random.slope.1,
                 random.slope.2 = random.slope.2,
                 random.slope.3 = random.slope.3,
                 intercept = intercept)
d3[,c("adaptive")] <- as.factor(d3[,c("adaptive")])
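#Optional cross-check (not part of the original analysis): the same per-species group
#means without the append() loops above, using base R's aggregate()
aggregate(cbind(number.of.quiz.training.questions, inverse.lag.time,
                median.question.interval) ~ post.adaptive + item,
          data = test_scores.post, FUN = mean)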
##In advance, the package ggrepel should be installed from https://cran.r-project.org/
##Package loading
library(ggrepel)

#number.of.quiz.training.questions
ggplot(data = d3,
       mapping = aes(x = number.of.quiz.training.questions, y = rate, color = adaptive, label = spp_binomial2)) +
  theme(panel.background = element_blank(),
        panel.border=element_blank(), panel.grid=element_blank(),
        axis.ticks.y=element_blank(), text=element_text(size=30),
        axis.line.x=element_line(colour="black"), axis.line.y=element_line(colour="black"),
        legend.position = c(1,1), legend.justification = c(1,2)) +
  scale_colour_manual(name = "legend",
                      labels = c("1" = "adaptive group", "0" = "baseline group"),
                      values = c("1" = "blue", "0" = "red")) +
  #geom_text(size = 5, fontface="italic") +
  geom_point(size=3) + #add points
  geom_text_repel(size = 8, fontface="italic", force = 30) + #Avoid overlapping text; adjust the distance between text labels with "force"
  xlab("Number of quiz training questions") +
  ylab("Accuracy rate")
#guides(colour = guide_legend(reverse = TRUE))
ggsave("Fig.6.png", width = 14, height = 13)

#inverse.lag.time
ggplot(data = d3,
       mapping = aes(x = inverse.lag.time, y = rate, color = adaptive, label = spp_binomial2)) +
  theme(panel.background = element_blank(),
        panel.border=element_blank(), panel.grid=element_blank(),
        axis.ticks.y=element_blank(), text=element_text(size=30),
        axis.line.x=element_line(colour="black"), axis.line.y=element_line(colour="black"),
        legend.position = c(1,1), legend.justification = c(1,2)) +
  scale_colour_manual(name = "legend",
                      labels = c("1" = "adaptive group", "0" = "baseline group"),
                      values = c("1" = "blue", "0" = "red")) +
  #geom_text(size = 5, fontface="italic") +
  geom_point(size=3) + #add points
  geom_text_repel(size = 8, fontface="italic", force = 30) + #Avoid overlapping text; adjust the distance between text labels with "force"
  xlab("(←Large lag time) Inverse lag time (/days) (→Small lag time)") +
  ylab("Accuracy rate")
#guides(colour = guide_legend(reverse = TRUE))
ggsave("Fig.7.png", width = 14, height = 13)

#median.question.interval
ggplot(data = d3,
       mapping = aes(x = median.question.interval, y = rate, color = adaptive, label = spp_binomial2)) +
  theme(panel.background = element_blank(),
        panel.border=element_blank(), panel.grid=element_blank(),
        axis.ticks.y=element_blank(), text=element_text(size=30),
        axis.line.x=element_line(colour="black"), axis.line.y=element_line(colour="black"),
        legend.position = c(1,0.5), legend.justification = c(1,2)) +
  scale_colour_manual(name = "legend",
                      labels = c("1" = "adaptive group", "0" = "baseline group"),
                      values = c("1" = "blue", "0" = "red")) +
  #geom_text(size = 5, fontface="italic") +
  geom_point(size=3) + #add points
  geom_text_repel(size = 8, fontface="italic", force = 30) + #Avoid overlapping text; adjust the distance between text labels with "force"
  xlab("Median question interval") +
  ylab("Accuracy rate")
#guides(colour = guide_legend(reverse = TRUE))
ggsave("Fig.8.png", width = 14, height = 13)

####Correlation coefficients between each explanatory variable and its random slope####
#item-specific slope = fixed-effect estimate + item-level random slope
d3$r1.estimates <- d3$random.slope.1 + estimates.1
d3$r2.estimates <- d3$random.slope.2 + estimates.2
d3$r3.estimates <- d3$random.slope.3 + estimates.3
d3.a <- subset(d3, adaptive == 1)
d3.c <- subset(d3, adaptive == 0)
#number.of.quiz.training.questions
cor.test(d3.a$number.of.quiz.training.questions, d3.a$r1.estimates)
cor.test(d3.c$number.of.quiz.training.questions, d3.c$r1.estimates)
#inverse.lag.time
cor.test(d3.a$inverse.lag.time, d3.a$r2.estimates)
cor.test(d3.c$inverse.lag.time, d3.c$r2.estimates)
#median.question.interval
cor.test(d3.a$median.question.interval, d3.a$r3.estimates)
cor.test(d3.c$median.question.interval, d3.c$r3.estimates)