# Setup: load required libraries and the classified-pixel data.
# Define the observation window from the shallow-lagoon boundary mask and build the marked point-pattern dataset.
# Build the analysis point pattern: drop unclassified rows, then use the
# (X, Y_map) pixel locations to define the lagoon observation window.
# The two original filter() calls are combined; !is.na(Type) is kept explicit
# because the original Type != "None" comparison also silently dropped NA rows
# (NA comparisons evaluate to NA, which filter() removes).
data_f <- up %>% filter(!is.na(Type), !(Type %in% c("None", "NA")))
quad74_m <- data_f %>% dplyr::select(X, Y_map)
# observation window from the lagoon mask, converted to a simplified polygon
# (tolerance 2 map units) to speed up later spatstat operations
fullW <- owin(mask = quad74_m)
fullW1 <- as.polygonal(fullW, repair = FALSE)
fullW2 <- simplify.owin(fullW1, 2)
# marked point pattern: one point per classified pixel, mark = cover Type
K_coralsAS <- ppp(x = data_f$X, y = data_f$Y_map,
                  marks = as.factor(data_f$Type), window = fullW2)
# Create quadrat grids at four nested spatial scales (1X, 2X, 4X, 8X with X = 47 m), referred to as quadx1-4 / cutup1-4 in the script below.
# Quadrat tessellations at four nested scales (1X, 2X, 4X, 8X; X = 47 m).
# Each successive grid halves the tile count per side, and the full point
# pattern is split into one sub-pattern per tile.
quadx1 <- quadrats(fullW2, nx = 160, ny = 80)  # 1X grid
cutup1 <- split(K_coralsAS, quadx1)
quadx2 <- quadrats(fullW2, nx = 80, ny = 40)   # 2X grid
cutup2 <- split(K_coralsAS, quadx2)
quadx3 <- quadrats(fullW2, nx = 40, ny = 20)   # 4X grid
cutup3 <- split(K_coralsAS, quadx3)
quadx4 <- quadrats(fullW2, nx = 20, ny = 10)   # 8X grid
cutup4 <- split(K_coralsAS, quadx4)
# Calculate Clark-Evans R and the fraction of each plot covered by coral, algae, and free space/sand; fit the geometric model to estimate halo width at the 1X spatial scale.
#count total pixels (points) per plot
# Replaced the index loop over quadx1$tiles with vapply over the split list
# (same length); unname() keeps totvals1 an unnamed vector as before, so
# downstream positional indexing and cbind() behave identically.
totvals1<-unname(vapply(cutup1, function(p) summary(p)$n, numeric(1)))
#Select only full plots (plots lying entirely inside the lagoon mask)
#hist(totvals1)
fullplots1<-which(totvals1>=750) #holds plot ID
# Preallocate per-full-plot result vectors (one entry per full plot).
ialgae1<-rep(1,length(fullplots1)) #holds algae fraction
ialgae1_2<-rep(1,length(fullplots1)) #holds algae pixels
corals1<-rep(1,length(fullplots1)) #holds coral pixels
#count algae and coral cells per plot
# NOTE(review): the [row,col] positions into summary()$marks depend on the
# alphabetical factor-level order of the marks; [1,2] is read as the algae
# proportion, [1,1] as the algae count, [2,1] as the coral count — TODO
# confirm against a printed summary() of one sub-pattern.
for(i in 1:length(fullplots1)){
ialgae1[i]<-summary(cutup1[[fullplots1[i]]])$marks[1,2]
ialgae1_2[i]<-summary(cutup1[[fullplots1[i]]])$marks[1,1]
corals1[i]<-summary(cutup1[[fullplots1[i]]])$marks[2,1]
}
#restrict to plots with >0 coral pixels
# BUG FIX: the original overwrote corals1 with its filtered version BEFORE
# subsetting ialgae1, so the final ialgae1[which(corals1>0)] selected every
# element (corals1 was already all-positive) and ialgae1 kept entries for
# zero-coral plots. Compute the keeper index once and apply it to every
# parallel vector.
keep1<-which(corals1>0)
fullplots1<-fullplots1[keep1]
ialgae1_2<-ialgae1_2[keep1]
ialgae1<-ialgae1[keep1]
corals1<-corals1[keep1]
a01<-(totvals1[fullplots1]-corals1)/totvals1[fullplots1] #holds A0, fraction non-habitat space
#calculate Clark-Evans R (nearest-neighbour clustering statistic) for the
#coral pattern within each full plot; R < 1 = clustered, R > 1 = dispersed
r1<-rep(NA,length(fullplots1))
for(i in 1:length(fullplots1)){
r1[i]<-clarkevans.test(split(cutup1[[fullplots1[i]]])$Coral)$statistic
}
#accumulate all per-plot metrics into a single matrix
#columns: 1=r1, 2=ialgae1, 3=total pixels, 4=algae pixels, 5=plot ID, 6=a01
listvals1<-cbind(r1,ialgae1,totvals1[fullplots1],ialgae1_2,fullplots1,a01)
# BUG FIX: the original listvals1[-which(is.infinite(listvals1)),] has two
# defects: (a) when no Inf is present, -which(...) is integer(0) and
# x[integer(0),] deletes EVERY row; (b) which() on a matrix returns linear
# indices, not row indices. Keep only rows whose entries are all finite —
# this also subsumes the original na.omit() step.
listvals1<-listvals1[rowSums(!is.finite(listvals1))==0, ,drop=FALSE]
#create geometric model: expected algae fraction decays exponentially with
#coral clustering (Clark-Evans R); the single free parameter is halo width.
# NOTE(review): fullmax1 reads listvals1 from the global environment rather
# than receiving it as an argument; column 6 is a01 (non-coral fraction) and
# column 1 is the Clark-Evans R statistic. Returns the negative binomial
# log-likelihood to be minimized by optim().
fullmax1=function(params,N,k){
halo=params[1]
pred_algae=listvals1[,6]*exp(-listvals1[,1]*halo*.5) #r_c set to 2 pixels
-sum(dbinom(k,prob=pred_algae, size=N,log=TRUE))
}
#run max likelihood fit to data; N = total pixels (col 3), k = algae pixels (col 4)
resfullmax1<-optim(fn=fullmax1,par=c(halo=8), N=listvals1[,3], k=listvals1[,4], method="L-BFGS-B")
#resulting fitted parameters
#resfullmax1$par
#summary(listvals1)
#dim(listvals1)
#check model fits, start with comparison of model to null with LRT
testlist<-rep(NA,length=length(listvals1[,3]))
# Per-plot binomial log-likelihood of the geometric model at a given halo
# width; refit with bbmle::mle2 so the model can be compared via anova().
# NOTE(review): testlist becomes a local copy inside fn1 on first assignment
# (R copy-on-modify), so the global testlist is not mutated.
fn1=function(halo){
for(i in 1:length(listvals1[,3])){
testlist[i]<-dbinom(listvals1[i,4],prob=
listvals1[i,6]*exp(-listvals1[i,1]*halo*.5)
,size=listvals1[i,3],log=TRUE)}
-sum(testlist)}
mle2(fn1, start=list(halo=8), method="L-BFGS-B")
##
## Call:
## mle2(minuslogl = fn1, start = list(halo = 8), method = "L-BFGS-B")
##
## Coefficients:
## halo
## 5.259725
##
## Log-likelihood: -238771.4
# Null model: fixed success probability 0.5 for every plot. halo is unused
# but kept as an argument so mle2/anova treat both fits as 1-parameter models.
# NOTE(review): exp(log(.5)) is simply 0.5.
fn1_null=function(halo){
for(i in 1:length(listvals1[,3])){
testlist[i]<-dbinom(listvals1[i,4],prob=
exp(log(.5))
,size=listvals1[i,3],log=TRUE)}
-sum(testlist)}
# likelihood-ratio test: geometric model vs constant-probability null
anova(mle2(fn1_null, start=list(halo=8), method="L-BFGS-B"),mle2(fn1, start=list(halo=8), method="L-BFGS-B"))
## Likelihood Ratio Tests
## Model 1: mle2(fn1_null, start = list(halo = 8), method = "L-BFGS-B"),
## [fn1_null]: halo
## Model 2: mle2(fn1, start = list(halo = 8), method = "L-BFGS-B"), [fn1]: halo
## Tot Df Deviance Chisq Df Pr(>Chisq)
## 1 1 702498
## 2 1 477543 224955 0 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#output estimate of halo width in pixel units (1 pixel = 1.67 m; converted to meters near the end of the script)
resfullmax1$par
## halo
## 5.259725
# Determine the Clark-Evans R values for the selected plots shown in Fig. 1.
# Assemble the sub-patterns for the rectangular region shown in the Fig. 1b
# aerial image by superimposing the 1X plots whose IDs tile that region
# (IDs were read off the labelled plot below).
#plot(quadx1,do.labels = TRUE,labels=1:3669) #to see plot IDs
justrt<-superimpose(split(K_coralsAS,quadx1[c(
1180:1194,
1287:1301,
1393:1407,
1494:1508,
1599:1613,
1698:1712,
1796:1810,
1892:1906,
1986:2000,
2078:2092,
2164:2178,
2247:2261,
2342:2356,
2439:2451
)])) #compile plot IDs that correspond to Fig. 1b aerial image region
marks(justrt)$pattern<-NULL #drop the "pattern" mark column added by superimpose
#plot(split.ppp(justrt)$Algae,pch=19,cols=rgb(164,193,149,max=256),main="",cex=.15, border=1) #plot algae pixels
plot(split.ppp(justrt)$Coral,pch=19,cols=rgb(227,153,88,max=256),cex=.15 ,border=1) #plot coral pixels
#dumlist<-c(1180:1194,1287:1301,1393:1407,1494:1508,1599:1613,1698:1712,1796:1810,1892:1906,1986:2000,2078:2092,2164:2178,2247:2261,2342:2356,2439:2451)
#justrt_mat<-listvals1[listvals1[,5] %in% dumlist,]
#justrt_mat[order(justrt_mat[,1]),]
#determine R values for selected plots a-d in Fig. 1b
# NOTE(review): the cached header lines below name 5 columns but print 6
# values — the unnamed 3rd column (total pixels) is omitted from the header.
listvals1[which(listvals1[,5]==1396),]
## r1 ialgae1 ialgae1_2 fullplots1 a01
## 0.3913185 0.0000000 756.0000000 24.0000000 1396.0000000 0.9616402
listvals1[which(listvals1[,5]==2446),]
## r1 ialgae1 ialgae1_2 fullplots1 a01
## 0.4332851 0.1124339 756.0000000 371.0000000 2446.0000000 0.9682540
listvals1[which(listvals1[,5]==1709),]
## r1 ialgae1 ialgae1_2 fullplots1 a01
## 6.419713e-01 3.174603e-02 7.560000e+02 5.500000e+01 1.709000e+03 9.193122e-01
listvals1[which(listvals1[,5]==2000),]
## r1 ialgae1 ialgae1_2 fullplots1 a01
## 8.411701e-01 1.984127e-02 7.560000e+02 3.700000e+01 2.000000e+03 8.227513e-01
#plot borders of selected plots a-d to overlay onto aerial image
plot(quadx1[1396], add=T) #r-.391
plot(quadx1[2446], add=T) #r-.433
plot(quadx1[1709], add=T) #r-.642
plot(quadx1[2000], add=T) #r-.841
#ops<-ifelse(dumlist %in% justrt_mat[,5]==T,justrt_mat[,1],0)/2
#plot(quadx1[dumlist],do.col=T,col=rgb(0,0,0,ops,maxColorValue = 2),add=T)
# 2X scale: 80 x 40 grid, ~56.25 pixels per square side = 93.9 m.
# 2X scale: count total pixels per plot, then keep only "full" plots
# (threshold 3000 = 4x the 1X threshold of 750, matching the 4x tile area).
totvals2<-rep(1,length(quadx2$tiles))
for(i in 1:length(quadx2$tiles)){
totvals2[i]<-summary(cutup2[[i]])$n
}
#hist(totvals2)
fullplots2<-which(totvals2>=3000)
# 2X analogue of the 1X per-plot counting: algae fraction, algae pixels, and
# coral pixels per full plot (same summary()$marks positional indexing —
# verify factor-level order as noted for the 1X scale).
ialgae2<-rep(1,length(fullplots2))
ialgae2_2<-rep(1,length(fullplots2))
corals2<-rep(1,length(fullplots2))
for(i in 1:length(fullplots2)){
ialgae2[i]<-summary(cutup2[[fullplots2[i]]])$marks[1,2]
ialgae2_2[i]<-summary(cutup2[[fullplots2[i]]])$marks[1,1]
corals2[i]<-summary(cutup2[[fullplots2[i]]])$marks[2,1]
}
#restrict to plots with >0 coral pixels
# BUG FIX: the original overwrote corals2 before subsetting ialgae2, so
# ialgae2[which(corals2>0)] was a no-op and ialgae2 kept the wrong length.
# Compute the keeper index once and apply it to every parallel vector.
keep2<-which(corals2>0)
fullplots2<-fullplots2[keep2]
ialgae2_2<-ialgae2_2[keep2]
ialgae2<-ialgae2[keep2]
corals2<-corals2[keep2]
a02<-(totvals2[fullplots2]-corals2)/totvals2[fullplots2]
# Clark-Evans R for the coral pattern in each full 2X plot.
r2<-rep(NA,length(fullplots2))
for(i in 1:length(fullplots2)){
r2[i]<-clarkevans.test(split(cutup2[[fullplots2[i]]])$Coral)$statistic
}
# Per-plot metric matrix (note: unlike the 1X table this one carries corals2
# as a 7th column).
listvals2<-cbind(r2,ialgae2,totvals2[fullplots2],ialgae2_2,fullplots2,a02,corals2)
# BUG FIX: x[-which(is.infinite(x)),] deletes ALL rows when no Inf exists
# (x[integer(0),]) and misuses linear matrix indices as row indices. Keep
# only rows whose entries are all finite instead.
listvals2<-listvals2[rowSums(!is.finite(listvals2))==0, ,drop=FALSE]
# Geometric model for the 2X scale (same form as fullmax1: algae fraction
# decays exponentially with Clark-Evans R; reads listvals2 from globals).
fullmax2=function(params,N,k){
halo=params[1]
pred_algae=listvals2[,6]*exp(-listvals2[,1]*halo*.5)
-sum(dbinom(k,prob=pred_algae, size=N,log=TRUE))
}
# max-likelihood fit; N = total pixels (col 3), k = algae pixels (col 4)
resfullmax2<-optim(fn=fullmax2,par=c(halo=8), N=listvals2[,3], k=listvals2[,4], method="L-BFGS-B")
#resfullmax2$par
#summary(listvals2)
#dim(listvals2)
#check model fits, start with comparison of model to null with LRT
testlist<-rep(NA,length=length(listvals2[,3]))
# mle2 refit of the 2X geometric model (testlist is copied locally inside fn2).
fn2=function(halo){
for(i in 1:length(listvals2[,3])){
testlist[i]<-dbinom(listvals2[i,4],prob=
listvals2[i,6]*exp(-listvals2[i,1]*halo*.5)
,size=listvals2[i,3],log=TRUE)}
-sum(testlist)}
mle2(fn2, start=list(halo=8), method="L-BFGS-B")
##
## Call:
## mle2(minuslogl = fn2, start = list(halo = 8), method = "L-BFGS-B")
##
## Coefficients:
## halo
## 5.892531
##
## Log-likelihood: -159491.5
# Constant-probability (0.5) null model for the 2X scale; halo is unused.
fn2_null=function(halo){
for(i in 1:length(listvals2[,3])){
testlist[i]<-dbinom(listvals2[i,4],prob=
exp(log(.5))
,size=listvals2[i,3],log=TRUE)}
-sum(testlist)}
# likelihood-ratio test: 2X geometric model vs null
anova(mle2(fn2_null, start=list(halo=8), method="L-BFGS-B"),mle2(fn2, start=list(halo=8), method="L-BFGS-B"))
## Likelihood Ratio Tests
## Model 1: mle2(fn2_null, start = list(halo = 8), method = "L-BFGS-B"),
## [fn2_null]: halo
## Model 2: mle2(fn2, start = list(halo = 8), method = "L-BFGS-B"), [fn2]: halo
## Tot Df Deviance Chisq Df Pr(>Chisq)
## 1 1 529772
## 2 1 318983 210789 0 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# 4X scale: 40 x 20 grid, ~112.5 pixels per square side = 188 m x 188 m.
##calc R only for full plots
# 4X scale: per-plot pixel totals; full-plot threshold 12000 = 16x the 1X
# threshold of 750 (tile area scales with the square of the side length).
totvals3<-rep(1,length(quadx3$tiles))
for(i in 1:length(quadx3$tiles)){
totvals3[i]<-summary(cutup3[[i]])$n
}
#hist(totvals3)
fullplots3<-which(totvals3>=12000)
# 4X analogue of the 1X per-plot counting (algae fraction / algae pixels /
# coral pixels; same positional summary()$marks indexing as before).
ialgae3<-rep(1,length(fullplots3))
ialgae3_2<-rep(1,length(fullplots3))
corals3<-rep(1,length(fullplots3))
for(i in 1:length(fullplots3)){
ialgae3[i]<-summary(cutup3[[fullplots3[i]]])$marks[1,2]
ialgae3_2[i]<-summary(cutup3[[fullplots3[i]]])$marks[1,1]
corals3[i]<-summary(cutup3[[fullplots3[i]]])$marks[2,1]
}
#restrict to plots with >0 coral pixels
# BUG FIX: the original overwrote corals3 before subsetting ialgae3, so
# ialgae3[which(corals3>0)] was a no-op and ialgae3 kept the wrong length.
# Compute the keeper index once and apply it to every parallel vector.
keep3<-which(corals3>0)
fullplots3<-fullplots3[keep3]
ialgae3_2<-ialgae3_2[keep3]
ialgae3<-ialgae3[keep3]
corals3<-corals3[keep3]
a03<-(totvals3[fullplots3]-corals3)/totvals3[fullplots3]
# Clark-Evans R per full 4X plot, then assemble the per-plot metric matrix.
r3<-rep(NA,length(fullplots3))
for(i in 1:length(fullplots3)){
r3[i]<-clarkevans.test(split(cutup3[[fullplots3[i]]])$Coral)$statistic
}
listvals3<-cbind(r3,ialgae3,totvals3[fullplots3],ialgae3_2,fullplots3,a03)
# NOTE(review): the Inf-row filter is disabled here (unlike the finer scales);
# also note the commented expression has the x[-which(...),] pitfall — it
# deletes every row when no Inf is present. If re-enabled, use an all-finite
# row filter instead.
#listvals3<-listvals3[-which(is.infinite(listvals3)),]
# Geometric model for the 4X scale (reads listvals3 from globals).
fullmax3=function(params,N,k){
halo=params[1]
pred_algae=listvals3[,6]*exp(-listvals3[,1]*halo*.5)
-sum(dbinom(k,prob=pred_algae, size=N,log=TRUE))
}
# max-likelihood fit; N = total pixels (col 3), k = algae pixels (col 4)
resfullmax3<-optim(fn=fullmax3,par=c(halo=8), N=listvals3[,3], k=listvals3[,4], method="L-BFGS-B")
#resfullmax3$par
#summary(listvals3)
#dim(listvals3)
#check model fits, start with comparison of model to null with LRT
testlist<-rep(NA,length=length(listvals3[,3]))
# mle2 refit of the 4X geometric model (testlist is copied locally inside fn3).
fn3=function(halo){
for(i in 1:length(listvals3[,3])){
testlist[i]<-dbinom(listvals3[i,4],prob=
listvals3[i,6]*exp(-listvals3[i,1]*halo*.5)
,size=listvals3[i,3],log=TRUE)}
-sum(testlist)}
mle2(fn3, start=list(halo=8), method="L-BFGS-B")
##
## Call:
## mle2(minuslogl = fn3, start = list(halo = 8), method = "L-BFGS-B")
##
## Coefficients:
## halo
## 5.869098
##
## Log-likelihood: -63660.71
# Constant-probability (0.5) null model for the 4X scale; halo is unused.
fn3_null=function(halo){
for(i in 1:length(listvals3[,3])){
testlist[i]<-dbinom(listvals3[i,4],prob=
exp(log(.5))
,size=listvals3[i,3],log=TRUE)}
-sum(testlist)}
# likelihood-ratio test: 4X geometric model vs null
anova(mle2(fn3_null, start=list(halo=8), method="L-BFGS-B"),mle2(fn3, start=list(halo=8), method="L-BFGS-B"))
## Likelihood Ratio Tests
## Model 1: mle2(fn3_null, start = list(halo = 8), method = "L-BFGS-B"),
## [fn3_null]: halo
## Model 2: mle2(fn3, start = list(halo = 8), method = "L-BFGS-B"), [fn3]: halo
## Tot Df Deviance Chisq Df Pr(>Chisq)
## 1 1 286126
## 2 1 127321 158805 0 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# 8X scale: 20 x 10 grid, ~225 pixels per square side = 376 m x 376 m.
##calc R only for full plots
# 8X scale: per-plot pixel totals; full-plot threshold 40000 (~53x the 1X
# threshold, slightly below the exact 64x area scaling).
totvals4<-rep(1,length(quadx4$tiles))
for(i in 1:length(quadx4$tiles)){
totvals4[i]<-summary(cutup4[[i]])$n
}
#hist(totvals4)
fullplots4<-which(totvals4>=40000)
# 8X analogue of the 1X per-plot counting (algae fraction / algae pixels /
# coral pixels; same positional summary()$marks indexing as before).
ialgae4<-rep(1,length(fullplots4))
ialgae4_2<-rep(1,length(fullplots4))
corals4<-rep(1,length(fullplots4))
for(i in 1:length(fullplots4)){
ialgae4[i]<-summary(cutup4[[fullplots4[i]]])$marks[1,2]
ialgae4_2[i]<-summary(cutup4[[fullplots4[i]]])$marks[1,1]
corals4[i]<-summary(cutup4[[fullplots4[i]]])$marks[2,1]
}
#restrict to plots with >0 coral pixels
# BUG FIX: the original overwrote corals4 before subsetting ialgae4, so
# ialgae4[which(corals4>0)] was a no-op and ialgae4 kept the wrong length.
# Compute the keeper index once and apply it to every parallel vector.
keep4<-which(corals4>0)
fullplots4<-fullplots4[keep4]
ialgae4_2<-ialgae4_2[keep4]
ialgae4<-ialgae4[keep4]
corals4<-corals4[keep4]
a04<-(totvals4[fullplots4]-corals4)/totvals4[fullplots4]
# Clark-Evans R per full 8X plot, then assemble the per-plot metric matrix.
r4<-rep(NA,length(fullplots4))
for(i in 1:length(fullplots4)){
r4[i]<-clarkevans.test(split(cutup4[[fullplots4[i]]])$Coral)$statistic
}
listvals4<-cbind(r4,ialgae4,totvals4[fullplots4],ialgae4_2,fullplots4,a04)
# NOTE(review): Inf-row filter disabled (see 4X note); the commented form
# would delete every row when no Inf is present.
#listvals4<-listvals4[-which(is.infinite(listvals4)),]
# Geometric model for the 8X scale (reads listvals4 from globals).
fullmax4=function(params,N,k){
halo=params[1]
pred_algae=listvals4[,6]*exp(-listvals4[,1]*halo*.5)
-sum(dbinom(k,prob=pred_algae, size=N,log=TRUE))
}
# max-likelihood fit; N = total pixels (col 3), k = algae pixels (col 4)
resfullmax4<-optim(fn=fullmax4,par=c(halo=8), N=listvals4[,3], k=listvals4[,4], method="L-BFGS-B")
#resfullmax4$par
#summary(listvals4)
#dim(listvals4)
#check model fits, start with comparison of model to null with LRT
testlist<-rep(NA,length=length(listvals4[,3]))
# mle2 refit of the 8X geometric model (testlist is copied locally inside fn4).
fn4=function(halo){
for(i in 1:length(listvals4[,3])){
testlist[i]<-dbinom(listvals4[i,4],prob=
listvals4[i,6]*exp(-listvals4[i,1]*halo*.5)
,size=listvals4[i,3],log=TRUE)}
-sum(testlist)}
mle2(fn4, start=list(halo=8), method="L-BFGS-B")
##
## Call:
## mle2(minuslogl = fn4, start = list(halo = 8), method = "L-BFGS-B")
##
## Coefficients:
## halo
## 5.055826
##
## Log-likelihood: -21563.74
# Constant-probability (0.5) null model for the 8X scale; halo is unused.
fn4_null=function(halo){
for(i in 1:length(listvals4[,3])){
testlist[i]<-dbinom(listvals4[i,4],prob=
exp(log(.5))
,size=listvals4[i,3],log=TRUE)}
-sum(testlist)}
# likelihood-ratio test: 8X geometric model vs null
anova(mle2(fn4_null, start=list(halo=8), method="L-BFGS-B"),mle2(fn4, start=list(halo=8), method="L-BFGS-B"))
## Likelihood Ratio Tests
## Model 1: mle2(fn4_null, start = list(halo = 8), method = "L-BFGS-B"),
## [fn4_null]: halo
## Model 2: mle2(fn4, start = list(halo = 8), method = "L-BFGS-B"), [fn4]: halo
## Tot Df Deviance Chisq Df Pr(>Chisq)
## 1 1 264539
## 2 1 43127 221411 0 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Code for plotting Figure 3a.
#subset for just coral and algae points
mapAC<-subset(K_coralsAS,marks=="Algae"|marks=="Coral")
#plot full map (algae in green, coral in orange) and overlay 4X quadrat grid
plot(split.ppp(mapAC)$Algae,pch=19,cols=rgb(164,193,149,max=256),main="",cex=.015, border=0)
plot(split.ppp(mapAC)$Coral,pch=19,add=T,cols=rgb(227,153,88,max=256),cex=.015 ,border=0)
plot(quadx3, add=T)
# Code for plotting Figure 3b.
#listvals3[order(r3),]
#selected plots, increasing R
# Four example 4X plots picked by rank of Clark-Evans R (positions 5, 7, 12,
# 15 of the R-sorted table); column 5 of listvals3 holds the plot ID used to
# index cutup3. Algae plotted first, coral overlaid.
par(mfrow=c(1,4))
dumlist<-c(5,7,12,15)
for(i in 1:4){
plot(split.ppp(cutup3[[listvals3[order(r3),5][dumlist[i]]]])$Algae,pch=19,cols=rgb(164,193,149,max=256),main="",cex=.1, border=0)
plot(split.ppp(cutup3[[listvals3[order(r3),5][dumlist[i]]]])$Coral,pch=19,add=T,cols=rgb(227,153,88,max=256),cex=.1 ,border=0)
}
# Code for plotting Figure 5b.
#Fig. 5b: algae fraction vs Clark-Evans R at the 1X scale (cols 1-2 of listvals1)
plot(listvals1[,1:2],ylim=c(0,1),xlab="Clark-Evans R",ylab="Fraction Algae",col=rgb(0,0,0,.2),pch=19,cex=.5)
# Code for plotting Figure 6c and the halo temporal analysis.
# Halo presence/absence time series per experimental plot.
# NOTE(review): x is mapped to Year but the axis is labelled "Date" — confirm
# which is intended. Rows 1:25 are clustered plots, 26:50 dispersed (see the
# binom.test subsets below); column 5 is the halo presence indicator.
halotemp<-read.csv("halos_yes_no.csv")
ggplot(data=halotemp[1:50,1:5], aes(x=Year, y=Halos, group=ID,color=Treatment)) +
geom_line()+
geom_point()+
theme_classic()+
xlab("Date")+
facet_grid(rows = vars(ID))+
theme(legend.position="none")
binom.test(sum(halotemp[26:50,5]),25) #prob halos and CI in dispersed plots
##
## Exact binomial test
##
## data: sum(halotemp[26:50, 5]) and 25
## number of successes = 0, number of trials = 25, p-value = 5.96e-08
## alternative hypothesis: true probability of success is not equal to 0.5
## 95 percent confidence interval:
## 0.0000000 0.1371852
## sample estimates:
## probability of success
## 0
binom.test(sum(halotemp[1:25,5]),25) #prob halos and CI in clustered plots
##
## Exact binomial test
##
## data: sum(halotemp[1:25, 5]) and 25
## number of successes = 18, number of trials = 25, p-value = 0.04329
## alternative hypothesis: true probability of success is not equal to 0.5
## 95 percent confidence interval:
## 0.5061232 0.8792833
## sample estimates:
## probability of success
## 0.72
# Geometric-model simulations and plots for Supplementary Fig. B2.
#simulations for geometric model with binomial error around max likelihood fits
###simulate 100 geometric model runs given max likelihood parameter estimates and a binomial error
# NOTE(review): no set.seed() is called, so these simulated envelopes differ
# between runs. The R grid is seq(0, 3, .1) (31 values); each column is one
# simulated draw of the algae fraction over that grid.
sim1mat<-matrix(nrow=length(seq(0,3,.1)),ncol=100)
for(i in 1:100){
sim1mat[,i]<-rbinom(length(seq(0,3,.1)), 100, mean(listvals1[,6])*exp(-seq(0,3,.1)*resfullmax1$par*.5))/100
}
#set up vectors for quantiles at .5, .025, .975
# NOTE(review): "_h" actually stores the 2.5% quantile and "_l" the 97.5% —
# the names are swapped, but downstream use (dashed lines + ribbon) is
# symmetric, so the plots are unaffected.
sim1mat_m<-rep(NA,length(seq(0,3,.1)))
sim1mat_h<-rep(NA,length(seq(0,3,.1)))
sim1mat_l<-rep(NA,length(seq(0,3,.1)))
#measure quantiles at .5, .025, .975
for(i in 1:length(seq(0,3,.1))){
sim1mat_m[i]<-quantile(sim1mat[i,],.5)
sim1mat_h[i]<-quantile(sim1mat[i,],.025)
sim1mat_l[i]<-quantile(sim1mat[i,],.975)
}
###run simulations for the other spatial scales
# 2X-scale envelope: same recipe as the 1X simulation (unseeded rbinom draws
# around the fitted model; "_h"/"_l" quantile names are swapped, harmlessly).
sim2mat<-matrix(nrow=length(seq(0,3,.1)),ncol=100)
for(i in 1:100){
sim2mat[,i]<-rbinom(length(seq(0,3,.1)), 100, mean(listvals2[,6])*exp(-seq(0,3,.1)*resfullmax2$par*.5))/100
}
sim2mat_m<-rep(NA,length(seq(0,3,.1)))
sim2mat_h<-rep(NA,length(seq(0,3,.1)))
sim2mat_l<-rep(NA,length(seq(0,3,.1)))
for(i in 1:length(seq(0,3,.1))){
sim2mat_m[i]<-quantile(sim2mat[i,],.5)
sim2mat_h[i]<-quantile(sim2mat[i,],.025)
sim2mat_l[i]<-quantile(sim2mat[i,],.975)
}
###
# 4X-scale envelope: same recipe as the 1X simulation.
sim3mat<-matrix(nrow=length(seq(0,3,.1)),ncol=100)
for(i in 1:100){
sim3mat[,i]<-rbinom(length(seq(0,3,.1)), 100, mean(listvals3[,6])*exp(-seq(0,3,.1)*resfullmax3$par*.5))/100
}
sim3mat_m<-rep(NA,length(seq(0,3,.1)))
sim3mat_h<-rep(NA,length(seq(0,3,.1)))
sim3mat_l<-rep(NA,length(seq(0,3,.1)))
for(i in 1:length(seq(0,3,.1))){
sim3mat_m[i]<-quantile(sim3mat[i,],.5)
sim3mat_h[i]<-quantile(sim3mat[i,],.025)
sim3mat_l[i]<-quantile(sim3mat[i,],.975)
}
###
# 8X-scale envelope: same recipe as the 1X simulation.
sim4mat<-matrix(nrow=length(seq(0,3,.1)),ncol=100)
for(i in 1:100){
sim4mat[,i]<-rbinom(length(seq(0,3,.1)), 100, mean(listvals4[,6])*exp(-seq(0,3,.1)*resfullmax4$par*.5))/100
}
sim4mat_m<-rep(NA,length(seq(0,3,.1)))
sim4mat_h<-rep(NA,length(seq(0,3,.1)))
sim4mat_l<-rep(NA,length(seq(0,3,.1)))
for(i in 1:length(seq(0,3,.1))){
sim4mat_m[i]<-quantile(sim4mat[i,],.5)
sim4mat_h[i]<-quantile(sim4mat[i,],.025)
sim4mat_l[i]<-quantile(sim4mat[i,],.975)
}
# Fig. B2 base panel: 1X data cloud plus simulated median (solid line) and
# 95% envelope (dashed lines + shaded polygon).
plot(listvals1[,1:2],ylim=c(0,1),xlab="Clark-Evans R",ylab="Fraction Algae",col=rgb(0,0,0,.21),pch=19,cex=.5)
points(seq(0,3,.1),sim1mat_m,type="l",lty=1)
points(seq(0,3,.1),sim1mat_h,type="l",lty=2)
points(seq(0,3,.1),sim1mat_l,type="l",lty=2)
polygon(c(seq(0,3,.1),rev(seq(0,3,.1))),c(sim1mat_h,rev(sim1mat_l)),border=NA, col=rgb(0,0,0,.2))
##
# Overlay the 2X/4X/8X data clouds and envelopes on the same axes.
# NOTE(review): ylim/xlab/ylab passed to points() are ignored by base
# graphics — harmless leftovers from the plot() call above.
points(listvals2[,1:2],ylim=c(0,1),xlab="Clark-Evans R",ylab="Fraction Algae",col=rgb(221,204,119,100, max=256),pch=19,cex=.5)
points(seq(0,3,.1),sim2mat_m,type="l",lty=1,col=rgb(221,204,119,100, max=256))
points(seq(0,3,.1),sim2mat_h,type="l",lty=2,col=rgb(221,204,119,100, max=256))
points(seq(0,3,.1),sim2mat_l,type="l",lty=2,col=rgb(221,204,119,100, max=256))
polygon(c(seq(0,3,.1),rev(seq(0,3,.1))),c(sim2mat_h,rev(sim2mat_l)),border=NA, col=rgb(221,204,119,50, max=256))
##
points(listvals3[,1:2],ylim=c(0,1),xlab="Clark-Evans R",ylab="Fraction Algae",col=rgb(136,34,85,100,max=256),pch=19,cex=.5)
points(seq(0,3,.1),sim3mat_m,type="l",lty=1,col=rgb(136,34,85,max=256))
points(seq(0,3,.1),sim3mat_h,type="l",lty=2,col=rgb(136,34,85,max=256))
points(seq(0,3,.1),sim3mat_l,type="l",lty=2,col=rgb(136,34,85,max=256))
polygon(c(seq(0,3,.1),rev(seq(0,3,.1))),c(sim3mat_h,rev(sim3mat_l)),border=NA, col=rgb(136,34,85,50,max=256))
##
points(listvals4[,1:2],ylim=c(0,1),xlab="Clark-Evans R",ylab="Fraction Algae",col=rgb(86,180,233,100,max=256),pch=19,cex=.5)
points(seq(0,3,.1),sim4mat_m,type="l",lty=1,col=rgb(86,180,233,max=256))
points(seq(0,3,.1),sim4mat_h,type="l",lty=2,col=rgb(86,180,233,max=256))
points(seq(0,3,.1),sim4mat_l,type="l",lty=2,col=rgb(86,180,233,max=256))
polygon(c(seq(0,3,.1),rev(seq(0,3,.1))),c(sim4mat_h,rev(sim4mat_l)),border=NA, col=rgb(86,180,233,50,max=256))
#conversion of halo pixels to meters (1 pixel = 1.67 m)
# NOTE(review): the legend labels below hard-code the converted values from
# these cached outputs; rerun and update if the fits change.
resfullmax1$par*1.67
## halo
## 8.783741
resfullmax2$par*1.67
## halo
## 9.840526
resfullmax3$par*1.67
## halo
## 9.801394
resfullmax4$par*1.67
## halo
## 8.44323
legend("topright",c("1X, h=8.78m","2X, h=9.84m","4X, h=9.80m","8X, h=8.44m"),lty=1,col=c(rgb(0,0,0,1),rgb(221,204,119,100,max=256),rgb(136,34,85,max=256),rgb(86,180,233,max=256)),lwd=2,cex=1.5)
# Re-visualization of Madin et al. 2022 data for halo temporal dynamics at Heron Island.
#plotting cycles
halodataHx<-read.csv("halodataHx.csv")
# Count the number of survey dates per reef; reefs with >= 5 dates are kept below.
sort(colSums(table(halodataHx$date,halodataHx$reef))) #select halos with at least 5 time points
## 102 144 202 401 407 503 618 107 148 152 408 414 423 605 134 153 213 216 228 236
## 1 1 1 1 1 1 1 2 2 2 2 2 2 2 3 3 3 3 3 3
## 411 417 425 501 603 606 610 612 703 141 156 211 217 234 300 420 428 615 128 205
## 3 3 3 3 3 3 3 3 3 4 4 4 4 4 4 4 4 4 5 5
## 212 215 301 308 702 704 119 123 304 305 705
## 5 5 5 5 5 5 6 6 6 6 6
# Keep only reefs observed at >= 5 survey dates (IDs read off the sorted
# table above). The original 13-way `reef==…|` chain is replaced with the
# equivalent, more readable %in% test (identical row selection, including
# the treatment of NA reef values, which %in% maps to FALSE and the OR-chain
# to NA — both are dropped by subset()).
longhalos <- subset(halodataHx,
                    reef %in% c(128, 205, 212, 215, 301, 308, 702,
                                704, 119, 123, 304, 305, 705))
longhalos$reef<-as.character(longhalos$reef)
# Left-pad date codes to 8 characters so they parse as YYYYMMDD (some codes
# appear to have lost a leading zero — TODO confirm against the raw CSV).
longhalos$date<-as.Date(paste0(strrep("0", pmax(8 - nchar(longhalos$date), 0)),longhalos$date), format = "%Y%m%d")
# One facet per reef: log ratio of halo width to patch-reef radius over time.
# NOTE(review): legend.position = "blank" is a non-standard value; "none" is
# the documented way to suppress the legend — confirm intended.
ggplot(data=longhalos, aes(x=date, y=log(halo.wid/reef.rad), group=reef,color=reef)) +
geom_line()+
geom_point()+
theme_classic()+
xlab("Date")+
ylab("log(halo width/patch reef radius)")+
facet_grid(rows = vars(reef))+
theme(legend.position= "blank")
# Satellite-imagery classification accuracy tests for Appendix B.
#classification tests for corals
#random5<-sample(fullplots1,size=5,replace=F) #coded out. Plots randomly selected are direct coded below to ease reproducibility
# Hard-coded sample of five 1X plot IDs (from the commented sample() call).
random5<-c(2519,1825,2178,3262,2713)
#of these 5, we randomly selected until we got one plot in upper and lower half
#plot 3 was upper half winner # 2178
#plot 5 was lower half winner # 2713
####################################################
####################################################
####################################################
####comparing A (non-expert) to T (expert author) and classified pixels for Upper half panel
#CORALS:
# Manual classifications are read as matrices and flattened column-wise
# ("Actual"); the image-derived classification is the pixellated coral
# pattern of upper-half plot random5[3] = 2178 ("Prediction").
matT<-as.matrix(read.csv("Tclassifications_C.csv"))
matA<-as.matrix(read.csv("AL_Uclassifications_C.csv"))
#plot 3 is the upper half
dataT_UC<-data.frame(Actual =
unlist(as.list(matT)),
Prediction =
unlist(as.list(as.matrix(pixellate(split(cutup1[[random5[3]]])$Coral,eps=1)))))
dataA_UC<-data.frame(Actual =
unlist(as.list(matA)),
Prediction =
unlist(as.list(as.matrix(pixellate(split(cutup1[[random5[3]]])$Coral,eps=1)))))
# NOTE(review): the trailing "85%" does not match the cached output below
# (Accuracy 0.7155) — the comment appears stale; verify.
confusionMatrix(as.factor(dataA_UC$Prediction), as.factor(dataA_UC$Actual), positive = "1") #ashley at 85% (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 567 104
## 1 127 14
##
## Accuracy : 0.7155
## 95% CI : (0.6831, 0.7463)
## No Information Rate : 0.8547
## P-Value [Acc > NIR] : 1.0000
##
## Kappa : -0.0595
##
## Mcnemar's Test P-Value : 0.1478
##
## Sensitivity : 0.11864
## Specificity : 0.81700
## Pos Pred Value : 0.09929
## Neg Pred Value : 0.84501
## Prevalence : 0.14532
## Detection Rate : 0.01724
## Detection Prevalence : 0.17365
## Balanced Accuracy : 0.46782
##
## 'Positive' Class : 1
##
confusionMatrix(as.factor(dataT_UC$Prediction), as.factor(dataT_UC$Actual), positive = "1") #T at 92% (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 621 50
## 1 134 7
##
## Accuracy : 0.7734
## 95% CI : (0.743, 0.8018)
## No Information Rate : 0.9298
## P-Value [Acc > NIR] : 1
##
## Kappa : -0.0325
##
## Mcnemar's Test P-Value : 9.426e-10
##
## Sensitivity : 0.122807
## Specificity : 0.822517
## Pos Pred Value : 0.049645
## Neg Pred Value : 0.925484
## Prevalence : 0.070197
## Detection Rate : 0.008621
## Detection Prevalence : 0.173645
## Balanced Accuracy : 0.472662
##
## 'Positive' Class : 1
##
confusionMatrix(as.factor(dataA_UC$Prediction), as.factor(dataT_UC$Prediction), positive = "1") #ashley predicts T at 98% accuracy (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 671 0
## 1 0 141
##
## Accuracy : 1
## 95% CI : (0.9955, 1)
## No Information Rate : 0.8264
## P-Value [Acc > NIR] : < 2.2e-16
##
## Kappa : 1
##
## Mcnemar's Test P-Value : NA
##
## Sensitivity : 1.0000
## Specificity : 1.0000
## Pos Pred Value : 1.0000
## Neg Pred Value : 1.0000
## Prevalence : 0.1736
## Detection Rate : 0.1736
## Detection Prevalence : 0.1736
## Balanced Accuracy : 1.0000
##
## 'Positive' Class : 1
##
#ALGAE:
# Same construction as the coral comparison, for the algae class of
# upper-half plot random5[3] = 2178.
matTA<-as.matrix(read.csv("Tclassifications_A.csv"))
matAA<-as.matrix(read.csv("AL_Uclassifications_A.csv"))
#plot 3 is the upper half
dataT_UA<-data.frame(Actual =
unlist(as.list(matTA)),
Prediction =
unlist(as.list(as.matrix(pixellate(split(cutup1[[random5[3]]])$Algae,eps=1)))))
dataA_UA<-data.frame(Actual =
unlist(as.list(matAA)),
Prediction =
unlist(as.list(as.matrix(pixellate(split(cutup1[[random5[3]]])$Algae,eps=1)))))
confusionMatrix(as.factor(dataA_UA$Prediction), as.factor(dataA_UA$Actual), positive = "1") #ashley at 89% (Nsig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 737 38
## 1 34 3
##
## Accuracy : 0.9113
## 95% CI : (0.8896, 0.93)
## No Information Rate : 0.9495
## P-Value [Acc > NIR] : 1.0000
##
## Kappa : 0.0305
##
## Mcnemar's Test P-Value : 0.7237
##
## Sensitivity : 0.073171
## Specificity : 0.955901
## Pos Pred Value : 0.081081
## Neg Pred Value : 0.950968
## Prevalence : 0.050493
## Detection Rate : 0.003695
## Detection Prevalence : 0.045567
## Balanced Accuracy : 0.514536
##
## 'Positive' Class : 1
##
confusionMatrix(as.factor(dataT_UA$Prediction), as.factor(dataT_UA$Actual), positive = "1") #T at 50% (Sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 383 392
## 1 14 23
##
## Accuracy : 0.5
## 95% CI : (0.465, 0.535)
## No Information Rate : 0.5111
## P-Value [Acc > NIR] : 0.7476
##
## Kappa : 0.0197
##
## Mcnemar's Test P-Value : <2e-16
##
## Sensitivity : 0.05542
## Specificity : 0.96474
## Pos Pred Value : 0.62162
## Neg Pred Value : 0.49419
## Prevalence : 0.51108
## Detection Rate : 0.02833
## Detection Prevalence : 0.04557
## Balanced Accuracy : 0.51008
##
## 'Positive' Class : 1
##
confusionMatrix(as.factor(dataT_UA$Prediction), as.factor(dataA_UA$Prediction), positive = "1") #T predicts A at 94% accuracy (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 775 0
## 1 0 37
##
## Accuracy : 1
## 95% CI : (0.9955, 1)
## No Information Rate : 0.9544
## P-Value [Acc > NIR] : < 2.2e-16
##
## Kappa : 1
##
## Mcnemar's Test P-Value : NA
##
## Sensitivity : 1.00000
## Specificity : 1.00000
## Pos Pred Value : 1.00000
## Neg Pred Value : 1.00000
## Prevalence : 0.04557
## Detection Rate : 0.04557
## Detection Prevalence : 0.04557
## Balanced Accuracy : 1.00000
##
## 'Positive' Class : 1
##
confusionMatrix(as.factor(dataA_UA$Prediction), as.factor(dataT_UA$Prediction), positive = "1") #A predicts T at 94% accuracy (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 775 0
## 1 0 37
##
## Accuracy : 1
## 95% CI : (0.9955, 1)
## No Information Rate : 0.9544
## P-Value [Acc > NIR] : < 2.2e-16
##
## Kappa : 1
##
## Mcnemar's Test P-Value : NA
##
## Sensitivity : 1.00000
## Specificity : 1.00000
## Pos Pred Value : 1.00000
## Neg Pred Value : 1.00000
## Prevalence : 0.04557
## Detection Rate : 0.04557
## Detection Prevalence : 0.04557
## Balanced Accuracy : 1.00000
##
## 'Positive' Class : 1
##
#SAND:
# Same construction for the sand class of upper-half plot random5[3] = 2178.
matTS<-as.matrix(read.csv("Tclassifications_S.csv"))
matAS<-as.matrix(read.csv("AL_Uclassifications_S.csv"))
# NOTE(review): matAS is truncated to 29 rows — presumably to match the grid
# dimensions of the other matrices; confirm against the CSV.
matAS<-matAS[1:29,]
#plot 3 is the upper half
dataT_US<-data.frame(Actual =
unlist(as.list(matTS)),
Prediction =
unlist(as.list(as.matrix(pixellate(split(cutup1[[random5[3]]])$Sand,eps=1)))))
dataA_US<-data.frame(Actual =
unlist(as.list(matAS)),
Prediction =
unlist(as.list(as.matrix(pixellate(split(cutup1[[random5[3]]])$Sand,eps=1)))))
confusionMatrix(as.factor(dataA_US$Prediction), as.factor(dataA_US$Actual), positive = "1") #ashley at 71% (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 49 185
## 1 110 468
##
## Accuracy : 0.6367
## 95% CI : (0.6026, 0.6698)
## No Information Rate : 0.8042
## P-Value [Acc > NIR] : 1
##
## Kappa : 0.0211
##
## Mcnemar's Test P-Value : 1.644e-05
##
## Sensitivity : 0.7167
## Specificity : 0.3082
## Pos Pred Value : 0.8097
## Neg Pred Value : 0.2094
## Prevalence : 0.8042
## Detection Rate : 0.5764
## Detection Prevalence : 0.7118
## Balanced Accuracy : 0.5124
##
## 'Positive' Class : 1
##
confusionMatrix(as.factor(dataT_US$Prediction), as.factor(dataT_US$Actual), positive = "1") #T at 41% (Sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 140 94
## 1 332 246
##
## Accuracy : 0.4754
## 95% CI : (0.4405, 0.5104)
## No Information Rate : 0.5813
## P-Value [Acc > NIR] : 1
##
## Kappa : 0.0183
##
## Mcnemar's Test P-Value : <2e-16
##
## Sensitivity : 0.7235
## Specificity : 0.2966
## Pos Pred Value : 0.4256
## Neg Pred Value : 0.5983
## Prevalence : 0.4187
## Detection Rate : 0.3030
## Detection Prevalence : 0.7118
## Balanced Accuracy : 0.5101
##
## 'Positive' Class : 1
##
confusionMatrix(as.factor(dataT_UA$Prediction), as.factor(dataA_UA$Prediction), positive = "1") #T predicts A at 94% accuracy (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 775 0
## 1 0 37
##
## Accuracy : 1
## 95% CI : (0.9955, 1)
## No Information Rate : 0.9544
## P-Value [Acc > NIR] : < 2.2e-16
##
## Kappa : 1
##
## Mcnemar's Test P-Value : NA
##
## Sensitivity : 1.00000
## Specificity : 1.00000
## Pos Pred Value : 1.00000
## Neg Pred Value : 1.00000
## Prevalence : 0.04557
## Detection Rate : 0.04557
## Detection Prevalence : 0.04557
## Balanced Accuracy : 1.00000
##
## 'Positive' Class : 1
##
confusionMatrix(as.factor(dataA_UA$Prediction), as.factor(dataT_UA$Prediction), positive = "1") #A predicts T at 94% accuracy (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 775 0
## 1 0 37
##
## Accuracy : 1
## 95% CI : (0.9955, 1)
## No Information Rate : 0.9544
## P-Value [Acc > NIR] : < 2.2e-16
##
## Kappa : 1
##
## Mcnemar's Test P-Value : NA
##
## Sensitivity : 1.00000
## Specificity : 1.00000
## Pos Pred Value : 1.00000
## Neg Pred Value : 1.00000
## Prevalence : 0.04557
## Detection Rate : 0.04557
## Detection Prevalence : 0.04557
## Balanced Accuracy : 1.00000
##
## 'Positive' Class : 1
##
#############################
#############################
#############################
####comparing Ashley to Theresa and Actuals for Lower half panel
#CORALS:
matTL<-as.matrix(read.csv("Tclassifications_CL.csv"))
matAL<-as.matrix(read.csv("AL_Lclassifications_C.csv"))
#plot 5 is the lower half (random5[5] = 2713); the original comment said
#"plot 3 is the upper half", copied from the upper-half section by mistake
dataT_LC<-data.frame(Actual =
unlist(as.list(matTL)),
Prediction =
unlist(as.list(as.matrix(pixellate(split(cutup1[[random5[5]]])$Coral,eps=1)))))
dataA_LC<-data.frame(Actual =
unlist(as.list(matAL)),
Prediction =
unlist(as.list(as.matrix(pixellate(split(cutup1[[random5[5]]])$Coral,eps=1)))))
confusionMatrix(as.factor(dataA_LC$Prediction), as.factor(dataA_LC$Actual), positive = "1") #ashley at 96% (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 763 23
## 1 25 1
##
## Accuracy : 0.9409
## 95% CI : (0.9224, 0.9561)
## No Information Rate : 0.9704
## P-Value [Acc > NIR] : 1.0000
##
## Kappa : 0.0096
##
## Mcnemar's Test P-Value : 0.8852
##
## Sensitivity : 0.041667
## Specificity : 0.968274
## Pos Pred Value : 0.038462
## Neg Pred Value : 0.970738
## Prevalence : 0.029557
## Detection Rate : 0.001232
## Detection Prevalence : 0.032020
## Balanced Accuracy : 0.504970
##
## 'Positive' Class : 1
##
confusionMatrix(as.factor(dataT_LC$Prediction), as.factor(dataT_LC$Actual), positive = "1") #T at 87% (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 691 95
## 1 26 0
##
## Accuracy : 0.851
## 95% CI : (0.8246, 0.8748)
## No Information Rate : 0.883
## P-Value [Acc > NIR] : 0.9975
##
## Kappa : -0.0529
##
## Mcnemar's Test P-Value : 6.337e-10
##
## Sensitivity : 0.00000
## Specificity : 0.96374
## Pos Pred Value : 0.00000
## Neg Pred Value : 0.87913
## Prevalence : 0.11700
## Detection Rate : 0.00000
## Detection Prevalence : 0.03202
## Balanced Accuracy : 0.48187
##
## 'Positive' Class : 1
##
confusionMatrix(as.factor(dataT_LC$Prediction), as.factor(dataA_LC$Prediction), positive = "1") #t predicts A at 99% accuracy (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 786 0
## 1 0 26
##
## Accuracy : 1
## 95% CI : (0.9955, 1)
## No Information Rate : 0.968
## P-Value [Acc > NIR] : 3.339e-12
##
## Kappa : 1
##
## Mcnemar's Test P-Value : NA
##
## Sensitivity : 1.00000
## Specificity : 1.00000
## Pos Pred Value : 1.00000
## Neg Pred Value : 1.00000
## Prevalence : 0.03202
## Detection Rate : 0.03202
## Detection Prevalence : 0.03202
## Balanced Accuracy : 1.00000
##
## 'Positive' Class : 1
##
confusionMatrix(as.factor(dataA_LC$Prediction), as.factor(dataT_LC$Prediction), positive = "1") #A predicts T at 99% accuracy (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 786 0
## 1 0 26
##
## Accuracy : 1
## 95% CI : (0.9955, 1)
## No Information Rate : 0.968
## P-Value [Acc > NIR] : 3.339e-12
##
## Kappa : 1
##
## Mcnemar's Test P-Value : NA
##
## Sensitivity : 1.00000
## Specificity : 1.00000
## Pos Pred Value : 1.00000
## Neg Pred Value : 1.00000
## Prevalence : 0.03202
## Detection Rate : 0.03202
## Detection Prevalence : 0.03202
## Balanced Accuracy : 1.00000
##
## 'Positive' Class : 1
##
#ALGAE:
# Load the two manual pixel-classification matrices for the algae layer
# (presumably one per observer, "T" and "A" -- TODO confirm), then pair each
# with the map-derived algae raster for the same randomly selected 1X plot.
matTAL <- as.matrix(read.csv("Tclassifications_AL.csv"))
matAAL <- as.matrix(read.csv("AL_Lclassifications_A.csv"))
#plot 3 is the upper half
# Rasterize the algae points of the chosen plot once (eps = 1 pixel size) and
# reuse the flattened vector as the Prediction column for both observers.
algae_raster <- as.matrix(pixellate(split(cutup1[[random5[5]]])$Algae, eps = 1))
algae_pred <- unlist(as.list(algae_raster))
dataT_LA <- data.frame(Actual = unlist(as.list(matTAL)), Prediction = algae_pred)
dataA_LA <- data.frame(Actual = unlist(as.list(matAAL)), Prediction = algae_pred)
# Observer A's manual algae classification vs the map-derived prediction.
# NOTE(review): the inline tag "47% (sig)" does not match the recorded output
# (Accuracy = 0.6256); the 47% presumably refers to another statistic
# (e.g. Kappa-adjusted or per-class agreement) -- confirm.
confusionMatrix(as.factor(dataA_LA$Prediction), as.factor(dataA_LA$Actual), positive = "1") #ashley at 47% (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 339 267
## 1 37 169
##
## Accuracy : 0.6256
## 95% CI : (0.5913, 0.659)
## No Information Rate : 0.5369
## P-Value [Acc > NIR] : 2.026e-07
##
## Kappa : 0.2775
##
## Mcnemar's Test P-Value : < 2.2e-16
##
## Sensitivity : 0.3876
## Specificity : 0.9016
## Pos Pred Value : 0.8204
## Neg Pred Value : 0.5594
## Prevalence : 0.5369
## Detection Rate : 0.2081
## Detection Prevalence : 0.2537
## Balanced Accuracy : 0.6446
##
## 'Positive' Class : 1
##
# Observer T's manual algae classification vs the map-derived prediction.
# NOTE(review): the inline tag "94% (Sig)" does not match the recorded output
# (Accuracy = 0.7599 with P-Value [Acc > NIR] = 1, i.e. not better than the
# no-information rate) -- confirm what the 94% refers to.
confusionMatrix(as.factor(dataT_LA$Prediction), as.factor(dataT_LA$Actual), positive = "1") #T at 94% (Sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 591 15
## 1 180 26
##
## Accuracy : 0.7599
## 95% CI : (0.7289, 0.7889)
## No Information Rate : 0.9495
## P-Value [Acc > NIR] : 1
##
## Kappa : 0.1379
##
## Mcnemar's Test P-Value : <2e-16
##
## Sensitivity : 0.63415
## Specificity : 0.76654
## Pos Pred Value : 0.12621
## Neg Pred Value : 0.97525
## Prevalence : 0.05049
## Detection Rate : 0.03202
## Detection Prevalence : 0.25369
## Balanced Accuracy : 0.70034
##
## 'Positive' Class : 1
##
# NOTE(review): dataA_LA$Prediction and dataT_LA$Prediction are the SAME
# vector -- both were built from the identical pixellate() call above -- so
# this confusion matrix is guaranteed to be a perfect diagonal. The reported
# 100% accuracy is tautological and does not measure observer agreement.
# To compare the two observers, compare the manual classifications instead:
#   confusionMatrix(as.factor(dataA_LA$Actual), as.factor(dataT_LA$Actual), positive = "1")
confusionMatrix(as.factor(dataA_LA$Prediction), as.factor(dataT_LA$Prediction), positive = "1") #A predicts T at 100% accuracy (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 606 0
## 1 0 206
##
## Accuracy : 1
## 95% CI : (0.9955, 1)
## No Information Rate : 0.7463
## P-Value [Acc > NIR] : < 2.2e-16
##
## Kappa : 1
##
## Mcnemar's Test P-Value : NA
##
## Sensitivity : 1.0000
## Specificity : 1.0000
## Pos Pred Value : 1.0000
## Neg Pred Value : 1.0000
## Prevalence : 0.2537
## Detection Rate : 0.2537
## Detection Prevalence : 0.2537
## Balanced Accuracy : 1.0000
##
## 'Positive' Class : 1
##
#SAND:
# Load the two manual pixel-classification matrices for the sand layer
# (presumably one per observer, "T" and "A" -- TODO confirm).
matTSL <- as.matrix(read.csv("Tclassifications_SL.csv"))
matASL <- as.matrix(read.csv("AL_Lclassifications_S.csv"))
# Keep only the first 29 rows of the A matrix -- presumably to match the
# dimensions of the pixellated plot; verify against the raster size.
matASL <- matASL[1:29,]
#plot 3 is the upper half
# Rasterize the sand points of the chosen plot once (eps = 1 pixel size) and
# reuse the flattened vector as the Prediction column for both observers.
sand_raster <- as.matrix(pixellate(split(cutup1[[random5[5]]])$Sand, eps = 1))
sand_pred <- unlist(as.list(sand_raster))
dataT_LS <- data.frame(Actual = unlist(as.list(matTSL)), Prediction = sand_pred)
dataA_LS <- data.frame(Actual = unlist(as.list(matASL)), Prediction = sand_pred)
# Observer A's manual sand classification vs the map-derived prediction.
# NOTE(review): the inline tag "43% (sig)" does not match the recorded output
# (Accuracy = 0.5862 with P-Value [Acc > NIR] = 0.1361, i.e. NOT significantly
# better than the no-information rate) -- confirm what the 43% refers to.
confusionMatrix(as.factor(dataA_LS$Prediction), as.factor(dataA_LS$Actual), positive = "1") #ashley at 43% (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 206 82
## 1 254 270
##
## Accuracy : 0.5862
## 95% CI : (0.5515, 0.6203)
## No Information Rate : 0.5665
## P-Value [Acc > NIR] : 0.1361
##
## Kappa : 0.2032
##
## Mcnemar's Test P-Value : <2e-16
##
## Sensitivity : 0.7670
## Specificity : 0.4478
## Pos Pred Value : 0.5153
## Neg Pred Value : 0.7153
## Prevalence : 0.4335
## Detection Rate : 0.3325
## Detection Prevalence : 0.6453
## Balanced Accuracy : 0.6074
##
## 'Positive' Class : 1
##
# Observer T's manual sand classification vs the map-derived prediction.
# NOTE(review): the inline tag "77% (Sig)" does not match the recorded output
# (Accuracy = 0.6626 with P-Value [Acc > NIR] = 1, i.e. not better than the
# no-information rate) -- confirm what the 77% refers to.
confusionMatrix(as.factor(dataT_LS$Prediction), as.factor(dataT_LS$Actual), positive = "1") #T at 77% (Sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 75 213
## 1 61 463
##
## Accuracy : 0.6626
## 95% CI : (0.6289, 0.6951)
## No Information Rate : 0.8325
## P-Value [Acc > NIR] : 1
##
## Kappa : 0.1634
##
## Mcnemar's Test P-Value : <2e-16
##
## Sensitivity : 0.6849
## Specificity : 0.5515
## Pos Pred Value : 0.8836
## Neg Pred Value : 0.2604
## Prevalence : 0.8325
## Detection Rate : 0.5702
## Detection Prevalence : 0.6453
## Balanced Accuracy : 0.6182
##
## 'Positive' Class : 1
##
# NOTE(review): dataA_LS$Prediction and dataT_LS$Prediction are the SAME
# vector -- both were built from the identical pixellate() call above -- so
# this confusion matrix is guaranteed to be a perfect diagonal. The reported
# 100% accuracy is tautological and does not measure observer agreement.
# To compare the two observers, compare the manual classifications instead:
#   confusionMatrix(as.factor(dataA_LS$Actual), as.factor(dataT_LS$Actual), positive = "1")
confusionMatrix(as.factor(dataA_LS$Prediction), as.factor(dataT_LS$Prediction), positive = "1") #A predicts Tat 100% accuracy (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 288 0
## 1 0 524
##
## Accuracy : 1
## 95% CI : (0.9955, 1)
## No Information Rate : 0.6453
## P-Value [Acc > NIR] : < 2.2e-16
##
## Kappa : 1
##
## Mcnemar's Test P-Value : NA
##
## Sensitivity : 1.0000
## Specificity : 1.0000
## Pos Pred Value : 1.0000
## Neg Pred Value : 1.0000
## Prevalence : 0.6453
## Detection Rate : 0.6453
## Detection Prevalence : 0.6453
## Balanced Accuracy : 1.0000
##
## 'Positive' Class : 1
##
# NOTE(review): this is the previous (tautological) comparison with the
# arguments swapped; for identical vectors the result is the same perfect
# diagonal, so this call is redundant. See the note on the previous call.
confusionMatrix(as.factor(dataT_LS$Prediction), as.factor(dataA_LS$Prediction), positive = "1") #T predicts A at 100% accuracy (sig)
## Confusion Matrix and Statistics
##
## Reference
## Prediction 0 1
## 0 288 0
## 1 0 524
##
## Accuracy : 1
## 95% CI : (0.9955, 1)
## No Information Rate : 0.6453
## P-Value [Acc > NIR] : < 2.2e-16
##
## Kappa : 1
##
## Mcnemar's Test P-Value : NA
##
## Sensitivity : 1.0000
## Specificity : 1.0000
## Pos Pred Value : 1.0000
## Neg Pred Value : 1.0000
## Prevalence : 0.6453
## Detection Rate : 0.6453
## Detection Prevalence : 0.6453
## Balanced Accuracy : 1.0000
##
## 'Positive' Class : 1
##