#### ------------------------------------------------------------------------------ ####
#### Specification of Bayesian temporal auto-logistic occupancy model for Black-backed Woodpeckers.
#### This model corresponds to the "temporal model" in the main text.
# Random intercept for fireID
# Habitat type modeled as a random intercept
# 8 covars on psi
# 3 covars on p
# Using R version 4.1.0 and JAGS version 4.3.0
#### ------------------------------------------------------------------------------ ####

library(R2jags)
library(dclone)

#### Load data and organize for the model
## Note: data are archived as .csv files to facilitate long-term storage
## Be sure that the current working directory is set to the correct folder
occ.files <- list.files(pattern = "\\.csv$")
for (i in 1:length(occ.files)) assign(occ.files[i], read.csv(occ.files[i]))

## Detection histories: stack the ten annual site-x-survey matrices into a
## sites x surveys x years array
X.array <- simplify2array(list(as.matrix(X.array.yr1.csv), as.matrix(X.array.yr2.csv),
                               as.matrix(X.array.yr3.csv), as.matrix(X.array.yr4.csv),
                               as.matrix(X.array.yr5.csv), as.matrix(X.array.yr6.csv),
                               as.matrix(X.array.yr7.csv), as.matrix(X.array.yr8.csv),
                               as.matrix(X.array.yr9.csv), as.matrix(X.array.yr10.csv)))

X.naive <- apply(X.array, c(1, 3), max, na.rm = TRUE)  # naive detection (max) for each site x year
X.naive[X.naive == -Inf] <- 0   # site-years with no surveys return -Inf; set to 0
X.naive[is.na(X.naive)] <- 0
head(X.naive)

mod.data <- list(
  X.array = X.array,
  X.naive = X.naive,
  ef = detection_covars.csv$ef,
  itype = detection_covars.csv$itype,
  S.elev = site_covars.csv$S.elev,
  S.lat = site_covars.csv$S.lat,
  S.bs100 = site_covars.csv$S.bs100,
  S.precc100 = site_covars.csv$S.precc100,
  whr = as.factor(site_covars.csv$whr),
  S.fire.age = S.fire.age.csv,
  S.jday = S.jday.csv,
  site.code = site_covars.csv$site.code,
  fireID = as.factor(site_covars.csv$fireID)
)

## Clean up the workspace
rm(list = setdiff(ls(), "mod.data"))
attach(mod.data)
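
## --- Optional sanity check (added sketch; not part of the archived analysis) ---
## Confirms that the detection array and covariates line up the way the JAGS model below
## expects them to. Assumes X.array is sites x surveys x years and that S.jday and
## S.fire.age hold one row per site; adjust or drop these checks if the archived .csv
## files are structured differently.
stopifnot(length(dim(X.array)) == 3)            # detection data: sites x surveys x years
stopifnot(dim(X.array)[1] == length(S.elev))    # one elevation value per site
stopifnot(dim(X.array)[1] == nrow(S.jday))      # one row of survey dates per site
stopifnot(dim(X.array)[1] == nrow(S.fire.age))  # one row of fire ages per site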

#### -------------------------------- JAGS MODEL ------------------------------------------ ####
mod.function <- function(){

  #### PRIORS
  ## Priors on logit-linear model coefficients
  a.day ~ dnorm(0, 0.2)      # coefficients on detection
  a.type ~ dnorm(0, 0.2)
  a.ef ~ dnorm(0, 0.2)
  b.age ~ dnorm(0, 0.2)      # coefficients on occupancy
  b.agesq ~ dnorm(0, 0.2)
  b.elev ~ dnorm(0, 0.2)
  b.elevsq ~ dnorm(0, 0.2)
  b.lat ~ dnorm(0, 0.2)
  b.sev ~ dnorm(0, 0.2)
  b.precc ~ dnorm(0, 0.2)
  b.elevlat ~ dnorm(0, 0.2)  # interaction effect
  phi ~ dnorm(0, 0.2)        # auto-logistic component

  ## Prior on detection model intercept
  p0 ~ dunif(0.01, 0.99)     # expected value in interval (0, 1)
  a0 <- log(p0/(1 - p0))     # logit transform

  ## Random effect for WHR type
  whr.sigma ~ dunif(0.01, 3) # SD between 0.01 and 3 on the logit scale
  whr.tau <- 1/(whr.sigma * whr.sigma)
  for (m in 1:n.whr) {
    b.whr[m] ~ dnorm(0, whr.tau)  # WHR-specific random intercept
  }

  ## Prior on occupancy intercept
  psi0 ~ dunif(0.01, 0.99)
  b0.mu <- log(psi0/(1 - psi0))
  for (m in 1:n.fires) {
    b.fire[m] ~ dnorm(b0.mu, 0.2)  # reasonable uncertainty for the b0 prior
  }
  b0 <- mean(b.fire)  # monitor the average intercept (across fires) for plotting and prediction

  #### MODEL: loop through all sites
  for(j in 1:n.sites){

    #### Model for the first year, t = 1
    ## State process
    z[j,1] ~ dbern(psi[j,1])
    logit(psi[j,1]) <- b.fire[Fire[j]] + b.whr[WHR[j]] + b.age*Age[j,1] + b.agesq*(Age[j,1])^2 +
      b.elev*Elev[j] + b.elevsq*(Elev[j])^2 + b.lat*Lat[j] + b.sev*Sev100[j] +
      b.precc*Precc[j] + b.elevlat*(Elev[j]*Lat[j])

    ## Observation process
    for(k in 1:n.surveys){
      y[j,k,1] ~ dbern(z.p[j,k,1])
      z.p[j,k,1] <- z[j,1]*p[j,k,1]
      logit(p[j,k,1]) <- a0 + a.day*Day[j,1] + a.type*Type[k] + a.ef*Ef[k]
    }

    #### Model for subsequent years, t > 1
    for(t in 2:n.years){

      ## State process
      z[j,t] ~ dbern(psi[j,t])
      logit(psi[j,t]) <- b.fire[Fire[j]] + b.whr[WHR[j]] + b.age*Age[j,t] + b.agesq*(Age[j,t])^2 +
        b.elev*Elev[j] + b.elevsq*(Elev[j])^2 + b.lat*Lat[j] + b.sev*Sev100[j] +
        b.precc*Precc[j] + b.elevlat*(Elev[j]*Lat[j]) + phi*(z[j,t-1])

      ## Observation process
      for(k in 1:n.surveys){
        y[j,k,t] ~ dbern(z.p[j,k,t])
        z.p[j,k,t] <- z[j,t]*p[j,k,t]
        logit(p[j,k,t]) <- a0 + a.day*Day[j,t] + a.type*Type[k] + a.ef*Ef[k]
      }
    }
  }
}

#### -------------------------------- RUN JAGS ------------------------------------------ ####

## Data for JAGS
jags.data <- list(y = X.array, Day = S.jday, Type = itype, Ef = ef, Age = S.fire.age,
                  Elev = S.elev, Lat = S.lat, Sev100 = S.bs100, Precc = S.precc100,
                  WHR = as.numeric(whr), Fire = as.numeric(fireID),
                  n.sites = dim(X.array)[1], n.surveys = dim(X.array)[2], n.years = dim(X.array)[3],
                  n.fires = length(unique(fireID)), n.whr = length(unique(whr)))

## Parameters to monitor
params.save <- c("a0", "a.day", "a.type", "a.ef", "b.whr", "b.age", "b.agesq", "b.elev", "b.elevsq",
                 "b.lat", "b.sev", "b.precc", "b.elevlat", "phi", "b0")

## Initial values
inits <- function() {
  list(z = X.naive)
}

## Values for MCMC
nc <- 3          # number of chains
n.adapt <- 1000
n.burn <- 30000
n.iter <- 50000
n.thin <- 100    # yields 1500 posterior samples (3 chains x 50000/100 iterations per chain)

## Parallelize
cl <- makePSOCKcluster(nc)                 # nc is the number of cores (one per chain)
tmp <- clusterEvalQ(cl, library(dclone))   # check that dclone is loaded on each of cl's workers
parLoadModule(cl, "glm")                   # load the JAGS module 'glm' on each worker
parListModules(cl)                         # make sure the previous line worked

#### Run the model
start.time <- Sys.time()
occ.run <- jags.parfit(cl, data = jags.data, params = params.save, model = mod.function,
                       inits = inits, n.chains = nc, n.adapt = n.adapt, n.update = n.burn,
                       n.iter = n.iter, thin = n.thin)
stopCluster(cl)  # close out the cluster
elapsed.time <- difftime(Sys.time(), start.time, units = 'mins')
elapsed.time
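
## --- Optional post-run diagnostics (added sketch; not part of the archived analysis) ---
## jags.parfit() returns an mcmc.list, so standard coda tools can be used to summarize
## the posterior and check convergence for the parameters listed in params.save.
library(coda)
summary(occ.run)                              # posterior means, SDs, and quantiles
gelman.diag(occ.run, multivariate = FALSE)    # Gelman-Rubin R-hat for each monitored parameter
effectiveSize(occ.run)                        # effective sample size per parameter
# traceplot(occ.run)                          # visual check of chain mixing, one panel per parameter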