library(raster)
library(rgdal)

############################ Climate Data ########################################################
# list the geotiff files containing the climate data
clim=list.files("H:/Work/Ongoing/Projects/Neotropical Large Herbivores and Ecorregions/Climate Data/",pattern="*.tif")
setwd("H:/Work/Ongoing/Projects/Neotropical Large Herbivores and Ecorregions/Climate Data")
clim1=stack(clim)
# read the ecoregion shapefile
shape = readOGR(dsn = "H:/Work/Ongoing/Projects/Neotropical Large Herbivores and Ecorregions/Ecoregions2017/Ecoregions2017.shp")
shape=shape[shape$REALM=="Neotropic",]
# match projections
shape <- spTransform(shape, crs(clim1))
ext <- extent(shape)
# crop the global data to the Neotropical realm
aa1 <- crop(clim1, ext)
# extract the climate data for the ecoregions
bb=extract(aa1,shape)
# calculate the number of pixels per ecoregion
n.lines=lengths(bb)/length(clim)
# go from list to data.frame
cc=do.call(rbind, bb)
# repeat the name of each ecoregion as many times as there are pixels in that ecoregion
dd=data.frame(shape$ECO_NAME, n.lines)
names(dd)[1]="ECO_NAME"
dd1=dd[rep(row.names(dd), dd$n.lines), 1]
dd2=data.frame(dd1,cc)
names(dd2)[1]="ECO_NAME"
# aggregate to the ecoregion level
dd3=aggregate(dd2[2:length(names(dd2))],by=list(dd2$ECO_NAME), mean, na.rm=T)

################################################# Soil Data ########################################################
### Download geotiff files from SoilGrids (https://www.isric.org/explore/soilgrids)
# list the tiff files containing the soil data
soil=list.files("H:/Work/Ongoing/Projects/Neotropical Large Herbivores and Ecorregions/SoilGrids 5 km data/",pattern="*.tif")
soil
setwd("H:/Work/Ongoing/Projects/Neotropical Large Herbivores and Ecorregions/SoilGrids 5 km data")
soil1=stack(soil)
# read the ecoregion shapefile
shape = readOGR(dsn = "H:/Work/Ongoing/Projects/Neotropical Large Herbivores and Ecorregions/Ecoregions2017/Ecoregions2017.shp")
shape=shape[shape$REALM=="Neotropic",]
# match projections
shape <- spTransform(shape, crs(soil1))
ext <- extent(shape)
# crop the soil data
aa1 <- crop(soil1, ext)
head(aa1)
class(aa1)
# extract the soil data for the ecoregions
bb=extract(aa1,shape)
class(bb)
# from list to data.frame
cc=do.call(rbind, bb)
head(cc)
# get the number of pixels per ecoregion, to assign the ecoregion label to each pixel row of the data.frame
n.lines=lengths(bb)/length(soil)
length(n.lines)
dd=data.frame(shape$ECO_NAME, n.lines)
head(dd)
names(dd)[1]="ECO_NAME"
# repeat each ecoregion name as many times as there are pixels within that ecoregion
dd1=dd[rep(row.names(dd), dd$n.lines), 1]
# combine with the soil data
dd2=data.frame(dd1,cc)
head(dd2)
# shorten the variable names for easier handling
foo=names(dd2)[2:length(names(dd2))]
foo
foo1=gsub("_M_","",foo)
foo1
foo2=gsub("_5km_ll","",foo1)
foo2
names(dd2)[2:length(names(dd2))]=foo2
names(dd2)[1]="ECO_NAME"
# aggregate the soil data to the ecoregion level
dd3=aggregate(dd2[2:length(names(dd2))],by=list(dd2$ECO_NAME), mean, na.rm=T)
names(dd3)[1]="ECO_NAME"
# calculate means across the two soil depths
dd3$cec=apply(dd3[,c(2,3)],1,mean, na.rm=T)
dd3$ph=apply(dd3[,c(8,9)],1,mean, na.rm=T)
dd3$slt=apply(dd3[,c(10,11)],1,mean, na.rm=T)
dd3$snd=apply(dd3[,c(12,13)],1,mean, na.rm=T)

############################################### Fire Data ##############################################
rm(list = ls())
# reload the ecoregion shapefile and the Neotropical extent (the workspace was cleared above);
# both are needed below to set the projection of the fire points and to crop them
shape = readOGR(dsn = "D:/Work/Ongoing/Projects/Neotropical Large Herbivores and Ecorregions/Ecoregions2017/Ecoregions2017.shp")
shape=shape[shape$REALM=="Neotropic",]
ext <- extent(shape)
# load the MCD14ML fire data into R
# set the working directory to the directory containing the fire data files
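# 'temp' is used below to read the MCD14ML files but is not defined in the original script.
# A minimal sketch, assuming the monthly MCD14ML tables are plain-text files (*.txt) sitting in
# the working directory noted above; the file pattern is an assumption and should be adjusted
# to the actual file names.
temp = list.files(pattern = "\\.txt$")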
for (i in 1:length(temp)) assign(temp[i], read.table(temp[i],header=T))
list_df1=lapply(temp,get)
list_df1=do.call(rbind,list_df1)
# select only vegetation fires (type 0) and the relevant columns
list_df2=list_df1[list_df1$type==0,c(1,4,5,9,10)]
# select only areas in the Americas
list_df2=list_df2[list_df2$lon<0,]
# turn the fire records into a SpatialPointsDataFrame and match the ecoregion projection
fi=list_df2
coordinates(fi) <- c("lon", "lat")
proj4string(fi) <- shape@proj4string
# crop to the Neotropical realm and keep only high-confidence detections
fi.red <- crop(fi, ext)
fi.red=fi.red[fi.red$conf>=95,]
fi.red
fi.red$area=NA

# Parallel processing using multiple CPU cores
# (from: https://gis.stackexchange.com/questions/241255/efficient-spatial-joining-for-large-dataset-in-r/241420)
# Load the 'parallel' package to support parallel computation in R
library(parallel)
# Calculate the number of cores (leave one core free for the operating system)
no_cores <- detectCores() - 1
# Cut the fire points into "n" parts
# Higher "n": less memory required but slower
# Lower "n": more memory required but faster
n <- 1000
parts <- split(x = 1:length(fi.red), f = cut(1:length(fi.red), n))
# Initiate a cluster of CPU cores
# Note: all objects used in the parallel process (e.g. shape, fi.red, parts, n)
# must be defined before making the cluster
cl <- makeCluster(no_cores, type = "PSOCK")
# Load libraries on the cluster nodes
clusterEvalQ(cl = cl, expr = c(library('sp')))
# Export all the objects required by the parallel function to the cluster nodes
clusterExport(cl = cl, varlist = c("shape", "fi.red", "parts", "n"))
print(cl) # summary of the cluster
# Parallelize the sp::over function
# Returns a list with the overlays
system.time(
  overParts <- parLapply(cl = cl, X = 1:n, fun = function(x) {
    over <- over(fi.red[parts[[x]],], shape)
    gc() # release memory
    return(over)
  })
)
# Stop the cluster of CPU cores
stopCluster(cl)
# Merge the overlay results back into the fire points: ecoregion name
fi.red$eco=NA
for (i in 1:n) {
  message(paste("Merging part", i, "of", n))
  fi.red$eco[parts[[i]]] <- as.character(overParts[[i]]$ECO_NAME)
}
# biome name
fi.red$biom=NA
for (i in 1:n) {
  message(paste("Merging part", i, "of", n))
  fi.red$biom[parts[[i]]] <- as.character(overParts[[i]]$BIOME_NAME)
}

# Download land cover data to filter the fire data for anthropogenic influence
# (from https://www.esa-landcover-cci.org/?q=node/164)
aa=raster("D:/Work/Ongoing/Projects/Neotropical Large Herbivores and Ecorregions/Land Cover Map 2015/ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7/product/ESACCI-LC-L4-LCCS-Map-300m-P1Y-2015-v2.0.7.tif")
# load the ecoregion shapefile for cropping (https://ecoregions.appspot.com/)
shape = readOGR(dsn = "D:/Work/Ongoing/Projects/Neotropical Large Herbivores and Ecorregions/Ecoregions2017/Ecoregions2017.shp")
# subset the Neotropics
shape=shape[shape$REALM=="Neotropic",]
# match projections and crop the land cover map to the Neotropical realm
shape <- spTransform(shape, crs(aa))
ext <- extent(shape)
aa1 <- crop(aa, ext)
# extract the land cover labels of all pixels within each of the 179 Neotropical ecoregions
# (a list with one element per ecoregion)
bb=extract(aa1,shape)
# create a data.frame containing the name of each ecoregion and the number of land cover pixels within it;
# the objective is to calculate the proportion of pixels falling in natural areas
zz=data.frame(shape$ECO_NAME, lengths(bb))
head(zz)
names(zz)=c("ECO_NAME", "tot.pix")
# a function that calculates the number of pixels falling in natural areas
natural=function(x){sum(x>40)-sum(x==190)-sum(x==210)}
# check the projections of the fire points, the land cover map and the ecoregions
proj4string(fi.red)
proj4string(aa1)
shape@proj4string
# because the projection is in lat/long, the buffer is specified in metres
# this step takes more than one hour to run
fired=extract(aa1,fi.red, buffer=1000)
# number of land cover pixels per fire
fi.red$tot.lc.pix=lengths(fired)
# number of land cover pixels in natural areas per fire
num.nat.pix=sapply(fired,natural)
fi.red$num.nat.pix=num.nat.pix
head(fi.red)
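# Quick sanity check of the natural() land cover filter defined above, using made-up ESA CCI
# class codes rather than real data: classes <= 40 are cropland or cropland mosaics, 190 is
# urban and 210 is water, so only the two natural vegetation codes (120 shrubland, 130
# grassland) should be counted.
natural(c(10, 30, 120, 130, 190, 210)) # expected result: 2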
# proportion of land cover pixels around each fire that fall in natural areas
prop.nat.pix=fi.red$num.nat.pix/fi.red$tot.lc.pix
head(prop.nat.pix)
fi.red$prop.nat.pix=prop.nat.pix
hist(fi.red$prop.nat.pix)
# keep fires with at least 90% of the surrounding land cover pixels in natural areas
fi.nat=fi.red[fi.red$prop.nat.pix>=0.90,]
dim(fi.red)
dim(fi.nat)
# number of natural fires per ecoregion
head(fi.nat)
n.fi.nat=data.frame(table(fi.nat$eco))
head(n.fi.nat)
names(n.fi.nat)=c("ECO_NAME","n.fi.nat")
dim(n.fi.nat)
# number of natural land cover pixels per ecoregion
zz$nat.pix=sapply(bb,natural)
head(zz)
# calculate the proportion of pixels that fall in natural terrestrial areas
zz$prop.nat=zz$nat.pix/zz$tot.pix
# now merge the number of natural fires per ecoregion with the proportion of natural land cover per ecoregion
zz$ECO_NAME=as.character(zz$ECO_NAME)
zz1=merge(zz, n.fi.nat, by="ECO_NAME", all.x=T)
zz2 <- merge(zz1, shape@data, by="ECO_NAME", all.x=T)
head(zz2)
dim(zz2)
# fire frequency: number of natural fires per unit of natural ecoregion area
zz2$nat.area=zz2$SHAPE_AREA*zz2$prop.nat
zz2$nfi.nat.area=zz2$n.fi.nat/zz2$nat.area
head(zz2)
zz2$nfi.nat.area=ifelse(is.na(zz2$nfi.nat.area),0,zz2$nfi.nat.area)
zz2$FF=ifelse(zz2$prop.nat==0,NA,zz2$nfi.nat.area)

# Calculating fire intensity
# mean fire radiative power (FRP) of natural fires per ecoregion
frp.nat=aggregate(fi.nat$FRP, by=list(fi.nat$eco), mean, na.rm=T)
head(frp.nat)
names(frp.nat)=c("ECO_NAME","frp.nat")
head(frp.nat)
zz3=merge(zz2,frp.nat, by="ECO_NAME", all=T)
names(zz3)[24]="FI"
head(zz3)

###################################### Mammal herbivore data ########################################################
# Example code for getting mammal herbivore data using the PHYLACINE database
# Extant mammal indices per ecoregion
rm(list = ls())
library(rgdal)
library(raster)
library(data.table)
# Load the shapefile of ecoregions, downloaded from ecoregions.appspot.com
shape = readOGR(dsn = "D:/Work/Ongoing/Projects/Neotropical Large Herbivores and Ecorregions/Ecoregions2017")
head(shape)
class(shape)
# Subset the Neotropical realm
neot <- shape[shape$REALM == "Neotropic", ]
# Load the mammal trait data from the PHYLACINE dataset
mam <- read.csv(file.choose(), fileEncoding = "UTF-8", stringsAsFactors = F)
# Exclude species with the "EP" (extinct in prehistory) IUCN status
class(mam$IUCN.Status.1.2)
ph.ex.mam = mam[mam$IUCN.Status.1.2!="EP",]
# subset species that are at least 90% herbivorous
mam.her.sp <- ph.ex.mam[ph.ex.mam$Diet.Plant >= 90, ]
# subset terrestrial species
mam.terr.her.sp <- mam.her.sp[mam.her.sp$Terrestrial==1, ]
# load the file with the diets of Neotropical mammals, obtained from Kissling, W. D. et al. (2015) https://doi.org/10.5061/dryad.6cd0v
diet=fread(file.choose(),header=T)
head(diet)
# match the species name notation with the PHYLACINE database
diet$Binomial.1.2=paste(diet$Genus, diet$Species, sep="_")
sum(mam.terr.her.sp$Binomial.1.2%in%diet$Binomial.1.2)
dim(mam.terr.her.sp)
# subset animals that are browsers, grazers or both
bro<-diet[diet$Woody==1,]
gra<-diet[diet$Herbaceous==1,]
bot<-rbind(bro, gra)
both<-merge(mam.terr.her.sp, bot, by="Binomial.1.2")
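# Optional sanity check (a sketch, not part of the original workflow): confirm that a
# present-natural range raster exists for every selected species before stacking them in the
# next step, since stack() fails if a single file is missing. 'pres.nat.dir' and 'missing.maps'
# are helper objects introduced only for this check; the path mirrors the one used below.
pres.nat.dir = "D:/Work/Ongoing/Projects/Neotropical Large Herbivores and Ecorregions/PHYLACINE_v1.2.1/Data/Ranges/Present_natural/"
missing.maps = mam.terr.her.sp$Binomial.1.2[!file.exists(paste0(pres.nat.dir, mam.terr.her.sp$Binomial.1.2, ".tif"))]
missing.maps # should be character(0) if all range files are present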
# Load the present-natural range maps of these species
# Present-natural maps represent a counterfactual scenario showing where species would live without anthropogenic pressures.
# These ranges include extinct species
maps.pres.nat = paste0("D:/Work/Ongoing/Projects/Neotropical Large Herbivores and Ecorregions/PHYLACINE_v1.2.1/Data/Ranges/Present_natural/", mam.terr.her.sp$Binomial.1.2, ".tif")
r.pres.nat = stack(maps.pres.nat)
# Project the Neotropical ecoregion map to the range map projection
neotr = spTransform(neot, crs(r.pres.nat))
# Crop the range maps to the extent of the Neotropical realm
ext = extent(neotr)
r.pres.nat = crop(r.pres.nat, ext)
# Create a blank raster of the region
blank.r <- r.pres.nat[[1]]
blank.r[] <- NA
names(blank.r) <- NA
dim(r.pres.nat)
# Load all the present-natural raster data into a matrix for faster handling
m.pres.nat <- matrix(NA, nrow=nrow(both), ncol=dim(r.pres.nat)[1]*dim(r.pres.nat)[2])
rownames(m.pres.nat) <- both$Binomial.1.2
for(i in 1:nrow(both)) {
  m.pres.nat[i, ] <- getValues(r.pres.nat[[i]])
}
# Present-natural species list
sum(rowSums(m.pres.nat) > 0)
sp.pres.nat <- both$Binomial.1.2[rowSums(m.pres.nat) > 0]
class(m.pres.nat)
head(m.pres.nat)
class(sp.pres.nat)
head(sp.pres.nat)
dim(m.pres.nat)
# Calculating extant mammal richness
m.pres.nat1=m.pres.nat[both$Binomial.1.2,]
dim(m.pres.nat1)
# Create a raster of taxonomic richness
pres.nat.div <- blank.r
pres.nat.div[] <- colSums(m.pres.nat1)
names(pres.nat.div) <- "Present natural richness"
rich <- stack(pres.nat.div)
class(rich)
head(rich)
# extract the richness values of the pixels within each ecoregion
aa=extract(rich,neotr)
head(aa)
length(aa)
# there are 179 elements in the list, one per ecoregion;
# each element contains the richness values of the pixels within that ecoregion
# now calculate the mean richness per ecoregion
aaa=sapply(aa,mean,na.rm=T)
aaa1=data.frame(aaa,neot$ECO_NAME)
names(aaa1)=c("hrich","ECO_NAME")
aaa1$hrich=round(aaa1$hrich, 0)
# Calculate extant mammal mean body mass
m.pres.nat1=m.pres.nat[rownames(m.pres.nat)%in%both$Binomial.1.2,]
m.pres.nat2=sweep(m.pres.nat1, MARGIN=1, both$Mass.g, '*')
# Create a raster of mean body mass
pres.nat.body <- blank.r
pres.nat.body[] <- colSums(m.pres.nat2)/colSums(m.pres.nat1)
names(pres.nat.body) <- "Present natural mean body mass"
body <- stack(pres.nat.body)
aaa2=extract(body,neotr)
aaa3=sapply(aaa2,mean, na.rm=T)
aaa4=data.frame(aaa3,neot$ECO_NAME)
head(aaa4)
head(aaa1)
names(aaa4)=c("h.bm","ECO_NAME")
aaa55=merge(aaa1,aaa4,by="ECO_NAME")

######## richness of browsers and grazers
# merge the PHYLACINE data with the mammal diets
aaa5=merge(mam.terr.her.sp,diet,by="Binomial.1.2")
head(aaa5)
brow=aaa5[aaa5$Woody==1,]
graz=aaa5[aaa5$Herbaceous==1,]
m.pres.nat3=m.pres.nat[rownames(m.pres.nat)%in%brow$Binomial.1.2,]
m.pres.nat4=m.pres.nat[rownames(m.pres.nat)%in%graz$Binomial.1.2,]
pres.nat.brow.r <- blank.r
pres.nat.brow.r[] <- colSums(m.pres.nat3)
names(pres.nat.brow.r) <- "Present natural browser diversity"
bdiv <- stack(pres.nat.brow.r)
aaa6=extract(bdiv,neotr)
aaa7=sapply(aaa6,mean,na.rm=T)
aaa8=data.frame(aaa7,neot$ECO_NAME)
pres.nat.graz.r <- blank.r
pres.nat.graz.r[] <- colSums(m.pres.nat4)
names(pres.nat.graz.r) <- "Present natural grazer diversity"
gdiv <- stack(pres.nat.graz.r)
aaa9=extract(gdiv,neotr)
aaa10=sapply(aaa9,mean,na.rm=T)
aaa11=data.frame(aaa10,neot$ECO_NAME)
aaa12=merge(aaa8,aaa11,by="neot.ECO_NAME")
head(aaa12)
names(aaa12)=c("ECO_NAME","browsp","grazsp")
head(aaa55)
names(aaa55)[1]="ECO_NAME"
aaa13=merge(aaa55,aaa12,by="ECO_NAME")
head(aaa13)
# round the per-ecoregion richness, body mass, browser and grazer indices
aaa13[,2]=round(aaa13[,2],0)
aaa13[,3]=round(aaa13[,3],0)
aaa13[,4]=round(aaa13[,4],0)
aaa13[,5]=round(aaa13[,5],0)
head(aaa13)
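# Worked toy example (made-up numbers, not part of the analysis) of the per-pixel mean body
# mass computed above with sweep() and colSums(): rows are species, columns are pixels;
# presences (1/0) are weighted by body mass and divided by the number of species present.
pres = rbind(sp1 = c(1, 1, 0), sp2 = c(0, 1, 1)) # presence/absence, 2 species x 3 pixels
mass = c(10, 1000)                               # body masses in grams
wt = sweep(pres, MARGIN = 1, mass, '*')          # mass where present, 0 elsewhere
colSums(wt) / colSums(pres)                      # per-pixel mean mass: 10, 505, 1000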
# merge with the ecoregion matrix
data01=read.csv(file.choose(),header=T)
names(data01)
data02=merge(data01, aaa13, by="ECO_NAME",all=T)
# Calculate the standardized difference between the number of grazers and browsers (HGBdif)
data02$cgbsp=as.numeric(scale(data02$grazsp-data02$browsp))
# The megafauna indices were calculated with similar code, but changing the selection criteria of the mammal species
# to include prehistorically extinct species and species over 50 kg in body mass,
# and using the megafauna diet spreadsheet provided with this dataset.

######################################################### Hurricanes ##########################################################
# read the .csv file with the hurricane data
hurri=read.csv(file.choose(),header=T, sep=";")
# load the ecoregion shapefile
library(rgdal)
shape = readOGR(dsn = "D:/Work/Ongoing/Projects/Neotropical Large Herbivores and Ecorregions/Ecoregions2017")
neot=shape[shape$REALM=="Neotropic",]
# transform the data into a SpatialPointsDataFrame
coordinates(hurri) <- c("long", "lat")
# set the projection to match the projection of the ecoregion shapefile
proj4string(hurri) <- shape@proj4string
# crop the hurricane data to include only occurrences within the Neotropical realm
library(raster)
ext = extent(neot)
hurri.neo = crop(hurri, ext)
dim(hurri.neo)
# assign each hurricane occurrence to an ecoregion
hurri.neo$ECO_NAME=over(hurri.neo, neot)$ECO_NAME
head(hurri.neo)
# drop the first data column
hurri.neo=hurri.neo[,-1]
# number of hurricane occurrences per ecoregion
hurri.eco=data.frame(table(hurri.neo$ECO_NAME))
names(hurri.eco)=c("ECO_NAME","HUR")
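# A minimal sketch (not part of the original script) of how the per-ecoregion hurricane counts
# could be joined back to the ecoregion-level matrix assembled above; 'data02' is assumed to
# still be in the workspace and 'data03' is a hypothetical name. Ecoregions without recorded
# hurricanes get a count of zero.
data03 = merge(data02, hurri.eco, by = "ECO_NAME", all.x = T)
data03$HUR = ifelse(is.na(data03$HUR), 0, data03$HUR)
head(data03)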