#intcal<-read.csv("~/Dropbox/code/rowcal/intcal13.14c",header=FALSE,skip=11)
#marine<-read.csv("~/Dropbox/code/rowcal/marine13.14c",header=FALSE,skip=11)
#colnames(intcal)<-c("calBP","14Cage","Error","Delta14C","Sigma")
#colnames(marine)<-c("calBP","14Cage","Error","Delta14C","Sigma")

require(minpack.lm)

# Function to calibrate a radiocarbon date
rowcal<-function(date, sigma, calcurve=intcal, BC=FALSE, BPneg=TRUE, norm=TRUE) {
   curve<-calcurve[(calcurve[,2]<=(date+(sigma*4)) & calcurve[,2]>=(date-(sigma*4))),]

   # convert from cal. BP to cal. BC/AD if required
   if(BC==TRUE) curve$calBP<-1950-curve$calBP

   # build an output table
   calibrated<-as.matrix(curve[,1:2])
   if(BC==TRUE) colnames(calibrated)<-c("Cal. BC/AD","Relative p") else colnames(calibrated)<-c("Cal. BP","Relative p")
   if(BC==FALSE & BPneg==TRUE) calibrated[,1]<- -calibrated[,1]

   # apply the normal distribution function to each margin of the calibration curve and average
   H<-dnorm(curve[,2]+curve[,3],mean=date,sd=sigma)
   L<-dnorm(curve[,2]-curve[,3],mean=date,sd=sigma)
   M<-(H+L)/2
   Res<-abs(mean(diff(curve[,1])))
   if(norm==TRUE) calibrated[,2]<-(M/sum(M))/Res else calibrated[,2]<-M/Res
   calibrated
}

# Function for mixing curves, or applying local delta-R corrections, or both
mixcurves<-function(mix=0.5, uncert=0, c1=intcal, c2=marine, delR=0, error_delR=0) {
   # First interpolate so that the two curves have the same calendar years
   c2y<-approx(c2[,1], c2[,2], c1[,1], rule=2)$y
   c2e<-approx(c2[,1], c2[,3], c1[,1], rule=2)$y
   # Apply delta-R if so required
   c2y<-c2y+delR
   c2e<-sqrt(c2e^2 + error_delR^2)
   # Perform curve mixing
   out<-c1[,1:3]
   out[,2]<-(1-mix)*c1[,2] + mix*c2y
   out[,3]<-sqrt(((1-mix)*c1[,3])^2 + (mix*c2e)^2 + (uncert*(c1[,3]-c2e))^2)
   return(out)
}

#____________________________________________________________________________________
#
# Function to find the modal calibrated date for a C14 date, based on rowcal above
rowcalmode<-function(date,sigma,...) {
   cd<-rowcal(date,sigma,...)
   cd[,1][cd[,2]==max(cd[,2])]
}

#____________________________________________________________________________________
#
# Function to find the weighted mean date for a C14 date, based on rowcal above
rowcalwm<-function(date,sigma,...) {
   g<-rowcal(date,sigma,...)
   sum(g[,1]*g[,2]/max(g[,2]))/sum(g[,2]/max(g[,2]))
}

#____________________________________________________________________________________
#
# Function to draw age samples (even just one) for a C14 date, based on rowcal above
rowcalsam<-function(date, sigma, N=1, ...) {
   g<-rowcal(date, sigma, ...)
   random.points<-approx(cumsum(g[,2])/sum(g[,2]),g[,1],runif(N))$y
   random.points
}

#____________________________________________________________________________________
#
# Function to find the median of a C14 date
rowcalmedian<-function(date, sigma, ...) {
   d<-rowcal(date,sigma,...)
   # the median lies where the normalised cumulative probability reaches 0.5
   return(d[which.min(abs(cumsum(d[,2])/sum(d[,2])-0.5)),1])
}

# Return a list of medians, given three-column input (BP, SD, calcurve)
findmixmedian<-function(L,...) {
   O<-c()
   for(N in 1:nrow(L)) eval(parse(text=paste('O[N]<-rowcalmedian(L[N,1],L[N,2],calcurve=',L[N,3],')',sep='')))
   return(O)
}
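# Example usage of the single-date functions (a sketch: assumes intcal13.14c has
# been read in as above; the dates are purely illustrative):
#   cal <- rowcal(4500, 30)          # calibrate 4500 +/- 30 BP
#   plot(cal, type='l')              # posterior density; negative years = cal. BP
#   rowcalmedian(4500, 30)           # median calibrated age
#   rowcalsam(4500, 30, N=10)        # ten random calendar-age samples
#   mc <- mixcurves(mix=0.2, delR=-40, error_delR=25)   # 20% marine, local delta-R
#   rowcal(4500, 30, calcurve=mc)    # calibrate against the mixed curve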
# Function to turn a three-column list of dates into a list of one sample per date.
# The third column must be the name of the calibration curve, or 'cal' meaning
# calendar years, or 'sap' for sapwood estimates
MCmix<-function(L) {
   colnames(L)<-c('date','error','cc')
   spp<-L[L$cc=='sap',]; cal<-L[L$cc=='cal',]
   cal_points<-c(); sap_points<-c()
   # sapsam() and calsam() are defined elsewhere in rowcal
   if(nrow(spp)>0) sap_points<-apply(spp[,c('date','error')],1,function(rdate,...) sapsam(rdate['date'],rdate['error'],...))
   if(nrow(cal)>0) cal_points<-apply(cal[,c('date','error')],1,function(rdate,...) calsam(rdate['date'],rdate['error'],...))
   C14_points<-c()
   C14<-L[!(L$cc %in% c('sap','cal')),]
   if(nrow(C14)>0) {
      # I know this is as ugly as sin, but it works
      for (N in 1:nrow(C14)) C14_points[N]<-eval(parse(text=paste("rowcalsam(",C14[N,1],",",C14[N,2],",calcurve=",as.character(C14[N,3]),")",sep='')))
   }
   return(as.vector(c(cal_points,sap_points,C14_points)))
}

# Function to turn a two-column list of uncalibrated dates into a list of one sample per date
MCsam<-function(L,...) {
   colnames(L)<-c('BP','SD')
   O<-apply(L[,c('BP','SD')],1,function(rdate,...) rowcalsam(rdate['BP'],rdate['SD'],...))
   O
}

# Function to make a 'best guess' (weighted mean) list from the same
findwm<-function(L,...) {
   colnames(L)<-c('BP','SD')
   O<-apply(L[,c('BP','SD')],1,function(rdate,...) rowcalwm(rdate['BP'],rdate['SD'],...))
   O
}

# Function to make a mode-based version of the same
findmode<-function(L,...) {
   colnames(L)<-c('BP','SD')
   O<-apply(L[,c('BP','SD')],1,function(rdate,...) rowcalmode(rdate['BP'],rdate['SD'],...))
   O
}

# Function for reverse calibrating
revcal<-function(N, calcurve=intcal, BC=FALSE, BPneg=TRUE) {
   out<-NA
   if (BC) {
      calcurve[,1]<-1950-calcurve[,1]
      out<-calcurve[which(abs(calcurve[,1]-N)==min(abs(calcurve[,1]-N))),2]
   }
   if (!BC) out<-calcurve[which(abs(calcurve[,1]-abs(N))==min(abs(calcurve[,1]-abs(N)))),2]
   if(BPneg) out<- -out
   out
}

# Function for calculating a Gaussian KDE of a set of dates using a Monte Carlo
# method, with output data allowing summary stats
MCdensity<-function(DATA=CLIP(),N=100,perm_runs=1, perm_size=1, col=rgb(0,0,0,0.01),plot.new=FALSE,add=FALSE,bw=30,...) {
   if (perm_runs==1 & perm_size<1) stop("perm_runs must be specified and >1 if permutations of the data are required")
   if (perm_runs>1 & perm_size<1) {
      oDATA<-DATA
      DATA<-oDATA[sample(1:nrow(oDATA),round(perm_size*nrow(oDATA))),]
   }
   # By default the densities are calculated at 512 points in time (n=512 below).
   # This can be changed but needs to be a power of 2 --- 1024, 2048, etc.
   # (must be a Fourier transform at work)
   d<-density(MCsam(DATA),bw,na.rm=TRUE,n=512)
   if (plot.new) {add<-TRUE; plot(d,col=col,...)}
   x1<-min(round(d$x)); x2<-max(round(d$x)); n<-d$n
   out<-matrix(nrow=512, ncol=N*perm_runs+1); out[,1]<-d$x; out[,2]<-d$y; P<-0
   pb<-txtProgressBar(min=2,max=N*perm_runs-1,initial=2)
   for (perm in 1:perm_runs) {
      for (run in 1:(N-1)) {
         P<-P+1
         if (perm_runs>1 & perm_size<1) DATA<-oDATA[sample(1:nrow(oDATA),round(perm_size*nrow(oDATA))),]
         d<-density(MCsam(DATA),bw,na.rm=TRUE,from=x1,to=x2,n=512)
         out[,P+2]<-d$y
         if (add & P>2) lines(out[,1],rowMeans(out[,2:P]),col=col)
         setTxtProgressBar(pb,P+1)
      }
   }
   close(pb)
   class(out)<-'MCd'
   return(out)
}
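# Example (sketch; 'dates' is a hypothetical two-column table of BP ages and errors):
#   dates <- data.frame(BP=c(5100, 4950, 4800, 4720), SD=c(40, 35, 30, 30))
#   head(MCsam(dates))                        # one calendar sample per date
#   dens <- MCdensity(dates, N=100, bw=30)    # 100 Monte Carlo KDE runs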
# Function for calculating a Gaussian KDE of a set of dates using a Monte Carlo
# method, with output data allowing summary stats.
# Differs from MCdensity in that there should be three input columns, specifying
# date, error, and curve. The curve can be intcal, or any intcal-like object
# (marine); alternatively it can be 'sap' for sapwood models, or 'cal' for
# calendar years
mixdensity<-function(DATA=CLIP(),N=100,perm_runs=1, perm_size=1, col=rgb(0,0,0,0.01),plot.new=FALSE,add=FALSE,bw=30,...) {
   if (perm_runs==1 & perm_size<1) stop("perm_runs must be specified and >1 if permutations of the data are required")
   if (perm_runs>1 & perm_size<1) {
      oDATA<-DATA
      DATA<-oDATA[sample(1:nrow(oDATA),round(perm_size*nrow(oDATA))),]
   }
   # By default the densities are calculated at 512 points in time (n=512 below).
   # This can be changed but needs to be a power of 2 --- 1024, 2048, etc.
   # (must be a Fourier transform at work)
   d<-density(MCmix(DATA),bw,na.rm=TRUE,n=512)
   if (plot.new) {add<-TRUE; plot(d,col=col,...)}
   x1<-min(round(d$x)); x2<-max(round(d$x)); n<-d$n
   out<-matrix(nrow=512, ncol=N*perm_runs+1); out[,1]<-d$x; out[,2]<-d$y; P<-0
   pb<-txtProgressBar(min=2,max=N*perm_runs-1,initial=2)
   for (perm in 1:perm_runs) {
      for (run in 1:(N-1)) {
         P<-P+1
         if (perm_runs>1 & perm_size<1) DATA<-oDATA[sample(1:nrow(oDATA),round(perm_size*nrow(oDATA))),]
         d<-density(MCmix(DATA),bw,na.rm=TRUE,from=x1,to=x2,n=512)
         out[,P+2]<-d$y
         if (add & P>2) lines(out[,1],rowMeans(out[,2:P]),col=col)
         setTxtProgressBar(pb,P+1)
      }
   }
   close(pb)
   class(out)<-'MCd'
   return(out)
}

# Method for plotting density models
plot.MCd<-function(x, style=1, add=FALSE,col=rgb(0,0,0.8,0.5),fill=rgb(.5,.5,.5,.5),scalefactor=1,ylab='Density',xlab='Calendar Year BP',grid=TRUE,...) {
   if(style==2) plotMCd2(x,add=add,col=col,fill=fill,scalefactor=scalefactor,xlab=xlab,ylab=ylab,grid=grid,...)
   else {
      if(style!=1) warning('`style` must be 1 or 2. Defaulting to 1')
      M<-rowMeans(x[,2:ncol(x)],na.rm=TRUE)*scalefactor
      Sd<-apply(x[,2:ncol(x)],1,sd,na.rm=TRUE)*scalefactor
      if (!add) {
         plot(x[,1],M,col=NA,xlab=xlab,ylab=ylab,...)
         if(grid) grid(lwd=0.5)
         title(xlab=xlab)
      }
      lines(x[,1],M+Sd,lwd=1,col=col)
      lines(x[,1],M-Sd,lwd=1,col=col)
      polygon(c(x[,1], rev(x[,1])), c(M+Sd, rev(M-Sd)), col=col, border=NA)
   }
}

# Alternative style for plotting density models, used as default for growth rates
plotMCd2<-function(x,add=FALSE,col=rgb(0,0,0.8,0.8),fill=rgb(.5,.5,.5,.5),scalefactor=1,ylab='Density',xlab='Calendar Year BP',grid=FALSE,...) {
   M<-rowMeans(x[,2:ncol(x)],na.rm=TRUE)*scalefactor
   Sd<-apply(x[,2:ncol(x)],1,sd,na.rm=TRUE)*scalefactor
   if (!add) {
      plot(x[,1],M,col=NA,xlab=xlab,ylab=ylab,...)
      if(grid) grid(lwd=0.75)
      title(xlab=xlab)
   }
   lines(x[,1],M,lwd=2,col=col)
   lines(x[,1],M+Sd,lwd=1,lty=2,col=col)
   lines(x[,1],M-Sd,lwd=1,lty=2,col=col)
   polygon(c(x[,1], rev(x[,1])), c(M+Sd, rev(M-Sd)), col=fill, border=NA)
}

# Method for adding an outline of a density model to the current plot
# e.g. lines(MyDensityModel)
lines.MCd<-function(x,scalefactor=1, ...) {
   M<-rowMeans(x[,2:ncol(x)],na.rm=TRUE)*scalefactor
   Sd<-apply(x[,2:ncol(x)],1,sd,na.rm=TRUE)*scalefactor
   lines(x[,1],M+Sd, ...)
   lines(x[,1],M-Sd, ...)
}
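# Example (sketch, continuing with the 'dens' object from above):
#   plot(dens)             # style 1: mean +/- 1 sd envelope
#   plot(dens, style=2)    # style 2: mean line with dashed +/- 1 sd bounds
#   lines(dens, col='red') # outline added to the current plot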
# Method for calculating the median of a density model
median.MCd<-function(x) {
   out<-matrix(nrow=nrow(x),ncol=2)
   out[,1]<-x[,1]
   out[,2]<-apply(x[,2:ncol(x)],1,median,na.rm=TRUE)
   return(out)
}

# Function to align two density models -- the values of 'A' will be interpolated
# to the timestamps of 'B'
align_densities<-function(A, B) {
   out<-B[,1:ncol(A)]
   for(N in 2:ncol(A)) out[,N]<-approx(x=A[,1],y=A[,N],xout=B[,1])$y
   class(out)<-'MCd'
   return(out)
}

# Function to calculate the ratio of two KDE models
# Not yet tested for KDEs of different resolutions
`/.MCd`<-function(A, B) {
   # First extrapolate the shorter (in time) KDE to the longer one
   spanA<-diff(range(A[,1]))
   spanB<-diff(range(B[,1]))
   if(spanA > spanB) B<-align_densities(B,A) else A<-align_densities(A,B)
   # Use the mean±sd as the envelope to divide by
   AM<-rowMeans(A[,2:ncol(A)],na.rm=TRUE)
   ASd<-apply(A[,2:ncol(A)],1,sd,na.rm=TRUE)
   BM<-rowMeans(B[,2:ncol(B)],na.rm=TRUE)
   BSd<-apply(B[,2:ncol(B)],1,sd,na.rm=TRUE)
   out<-matrix(nrow=nrow(A),ncol=3)
   out[,1]<-A[,1]
   out[,2]<-(AM+ASd)/(BM+BSd)
   out[,3]<-(AM-ASd)/(BM-BSd)
   class(out)<-'MCd_summary'
   return(out)
}

# Mean value of a density model, optionally between two points
mean.MCd<-function(A, from=NULL, to=NULL) {
   if(is.null(from)) from<-min(A[,1])
   if(is.null(to)) to<-max(A[,1])
   return(mean(A[A[,1]>=from & A[,1]<=to, 2:ncol(A)], na.rm=TRUE))
}

# Standard deviation method for density models
#
# 'sd' is not a generic function so need to hijack its default
# Note to self to remember to add the following when / if this becomes a proper package:
# formals(sd.default) <- c(formals(sd.default), alist(... = ))
sd <- function(x, ...) UseMethod("sd")
sd.default <- stats::sd
sd.MCd<-function(A, from=NULL, to=NULL) {
   if(is.null(from)) from<-min(A[,1])
   if(is.null(to)) to<-max(A[,1])
   return(sd(A[A[,1]>=from & A[,1]<=to, 2:ncol(A)], na.rm=TRUE))
}

# Method for plotting summary form of density models
plot.MCd_summary<-function(x,add=FALSE,col=rgb(0,0,0.8,0.5),scalefactor=1,xlab='Calendar Year BP',grid=TRUE,...) {
   x<-x[!is.na(x[,2]),]
   x<-x[!is.na(x[,3]),]
   if (!add) {
      plot(x[,1],x[,2],col=NA,xlab=xlab,ylab='Density',...)
      if(grid) grid(lwd=0.5)
      title(xlab=xlab)
   }
   lines(x[,1],x[,2],lwd=1,col=col)
   lines(x[,1],x[,3],lwd=1,col=col)
   polygon(c(x[,1], rev(x[,1])), c(x[,2], rev(x[,3])), col=col, border=NA)
}

# Function to transform an object of class 'MCd' into geometric growth rate
# (in % per year) via numeric differentiation
ggr<-function(MCD, bw=100) {
   out<-MCD[-nrow(MCD),]
   for(N in 2:ncol(MCD)) out[,N]<-((diff(MCD[,N]) / MCD[-nrow(MCD),N]) / diff(MCD[,1]))*100
   class(out)<-'diffMCd'
   return(out)
}

# Method for plotting same
plot.diffMCd<-function(MCD, style=2, ..., ylab='Growth rate / %', ylim=c(-2,2))
   plot.MCd(MCD, style=style, ylab=ylab, ylim=ylim, ...)

# Extract the various probability densities for a given year from an object of class MCd
year_densities<-function(Y,MCd) MCd[which.min(abs(MCd[,1]-Y)),][-1]
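# Example (sketch, again using 'dens'; 'dens2' would be a second MCd object on a
# comparable timescale):
#   growth <- ggr(dens)        # geometric growth rate in % per year
#   plot(growth)               # plotted via the diffMCd method
#   med <- median(dens)        # per-timestamp median of the model
#   # ratio <- dens / dens2    # ratio of two density models (class MCd_summary)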
# Functions for summarising radiocarbon dates
# Firstly, a function to return the highest density interval for a date
# Heavily, er, 'influenced' by the code for same in A Parnell's Bchron !
hdr<-function(date, prob = 0.95) {
   date_a<-approx(date[,1],date[,2],c(min(date[,1]):max(date[,1])))
   ag = date_a$x
   de = date_a$y
   # Put the probabilities in order
   o = order(de)
   cu = cumsum(de[o])
   # Find which ones are above the threshold
   good_cu = which(cu>1-prob)
   good_ag = sort(ag[o][good_cu])
   # Pick out the extremes of each range
   breaks = diff(good_ag)>1
   where_breaks = which(diff(good_ag)>1)
   n_breaks = sum(breaks) + 1
   # Store output
   out = vector('list', length = n_breaks)
   low_seq = 1
   high_seq = ifelse(length(where_breaks)==0, length(breaks), where_breaks[1])
   for(i in 1:n_breaks) {
      out[[i]] = c(good_ag[low_seq], good_ag[high_seq])
      curr_dens = round(100*sum(de[o][seq(good_cu[low_seq], good_cu[high_seq])]),1)
      names(out)[[i]] = paste0(as.character(curr_dens),'%')
      low_seq = high_seq + 1
      high_seq = ifelse(i < n_breaks-1, where_breaks[i+1], length(good_ag))
   }
   return(out)
}

# [gap in source: a block of code is lost here, including the body of the
#  SPD bootstrap constructor; only its closing lines survive:]
#   ...0,]
#   class(out)<-'SPDboot'
#   return(out)
# }

# Find the median of a set of SPD bootstraps (i.e. 'population proxy')
median.SPDboot<-function(SPDb) {
   y<-apply(SPDb,1,FUN='median')
   x<-as.numeric(rownames(SPDb))
   return(list(x=x,y=y))
}

# Function to compare two bootstrapped SPD objects
# Uses a chi-squared test on the counts of sets of simulations whose ratio is not unity
SPDsigniftest<-function(SPDb,MOD) {
   e<-summary(SPDsignif(MOD / MOD[,ncol(MOD):1]))
   o<-summary(SPDsignif(MOD / SPDb))
   expected<-sum(e[3:4])
   observed<-sum(o[3:4])
   totalexp<-(e[1]/round(e[2]))*ncol(MOD)
   totalobs<-(e[1]/round(e[2]))*ncol(SPDb)
   ctab<-as.table(rbind(c(expected, totalexp-expected),c(observed, totalobs-observed)))
   dimnames(ctab)<-list(SPD=c('Model','Data'),c('Different','NotDiff'))
   return(chisq.test(ctab))
}

SPDsignif<-function(SPDb,probs=c(.05,.9), threshold=1) {
   SPDb<-summary(SPDb, probs=probs)
   sig_high<-SPDb[,1]>threshold
   sig_low<-SPDb[,2]<threshold
   # [gap in source: the remainder of SPDsignif and the header of the next
   #  function are lost in this copy. The surviving code below belongs to a
   #  helper that registers global mixed calibration curves for a date list
   #  'dl', given column indices 'idx' and a curve-name prefix 'cu':]
   #   ...0),]
   for(N in 1:nrow(imix)) {
      id<-paste(cu,as.character(imix[N,idx[1]]),sep='')
      code<-paste(id,"<<-mixcurves(mix=imix[",N,",idx[2]], delR=imix[",N,",idx[3]], error_delR=imix[",N,",idx[4]])",sep='')
      eval(parse(text=code))
   }
   dl$curve='intcal'
   for(N in 1:nrow(dl)) if(dl[N,idx[2]]>0) dl$curve[N]=paste(cu,as.character(dl[N,idx[1]]),sep='')
   return(dl)
}

# Plot SPDs in the style of Fernandez-Lopez et al 2019 Nature Communications
PDplot<-function(query=NULL, spd=NULL, boot=NULL, xlab='Calendar Age (BP)', add.legend=TRUE, ...) {
   if(is.null(spd)) spd<-PDSPD(query)
   if(is.null(boot)) boot<-SPDboot(spd)
   plot(boot,probs=c(.05,.9),col='lightgrey', xlab=xlab, ...)
   plot(boot,col='darkgrey',add=TRUE)
   plot(spd,type='l',lwd=1.5,col='red',add=TRUE)
   lines(median(boot),lwd=2)
   if(add.legend) PDlegend()
}

PDlegend<-function(pos='topleft',pch=c(NA,'-','-',NA), lty=c(1,NA,NA,1),
   col=c('red','lightgrey','darkgrey','black'),
   legend=c('Original SPD','Bootstrap 95.4% C.I.','Bootstrap 68.2% C.I.','Bootstrap Median'),
   pt.cex=5.5)
   legend(pos,pch=pch,lty=lty,col=col,legend=legend,pt.cex=pt.cex)

# Taphonomic corrections for SPDs
taphSPD<-function(SPD, correction=1, dT=0){
   if(!correction %in% c(1,2)) warning('`correction` should be 1 (Surovell et al) or 2 (Williams)')
   original_sum<-sum(SPD$sum)
   yrs<-abs(SPD$yrs)+dT
   if(correction==1) taph<-5726442 * ((yrs + 2176.4)^-1.3925309)
   if(correction==2) taph<-21070000 * ((yrs + 2754)^-1.526)
   SPD$sum<-SPD$sum / taph
   new_sum<-sum(SPD$sum)
   # Make sure the old and new SPDs integrate to the same number
   SPD$sum<-SPD$sum * original_sum/new_sum
   class(SPD)<-'c14sum'
   return(SPD)
}
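# Example (sketch; PDSPD(), SPDboot() and the SPDboot plotting/summary methods
# live elsewhere in rowcal, and 'mydates' is a hypothetical date list):
#   spd   <- PDSPD(mydates)
#   PDplot(spd=spd)                      # bootstrapped SPD, FLdP et al. 2019 style
#   spd_t <- taphSPD(spd, correction=1)  # Surovell et al. taphonomic correction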
# Function that performs multiple LOESS regressions of a date list against
# another value or proxy, e.g. a depth profile or palaeodietary trend
MCloess<-function(dl, proxy, Nboot=200, default_curve='intcal', plot=FALSE, ...){
   if (ncol(dl)<2 | ncol(dl)>3) stop("input dl should be a matrix of two or three columns (BP, error, [calcurve])")
   if (ncol(dl)==2) dl[,3]<-default_curve
   if (length(proxy) != nrow(dl)) stop('The number of 14C dates and the number of data points should be the same')
   out<-matrix(NA, nrow=nrow(dl), ncol=Nboot)
   outl<-out; outu<-out; boots<-out
   for(N in 1:Nboot) {
      boot<-MCmix(dl)
      if(plot) points(boot,proxy,col=rgb(0.7,0,0,0.05),pch=3)
      b_order<-order(boot)
      loem<-loess(proxy ~ boot)
      prediction<-predict(loem,se=TRUE)
      out[,N]<-prediction$fit[b_order]
      outu[,N]<-prediction$fit[b_order]+prediction$se.fit[b_order]
      outl[,N]<-prediction$fit[b_order]-prediction$se.fit[b_order]
      boots[,N]<-boot[b_order]
   }
   dl$value=proxy
   output<-list(data=dl, bootstraps=boots, loess=out, se=cbind(outl,outu), Nboot=Nboot)
   class(output)<-'MCl'
   if(plot==FALSE) return(output) else invisible(output)
}

# Method for plotting the multiple Monte Carlo LOESS models
plot.MCl<-function(l,se=TRUE,dates=FALSE,scale.dates=7,add=FALSE,col.se=rgb(0.2,0.2,0.2,0.2),col=rgb(0.1,0.1,0.1,0.5),col.dates=rgb(.8,0,0,.1),xlab='Calendar Year BP',ylab='Value',grid=TRUE,...) {
   x<-l$loess
   yrs<-rowMeans(l$bootstraps,na.rm=TRUE)
   M<-rowMeans(x,na.rm=TRUE)
   if (!add) {
      plot(yrs,M,col=NA,xlab=xlab,ylab=ylab,...)
      if(grid) grid(lwd=0.5)
   }
   scale.dates<-diff(par('usr')[3:4])*scale.dates
   if(dates) {
      for(N in 1:nrow(l$data)) {
         d<-eval(parse(text=paste("rowcal(",l$data[N,1],",",l$data[N,2],",calcurve=",l$data[N,3],")",sep='')))
         fill(d,col=col.dates,scale=scale.dates,offset=l$data[N,4])   # fill() is defined elsewhere in rowcal
      }
   }
   if(se) {
      x<-l$se
      Me<-rowMeans(x,na.rm=TRUE)
      Sd<-apply(x,1,sd,na.rm=TRUE)
      lines(yrs,Me+Sd,lwd=1,col=col.se)
      lines(yrs,Me-Sd,lwd=1,col=col.se)
      polygon(c(yrs, rev(yrs)), c(Me+Sd, rev(Me-Sd)), col=col.se, border=NA)
   }
   x<-l$loess
   Sd<-apply(x,1,sd,na.rm=TRUE)
   lines(yrs,M+Sd,lwd=1,col=col)
   lines(yrs,M-Sd,lwd=1,col=col)
   polygon(c(yrs, rev(yrs)), c(M+Sd, rev(M-Sd)), col=col, border=NA)
}

lines.MCl<-function(l, se=TRUE, col=rgb(0,0,0,.1), col.se=rgb(.5,.5,.5,.05), ...) {
   bootstraps<-l$bootstraps; lse<-l$se; lloess<-l$loess
   for(N in 1:l$Nboot) {
      lines(bootstraps[,N], lloess[,N], col=col)
      if(se) {
         lines(bootstraps[,N], lse[,N], col=col.se)
         lines(bootstraps[,N], lse[,l$Nboot+N], col=col.se)
      }
   }
}

# Model fitting, inspired by Silva's contributions to FLdP et al 2019
# Exponential for regression fitting --------------------------------------
fexp<-function(x, a, b, y0) return(a*exp(b*x)+y0)

SPDfitexp<-function(SPD, start, end, ndates=1000) {
   out<-SPD
   if(inherits(SPD,'SPDboot')) {
      n<-median(SPD)
      out<-list(dates=sample(1:10000,ndates, replace=TRUE), stds=sample(20:80,ndates, replace=TRUE), curves=NA, yrs=NA, sum=NA)
   }
   if(inherits(SPD,'c14sum')) {
      n<-list(x=SPD$yrs, y=SPD$sum)
      out<-SPD
   }
   n<-as.data.frame(n)
   n[is.na(n)]<-0
   aux<-subset(n, x>=start & x<=end)
   model<-nlsLM(y~fexp(x, a, b, 0), data=aux, start=list(b=0.001, a=1), control=nls.lm.control(maxiter=1000, maxfev=5000))
   out$yrs<-aux[,1]
   out$sum<-predict(model)
   class(out)<-'c14sum'
   return(out)
}

# Function to turn any x,y model into a structure resembling an SPD
as.SPD<-function(model, dates=rep(1000,length(model$x)), stds=rep(30,length(model$x))) {
   out<-list(dates=NA, stds=NA, curves=NA, yrs=model$x, sum=model$y)
   class(out)<-'c14sum'
   return(out)
}
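# Example (sketch; 'dl' is a hypothetical three-column date list and 'proxy' a
# matching vector of measurements, e.g. d13C values):
#   dl    <- data.frame(BP=seq(5100, 4400, by=-100), SD=40, cc='intcal')
#   proxy <- rnorm(8, mean=-21, sd=0.5)    # stand-in measurements
#   m <- MCloess(dl, proxy, Nboot=200)
#   plot(m)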
# Function for correlation analysis for SPDboot objects
SPDboot_cor<-function(SPDb, proxy, start=as.numeric(rownames(SPDb))[1],
   end=as.numeric(rownames(SPDb))[nrow(SPDb)], method='spearman', ...) {
   # subset SPD if required
   yrs<-as.numeric(rownames(SPDb))
   SPDb<-SPDb[which(yrs > start & yrs < end),]
   yrs<-yrs[which(yrs > start & yrs < end)]
   # interpolate the proxy to the same timestamps
   P<-approx(proxy[,1],proxy[,2],xout=yrs)$y
   # calculate correlation stats
   est<-c()
   p.value<-c()
   for(N in 1:ncol(SPDb)) {
      CT<-cor.test(x=SPDb[,N],y=P,method=method,...)
      est[N]<-CT$estimate
      p.value[N]<-CT$p.value
   }
   out<-data.frame('mean'=c(NA,NA),'sd'=c(NA,NA))
   rownames(out)<-c('estimate','p-value')
   out[1,]<-c(mean(est),sd(est))
   out[2,]<-c(mean(p.value),sd(p.value))
   return(out)
}
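# Example (sketch; 'boot' is an SPDboot object and 'proxy' a two-column
# year/value matrix such as a climate record on the same BP timescale):
#   SPDboot_cor(boot, proxy, start=6000, end=3000, method='spearman')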