### Supplement. R script for studying the effect of stressors on functional measures.
## Written by Cayetano Gutierrez-Canovas. Aquatic Ecology. Universidad de Murcia (Spain). Email: cayeguti@um.es

setwd("/Users/Tano/Documents/Trabajos en curso/Functional proposal/scripts") ### Setting the working directory
#setwd("/data")

### Loading FuncNiche functions
source("Fuzzy Functions.R")
library(plyr)

### Loading matrices

# Taxonomic data
tax.mat.s<-read.table("taxa_sal.txt",h=T) # Loading site x taxa matrix (salinity dataset)
tax.mat.s$site<-NULL
apply(tax.mat.s,2,sum)>0 -> sel.taxa # selecting only occurring taxa
tax.mat.s[,sel.taxa]->tax.mat.s

tax.mat.lu<-read.table("taxa_lu.txt",h=T) # Loading site x taxa matrix (land-use dataset)
tax.mat.lu$site<-NULL
apply(tax.mat.lu,2,sum)>0 -> sel.taxa # selecting only occurring taxa
tax.mat.lu[,sel.taxa]->tax.mat.lu

# Environmental data
env.s<-read.table("env_sal.txt",h=T) # Loading site x environment matrix (salinity dataset)
cond.s<-log(env.s$cond)

env.lu<-read.table("env_lu.txt",h=T) # Loading site x environment matrix (land-use dataset)
lu<-asin(sqrt(env.lu$pol/100))

# Trait data
traits.s<-read.table("traits_sal.txt",h=T) # Loading taxa x trait matrix (salinity dataset)
rownames(traits.s)<-traits.s$CodeTaxon
traits.s$CodeTaxon<-NULL
apply(traits.s,2,sum)>0 -> sel.t # removing categories with sum=0
traits.s<-traits.s[,sel.t]

traits.blo<-c(6,2,3,4,7,4,4,5,8,9,6,3,3) # assigning trait blocks
names(traits.blo)<-c("size","life.cycle","generations","aquatic.stage","reproduction","dispersal","resistance","respiration","locomotion","food","feeding.habits","body.form","Sclerification")
traits.s<-prep.fuzzy.df(traits.s,traits.blo) # transforming fuzzy codes into percentages

traits.lu<-read.table("traits_lu.txt",h=T) # Loading taxa x trait matrix (land-use dataset)
rownames(traits.lu)<-traits.lu$CodeTaxon
traits.lu$CodeTaxon<-NULL
apply(traits.lu,2,sum)>0 -> sel.t # removing categories with sum=0
traits.lu<-traits.lu[,sel.t]
traits.lu<-prep.fuzzy.df(traits.lu,traits.blo) # transforming fuzzy codes into percentages

### Selecting traits that respond to both stressors

## Creating community trait matrices (site x trait)
tax.mat.s.PC<-apply(tax.mat.s,1,function(x) x/sum(x)) ## taxon matrix to relative abundances
tax.mat.s.PCt<-t(tax.mat.s.PC)
tax.mat.lu.PC<-apply(tax.mat.lu,1,function(x) x/sum(x))
tax.mat.lu.PCt<-t(tax.mat.lu.PC)
com.traits.s<-as.matrix(tax.mat.s.PCt)%*%as.matrix(traits.s) ## calculating the abundance-weighted trait category matrix
com.traits.lu<-as.matrix(tax.mat.lu.PCt)%*%as.matrix(traits.lu)

## Absolute correlation of each trait category with the putative stressor (coefficient without sign)
cor.traits.s<-data.frame(cor=abs(cor(com.traits.s,cond.s)))
cor.traits.lu<-data.frame(cor=abs(cor(com.traits.lu,lu)))

## Assigning trait names to a vector
traits.name<-as.factor(rep(names(traits.blo),traits.blo))

## Averaging the absolute correlation per trait
ddply(cor.traits.s,.(traits.name),function(x){sum(x)/sum(x>0)})->mean.trait.cor.s
ddply(cor.traits.lu,.(traits.name),function(x){sum(x)/sum(x>0)})->mean.trait.cor.lu
cor.trait.res<-data.frame(Traits=mean.trait.cor.s[,1],Salinity=round(mean.trait.cor.s[,2],2),Land.use=round(mean.trait.cor.lu[,2],2))

# We select the 3 traits with mean absolute correlations > 0.30: "generations", "body.form" and "Sclerification"
traits.s[,c(9:11,59:61,62:64)]->sel.traits.s
traits.lu[,c(9:11,59:61,62:64)]->sel.traits.lu
traits.blo<-c(3,3,3)
names(traits.blo)<-c("generations","body.form","Sclerification")
sel.traits.s<-prep.fuzzy.df(sel.traits.s,traits.blo)
sel.traits.lu<-prep.fuzzy.df(sel.traits.lu,traits.blo)
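## Illustration (added note, not part of the original analysis): a minimal toy example of the
## community-weighted trait calculation used above. Each row of the site x taxa matrix is
## rescaled to relative abundances and multiplied by the taxa x trait matrix, giving one
## weighted trait profile per site. All object names here (toy.*) are hypothetical.
toy.tax<-matrix(c(2,1,0, 0,3,1),nrow=2,byrow=TRUE,
                dimnames=list(c("site1","site2"),c("taxA","taxB","taxC")))
toy.traits<-matrix(c(1,0, 0.5,0.5, 0,1),nrow=3,byrow=TRUE,
                   dimnames=list(c("taxA","taxB","taxC"),c("cat1","cat2")))
toy.tax.PC<-t(apply(toy.tax,1,function(x) x/sum(x))) # relative abundances per site
toy.tax.PC%*%toy.traits # site x trait-category weighted means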
### Salinity
### Analyses to estimate functional diversity components and their significance against null models

### Looking for the number of significant axes
num.pca.axes(sel.traits.s)
m<-4 # retaining the first 4 axes

### Assessing the optimum number of randomizations (rdt)
test<-c(10, 25, 50, 75, 100, 2000)
funcSpace(sel.traits.s,traits.blo,m,max(test))->r.pca.s
det.rdt(tax.mat.s,r.pca.s,m,test.rdt=test,calc.pca=F)->n.rdt
n.rdt$rdt.table # Table to select the optimum number of rdt
rdt<-25 # setting rdt

### Calculating functional components
funcSpace(sel.traits.s,traits.blo,m,rdt)->r.pca.s
funcNiche(tax.mat.s,r.pca.s,m,rdt)->fN.res.s
write.table(fN.res.s,"fN_res_sal.txt")

### Null models
simul.func(tax.mat.s,r.pca.s,m,rdt,999)->sim.sal.s
test.simul(tax.mat.s,fN.res.s,sim.sal.s,cond.s)->sim.out.s

### Land-use
### Analyses to estimate functional diversity components and their significance against null models

### Looking for the number of significant axes
num.pca.axes(sel.traits.lu)
m<-4 # retaining the first 4 axes

### Assessing the optimum number of randomizations (rdt)
test<-c(10, 25, 50, 75, 100, 2000)
funcSpace(sel.traits.lu,traits.blo,m,max(test))->r.pca.lu
det.rdt(tax.mat.lu,r.pca.lu,m,test.rdt=test,calc.pca=F)->n.rdt
n.rdt$rdt.table # Table to select the optimum number of rdt
rdt<-25 # setting rdt

### Calculating functional components
funcSpace(sel.traits.lu,traits.blo,m,rdt)->r.pca.lu
funcNiche(tax.mat.lu,r.pca.lu,m,rdt)->fN.res.lu
write.table(fN.res.lu,"fN_res_lu.txt")

### Null models
simul.func(tax.mat.lu,r.pca.lu,m,rdt,999)->sim.lu
test.simul(tax.mat.lu,fN.res.lu,sim.lu,lu)->sim.out.lu

### Models and plots relating functional components and stressors

## Variable names for the salinity dataset
fN.res.s$tFRic->tFRic.s
fN.res.s$FSim->FSim.s
fN.res.s$FRic->FRic.s
fN.res.s$FDis->FDis.s
fN.res.s$FR->FR.s

## Variable names for the land-use dataset
fN.res.lu$tFRic->tFRic.lu
fN.res.lu$FSim->FSim.lu
fN.res.lu$FRic->FRic.lu
fN.res.lu$FDis->FDis.lu
fN.res.lu$FR->FR.lu

# Calculating taxon richness
apply(tax.mat.s > 0, 1, sum)->taxa.s
apply(tax.mat.lu > 0, 1, sum)->taxa.lu

### Plot labels for functional variables (fv), stressors (st) and datasets (ds)
## Functional components
fv1<-"Taxon F. Richness"
fv2<-"F. Dispersion"
fv3<-"F. Richness"
fv4<-"F. Redundancy"
fv5<-"F. Similarity"
## Datasets
ds1<-"Salinity"
ds3<-"Land use"
## Stressors
st1<-"log(Conductivity)"
st3<-"arcsin-sqrt(Land-use intensity)"
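## Note (added for clarity, not part of the original analysis): the models below are Gaussian
## GLMs with an identity link, so each glm() call is equivalent to an ordinary least-squares
## lm() fit and the explained deviance reported later equals R-squared. scale() standardizes
## the stressor (mean 0, SD 1), so slopes are changes per standard deviation of the stressor.
## A minimal check on simulated data (toy.x and toy.y are hypothetical):
toy.x<-rnorm(50); toy.y<-2*toy.x+rnorm(50)
all.equal(coef(glm(toy.y~scale(toy.x))),coef(lm(toy.y~scale(toy.x)))) # TRUE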
### Models relating functional components and stressors

## Mean taxon functional niche
summary(tFRic.s1<-(glm(tFRic.s~scale(cond.s))))
summary(tFRic.lu1<-(glm(tFRic.lu~scale(lu))))

## Functional similarity
FSim.s2<-log(FSim.s[which(taxa.s>1)])
cond.s2<-cond.s[which(taxa.s>1)] # selecting sites with taxon richness > 1
summary(FSim.s1<-(glm(FSim.s2~scale(cond.s2))))
log(FSim.lu[which(taxa.lu>1)])->FSim.lu2
lu[which(taxa.lu>1)]->lu2 # selecting sites with taxon richness > 1
summary(FSim.lu1<-(glm(FSim.lu2~scale(lu2))))

## Functional richness
summary(FRic.s1<-(glm(FRic.s~scale(cond.s))))
summary(FRic.lu1<-(glm(FRic.lu~scale(lu))))

## Functional dispersion
summary(FDis.s1<-(glm(FDis.s~scale(cond.s))))
summary(FDis.lu1<-(glm(FDis.lu~scale(lu))))

## Functional redundancy
summary(FR.s1<-(glm(log(FR.s+1)~scale(cond.s))))
summary(FR.lu1<-(glm(log(FR.lu+1)~scale(lu))))

### Plots relating functional components and stressors
par(mar=c(4.3, 4.7, 2.8, 1))
par(mfrow=c(5,2),pch=19)

## Creating the sequence of predictor values used to draw the fitted lines
cond.seq<-seq(min(cond.s),max(cond.s),length.out=1000)
lu.seq<-seq(min(lu),max(lu),length.out=1000)
## For functional similarity, only communities with > 1 taxon are used
cond.seq2<-seq(min(cond.s[which(taxa.s>1)]),max(cond.s[which(taxa.s>1)]),length.out=1000)
lu.seq2<-seq(min(lu[which(taxa.lu>1)]),max(lu[which(taxa.lu>1)]),length.out=1000)

## Mean taxon functional niche
plot(cond.s,tFRic.s,col="grey",xlab="",ylab=fv1,cex=2,cex.axis=1.5,cex.lab=1.75,cex.main=2,main=ds1,ylim=c(min(c(tFRic.s,tFRic.lu)),max(c(tFRic.s,tFRic.lu))))
lines(cond.seq,predict(tFRic.s1,data.frame(cond.s=cond.seq)))
plot(lu,tFRic.lu,col="grey",xlab="",ylab="",cex=2,cex.axis=1.5,cex.lab=1.75,cex.main=2,main=ds3,ylim=c(min(c(tFRic.s,tFRic.lu)),max(c(tFRic.s,tFRic.lu))))
lines(lu.seq,predict(tFRic.lu1,data.frame(lu=lu.seq)))

## Functional similarity
plot(cond.s2,FSim.s2,col="grey",xlab="",ylab="log(F. Similarity)",cex=2,cex.axis=1.5,cex.lab=1.75,cex.main=2,ylim=c(min(c(log(FSim.s),log(FSim.lu))),max(c(log(FSim.s),log(FSim.lu)))))
lines(cond.seq2,predict(FSim.s1,data.frame(cond.s2=cond.seq2)))
plot(lu2,FSim.lu2,col="grey",xlab="",ylab="",cex=2,cex.axis=1.5,cex.lab=1.75,cex.main=2,ylim=c(min(c(log(FSim.s),log(FSim.lu))),max(c(log(FSim.s),log(FSim.lu)))))
lines(lu.seq2,predict(FSim.lu1,data.frame(lu2=lu.seq2)))

## Functional richness
plot(cond.s,FRic.s,col="grey",xlab="",ylab=fv3,cex=2,cex.axis=1.5,cex.lab=1.75,ylim=c(min(c(FRic.s,FRic.lu)),max(c(FRic.s,FRic.lu))))
lines(cond.seq,predict(FRic.s1,data.frame(cond.s=cond.seq)))
plot(lu,FRic.lu,col="grey",xlab="",ylab="",cex=2,cex.axis=1.5,cex.lab=1.75,cex.main=2,ylim=c(min(c(FRic.s,FRic.lu)),max(c(FRic.s,FRic.lu))))
lines(lu.seq,predict(FRic.lu1,data.frame(lu=lu.seq)))

## Functional dispersion
plot(cond.s,FDis.s,col="grey",xlab="",ylab=fv2,cex=2,cex.axis=1.5,cex.lab=1.75,cex.main=2,ylim=c(min(c(FDis.s,FDis.lu)),max(c(FDis.s,FDis.lu))))
lines(cond.seq,predict(FDis.s1,data.frame(cond.s=cond.seq)))
plot(lu,FDis.lu,col="grey",xlab="",ylab="",cex=2,cex.axis=1.5,cex.lab=1.75,cex.main=2,ylim=c(min(c(FDis.s,FDis.lu)),max(c(FDis.s,FDis.lu))))
lines(lu.seq,predict(FDis.lu1,data.frame(lu=lu.seq)))

## Functional redundancy
plot(cond.s,log(FR.s+1),col="grey",xlab=st1,ylab="log(F. Redundancy)",cex=2,cex.axis=1.5,cex.lab=1.75,cex.main=2,ylim=c(min(c(log(FR.s+1),log(FR.lu+1))),max(c(log(FR.s+1),log(FR.lu+1)))))
lines(cond.seq,predict(FR.s1,data.frame(cond.s=cond.seq)))
plot(lu,log(FR.lu+1),col="grey",xlab=st3,ylab="",cex=2,cex.axis=1.5,cex.lab=1.75,cex.main=2,ylim=c(min(c(log(FR.s+1),log(FR.lu+1))),max(c(log(FR.s+1),log(FR.lu+1)))))
lines(lu.seq,predict(FR.lu1,data.frame(lu=lu.seq)))
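## Note (added for clarity, not part of the original analysis): the result table below
## summarises each GLM by its explained deviance, D2 = 1 - residual deviance / null deviance,
## expressed as a percentage. A small hypothetical helper (not used below) showing the same
## calculation on one of the fitted models:
expl.dev<-function(mod,digits=1) round((1-mod$deviance/mod$null.deviance)*100,digits)
expl.dev(FRic.s1) # same value as written into the result table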
### Creating a result table for the models

## Creating the matrix
dsets<-2 # number of datasets
fv<-5 # number of functional variables
res.rows<-fv*dsets # number of rows
res.matrix<-matrix(nrow=res.rows, ncol=5) # creating the matrix for model results
colnames(res.matrix)<-c("Functional variable","Dataset","Model","P-value","Explained deviance")
dset.label<-rep(c(ds1,ds3),fv)
dset.label2<-rep(c(".s",".lu"),fv) # dataset suffixes (not used below)
fv.label1<-c(rep(fv1,dsets),rep(fv2,dsets),rep(fv3,dsets),rep(fv4,dsets),rep(fv5,dsets))
fv.label2<-c(rep("in",dsets),rep("ar",dsets),rep("cn",dsets),rep("fd",dsets)) # short codes (not used below)

## Extracting the p-values of each model
pv.FRic.s1 <- summary(FRic.s1)$coef[, "Pr(>|t|)"]
pv.tFRic.s1 <- summary(tFRic.s1)$coef[, "Pr(>|t|)"]
pv.FDis.s1 <- summary(FDis.s1)$coef[, "Pr(>|t|)"]
pv.FR.s1 <- summary(FR.s1)$coef[, "Pr(>|t|)"]
pv.FSim.s1 <- summary(FSim.s1)$coef[, "Pr(>|t|)"]
pv.FRic.lu1 <- summary(FRic.lu1)$coef[, "Pr(>|t|)"]
pv.tFRic.lu1 <- summary(tFRic.lu1)$coef[, "Pr(>|t|)"]
pv.FDis.lu1 <- summary(FDis.lu1)$coef[, "Pr(>|t|)"]
pv.FR.lu1 <- summary(FR.lu1)$coef[, "Pr(>|t|)"]
pv.FSim.lu1 <- summary(FSim.lu1)$coef[, "Pr(>|t|)"]

## Writing the results for each GLM
rm<-2 # decimal places for model coefficients
rp<-1 # decimal places for explained deviance
rpv<-3 # decimal places for p-values

## Model equations
model.FRic.s1<-paste("y=",round(FRic.s1$coef[1],rm),if (sign(FRic.s1$coef[2])==1) {"+"},round(FRic.s1$coef[2],rm),"x",sep="")
model.FRic.lu1<-paste("y=",round(FRic.lu1$coef[1],rm),if (sign(FRic.lu1$coef[2])==1) {"+"},round(FRic.lu1$coef[2],rm),"x",sep="")
model.FDis.s1<-paste("y=",round(FDis.s1$coef[1],rm),if (sign(FDis.s1$coef[2])==1) {"+"},round(FDis.s1$coef[2],rm),"x",sep="")
model.FDis.lu1<-paste("y=",round(FDis.lu1$coef[1],rm),if (sign(FDis.lu1$coef[2])==1) {"+"},round(FDis.lu1$coef[2],rm),"x",sep="")
model.tFRic.s1<-paste("y=",round(tFRic.s1$coef[1],rm),if (sign(tFRic.s1$coef[2])==1) {"+"},round(tFRic.s1$coef[2],rm),"x",sep="")
model.tFRic.lu1<-paste("y=",round(tFRic.lu1$coef[1],rm),if (sign(tFRic.lu1$coef[2])==1) {"+"},round(tFRic.lu1$coef[2],rm),"x",sep="")
model.FR.s1<-paste("y=",round(FR.s1$coef[1],rm),if (sign(FR.s1$coef[2])==1) {"+"},round(FR.s1$coef[2],rm),"x",sep="")
model.FR.lu1<-paste("y=",round(FR.lu1$coef[1],rm),if (sign(FR.lu1$coef[2])==1) {"+"},round(FR.lu1$coef[2],rm),"x",sep="")
model.FSim.s1<-paste("y=",round(FSim.s1$coef[1],rm),if (sign(FSim.s1$coef[2])==1) {"+"},round(FSim.s1$coef[2],rm),"x",sep="")
model.FSim.lu1<-paste("y=",round(FSim.lu1$coef[1],rm),if (sign(FSim.lu1$coef[2])==1) {"+"},round(FSim.lu1$coef[2],rm),"x",sep="")
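## Note (added for clarity, not part of the original analysis): the ten paste() calls above
## could also be generated with a small hypothetical helper; a sketch shown for reference only
## (not used below):
fmt.eq<-function(mod,digits=2){
  b<-round(coef(mod),digits)
  paste0("y=",b[1],if (b[2]>0) "+",b[2],"x")
}
fmt.eq(FRic.s1) # same string as model.FRic.s1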
## Writing the matrix rows
res.matrix[1,]<-c(fv.label1[1],dset.label[1],model.tFRic.s1,round(pv.tFRic.s1[2],rpv),round((1-(tFRic.s1$deviance/tFRic.s1$null.deviance))*100,rp))
res.matrix[2,]<-c(fv.label1[2],dset.label[2],model.tFRic.lu1,round(pv.tFRic.lu1[2],rpv),round((1-(tFRic.lu1$deviance/tFRic.lu1$null.deviance))*100,rp))
res.matrix[3,]<-c(fv.label1[3],dset.label[3],model.FDis.s1,round(pv.FDis.s1[2],rpv),round((1-(FDis.s1$deviance/FDis.s1$null.deviance))*100,rp))
res.matrix[4,]<-c(fv.label1[4],dset.label[4],model.FDis.lu1,round(pv.FDis.lu1[2],rpv),round((1-(FDis.lu1$deviance/FDis.lu1$null.deviance))*100,rp))
res.matrix[5,]<-c(fv.label1[5],dset.label[5],model.FRic.s1,round(pv.FRic.s1[2],rpv),round((1-(FRic.s1$deviance/FRic.s1$null.deviance))*100,rp))
res.matrix[6,]<-c(fv.label1[6],dset.label[6],model.FRic.lu1,round(pv.FRic.lu1[2],rpv),round((1-(FRic.lu1$deviance/FRic.lu1$null.deviance))*100,rp))
res.matrix[7,]<-c(fv.label1[7],dset.label[7],model.FR.s1,round(pv.FR.s1[2],rpv),round((1-(FR.s1$deviance/FR.s1$null.deviance))*100,rp))
res.matrix[8,]<-c(fv.label1[8],dset.label[8],model.FR.lu1,round(pv.FR.lu1[2],rpv),round((1-(FR.lu1$deviance/FR.lu1$null.deviance))*100,rp))
res.matrix[9,]<-c(fv.label1[9],dset.label[9],model.FSim.s1,round(pv.FSim.s1[2],rpv),round((1-(FSim.s1$deviance/FSim.s1$null.deviance))*100,rp))
res.matrix[10,]<-c(fv.label1[10],dset.label[10],model.FSim.lu1,round(pv.FSim.lu1[2],rpv),round((1-(FSim.lu1$deviance/FSim.lu1$null.deviance))*100,rp))

res.matrix ## having a look at the result matrix
write.csv(res.matrix, file = "res_models.txt") ## writing the result matrix
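## Note (added for clarity, not part of the original script): write.csv() stores the table in
## CSV format (including row names) even though the file extension is .txt; it can be read
## back for checking, e.g. (res.check is a hypothetical name):
res.check<-read.csv("res_models.txt",row.names=1)
head(res.check)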