Post date: Jul 20, 2019 2:11:22 AM
First, the model. I fit a hierarchical Bayesian GLM with coefficients for linear (directional) and quadratic (stabilizing or disruptive) selection on RG and GB (i.e., four selection coefficients in each model). Each coefficient thus describes selection of a given form on a given trait while controlling for the other forms/traits (i.e., these are selection gradients/partial regression coefficients). Because the model is hierarchical, we get coefficient estimates for each block and overall (i.e., the population-level average from which each experiment/block is assumed to be drawn). With n = 3 blocks, there is considerable uncertainty at the highest level, so I will mostly focus on the individual blocks.
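Concretely, for individual i in block j, survival is modeled on the logit scale. The linear predictor implied by the code below (traits standardized to mean zero and unit SD before squaring) is

logit(p_ij) = alpha_j + beta_1j*RG_i + beta_2j*GB_i + beta_3j*RG_i^2 + beta_4j*GB_i^2,

with the block-level coefficients drawn in turn from population-level means.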
Here are the posterior medians and 95% equal-tailed posterior intervals (ETPIs) for the coefficients in each block:
Block 1:
AC
LinRG = -0.773 (-1.584,0.108)
LinGB = 0.791 (0.090,1.672)
QuadRG = 0.895 (0.448,1.449)
QuadGB = 0.581 (0.033,1.153)
MM
LinRG = 0.342 (-0.492,1.490)
LinGB = 0.017 (-0.613,0.615)
QuadRG = 0.179 (-0.335,0.644)
QuadGB = 0.078 (-0.334,0.514)
Block 2:
AC
LinRG = -0.909 (-1.750,-0.134)
LinGB = 0.055 (-0.729,0.794)
QuadRG = 0.955 (0.502,1.609)
QuadGB = 0.598 (0.101,1.138)
MM
LinRG = 0.225 (-0.529,1.142)
LinGB = 0.127 (-0.407,0.827)
QuadRG = 0.209 (-0.317,0.697)
QuadGB = 0.114 (-0.284,0.524)
Block 3:
AC
LinRG = -1.026 (-2.643,-0.186)
LinGB = 1.093 (-0.091,4.131)
QuadRG = 0.591 (-1.197,1.271)
QuadGB = -0.869 (-2.956,0.523)
MM
LinRG = -0.370 (-1.243,0.416)
LinGB = 0.005 (-0.628,0.556)
QuadRG = 0.258 (-0.193,0.731)
QuadGB = -0.018 (-0.510,0.328)
So, we have "significant" evidence of disruptive selection for AC block 1 (RG and GB) and AC block 2 (RG and GB), but not block 3. There isn't significant evidence of disruptive selection on MM (or selection at all). Note that we also detect directional selection on GB in block 1, RG in block 2 and RG in block 3 (all for AC). This is all pretty happy. But as you note, the real key is whether selection is more disruptive (bigger, more positive quadratic terms) on AC. Here are the relevant posterior probabilities for AC > MM (for each block and trait).
Block 1, RG = 0.9882
Block 1, GB = 0.9185
Block 2, RG = 0.9840
Block 2, GB = 0.9302
Block 3, RG = 0.6792
Block 3, GB = 0.1733
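Each of these is just the fraction of MCMC draws in which the AC coefficient exceeds the corresponding MM coefficient. A minimal illustration in R, where quad_rg_ac and quad_rg_mm are hypothetical vectors of posterior draws of the RG quadratic coefficient for one block (the actual computation over all 20 parameters is at the end of the script):

mean(quad_rg_ac > quad_rg_mm) ## posterior prob. of stronger disruptive selection on AC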
So, in blocks 1 and 2, disruptive selection is stronger on AC than on MM for RG (posterior probabilities > 0.98), and the results are pretty strong for GB too (> 0.9), but neither holds in block 3. All in all good, but with block 3 being the mildly annoying outlier.
Here is the R code (selectionGradBayes.R):
## final/Bayesian selection gradient analysis
## read in data
dat<-read.table("2019_Tchumash_transplant_table.csv",header=TRUE,sep=",")
mm<-which(dat$Treatment=='MM')
ac<-which(dat$Treatment=='AC')
X<-cbind(dat$RG,dat$GB)
X<-apply(X,2,scale) ## standardize each trait (mean 0, sd 1)
X2<-X^2 ## squared terms for quadratic (stabilizing/disruptive) selection
Y<-dat$Survival
## standard (non-Bayesian) GLM first
o_ac<-glm(Y[ac] ~ X[ac,1] + X[ac,2] + X2[ac,1] + X2[ac,2],family="binomial")
#Coefficients:
# Estimate Std. Error z value Pr(>|z|)
#(Intercept) -2.3025 0.3079 -7.477 7.59e-14 ***
#X[ac, 1] -0.5517 0.3213 -1.717 0.085977 .
#X[ac, 2] 0.3812 0.2379 1.602 0.109111
#X2[ac, 1] 0.5542 0.1554 3.567 0.000362 ***
#X2[ac, 2] 0.3213 0.1612 1.993 0.046229 *
o_mm<-glm(Y[mm] ~ X[mm,1] + X[mm,2] + X2[mm,1] + X2[mm,2],family="binomial")
#Coefficients:
# Estimate Std. Error z value Pr(>|z|)
#(Intercept) -1.72177 0.28383 -6.066 1.31e-09 ***
#X[mm, 1] -0.01100 0.30885 -0.036 0.972
#X[mm, 2] 0.03082 0.23074 0.134 0.894
#X2[mm, 1] 0.23103 0.19157 1.206 0.228
#X2[mm, 2] 0.07242 0.15260 0.475 0.635
######### Bayes ########
library(rjags)
## Bayesian glm, w/o random effects
Dac<-list(y=Y[ac],x1=X[ac,1],x2=X[ac,2],x3=X2[ac,1],x4=X2[ac,2],
    N=length(ac))
Dmm<-list(y=Y[mm],x1=X[mm,1],x2=X[mm,2],x3=X2[mm,1],x4=X2[mm,2],
    N=length(mm))
mod<-jags.model(file="bayes_glm",data=Dac,n.chains=3)
update(mod,5000)
out_ac<-coda.samples(model=mod,variable.names=c("beta","p"),n.iter=6000,thin=3)
est_ac<-summary(out_ac)[[2]]
mod<-jags.model(file="bayes_glm",data=Dmm,n.chains=3)
update(mod,5000)
out_mm<-coda.samples(model=mod,variable.names=c("beta","p"),n.iter=6000,thin=3)
est_mm<-summary(out_mm)[[2]]
## Bayesian glm, w/ block random effects (hierarchical model)
blk<-dat$Block
Dac<-list(y=Y[ac],x1=X[ac,1],x2=X[ac,2],x3=X2[ac,1],x4=X2[ac,2],
    N=length(ac),bk=blk[ac],J=3)
Dmm<-list(y=Y[mm],x1=X[mm,1],x2=X[mm,2],x3=X2[mm,1],x4=X2[mm,2],
    N=length(mm),bk=blk[mm],J=3)
mod<-jags.model(file="bayes_hglm",data=Dac,n.chains=3)
update(mod,20000) ## burn-in
out_h_ac<-coda.samples(model=mod,variable.names=c("beta","p","alpha","pi"),
    n.iter=20000,thin=10)
est_h_ac<-summary(out_h_ac)[[2]]
mod<-jags.model(file="bayes_hglm",data=Dmm,n.chains=3)
update(mod,20000) ## burn-in
out_h_mm<-coda.samples(model=mod,variable.names=c("beta","p","alpha","pi"),
    n.iter=20000,thin=10)
est_h_mm<-summary(out_h_mm)[[2]]
pdf("betasHier.pdf",height=9,width=6)
a<-c(1:4,6:9,11:14,16:19) ## rows of the 16 selection (beta) coefficients; every 5th row is an intercept
par(mfrow=c(2,1))
par(mar=c(4,5.5,2.5,.5))
cs<-c("black","skyblue4","seagreen","coral")
cl<-1.5
ca<-1.2
cm<-1.4
plot(1:16,est_h_ac[a,3],pch=19,col=rep(cs,each=4),xlab="Parameter",ylab="Value",
    cex.lab=cl,cex.axis=ca,ylim=c(-3,2)) ## posterior medians
title(main="Beta params. A/C",cex.main=cm)
segments(1:16,est_h_ac[a,1],1:16,est_h_ac[a,5]) ## 95% ETPIs
abline(h=0,lty=2,lwd=1.5)
plot(1:16,est_h_mm[a,3],pch=19,col=rep(cs,each=4),xlab="Parameter",ylab="Value",
    cex.lab=cl,cex.axis=ca,ylim=c(-3,2))
title(main="Beta params. MM",cex.main=cm)
segments(1:16,est_h_mm[a,1],1:16,est_h_mm[a,5])
abline(h=0,lty=2,lwd=1.5)
dev.off()
save(list=ls(),file="bayes_color_2019.rdat")
## additional summaries and plots of the selection gradients
## combine the three chains into matrices of the first 20 monitored parameters
## (3 chains x 2000 retained draws = 6000 rows)
mat_h_ac<-matrix(NA,nrow=6000,ncol=20)
mat_h_mm<-matrix(NA,nrow=6000,ncol=20)
for(i in 1:20){
    mat_h_ac[,i]<-c(out_h_ac[[1]][,i],out_h_ac[[2]][,i],out_h_ac[[3]][,i])
    mat_h_mm[,i]<-c(out_h_mm[[1]][,i],out_h_mm[[2]][,i],out_h_mm[[3]][,i])
}
## posterior distributions of the fitness functions (selection gradients)
xx_rg<-seq(-1.7,3.3,0.01) ## grid of standardized RG values
xx_gb<-seq(-2.1,2.8,0.01) ## grid of standardized GB values
e_grad_rg_ac<-vector("list",4)
e_grad_rg_mm<-vector("list",4)
e_grad_gb_ac<-vector("list",4)
e_grad_gb_mm<-vector("list",4)
grad_rg<-matrix(NA,nrow=6000,ncol=length(xx_rg))
grad_gb<-matrix(NA,nrow=6000,ncol=length(xx_gb))
## RG (with GB held at its mean, 0 after standardization)
for(k in 1:4){ ## k = 1 is the population-level effect, k = 2:4 are blocks 1:3
    ## AC
    for(i in 1:6000){
        grad_rg[i,]<-mat_h_ac[i,5+5*(k-1)] + mat_h_ac[i,1+5*(k-1)] * xx_rg +
            mat_h_ac[i,3+5*(k-1)] * xx_rg^2
    }
    e_grad_rg_ac[[k]]<-1/(1+exp(-1 * grad_rg)) ## inverse logit -> survival probability
    ## MM
    for(i in 1:6000){
        grad_rg[i,]<-mat_h_mm[i,5+5*(k-1)] + mat_h_mm[i,1+5*(k-1)] * xx_rg +
            mat_h_mm[i,3+5*(k-1)] * xx_rg^2
    }
    e_grad_rg_mm[[k]]<-1/(1+exp(-1 * grad_rg))
}
## GB (with RG held at its mean)
for(k in 1:4){
    ## AC
    for(i in 1:6000){
        grad_gb[i,]<-mat_h_ac[i,5+5*(k-1)] + mat_h_ac[i,2+5*(k-1)] * xx_gb +
            mat_h_ac[i,4+5*(k-1)] * xx_gb^2
    }
    e_grad_gb_ac[[k]]<-1/(1+exp(-1 * grad_gb))
    ## MM
    for(i in 1:6000){
        grad_gb[i,]<-mat_h_mm[i,5+5*(k-1)] + mat_h_mm[i,2+5*(k-1)] * xx_gb +
            mat_h_mm[i,4+5*(k-1)] * xx_gb^2
    }
    e_grad_gb_mm[[k]]<-1/(1+exp(-1 * grad_gb))
}
est_grad_rg_ac<-vector("list",4)
est_grad_rg_mm<-vector("list",4)
est_grad_gb_ac<-vector("list",4)
est_grad_gb_mm<-vector("list",4)
for(k in 1:4){ ## posterior medians and 90% ETPIs of the fitness functions
    est_grad_rg_ac[[k]]<-apply(e_grad_rg_ac[[k]],2,quantile,probs=c(.5,.05,.95))
    est_grad_rg_mm[[k]]<-apply(e_grad_rg_mm[[k]],2,quantile,probs=c(.5,.05,.95))
    est_grad_gb_ac[[k]]<-apply(e_grad_gb_ac[[k]],2,quantile,probs=c(.5,.05,.95))
    est_grad_gb_mm[[k]]<-apply(e_grad_gb_mm[[k]],2,quantile,probs=c(.5,.05,.95))
}
cs<-c("coral","deepskyblue2")
library(scales)
pdf("bayesSelGradsRG.pdf",width=10,height=10)
par(mfrow=c(2,2))
par(mar=c(4,5,2.5,1))
tits<-c("(a) RG, pop. effect","(b) RG, block 1","(c) RG, block 2","(d) RG, block 3")
for(k in 1:4){
    plot(xx_rg,est_grad_rg_ac[[k]][1,],type='n',ylim=c(0,1),xlab="Color (RG)",
        ylab="Survival probability",cex.lab=1.4,cex.axis=1.1)
    title(main=tits[k],cex.main=1.4)
    polygon(c(xx_rg,rev(xx_rg)),c(est_grad_rg_ac[[k]][2,],rev(est_grad_rg_ac[[k]][3,])),
        col=alpha(cs[1],.5),border=NA) ## AC credible band
    polygon(c(xx_rg,rev(xx_rg)),c(est_grad_rg_mm[[k]][2,],rev(est_grad_rg_mm[[k]][3,])),
        col=alpha(cs[2],.5),border=NA) ## MM credible band
    lines(xx_rg,est_grad_rg_ac[[k]][1,],col=cs[1],lwd=1.5)
    lines(xx_rg,est_grad_rg_mm[[k]][1,],col=cs[2],lwd=1.5)
}
dev.off()
pdf("bayesSelGradsGB.pdf",width=10,height=10)
par(mfrow=c(2,2))
par(mar=c(4,5,2.5,1))
tits<-c("(a) GB, pop. effect","(b) GB, block 1","(c) GB, block 2","(d) GB, block 3")
for(k in 1:4){
    plot(xx_gb,est_grad_gb_ac[[k]][1,],type='n',ylim=c(0,1),xlab="Color (GB)",
        ylab="Survival probability",cex.lab=1.4,cex.axis=1.1)
    title(main=tits[k],cex.main=1.4)
    polygon(c(xx_gb,rev(xx_gb)),c(est_grad_gb_ac[[k]][2,],rev(est_grad_gb_ac[[k]][3,])),
        col=alpha(cs[1],.5),border=NA)
    polygon(c(xx_gb,rev(xx_gb)),c(est_grad_gb_mm[[k]][2,],rev(est_grad_gb_mm[[k]][3,])),
        col=alpha(cs[2],.5),border=NA)
    lines(xx_gb,est_grad_gb_ac[[k]][1,],col=cs[1],lwd=1.5)
    lines(xx_gb,est_grad_gb_mm[[k]][1,],col=cs[2],lwd=1.5)
}
dev.off()
## difference in selection between treatments
## RG
dif_grad_rg<-vector("list",4)
est_dif_grad_rg<-vector("list",4)
for(k in 1:4){
    ## convert to relative fitness (survival / mean survival) and take AC minus MM
    if(k > 1){ ## block-level: standardize by the block's mean survival
        dif_grad_rg[[k]]<-e_grad_rg_ac[[k]]/mean(Dac$y[Dac$bk==(k-1)]) -
            e_grad_rg_mm[[k]]/mean(Dmm$y[Dmm$bk==(k-1)])
    } else{ ## population-level: standardize by overall mean survival
        dif_grad_rg[[k]]<-e_grad_rg_ac[[k]]/mean(Dac$y) -
            e_grad_rg_mm[[k]]/mean(Dmm$y)
    }
    est_dif_grad_rg[[k]]<-apply(dif_grad_rg[[k]],2,quantile,probs=c(.5,.05,.95))
}
pdf("bayesSelGradsDifRG.pdf",width=10,height=10)
par(mfrow=c(2,2))
par(mar=c(4,5,2.5,1))
tits<-c("(a) RG, pop. effect","(b) RG, block 1","(c) RG, block 2","(d) RG, block 3")
for(k in 1:4){
    plot(xx_rg,est_dif_grad_rg[[k]][1,],type='n',ylim=c(-4,4),xlab="Color (RG)",
        ylab="Dif. relative fitness",cex.lab=1.4,cex.axis=1.1)
    title(main=tits[k],cex.main=1.4)
    polygon(c(xx_rg,rev(xx_rg)),c(est_dif_grad_rg[[k]][2,],rev(est_dif_grad_rg[[k]][3,])),
        col=alpha("darkgray",.5),border=NA)
    lines(xx_rg,est_dif_grad_rg[[k]][1,],lwd=1.5)
    abline(h=0,lty=2,lwd=1.3)
}
dev.off()
## GB
dif_grad_gb<-vector("list",4)
est_dif_grad_gb<-vector("list",4)
for(k in 1:4){
    ## relative fitness difference, AC minus MM (as for RG above)
    if(k > 1){
        dif_grad_gb[[k]]<-e_grad_gb_ac[[k]]/mean(Dac$y[Dac$bk==(k-1)]) -
            e_grad_gb_mm[[k]]/mean(Dmm$y[Dmm$bk==(k-1)])
    } else{
        dif_grad_gb[[k]]<-e_grad_gb_ac[[k]]/mean(Dac$y) -
            e_grad_gb_mm[[k]]/mean(Dmm$y)
    }
    est_dif_grad_gb[[k]]<-apply(dif_grad_gb[[k]],2,quantile,probs=c(.5,.05,.95))
}
pdf("bayesSelGradsDifGB.pdf",width=10,height=10)
par(mfrow=c(2,2))
par(mar=c(4,5,2.5,1))
tits<-c("(a) GB, pop. effect","(b) GB, block 1","(c) GB, block 2","(d) GB, block 3")
for(k in 1:4){
    plot(xx_gb,est_dif_grad_gb[[k]][1,],type='n',ylim=c(-4,4),xlab="Color (GB)",
        ylab="Dif. relative fitness",cex.lab=1.4,cex.axis=1.1)
    title(main=tits[k],cex.main=1.4)
    polygon(c(xx_gb,rev(xx_gb)),c(est_dif_grad_gb[[k]][2,],rev(est_dif_grad_gb[[k]][3,])),
        col=alpha("darkgray",.5),border=NA)
    lines(xx_gb,est_dif_grad_gb[[k]][1,],lwd=1.5)
    abline(h=0,lty=2,lwd=1.3)
}
dev.off()
### quantitative summaries
## posterior prob. that the AC coefficient exceeds the MM one for each parameter;
## for the quadratic terms, larger (more positive) values mean more disruptive selection
ppg<-rep(NA,20)
for(i in 1:20){
    ppg[i]<-mean(mat_h_ac[,i] > mat_h_mm[,i])
}
data.frame(level=rep(c("pop","blk1","blk2","blk3"),each=5),
    param=rep(c("linRG","linGB","quadRG","quadGB","int"),4),pp=round(ppg,4))
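One reproducibility caveat: the JAGS model files referenced above (bayes_glm and bayes_hglm) aren't included in this post. As a rough guide, here is a minimal sketch of what bayes_hglm could look like, assuming a Bernoulli likelihood with a logit link and block-level coefficients drawn from population-level means. This layout (alpha = 5 population-level means, beta = a 5 x 3 matrix of block-level coefficients) would match how the script indexes the 20 monitored columns, but the actual priors and parameterization may well differ, and the monitored pi is omitted here because its definition isn't recoverable from the script.

## hypothetical sketch of bayes_hglm -- NOT the actual model file
model{
    for(i in 1:N){
        y[i] ~ dbern(p[i]) ## survival
        ## intercept plus linear and quadratic terms for RG and GB
        logit(p[i]) <- beta[5,bk[i]] + beta[1,bk[i]]*x1[i] + beta[2,bk[i]]*x2[i] +
            beta[3,bk[i]]*x3[i] + beta[4,bk[i]]*x4[i]
    }
    for(j in 1:J){
        for(k in 1:5){ ## block-level coefficients around population-level means
            beta[k,j] ~ dnorm(alpha[k], tau[k])
        }
    }
    for(k in 1:5){ ## vague population-level priors (precision parameterization)
        alpha[k] ~ dnorm(0, 0.001)
        tau[k] ~ dgamma(1, 0.1)
    }
}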