# BALM: a Bayesian fill-in method that corrects meta-analytic estimates for
# publication bias by imputing suppressed (unpublished) studies.
#
# Arguments:
#   d_obs  - observed standardized effect sizes of the included studies.
#   n_obs  - two-column matrix of per-group sample sizes, one row per study.
#   type   - suppression mechanism: 1 = non-significant results suppressed,
#            2 = results in the unexpected direction suppressed.
#   side   - alternative hypothesis when type == 1:
#            1 = two-sided, 2 = upper-tailed, 3 = lower-tailed.
#   pn     - expected direction when type == 2: 1 = positive, 2 = negative.
#   cpoint - cutoff p value(s) bounding the suppression intervals.
#   M      - total Markov chain iterations (including burn-in and thinning).
#   burn   - burn-in period; thin - thinning period.
#   adjust - TRUE to apply the small-sample (Hedges' g) correction to d_obs.
#
# Returns a list: an unnamed note mapping each chain column to its p-value
# interval, plus the thinned chains w (relative publication probabilities),
# m (numbers of imputed studies), mu (overall effect) and tau (between-study
# standard deviation).
balm <- function(d_obs, n_obs, type, side = 1, pn = 1, cpoint = 0.5,
                 M = 5000, burn = 1000, thin = 20, adjust = FALSE) {
  n1 <- n_obs[, 1]
  n2 <- n_obs[, 2]
  cm <- 1 - 3 / (4 * (n1 + n2 - 2) - 1)  # Hedges' small-sample factor
  # BUG FIX: the original tested `adjust == "T"`, which is FALSE even when
  # adjust = TRUE is passed, so the correction was never applied.
  if (isTRUE(adjust) || identical(adjust, "T")) {
    d_obs <- cm * d_obs
  }
  ns <- length(d_obs)

  # p value of each study under the assumed suppression mechanism, rebuilt
  # from the effect size and the two group sizes.
  pvalue <- function(d, g1, g2) {
    cmv <- 1 - 3 / (4 * (g1 + g2 - 2) - 1)
    tstat <- (d / cmv) * sqrt(g1 * g2 / (g1 + g2))
    df <- g1 + g2 - 2
    if (type == 1) {
      if (side == 1) {
        2 * (1 - pt(abs(tstat), df))
      } else if (side == 2) {
        1 - pt(tstat, df)
      } else {
        pt(tstat, df)
      }
    } else {
      # BUG FIX: the original took abs(tstat) when pn == 1, which maps every
      # study to p <= 0.5 and makes the suppressed interval (cpoint, 1]
      # unreachable; mirror the pn == 2 branch instead.
      if (pn == 1) 1 - pt(tstat, df) else pt(tstat, df)
    }
  }

  # p-value intervals: (0, cpoint[1]], ..., (cpoint[last], 1].
  nk <- length(cpoint) + 1
  bounds <- c(0, cpoint, 1)
  index <- cbind(bounds[seq_len(nk)], bounds[seq_len(nk) + 1])

  p <- pvalue(d_obs, n1, n2)
  # j[, k]: indices of observed studies whose p falls in interval k, NA padded.
  j <- matrix(NA, nrow = ns, ncol = nk)
  for (k in seq_len(nk)) {
    idx <- which(p > index[k, 1] & p <= index[k, 2])
    if (length(idx) > 0) j[seq_along(idx), k] <- idx
  }
  # Observed (n1 | n2 | d) stacked per interval, NA padded to ns rows.
  data <- matrix(NA, nrow = ns, ncol = 3 * nk)
  for (k in seq_len(nk)) {
    idx <- na.omit(j[, k])
    pad <- rep(NA, ns - length(idx))
    data[, k] <- c(n1[idx], pad)
    data[, nk + k] <- c(n2[idx], pad)
    data[, 2 * nk + k] <- c(d_obs[idx], pad)
  }

  names1 <- paste0("w", seq_len(nk))
  names2 <- paste0("m", seq_len(nk))
  w <- matrix(0, nrow = M, ncol = nk, dimnames = list(NULL, names1))
  m <- matrix(0, nrow = M, ncol = nk, dimnames = list(NULL, names2))
  alpha <- 0.1    # beta prior on publication probabilities
  beta <- 0.1
  b <- 10000      # prior variance of mu (prior mean 0)
  tau_lo <- 0     # uniform prior support for tau
  tau_hi <- 100   # (renamed from c/d, which shadowed base functions)
  mu <- numeric(M)
  tau <- numeric(M)
  mu[1] <- 0
  tau[1] <- 0.2
  w[1, ] <- rep(0.5, nk) / max(rep(0.5, nk))  # initial weights, rescaled
  total <- 10^3   # size of the candidate study pool per iteration

  for (it in 2:M) {
    # Reference interval = the one whose relative weight is 1.
    k <- which(w[it - 1, ] == 1)
    if (length(k) > 1) k <- 1
    m[it, k] <- 0
    # Draw a pool of candidate studies from the current random-effects model.
    n_mis <- n_obs[sample(seq_len(ns), total - ns, replace = TRUE), ]
    n_up <- rbind(n_obs, n_mis)
    td <- rnorm(total, mean = mu[it - 1], sd = tau[it - 1])
    v <- (n_up[, 1] + n_up[, 2]) / n_up[, 1] / n_up[, 2] +
      td^2 / (2 * (n_up[, 1] + n_up[, 2]))
    d_up <- rnorm(total, mean = td, sd = sqrt(v))
    p_up <- pvalue(d_up, n_up[, 1], n_up[, 2])
    # Truncate the pool so the reference interval holds exactly as many
    # candidates as it has observed studies.
    logi <- as.integer(p_up > index[k, 1] & p_up <= index[k, 2])
    ord <- order(logi, decreasing = TRUE)  # stable: in-interval indices first
    et <- ord[length(na.omit(j[, k]))]
    if (length(et) == 0 || is.na(et)) et <- 0
    m2 <- max(et - ns, 0)
    keep <- seq_len(m2 + ns)
    n_up <- n_up[keep, , drop = FALSE]
    d_up <- d_up[keep]
    p_up <- p_up[keep]

    for (k in seq_len(nk)) {
      obs_k <- na.omit(j[, k])
      in_k <- which(p_up > index[k, 1] & p_up <= index[k, 2])
      m[it, k] <- length(in_k) - length(obs_k)
      obs_d <- na.omit(data[, 2 * nk + k])
      obs_n <- na.omit(cbind(data[, k], data[, nk + k]))
      if (m[it, k] < 0) {
        # Fewer candidate slots than observed studies: fill every slot with
        # a random subset of the observed studies.
        m[it, k] <- 0
        nid <- sample(seq_along(obs_k), length(in_k))
        d_up[in_k] <- obs_d[nid]
        n_up[in_k, ] <- obs_n[nid, ]
      } else {
        # Replace a random subset of the candidate slots with the observed
        # studies. BUG FIX: index into in_k so that sample() never expands a
        # length-1 index set into 1:in_k.
        id2 <- in_k[sample.int(length(in_k), length(obs_k))]
        d_up[id2] <- obs_d
        n_up[id2, ] <- obs_n
      }
      # Publication probability of interval k (beta-binomial update).
      w[it, k] <- rbeta(1, length(obs_k) + alpha, m[it, k] + beta)
    }
    w[it, ] <- w[it, ] / max(w[it, ])  # rescale so the largest weight is 1

    # Conjugate normal draw for mu given the filled-in data set.
    vtot <- (n_up[, 1] + n_up[, 2]) / n_up[, 1] / n_up[, 2] +
      d_up^2 / (2 * (n_up[, 1] + n_up[, 2])) + tau[it - 1]^2
    deno <- sum(1 / vtot) + 1 / b
    mu[it] <- rnorm(1, sum(d_up / vtot) / deno, sqrt(1 / deno))

    # Random-walk Metropolis step for tau with a uniform(tau_lo, tau_hi)
    # prior. NOTE(review): this accept/reject step was corrupted in the
    # source; it is reconstructed from the surrounding code and the visible
    # fragment `log... > (logtau_t - logtau_t_1)` -- confirm against the
    # reference implementation.
    tau[it] <- tau[it - 1] + runif(1, -0.1, 0.1)
    if (tau[it] > tau_hi || tau[it] < tau_lo) {
      tau[it] <- tau[it - 1]
    } else {
      vbase <- vtot - tau[it - 1]^2
      logtau_t <- sum(dnorm(d_up, mu[it], sqrt(vbase + tau[it]^2),
                            log = TRUE))
      logtau_t_1 <- sum(dnorm(d_up, mu[it], sqrt(vbase + tau[it - 1]^2),
                              log = TRUE))
      if (log(runif(1)) > (logtau_t - logtau_t_1)) {
        tau[it] <- tau[it - 1]
      }
    }
  }

  # Discard burn-in and thin the chains.
  pick <- seq(burn, M, thin)
  w <- w[pick, , drop = FALSE]
  m <- m[pick, , drop = FALSE]
  mu <- mu[pick]
  tau <- tau[pick]
  note <- ""
  for (i in seq_len(nk)) {
    note <- paste(note, paste(names1[i], "and", names2[i],
                              "are corresponding to p value interval: ",
                              index[i, 1], "to", index[i, 2], "; "))
  }
  list(note, w = w, m = m, mu = mu, tau = tau)
}

####### EXAMPLE ########
sz <- 50            # number of studies
delta <- 0          # overall population effect size
ttau <- 0.2         # between-study standard deviation
n1 <- rep(100, sz)  # per-group sample sizes of the studies
n2 <- rep(100, sz)
n_obs <- cbind(n1, n2)
ttd <- rnorm(length(n1), mean = delta, sd = ttau)
tv <- (n1 + n2) / n1 / n2 + ttd^2 / (2 * (n1 + n2))
d_obs <- rnorm(length(n1), mean = ttd, sd = sqrt(tv))  # simulated effect sizes
####
# Negative-result suppression: drop (up to) 15 negative studies.
a <- which(d_obs < 0)
# BUG FIX: guard against fewer than 15 negatives, which would put NAs in `a`
# and break the subsequent negative subscripting.
a <- a[seq_len(min(15, length(a)))]
# Remove the suppressed (negative) studies from the simulated data set.
d_obs <- d_obs[-a]
n_obs <- n_obs[-a, ]
####
# BALM is used to correct for negative result suppression.
result <- balm(d_obs, n_obs, type = 2, pn = 1, cpoint = 0.5,
               M = 5000, burn = 1000, thin = 20)