##############################################
##### TABLES AND FIGURES FOR #####
##### THE SUPPLEMENTARY INFORMATION #####
##### OF VOROS, BELLOTTI ET AL. #####
##############################################
##################
### APPENDIX A ###
##################
# these tables were not generated in R
##################
##################
### APPENDIX B ###
##################
### load data
rm(list=ls())
load('01_data_all.RData')
### Table B2: average number of measures used
table.b2 <- matrix(NA, 11, 4)
colnames(table.b2) <- c('indiv mean', 'indiv sd', 'hh mean', 'hh sd')
for (i in 1:10) {
talk <- eval(parse(text=paste0('v', i, '.adjmatrix')))
covars <- eval(parse(text=paste0('v', i, '.attributes')))
# identify asha and healer
who.asha <- which(covars[,'ASHA']==1)
who.healer <- which(covars[,'Traditional Healer']==1)
# remove asha and healer
talk <- talk[-c(who.asha, who.healer),-c(who.asha, who.healer)]
covars <- covars[-c(who.asha, who.healer),]
# define number of measures used
covars$measure.sum <- rowSums(covars[,vars.dep], na.rm=T)
table.b2[i,1] <- mean(covars$measure.sum, na.rm=T)
table.b2[i,2] <- sd(covars$measure.sum, na.rm=T)
table.b2[i,3] <- mean(aggregate(covars$measure.sum,
list(covars$Household), mean, na.rm=T)$x)
table.b2[i,4] <- sd(aggregate(covars$measure.sum,
list(covars$Household), mean, na.rm=T)$x)
}
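# note (sketch): the eval(parse()) pattern above can be written more simply with get(),
# assuming the per-village objects v1.adjmatrix, ..., v10.attributes exist in the workspace:
# talk   <- get(paste0('v', i, '.adjmatrix'))
# covars <- get(paste0('v', i, '.attributes'))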
rm(i, talk, covars)
# add totals
covars <- v.attributes
covars$measure.sum <- rowSums(covars[,vars.dep], na.rm=T)
table.b2[11,1] <- mean(covars$measure.sum, na.rm=T)
table.b2[11,2] <- sd(covars$measure.sum, na.rm=T)
table.b2[11,3] <- mean(aggregate(covars$measure.sum,
list(covars$Household), mean, na.rm=T)$x)
table.b2[11,4] <- sd(aggregate(covars$measure.sum,
list(covars$Household), mean, na.rm=T)$x)
rm(covars)
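# optional (sketch): label the rows before exporting; this assumes rows 1-10 follow
# the v1-v10 village order and that village.names is sorted the same way
# rownames(table.b2) <- c(village.names, 'All villages')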
library(xlsx)
write.xlsx(table.b2, file='table_b02.xlsx', row.names=T, col.names=T)
### Table B3: percentage of individuals using each measure
use.indiv <- v.attributes[,vars.dep]
table.b3 <- aggregate(use.indiv, by=list(v.attributes$Village), FUN=mean, na.rm=T)
rownames(table.b3) <- table.b3[,1]
table.b3 <- table.b3[,-1]
table.b3 <- table.b3[village.names,]
table.b3 <- round(table.b3, 4)
table.b3 <- rbind(table.b3, round(colMeans(use.indiv, na.rm=T), 4))
table.b3 <- rbind(table.b3, round(colSums(is.na(use.indiv))/nrow(use.indiv),4))
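# optional (sketch): name the two appended rows for readability
# rownames(table.b3)[11:12] <- c('All villages', 'Share missing')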
rm(use.indiv)
library(xlsx)
write.xlsx(table.b3, file='table_b03.xlsx', row.names=T, col.names=T)
### Table B5: percentage of female, head of hh, carer of sick, works in fields
covar <- v.attributes[,c('Female', 'Head of household', 'Carer for a sick person', 'Works in fields')]
table.b5 <- aggregate(covar, by=list(v.attributes$Village), FUN=mean, na.rm=T)
rownames(table.b5) <- table.b5[,1]
table.b5 <- table.b5[,-1]
table.b5 <- table.b5[village.names,]
table.b5 <- round(table.b5, 4)
table.b5 <- rbind(table.b5, round(colMeans(covar, na.rm=T),4))
table.b5 <- rbind(table.b5, round(colSums(is.na(covar))/nrow(covar),4))
rm(covar)
library(xlsx)
write.xlsx(table.b5, file='table_b05.xlsx', row.names=T, col.names=T)
### Table B6: age mean and sd
covar <- v.attributes[,c('Age')]
table.b6 <- cbind(aggregate(covar, by=list(v.attributes$Village), FUN=mean, na.rm=T),
aggregate(covar, by=list(v.attributes$Village), FUN=sd, na.rm=T))
rownames(table.b6) <- table.b6[,1]
table.b6 <- table.b6[,-c(1,3)]
table.b6 <- table.b6[village.names,]
table.b6 <- round(table.b6, 2)
table.b6 <- rbind(table.b6, c(round(mean(covar, na.rm=T),2), round(sd(covar, na.rm=T),2)))
table.b6 <- cbind(table.b6, c(aggregate(is.na(covar), by=list(v.attributes$Village), FUN=mean, na.rm=T)[,2],
mean(is.na(covar), na.rm=T)))
rm(covar)
library(xlsx)
write.xlsx(table.b6, file='table_b06.xlsx', row.names=T, col.names=T)
### Table B7: percentage in education categories
covar <- matrix(c(v.attributes$Education==0, v.attributes$Education==1, v.attributes$Education==2,
v.attributes$Education==3, v.attributes$Education==4, v.attributes$Education==5,
v.attributes$Education==6, is.na(v.attributes$Education)), nc=8)
table.b7 <- aggregate(covar, by=list(v.attributes$Village), FUN=mean, na.rm=T)
rownames(table.b7) <- table.b7[,1]
table.b7 <- table.b7[,-1]
table.b7 <- table.b7[village.names,]
table.b7 <- round(table.b7, 4)
table.b7 <- rbind(table.b7, round(colMeans(covar, na.rm=T),4))
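# optional (sketch): label the columns; the order follows the construction of covar above
# colnames(table.b7) <- c(paste('Education =', 0:6), 'Missing')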
rm(covar)
library(xlsx)
write.xlsx(table.b7, file='table_b07.xlsx', row.names=T, col.names=T)
### Table B9: network descriptives
library(sna)
net.desc.talk <- matrix(NA, 10, 10)
for (i in 1:10) {
talk <- eval(parse(text=paste0('v', i, '.adjmatrix')))
covars <- eval(parse(text=paste0('v', i, '.attributes')))
# identify asha and healer
who.asha <- which(covars[,'ASHA']==1)
who.healer <- which(covars[,'Traditional Healer']==1)
# remove asha and healer
talk <- talk[-c(who.asha, who.healer),-c(who.asha, who.healer)]
covars <- covars[-c(who.asha, who.healer),]
geo <- geodist(talk, inf.replace=NA)$gdist
diag(geo) <- NA
net.desc.talk[i,1] <- nrow(talk)
net.desc.talk[i,2] <- sum(talk, na.rm=T)
net.desc.talk[i,3] <- sum(talk, na.rm=T) / nrow(talk)
net.desc.talk[i,4] <- sd(rowSums(talk, na.rm=T))
net.desc.talk[i,5] <- sd(colSums(talk, na.rm=T))
net.desc.talk[i,6] <- centralization(talk, degree, cmode='indegree')
net.desc.talk[i,7] <- components(talk)
net.desc.talk[i,8] <- mean(geo, na.rm=T)
net.desc.talk[i,9] <- sd(geo, na.rm=T)
net.desc.talk[i,10] <- max(geo, na.rm=T)
}
rm(i, talk, geo, covars)
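# optional (sketch): label the columns; names inferred from the computations above
# colnames(net.desc.talk) <- c('Nodes', 'Ties', 'Average degree', 'SD outdegree',
#                              'SD indegree', 'Indegree centralization', 'Components',
#                              'Mean distance', 'SD distance', 'Max distance')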
net.desc.talk <- rbind(net.desc.talk, colMeans(net.desc.talk))
library(xlsx)
write.xlsx(net.desc.talk, file='table_b09.xlsx', row.names=T, col.names=T)
### Table B10: ties to non-respondents and out-of-village ties
covar <- v.attributes[,c('Network size among non-respondents', 'Network size out of village')]
table.b10 <- aggregate(covar, by=list(v.attributes$Village), FUN=mean, na.rm=T)
rownames(table.b10) <- table.b10[,1]
table.b10 <- table.b10[,-1]
table.b10 <- table.b10[village.names,]
table.b10 <- round(table.b10, 4)
table.b10 <- cbind(table.b10,
round(aggregate(covar, by=list(v.attributes$Village), FUN=sd, na.rm=T)[,2:3], 4))
table.b10 <- rbind(table.b10, round(colMeans(covar, na.rm=T),4))
rm(covar)
library(xlsx)
write.xlsx(table.b10, file='table_b10.xlsx', row.names=T, col.names=T)
### Table B11: individuals and households talking to ASHA and Healer
covar <- v.attributes[,c('Talks to ASHA', 'Talks to Healer')]
table.b11 <- aggregate(covar, by=list(v.attributes$Village), FUN=mean, na.rm=T)
rownames(table.b11) <- table.b11[,1]
table.b11 <- table.b11[,-1]
table.b11 <- table.b11[village.names,]
table.b11 <- round(table.b11, 4)
table.b11 <- rbind(table.b11, round(colMeans(covar, na.rm=T),4))
temp <- matrix(NA, 11, 2)
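# household-level measure: a household counts as talking to the ASHA/Healer if any of
# its members does (max over household members), then averaged across households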
for (i in 1:10) {
dat <- covar[v.attributes$Village==village.names[i],]
hhs <- v.attributes[v.attributes$Village==village.names[i],'Household']
temp[i,] <- round(colMeans(aggregate(dat, by=list(hhs), FUN=max, na.rm=T)[,2:3]), 4)
}
rm(i, dat, hhs)
temp[11,] <- round(colMeans(aggregate(covar[v.attributes$Village %in% village.names[c(1:5,8)],],
by=list(v.attributes[v.attributes$Village %in% village.names[c(1:5,8)],1],
v.attributes[v.attributes$Village %in% village.names[c(1:5,8)],2]),
FUN=max, na.rm=T)[,3:4]), 4)
table.b11 <- cbind(table.b11, temp)
rm(temp, covar)
library(xlsx)
write.xlsx(table.b11, file='table_b11.xlsx', row.names=T, col.names=T)
### Table B12-B13: mean and median exposure of individuals
covar <- v.attributes[,vars.netexp]
table.b12 <- aggregate(covar, by=list(v.attributes$Village), FUN=mean)
rownames(table.b12) <- table.b12[,1]
table.b12 <- table.b12[,-1]
table.b12 <- table.b12[village.names,]
table.b12 <- round(table.b12, 4)
table.b12 <- rbind(table.b12, round(colMeans(covar), 4))
table.b13 <- aggregate(covar, by=list(v.attributes$Village), FUN=median)
rownames(table.b13) <- table.b13[,1]
table.b13 <- table.b13[,-1]
table.b13 <- table.b13[village.names,]
table.b13 <- round(table.b13, 4)
table.b13 <- rbind(table.b13, round(colMeans(covar), 4))
rm(covar)
library(xlsx)
write.xlsx(table.b12, file='table_b12.xlsx', row.names=T, col.names=T)
write.xlsx(table.b13, file='table_b13.xlsx', row.names=T, col.names=T)
### Table B14: percentage of individuals exposed in households
covar <- v.attributes[,vars.hhexp]
table.b14 <- aggregate(covar, by=list(v.attributes$Village), FUN=mean, na.rm=T)
rownames(table.b14) <- table.b14[,1]
table.b14 <- table.b14[,-1]
table.b14 <- table.b14[village.names,]
table.b14 <- round(table.b14, 4)
table.b14 <- rbind(table.b14, round(colMeans(covar, na.rm=T), 4))
rm(covar)
library(xlsx)
write.xlsx(table.b14, file='table_b14.xlsx', row.names=T, col.names=T)
##################
##################
### APPENDIX C ###
##################
### load data
rm(list=ls())
load('05_saom_all_results.RData')
### Figures C1-C10: multilevel network graphs
library(igraph)
library(graphlayouts)
library(ggraph)
#library(threejs)
library(RColorBrewer)
library(RSiena)
col <- brewer.pal(8, 'Dark2')
for (i in 1:10) {
eval(parse(text=paste0('s.result <- saom.v', i, '.rate3.7full')))
talk.e <- t(s.result$f$Data1$nets$talk[[2]]$mat1)
use.e <- t(s.result$f$Data1$bipartites$use[[2]]$mat1)
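# talk.e / use.e: edge lists taken from the data stored inside the fitted RSiena object,
# presumably (sender, receiver, value) triplets for the second observation ([[2]])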
talk <- talk.e[,1:2]
talk <- matrix(paste0('A', talk), nc=2)
use <- use.e[,1:2]
use <- matrix(c(paste0('A', use[,1]), paste0('B', use[,2])), nc=2)
x <- t(matrix(c('B1','B2','B2','B3','B3','B4','B4','B5','B5','B6','B6','B7','B7','B8'), nr=2))
y <- t(matrix(c('B8','B7','B7','B6','B6','B5','B5','B4','B4','B3','B3','B2','B2','B1'), nr=2))
all <- rbind(talk, use, x)  # x: chain of measure-measure edges (not drawn below); y and the reversed use edges are unused
n1 <- attr(use.e, 'nActors')[1]
n2 <- attr(use.e, 'nActors')[2]
multilevel <- graph_from_edgelist(all, directed=F)
vertex_attr(multilevel, 'lvl') <- ifelse(substr(V(multilevel)$name,1,1)=='A',1,2)
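# optional check (sketch): node_labels below assumes the graph contains all n1 + n2 nodes,
# i.e. no individual or measure is an isolate dropped by graph_from_edgelist()
# stopifnot(length(V(multilevel)) == n1 + n2)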
node_labels <- rep('', n1+n2)
node_labels[V(multilevel)$name=='B1'] <- 'LLINs'
node_labels[V(multilevel)$name=='B2'] <- 'Covering clothes'
node_labels[V(multilevel)$name=='B3'] <- 'Boots'
node_labels[V(multilevel)$name=='B4'] <- 'Gloves'
node_labels[V(multilevel)$name=='B5'] <- 'Cream'
node_labels[V(multilevel)$name=='B6'] <- 'Coil'
node_labels[V(multilevel)$name=='B7'] <- 'Vaporizer'
node_labels[V(multilevel)$name=='B8'] <- 'Burn'
if (i==2) set.seed(130)
if (i==3) set.seed(192)
if (i==5) set.seed(194)
if (i==7) set.seed(96)
if (i==8) set.seed(99)
if (i %in% c(1, 4, 6, 9:10)) set.seed(156)
layout <- layout_as_multilevel(multilevel,type = "separate",
FUN1=layout_with_fr,
FUN2=layout_with_fr,
alpha = 50, beta = 30)
pdf(paste0('figure_c',i,'.pdf'), w=9, h=9)
print(ggraph(multilevel, "manual", x = layout[, 1], y = layout[, 2]) +
geom_edge_link0(
aes(filter = (node1.lvl == 1 & node2.lvl == 1)),
edge_colour = "firebrick3",
alpha = 0.8,
edge_width = 0.6
) +
# geom_edge_link0(
# aes(filter = (node1.lvl != node2.lvl)),
# alpha = 0.05,
# edge_width = 0.4,
# edge_colour = "blue"
# ) +
geom_edge_link0(
aes(filter = (node1.lvl != node2.lvl & node2.name == 'B1')),
alpha = 0.3,
edge_width = 0.5,
edge_colour = col[1]
) +
geom_edge_link0(
aes(filter = (node1.lvl != node2.lvl & node2.name == 'B2')),
alpha = 0.3,
edge_width = 0.5,
edge_colour = col[2]
) +
geom_edge_link0(
aes(filter = (node1.lvl != node2.lvl & node2.name == 'B3')),
alpha = 0.3,
edge_width = 0.5,
edge_colour = col[3]
) +
geom_edge_link0(
aes(filter = (node1.lvl != node2.lvl & node2.name == 'B4')),
alpha = 0.3,
edge_width = 0.5,
edge_colour = col[4]
) +
geom_edge_link0(
aes(filter = (node1.lvl != node2.lvl & node2.name == 'B5')),
alpha = 0.3,
edge_width = 0.5,
edge_colour = col[5]
) +
geom_edge_link0(
aes(filter = (node1.lvl != node2.lvl & node2.name == 'B6')),
alpha = 0.3,
edge_width = 0.5,
edge_colour = col[6]
) +
geom_edge_link0(
aes(filter = (node1.lvl != node2.lvl & node2.name == 'B7')),
alpha = 0.3,
edge_width = 0.5,
edge_colour = col[7]
) +
geom_edge_link0(
aes(filter = (node1.lvl != node2.lvl & node2.name == 'B8')),
alpha = 0.3,
edge_width = 0.5,
edge_colour = col[8]
) +
# geom_edge_link0(
# aes(filter = (node1.lvl == 2 &
# node2.lvl == 2)),
# edge_colour = "blue",
# edge_width = 1,
# alpha = 0.5
# ) +
geom_node_point(aes(shape = as.factor(lvl)), fill = "grey70", size = 3) +
geom_node_text(aes(label = node_labels), nudge_y=0.05, size=5) +
scale_shape_manual(values = c(21, 22)) +
theme_graph() +
coord_cartesian(clip = "off", expand = F) +
theme(legend.position = "none", panel.border=element_blank())
)
dev.off()
}
##################
##################
### APPENDIX D ###
##################
### load data
rm(list=ls())
load('04_logit_all_results.RData')
### Tables D2-D9: backward regression tables
library(jtools)
for (i in 1:length(vars.dep.obj)) {
# identify villages, result object names, positions of final models in each object
which.villages <- which(logit.villages[i,]==1)
objects <- paste0('logit.backward.', vars.dep.obj[i], '.v', which.villages)
objects.lengths <- vapply(paste0('length(', objects, ')'),
function(x) eval(parse(text=x)), 0, USE.NAMES=F)
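# equivalent sketch without string parsing (assumes the fitted objects named in `objects` exist):
# objects.lengths <- vapply(objects, function(o) length(get(o)), integer(1), USE.NAMES=F)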
# identify list of all variables that appear in any of the final models
if (select.by=='p') {
temp <- paste0('unique(c(',
paste(paste0('names(', objects, '[[', objects, '[[', objects.lengths,
']]]]$coefficients)'), collapse=', '), '))')
}
if (select.by=='fit') {
temp <- paste0('unique(c(', paste(paste0('names(', objects, '$coefficients)'),
collapse=', '), '))')
}
vars <- eval(parse(text=temp))
rm(temp)
# prepare results for table
if (select.by=='p') {
temp <- paste(paste0(objects, '[[', objects, '[[', objects.lengths, ']]]]'),
collapse=', ')
}
if (select.by=='fit') {
temp <- paste(objects, collapse=', ')
}
res.list <- eval(parse(text=paste0('list(', temp, ')')))
rm(temp)
# set up variable names
names(vars) <- vapply(vars, function(x) ifelse(substr(x, 1, 1)=='\`',
substr(x, 2, nchar(x)-1), x),
'a', USE.NAMES=F)
vars.all <- c('(Intercept)', vars.indiv, vars.oplead, vars.netsize, vars.netexp,
vars.hhexp)
vars <- vars[vars.all[vars.all %in% names(vars)]]
names(vars)[1] <- 'Intercept'
# export table
export_summs(res.list,
to.file='xlsx',
file.name=paste0('logit_backward_', vars.dep.obj[i], '.xlsx'),
model.names = village.names[which.villages],
coefs=vars,
stars=c(`***` = 0.001, `**` = 0.01, `*` = 0.05))
}
rm(i, which.villages, objects, objects.lengths, vars, res.list, vars.all)
### Tables D10-D17: logit models with identical specifications
library(jtools)
for (i in vars.dep.obj) {
which.villages <- which(logit.villages[rownames(logit.villages)==i,]==1)
models <- eval(parse(text=paste0('logit.identical.', i, '[which.villages]')))
filename <- paste0('logit_identical_', i, '.xlsx')
modelnames <- village.names[which.villages]
vars <- names(models[[1]]$coefficients)  # models has already been subset to the included villages, so take the first element
names(vars) <- vapply(vars, function(x) ifelse(substr(x, 1, 1)=='\`',
substr(x, 2, nchar(x)-1), x),
'a', USE.NAMES=F)
names(vars)[1] <- 'Intercept'
export_summs(models,
to.file='xlsx',
file.name=filename,
model.names=modelnames,
coefs=vars,
stars=c(`***` = 0.001, `**` = 0.01, `*` = 0.05))
}
rm(i, which.villages, models, filename, modelnames, vars)
### Table D18: overview of villages used in meta-analysis
library(xlsx)
table.d18 <- logit.villages
rownames(table.d18) <- substring(vars.dep, 6)
write.xlsx(table.d18, file='table_d18.xlsx')
rm(table.d18)
### Tables D19-D26: meta-analyses
library(xlsx)
# print results to files
for (i in vars.dep.obj) {
meta <- eval(parse(text=paste0('logit.meta.', i)))
write.xlsx(round(meta, 3), file=paste0('logit_meta_', i, '.xlsx'))
}
rm(i, meta)
### Tables D27-D38: model fit comparisons
fit.ttests <- matrix(NA, 10, 8)
colnames(fit.ttests) <- c('Model A', 'Model B', 'Mean A', 'Mean B',
'Difference of means', 't-stat.', 'df', 'p')
fit.ttests[,'Model A'] <- c(1, 2, 3, 4, 5, 6, 7, 6, 7, 7)
fit.ttests[,'Model B'] <- c(0, 1, 2, 3, 4, 4, 4, 5, 6, 5)
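# first row: one-sample t-test of the baseline model's fit against zero
# (Model B is coded 0 here, i.e. no comparison model, so Mean B is set to 0 below)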
a <- t.test(as.numeric(fit.all[1,,]))
fit.ttests[1,'Mean A'] <- mean(as.numeric(fit.all[1,,]), na.rm=T)
fit.ttests[1,'Mean B'] <- 0
fit.ttests[1,'Difference of means'] <- a$estimate
fit.ttests[1,'t-stat.'] <- a$statistic
fit.ttests[1,'df'] <- a$parameter
fit.ttests[1,'p'] <- a$p.value
for (i in 2:nrow(fit.ttests)) {
a <- t.test(as.numeric(fit.all[fit.ttests[i,'Model A'],,]),
as.numeric(fit.all[fit.ttests[i,'Model B'],,]),
paired=T)
fit.ttests[i,'Mean A'] <- mean(fit.all[fit.ttests[i,'Model A'],,], na.rm=T)
fit.ttests[i,'Mean B'] <- mean(fit.all[fit.ttests[i,'Model B'],,], na.rm=T)
fit.ttests[i,'Difference of means'] <- a$estimate
fit.ttests[i,'t-stat.'] <- a$statistic
fit.ttests[i,'df'] <- a$parameter
fit.ttests[i,'p'] <- a$p.value
}
rm(i,a)
fit.ttests[,3:6] <- round(fit.ttests[,3:6], 2)
fit.ttests[,8] <- round(fit.ttests[,8], 3)
fit.ttests[,3:6] <- format(fit.ttests[,3:6], digits=2)
fit.ttests[,8] <- format(fit.ttests[,8], digits=3)
fit.ttests[,1:2][which(fit.ttests[,1:2]==0)] <- ''
fit.ttests[,1:2][which(fit.ttests[,1:2]==1)] <- '(1) Baseline'
fit.ttests[,1:2][which(fit.ttests[,1:2]==2)] <- '(2) Individual'
fit.ttests[,1:2][which(fit.ttests[,1:2]==3)] <- '(3) Opinion leader'
fit.ttests[,1:2][which(fit.ttests[,1:2]==4)] <- '(4) Network size'
fit.ttests[,1:2][which(fit.ttests[,1:2]==5)] <- '(5) Network exposure'
fit.ttests[,1:2][which(fit.ttests[,1:2]==6)] <- '(6) Household exposure'
fit.ttests[,1:2][which(fit.ttests[,1:2]==7)] <- '(7) Full'
fit.ttests[fit.ttests[,'p']=='0 ','p'] <- '<0.001'
stars <- rep('', nrow(fit.ttests))
stars[fit.ttests[,'p']<0.05] <- '*'
stars[fit.ttests[,'p']<0.01] <- '**'
stars[fit.ttests[,'p']=='<0.001'] <- '***'
fit.ttests <- cbind(fit.ttests, stars)
colnames(fit.ttests)[colnames(fit.ttests)=='stars'] <- ''
rm(stars)
extrarow <- c('* p<0.05 ** p<0.01 *** p<0.001', rep('', ncol(fit.ttests)-1))
fit.ttests <- rbind(fit.ttests, extrarow)
rm(extrarow)
extrarow <- colnames(fit.ttests)
fit.ttests <- rbind(extrarow, fit.ttests)
rm(extrarow)
library(xlsx)
write.xlsx(fit.ttests, file='table_d28.xlsx', row.names=F, col.names=F)
write.xlsx(round(fit.all, 2), file='table_d29-38.xlsx', row.names=T, col.names=T)
write.xlsx(round(apply(fit.all, 2, rowMeans, na.rm=T), 2),
file='table_d27.xlsx', row.names=T, col.names=T)
rm(fit.ttests)
##################
##################
### APPENDIX E ###
##################
### load data
rm(list=ls())
load('05_saom_all_results.RData')
### Table E3-E4: score-type tests for full model
library(xlsx)
write.xlsx(saom.scoretests.rate3.7full, file='table_e03-04.xlsx', row.names=F, col.names=T)
### Tables E5-E6: full village-level SAOM results
library(RSiena)
par <- matrix(NA, length(saom.covars), 10)
rownames(par) <- saom.covars
colnames(par) <- village.names
se <- matrix(NA, length(saom.covars), 10)
rownames(se) <- saom.covars
colnames(se) <- village.names
for (i in 1:10) {
eval(parse(text=paste0('s.result <- saom.v', i, '.rate3.7full')))
par[,i] <- s.result$theta
se[,i] <- s.result$se
}
rm(i)
# significance stars from |estimate / s.e.|; thresholds are applied to the numeric
# ratios before any coercion to character
tratio <- abs(par/se)
tratio[is.na(tratio)] <- 0
stars <- matrix('', nrow(par), ncol(par), dimnames=dimnames(par))
stars[tratio > 1.96] <- '*'   # 0.05
stars[tratio > 2.58] <- '**'  # 0.01
stars[tratio > 3.29] <- '***' # 0.001
rm(tratio)
par <- round(par, 2)
se <- round(se, 2)
temp <- paste0('(', se, ')')
se_brackets <- se
se_brackets[] <- temp
rm(temp)
parstars <- matrix(c(par[,1], stars[,1], par[,2], stars[,2], par[,3], stars[,3],
par[,4], stars[,4], par[,5], stars[,5], par[,6], stars[,6],
par[,7], stars[,7], par[,8], stars[,8], par[,9], stars[,9],
par[,10], stars[,10]), nr=length(saom.covars))
se_brackets2 <- matrix(c(se_brackets[,1], rep('', length(saom.covars)), se_brackets[,2], rep('', length(saom.covars)),
se_brackets[,3], rep('', length(saom.covars)), se_brackets[,4], rep('', length(saom.covars)),
se_brackets[,5], rep('', length(saom.covars)), se_brackets[,6], rep('', length(saom.covars)),
se_brackets[,7], rep('', length(saom.covars)), se_brackets[,8], rep('', length(saom.covars)),
se_brackets[,9], rep('', length(saom.covars)), se_brackets[,10], rep('', length(saom.covars))),
nr=length(saom.covars))
# create object to write
towrite <- matrix('', nrow(parstars)*2, ncol(parstars))
# add parameters and stars
j <- 1
for (i in seq(1, nrow(towrite), by=2)) {
towrite[i,] <- parstars[j,]
j <- j + 1
}
rm(i,j)
# add standard errors
j <- 1
for (i in seq(2, nrow(towrite), by=2)) {
towrite[i,] <- se_brackets2[j,]
j <- j + 1
}
rm(i,j)
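# equivalent vectorised sketch for the two interleaving loops above:
# towrite[seq(1, nrow(towrite), by=2), ] <- parstars
# towrite[seq(2, nrow(towrite), by=2), ] <- se_brackets2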
# add effect and village names
towrite <- cbind(unlist(lapply(saom.covars, function(x) c(x, ''))), towrite)
towrite <- rbind(c('', unlist(lapply(village.names, function(x) c(x, '')))), towrite)
# write to file
library(xlsx)
write.xlsx(towrite, file='table_e05-06.xlsx', row.names=F, col.names=F)
### Table E7-E8: SAOM meta-analysis results
library(xlsx)
write.xlsx(saom.meta.rate3.7full, file='table_e07-08.xlsx', row.names=T, col.names=T)
### Table E9: full SAOM GoF in each village
library(xlsx)
write.xlsx(saom.gofp.rate3.7full, file='table_e09.xlsx', row.names=T, col.names=T)
### Table E10: mixed triad census GoF in 7 nested models in each village
gof.compare <- matrix(c(saom.gofp.rate3.1baseline['mixed triad census',],
saom.gofp.rate3.2individual['mixed triad census',],
saom.gofp.rate3.3oplead['mixed triad census',],
saom.gofp.rate3.4netsize['mixed triad census',],
saom.gofp.rate3.5netexp['mixed triad census',],
saom.gofp.rate3.6hhexp['mixed triad census',],
saom.gofp.rate3.7full['mixed triad census',]),
nr=7, byrow=T)
gof.compare <- cbind(gof.compare, rowMeans(gof.compare))
rownames(gof.compare) <- c('(1) Baseline model', '(2) Individual model',
'(3) Opinion leader model', '(4) Network size model',
'(5) Network exposure model', '(6) Household exposure model',
'(7) Full model')
colnames(gof.compare) <- c(village.names, 'Mean')
library(xlsx)
write.xlsx(gof.compare, file='table_e10.xlsx', row.names=T, col.names=T)
rm(gof.compare)
### Table E11: SAOM GoF comparison t-tests
library(xlsx)
write.xlsx(saom.fit.ttests, file='table_e11.xlsx', row.names=F, col.names=T)
### Table E12: SAOM rate parameter comparison
rate.compare <- cbind(saom.meta.rate3.7full[,c('mu', 'se', 'p', 'n')],
saom.meta.rate5.7full[,c('mu', 'se', 'p', 'n')],
saom.meta.rate8.7full[,c('mu', 'se', 'p', 'n')])
library(xlsx)
write.xlsx(rate.compare, file='table_e12.xlsx', row.names=T, col.names=T)
rm(rate.compare)