Embarrassingly parallel linear mixed model calculations, spread across local cores and repeated until convergence.
The goal of lmmpar is to fit linear mixed models by splitting the computation across local cores in an embarrassingly parallel fashion, iterating until the estimates converge.
You can install lmmpar from GitHub with:
# Install devtools first if it is not already available:
# install.packages("devtools")
devtools::install_github("fulyagokalp/lmmpar")
This is a basic example that shows how to solve a common problem:
# Set up fake data ----
# NOTE(review): seeding the RNG makes the example reproducible; remove if
# you want a fresh simulation each run.
set.seed(42)

n <- 10000L  # number of subjects
m <- 4L      # number of repeats per subject
N <- n * m   # true size of the stacked data
p <- 50L     # number of betas (excluding the intercept)
q <- 2L      # width of the random effects

# Initial parameters ----
# beta has a 1 for the first value; all other values are ~N(10, 1).
beta <- rbind(1, matrix(rnorm(p, 10), p, 1))
R <- diag(m)                                 # within-subject correlation
D <- matrix(c(16, 0, 0, 0.025), nrow = q)    # random-effects covariance
sigma <- 1                                   # residual scale

# Set up data ----
subject <- rep(seq_len(n), each = m)  # subject id for each stacked row
repeats <- rep(seq_len(m), n)         # repeat index within each subject

# Per-subject design matrices: intercept column + p random covariates.
subj_x <- lapply(seq_len(n), function(i) {
  cbind(1, matrix(rnorm(m * p), nrow = m))
})
X <- do.call(rbind, subj_x)
Z <- X[, 1:q]  # random-effects design: first q columns of X

# Per-subject random effects and errors drawn from their covariances.
subj_beta <- lapply(seq_len(n), function(i) mnormt::rmnorm(1, rep(0, q), D))
subj_err <- lapply(seq_len(n), function(i) mnormt::rmnorm(1, rep(0, m), sigma * R))

# Create a known response: fixed part + random part + noise.
subj_y <- lapply(
  seq_len(n),
  function(i) {
    (subj_x[[i]] %*% beta) +
      (subj_x[[i]][, 1:q] %*% subj_beta[[i]]) +
      subj_err[[i]]
  }
)
Y <- do.call(rbind, subj_y)

# Run the algorithm in parallel to recover the known betas ----
ans <- lmmpar(
  Y,
  X,
  Z,
  subject,
  beta = beta,
  R = R,
  D = D,
  cores = 4,
  sigma = sigma,
  verbose = TRUE
)