From e9ace55636bfd796ea0f8d99ae41ab3ced5f86f3 Mon Sep 17 00:00:00 2001 From: Giorgio Paulon Date: Fri, 5 Nov 2021 15:21:22 -0500 Subject: [PATCH] Updates for CRAN --- DESCRIPTION | 4 + R/fcts.R | 832 +++++++++++++++++++++++++++++++++- README.Rmd | 3 +- README.md | 3 +- cran-comments.md | 4 +- vignettes/.gitignore | 2 + vignettes/minimal_example.Rmd | 66 +++ 7 files changed, 889 insertions(+), 25 deletions(-) create mode 100644 vignettes/.gitignore create mode 100644 vignettes/minimal_example.Rmd diff --git a/DESCRIPTION b/DESCRIPTION index 02b11bd..963189f 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -17,3 +17,7 @@ LazyData: true LinkingTo: Rcpp, RcppArmadillo, RcppProgress, rgen RoxygenNote: 7.1.2 Roxygen: list(markdown = TRUE) +Suggests: + rmarkdown, + knitr +VignetteBuilder: knitr diff --git a/R/fcts.R b/R/fcts.R index e1e3421..8e46f08 100755 --- a/R/fcts.R +++ b/R/fcts.R @@ -341,7 +341,7 @@ LDDMM <- function(data, hypers, fix_boundary = FALSE, fit <- LDDMM_full(data, hypers, Niter, burnin, thin) } else { - fit <- LDDMM_fix_bound(data, hypers, Niter, burnin, thin) + fit <- LDDMM_fix_all_bound(data, hypers, Niter, burnin, thin) } return (fit) @@ -1252,6 +1252,785 @@ LDDMM_fix_bound <- function(data, hypers, Niter = 5000, burnin = 2000, thin = 5) delta_old <- array(NA, dim = c(d_j[1], n_ind)) beta_mu_old <- array(NA, dim = c(J, d_j)) delta_dat <- array(NA, dim = n) + b_old <- array(NA, dim = d_j) + B_beta_b_dat <- array(0, dim = c(n, d_j[1])) + for (s_temp in 1:d_j[1]){ + for (i_temp in 1:n_ind){ + idx_temp <- which((D[,1] == s_temp) & (ind == i_temp)) + if (length(idx_temp) > 0){ + delta_old[s_temp,i_temp] <- min(tau[idx_temp])/2 + delta_dat[idx_temp] <- delta_old[s_temp,i_temp] + } + else{ + delta_old[s_temp,i_temp] <- 1E-3 + } + } + for(j in 1:d_j[2]){ + beta_mu_old[,s_temp,j] <- rep(0.5*log(mean(tau[D[,1] == s_temp])) - log(sd(tau[D[,1] == s_temp])), J) + b_old[s_temp,j] <- 0.5*log(mean(tau[D[,1] == s_temp])) - log(sd(tau[D[,1] == s_temp])) + B_beta_b_dat[which((D[,1] == s_temp)),j] <- b_old[s_temp,j] + } + } + # b_old <- 1.5*log(mean(tau)) - log(sd(tau)) + # B_beta_b_dat <- array(b_old, dim = c(n, d_j[1])) + + + low_bound_mu <- min(beta_mu_old) - 1.5 + upp_bound_mu <- max(beta_mu_old) + 1 + + beta_mu_star_prop <- array(NA, dim = c(J, Z_max)) + beta_mu_u_old <- array(0, dim = c(n_ind, J)) + sigma2_1_mu_old <- 0.005 + sigma2_mu_us_old <- 0.005 + sigma2_mu_ua_old <- 0.005 + + + # Message passing structures + beta_mess <- array(NA, dim = c(J, dim_HB)) + z_old <- list() + + for (j in 1:d_j[1]){ + z_old[[j]] <- array(NA, dim = c(d_j[2], J)) + for (jj in 1:d_j[2]){ + if (j == jj){ + z_old[[j]][jj,] <- j + } + else{ + z_old[[j]][jj,] <- sample((d_j[1] + 1):Z_max, 1) + } + } + } + z_temp <- do.call(rbind, z_old) + + + beta_mu_star_old <- array(NA, dim = c(J, Z_max)) + for (i in 1:Z_max){ + beta_mu_star_old[,i] <- beta_mu_old[1,idx_xy[which(apply(z_temp == i, 1, prod) == 1)[1],1], + idx_xy[which(apply(z_temp == i, 1, prod) == 1)[1],2]] + } + + rand_mat <- array(rnorm(prod(d_j)), dim = d_j) + idx_succ <- which(rand_mat == diag(rand_mat)) + + v_old <- array(NA, dim = c(d_j[2], J)) + z_prop <- list() + + # Transition dynamics objects + alpha_S_old <- alpha_F_old <- 1 + Q_S_old <- array(NA, dim = c(Z_max, Z_max)) + Q_F_old <- array(NA, dim = c(Z_max, Z_max)) + pi_S_0 <- rdirichlet(rep(alpha_S_old / Z_max, Z_max) + + table_int(z_temp[idx_succ,1], Z_max)) + pi_F_0 <- rdirichlet(rep(alpha_F_old / Z_max, Z_max) + + table_int(z_temp[-idx_succ,1], Z_max)) + tr_count_S <- count_assign(z_temp[idx_succ,], 
Z_max) + tr_count_F <- count_assign(z_temp[-idx_succ,], Z_max) + for (h in 1:Z_max){ + Q_S_old[h,] <- rdirichlet(rep(alpha_S_old / Z_max, Z_max) + tr_count_S[h,]) + Q_F_old[h,] <- rdirichlet(rep(alpha_F_old / Z_max, Z_max) + tr_count_F[h,]) + } + + + # Auxiliary variables + prob_mat <- array(NA, dim = c(dim_HB, dim_HB)) + prob_vec <- array(NA, dim = dim_HB) + + # MH proposal parameters + sd_MH_delta <- array(0.3, dim = c(d_j[1], n_ind)) + sd_MH_beta_mu <- 0.4 + sd_MH_beta_b <- array(0.05, dim = d_j) + acc_b <- array(0, dim = d_j) + sd_beta_mu_u <- array(0.4, dim = c(n_ind, J)) + sd_beta_b_u <- array(0.4, dim = c(n_ind, J)) + acc_delta <- array(0, dim = c(d_j[1], n_ind)) + acc_beta_mu_u <- array(0, dim = c(n_ind, J)) + acc_beta_b_u <- array(0, dim = c(n_ind, J)) + n_batch <- 0 + + # Auxiliary variables + B_beta_mu_dat <- array(0, dim = c(n, d_j[1])) + B_beta_mu_u_dat <- array(0, dim = c(n, d_j[1])) + B_beta_b_prop_dat <- array(0, dim = c(n, d_j[1])) + mu_dat <- exp(B_beta_mu_dat + B_beta_mu_u_dat) + b_dat <- matrix(exp(b_old), n, d_j[1]) + + + # Gibbs Sampler + it <- 1 + pb <- txtProgressBar(style = 3) + for (iter in 1:Niter){ + + # (0) Adaptively tune the MH variance for the proposals of \delta_{s, i}, + # beta_u_mu, beta_u_b + if (iter %% 20 == 0){ + n_batch <- n_batch + 1 + delta_n <- min(0.01, n_batch^(-0.5)) + + for (i in 1:n_ind){ + for (x_temp in 1:d_j[1]){ + if (acc_delta[x_temp,i]/iter > 0.44){ + sd_MH_delta[x_temp,i] <- exp(log(sd_MH_delta[x_temp,i]) + delta_n) + } + else{ + sd_MH_delta[x_temp,i] <- exp(log(sd_MH_delta[x_temp,i]) - delta_n) + } + } + for (k in 1:J){ + if (acc_beta_mu_u[i,k]/iter > 0.44){ + sd_beta_mu_u[i,k] <- exp(log(sd_beta_mu_u[i,k]) + delta_n) + } + else{ + sd_beta_mu_u[i,k] <- exp(log(sd_beta_mu_u[i,k]) - delta_n) + } + if (acc_beta_b_u[i,k]/iter > 0.44){ + sd_beta_b_u[i,k] <- exp(log(sd_beta_b_u[i,k]) + delta_n) + } + else{ + sd_beta_b_u[i,k] <- exp(log(sd_beta_b_u[i,k]) - delta_n) + } + } + } + for (d1 in 1:d_j[1]){ + for (d2 in 1:d_j[2]){ + if (acc_b[d1,d2]/iter > 0.44){ + sd_MH_beta_b[d1,d2] <- exp(log(sd_MH_beta_b[d1,d2]) + delta_n) + } + else{ + sd_MH_beta_b[d1,d2] <- exp(log(sd_MH_beta_b[d1,d2]) - delta_n) + } + + } + } + } + + + # (1) Update of the delta parameter: \delta_{s,i}: MH with log normal + # proposal + for (s_temp in 1:d_j[1]){ + for (i_temp in 1:n_ind){ + idx_temp <- which((D[,1] == s_temp) & (ind == i_temp)) + tau_temp <- tau[idx_temp] + cens_temp <- cens[idx_temp] + D_temp <- D[idx_temp,] + + # log-normal proposal distribution centered on the current value + delta_prop <- exp(rnorm(1, log(delta_old[s_temp,i_temp]), sd_MH_delta[s_temp,i_temp])) + while (delta_prop > min(tau[idx_temp])){ + delta_prop <- exp(rnorm(1, log(delta_old[s_temp,i_temp]), sd_MH_delta[s_temp,i_temp])) + } + loglik_prop <- log_likelihood(tau_temp, mu_dat[idx_temp,], + b_dat[idx_temp,], + rep(delta_prop, length(idx_temp)), + cens_temp, D_temp, TRUE) + loglik_old <- log_likelihood(tau_temp, mu_dat[idx_temp,], + b_dat[idx_temp,], + rep(delta_old[s_temp,i_temp], length(idx_temp)), + cens_temp, D_temp, TRUE) + + alpha_acc <- min(0, loglik_prop + log(delta_prop) - + loglik_old - log(delta_old[s_temp,i_temp])) + l_u <- log(runif(1)) + + if (l_u < alpha_acc){ + delta_old[s_temp,i_temp] <- delta_prop + delta_dat[idx_temp] <- delta_old[s_temp,i_temp] + acc_delta[s_temp,i_temp] <- acc_delta[s_temp,i_temp] + 1 + } + + } + } + + + # (2) Update of mu parameters: \mu_{x,y}^{(i)}(t) + for (k in 1:J){ # loop over locations + if (k == 1){ # only data at t = 1 influence the first 
coefficient + idx_time <- T_min + } + else if (k == J){ # only data at t = T influence the last coefficient + idx_time <- T_max + } + else { # data at t = {k-1,k} influence the kth coefficient + idx_time <- (k-1):k + } + + for (h in 1:Z_max){ # loop over possible latent values + # tuples (combinations of covariates) that are clustered together via + # the latent h + idx_cov <- matrix(idx_xy[which(z_temp[,k] == h),], length(which(z_temp[,k] == h)), p) + + X_1k <- unique(idx_cov[,1]) # all possible values of x in this cluster + X_2k <- unique(idx_cov[,2]) # all possible values of y in this cluster + + if (length(X_1k) > 0){ # h \in \mathcal{Z}_{j,k}: posterior update + # Pick data with covariate levels of x clustered in group h and + # at the correct locations + idx_i <- which( (D[,1] %in% X_1k) & (time %in% idx_time) ) + tau_temp <- tau[idx_i] + cens_temp <- cens[idx_i] + D_temp <- D[which((D[,1] %in% X_1k) & (time %in% idx_time)),] + + + # Normal proposal distribution centered on the current value + if (k == 1){ + beta_mu_star_prop[,h] <- beta_mu_star_old[,h] + beta_mu_star_prop[k,h] <- rnorm(1, beta_mu_star_old[k,h], sd_MH_beta_mu) + } + # Normal proposal from the prior + else if (k == J){ + beta_pre1 <- beta_mu_star_old[k-1, unique(z_temp[which(z_temp[,k] == h),k-1])] + + beta_mu_star_prop[,h] <- beta_mu_star_old[,h] + beta_mu_star_prop[k,h] <- rnorm(1, beta_pre1, sqrt(sigma2_1_mu_old)) + } + # Normal proposal from the prior + else { + beta_pre1 <- beta_mu_star_old[k-1, unique(z_temp[which(z_temp[,k] == h),k-1])] + + beta_mu_star_prop[,h] <- beta_mu_star_old[,h] + beta_mu_star_prop[k,h] <- rnorm(1, beta_pre1, sqrt(sigma2_1_mu_old)) + } + B_beta_mu_prop_dat <- B_beta_mu_dat[idx_i,] + + # Modify the proposed values in the corresponding positions + for (hh in 1:nrow(idx_cov)){ + B_beta_mu_prop_dat[which(D_temp[,1] == idx_cov[hh,1]),idx_cov[hh,2]] <- + B[idx_i[which(D_temp[,1] == idx_cov[hh,1])],] %*% beta_mu_star_prop[,h] + } + + # This is the proposed value for \b_{x,y}^{(i)}(t), \mu_{x,y}^{(i)}(t) + mu_prop_dat <- exp(B_beta_mu_prop_dat + B_beta_mu_u_dat[idx_i,]) + + + if (k == 1){ + beta_post1 <- beta_mu_star_old[k+1, unique(z_temp[which(z_temp[,k] == h),k+1])] + + logpost_prop <- log_likelihood(tau_temp, mu_prop_dat, + b_dat[idx_i,], delta_dat[idx_i], + cens_temp, D_temp, TRUE) - + 0.5/sigma2_1_mu_old * sum((beta_mu_star_prop[k,h] - beta_post1)^2) + logpost_old <- log_likelihood(tau_temp, mu_dat[idx_i,], + b_dat[idx_i,], delta_dat[idx_i], + cens_temp, D_temp, TRUE) - + 0.5/sigma2_1_mu_old * sum((beta_mu_star_old[k,h] - beta_post1)^2) + } + else if (k == J){ + logpost_prop <- log_likelihood(tau_temp, mu_prop_dat, + b_dat[idx_i,], delta_dat[idx_i], + cens_temp, D_temp, TRUE) + logpost_old <- log_likelihood(tau_temp, mu_dat[idx_i,], + b_dat[idx_i,], delta_dat[idx_i], + cens_temp, D_temp, TRUE) + } + else { + beta_post1 <- beta_mu_star_old[k+1, unique(z_temp[which(z_temp[,k] == h),k+1])] + + logpost_prop <- log_likelihood(tau_temp, mu_prop_dat, + b_dat[idx_i,], delta_dat[idx_i], + cens_temp, D_temp, TRUE) - + 0.5/sigma2_1_mu_old * sum((beta_mu_star_prop[k,h] - beta_post1)^2) + logpost_old <- log_likelihood(tau_temp, mu_dat[idx_i,], + b_dat[idx_i,], delta_dat[idx_i], + cens_temp, D_temp, TRUE) - + 0.5/sigma2_1_mu_old * sum((beta_mu_star_old[k,h] - beta_post1)^2) + } + + alpha_acc <- min(0, logpost_prop - logpost_old) + l_u <- log(runif(1)) + + if (l_u < alpha_acc){ + + beta_mu_star_old[k,h] <- beta_mu_star_prop[k,h] + B_beta_mu_dat[idx_i,] <- B_beta_mu_prop_dat + mu_dat[idx_i,] <- 
mu_prop_dat + } + } + else { # h \notin \mathcal{Z}_{1,k}: prior sampling + beta_mu_star_old[k,h] <- runif(1, low_bound_mu, upp_bound_mu) + } + } + } + + # 2(a) Update the single scalar boundary parameter + # b_prop <- rnorm(1, b_old, sd_MH_beta_b) + # + # # Modify the proposed values in the corresponding positions + # B_beta_b_prop_dat <- matrix(b_prop, n, d_j[1]) + # + # # This is the proposed value for b + # b_prop_dat <- exp(B_beta_b_prop_dat) + # + # logpost_prop <- log_likelihood(tau, mu_dat, + # b_prop_dat, delta_dat, + # cens, D, TRUE) + # logpost_old <- log_likelihood(tau, mu_dat, + # b_dat, delta_dat, + # cens, D, TRUE) + # + # alpha_acc <- min(0, logpost_prop - logpost_old) + # l_u <- log(runif(1)) + # + # if (l_u < alpha_acc){ + # b_old <- b_prop + # b_dat <- b_prop_dat + # } + for (d1 in 1:d_j[1]){ + idx_i <- which( (D[,1] == d1) ) + tau_temp <- tau[idx_i] + cens_temp <- cens[idx_i] + D_temp <- D[which( (D[,1] == d1) ),] + + for (d2 in 1:d_j[2]){ + b_prop <- rnorm(1, b_old[d1,d2], sd_MH_beta_b[d1,d2]) + + # Modify the proposed values in the corresponding positions + B_beta_b_prop_dat <- B_beta_b_dat[idx_i,] + B_beta_b_prop_dat[,d2] <- b_prop + + # This is the proposed value for b + b_prop_dat <- exp(B_beta_b_prop_dat) + + logpost_prop <- log_likelihood(tau_temp, mu_dat[idx_i,], + b_prop_dat, delta_dat[idx_i], + cens_temp, D_temp, TRUE) + logpost_old <- log_likelihood(tau_temp, mu_dat[idx_i,], + b_dat[idx_i,], delta_dat[idx_i], + cens_temp, D_temp, TRUE) + + alpha_acc <- min(0, logpost_prop - logpost_old) + l_u <- log(runif(1)) + + if (l_u < alpha_acc){ + b_old[d1,d2] <- b_prop + # b_dat <- b_prop_dat + B_beta_b_dat[idx_i,] <- B_beta_b_prop_dat + b_dat[idx_i,] <- b_prop_dat + acc_b[d1,d2] = acc_b[d1,d2] + 1 + } + } + } + + + # (3) Update the cluster assignments + for (x_temp in 1:d_j[1]){ # loop over possible latent values + beta_mess <- array(-Inf, dim = c(J, dim_HB)) + beta_mess[J,] <- 1/dim_HB + + v_old[,J] <- H_ball_unif(z_old[[x_temp]][,J], S = Z_max, r = r_HB) + z_prop[[J]] <- H_ball(v_old[,J], S = Z_max, r = r_HB) + for (k in (J - 1):1){ + idx_i <- which( (time == k) & (D[,1] == x_temp) ) + tau_temp <- tau[idx_i] + cens_temp <- cens[idx_i] + D_temp <- D[idx_i,] + + # (i) Sample the auxiliary variables + v_temp <- H_ball(z_old[[x_temp]][,k], S = Z_max, r = r_HB) + + probs <- rep(-Inf, dim_HB) + for (h in 1:dim_HB){ + B_beta_mu_prop_dat <- B_beta_mu_dat[idx_i,] + + B_beta_mu_prop_dat <- 0.5 * (beta_mu_star_old[k,v_temp[,h]] + + beta_mu_star_old[k+1,z_old[[x_temp]][,k+1]]) + + mu_dat_prop <- exp(t(B_beta_mu_prop_dat + t(B_beta_mu_u_dat[idx_i,]))) + + probs[h] <- g_HB(log_likelihood(tau_temp, mu_dat_prop, b_dat[idx_i,], + delta_dat[idx_i], cens_temp, D_temp, TRUE)) + } + probs <- as.numeric(normalise_log(probs)) + + v_old[,k] <- v_temp[,sample(1:dim_HB, 1, prob = probs)] + z_prop[[k]] <- H_ball(v_old[,k], S = Z_max, r = r_HB) + + + # (ii) Pass messages backwards only in the restricted state space given + # by the slice + z_kp1_temp <- which(beta_mess[k+1,] > 0) + prob_mat <- array(-Inf, dim = c(dim_HB, dim_HB)) + + for (h1 in z_kp1_temp){ + for (h2 in 1:dim_HB){ + + B_beta_mu_prop_dat <- B_beta_mu_dat[idx_i,] + B_beta_mu_prop_dat_1 <- 0.5 * (beta_mu_star_old[k,z_prop[[k]][,h2]] + + beta_mu_star_old[k+1,z_prop[[k+1]][,h1]]) + B_beta_mu_prop_dat_2 <- 0.5 * (beta_mu_star_old[k,v_old[,k]] + + beta_mu_star_old[k+1,z_prop[[k+1]][,h1]]) + + mu_dat_prop_1 <- exp(t(B_beta_mu_prop_dat_1 + t(B_beta_mu_u_dat[idx_i,]))) + mu_dat_prop_2 <- exp(t(B_beta_mu_prop_dat_2 + 
t(B_beta_mu_u_dat[idx_i,]))) + + prob_mat[h2,h1] <- log(beta_mess[k+1,h1]) - + 0.5 / sigma2_1_mu_old * sum((beta_mu_star_old[k,z_prop[[k]][,h2]] - + beta_mu_star_old[k+1,z_prop[[k+1]][,h1]])^2) + + log_likelihood(tau_temp, mu_dat_prop_1, b_dat[idx_i,], + delta_dat[idx_i], cens_temp, D_temp, TRUE) + + g_HB(log_likelihood(tau_temp, mu_dat_prop_2, b_dat[idx_i,], + delta_dat[idx_i], cens_temp, D_temp, TRUE)) + + sum(log(Q_F_old[cbind(z_prop[[k]][-x_temp,h2],z_prop[[k+1]][-x_temp,h1])])) + + log(Q_S_old[z_prop[[k]][x_temp,h2],z_prop[[k+1]][x_temp,h1]]) + } + } + if ( sum(is.infinite(sum_rows_log(prob_mat))) == dim_HB){ + beta_mess[k,] <- 1/dim_HB + } + else{ + beta_mess[k,] <- as.numeric(sum_rows_log(prob_mat)) + beta_mess[k,] <- as.numeric(normalise_log(beta_mess[k,])) + } + } + + + # (iii) Sample states forward (only on allowed states) + idx_fail <- (1:d_j[2])[-x_temp] + # Sample z_1 + prob_vec <- log(beta_mess[1,]) + log(pi_S_0[z_prop[[1]][x_temp,]]) + + colSums(matrix(log(pi_F_0[z_prop[[1]][-x_temp,]]), d_j[2] - 1, dim_HB)) + prob_vec <- as.numeric(normalise_log(prob_vec)) + + idx_samp <- sample(1:dim_HB, 1, FALSE, prob_vec) + z_old[[x_temp]][,1] <- z_prop[[1]][,idx_samp] + + # Sample z_k + for (k in 2:J){ + idx_km1 <- which( (time == k - 1) & (D[,1] == x_temp) ) + tau_temp <- tau[idx_km1] + cens_temp <- cens[idx_km1] + D_temp <- D[idx_km1,] + + prob_vec <- log(beta_mess[k,]) + + log(Q_S_old[cbind(z_old[[x_temp]][x_temp,k-1], z_prop[[k]][x_temp,])]) + for (kkk in idx_fail){ + prob_vec <- prob_vec + log(Q_F_old[cbind(z_old[[x_temp]][kkk,k-1], z_prop[[k]][kkk,])]) + } + + for (z_k_temp in which(is.finite(prob_vec))){ + B_beta_mu_prop_dat <- B_beta_mu_dat[idx_km1,] + + B_beta_mu_prop_dat_1 <- 0.5 * (beta_mu_star_old[k-1,z_old[[x_temp]][,k-1]] + + beta_mu_star_old[k,z_prop[[k]][,z_k_temp]]) + B_beta_mu_prop_dat_2 <- 0.5 * (beta_mu_star_old[k-1,v_old[,k-1]] + + beta_mu_star_old[k,z_prop[[k]][,z_k_temp]]) + + mu_dat_prop_1 <- exp(t(B_beta_mu_prop_dat_1 + t(B_beta_mu_u_dat[idx_km1,]))) + mu_dat_prop_2 <- exp(t(B_beta_mu_prop_dat_2 + t(B_beta_mu_u_dat[idx_km1,]))) + + prob_vec[z_k_temp] <- prob_vec[z_k_temp] - + 0.5 / sigma2_1_mu_old * sum((beta_mu_star_old[k-1,z_old[[x_temp]][,k-1]] - + beta_mu_star_old[k,z_prop[[k]][,z_k_temp]])^2) + + log_likelihood(tau_temp, mu_dat_prop_1, b_dat[idx_km1,], + delta_dat[idx_km1], cens_temp, D_temp, TRUE) + + g_HB(log_likelihood(tau_temp, mu_dat_prop_2, b_dat[idx_km1,], + delta_dat[idx_km1], cens_temp, D_temp, TRUE)) + } + prob_vec <- as.numeric(normalise_log(prob_vec)) + + idx_samp <- sample(1:dim_HB, 1, FALSE, prob_vec) + z_old[[x_temp]][,k] <- z_prop[[k]][,idx_samp] + } + + + # (4) Assign the cluster specific curves f_{\mu} + for (y_temp in 1:d_j[2]){ + beta_mu_old[,x_temp,y_temp] <- beta_mu_star_old[cbind(1:J, z_old[[x_temp]][y_temp,])] + } + B_beta_mu_dat[(D[,1] == x_temp),] <- B[D[,1] == x_temp,] %*% beta_mu_old[,x_temp,] + } + z_temp <- do.call(rbind, z_old) + mu_dat <- exp(B_beta_mu_dat + B_beta_mu_u_dat) + + + # (5) Update the transition probabilities + pi_S_0 <- rdirichlet(rep(alpha_S_old / Z_max, Z_max) + table_int(z_temp[idx_succ,1], Z_max)) + pi_F_0 <- rdirichlet(rep(alpha_F_old / Z_max, Z_max) + table_int(z_temp[-idx_succ,1], Z_max)) + tr_count_S <- count_assign(z_temp[idx_succ,], Z_max) + tr_count_F <- count_assign(z_temp[-idx_succ,], Z_max) + for (h in 1:Z_max){ + Q_S_old[h,] <- rdirichlet(rep(alpha_S_old / Z_max, Z_max) + + tr_count_S[h,]) + Q_F_old[h,] <- rdirichlet(rep(alpha_F_old / Z_max, Z_max) + + tr_count_F[h,]) + } + + + alpha_S_prop <- 
exp(rnorm(1, log(alpha_S_old), 0.1)) + while ((alpha_S_prop < 0.01) | (alpha_S_prop > 10)){ + alpha_S_prop <- exp(rnorm(1, log(alpha_S_old), 0.1)) + } + alpha_acc <- dgamma(alpha_S_prop, 1, 1, log = T) + + Z_max * lgamma(alpha_S_prop) - + Z_max^2 * lgamma(alpha_S_prop/Z_max) + + (alpha_S_prop/Z_max - 1) * sum(log(Q_S_old)) - ( + dgamma(alpha_S_old, 1, 1, log = T) + + Z_max * lgamma(alpha_S_old) - + Z_max^2 * lgamma(alpha_S_old/Z_max) + + (alpha_S_old/Z_max - 1) * sum(log(Q_S_old))) + + l_u <- log(runif(1)) + if (!is.na(alpha_acc)){ + if (l_u < alpha_acc){ + alpha_S_old <- alpha_S_prop + } + } + + alpha_F_prop <- exp(rnorm(1, log(alpha_F_old), 0.1)) + while ((alpha_F_prop < 0.01) | (alpha_F_prop > 10)){ + alpha_F_prop <- exp(rnorm(1, log(alpha_F_old), 0.1)) + } + alpha_acc <- dgamma(alpha_F_prop, 1, 1, log = T) + + Z_max * lgamma(alpha_F_prop) - + Z_max^2 * lgamma(alpha_F_prop/Z_max) + + (alpha_F_prop/Z_max - 1) * sum(log(Q_F_old)) - ( + dgamma(alpha_F_old, 1, 1, log = T) + + Z_max * lgamma(alpha_F_old) - + Z_max^2 * lgamma(alpha_F_old/Z_max) + + (alpha_F_old/Z_max - 1) * sum(log(Q_F_old))) + + l_u <- log(runif(1)) + if (!is.na(alpha_acc)){ + if (l_u < alpha_acc){ + alpha_F_old <- alpha_F_prop + } + } + + + # (6) Correction term for random effects. + corr_mu <- colMeans(beta_mu_u_old) + beta_mu_u_old <- t(t(beta_mu_u_old) - corr_mu) + + for (k in 1:J){ + if (k == 1){ + idx_time <- which(time == T_min) + } + else if (k == J){ + idx_time <- which(time == T_max) + } + else { + idx_time <- which(time %in% (k-1):k) + } + beta_mu_old[k,,] <- beta_mu_old[k,,] + corr_mu[k] + beta_mu_star_old[k,] <- beta_mu_star_old[k,] + corr_mu[k] + B_beta_mu_dat[idx_time,] <- B_beta_mu_dat[idx_time,] + + as.numeric(B[idx_time,] %*% corr_mu) + B_beta_mu_u_dat[idx_time,] <- B_beta_mu_u_dat[idx_time,] - + as.numeric(B[idx_time,] %*% corr_mu) + } + + + # (7) Update the cluster specific smoothness parameters + RSS_mu <- RSS_b <- 0 + for (h1 in 1:d_j[1]){ + for (h2 in 1:d_j[2]){ + RSS_mu <- RSS_mu + as.numeric(crossprod(beta_mu_old[,h1,h2], P_smooth) %*% + beta_mu_old[,h1,h2]) + } + } + + sigma2_1_mu_temp <- exp(rnorm(1, log(sigma2_1_mu_old), 0.1)) + while(sigma2_1_mu_temp > 0.2){ + sigma2_1_mu_temp <- exp(rnorm(1, log(sigma2_1_mu_old), 0.1)) + } + l_alpha <- min(c(0, log(sigma2_1_mu_temp) - + 0.5 * (prod(d_j)*(J-1)) * log(sigma2_1_mu_temp) - + 0.5/sigma2_1_mu_temp * RSS_mu + + dhalfcauhy(sqrt(sigma2_1_mu_temp), hypers$s_sigma_mu, T) - + (log(sigma2_1_mu_old) - + 0.5 * (prod(d_j)*(J-1)) * log(sigma2_1_mu_old) - + 0.5/sigma2_1_mu_old * RSS_mu + + dhalfcauhy(sqrt(sigma2_1_mu_old), hypers$s_sigma_mu, T)))) + l_u <- log(runif(1)) + if (l_u < l_alpha){ + sigma2_1_mu_old <- sigma2_1_mu_temp + } + + + # (9a) Update random effects for mu + reff <- sample_reff_mu(tau, D, cens, beta_mu_u_old, delta_dat, b_dat, + B_beta_mu_dat, mu_dat, B, P_smooth, ind, time, + sigma2_mu_us_old, sigma2_mu_ua_old, sd_beta_mu_u, + acc_beta_mu_u) + beta_mu_u_old <- reff$beta_u_old + B_beta_mu_u_dat <- reff$B_beta_u_dat + mu_dat <- exp(B_beta_mu_dat + B_beta_mu_u_dat) + acc_beta_mu_u <- reff$acc_beta_u + + + # (9b) Update random effects variances: MH with log normal proposal + ls_var <- sample_smooth_var(sigma2_mu_ua_old, sigma2_mu_us_old, + beta_mu_u_old, P_smooth, n_ind) + sigma2_mu_ua_old <- ls_var$sigma2_ua_old + sigma2_mu_us_old <- ls_var$sigma2_us_old + + + + # After burnin, save parameters in the chain + if ( (iter > burnin) && (iter %% thin == 0) ){ + # This is the correction for the random effects integration: we need to + # compute the 
variance of the random effects as detailed in the Supplementary + # Materials + cov_reff_mu <- solve(diag(J) / sigma2_mu_ua_old + P_smooth / sigma2_mu_us_old) + corr_term_gr_mu <- rep(0, nrow(Bgrid)) + corr_term_mu <- rep(0, T_max) + for (k in 1:J){ + corr_term_gr_mu <- corr_term_gr_mu + rowSums(Bgrid[,k] * t(t(Bgrid) * cov_reff_mu[,k])) + corr_term_mu <- corr_term_mu + rowSums(B_basis(1:T_max, knots)[,k] * + t(t(B_basis(1:T_max, knots)) * cov_reff_mu[,k])) + } + + + + for (h1 in 1:d_j[1]){ + for (h2 in 1:d_j[2]){ + post_mean_mu[,h1,h2,it] <- exp(Bgrid %*% beta_mu_old[,h1,h2] + 0.5 * corr_term_gr_mu) + post_mean_b[,h1,h2,it] <- exp(b_old[h1,h2]) + + for (i in 1:n_ind){ + post_ind_mu[,i,h1,h2,it] <- exp(Bgrid %*% beta_mu_old[,h1,h2] + Bgrid %*% beta_mu_u_old[i,]) + post_ind_b[,i,h1,h2,it] <- exp(b_old[h1,h2]) + } + } + } + post_ind_delta[,,it] <- delta_old + + + # Sample from the predictive distributions of response category and + # response times (so we can use predictive checks to evaluate goodness of fit) + for (t in 1:T_max){ + for (x_temp in 1:d_j[1]){ + mu_temp <- as.numeric(exp(B_basis(t, knots) %*% beta_mu_old[,x_temp,] + + 0.5 * corr_term_mu[t])) + b_temp <- exp(b_old[x_temp,]) + delta_temp <- mean(delta_old[x_temp,]) + pred_temp <- delta_temp + LaplacesDemon::rinvgaussian(d_j[2], b_temp/mu_temp, + b_temp^2) + pred_ans[t,x_temp,it] <- which.min(pred_temp) + pred_time[t,x_temp,it] <- min(pred_temp) + + for (i in 1:n_ind){ + mu_temp <- as.numeric(exp(B_basis(t, knots) %*% (beta_mu_old[,x_temp,] + + beta_mu_u_old[i,]))) + b_temp <- exp(b_old[x_temp,]) + delta_temp <- delta_old[x_temp,i] + pred_temp <- delta_temp + LaplacesDemon::rinvgaussian(d_j[2], b_temp/mu_temp, + b_temp^2) + pred_ans_ind[i,t,x_temp,it] <- which.min(pred_temp) + pred_time_ind[i,t,x_temp,it] <- min(pred_temp) + } + } + } + + + # Save MCMC objects + post_mean_delta[it,] <- rowMeans(delta_old) + sigma2_mu_us[it] <- sigma2_mu_us_old + sigma2_mu_ua[it] <- sigma2_mu_ua_old + sigma2_1_mu[it] <- sigma2_1_mu_old + z[,,it] <- z_temp + + it <- it + 1 + } + setTxtProgressBar(pb, iter/Niter) + } + + return(list('Z' = z, + 'post_mean_delta' = post_mean_delta, + 'post_mean_mu' = post_mean_mu, + 'post_mean_b' = post_mean_b, + 'post_ind_delta' = post_ind_delta, + 'post_ind_mu' = post_ind_mu, + 'post_ind_b' = post_ind_b, + 'sigma2_mu_us' = sigma2_mu_us, + 'sigma2_mu_ua' = sigma2_mu_ua, + 'sigma2_1_mu' = sigma2_1_mu, + 'pred_ans' = pred_ans, + 'pred_time' = pred_time, + 'pred_ans_ind' = pred_ans_ind, + 'pred_time_ind' = pred_time_ind + )) +} + + +LDDMM_fix_all_bound <- function(data, hypers, Niter = 5000, burnin = 2000, thin = 5){ + + # Choose the number of knots (default is between the beginning and the end of + # the study, at every block) + T_min <- min(data$block) + T_max <- max(data$block) + knots <- T_min:T_max + K <- length(knots) + P_smooth <- P_smooth1(K + 1) + + # Choose a fine grid in order to plot the curves resulting from the spline basis + # expansion + xgrid <- seq(T_min, T_max, by = 1) + + # Extract quantities of interest + tau <- data$r_time + ind <- data$subject + time <- data$block + cens <- data$cens + D <- cbind(data$s, data$d) + + n_ind <- length(unique(ind)) + ind <- as.numeric(plyr::mapvalues(factor(ind), + from = levels(factor(ind)), + to = 1:n_ind)) + B <- B_basis(data$block, knots) + Bgrid <- B_basis(xgrid, knots) + + samp_size <- (Niter - burnin)/thin # sample size + p <- ncol(D) # number of covariates + d_j <- rep(0, p) # Number of levels for each covariate + for (j in 1:p){ + d_j[j] <- 
length(unique(D[!is.na(D[,j]),j])) + } + J <- ncol(B) # number of locations + n <- nrow(B) # total number of observations + n_ind <- length(unique(ind)) # number of individuals + + + # Rescale the time steps in \{1, ..., T_max\} + T_max <- max(time) + T_min <- min(time) + time <- time - T_min + 1 + T_max <- max(time) + T_min <- min(time) + idx_xy <- t(apply(expand.grid(y = 1:d_j[2], x = 1:d_j[1]), 1, rev)) + colnames(idx_xy) <- NULL + + + # Set HB structures + r_HB <- 1 + Z_max <- min(prod(d_j), 6) + dim_HB <- sum((Z_max - 1)^(0:r_HB) * choose(d_j[2], 0:r_HB)) + + # Set MCMC objects + z <- array(NA, dim = c(prod(d_j), J, samp_size)) + post_mean_delta <- array(NA, dim = c(samp_size, d_j[1])) + post_mean_mu <- array(NA, dim = c(nrow(Bgrid), d_j, samp_size)) + post_mean_b <- array(NA, dim = c(nrow(Bgrid), d_j, samp_size)) + post_ind_delta <- array(NA, dim = c(d_j[1], n_ind, samp_size)) + post_ind_mu <- array(NA, dim = c(nrow(Bgrid), n_ind, d_j, samp_size)) + post_ind_b <- array(NA, dim = c(nrow(Bgrid), n_ind, d_j, samp_size)) + sigma2_mu_us <- array(NA, dim = samp_size) + sigma2_mu_ua <- array(NA, dim = samp_size) + sigma2_b_us <- array(NA, dim = samp_size) + sigma2_b_ua <- array(NA, dim = samp_size) + sigma2_1_mu <- array(NA, dim = samp_size) + sigma2_1_b <- array(NA, dim = samp_size) + pred_time <- array(NA, dim = c(T_max, d_j[1], samp_size)) + pred_ans <- array(NA, dim = c(T_max, d_j[1], samp_size)) + pred_time_ind <- array(NA, dim = c(n_ind, T_max, d_j[1], samp_size)) + pred_ans_ind <- array(NA, dim = c(n_ind, T_max, d_j[1], samp_size)) + # loglik_chain <- array(NA, dim = c(samp_size, n)) + + # Set initial values + delta_old <- array(NA, dim = c(d_j[1], n_ind)) + beta_mu_old <- array(NA, dim = c(J, d_j)) + delta_dat <- array(NA, dim = n) + b_old <- array(NA, dim = d_j) + B_beta_b_dat <- array(0, dim = c(n, d_j[1])) for (s_temp in 1:d_j[1]){ for (i_temp in 1:n_ind){ idx_temp <- which((D[,1] == s_temp) & (ind == i_temp)) @@ -1263,16 +2042,17 @@ LDDMM_fix_bound <- function(data, hypers, Niter = 5000, burnin = 2000, thin = 5) delta_old[s_temp,i_temp] <- 1E-3 } } - for(j in 1:d_j[1]){ + for(j in 1:d_j[2]){ beta_mu_old[,s_temp,j] <- rep(0.5*log(mean(tau[D[,1] == s_temp])) - log(sd(tau[D[,1] == s_temp])), J) } } b_old <- 1.5*log(mean(tau)) - log(sd(tau)) + B_beta_b_dat <- array(b_old, dim = c(n, d_j[1])) low_bound_mu <- min(beta_mu_old) - 1.5 upp_bound_mu <- max(beta_mu_old) + 1 - + beta_mu_star_prop <- array(NA, dim = c(J, Z_max)) beta_mu_u_old <- array(0, dim = c(n_ind, J)) sigma2_1_mu_old <- 0.005 @@ -1334,6 +2114,7 @@ LDDMM_fix_bound <- function(data, hypers, Niter = 5000, burnin = 2000, thin = 5) sd_MH_delta <- array(0.3, dim = c(d_j[1], n_ind)) sd_MH_beta_mu <- 0.4 sd_MH_beta_b <- 0.05 + acc_b <- 0 sd_beta_mu_u <- array(0.4, dim = c(n_ind, J)) sd_beta_b_u <- array(0.4, dim = c(n_ind, J)) acc_delta <- array(0, dim = c(d_j[1], n_ind)) @@ -1344,10 +2125,9 @@ LDDMM_fix_bound <- function(data, hypers, Niter = 5000, burnin = 2000, thin = 5) # Auxiliary variables B_beta_mu_dat <- array(0, dim = c(n, d_j[1])) B_beta_mu_u_dat <- array(0, dim = c(n, d_j[1])) - B_beta_b_dat <- array(0, dim = c(n, d_j[1])) B_beta_b_prop_dat <- array(0, dim = c(n, d_j[1])) mu_dat <- exp(B_beta_mu_dat + B_beta_mu_u_dat) - b_dat <- exp(B_beta_b_dat) + b_dat <- matrix(exp(b_old), n, d_j[1]) # Gibbs Sampler @@ -1385,6 +2165,15 @@ LDDMM_fix_bound <- function(data, hypers, Niter = 5000, burnin = 2000, thin = 5) } } } + + if (acc_b/iter > 0.44){ + sd_MH_beta_b <- exp(log(sd_MH_beta_b) + delta_n) + } + else{ + sd_MH_beta_b <- 
exp(log(sd_MH_beta_b) - delta_n) + } + + } @@ -1536,18 +2325,18 @@ LDDMM_fix_bound <- function(data, hypers, Niter = 5000, burnin = 2000, thin = 5) # 2(a) Update the single scalar boundary parameter b_prop <- rnorm(1, b_old, sd_MH_beta_b) - + # Modify the proposed values in the corresponding positions B_beta_b_prop_dat <- matrix(b_prop, n, d_j[1]) # This is the proposed value for b b_prop_dat <- exp(B_beta_b_prop_dat) - logpost_prop <- log_likelihood(tau, mu_dat, - b_prop_dat, delta_dat, + logpost_prop <- log_likelihood(tau, mu_dat, + b_prop_dat, delta_dat, cens, D, TRUE) - logpost_old <- log_likelihood(tau, mu_dat, - b_dat, delta_dat, + logpost_old <- log_likelihood(tau, mu_dat, + b_dat, delta_dat, cens, D, TRUE) alpha_acc <- min(0, logpost_prop - logpost_old) @@ -1555,10 +2344,11 @@ LDDMM_fix_bound <- function(data, hypers, Niter = 5000, burnin = 2000, thin = 5) if (l_u < alpha_acc){ b_old <- b_prop - B_beta_b_dat <- B_beta_b_prop_dat b_dat <- b_prop_dat + B_beta_b_dat <- B_beta_b_prop_dat + acc_b = acc_b + 1 } - + # (3) Update the cluster assignments for (x_temp in 1:d_j[1]){ # loop over possible latent values @@ -1579,12 +2369,12 @@ LDDMM_fix_bound <- function(data, hypers, Niter = 5000, burnin = 2000, thin = 5) probs <- rep(-Inf, dim_HB) for (h in 1:dim_HB){ B_beta_mu_prop_dat <- B_beta_mu_dat[idx_i,] - + B_beta_mu_prop_dat <- 0.5 * (beta_mu_star_old[k,v_temp[,h]] + beta_mu_star_old[k+1,z_old[[x_temp]][,k+1]]) - + mu_dat_prop <- exp(t(B_beta_mu_prop_dat + t(B_beta_mu_u_dat[idx_i,]))) - + probs[h] <- g_HB(log_likelihood(tau_temp, mu_dat_prop, b_dat[idx_i,], delta_dat[idx_i], cens_temp, D_temp, TRUE)) } @@ -1610,7 +2400,7 @@ LDDMM_fix_bound <- function(data, hypers, Niter = 5000, burnin = 2000, thin = 5) mu_dat_prop_1 <- exp(t(B_beta_mu_prop_dat_1 + t(B_beta_mu_u_dat[idx_i,]))) mu_dat_prop_2 <- exp(t(B_beta_mu_prop_dat_2 + t(B_beta_mu_u_dat[idx_i,]))) - + prob_mat[h2,h1] <- log(beta_mess[k+1,h1]) - 0.5 / sigma2_1_mu_old * sum((beta_mu_star_old[k,z_prop[[k]][,h2]] - beta_mu_star_old[k+1,z_prop[[k+1]][,h1]])^2) + @@ -1657,15 +2447,15 @@ LDDMM_fix_bound <- function(data, hypers, Niter = 5000, burnin = 2000, thin = 5) for (z_k_temp in which(is.finite(prob_vec))){ B_beta_mu_prop_dat <- B_beta_mu_dat[idx_km1,] - + B_beta_mu_prop_dat_1 <- 0.5 * (beta_mu_star_old[k-1,z_old[[x_temp]][,k-1]] + beta_mu_star_old[k,z_prop[[k]][,z_k_temp]]) B_beta_mu_prop_dat_2 <- 0.5 * (beta_mu_star_old[k-1,v_old[,k-1]] + beta_mu_star_old[k,z_prop[[k]][,z_k_temp]]) - + mu_dat_prop_1 <- exp(t(B_beta_mu_prop_dat_1 + t(B_beta_mu_u_dat[idx_km1,]))) mu_dat_prop_2 <- exp(t(B_beta_mu_prop_dat_2 + t(B_beta_mu_u_dat[idx_km1,]))) - + prob_vec[z_k_temp] <- prob_vec[z_k_temp] - 0.5 / sigma2_1_mu_old * sum((beta_mu_star_old[k-1,z_old[[x_temp]][,k-1]] - beta_mu_star_old[k,z_prop[[k]][,z_k_temp]])^2) + @@ -1748,7 +2538,7 @@ LDDMM_fix_bound <- function(data, hypers, Niter = 5000, burnin = 2000, thin = 5) # (6) Correction term for random effects. corr_mu <- colMeans(beta_mu_u_old) beta_mu_u_old <- t(t(beta_mu_u_old) - corr_mu) - + for (k in 1:J){ if (k == 1){ idx_time <- which(time == T_min) @@ -1793,7 +2583,7 @@ LDDMM_fix_bound <- function(data, hypers, Niter = 5000, burnin = 2000, thin = 5) if (l_u < l_alpha){ sigma2_1_mu_old <- sigma2_1_mu_temp } - + # (9a) Update random effects for mu reff <- sample_reff_mu(tau, D, cens, beta_mu_u_old, delta_dat, b_dat, diff --git a/README.Rmd b/README.Rmd index 043ab56..0311c2e 100644 --- a/README.Rmd +++ b/README.Rmd @@ -51,7 +51,8 @@ The following is a minimal example of a simple model fit. 
 ```{r, eval = TRUE, results = 'hide', fig.show = 'hide', warning = FALSE, message = FALSE}
 # Load libraries
 library(RColorBrewer)
-library(tidyverse)
+library(ggplot2)
+library(dplyr)
 library(reshape2)
 library(latex2exp)
 library(lddmm)
diff --git a/README.md b/README.md
index 5bfb4c7..2806249 100644
--- a/README.md
+++ b/README.md
@@ -62,7 +62,8 @@ The following is a minimal example of a simple model fit.
 ``` r
 # Load libraries
 library(RColorBrewer)
-library(tidyverse)
+library(ggplot2)
+library(dplyr)
 library(reshape2)
 library(latex2exp)
 library(lddmm)
diff --git a/cran-comments.md b/cran-comments.md
index 90670e6..f99d596 100644
--- a/cran-comments.md
+++ b/cran-comments.md
@@ -1,8 +1,8 @@
 ## R CMD check results
 
-── R CMD check results ─────────────────────────────────── lddmm 0.1 ────
-Duration: 46.6s
+── R CMD check results ────────────────────────────────────────────────────────────────────────────────────────────── lddmm 0.1.0 ────
+Duration: 1m 7.1s
 
 0 errors ✓ | 0 warnings ✓ | 0 notes ✓
diff --git a/vignettes/.gitignore b/vignettes/.gitignore
new file mode 100644
index 0000000..097b241
--- /dev/null
+++ b/vignettes/.gitignore
@@ -0,0 +1,2 @@
+*.html
+*.R
diff --git a/vignettes/minimal_example.Rmd b/vignettes/minimal_example.Rmd
new file mode 100644
index 0000000..2234509
--- /dev/null
+++ b/vignettes/minimal_example.Rmd
@@ -0,0 +1,66 @@
+---
+title: "minimal_example"
+output: rmarkdown::html_vignette
+vignette: >
+  %\VignetteIndexEntry{minimal_example}
+  %\VignetteEngine{knitr::rmarkdown}
+  %\VignetteEncoding{UTF-8}
+---
+
+```{r, include = FALSE}
+knitr::opts_chunk$set(
+  collapse = TRUE,
+  comment = "#>"
+)
+```
+
+The following is a minimal example of a simple model fit.
+
+```{r setup}
+# Load libraries
+library(RColorBrewer)
+library(ggplot2)
+library(dplyr)
+library(reshape2)
+library(latex2exp)
+library(lddmm)
+
+theme_set(theme_bw(base_size = 14))
+cols <- brewer.pal(9, "Set1")
+```
+
+
+```{r, eval = TRUE, results = 'hide', fig.show = 'hide', warning = FALSE, message = FALSE}
+# Load the data
+data('data')
+
+# Descriptive plots
+plot_accuracy(data)
+plot_RT(data)
+
+# Run the model
+hypers <- NULL
+hypers$s_sigma_mu <- hypers$s_sigma_b <- 0.1
+
+# Use more iterations for a real analysis: the values here are kept small
+# so that the vignette builds in less than a minute
+Niter <- 25
+burnin <- 15
+thin <- 1
+samp_size <- (Niter - burnin) / thin
+
+set.seed(123)
+fit <- LDDMM(data = data,
+             hypers = hypers,
+             fix_boundary = FALSE,
+             Niter = Niter,
+             burnin = burnin,
+             thin = thin)
+
+# Plot the results
+plot_post_pars(data, fit, par = 'drift')
+plot_post_pars(data, fit, par = 'boundary')
+```
+
+To extract posterior draws or posterior summaries instead of only plotting them, one can use the functions `extract_post_draws` and `extract_post_mean`.
+To fit a variant of the model that fixes the boundary parameters, call `LDDMM` with the option `fix_boundary = TRUE`.
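+
+As a usage sketch, these functions can be combined with the fit above as follows. The argument pattern for the extract functions is an assumption made for illustration (it mirrors `plot_post_pars(data, fit, par)`); see `?extract_post_mean` and `?extract_post_draws` for the exact signatures.
+
+```{r, eval = FALSE}
+# Hypothetical sketch: the (data, fit, par) arguments mirror plot_post_pars()
+# above and may differ from the actual signatures of the extract functions
+drift_means <- extract_post_mean(data, fit, par = 'drift')
+drift_draws <- extract_post_draws(data, fit, par = 'drift')
+
+# Refit the same model with the boundary parameters held fixed
+fit_fixed <- LDDMM(data = data,
+                   hypers = hypers,
+                   fix_boundary = TRUE,
+                   Niter = Niter,
+                   burnin = burnin,
+                   thin = thin)
+```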