Paso 2.2

Análisis de clases latentes exploratoria y comparativa con predictores

Andrés González Santa Cruz
May 04, 2023
Show code
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
Show code
 // Image-zoom overlay: clicking any rendered plot opens it in a fixed,
 // centered lightbox; clicking the enlarged copy closes it again.
 $(document).ready(function() {
    // Hidden overlay container prepended once to <body>.
    $('body').prepend('<div class=\"zoomDiv\"><img src=\"\" class=\"zoomImg\"></div>');
    // onClick function for all plots (img's): copy the clicked image's
    // src into the overlay and style the overlay as a centered modal.
    $('img:not(.zoomImg)').click(function() {
      $('.zoomImg').attr('src', $(this).attr('src')).css({width: '100%'});
      $('.zoomDiv').css({opacity: '1', width: 'auto', border: '1px solid white', borderRadius: '5px', position: 'fixed', top: '50%', left: '50%', marginRight: '-50%', transform: 'translate(-50%, -50%)', boxShadow: '0px 0px 50px #888888', zIndex: '50', overflow: 'auto', maxHeight: '100%'});
    });
    // onClick function for zoomImg: collapse the overlay (width 0 keeps it
    // in the DOM for reuse instead of removing it).
    $('img.zoomImg').click(function() {
      $('.zoomDiv').css({opacity: '0', width: '0%'}); 
    });
  });
  
Show code
<script src="hideOutput.js"></script> 
Show code
// Fold/unfold helper for knitr chunks tagged with class "fold":
// adds "Show Source"/"Show Output"/"Show Plot" toggle buttons, hides the
// foldable content on load, and toggles visibility (and the button label)
// on click.
$(document).ready(function() {    
    $chunks = $('.fold');    
    $chunks.each(function () {      // add button to source code chunks     
    if ( $(this).hasClass('s') ) {       
        $('pre.r', this).prepend("<div class=\"showopt\">Show Source</div><br style=\"line-height:22px;\"/>");
            $('pre.r', this).children('code').attr('class', 'folded');     
            }      // add button to output chunks     
        if ( $(this).hasClass('o') ) {       
            $('pre:not(.r)', this).has('code').prepend("<div class=\"showopt\">Show Output</div><br style=\"line-height:22px;\"/>");       
            $('pre:not(.r)', this).children('code:not(r)').addClass('folded');        // add button to plots       
            // Wrap images in a <pre class="plot"> so plots share the same
            // fold mechanics as code/output blocks.
            $(this).find('img').wrap('<pre class=\"plot\"></pre>');       
            $('pre.plot', this).prepend("<div class=\"showopt\">Show Plot</div><br style=\"line-height:22px;\"/>");       
            $('pre.plot', this).children('img').addClass('folded');      
            }   
});    // hide all chunks when document is loaded   
    $('.folded').css('display', 'none')    // function to toggle the visibility   
    $('.showopt').click(function() {     
            var label = $(this).html();     
            // Button label doubles as state: "Show ..." <-> "Hide ...".
            if (label.indexOf("Show") >= 0) {       
                $(this).html(label.replace("Show", "Hide"));     
            } else {
              $(this).html(label.replace("Hide", "Show"));     
            }     
    $(this).siblings('code, img').slideToggle('fast', 'swing');   
    }); 
}); 

Cargamos los datos

Show code
# NOTE(review): rm(list = ls()) wipes the whole global environment. That is
# tolerable at the top of a knitted report, but avoid it in sourced scripts.
# gc() is called only to print the memory-usage table shown below.
rm(list = ls());gc()
         used (Mb) gc trigger (Mb) max used (Mb)
Ncells 535815 28.7    1209044 64.6   643711 34.4
Vcells 907865  7.0    8388608 64.0  1649632 12.6
Show code
# Restore the prepared workspace from step 2.1 (provides mydata_preds3 and
# related objects used below). Path is relative to the working directory.
load("data2_lca2_2023_04_26.RData")

Cargamos los paquetes

Show code
# Chunk options: show source code by default.
knitr::opts_chunk$set(echo = TRUE)

# Install (when missing) and then load each CRAN package.
# BUG FIX: the original `if(!require(p)){install.packages("p")}` idiom
# installs a missing package but never loads it in the same session, so a
# first run on a clean machine would fail downstream. Here we call
# library() immediately after installing.
load_or_install <- function(pkgs) {
  for (pkg in pkgs) {
    if (!require(pkg, character.only = TRUE)) {
      install.packages(pkg)
      library(pkg, character.only = TRUE)
    }
  }
}

load_or_install("poLCA")

# poLCAParallel is distributed on GitHub, not CRAN.
if (!require(poLCAParallel)) {
  devtools::install_github("QMUL/poLCAParallel@package")
}

# Load order is kept identical to the original script so that function
# masking between packages (e.g. Hmisc vs tidyverse) is unchanged.
load_or_install(c("compareGroups", "parallel", "Hmisc", "tidyverse"))

# sjPlot installation is best-effort (kept inside try(), as before).
try(if (!require(sjPlot)) { install.packages("sjPlot") })

load_or_install(c(
  "emmeans", "nnet", "here", "doParallel", "progress", "caret",
  "rpart", "rpart.plot", "partykit", "randomForest", "ggcorrplot",
  "polycor", "tableone", "broom", "plotly", "rsvg", "DiagrammeRsvg"
))

#if(!require(poLCA)){githubinstall::gh_install_packages("poLCA", ref = github_pull("14"))}

#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:

Definimos ciertas constantes

Show code
# Tuning constants for the LCA model search.
clus_iter= 500 #500
# NOTE(review): parallel::detectCores() can return NA on some platforms —
# TODO confirm on the target machine before relying on it for threading.
n_thread <- parallel::detectCores()
nrep <- clus_iter # number of different initial values (could be n_thread too)
n_class_max <- 10 # maximum number of classes to investigate
n_bootstrap <- 100 #30 # 50 number of bootstrap samples
print(n_thread)
[1] 8

Análisis de clases latentes

Show code
# Build and export the schematic flowchart (Figure 1) as PDF, PNG and HTML.
library(DiagrammeR) #⋉
gr_lca2<-
# NOTE(review): the Graphviz specification string (≈1357 chars) was elided
# when this document was extracted; the original chunk passed a quoted
# dot-language graph here.
DiagrammeR::grViz([1357 chars quoted with '"'])#, width = 1200, height = 900
#https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3733703/
#Cohort matching on a variable associated with both outcome and censoring
#Cohort matching on a confounder. We let A denote an exposure, Y denote an outcome, and C denote a confounder and matching variable. The variable S indicates whether an individual in the source population is selected for the matched study (1: selected, 0: not selected). See Section 2-7 for details.
#https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7064555/
# Raster export settings: 1200 DPI, 21 x 8 cm canvas.
DPI = 1200
WidthCM = 21
HeightCM = 8

# PDF export via SVG -> raw bytes -> rsvg.
gr_lca2 %>%
  export_svg %>% charToRaw %>% rsvg_pdf("_flowchart_lca_adj.pdf")

# PNG export: cm converted to pixels via DPI / 2.54 (inches per cm).
gr_lca2 %>% export_svg()%>%charToRaw %>% rsvg(width = WidthCM *(DPI/2.54), height = HeightCM *(DPI/2.54)) %>% png::writePNG("_flowchart_lca0_adj.png")

# Interactive HTML copy plus a webshot screenshot of it.
htmlwidgets::saveWidget(gr_lca2, "_flowchart_lca_adj.html")
webshot::webshot("_flowchart_lca_adj.html", "_flowchart_lca_adj.png",vwidth = 1200, vheight = 900,
        zoom = 2)
Gráfico esquemático

Figure 1: Gráfico esquemático

Modelo alternativo

Análisis ACL alt

Show code
#Biemer, P. P., & Wiesen, C. (2002). Measurement error evaluation of self-reported drug use: a latent class analysis of the US National Household Survey on Drug Abuse. Journal of the Royal Statistical Society: Series A (Statistics in Society), 165(1), 97–119. doi:10.1111/1467-985x.00612  
#lca_entropia(x="ppio", seed= 2125, k= 8, f= f_preds, dat= mydata_preds, nbr_repet= 30, na_rm= T)
#3
#<div style="border: 1px solid #ddd; padding: 5px; overflow-y: scroll; height:400px; overflow-x: scroll; width:100%">
# f is the selected variables
# dat is the data
# nb_var is the number of selected variables
# k is the number of latent class generated
# nbr_repet is the number of repetition to  
# reach the convergence of EM algorithm
# x es el código para las variables de los modelos
#seed es el numero random para las semillas. ej: 4345.
#Modo de calcular el mejor modelo.
#z_ # 
#2023-01-20
#https://github.com/QMUL/poLCAParallel/blob/master/exec/3_blrt.R
#0h s
# Covariate-adjusted LCA formula: manifest variables on the LHS, `outcome`
# as a concomitant (class-membership) predictor on the RHS.
f_adj<-cbind(CAUSAL, EDAD_MUJER_REC, PUEBLO_ORIGINARIO_REC, PAIS_ORIGEN_REC, HITO1_EDAD_GEST_SEM_REC, MACROZONA, PREV_TRAMO_REC)~ outcome

seed<-2125
old <- Sys.time()

require(progress)

set.seed(seed)
model_array_adj <- list()  # one fitted model per number of classes (1..n_class_max)
pb <- progress_bar$new(total = n_class_max, message_class = "Running poLCA")

#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_
for (k in 1:n_class_max) {
  nrep_int <- nrep  # start from the full number of random restarts
  # Retry with a halved nrep until the fit succeeds or nrep drops below 1.
  while (nrep_int >= 1) {
    # BUG FIX: the original halved nrep_int *inside* the tryCatch error
    # handler. A handler is a function, so that assignment only modified
    # the handler's local copy — the outer `nrep_int` never changed and a
    # persistent error looped forever. We now return a success flag from
    # tryCatch and halve nrep_int in the loop's own scope.
    fitted_ok <- tryCatch({
      set.seed(seed)
      mod <- poLCAParallel::poLCA(
        f_adj, 
        mydata_preds3 %>% dplyr::mutate(outcome=ifelse(outcome==1,1,0)), # %>% janitor::tabyl(outcome)
        nclass = k, 
        nrep = nrep_int, 
        maxiter = 1e4,
        n.thread = 12,
        verbose = FALSE
      )
      model_array_adj[[k]] <- mod  # store the result if no error occurs
      TRUE
    }, error = function(e) {
      message(paste("Error in poLCA for k =", k, ", nrep =", nrep_int, ":", conditionMessage(e)))
      FALSE
    })
    if (fitted_ok) break  # fit succeeded for this k
    nrep_int <- nrep_int %/% 2  # integer halving; reaches 0 and exits the while
  }
#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_  
  pb$tick()  # Increment the progress bar
  cat(sprintf("\r%d%% completed", round(k/n_class_max*100)))  # Print progress percentage
  Sys.sleep(.05)
}

 ALERT: covariates not allowed when nclass=1;
                 will be ignored. 
 

10% completed
20% completed
30% completed
40% completed
50% completed
60% completed
70% completed
80% completed
90% completed
100% completed
Show code
# Tear down the progress bar now that all class counts have been fitted.
pb$terminate()  # Close the progress bar
cat(': Done')  # Print "Done" message  
: Done
Show code
# Keep a named copy of the fitted models for this specification ("ppio").
model_array_adj_ppio<-model_array_adj

#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_
#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#BOOTSTRAP#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_
#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_

new_med<-(Sys.time())
# BUG FIX: `new_med - old` yields a difftime whose unit R picks
# automatically (secs/mins/hours), so the printed number was ambiguous.
# Request minutes explicitly and state the unit in the message.
elapsed_min <- as.numeric(difftime(new_med, old, units = "mins"))
paste0("The model took ", round(elapsed_min, 2), " minutes until every LCA was computed")
[1] "The model took 7.76 until every LCA was computed"

Luego calculamos la razón de verosimilitud mediante remuestreo bootstrap (BLRT), comparando cada modelo con el que asume una clase menos.

Show code
# Bootstrap likelihood-ratio test (BLRT) comparing each k-class model
# against the (k-1)-class model, using poLCAParallel::blrt.
# store p values for each nclass, 1 to n_class_max
# store 0 for 1 number of class, ie this says you cannot have zero number of
# classes
p_value_array_adj <- c(0)
# for all number of classes investigated:
#   - store the log likelihood ratio
#   - store all bootstrap samples log likelihoods ratios
fitted_log_ratio_array_adj <- rep(NaN, n_class_max)
bootstrap_log_ratio_array_adj <- list()

#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_
# do the bootstrap likelihood ratio test for each number of classes
for (nclass in 2:n_class_max) {

  # get the null and alt models
  # these are models with one number of class differences
  null_model_adj <- model_array_adj_ppio[[nclass - 1]]
  alt_model_adj <- model_array_adj_ppio[[nclass]]

  # for each bootstrap sample, store the log likelihood ratio here
  # (arguments are positional: null model, alt model, number of bootstrap
  # samples, number of threads, number of EM restarts per sample)
  bootstrap_results_adj <- poLCAParallel::blrt(
    null_model_adj, alt_model_adj,
    n_bootstrap, n_thread, nrep
  )

  # log likelihood ratio to compare the two models
  fitted_log_ratio_array_adj[nclass] <- bootstrap_results_adj[["fitted_log_ratio"]]
  # store the log likelihoods ratios for all bootstrap samples
  bootstrap_log_ratio_array_adj[[nclass]] <-
    bootstrap_results_adj[["bootstrap_log_ratio"]]
  # store the p value for this nclass
  p_value_array_adj <- c(p_value_array_adj, bootstrap_results_adj[["p_value"]])
  
  #progress bar ('\014' is a form feed, which clears the RStudio console)
  cat(paste0(round(nclass / n_class_max * 100), '% completed'))
  Sys.sleep(.05)
  if (nclass == n_class_max) cat(': Done')
  else cat('\014')
}
20% completed30% completed40% completed50% completed60% completed70% completed80% completed90% completed100% completed: Done
Show code
#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_
#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_
new<-(Sys.time())
# BUG FIX: a bare difftime has an auto-selected unit, so `(Sys.time()-old)/60`
# silently assumed seconds and `round(new-old, 2)` could print hours or
# seconds while the message always said "minutes". Fix the unit explicitly.
time_diff <- as.numeric(difftime(Sys.time(), old, units = "mins"))
paste0("The model took ", round(as.numeric(difftime(new, old, units = "mins")), 2), " minutes")
[1] "The model took 13.57 minutes"
Show code
# Snapshot the fitted objects and BLRT results under "_ppio"-suffixed names
# so later chunks can refer to this specification unambiguously.
model_array_ppio2 <- model_array_adj
fitted_log_ratio_array_adj_ppio <- fitted_log_ratio_array_adj
bootstrap_log_ratio_array_adj_ppio <- bootstrap_log_ratio_array_adj
bootstrap_results_adj_ppio <- bootstrap_results_adj
p_value_array_adj_ppio <- p_value_array_adj

# Get the BIC values for all models in model_array_ppio2.
# vapply (rather than sapply) guarantees a numeric vector regardless of the
# list's contents, so which.min below cannot receive a list.
bic_values_adj <- vapply(model_array_ppio2, function(model) model$bic, numeric(1))

# Identify the index of the model with the lowest BIC
best_model_index_adj <- which.min(bic_values_adj)

# Select the best model
LCA_best_model_adj_ppio <- model_array_ppio2[[best_model_index_adj]]
#####################################################################################################################################################################
#Within poLCA, parameter estimates are obtained by a procedure that repeatedly improves estimates.
#This is stopped when no further improvements are obtained, or until a maximum number of iterations is reached. The starting values are the values at which such repetitions were started. Increasing the number 4 R. ACHTERHOF ET AL.of iterations (cycles within each estimation) and setting more different starting values for each repetition results in a greater likelihood that the global (rather than local) maximum of the log-likelihood function (and thus, the best possible solution) is reached. The maximum number of iterations was chosen as 10.000, and 500 different sets of starting values were used (thus going beyond the recommendations by Linzer & Lewis, 2011; Oberski, 2016). As such, the influence of chance was minimized while the reproducibility of the results was maximized# 

Resultados ACL alt

Hicimos un gráfico de los resultados

Show code
# Build a table of fit indices (AIC, BIC, SABIC, cAIC, awe, entropy, BLRT,
# deviance change, ...) for each fitted model with 2..n_class_max classes.
# Initialize an empty data frame
tab_ppio2 <- data.frame()##

# Loop through each model
for (i in 2:n_class_max) {
  skip_to_next <- FALSE

  # Get the model and the previous model
  mod2 <- model_array_ppio2[[i]]
  mod2_min1 <- model_array_ppio2[[(i-1)]]

  # Check if the model has valid predictions
  if (is.null(mod2$predclass)) {
    skip_to_next <- TRUE
  }

  # If the model has valid predictions, calculate the measures and add them to the data frame
  if (!skip_to_next) {
    # Number of latent classes
    # NOTE(review): this takes the max category code over manifest
    # variables, not poLCA's own class count — presumably intentional for
    # the df formula below, but verify against the original reference.
    mod2$C <- max(t(matrix(apply(mod2$y, 2, max))))
    # Number of manifest variables
    mod2$J <- ncol(mod2$y)
    # Total number of items
    mod2$I <- mod2$J * mod2$C
    # Degrees of freedom
    mod2$df <- mod2$C^mod2$I - mod2$npar - 1
    # Chi-square test
    mod2$Chisq.pvalue <- (1 - pchisq(mod2$Chisq, mod2$df))
    # AIC
    mod2$aic <- round(mod2$aic, 2)
    # BIC
    mod2$bic <- round(mod2$bic, 2)
    # Adjusted BIC n*=(n+2)/24 (https://github.com/dlinzer/poLCA/issues/10)
    mod2$aBIC <- round((-2 * mod2$llik) + (log((mod2$N+2)/24) * mod2$npar), 2) 
    # Conditional AIC
    mod2$cAIC <-  round((-2 * mod2$llik) + (2 * mod2$Nobs * log(mod2$N/mod2$Nobs)), 2)
    # approximate weight of evidence criterion  #https://jbds.isdsa.org/public/journals/1/html/v2n2/qiu/
    mod2$awe <-  round((-2 * mod2$llik) + (2 * mod2$npar * log(mod2$Nobs)+1.5), 2)    
    # Gsq: deviance
    # NOTE(review): the next line is a no-op (the value is neither printed
    # nor assigned inside a for loop); Gsq is used directly below.
    mod2$Gsq
    # Likelihood ratio test
    mod2$Gsq.pvalue <- (1 - pchisq(mod2$Gsq, mod2$df))
    # Relative entropy (relative.entropy/entropy.R2 are helpers defined
    # elsewhere in this project)
    mod2$RelEnt <- round(relative.entropy(mod2), 2)
    # Entropy R-squared
    mod2$EntR2 <- round(entropy.R2(mod2), 2)
    # Deviance change
    mod2$DevChange <- round(mod2_min1$Gsq - mod2$Gsq, 2)
    # Degrees of freedom change
    mod2$dfChange <- mod2_min1$resid.df - mod2$resid.df
    # P-value for deviance change
    mod2$pvalDevChange <- round(pchisq(mod2$DevChange, mod2$dfChange, lower.tail = FALSE), 4)
    # BLRT statistic and bootstrap p value computed in the previous chunk
    mod2$BLRT <- round(fitted_log_ratio_array_adj_ppio[[i]],2)
    mod2$BLRT.pvalue <- p_value_array_adj_ppio[[i]]
    # Add the model index to the data frame
    mod2$ModelIndex <- i

    # One row per model: transpose the selected scalar measures and append.
    tab_ppio2 <- rbind.data.frame(tab_ppio2, t(data.matrix(mod2[c("llik", "Chisq", "Chisq.pvalue", "resid.df", "aic", "bic", "aBIC", "cAIC", "awe", "Gsq", "Gsq.pvalue", "RelEnt", "EntR2", "DevChange", "dfChange", "pvalDevChange", "ModelIndex","BLRT", "BLRT.pvalue")])))

    #  } else {}
  }
}

# Flatten any columns that came through as lists after the rbind above.
# identify the list-like columns
list_cols2 <- sapply(tab_ppio2, is.list)
# unlist the list-like columns
unlisted_cols2 <- lapply(tab_ppio2[list_cols2], unlist)
# bind the unlisted columns as a data frame
tab_ppio2 <- cbind(tab_ppio2[!list_cols2], do.call(cbind, unlisted_cols2))
#Erase rownames
rownames(tab_ppio2) <- NULL

# Palette and display labels for the fit indices. NOTE: the original source
# was garbled by text extraction in `labels2` and `labs(...)`; reconstructed
# here as valid R with one label per entry of `levels2`.
manualcolors2 <- c('indianred1', 'cornflowerblue', 'gray50', 'darkolivegreen4', 'slateblue2', 
                  'firebrick4', 'goldenrod4')
levels2 <- c("llik", "Chisq", "Chisq.pvalue", "resid.df", "aic", "bic", "aBIC", "cAIC", "awe",
            "Gsq", "Gsq.pvalue", "RelEnt", "EntR2", "DevChange", "dfChange",
            "pvalDevChange", "BLRT", "BLRT.pvalue")
labels2 <- c('Log-Verosimilitud', 'Chi2', 'valor p Chi2', 'Grados de libertad',
            'Criterio de Información\nde Akaike (AIC)',
            'Criterio de Información\nBayesiano (BIC)',
            'BIC Ajustado (SABIC)', 'AIC Corregido',
            'Peso de evidencia aproximado (awe)',
            'G-squared/Deviance', 'Valor p G-squared',
            'Entropía Relativa', 'Entropía R2',
            'Cambio en Deviance\n(con modelo previo)',
            'Grados de libertad del cambio', 'valor p cambio deviance',
            'BLRT', 'valor p BLRT')
# Long-format line plot of the information criteria (AIC/BIC/awe families)
# across the number of latent classes.
fig_lca_fit2 <- tab_ppio2 %>%
  dplyr::mutate_if(is.character, as.numeric) %>%  # convert character columns to numeric
  tidyr::pivot_longer(cols = -ModelIndex, # everyone but the index
                      names_to = "indices", values_to = "value", values_drop_na = F) %>%
  dplyr::mutate(indices = factor(indices, levels = levels2, labels = labels2)) %>%
  dplyr::filter(grepl("(AIC|BIC|awe)", indices, ignore.case = T)) %>%
  dplyr::mutate(ModelIndex = factor(ModelIndex, levels = 2:n_class_max)) %>% 
  ggplot(aes(x = ModelIndex, y = value, group = indices, color = indices, linetype = indices)) +
  geom_line(size = 1.5) +
  # BUG FIX: the original referenced an undefined `manualcolors`; the
  # palette defined above is `manualcolors2`.
  scale_color_manual(values = manualcolors2) +
  #scale_linetype_manual(values = c("solid", "dashed", "dotted")) +
  labs(x = "Número de clases", y = "Valor", color = "Medida", linetype = "Medida") +
  #facet_wrap(.~indices, scales = "free_y", nrow = 4, ncol = 1) +
  theme_bw()

fig_lca_fit2
Show code
# Persist the comparison figure at print quality (size defaults to the device).
ggsave("_fig2_comparison_adj.png",fig_lca_fit2, dpi=600)

Luego en una tabla

Show code
# Render the fit-measure table as markdown (Table 1).
tab_ppio2 %>%#
  dplyr::select(ModelIndex, everything()) %>% 
    dplyr::mutate_if(is.character, as.numeric) %>%  # convert character columns to numeric
    knitr::kable(format="markdown", caption="Fit measures of models")
Table 1: Fit measures of models
ModelIndex llik Chisq Chisq.pvalue resid.df aic bic aBIC cAIC awe Gsq Gsq.pvalue RelEnt EntR2 DevChange dfChange pvalDevChange BLRT BLRT.pvalue
2 -27703.87 60239154.82 1 3735 55515.75 55852.70 55681.11 55407.75 56299.15 5523.722 1 0.95 0.93 2898.44 28 0 2942.82 0
3 -27399.61 19546823.31 1 3707 54963.21 55474.88 55214.32 54799.21 56152.05 4988.060 1 0.79 0.77 535.66 28 0 608.53 0
4 -27197.56 11419796.89 1 3679 54615.12 55301.51 54951.98 54395.12 56209.39 4621.646 1 0.75 0.74 366.41 28 0 404.09 0
5 -27049.85 20322958.13 1 3651 54375.69 55236.79 54798.29 54099.69 56375.39 4310.599 1 0.80 0.79 311.05 28 0 295.43 0
6 -26897.50 39536.75 1 3623 54127.00 55162.81 54635.35 53795.00 56532.13 4025.823 1 0.81 0.79 284.78 28 0 304.69 0
7 -27075.54 18559726.21 1 3595 54539.07 55749.60 55133.16 54151.07 57349.64 4247.378 1 0.85 0.84 -221.55 28 1 -356.07 1
8 -27313.85 23712031.69 1 3567 55071.69 56456.94 55751.53 54627.69 58287.69 4965.128 1 0.90 0.83 -717.75 28 1 -476.62 1
9 -27242.73 24347544.13 1 3539 54985.46 56545.42 55751.04 54485.46 58606.88 4839.520 1 0.89 0.82 125.61 28 0 142.24 0
10 -27115.19 11323930.14 1 3511 54786.38 56521.06 55637.71 54230.38 58813.24 4633.860 1 0.86 0.81 205.66 28 0 255.07 0

Presentamos el modelo con mejor ajuste

Show code
print(LCA_best_model_adj_ppio) # lowest-BIC covariate-adjusted model (6 classes in this run)
Conditional item response (column) probabilities,
 by outcome variable, for each class (row) 
 
$CAUSAL
          Pr(1)  Pr(2)  Pr(3)  Pr(4)
class 1:      0 0.9731 0.0269 0.0000
class 2:      0 0.7469 0.2531 0.0000
class 3:      0 0.1320 0.8680 0.0000
class 4:      0 0.3813 0.6187 0.0000
class 5:      0 0.0747 0.9253 0.0000
class 6:      0 0.0094 0.0000 0.9906

$EDAD_MUJER_REC
           Pr(1)  Pr(2)  Pr(3)  Pr(4)  Pr(5)  Pr(6)
class 1:  0.0007 0.0117 0.2421 0.4157 0.2944 0.0355
class 2:  0.0683 0.0342 0.2964 0.3124 0.2506 0.0381
class 3:  0.0017 0.0463 0.3003 0.3206 0.2643 0.0667
class 4:  0.0000 0.0175 0.3057 0.3071 0.3136 0.0560
class 5:  0.0035 0.0000 0.1023 0.3158 0.5046 0.0738
class 6:  0.0014 0.3290 0.3111 0.2186 0.1182 0.0218

$PUEBLO_ORIGINARIO_REC
           Pr(1)  Pr(2)  Pr(3)
class 1:  0.0846 0.8659 0.0494
class 2:  1.0000 0.0000 0.0000
class 3:  0.1288 0.8069 0.0642
class 4:  0.1311 0.8324 0.0365
class 5:  0.1385 0.8615 0.0000
class 6:  0.1478 0.8087 0.0435

$PAIS_ORIGEN_REC
           Pr(1)  Pr(2)  Pr(3)
class 1:  0.0000 0.8768 0.1232
class 2:  0.0853 0.8245 0.0902
class 3:  0.0000 0.9678 0.0322
class 4:  0.0084 0.0000 0.9916
class 5:  0.0000 0.9226 0.0774
class 6:  0.0000 0.7795 0.2205

$HITO1_EDAD_GEST_SEM_REC
           Pr(1)  Pr(2)  Pr(3)  Pr(4)  Pr(5)  Pr(6)
class 1:  0.0746 0.2052 0.2292 0.4856 0.0000 0.0054
class 2:  0.0578 0.0000 0.0861 0.2475 0.2965 0.3122
class 3:  0.0014 0.0000 0.2863 0.3711 0.2446 0.0965
class 4:  0.0090 0.0148 0.3182 0.3823 0.2174 0.0583
class 5:  0.0160 0.0046 0.6077 0.2574 0.0994 0.0150
class 6:  0.0109 0.7838 0.2052 0.0000 0.0000 0.0000

$MACROZONA
           Pr(1)  Pr(2)  Pr(3)  Pr(4)  Pr(5)  Pr(6)
class 1:  0.0000 0.3981 0.1631 0.1681 0.1120 0.1587
class 2:  0.0329 0.1872 0.3353 0.1628 0.0986 0.1831
class 3:  0.0009 0.2833 0.1820 0.2320 0.0904 0.2113
class 4:  0.0000 0.6318 0.0836 0.0139 0.2522 0.0185
class 5:  0.0000 0.6803 0.1170 0.0600 0.0713 0.0713
class 6:  0.0054 0.4219 0.1551 0.1171 0.1318 0.1686

$PREV_TRAMO_REC
           Pr(1)  Pr(2)  Pr(3)  Pr(4)  Pr(5)
class 1:  0.0012 0.0872 0.5608 0.3508 0.0000
class 2:  0.0243 0.0405 0.6497 0.2304 0.0551
class 3:  0.0012 0.0115 0.6462 0.3410 0.0000
class 4:  0.0067 0.0000 0.6030 0.2764 0.1139
class 5:  0.0000 0.5703 0.1720 0.2507 0.0069
class 6:  0.0070 0.0561 0.6791 0.1935 0.0643

Estimated class population shares 
 0.1902 0.0472 0.3135 0.0864 0.1679 0.1948 
 
Predicted class memberships (by modal posterior prob.) 
 0.2096 0.0465 0.3088 0.1019 0.1396 0.1937 
 
========================================================= 
Fit for 6 latent classes: 
========================================================= 
2 / 1 
            Coefficient  Std. error  t value  Pr(>|t|)
(Intercept)    -2.07324     0.43328   -4.785     0.000
outcome         0.78141     0.43678    1.789     0.074
========================================================= 
3 / 1 
            Coefficient  Std. error  t value  Pr(>|t|)
(Intercept)     0.93375     0.14463    6.456         0
outcome        -0.55955     0.14811   -3.778         0
========================================================= 
4 / 1 
            Coefficient  Std. error  t value  Pr(>|t|)
(Intercept)    -1.06360     0.23958   -4.439     0.000
outcome         0.32587     0.23990    1.358     0.174
========================================================= 
5 / 1 
            Coefficient  Std. error  t value  Pr(>|t|)
(Intercept)    -1.36746     0.29832   -4.584         0
outcome         1.38656     0.28493    4.866         0
========================================================= 
6 / 1 
            Coefficient  Std. error  t value  Pr(>|t|)
(Intercept)    -0.80572     0.18167   -4.435         0
outcome         0.94481     0.18663    5.062         0
========================================================= 
number of observations: 3789 
number of estimated parameters: 166 
residual degrees of freedom: 3623 
maximum log-likelihood: -26897.5 
 
AIC(6): 54127
BIC(6): 55162.81
X^2(6): 39536.75 (Chi-square goodness of fit) 
 
ALERT: estimation algorithm automatically restarted with new initial values 
 
Show code
# Snapshot the entire workspace so the next step can resume from here.
save.image("data2_lca2_adj.RData")
Show code
# Reproducibility footer: list loaded packages (name, version, source) in a
# searchable DT table with compact inline CSS applied after initialization.
require(tidyverse)
sesion_info <- devtools::session_info()
dplyr::select(
  tibble::as_tibble(sesion_info$packages),
  c(package, loadedversion, source)
) %>% 
  DT::datatable(filter = 'top', colnames = c('Row number' =1,'Variable' = 2, 'Percentage'= 3),
              caption = htmltools::tags$caption(
        style = 'caption-side: top; text-align: left;',
        '', htmltools::em('Packages')),
      options=list(
initComplete = htmlwidgets::JS(
        "function(settings, json) {",
        "$(this.api().tables().body()).css({
            'font-family': 'Helvetica Neue',
            'font-size': '50%', 
            'code-inline-font-size': '15%', 
            'white-space': 'nowrap',
            'line-height': '0.75em',
            'min-height': '0.5em'
            });",#;
        "}")))