Paso 2.7

Análisis de clases latentes exploratoria y comparativa sin predictores, aunque con datos imputados (salvo en “pertenece a pueblo originario”)

Andrés González Santa Cruz
May 05, 2023
Show code
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
Show code
 // Image-zoom overlay: clicking any plot image in the report shows it
 // enlarged in a fixed, centered <div>; clicking the enlarged copy hides it.
 $(document).ready(function() {
    // Insert the (initially empty and invisible) zoom container at the top of <body>.
    $('body').prepend('<div class=\"zoomDiv\"><img src=\"\" class=\"zoomImg\"></div>');
    // onClick function for all plots (img's): copy the clicked image's src
    // into the overlay and style the overlay as a centered lightbox.
    $('img:not(.zoomImg)').click(function() {
      $('.zoomImg').attr('src', $(this).attr('src')).css({width: '100%'});
      $('.zoomDiv').css({opacity: '1', width: 'auto', border: '1px solid white', borderRadius: '5px', position: 'fixed', top: '50%', left: '50%', marginRight: '-50%', transform: 'translate(-50%, -50%)', boxShadow: '0px 0px 50px #888888', zIndex: '50', overflow: 'auto', maxHeight: '100%'});
    });
    // onClick function for zoomImg: collapse the overlay again.
    $('img.zoomImg').click(function() {
      $('.zoomDiv').css({opacity: '0', width: '0%'}); 
    });
  });
  
Show code
<script src="hideOutput.js"></script> 
Show code
// Collapsible chunks: for every element with class "fold", add a
// Show/Hide button in front of its source code ("fold s"), console output
// and plots ("fold o"), start everything collapsed, and toggle on click.
$(document).ready(function() {    
    $chunks = $('.fold');    
    $chunks.each(function () {      // add button to source code chunks     
    if ( $(this).hasClass('s') ) {       
        $('pre.r', this).prepend("<div class=\"showopt\">Show Source</div><br style=\"line-height:22px;\"/>");
            $('pre.r', this).children('code').attr('class', 'folded');     
            }      // add button to output chunks     
        if ( $(this).hasClass('o') ) {       
            $('pre:not(.r)', this).has('code').prepend("<div class=\"showopt\">Show Output</div><br style=\"line-height:22px;\"/>");       
            $('pre:not(.r)', this).children('code:not(r)').addClass('folded');        // add button to plots       
            $(this).find('img').wrap('<pre class=\"plot\"></pre>');       
            $('pre.plot', this).prepend("<div class=\"showopt\">Show Plot</div><br style=\"line-height:22px;\"/>");       
            $('pre.plot', this).children('img').addClass('folded');      
            }   
});    // hide all chunks when document is loaded   
    $('.folded').css('display', 'none')    // function to toggle the visibility   
    $('.showopt').click(function() {     
            var label = $(this).html();     
            if (label.indexOf("Show") >= 0) {       
                $(this).html(label.replace("Show", "Hide"));     
            } else {
              $(this).html(label.replace("Hide", "Show"));     
            }     
    // Toggle the code/img sibling of the clicked button with an animation.
    $(this).siblings('code, img').slideToggle('fast', 'swing');   
    }); 
}); 

Cargamos los datos

Show code
# Clear the workspace and report memory/garbage-collection statistics.
# NOTE(review): rm(list = ls()) in a script is generally an anti-pattern;
# kept byte-identical because the rendered gc() output below depends on it.
rm(list = ls());gc()
         used (Mb) gc trigger (Mb) max used (Mb)
Ncells 536414 28.7    1210755 64.7   643711 34.4
Vcells 911917  7.0    8388608 64.0  1649632 12.6
Show code
# Load the preprocessed dataset from the previous pipeline step
# (populates the workspace with the `data2` object used below).
load("data2.RData")

Cargamos los paquetes

Show code
knitr::opts_chunk$set(echo = TRUE)

# Install-if-missing, then attach, every package the analysis needs, in the
# original attachment order (order matters for function masking).
# BUG FIX: the original pattern `if(!require(p)) install.packages(p)`
# installed a missing package but never attached it, so a first run on a
# clean library still failed; here we attach explicitly after installing.
pkg_list <- c("poLCA", "poLCAParallel", "compareGroups", "parallel", "Hmisc",
              "tidyverse", "sjPlot", "emmeans", "nnet", "here", "doParallel",
              "progress", "caret", "rpart", "rpart.plot", "partykit",
              "randomForest", "ggcorrplot", "polycor", "tableone",
              "DiagrammeR", "rsvg", "DiagrammeRsvg", "webshot",
              "htmlwidgets", "missForest", "missRanger")
for (pkg in pkg_list) {
  if (pkg == "poLCAParallel") {
    # poLCAParallel lives on GitHub, not CRAN.
    if (!require(poLCAParallel)) {
      devtools::install_github("QMUL/poLCAParallel@package")
      library(poLCAParallel)
    }
  } else if (pkg == "sjPlot") {
    # sjPlot is optional: keep rendering even if installation fails.
    try({
      if (!require(sjPlot)) {
        install.packages("sjPlot")
        library(sjPlot)
      }
    })
  } else if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg)
    library(pkg, character.only = TRUE)
  }
}
#if(!require(poLCA)){githubinstall::gh_install_packages("poLCA", ref = github_pull("14"))}

#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
# Project root directory (used for file paths later in the pipeline).
lca_dir<-here::here()
#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#
#  
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:

# Evaluate an expression and return NA if it throws an error.
# The expression is run inside try() (not silent), so error messages are
# still echoed to the console — same behaviour as before.
tryNA <- function(x) {
  result <- try(x)
  if (inherits(result, 'try-error')) NA else result
}

#https://rdrr.io/github/hyunsooseol/snowRMM/src/R/lca.b.R
#https://github.com/dlinzer/poLCA/issues/7

#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#' Bivariate residuals for latent class models
#' 
#' Calculate the "bivariate residuals" (BVRs) between pairs of variables 
#' in a latent class model.
#' 
#' This function compares the model-implied (expected) counts in the crosstables
#' of all pairs of observed dependent variables to the observed counts. For each
#' pair, it calculates a "chi-square" statistic,
#' 
#' \deqn{\text{BVR} = \sum_{j, j'} \frac{(n_{jj'} - e_{jj'})^2}{e_{jj'}}},
#' 
#' where \eqn{n_{jj'}} are the observed counts for categories \eqn{j} and \eqn{j'} 
#' of the variables being crosstabulated, and \eqn{e_{jj'}} are
#' the expected counts under the latent class model. 
#' 
#' Note that the BVR does not follow an asymptotic chi-square distribution and
#' for accurate p-values, parametric bootstrapping is necessary (Oberski et al. 2013).
#' 
#' @param fit A poLCA fit object
#' @param tol Optional: tolerance for small expected counts
#' @param rescale_to_df Optional: whether to divide the pairwise "chi-square" values by 
#' the degrees of freedom of the local crosstable. Default is TRUE.
#' @return The table of bivariate residuals
#' @author Daniel Oberski (daniel.oberski@gmail.com)
#' @seealso \code{\link{poLCA}} for fitting the latent class model.
#' @references 
#' Oberski, DL, Van Kollenburg, GH and Vermunt, JK (2013). 
#'   A Monte Carlo evaluation of three methods to detect local dependence in binary data latent class models. 
#'   Advances in Data Analysis and Classification 7 (3), 267-279.
#' @examples
#' data(values)
#' f <- cbind(A, B, C, D) ~ 1
#' M0 <- poLCA(f,values, nclass=1, verbose = FALSE) 
#' bvr(M0) # 12.4, 5.7, 8.3, 15.6, ... 
bvr <- function(fit, tol = 1e-3, rescale_to_df = TRUE) {
  # FIX: inherits() instead of `class(fit) == "poLCA"`; the latter breaks
  # (length > 1 condition) when the object carries more than one class.
  stopifnot(inherits(fit, "poLCA"))

  # Manifest variable names: the last two columns of predcell are the
  # observed and expected cell counts, everything before them is a variable.
  ov_names <- names(fit$predcell)[1:(ncol(fit$predcell) - 2)]
  # All unordered pairs of manifest variables (2 x n_pairs matrix).
  ov_combn <- combn(ov_names, 2)

  # Chi-square-style discrepancy between observed and model-expected counts
  # for one pair of manifest variables.
  get_bvr <- function(ov_pair) {
    form_obs <- as.formula(paste0("observed ~ ", ov_pair[1], " + ", ov_pair[2]))
    form_exp <- as.formula(paste0("expected ~ ", ov_pair[1], " + ", ov_pair[2]))

    counts_obs <- xtabs(form_obs, data = fit$predcell)
    counts_exp <- xtabs(form_exp, data = fit$predcell)
    counts_exp <- ifelse(counts_exp < tol, tol, counts_exp) # Prevent Inf/NaN

    # Degrees of freedom of the local crosstable: (rows-1)*(cols-1).
    bvr_df <- prod(dim(counts_exp) - 1)
    bvr_value <- sum((counts_obs - counts_exp)^2 / counts_exp)

    if (rescale_to_df) bvr_value <- bvr_value / bvr_df

    attr(bvr_value, "df") <- bvr_df

    bvr_value
  }

  bvr_pairs <- apply(ov_combn, 2, get_bvr)

  # Package the pairwise values as a "dist" object so printing shows the
  # familiar lower-triangle layout labelled by variable name.
  attr(bvr_pairs, "rescale_to_df") <- rescale_to_df
  attr(bvr_pairs, "class") <- "dist"
  attr(bvr_pairs, "Size") <- length(ov_names)
  attr(bvr_pairs, "Labels") <- ov_names
  attr(bvr_pairs, "Diag") <- FALSE
  attr(bvr_pairs, "Upper") <- FALSE

  bvr_pairs
}

#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
# Entropy of the full model-implied response-pattern distribution of a
# fitted poLCA model. Enumerates every possible response pattern, obtains
# its model-implied probability via poLCA.predcell(), and returns
# -sum(p * log(p)) over all patterns (NaN terms from p = 0 are dropped).
poLCA.entropy.fix <- function(lc) {
  # Number of response categories per manifest variable.
  n_cat <- sapply(lc$probs, ncol)
  # Every possible combination of category responses.
  all_cells <- expand.grid(sapply(n_cat, seq, from = 1))
  # Model-implied probability of each response pattern.
  cell_probs <- poLCA.predcell(lc, all_cells)
  -sum(cell_probs * log(cell_probs), na.rm = TRUE)
}

#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#Calculate entropy R2 for poLCA model

# MIT license
# Author: Daniel Oberski
# Input: result of a poLCA model fit
# Output: entropy R^2 statistic (Vermunt & Magidson, 2013, p. 71)
# See: daob.nl/wp-content/uploads/2015/07/ESRA-course-slides.pdf
# And: https://www.statisticalinnovations.com/wp-content/uploads/LGtecnical.pdf
# Probabilities at or below this tolerance are treated as exactly zero when
# computing entropies (lim p->0 of p*log(p) is 0).
machine_tolerance <- sqrt(.Machine$double.eps)

# Entropy-based pseudo-R^2 for a poLCA fit (Vermunt & Magidson, 2013, p. 71):
# 1 minus the ratio of mean posterior entropy to the entropy of the class
# proportions. 1 = perfect classification certainty, 0 = no improvement.
entropy.R2 <- function(fit) {
  shannon <- function(p) {
    nonzero <- p[p > machine_tolerance]
    -sum(nonzero * log(nonzero))
  }
  prior_uncertainty <- shannon(fit$P)                       # class proportions
  posterior_uncertainty <- mean(apply(fit$posterior, 1, shannon))
  (prior_uncertainty - posterior_uncertainty) / prior_uncertainty
}

#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#http://researchdata.gla.ac.uk/879/1/Survey_data_processed_using_R.pdf
##Function to plot variable probabilites by latent class

## Function to undertake chisquare analayis and plot graphs of residuals and contributions
## Run a chi-square analysis of an indicator variable against predicted
## latent-class membership and draw two corrplot graphs: the Pearson
## residuals and each cell's percentage contribution to the statistic.
##
## indfactor.data   indicator/factor variable (rows of the crosstab)
## predclass.data   predicted class membership (columns of the crosstab)
## noclasses        number of latent classes (kept for interface
##                  compatibility; not used inside the function)
## pitem, gitem     labels used in the plot titles
## chirows, chicols row/column names for the residual/contribution tables
##
## Returns the htest object from chisq.test().
## NOTE(review): relies on corrplot() being attached by the caller.
chisquaretest.predictions.function <-
 function(indfactor.data,
   predclass.data,
   noclasses,
   pitem,
   gitem,
   chirows,
   chicols) {
   chisquare.results <- chisq.test(indfactor.data, predclass.data)

   # Pearson residuals of the crosstabulation.
   residuals.data <- chisquare.results$residuals
   colnames(residuals.data) <- chicols
   rownames(residuals.data) <- chirows
   title.text <-
     paste(
     "Residuals: chi Square Crosstabulation of\n",
     pitem,
     "and",
     gitem,
     "\n(Chisquare =",
     round(chisquare.results$statistic, 3),
     " p <",
     round(chisquare.results$p.value, 3),
     ")",
     sep = " "
     )
   corrplot(
     residuals.data,
     is.cor = FALSE,
     title = title.text,
     mar = c(0, 0, 4, 0)
     )

   # Percentage contribution of each cell to the chi-square statistic.
   contrib.data <-
     100 * residuals.data ^ 2 / chisquare.results$statistic
   # BUG FIX: the original computed round(contrib.data, 3) but discarded
   # the result; assign it so the plotted values are actually rounded.
   contrib.data <- round(contrib.data, 3)
   colnames(contrib.data) <- chicols
   rownames(contrib.data) <- chirows
   title.text <-
     paste(
     "Contributions: chi Square Crosstabulation of\n",
     pitem,
     "and",
     gitem,
     "\n(Chisquare =",
     round(chisquare.results$statistic, 3),
     " p <",
     round(chisquare.results$p.value, 3),
     ")",
     sep = " "
     )
   corrplot(
     contrib.data,
     is.cor = FALSE,
     title = title.text,
     mar = c(0, 0, 4, 0)
     )
   return(chisquare.results)
 }
##Funciton for Cramers V test
## Cramér's V (equal to phi for 2x2 tables) for the association between two
## categorical vectors.
##   x, y : vectors of equal length (coerced to factors by chisq.test)
## Returns the V statistic as a plain numeric value.
## V = sqrt( chi2 / (N * (min(r, c) - 1)) ), using the uncorrected
## chi-square statistic (correct = FALSE).
cv.test <- function(x, y) {
  chi2 <- chisq.test(x, y, correct = FALSE)$statistic
  CV <- sqrt(chi2 /
    (length(x) * (min(
      length(unique(x)), length(unique(y))
    ) - 1)))
  # BUG FIX: the original line had an unbalanced closing parenthesis here,
  # which made the whole definition a parse error.
  print.noquote("Cramér V / Phi:")
  return(as.numeric(CV))
}

#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:

# Windows-only: echo current memory usage and raise the session memory limit.
# NOTE(review): memory.size()/memory.limit() were deprecated in R 4.2 and are
# defunct on recent versions — confirm before re-running this on R >= 4.2;
# the OS guard means it is skipped entirely elsewhere.
if(.Platform$OS.type == "windows") withAutoprint({
  memory.size()
  memory.size(TRUE)
  memory.limit(size=56000)
})

# Directory of the currently open script; wrapped in try() because
# rstudioapi errors when the document is knitted outside RStudio.
path<-try(dirname(rstudioapi::getSourceEditorContext()$path))

# Render NA values as empty cells in knitr::kable() tables.
options(knitr.kable.NA = '')


#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:

# knitr chunk hook that reports each chunk's elapsed wall-clock time as an
# HTML message box ("... hours" for runs over an hour, otherwise minutes).
knitr::knit_hooks$set(time_it = local({
  now <- NULL
  function(before, options) {
    if (before) {
      # record the current time before each chunk
      now <<- Sys.time()
    } else {
      # BUG FIX: difftime() with the default units = "auto" returns seconds,
      # minutes or hours depending on magnitude, so comparing the bare value
      # against 60^2 was unit-dependent. Work in explicit seconds instead.
      elapsed_secs <- as.numeric(difftime(Sys.time(), now, units = "secs"))
      if (elapsed_secs > 60^2) {
        x <- paste("Time for this code chunk to run:", round(elapsed_secs / 60^2, 1), "hours")
      } else {
        x <- paste("Time for this code chunk to run:", round(elapsed_secs / 60, 1), "minutes")
      }
      # return a character string (HTML) to show the time in the document
      paste('<div class="message">', gsub('##', '\n', x),'</div>', sep = '\n')
    }
  }
}))
knitr::opts_chunk$set(time_it = TRUE)
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:

#to format rows in bold
# Wrap selected cells of a data frame in Markdown emphasis markers.
#
#   df    data frame to modify
#   rows  row indices to format
#   cols  column names (or indices) to format
#   value one of "italics" (*), "bold" (**), "strikethrough" (~~)
#
# Affected columns are coerced to character. Empty cells stay empty; spaces
# inside formatted cells are removed (so the markup is not broken by
# whitespace in rendered Markdown). Returns the modified data frame.
format_cells <- function(df, rows, cols, value = c("italics", "bold", "strikethrough")) {
  # one * for italics, two ** for bold, ~~ for strikethrough
  markers <- setNames(c("*", "**", "~~"), c("italics", "bold", "strikethrough"))
  token <- markers[value]

  for (col in cols) {
    for (row in rows) {
      # Make sure the column holds characters, not factors.
      df[[col]] <- as.character(df[[col]])
      cell <- df[row, col]
      df[row, col] <- if (nchar(cell) == 0) {
        ""
      } else {
        paste0(token, gsub(" ", "", cell), token)
      }
    }
  }

  df
}
#To produce line breaks in messages and warnings
# knitr output hooks: wrap errors, warnings and messages in Bootstrap alert
# <div>s so they render as styled boxes, converting the '##' output prefix
# into line breaks and bolding the Error/Warning labels.
# BUG FIX: the original regex literals contained the string escape "\ ",
# which R rejects as an unrecognized escape at parse time; a plain space
# matches the same text.
knitr::knit_hooks$set(
   error = function(x, options) {
     paste('\n\n<div class="alert alert-danger">',
           gsub('##', '\n', gsub('^## Error', '**Error**', x)),
           '</div>', sep = '\n')
   },
   warning = function(x, options) {
     paste('\n\n<div class="alert alert-warning">',
           gsub('##', '\n', gsub('^## Warning:', '**Warning**', x)),
           '</div>', sep = '\n')
   },
   message = function(x, options) {
     paste('<div class="message">',
           gsub('##', '\n', x),
           '</div>', sep = '\n')
   }
)
#
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:

# Coerce a tableone::TableOne object to a data frame.
# print() on a TableOne returns a character matrix; capture.output() is used
# only to suppress the console printing of that call. showAllLevels = TRUE
# yields one row per factor level; the variable name appears only on the
# first row of each group, so it is carried down with tidyr::fill().
# TYPO FIX: output column renamed "charactersitic" -> "characteristic".
as.data.frame.TableOne <- function(x, ...) {
  capture.output(tab <- print(x, showAllLevels = TRUE, ...))
  y <- as.data.frame(tab)
  # Blank row names become NA so fill() can propagate the variable labels.
  y$characteristic <- dplyr::na_if(rownames(tab), "")
  y <- y %>%
    fill(characteristic, .direction = "down") %>%
    select(characteristic, everything())
  rownames(y) <- NULL
  y
}

Definimos ciertas constantes

Show code
# Tuning constants shared by the LCA estimation and the bootstrap below.
clus_iter= 500#500 #500
# Number of CPU threads detected on this machine (used by poLCAParallel).
n_thread <- parallel::detectCores()
nrep <- clus_iter # number of different initial values (could be n_thread too)
n_class_max <- 12 # maximum number of classes to investigate
n_bootstrap <- 100#00 #30 # 50 number of bootstrap samples
# Echo the thread count into the rendered document.
print(n_thread)
[1] 8

Time for this code chunk to run: 0 minutes

Análisis de clases latentes

Especificar modelo

Los valores ceros se declararon datos perdidos. Posteriormente, se imputaron.

Show code
# Vector of indicator variables of interest for the LCA.
preds <- c("CAUSAL","EDAD_MUJER_REC", "PUEBLO_ORIGINARIO_REC","PAIS_ORIGEN_REC", "HITO1_EDAD_GEST_SEM_REC", "MACROZONA", "PREV_TRAMO_REC")

# Impute missing values with chained random forests (missRanger), with
# predictive mean matching (pmm.k = 3) and a fixed seed for reproducibility.
# PUEBLO_ORIGINARIO_REC is deliberately NOT imputed (see the document title).
# NOTE(review): on the right-hand side, "~ . - CAUSAL+ EDAD_MUJER_REC+ ..."
# parses as ". - CAUSAL + EDAD_MUJER_REC + ..." due to operator precedence,
# so only CAUSAL is excluded from the predictor set — the printed "Variables
# used to impute" list in the output confirms the remaining variables are
# still used as predictors. If the intent was to exclude them all, the
# formula would need "- (CAUSAL + ...)". Confirm intended behaviour.
data2_imp <-    data2%>%
                missRanger::missRanger(
                formula = CAUSAL+ EDAD_MUJER_REC+ PAIS_ORIGEN_REC+ HITO1_EDAD_GEST_SEM_REC+ MACROZONA+ PREV_TRAMO_REC ~ . - CAUSAL+ EDAD_MUJER_REC+ PAIS_ORIGEN_REC+ HITO1_EDAD_GEST_SEM_REC+ MACROZONA+ PREV_TRAMO_REC,
                num.trees = 200, 
                pmm.k = 3,                
                returnOOB=T,
                maxiter= 50,
                verbose = 2, 
                seed = 2125)

Missing value imputation by random forests

  Variables to impute:      EDAD_MUJER_REC, PAIS_ORIGEN_REC, HITO1_EDAD_GEST_SEM_REC, MACROZONA, PREV_TRAMO_REC
  Variables used to impute: AÑO, MES, PUEBLO_ORIGINARIO, SEREMIoSERVICIO, HITO2_DECISION_MUJER_IVE, N_C_PSICOLOGO, N_C_ASIST_SOCIAL, N_C_DUPLA, N_C_PSIQUIATRA, N_VISITAS_DOM, TOTAL_ATENCIONES_ACOMPAÑAMIENTO, PAIS_ORIGEN_REC, EDAD_MUJER_REC, HITO1_EDAD_GEST_SEM_REC, PREV_TRAMO_REC, MACROZONA, HITO2_DECISION_MUJER_IVE_bin
    MACROZ  PREV_T  EDAD_M  PAIS_O  HITO1_
iter 1: 0.1776  0.3621  0.6797  0.8208  0.6248  
iter 2: 8e-04   0.0021  0.0011  0.0043  3e-04   
iter 3: 5e-04   8e-04   0.0000  0.0052  5e-04   
iter 4: 8e-04   5e-04   0.0016  0.0036  5e-04   
iter 5: 8e-04   8e-04   0.0016  0.0036  3e-04   

Time for this code chunk to run: 0.1 minutes

Show code
#table(data2$MACROZONA,data2$REGION_RESIDENCIA, exclude=NULL)
# Indicator variables for the latent class model.
preds <- c("CAUSAL","EDAD_MUJER_REC", "PUEBLO_ORIGINARIO_REC","PAIS_ORIGEN_REC", "HITO1_EDAD_GEST_SEM_REC", "MACROZONA", "PREV_TRAMO_REC")

#c("AÑO","MES_NUM",    "HITO3_COMPLICACION_POST_IVE", "HITO3_CONDICION_MUJER_POST_IVE", "HITO3_TIPO_ATENCION",  "HITO3_SERV_SALUD_ESTABLECIMIENTO", "HITO4_MUJER_ACEPTA_ACOM"),

# Recode each indicator to consecutive integer codes starting at 2 (factor
# code + 1) and map remaining NA to 1 — the all-positive-integer coding
# that poLCA expects.
# FIX: wrap the external character vector in all_of(); passing `preds` bare
# to across() is deprecated tidyselect usage in dplyr >= 1.1.
mydata_preds <- data2_imp %>%
  dplyr::mutate(across(all_of(preds), ~ as.numeric(factor(.)) + 1)) %>%
  dplyr::mutate(across(all_of(preds), ~ dplyr::case_when(is.na(.) ~ 1, TRUE ~ .)))

#comprobar (check the recode)
#table(mydata_preds$sus_ini_mod_mvv, data2$sus_ini_mod_mvv, exclude=NULL)

# Measurement model: all indicators, no covariates (unconditional LCA).
f_preds <- cbind(CAUSAL, EDAD_MUJER_REC, PUEBLO_ORIGINARIO_REC, PAIS_ORIGEN_REC, HITO1_EDAD_GEST_SEM_REC, MACROZONA, PREV_TRAMO_REC) ~ 1

Time for this code chunk to run: 0 minutes

Modificamos la base de datos para incluir la variable resultado.

Show code
#dropped region name because it was too different, many categories
# Indicators plus the decision variable used to derive the outcome.
preds2 <- c(setdiff(preds, ""), "HITO2_DECISION_MUJER_IVE")

# Same recoding as for mydata_preds, keeping only preds2 and adding a binary
# outcome: 1 if the decision contains 'INTERRUMPIR' (terminate), else 0.
# FIX: all_of(preds) instead of bare `preds` inside across() — passing an
# external vector directly is deprecated tidyselect usage in dplyr >= 1.1.
mydata_preds2 <- data2_imp %>%
  dplyr::mutate(across(all_of(preds), ~ as.numeric(factor(.)) + 1)) %>%
  dplyr::mutate(across(all_of(preds), ~ dplyr::case_when(is.na(.) ~ 1, TRUE ~ .))) %>%
  dplyr::select(any_of(preds2)) %>%
  dplyr::mutate(outcome = factor(as.numeric(grepl('INTERRUMPIR', HITO2_DECISION_MUJER_IVE))))

#data.table::data.table(.)
#
#table(mydata_preds2$HITO2_DECISION_MUJER_IVE)
#lapply(preds2, function(p) {prop.table(table(mydata_preds2[p]))})

Time for this code chunk to run: 0 minutes

Cambiamos formato

Show code
# Convert to data.table, keeping the numeric indicator codes.
mydata_preds3 <-
  mydata_preds2 %>% 
#dplyr::mutate_if(is.numeric, ~as.character(.)) %>% # converting to character produced errors downstream, so it stays numeric
  data.table::data.table(.)

# Export a Stata (.dta) snapshot of the analysis dataset.
mydata_preds3 %>% rio::export("mydata_preds3_2023_04_21.dta")

Time for this code chunk to run: 0 minutes

Show code
# Stratified descriptive table: each indicator (plus year) broken down by
# the woman's decision (HITO2_DECISION_MUJER_IVE), with an Overall column
# and explicit NA categories.
tab2 <-
    CreateTableOne(vars = c("CAUSAL", "EDAD_MUJER_REC", "PUEBLO_ORIGINARIO_REC", "PAIS_ORIGEN_REC", "HITO1_EDAD_GEST_SEM_REC","MACROZONA", "AÑO", "PREV_TRAMO_REC","HITO2_DECISION_MUJER_IVE"),
                 data = mydata_preds3, 
                 # force these variables to be treated as categorical
                 factorVars = c("CAUSAL", "EDAD_MUJER_REC", "PUEBLO_ORIGINARIO_REC", "PAIS_ORIGEN_REC", "HITO1_EDAD_GEST_SEM_REC","MACROZONA", "AÑO", "PREV_TRAMO_REC"),
                 strata = "HITO2_DECISION_MUJER_IVE",
                 # IDIOM FIX: TRUE instead of T (T is an ordinary, reassignable variable)
                 addOverall = TRUE, includeNA = TRUE)

# Render as a markdown table via the as.data.frame.TableOne helper.
as.data.frame.TableOne(tab2) %>% knitr::kable("markdown", caption="Descriptivos (acotado)")
Table 1: Descriptivos (acotado)
charactersitic level Overall CONTINUAR EL EMBARAZO INTERRUMPIR EL EMBARAZO NO APLICA, INSCONSCIENTE p test
n 3789 593 3183 13
CAUSAL (%) 2 1171 (30.9) 190 ( 32.0) 968 ( 30.4) 13 (100.0) <0.001
CAUSAL (%) 3 1887 (49.8) 346 ( 58.3) 1541 ( 48.4) 0 ( 0.0)
CAUSAL (%) 4 731 (19.3) 57 ( 9.6) 674 ( 21.2) 0 ( 0.0)
EDAD_MUJER_REC (%) 2 318 ( 8.4) 61 ( 10.3) 257 ( 8.1) 0 ( 0.0) 0.014
EDAD_MUJER_REC (%) 3 987 (26.0) 136 ( 22.9) 849 ( 26.7) 2 ( 15.4)
EDAD_MUJER_REC (%) 4 1206 (31.8) 171 ( 28.8) 1028 ( 32.3) 7 ( 53.8)
EDAD_MUJER_REC (%) 5 1085 (28.6) 182 ( 30.7) 899 ( 28.2) 4 ( 30.8)
EDAD_MUJER_REC (%) 6 193 ( 5.1) 43 ( 7.3) 150 ( 4.7) 0 ( 0.0)
PUEBLO_ORIGINARIO_REC (%) 1 633 (16.7) 98 ( 16.5) 535 ( 16.8) 0 ( 0.0) 0.061
PUEBLO_ORIGINARIO_REC (%) 2 3000 (79.2) 460 ( 77.6) 2527 ( 79.4) 13 (100.0)
PUEBLO_ORIGINARIO_REC (%) 3 156 ( 4.1) 35 ( 5.9) 121 ( 3.8) 0 ( 0.0)
PAIS_ORIGEN_REC (%) 2 3105 (81.9) 511 ( 86.2) 2583 ( 81.1) 11 ( 84.6) 0.014
PAIS_ORIGEN_REC (%) 3 684 (18.1) 82 ( 13.8) 600 ( 18.9) 2 ( 15.4)
HITO1_EDAD_GEST_SEM_REC (%) 2 752 (19.8) 58 ( 9.8) 693 ( 21.8) 1 ( 7.7) <0.001
HITO1_EDAD_GEST_SEM_REC (%) 3 1204 (31.8) 127 ( 21.4) 1077 ( 33.8) 0 ( 0.0)
HITO1_EDAD_GEST_SEM_REC (%) 4 1140 (30.1) 203 ( 34.2) 935 ( 29.4) 2 ( 15.4)
HITO1_EDAD_GEST_SEM_REC (%) 5 478 (12.6) 138 ( 23.3) 336 ( 10.6) 4 ( 30.8)
HITO1_EDAD_GEST_SEM_REC (%) 6 215 ( 5.7) 67 ( 11.3) 142 ( 4.5) 6 ( 46.2)
MACROZONA (%) 2 1608 (42.4) 195 ( 32.9) 1410 ( 44.3) 3 ( 23.1) <0.001
MACROZONA (%) 3 610 (16.1) 66 ( 11.1) 536 ( 16.8) 8 ( 61.5)
MACROZONA (%) 4 555 (14.6) 153 ( 25.8) 401 ( 12.6) 1 ( 7.7)
MACROZONA (%) 5 439 (11.6) 72 ( 12.1) 366 ( 11.5) 1 ( 7.7)
MACROZONA (%) 6 577 (15.2) 107 ( 18.0) 470 ( 14.8) 0 ( 0.0)
PREV_TRAMO_REC (%) 2 499 (13.2) 41 ( 6.9) 458 ( 14.4) 0 ( 0.0) <0.001
PREV_TRAMO_REC (%) 3 2099 (55.4) 373 ( 62.9) 1717 ( 53.9) 9 ( 69.2)
PREV_TRAMO_REC (%) 4 1092 (28.8) 176 ( 29.7) 913 ( 28.7) 3 ( 23.1)
PREV_TRAMO_REC (%) 5 99 ( 2.6) 3 ( 0.5) 95 ( 3.0) 1 ( 7.7)
HITO2_DECISION_MUJER_IVE (%) CONTINUAR EL EMBARAZO 593 (15.7) 593 (100.0) 0 ( 0.0) 0 ( 0.0) <0.001
HITO2_DECISION_MUJER_IVE (%) INTERRUMPIR EL EMBARAZO 3183 (84.0) 0 ( 0.0) 3183 (100.0) 0 ( 0.0)
HITO2_DECISION_MUJER_IVE (%) NO APLICA, INSCONSCIENTE 13 ( 0.3) 0 ( 0.0) 0 ( 0.0) 13 (100.0)
Show code
#as.data.frame.TableOne(tab2)%>% rio::export("tabla12.xlsx")

Time for this code chunk to run: 0 minutes

Show code
#<div class="fold o"> 
# NOTE(review): this chunk mixes the current analysis with leftovers from a
# different survey project. The objects p8_exp_c19_cerca_cont,
# p40_istas_soporte, estamento and p45_demo_sexo are not columns of
# mydata_preds3, and copy_names() is not defined anywhere in this document,
# so the statements that reference them will error at run time. Confirm and
# remove the leftover statements.
paste0("Con Estamento")

# Chi-square test of each indicator against the decision variable, formatted
# as a Spanish-style report string (decimal commas).
mydata_preds3 %>% 
    dplyr::select(HITO2_DECISION_MUJER_IVE, c("CAUSAL", "EDAD_MUJER_REC", "PUEBLO_ORIGINARIO_REC", "PAIS_ORIGEN_REC", "HITO1_EDAD_GEST_SEM_REC","MACROZONA", "AÑO", "PREV_TRAMO_REC")) %>% 
    tidyr::gather(variable,measure, -HITO2_DECISION_MUJER_IVE) %>% 
    dplyr::group_by(variable) %>%
    dplyr::do(chisq.test(.$HITO2_DECISION_MUJER_IVE, .$measure) %>% broom::tidy()) %>%
    dplyr::mutate(p.value=ifelse(p.value<.001,"<0.001",sprintf("%1.3f",p.value))) %>% 
    dplyr::mutate(statistic=sprintf("%2.0f",statistic)) %>% 
    dplyr::mutate(report=paste0("X²(",parameter,", ",nrow(mydata_preds3),")=",statistic,"; p", ifelse(p.value=="<0.001",p.value, paste0("=",p.value)))) %>%
    dplyr::mutate(report=sub("0\\.","0,",sub("\\.",",",report))) 

# explanatory63 = c("p40_istas_soporte", "p40_cond_trab_istas_10_p40_rec", "ratio_nec_insuf_mask2", "p8_exp_c19_cerca_cont_rec", "p36_cond_trab_reasig_rec", "p26_cond_trab_ent_protec_3_p26_rec", "p45_demo_sexo", "estamento")

# NOTE(review): p8_exp_c19_cerca_cont does not exist in mydata_preds3 —
# leftover from another dataset; this call will error.
fisher_1<-
fisher.test(table(mydata_preds3$p8_exp_c19_cerca_cont, 
                  mydata_preds3$HITO2_DECISION_MUJER_IVE), simulate.p.value=T, B=10000)

# Simulated-p-value Fisher tests for each indicator vs. the decision.
mydata_preds3 %>% 
    dplyr::select(HITO2_DECISION_MUJER_IVE, c("CAUSAL", "EDAD_MUJER_REC", "PUEBLO_ORIGINARIO_REC", "PAIS_ORIGEN_REC", "HITO1_EDAD_GEST_SEM_REC","MACROZONA", "AÑO", "PREV_TRAMO_REC")) %>% 
    tidyr::gather(variable,measure, -HITO2_DECISION_MUJER_IVE) %>% 
    dplyr::group_by(variable) %>%
    dplyr::do(fisher.test(.$HITO2_DECISION_MUJER_IVE, .$measure, workspace = 2e10, simulate.p.value = T, B=1e5) %>% broom::tidy()) 
# 
# NOTE(review): kruskal.test on p40_istas_soporte ~ estamento and copy_names()
# reference objects from another project; will error at run time.
paste0("H(",kruskal.test(p40_istas_soporte ~ estamento, data=mydata_preds3)$parameter,")=",
       round(kruskal.test(p40_istas_soporte ~ estamento, data=mydata_preds3)$statistic,1), ", p=",
       round(kruskal.test(p40_istas_soporte ~ estamento, data=mydata_preds3)$p.value,3)) %>%
  copy_names()

#_#_#_#_#_#_#_#_

paste0("Con sexo")


# NOTE(review): typo "Wlicoxon" in the output label below (runtime string —
# left unchanged here); wilcox.test also references the foreign variables.
paste0("Wlicoxon test, sexo-soporte ")
paste0("W= ",round(broom::tidy(wilcox.test(p40_istas_soporte ~ p45_demo_sexo, data=mydata_preds3))$statistic/sqrt(nrow(mydata_preds3)),0),", p= ",round(broom::tidy(wilcox.test(p40_istas_soporte ~ p45_demo_sexo, data=mydata_preds3))$p.value,3))

Time for this code chunk to run: 0 minutes

Así se ven los datos como los usa poLCA

Corremos una versión paralelizada de poLCA.

Análisis

Show code
#Biemer, P. P., & Wiesen, C. (2002). Measurement error evaluation of self-reported drug use: a latent class analysis of the US National Household Survey on Drug Abuse. Journal of the Royal Statistical Society: Series A (Statistics in Society), 165(1), 97–119. doi:10.1111/1467-985x.00612  
#lca_entropia(x="ppio", seed= 2125, k= 8, f= f_preds, dat= mydata_preds, nbr_repet= 30, na_rm= T)
#3
#<div style="border: 1px solid #ddd; padding: 5px; overflow-y: scroll; height:400px; overflow-x: scroll; width:100%">
# f is the selected variables
# dat is the data
# nb_var is the number of selected variables
# k is the number of latent class generated
# nbr_repet is the number of repetition to  
# reach the convergence of EM algorithm
# x es el código para las variables de los modelos
#seed es el numero random para las semillas. ej: 4345.
#Modo de calcular el mejor modelo.
#z_ # 
#2023-01-20
#https://github.com/QMUL/poLCAParallel/blob/master/exec/3_blrt.R
#0h s
# Fit unconditional latent class models with 1..n_class_max classes using
# the parallelised poLCA implementation. For each k, if estimation fails the
# number of random starts (nrep) is halved and the fit retried until the
# count would drop below 1.
seed <- 2125
old <- Sys.time()

library(progress)

set.seed(seed)
model_array <- list()  # fitted models, indexed by number of classes k
# NOTE(review): "message_class" is not a documented argument of
# progress_bar$new() in the progress package — confirm the intended option
# (probably `format`).
pb <- progress_bar$new(total = n_class_max, message_class = "Running poLCA")

#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_
for (k in 1:n_class_max) {
  nrep_int <- nrep  # start with the full number of random starts
  while (nrep_int >= 1) {
    # BUG FIX: the original halved nrep_int inside the tryCatch error
    # handler, where the assignment only changed a handler-local copy —
    # on a persistent error the while loop never terminated. Return NULL
    # from the handler and halve in the enclosing scope instead.
    mod <- tryCatch({
      set.seed(seed)
      poLCAParallel::poLCA(
        f_preds,
        mydata_preds3,
        nclass = k,
        nrep = nrep_int,
        maxiter = 1e4,
        # CONSISTENCY FIX: use the detected core count instead of a
        # hard-coded 12 (n_thread is defined in the constants chunk).
        n.thread = n_thread,
        verbose = FALSE
      )
    }, error = function(e) {
      message(paste("Error in poLCA for k =", k, ", nrep =", nrep_int, ":", conditionMessage(e)))
      NULL
    })
    if (!is.null(mod)) {
      model_array[[k]] <- mod  # keep the successful fit
      break
    }
    nrep_int <- nrep_int / 2  # retry with half the random starts
  }
#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_  
  pb$tick()  # Increment the progress bar
  cat(sprintf("\r%d%% completed", round(k / n_class_max * 100)))  # Print progress percentage
  Sys.sleep(.05)
}

8% completed
17% completed
25% completed
33% completed
42% completed
50% completed
58% completed
67% completed
75% completed
83% completed
92% completed
100% completed
Show code
# Tidy up the console progress reporting from the estimation loop above.
pb$terminate()  # Close the progress bar
cat(': Done')  # Print "Done" message  
: Done
Show code
# Keep a copy of the fitted models for this ("ppio"/initial) specification.
model_array_ppio <- model_array

#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_
#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#BOOTSTRAP#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_
#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_

# BUG FIX: "new_med - old" yields a difftime in automatic units, so the
# original message printed a bare number with no unit attached. Request
# seconds explicitly and say so in the message.
new_med <- Sys.time()
paste0("The model took ",
       round(as.numeric(difftime(new_med, old, units = "secs")), 2),
       " seconds until every LCA was computed")
[1] "The model took 26.53 until every LCA was computed"

Time for this code chunk to run: 0.4 minutes

Luego calculamos la razón de verosimilitud mediante remuestreo bootstrap (BLRT) entre los distintos modelos con el que asume una clase menos.

Show code
# store p values for each nclass, 1 to n_class_max
# store 0 for 1 number of class, ie this says you cannot have zero number of
# classes
p_value_array <- c(0)
# for all number of classes investigated:
#   - store the log likelihood ratio
#   - store all bootstrap samples log likelihoods ratios
fitted_log_ratio_array <- rep(NaN, n_class_max)
bootstrap_log_ratio_array <- list()

#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_
# do the bootstrap likelihood ratio test for each number of classes
# (BLRT: compare the (k-1)-class model against the k-class model using
# n_bootstrap parametric bootstrap samples via poLCAParallel::blrt)
for (nclass in 2:n_class_max) {

  # get the null and alt models
  # these are models with one number of class differences
  null_model <- model_array[[nclass - 1]]
  alt_model <- model_array[[nclass]]

  # for each bootstrap sample, store the log likelihood ratio here
  bootstrap_results <- poLCAParallel::blrt(
    null_model, alt_model,
    n_bootstrap, n_thread, nrep
  )

  # log likelihood ratio to compare the two models
  fitted_log_ratio_array[nclass] <- bootstrap_results[["fitted_log_ratio"]]
  # store the log likelihoods ratios for all bootstrap samples
  bootstrap_log_ratio_array[[nclass]] <-
    bootstrap_results[["bootstrap_log_ratio"]]
  # store the p value for this nclass
  p_value_array <- c(p_value_array, bootstrap_results[["p_value"]])
  
  #progress bar
  cat(paste0(round(nclass / n_class_max * 100), '% completed'))
  Sys.sleep(.05)
  # '\014' is the form-feed character, interpreted by some consoles as "clear"
  if (nclass == n_class_max) cat(': Done')
  else cat('\014')
}
17% completed25% completed33% completed42% completed50% completed58% completed67% completed75% completed83% completed92% completed100% completed: Done
Show code
#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_
# Total elapsed time (fit + bootstrap), reported in minutes.
# BUG FIX: "new - old" uses difftime's automatic units, so the "minutes"
# label was only accidentally correct (and time_diff divided a possibly
# minutes-valued difftime by 60). Request minutes explicitly.
new <- Sys.time()
time_diff <- as.numeric(difftime(Sys.time(), old, units = "mins"))
paste0("The model took ", round(as.numeric(difftime(new, old, units = "mins")), 2), " minutes")
[1] "The model took 20.27 minutes"
Show code
# Archive this run's results under the "_ppio" suffix.
model_array_ppio2 <- model_array
fitted_log_ratio_array_ppio <- fitted_log_ratio_array
bootstrap_log_ratio_array_ppio <- bootstrap_log_ratio_array
bootstrap_results_ppio <- bootstrap_results
p_value_array_ppio <- p_value_array

# Get the BIC values for all models in model_array_ppio2.
# IDIOM FIX: vapply() instead of sapply() — guarantees a numeric vector
# regardless of the list's contents.
bic_values <- vapply(model_array_ppio2, function(model) model$bic, numeric(1))

# Identify the index of the model with the lowest BIC.
best_model_index <- which.min(bic_values)

# Select the best (lowest-BIC) model.
LCA_best_model_ppio <- model_array_ppio2[[best_model_index]]
#####################################################################################################################################################################
#Within poLCA, parameter estimates are obtained by a procedure that repeatedly improves estimates.
#This is stopped when no further improvements are obtained, or until a maximum number of iterations is reached. The starting values are the values at which such repetitions were started. Increasing the number 4 R. ACHTERHOF ET AL.of iterations (cycles within each estimation) and setting more different starting values for each repetition results in a greater likelihood that the global (rather than local) maximum of the log-likelihood function (and thus, the best possible solution) is reached. The maximum number of iterations was chosen as 10.000, and 500 different sets of starting values were used (thus going beyond the recommendations by Linzer & Lewis, 2011; Oberski, 2016). As such, the influence of chance was minimized while the reproducibility of the results was maximized# 

Time for this code chunk to run: 0.3 minutes

Resultados

Hicimos un gráfico de los resultados

Show code
# Relative entropy of a fitted LCA model: 1 = perfectly separated classes,
# 0 = completely uncertain (uniform) posterior class memberships.
#
# lc: a poLCA-style fit with an N x K `posterior` matrix of class-membership
#     probabilities (rows sum to 1).
# Returns a scalar in [0, 1].
relative.entropy <- function(lc) {
  post <- lc$posterior
  # Shannon entropy summed over all entries. 0 * log(0) yields NaN, which
  # na.rm = TRUE drops — matching the limit p * log(p) -> 0 as p -> 0.
  en <- -sum(post * log(post), na.rm = TRUE)
  # Scale by the maximum possible entropy, N * log(K)
  1 - en / (nrow(post) * log(ncol(post)))
}
machine_tolerance <- sqrt(.Machine$double.eps)
# Entropy-based pseudo R-squared: share of classification uncertainty
# (relative to the class proportions alone) removed by the posterior.
# 1 = perfect assignment, 0 = posterior no better than the prior.
#
# fit: a poLCA-style fit with class proportions `P` and an N x K
#      `posterior` matrix.
entropy.R2 <- function(fit) {
  shannon <- function(p) {
    # Drop (near-)zero probabilities: Lim_{p->0} p*log(p) = 0
    p_pos <- p[p > machine_tolerance]
    -sum(p_pos * log(p_pos))
  }
  prior_uncertainty <- shannon(fit$P)
  posterior_uncertainty <- mean(apply(fit$posterior, 1, shannon))
  (prior_uncertainty - posterior_uncertainty) / prior_uncertainty
}

# "log-likelihood": The log-likelihood of the model is computed by mod$llik.
# "Chi2": The chi-squared test statistic for the model is computed by mod$Chisq.
# "Chi2_pval": The p-value for the chi-squared test is computed by mod$Chisq.pvalue.
# "resid. df": The residual degrees of freedom for the model is computed by mod$resid.df.
# "AIC" : Akaike Information Criterion computed by mod$aic.
# "BIC": The Bayesian Information Criterion for the model is computed by mod$bic.
# "aBIC": The adjusted Bayesian Information Criterion for the model is computed by (-2*mod$llik) + (log((mod$N + 2)/24) * mod$npar).
# "cAIC": The consistent Akaike Information Criterion for the model is computed by (-2*mod$llik) + mod$npar * (1 + log(mod$N)). Making AIC asymptotically coherent
# "likelihood-ratio": The likelihood-ratio test statistic for the model is computed by mod$Gsq.
# "LLik_pval": The p-value for the likelihood-ratio test is computed by mod$Gsq.pvalue.
# "Entropy": The entropy of the model, which is a measure of the quality of classification, is computed by the relative.entropy() function.
# "Entropy.R2": The proportion of variance in the data accounted for by the model entropy is computed by the entropy.R2() function.
# "Dev Change": The change in deviance (G-squared) from the previous model in model_array_ppio2 is computed by (mod_min1$Gsq - mod$Gsq).
# "df": The number of degrees of freedom for the model is computed by mod$C^mod$I - mod$npar - 1.
# "pval": The p-value for the deviance change test is computed by pchisq(mod_min1$Gsq - mod$Gsq, mod_min1$resid.df - mod$resid.df).
#  "BLRT": log-likelihood ratio between the null(nclass -1 ) and alternative models
#  "BLRT.pvalue": p-value for the log-likelihood ratio test between the null (nclass - 1) and alternative models

# Build one row of fit statistics per candidate model (2 .. n_class_max
# classes). NOTE(review): relies on model_array_ppio2, n_class_max,
# fitted_log_ratio_array_ppio and p_value_array_ppio created above.
tab_ppio <- data.frame()##

# Loop through each model
for (i in 2:n_class_max) {
  skip_to_next <- FALSE

  # Get the model and the previous (i - 1 class) model; the latter is needed
  # for the nested-model deviance-change test below
  mod <- model_array_ppio2[[i]]
  mod_min1 <- model_array_ppio2[[(i-1)]]

  # Skip models without class predictions (predclass is NULL when the
  # fit did not produce assignments, e.g. non-convergence)
  if (is.null(mod$predclass)) {
    skip_to_next <- TRUE
  }

  # If the model has valid predictions, calculate the measures and add them to the data frame
  if (!skip_to_next) {
    # Largest number of response categories across the manifest items.
    # NOTE(review): the original comment called this "number of latent
    # classes", which does not match the computation — confirm intent.
    mod$C <- max(t(matrix(apply(mod$y, 2, max))))
    # Number of manifest variables
    mod$J <- ncol(mod$y)
    # Total number of item-by-category cells (J * C)
    mod$I <- mod$J * mod$C
    # Degrees of freedom used for the chi-square / G-square p-values below.
    # NOTE(review): C^I assumes every item has C categories; verify against
    # poLCA's own resid.df when items have differing category counts.
    mod$df <- mod$C^mod$I - mod$npar - 1
    # Chi-square goodness-of-fit p-value
    mod$Chisq.pvalue <- (1 - pchisq(mod$Chisq, mod$df))
    # AIC (rounded for display)
    mod$aic <- round(mod$aic, 2)
    # BIC (rounded for display)
    mod$bic <- round(mod$bic, 2)
    # Adjusted (sample-size-corrected) BIC, n* = (n+2)/24
    # (https://github.com/dlinzer/poLCA/issues/10)
    mod$aBIC <- round((-2 * mod$llik) + (log((mod$N+2)/24) * mod$npar), 2) 
    # Conditional AIC.
    # NOTE(review): this formula (2 * Nobs * log(N/Nobs)) does not match the
    # cAIC definition quoted above ((-2*llik) + npar*(1 + log(N))) — confirm
    # which is intended before interpreting the cAIC column.
    mod$cAIC <-  round((-2 * mod$llik) + (2 * mod$Nobs * log(mod$N/mod$Nobs)), 2)
    # Approximate weight of evidence criterion
    # (https://jbds.isdsa.org/public/journals/1/html/v2n2/qiu/)
    mod$awe <-  round((-2 * mod$llik) + (2 * mod$npar * log(mod$Nobs)+1.5), 2)    
    # Gsq: likelihood-ratio/deviance statistic already stored on the fit.
    # NOTE(review): this bare expression has no effect inside a loop — it
    # neither assigns nor prints.
    mod$Gsq
    # Likelihood-ratio test p-value
    mod$Gsq.pvalue <- (1 - pchisq(mod$Gsq, mod$df))
    # Relative entropy (classification quality; see relative.entropy above)
    mod$RelEnt <- round(relative.entropy(mod), 2)
    # Entropy R-squared (see entropy.R2 above)
    mod$EntR2 <- round(entropy.R2(mod), 2)
    # Deviance change vs the (i - 1)-class model
    mod$DevChange <- round(mod_min1$Gsq - mod$Gsq, 2)
    # Degrees-of-freedom change vs the (i - 1)-class model
    mod$dfChange <- mod_min1$resid.df - mod$resid.df
    # P-value for the deviance-change (nested-model) test
    mod$pvalDevChange <- round(pchisq(mod$DevChange, mod$dfChange, lower.tail = FALSE), 4)
    # Bootstrap likelihood-ratio statistic and p-value for (i-1) vs i classes
    mod$BLRT <- round(fitted_log_ratio_array_ppio[[i]],2)
    mod$BLRT.pvalue <- p_value_array_ppio[[i]]
    # Record the number of classes this row describes
    mod$ModelIndex <- i
      # Check if the data.frame is empty or has the same number of columns as mod
      #if (nrow(tab_ppio) == 0 || ncol(tab_ppio) == ncol(mod)) {
      # Append the selected measures as one row (t() of a one-column matrix)
    tab_ppio <- rbind.data.frame(tab_ppio, t(data.matrix(mod[c("llik", "Chisq", "Chisq.pvalue", "resid.df", "aic", "bic", "aBIC", "cAIC", "awe", "Gsq", "Gsq.pvalue", "RelEnt", "EntR2", "DevChange", "dfChange", "pvalDevChange", "ModelIndex","BLRT", "BLRT.pvalue")])))

    #  } else {}
  }
}

# Flatten any list-typed columns produced by rbind-ing extracted model
# fields so tab_ppio holds plain atomic vectors.
# vapply (rather than sapply) guarantees a logical vector per column.
list_cols <- vapply(tab_ppio, is.list, logical(1))
# unlist the list-like columns
unlisted_cols <- lapply(tab_ppio[list_cols], unlist)
# bind the unlisted columns back onto the non-list columns
# (do.call(cbind, list()) is NULL, so this is safe when nothing is list-typed)
tab_ppio <- cbind(tab_ppio[!list_cols], do.call(cbind, unlisted_cols))
# Erase rownames left over from rbind/t()
rownames(tab_ppio) <- NULL

manualcolors <- c('indianred1', 'cornflowerblue', 'gray50', 'darkolivegreen4', 'slateblue2', 
                  'firebrick4', 'goldenrod4')

# Internal names of the fit measures, in tab_ppio column order
levels <- c("llik", "Chisq", "Chisq.pvalue", "resid.df", "aic", "bic", "aBIC", "cAIC", "awe",
            "Gsq", "Gsq.pvalue", "RelEnt", "EntR2", "DevChange", "dfChange",
            "pvalDevChange", "BLRT", "BLRT.pvalue")

# Spanish display labels, one per entry of `levels` (18 each).
# NOTE(review): this vector was garbled in the published source (duplicated
# quote characters); rebuilt here to pair 1:1 with `levels`.
labels <- c('Log-Verosimilitud', 'Chi2', 'valor p Chi2', 'Grados de libertad',
            'Criterio de Información\nde Akaike (AIC)',
            'Criterio de Información\nBayesiano (BIC)',
            'BIC Ajustado (SABIC)', 'AIC Corregido',
            'Peso de evidencia aproximado (awe)',
            'G-squared/Deviance', 'Valor p G-squared',
            'Entropía Relativa', 'Entropía R2',
            'Cambio en Deviance\n(con modelo previo)',
            'Grados de libertad del cambio', 'valor p cambio deviance',
            'BLRT', 'valor p BLRT')

# Line plot of the information criteria across candidate class counts
fig_lca_fit <- tab_ppio %>%
  dplyr::mutate_if(is.character, as.numeric) %>%  # convert character columns to numeric
  tidyr::pivot_longer(cols = -ModelIndex, # everyone but the index
                      names_to = "indices", values_to = "value", values_drop_na = FALSE) %>%
  dplyr::mutate(indices = factor(indices, levels = levels, labels = labels)) %>%
  # Keep only the information criteria (AIC/BIC variants and awe)
  dplyr::filter(grepl("(AIC|BIC|awe)", indices, ignore.case = TRUE)) %>%
  dplyr::mutate(ModelIndex = factor(ModelIndex, levels = 2:n_class_max)) %>%
  ggplot(aes(x = ModelIndex, y = value, group = indices, color = indices, linetype = indices)) +
  geom_line(size = 1.5) +
  scale_color_manual(values = manualcolors) +
  #scale_linetype_manual(values = c("solid", "dashed", "dotted")) +
  labs(x = "Número de clases", y = "Valor", color = "Medida", linetype = "Medida") +
  #facet_wrap(.~indices, scales = "free_y", nrow = 4, ncol = 1) +
  theme_bw()

fig_lca_fit
Show code
# Persist the fit-comparison figure at print quality (600 dpi)
ggsave("_fig1_comparison_imp.png",fig_lca_fit, dpi=600)
# Figure style follows International Journal of Workplace Health Management (Zhang et al., 2018)

Time for this code chunk to run: 0.1 minutes

Luego en una tabla

Show code
# Render the fit-measure table (one row per candidate model) as markdown.
tab_ppio %>%
  dplyr::select(ModelIndex, everything()) %>% 
  # convert character columns to numeric; across() supersedes mutate_if()
  dplyr::mutate(dplyr::across(where(is.character), as.numeric)) %>%
  knitr::kable(format = "markdown", caption = "Fit measures of models")
Table 2: Fit measures of models
ModelIndex llik Chisq Chisq.pvalue resid.df aic bic aBIC cAIC awe Gsq Gsq.pvalue RelEnt EntR2 DevChange dfChange pvalDevChange BLRT BLRT.pvalue
2 -27155.49 11587.975 1 3736 54416.98 54747.69 54579.29 54310.98 55185.91 4753.008 1 0.94 0.93 2837.54 27 0e+00 2837.54 0
3 -26898.07 9397.293 1 3709 53956.15 54455.34 54201.14 53796.15 55116.03 4238.176 1 0.84 0.81 514.83 27 0e+00 514.83 0
4 -26737.68 9249.189 1 3682 53689.36 54357.02 54017.03 53475.36 55240.19 3917.384 1 0.93 0.92 320.79 27 0e+00 320.79 0
5 -26596.21 8494.639 1 3655 53460.42 54296.56 53870.77 53192.42 55402.20 3634.447 1 0.81 0.79 282.94 27 0e+00 282.94 0
6 -26470.23 8780.257 1 3628 53262.45 54267.07 53755.49 52940.45 55595.19 3382.480 1 0.82 0.80 251.97 27 0e+00 251.97 0
7 -26359.05 8327.746 1 3601 53094.10 54267.20 53669.82 52718.10 55817.79 3160.128 1 0.86 0.84 222.35 27 0e+00 222.35 0
8 -26288.47 7694.552 1 3574 53006.94 54348.51 53665.34 52576.94 56121.58 3018.969 1 0.87 0.85 141.16 27 0e+00 141.16 0
9 -26229.28 8344.754 1 3547 52942.55 54452.60 53683.63 52458.55 56448.14 2900.577 1 0.85 0.83 118.39 27 0e+00 118.39 0
10 -26188.48 7372.688 1 3520 52914.97 54593.49 53738.74 52376.97 56811.51 2818.995 1 0.83 0.81 81.58 27 0e+00 81.58 0
11 -26154.69 6518.600 1 3493 52901.37 54748.37 53807.82 52309.37 57188.87 2751.400 1 0.81 0.79 67.59 27 0e+00 67.59 0
12 -26126.73 6838.882 1 3466 52899.47 54914.94 53888.60 52253.47 57577.91 2695.493 1 0.80 0.78 55.91 27 9e-04 55.91 0

Time for this code chunk to run: 0 minutes

Presentamos el modelo con mejor ajuste

Show code
print(model_array_ppio[best_model_index]) #
[[1]]
Conditional item response (column) probabilities,
 by outcome variable, for each class (row) 
 
$CAUSAL
          Pr(1)  Pr(2)  Pr(3)  Pr(4)
class 1:      0 0.2725 0.7275 0.0000
class 2:      0 0.0051 0.0000 0.9949
class 3:      0 0.9695 0.0305 0.0000
class 4:      0 0.3994 0.6006 0.0000
class 5:      0 0.0943 0.9057 0.0000
class 6:      0 0.0444 0.0000 0.9556

$EDAD_MUJER_REC
          Pr(1)  Pr(2)  Pr(3)  Pr(4)  Pr(5)  Pr(6)
class 1:      0 0.0434 0.3117 0.3254 0.2609 0.0585
class 2:      0 0.3692 0.2969 0.2089 0.1060 0.0191
class 3:      0 0.0075 0.2150 0.4359 0.3061 0.0355
class 4:      0 0.0167 0.2968 0.3364 0.2908 0.0593
class 5:      0 0.0020 0.0967 0.3037 0.5211 0.0765
class 6:      0 0.1865 0.3631 0.2472 0.1733 0.0299

$PUEBLO_ORIGINARIO_REC
           Pr(1)  Pr(2)  Pr(3)
class 1:  0.2110 0.7324 0.0567
class 2:  0.1525 0.8048 0.0427
class 3:  0.1101 0.8429 0.0469
class 4:  0.1574 0.8041 0.0385
class 5:  0.1451 0.8549 0.0000
class 6:  0.1286 0.8304 0.0410

$PAIS_ORIGEN_REC
          Pr(1)  Pr(2)  Pr(3)
class 1:      0 1.0000 0.0000
class 2:      0 1.0000 0.0000
class 3:      0 0.8833 0.1167
class 4:      0 0.0000 1.0000
class 5:      0 0.9121 0.0879
class 6:      0 0.0242 0.9758

$HITO1_EDAD_GEST_SEM_REC
          Pr(1)  Pr(2)  Pr(3)  Pr(4)  Pr(5)  Pr(6)
class 1:      0 0.0000 0.3019 0.3736 0.2123 0.1122
class 2:      0 0.7803 0.2180 0.0000 0.0000 0.0017
class 3:      0 0.2826 0.2118 0.4971 0.0000 0.0085
class 4:      0 0.0080 0.3216 0.3787 0.2130 0.0787
class 5:      0 0.0100 0.5761 0.2617 0.1320 0.0202
class 6:      0 0.8113 0.1827 0.0000 0.0000 0.0060

$MACROZONA
          Pr(1)  Pr(2)  Pr(3)  Pr(4)  Pr(5)  Pr(6)
class 1:      0 0.2813 0.2053 0.2150 0.0887 0.2097
class 2:      0 0.3851 0.1754 0.1463 0.0860 0.2072
class 3:      0 0.4124 0.1586 0.1599 0.1237 0.1454
class 4:      0 0.5700 0.1037 0.0521 0.2291 0.0452
class 5:      0 0.6795 0.1033 0.0689 0.0766 0.0718
class 6:      0 0.5517 0.0851 0.0180 0.3024 0.0428

$PREV_TRAMO_REC
          Pr(1)  Pr(2)  Pr(3)  Pr(4)  Pr(5)
class 1:      0 0.0043 0.6629 0.3268 0.0060
class 2:      0 0.0703 0.7263 0.1997 0.0037
class 3:      0 0.1113 0.5262 0.3625 0.0000
class 4:      0 0.0000 0.6293 0.2768 0.0939
class 5:      0 0.6164 0.1258 0.2526 0.0051
class 6:      0 0.0303 0.5280 0.1650 0.2767

Estimated class population shares 
 0.387 0.1501 0.1481 0.1043 0.1648 0.0456 
 
Predicted class memberships (by modal posterior prob.) 
 0.4086 0.1504 0.1533 0.1206 0.1233 0.0438 
 
========================================================= 
Fit for 6 latent classes: 
========================================================= 
number of observations: 3789 
number of estimated parameters: 161 
residual degrees of freedom: 3628 
maximum log-likelihood: -26470.23 
 
AIC(6): 53262.45
BIC(6): 54267.07
G^2(6): 3382.48 (Likelihood ratio/deviance statistic) 
X^2(6): 8780.257 (Chi-square goodness of fit) 
 

Time for this code chunk to run: 0 minutes

Show code
save.image("data2_lca2_imp.RData")

Time for this code chunk to run: 0 minutes

Show code
# Session reproducibility: list every loaded package with version and source.
# library() (not require()) so a missing tidyverse fails loudly instead of
# returning FALSE and continuing.
library(tidyverse)
sesion_info <- devtools::session_info()
dplyr::select(
  tibble::as_tibble(sesion_info$packages),
  c(package, loadedversion, source)
) %>% 
  # NOTE(review): column labels fixed — the original reused 'Variable' /
  # 'Percentage' headers from another table, which do not describe the
  # package / loadedversion / source columns selected above.
  DT::datatable(filter = 'top',
                colnames = c('Row number' = 1, 'Package' = 2,
                             'Version' = 3, 'Source' = 4),
              caption = htmltools::tags$caption(
        style = 'caption-side: top; text-align: left;',
        '', htmltools::em('Packages')),
      options=list(
initComplete = htmlwidgets::JS(
        "function(settings, json) {",
        "$(this.api().tables().body()).css({
            'font-family': 'Helvetica Neue',
            'font-size': '50%', 
            'code-inline-font-size': '15%', 
            'white-space': 'nowrap',
            'line-height': '0.75em',
            'min-height': '0.5em'
            });",
        "}")))

Time for this code chunk to run: 0.1 minutes