Análisis de clases latentes exploratorio y comparativo con predictores (glca), sin pueblo originario, pero con año y con recategorización de edad de la mujer y de semana gestacional, hito 1
<!-- jQuery dependency for the image-zoom helper below.
     NOTE(review): the <script> tags were mangled in the source; reconstructed. -->
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
<script>
$(document).ready(function() {
  // Overlay container that will hold the enlarged copy of a clicked plot
  $('body').prepend('<div class=\"zoomDiv\"><img src=\"\" class=\"zoomImg\"></div>');
  // onClick function for all plots (img's): copy the clicked image into the
  // overlay and style it as a centered, fixed lightbox
  $('img:not(.zoomImg)').click(function() {
    $('.zoomImg').attr('src', $(this).attr('src')).css({width: '100%'});
    $('.zoomDiv').css({opacity: '1', width: 'auto', border: '1px solid white', borderRadius: '5px', position: 'fixed', top: '50%', left: '50%', marginRight: '-50%', transform: 'translate(-50%, -50%)', boxShadow: '0px 0px 50px #888888', zIndex: '50', overflow: 'auto', maxHeight: '100%'});
  });
  // onClick function for zoomImg: clicking the enlarged image hides the overlay
  $('img.zoomImg').click(function() {
    $('.zoomDiv').css({opacity: '0', width: '0%'});
  });
});
</script>
<script src="hideOutput.js"></script>
<!-- NOTE(review): the opening <script> tag for the inline code below was lost
     in the source; reconstructed. -->
<script>
$(document).ready(function() {
  var $chunks = $('.fold'); // `var` added: was an accidental global
  $chunks.each(function () { // add button to source code chunks
    if ( $(this).hasClass('s') ) {
      $('pre.r', this).prepend("<div class=\"showopt\">Show Source</div><br style=\"line-height:22px;\"/>");
      $('pre.r', this).children('code').attr('class', 'folded');
    } // add button to output chunks
    if ( $(this).hasClass('o') ) {
      $('pre:not(.r)', this).has('code').prepend("<div class=\"showopt\">Show Output</div><br style=\"line-height:22px;\"/>");
      $('pre:not(.r)', this).children('code:not(r)').addClass('folded'); // add button to plots
      $(this).find('img').wrap('<pre class=\"plot\"></pre>');
      $('pre.plot', this).prepend("<div class=\"showopt\">Show Plot</div><br style=\"line-height:22px;\"/>");
      $('pre.plot', this).children('img').addClass('folded');
    }
  }); // hide all chunks when document is loaded
  $('.folded').css('display', 'none'); // function to toggle the visibility
  $('.showopt').click(function() {
    // Flip the button label between "Show …" and "Hide …"
    var label = $(this).html();
    if (label.indexOf("Show") >= 0) {
      $(this).html(label.replace("Show", "Hide"));
    } else {
      $(this).html(label.replace("Hide", "Show"));
    }
    $(this).siblings('code, img').slideToggle('fast', 'swing');
  });
});
</script>

Cargamos los datos
used (Mb) gc trigger (Mb) max used (Mb)
Ncells 652813 34.9 1308954 70 912046 48.8
Vcells 1170238 9.0 8388608 64 2056718 15.7
# Load the preprocessed dataset (export dated 2023-05-12) that provides
# `mydata_preds3` and related objects used by every model below
load("data2_lca211_sin_po_alt_2023_05_12 (3).RData")
Cargamos los paquetes
knitr::opts_chunk$set(echo = TRUE)

# Attach a CRAN package, installing it first when missing.
# NOTE: the original `if(!require(p)){install.packages("p")}` pattern installed
# a missing package but never attached it in the same run; this loader does both.
load_or_install <- function(pkg) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg)
    library(pkg, character.only = TRUE)
  }
}

load_or_install("poLCA")
# poLCAParallel is GitHub-only (QMUL fork packaged for parallel estimation)
if (!require(poLCAParallel)) {
  devtools::install_github("QMUL/poLCAParallel@package")
  library(poLCAParallel)
}
# Attach order is preserved from the original script so that name masking
# (e.g. by tidyverse) is unchanged.
for (pkg in c("compareGroups", "parallel", "Hmisc", "tidyverse")) {
  load_or_install(pkg)
}
try(load_or_install("sjPlot")) # optional: tolerate an install failure
for (pkg in c("emmeans", "nnet", "here", "doParallel", "progress", "caret",
              "rpart", "rpart.plot", "partykit", "randomForest", "ggcorrplot",
              "polycor", "tableone", "broom", "plotly", "rsvg",
              "DiagrammeRsvg", "glca")) {
  load_or_install(pkg)
}
#if(!require(poLCA)){githubinstall::gh_install_packages("poLCA", ref = github_pull("14"))}
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:#:
Definimos ciertas constantes
# Estimation constants ----
clus_iter <- 500                     # random starts per model (was `=`; use `<-`)
n_thread <- parallel::detectCores()  # worker threads for parallel routines
nrep <- clus_iter                    # number of different initial values (could be n_thread too)
n_class_max <- 10                    # maximum number of classes to investigate
n_bootstrap <- 100                   # number of bootstrap samples (BLRT)
print(n_thread)
[1] 8
Los valores cero se declararon como datos perdidos.
# Measurement model: seven categorical manifest indicators, with `outcome` as
# the level-1 covariate in the class-membership (regression) part of the glca.
# Reference: Biemer, P. P., & Wiesen, C. (2002). Measurement error evaluation
# of self-reported drug use: a latent class analysis of the US National
# Household Survey on Drug Abuse. Journal of the Royal Statistical Society:
# Series A (Statistics in Society), 165(1), 97-119. doi:10.1111/1467-985x.00612
f_adj <- item(AÑO, CAUSAL, EDAD_MUJER_REC, PAIS_ORIGEN_REC, HITO1_EDAD_GEST_SEM_REC, MACROZONA, PREV_TRAMO_REC) ~ outcome
#lca_entropia(x="ppio", seed= 2125, k= 8, f= f_preds, dat= mydata_preds, nbr_repet= 30, na_rm= T)
#3
#<div style="border: 1px solid #ddd; padding: 5px; overflow-y: scroll; height:400px; overflow-x: scroll; width:100%">
# f is the selected variables
# dat is the data
# nb_var is the number of selected variables
# k is the number of latent class generated
# nbr_repet is the number of repetition to
# reach the convergence of EM algorithm
# x es el código para las variables de los modelos
#seed es el numero random para las semillas. ej: 4345.
#Modo de calcular el mejor modelo.
#z_ #
#2023-01-20
#https://github.com/QMUL/poLCAParallel/blob/master/exec/3_blrt.R
#0h s
seed <- 2125
old <- Sys.time()

# Fit glca models with 2..10 latent classes under identical estimation
# settings; only `nclass` varies. (Replaces nine copy-pasted calls.)
fit_glca <- function(k) {
  glca(f_adj, data = mydata_preds3, nclass = k, seed = seed, verbose = FALSE,
       n.init = 5e2, decreasing = TRUE, maxiter = 1e4, testiter = 500)
}
lca22  <- fit_glca(2)
lca23  <- fit_glca(3)
lca24  <- fit_glca(4)
lca25  <- fit_glca(5)
lca26  <- fit_glca(6)
lca27  <- fit_glca(7)
lca28  <- fit_glca(8)
lca29  <- fit_glca(9)
lca210 <- fit_glca(10)

new_med <- Sys.time()
# Report elapsed time WITH units (a bare difftime subtraction printed a
# unit-ambiguous number, e.g. "2.29")
elapsed <- difftime(new_med, old)
paste0("The model took ", round(as.numeric(elapsed), 2), " ", units(elapsed),
       " until every LCA was computed")
[1] "The model took 2.29 until every LCA was computed"
Luego calculamos la razón de verosimilitud mediante remuestreo bootstrap (BLRT), comparando cada modelo con el modelo que asume una clase menos.
Hicimos un gráfico de los resultados
# Palette and labelling for the goodness-of-fit comparison figure
manualcolors <- c('indianred1', 'cornflowerblue', 'gray50', 'darkolivegreen4', 'slateblue2',
                  'firebrick4', 'goldenrod4')
levels4 <- c("logLik", "Gsq", "AIC", "CAIC", "BIC", "entropy", "Res.Df")
# NOTE(review): this vector was garbled in the source; reconstructed as one
# Spanish label per entry of `levels4`, in the same order.
labels4 <- c('Log-Verosimilitud', 'Chi2', 'Criterio de Información\nde Akaike(AIC)',
             'AIC Corregido', 'Criterio de Información\nBayesiano (BIC)',
             'Entropía', 'Grados de libertad residuales')

# AIC/CAIC/BIC trajectories across 2..10 classes, long format, one line per index
fig_lca_fit4 <- cbind.data.frame(rn = 2:10, gof2$gtable) %>%
  data.frame() %>%
  dplyr::mutate_if(is.character, as.numeric) %>% # convert character columns to numeric
  tidyr::pivot_longer(cols = -rn, names_to = "indices", values_to = "value",
                      values_drop_na = FALSE) %>%
  dplyr::mutate(indices = factor(indices, levels = levels4, labels = labels4)) %>%
  dplyr::filter(grepl("(AIC|BIC)", indices, ignore.case = TRUE)) %>%
  dplyr::mutate(ModelIndex = factor(rn, levels = 2:10)) %>%
  ggplot(aes(x = ModelIndex, y = value, group = indices, color = indices, linetype = indices)) +
  geom_line(size = 1.5) +
  scale_color_manual(values = manualcolors) +
  #scale_linetype_manual(values = c("solid", "dashed", "dotted")) +
  # NOTE(review): this labs() call was garbled (`==`, doubled quotes); repaired.
  labs(x = "Número de clases", y = "Valor", color = "Medida", linetype = "Medida") +
  #facet_wrap(.~indices, scales = "free_y", nrow = 4, ncol = 1) +
  theme_bw()
fig_lca_fit4
ggsave("_fig1_comparison_glca_sin_po.png", fig_lca_fit4, dpi = 600)
#International Journal of Workplace Health Management (Zhang et al., 2018).
#International Journal of Workplace Health Management (Zhang et al., 2018).
Luego en una tabla
# Goodness-of-fit indices for every fitted model, rendered as a markdown table
cbind.data.frame(rn = 2:10, gof2$gtable) %>%
  dplyr::select(rn, everything()) %>%
  # superseded mutate_if -> across(): convert character columns to numeric
  dplyr::mutate(dplyr::across(where(is.character), as.numeric)) %>%
  knitr::kable(format = "markdown", caption = "Índices de ajuste modelos")
| rn | logLik | AIC | CAIC | BIC | entropy | Res.Df | Gsq |
|---|---|---|---|---|---|---|---|
| 2 | -28610.52 | 57317.05 | 57664.56 | 57616.56 | 0.9876848 | 3740 | 6632.909 |
| 3 | -28366.46 | 56878.93 | 57407.44 | 57334.44 | 0.8853549 | 3715 | 6144.788 |
| 4 | -28183.24 | 56562.49 | 57271.99 | 57173.99 | 0.8380636 | 3690 | 5778.347 |
| 5 | -28045.48 | 56336.96 | 57227.47 | 57104.47 | 0.8538308 | 3665 | 5502.824 |
| 6 | -27936.49 | 56168.97 | 57240.47 | 57092.47 | 0.8858425 | 3640 | 5284.837 |
| 7 | -27801.08 | 55948.15 | 57200.65 | 57027.65 | 0.8113430 | 3615 | 5014.013 |
| 8 | -27741.32 | 55878.65 | 57312.14 | 57114.14 | 0.8006377 | 3590 | 4894.511 |
| 9 | -27687.07 | 55820.14 | 57434.63 | 57211.63 | 0.7415083 | 3565 | 4786.003 |
| 10 | -27636.93 | 55769.86 | 57565.34 | 57317.34 | 0.7555191 | 3540 | 4685.717 |
# Number of classes with the smallest BIC; the table rows correspond to
# nclass = 2..10, so row index + 1 gives the class count.
best_model2 <-
  as.numeric(which.min(cbind.data.frame(rn = 2:10, gof2$gtable)$BIC) + 1)
Presentamos el modelo con mejor ajuste
Call:
glca(formula = f_adj, data = mydata_preds3, nclass = 7, n.init = 500,
decreasing = T, testiter = 500, maxiter = 10000, seed = seed,
verbose = FALSE)
Manifest items : AÑO CAUSAL EDAD_MUJER_REC PAIS_ORIGEN_REC HITO1_EDAD_GEST_SEM_REC MACROZONA PREV_TRAMO_REC
Covariates (Level 1) : outcome
Categories for manifest items :
Y = 1 Y = 2 Y = 3 Y = 4 Y = 5 Y = 6
AÑO 2 3 4 5 6
CAUSAL 2 3 4
EDAD_MUJER_REC 1 2 3 4
PAIS_ORIGEN_REC 1 2 3
HITO1_EDAD_GEST_SEM_REC 1 2 3 4
MACROZONA 1 2 3 4 5 6
PREV_TRAMO_REC 1 2 3 4 5
Model : Latent class analysis
Number of latent classes : 7
Number of observations : 3789
Number of parameters : 173
log-likelihood : -27801.08
G-squared : 5014.013
AIC : 55948.15
BIC : 57027.65
Marginal prevalences for latent classes :
Class 1 Class 2 Class 3 Class 4 Class 5 Class 6 Class 7
0.05225 0.12116 0.15229 0.12730 0.40283 0.04512 0.09906
Class prevalences by group :
Class 1 Class 2 Class 3 Class 4 Class 5 Class 6 Class 7
ALL 0.05225 0.12116 0.15229 0.1273 0.40283 0.04512 0.09906
Logistic regression coefficients :
Class 1/7 Class 2/7 Class 3/7 Class 4/7 Class 5/7
(Intercept) -1.0711 1.6402 0.1435 -1.1488 1.8684
outcome1 0.4737 -1.9441 0.3169 1.4885 -0.5415
Class 6/7
(Intercept) -1.3631
outcome1 0.6295
Item-response probabilities :
AÑO
Y = 1 Y = 2 Y = 3 Y = 4 Y = 5
Class 1 0.9341 0.0000 0.0019 0.0640 0.0000
Class 2 0.1881 0.2480 0.2299 0.1971 0.1369
Class 3 0.1787 0.1892 0.2029 0.1915 0.2377
Class 4 0.1629 0.2023 0.1899 0.2466 0.1983
Class 5 0.1400 0.2480 0.1523 0.2410 0.2187
Class 6 0.1380 0.1716 0.2450 0.1223 0.3232
Class 7 0.1113 0.2384 0.1947 0.2627 0.1928
CAUSAL
Y = 1 Y = 2 Y = 3
Class 1 0.7388 0.2612 0.0000
Class 2 0.0527 0.9473 0.0000
Class 3 0.0000 0.0066 0.9934
Class 4 0.1759 0.8241 0.0000
Class 5 0.4730 0.5270 0.0000
Class 6 0.0561 0.0209 0.9230
Class 7 0.4907 0.5093 0.0000
EDAD_MUJER_REC
Y = 1 Y = 2 Y = 3 Y = 4
Class 1 0.5246 0.0228 0.2119 0.2407
Class 2 0.3374 0.0326 0.1655 0.4646
Class 3 0.3153 0.3497 0.2456 0.0894
Class 4 0.4935 0.0000 0.0351 0.4714
Class 5 0.5531 0.0122 0.2234 0.2112
Class 6 0.4898 0.1566 0.2314 0.1222
Class 7 0.5793 0.0060 0.1675 0.2472
PAIS_ORIGEN_REC
Y = 1 Y = 2 Y = 3
Class 1 0.0853 0.7945 0.1201
Class 2 0.0024 0.8955 0.1021
Class 3 0.0000 0.9944 0.0056
Class 4 0.0000 0.9279 0.0721
Class 5 0.0000 0.9836 0.0164
Class 6 0.0000 0.0000 1.0000
Class 7 0.0000 0.0000 1.0000
HITO1_EDAD_GEST_SEM_REC
Y = 1 Y = 2 Y = 3 Y = 4
Class 1 0.0467 0.0687 0.3900 0.4945
Class 2 0.0000 0.0000 0.5140 0.4860
Class 3 0.0104 0.9861 0.0035 0.0000
Class 4 0.0135 0.3928 0.5707 0.0231
Class 5 0.0350 0.2300 0.7273 0.0077
Class 6 0.0250 0.9750 0.0000 0.0000
Class 7 0.0309 0.0910 0.8192 0.0589
MACROZONA
Y = 1 Y = 2 Y = 3 Y = 4 Y = 5 Y = 6
Class 1 0.0404 0.1673 0.4148 0.1293 0.0696 0.1786
Class 2 0.0000 0.3434 0.1257 0.2738 0.0922 0.1649
Class 3 0.0035 0.3841 0.1747 0.1464 0.0862 0.2052
Class 4 0.0000 0.7205 0.0972 0.0462 0.0711 0.0650
Class 5 0.0000 0.3370 0.1822 0.1829 0.1035 0.1944
Class 6 0.0058 0.5465 0.0897 0.0170 0.2921 0.0489
Class 7 0.0000 0.6401 0.0774 0.0398 0.2210 0.0218
PREV_TRAMO_REC
Y = 1 Y = 2 Y = 3 Y = 4 Y = 5
Class 1 0.0215 0.0272 0.6448 0.2438 0.0626
Class 2 0.0082 0.0582 0.5991 0.3346 0.0000
Class 3 0.0018 0.0695 0.7241 0.1994 0.0053
Class 4 0.0000 0.7295 0.0542 0.2072 0.0091
Class 5 0.0000 0.0357 0.6123 0.3520 0.0000
Class 6 0.0292 0.0066 0.5235 0.1647 0.2761
Class 7 0.0000 0.0222 0.6002 0.2924 0.0852
# Persist the full workspace (models, figures, helper objects) for downstream use
save.image("data2_lca2_adj4_alt_sin_po.RData")

library(tidyverse) # library() (not require()): fail loudly if missing

# Reproducibility: interactive table of attached packages and their versions
sesion_info <- devtools::session_info()
dplyr::select(
  tibble::as_tibble(sesion_info$packages),
  c(package, loadedversion, source)
) %>%
  DT::datatable(
    filter = 'top',
    # Labels now match the selected columns; the originals ('Variable',
    # 'Percentage') were copy-pasted from a different table.
    colnames = c('Row number' = 1, 'Package' = 2, 'Version' = 3, 'Source' = 4),
    caption = htmltools::tags$caption(
      style = 'caption-side: top; text-align: left;',
      '', htmltools::em('Packages')),
    options = list(
      initComplete = htmlwidgets::JS(
        "function(settings, json) {",
        "$(this.api().tables().body()).css({
          'font-family': 'Helvetica Neue',
          'font-size': '50%',
          'code-inline-font-size': '15%',
          'white-space': 'nowrap',
          'line-height': '0.75em',
          'min-height': '0.5em'
        });",
        "}")))