This example uses the immortal Holzinger Swineford data set.
library(OpenMx)
data(HS.ability.data)
HS.ability.data$ageym <- HS.ability.data$agey*12 + HS.ability.data$agem
HS.ability.data$male <- as.numeric(HS.ability.data$Gender == 'Male')

The OpenMx model looks like this:

# Specify variables
indicators <- c('visual','cubes','paper','flags','paperrev','flagssub',
                'general','paragrap','sentence','wordc','wordm')
covariates <- c("male","ageym","grade")
latents <- c("g", covariates)
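As a quick sanity check (my addition, not part of the original example), you can confirm that every indicator and covariate name is actually a column of the data:

# All model variables should exist in the data frame
stopifnot(all(c(indicators, covariates) %in% names(HS.ability.data)))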
# Build the model
mimicModel <- mxModel(
    "MIMIC", type="RAM",
    manifestVars = indicators, latentVars = latents,
    # Set up exogenous predictors
    mxPath("one", covariates, labels=paste0('data.', covariates), free=FALSE),
    # Fix factor variance
    mxPath('g', arrows=2, free=FALSE, values=1),
    # Error variances:
    mxPath(from=indicators, arrows=2, free=TRUE, values=10),
    # Means (saturated means model):
    mxPath(from="one", to=indicators, values=rep(5, length(indicators))),
    # Loadings:
    mxPath(from="g", to=indicators, values=.5),
    # Covariate paths
    mxPath(covariates, "g", labels=covariates),
    # Data
    mxData(observed = HS.ability.data, type = "raw"))
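As a side note, if you want to eyeball the specification before fitting, OpenMx can export a RAM path diagram in Graphviz dot format (the file name here is just an example):

# Write the path diagram to a dot file for rendering with Graphviz
omxGraphviz(mimicModel, dotFilename="mimic.dot")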
# Get some good starting values for regularization. This
# saves 2-3 minutes on my laptop.
mimicModel <- mxRun(mimicModel)
## Running MIMIC with 36 parameters
Add the penalty:
mimicModel <- mxModel(
    mimicModel,
    mxMatrix('Full', 1, 1, free=TRUE, values=0, labels="lambda", name="hparam"),
    # Set scale to ML estimates for adaptive lasso
    mxPenaltyLASSO(what=covariates, name="LASSO",
                   scale = coef(mimicModel)[covariates],
                   lambda = 0, lambda.max = 2, lambda.step = .04)
)
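For intuition, the quantity that the LASSO penalty adds to the fit is roughly lambda times the sum of the absolute coefficients divided by their scale values; supplying the ML estimates as the scale is what makes this an adaptive lasso. A rough sketch of that term (a paraphrase for illustration, not OpenMx's actual internals):

# Sketch: coefficients with large ML estimates are penalized less,
# coefficients with small ML estimates are penalized more
penaltyTerm <- function(theta, scale, lambda) lambda * sum(abs(theta / scale))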
Run the regularization. With only three covariates, the plot of results is not very exciting. We learn that sex is not a good predictor of this factor.
regMIMIC <- mxPenaltySearch(mimicModel)
## Running MIMIC with 37 parameters
## Warning: In model 'MIMIC' Optimizer returned a non-zero status code 6. The model
## does not satisfy the first-order optimality conditions to the required accuracy,
## and no improved point for the merit function could be found during the final
## linesearch (Mx status RED)
detail <- regMIMIC$compute$steps$PS$output$detail
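The detail data frame holds one row per value of lambda, with the parameter estimates and fit indices at that penalty level. A quick peek (illustrative):

head(detail[, c('lambda', 'EBIC', covariates)])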
library(reshape2)
library(ggplot2)
est <- detail[, c(covariates, 'lambda')]
ggplot(melt(est, id.vars = 'lambda')) +
geom_line(aes(x=lambda, y=value, color=variable)) +
geom_vline(aes(xintercept=coef(regMIMIC)['lambda']),
linetype="dashed", alpha=.5)
The regularized estimates of the covariate paths can be found here:
detail[detail$EBIC == min(detail$EBIC), covariates]
## male ageym grade
## 35 3.551246e-07 -0.02792019 1.060184
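Equivalently, you can pull out the EBIC-selected lambda directly:

detail$lambda[which.min(detail$EBIC)]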
The regularization causes a lot of bias. One way to deal with this is to fix the zeroed parameters to zero, discard the regularization penalty, and re-fit the model.
regMIMIC <- mxPenaltyZap(regMIMIC)
## Zapping 'male'
## Fixing 'lambda'
## Tip: Use
## model = mxRun(model)
## to re-estimate the model without any penalty terms.
regMIMIC <- mxRun(regMIMIC)
## Running MIMIC with 35 parameters
summary(regMIMIC)
## Summary of MIMIC
##
## free parameters:
## name matrix row col Estimate Std.Error A
## 1 MIMIC.A[1,12] A visual g 2.61817539 0.359734332
## 2 MIMIC.A[2,12] A cubes g 0.93495892 0.249140391
## 3 MIMIC.A[3,12] A paper g 0.70084647 0.148839958
## 4 MIMIC.A[4,12] A flags g 1.57351355 0.485114112
## 5 MIMIC.A[5,12] A paperrev g 0.99010508 0.241840086
## 6 MIMIC.A[6,12] A flagssub g 3.33338049 0.638126581
## 7 MIMIC.A[7,12] A general g 9.23700633 0.532147467
## 8 MIMIC.A[8,12] A paragrap g 2.53491888 0.154443963
## 9 MIMIC.A[9,12] A sentence g 3.96091929 0.218438121
## 10 MIMIC.A[10,12] A wordc g 3.80400196 0.254970074
## 11 MIMIC.A[11,12] A wordm g 5.73048429 0.332716170
## 12 ageym A g ageym -0.02870552 0.005178867
## 13 grade A g grade 1.08144998 0.144108407
## 14 MIMIC.S[1,1] S visual visual 40.27135796 3.352951308 !
## 15 MIMIC.S[2,2] S cubes cubes 21.00804335 1.720709660
## 16 MIMIC.S[3,3] S paper paper 7.36561698 0.605079121
## 17 MIMIC.S[4,4] S flags flags 78.47431024 6.427213845
## 18 MIMIC.S[5,5] S paperrev paperrev 8.35238391 0.994207854 !
## 19 MIMIC.S[6,6] S flagssub flagssub 56.56060067 6.793459063
## 20 MIMIC.S[7,7] S general general 45.64552429 4.717436705 !
## 21 MIMIC.S[8,8] S paragrap paragrap 4.06597962 0.402767938
## 22 MIMIC.S[9,9] S sentence sentence 6.80450465 0.749101684
## 23 MIMIC.S[10,10] S wordc wordc 13.88565834 1.285595220 !
## 24 MIMIC.S[11,11] S wordm wordm 17.27850943 1.798287305
## 25 MIMIC.M[1,1] M 1 visual 21.29314806 2.978783300
## 26 MIMIC.M[1,2] M 1 cubes 21.38053062 1.281420562
## 27 MIMIC.M[1,3] M 1 paper 12.00170261 0.885460579
## 28 MIMIC.M[1,4] M 1 flags 13.00215459 2.293226807
## 29 MIMIC.M[1,5] M 1 paperrev 12.12972056 1.350150836
## 30 MIMIC.M[1,6] M 1 flagssub 24.45758218 4.270239961
## 31 MIMIC.M[1,7] M 1 general 11.26617876 9.736136751
## 32 MIMIC.M[1,8] M 1 paragrap 1.12587264 2.698409922
## 33 MIMIC.M[1,9] M 1 sentence 4.77295161 4.165463544
## 34 MIMIC.M[1,10] M 1 wordc 14.03580671 4.049673917
## 35 MIMIC.M[1,11] M 1 wordm -2.91446624 6.048415610
##
## Model Statistics:
## | Parameters | Degrees of Freedom | Fit (-2lnL units)
## Model: 35 2964 17843.68
## Saturated: 77 2922 NA
## Independence: 22 2977 NA
## Number of observations/statistics: 301/2999
##
## Information Criteria:
## | df Penalty | Parameters Penalty | Sample-Size Adjusted
## AIC: 11915.6773 17913.68 17923.19
## BIC: 927.8025 18043.43 17932.43
## To get additional fit indices, see help(mxRefModels)
## timestamp: 2022-02-21 08:50:02
## Wall clock time: 1.383407 secs
## optimizer: SLSQP
## OpenMx version number: 2.20.6
## Need help? See help(mxSummary)
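As a follow-up not shown above, likelihood-based confidence intervals for the surviving covariate effects can be requested with mxCI (this is my addition, assuming the zapped model is still in the workspace):

# Profile-likelihood CIs for the retained covariate paths
regMIMIC <- mxRun(mxModel(regMIMIC, mxCI(c('ageym', 'grade'))), intervals=TRUE)
summary(regMIMIC)$CI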