library('lava')
We consider the measurement models given by
\[X_{j} = \eta_{1} + \epsilon_{j}^{x}, \quad j=1,2,3\]
\[Y_{j} = \eta_{2} + \epsilon_{j}^{y}, \quad j=1,2,3\]
and a structural model given by
\[\eta_{2} = f(\eta_{1}) + Z + \zeta_{2}\]
\[\eta_{1} = Z + \zeta_{1}\]
with iid measurement errors \(\epsilon_{j}^{x},\epsilon_{j}^{y}\sim\mathcal{N}(0,1)\), \(j=1,2,3\), residual terms \(\zeta_{1},\zeta_{2}\sim\mathcal{N}(0,1)\), and a standard normally distributed covariate \(Z\). To simulate from this model we use the following syntax:
f <- function(x) cos(1.25*x) + x - 0.25*x^2
m <- lvm(x1+x2+x3 ~ eta1, y1+y2+y3 ~ eta2, latent=~eta1+eta2)
regression(m) <- eta1+eta2 ~ z
functional(m, eta2~eta1) <- f
d <- sim(m, n=200, seed=42) # Default is all parameters are 1
plot(m, plot.engine="visNetwork")
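Note that the simulated data set d also contains the latent variables eta1 and eta2; these are unobserved in practice but are used further below when plotting the estimated functional relationship. A quick look at the first rows (output not shown):

head(d)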
We refer to [holstjoergensen_lava] for details on the syntax for model specification.
To estimate the parameters using the two-stage estimator described in [lava_nlin], the first step is to specify the two measurement models
m1 <- lvm(x1+x2+x3 ~ eta1, eta1 ~ z, latent=~eta1)
m2 <- lvm(y1+y2+y3 ~ eta2, eta2 ~ z, latent=~eta2)
Next, we specify a quadratic relationship between the two latent variables, adding both \(\eta_{1}\) and \(\eta_{1}^{2}\) as predictors of \(\eta_{2}\) (these appear as eta2~eta1_1 and eta2~eta1_2 in the output below)
nonlinear(m2, type="quadratic") <- eta2 ~ eta1
and the model can then be estimated using the two-stage estimator
e1 <- twostage(m1, m2, data=d)
e1
#> Estimate Std. Error Z-value P-value
#> Measurements:
#> y2~eta2 0.97686 0.03451 28.30865 <1e-12
#> y3~eta2 1.04485 0.03485 29.98153 <1e-12
#> Regressions:
#> eta2~z 0.88513 0.20778 4.25997 2.045e-05
#> eta2~eta1_1 1.14072 0.17410 6.55194 5.679e-11
#> eta2~eta1_2 -0.45055 0.07161 -6.29199 3.134e-10
#> Intercepts:
#> y2 -0.12198 0.10915 -1.11749 0.2638
#> y3 -0.09879 0.10545 -0.93680 0.3489
#> eta2 0.67814 0.17363 3.90567 9.397e-05
#> Residual Variances:
#> y1 1.30730 0.17743 7.36790
#> y2 1.11056 0.14478 7.67064
#> y3 0.80961 0.13203 6.13219
#> eta2 2.08483 0.28985 7.19274
We see a clear, statistically significant effect of the second-order term (eta2~eta1_2). For comparison, we can also estimate the full MLE of the linear model:
e0 <- estimate(regression(m1%++%m2, eta2~eta1), d) ## '%++%' merges the two sub-models
estimate(e0, keep="^eta2~[a-z]", regex=TRUE) ## Extract coef. matching reg.ex.
#> Estimate Std.Err 2.5% 97.5% P-value
#> eta2~eta1 1.4140 0.2261 0.97083 1.857 4.014e-10
#> eta2~z 0.6374 0.2778 0.09291 1.182 2.177e-02
Next, we calculate predictions from the quadratic model using the estimated parameters: \[ \mathbb{E}_{\widehat{\theta}_{2}}(\eta_{2} \mid \eta_{1}, Z=0) \]
newd <- expand.grid(eta1=seq(-4, 4, by=0.1), z=0)
pred1 <- predict(e1, newdata=newd, x=TRUE)
head(pred1)
#> y1 y2 y3 eta2
#> [1,] -11.093569 -10.958869 -11.689950 -11.093569
#> [2,] -10.623561 -10.499736 -11.198861 -10.623561
#> [3,] -10.162565 -10.049406 -10.717187 -10.162565
#> [4,] -9.710579 -9.607878 -10.244928 -9.710579
#> [5,] -9.267605 -9.175153 -9.782084 -9.267605
#> [6,] -8.833641 -8.751230 -9.328656 -8.833641
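These predictions can be plotted against \(\eta_{1}\) to inspect the estimated quadratic relationship (a minimal sketch using base graphics; as shown above, pred1 contains one column of predictions per endogenous variable, including eta2):

plot(newd$eta1, pred1[,"eta2"], type="l", lwd=2,
     xlab=expression(eta[1]), ylab=expression(eta[2]))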
To obtain a potentially better fit, we next proceed with a natural cubic spline
kn <- seq(-3, 3, length.out=5)
nonlinear(m2, type="spline", knots=kn) <- eta2 ~ eta1
e2 <- twostage(m1, m2, data=d)
e2
#> Estimate Std. Error Z-value P-value
#> Measurements:
#> y2~eta2 0.97752 0.03455 28.29248 <1e-12
#> y3~eta2 1.04508 0.03488 29.96248 <1e-12
#> Regressions:
#> eta2~z 0.86729 0.20273 4.27795 1.886e-05
#> eta2~eta1_1 2.86231 0.67270 4.25495 2.091e-05
#> eta2~eta1_2 0.00344 0.10097 0.03409 0.9728
#> eta2~eta1_3 -0.26270 0.29398 -0.89360 0.3715
#> eta2~eta1_4 0.50778 0.35191 1.44293 0.149
#> Intercepts:
#> y2 -0.12185 0.10922 -1.11563 0.2646
#> y3 -0.09874 0.10545 -0.93638 0.3491
#> eta2 1.83814 1.66416 1.10454 0.2694
#> Residual Variances:
#> y1 1.31286 0.17750 7.39647
#> y2 1.10412 0.14455 7.63850
#> y3 0.81124 0.13185 6.15286
#> eta2 1.99404 0.27004 7.38416
Confidence limits can be obtained via the Delta method using the estimate method:
p <- cbind(eta1=newd$eta1,
           estimate(e2, f=function(p) predict(e2, p=p, newdata=newd))$coefmat)
head(p)
#> eta1 Estimate Std.Err 2.5% 97.5% P-value
#> p1 -4.0 -9.611119 1.2650975 -12.09066 -7.131573 3.027546e-14
#> p2 -3.9 -9.324887 1.2054915 -11.68761 -6.962167 1.031269e-14
#> p3 -3.8 -9.038656 1.1467339 -11.28621 -6.791099 3.219584e-15
#> p4 -3.7 -8.752425 1.0889618 -10.88675 -6.618099 9.176287e-16
#> p5 -3.6 -8.466193 1.0323409 -10.48954 -6.442842 2.384616e-16
#> p6 -3.5 -8.179962 0.9770712 -10.09499 -6.264938 5.668684e-17
The fitted function and its confidence limits can then be plotted with the following code:
plot(I(eta2-z) ~ eta1, data=d, col=Col("black",0.5), pch=16,
xlab=expression(eta[1]), ylab=expression(eta[2]), xlim=c(-4,4))
lines(Estimate ~ eta1, data=as.data.frame(p), col="darkblue", lwd=5)
confband(p[,1], lower=p[,4], upper=p[,5], polygon=TRUE,
border=NA, col=Col("darkblue",0.2))
A more formal comparison of the different models can be obtained by cross-validation. Here we specify linear, quadratic, and natural cubic spline models with 4 and 6 degrees of freedom.
<- nonlinear(m2, type="linear", eta2~eta1)
m2a <- nonlinear(m2, type="quadratic", eta2~eta1)
m2b <- seq(-3,3,length.out=5)
kn1 <- seq(-3,3,length.out=8)
kn2 <- nonlinear(m2, type="spline", knots=kn1, eta2~eta1)
m2c <- nonlinear(m2, type="spline", knots=kn2, eta2~eta1) m2d
To assess the model fit, the average root mean squared error (RMSE) is estimated with 5-fold cross-validation repeated twice
## Scale models in stage 2 to allow for a fair RMSE comparison
d0 <- d
for (i in endogenous(m2))
    d0[,i] <- scale(d0[,i], center=TRUE, scale=TRUE)
## Repeated 5-fold cross-validation:
ff <- lapply(list(linear=m2a, quadratic=m2b, spline4=m2c, spline6=m2d),
             function(m) function(data,...)
                 twostage(m1, m, data=data, stderr=FALSE,
                          control=list(start=coef(e0), constrain=TRUE)))
fit.cv <- cv(ff, data=d, K=5, rep=2, mc.cores=parallel::detectCores(), seed=1)
summary(fit.cv)
#> RMSE
#> linear 4.616861
#> quadratic 3.283140
#> spline4 3.067121
#> spline6 3.119909
Here the RMSE is in favour of the spline model with 4 degrees of freedom:
fit <- lapply(list(m2a, m2b, m2c, m2d),
              function(x) {
                  e <- twostage(m1, x, data=d)
                  pr <- cbind(eta1=newd$eta1, predict(e, newdata=newd$eta1, x=TRUE))
                  return(list(estimate=e, predict=as.data.frame(pr)))
              })
plot(I(eta2-z) ~ eta1, data=d, col=Col("black",0.5), pch=16,
     xlab=expression(eta[1]), ylab=expression(eta[2]), xlim=c(-4,4))
col <- c("orange","darkred","darkgreen","darkblue")
lty <- c(3,4,1,5)
for (i in seq_along(fit)) {
    with(fit[[i]]$predict, lines(eta2 ~ eta1, col=col[i], lwd=4, lty=lty[i]))
}
legend("bottomright",
       c("linear","quadratic","spline(df=4)","spline(df=6)"),
       col=col, lty=lty, lwd=3)
For convenience, the function twostageCV can be used to carry out the cross-validation (and also to choose the number of mixture components via the nmix argument; see the section below). For example,
selmod <- twostageCV(m1, m2, data=d, df=2:4, nmix=1:2,
                     nfolds=2, rep=1, mc.cores=parallel::detectCores())
applies cross-validation (here just 2 folds for simplicity) to select the best spline with degrees of freedom varying from 2 to 4 (the linear model is automatically included)
selmod
#> Length Class Mode
#> model1 11 summary.lvm.mixture list
#> AIC1 2 -none- numeric
#> cv 4 -none- numeric
#> knots 4 -none- list
#> model2 11 summary.lvmfit list
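The individual components can be extracted directly from the returned object, for example (a quick inspection, using the component names listed above):

selmod$cv    ## Cross-validation results for the candidate models
selmod$knots ## Knots of the candidate spline models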
Next, we show how to specify a general functional relation of multiple latent or exogenous variables. This is achieved via the predict.fun argument. To illustrate this, we include interactions between the latent variable \(\eta_{1}\) and a dichotomized version of the covariate \(z\).
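As seen in the code below, the function passed to predict.fun receives the conditional mean (mu) and conditional variance (var) of \(\eta_{1}\) given the stage 1 measurements, and must return the conditional expectation of each nonlinear predictor; for the quadratic terms this relies on \[\mathbb{E}(\eta_{1}^{2}\mid X, Z) = \mathbb{E}(\eta_{1}\mid X, Z)^{2} + \operatorname{Var}(\eta_{1}\mid X, Z).\]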
d$g <- (d$z<0)*1 ## Group variable
mm1 <- regression(m1, ~g) # Add grouping variable as exogenous variable (effect specified via 'predict.fun')
mm2 <- regression(m2, eta2 ~ u1+u2+u1:g+u2:g+z)
pred <- function(mu, var, data, ...) {
    cbind("u1"=mu[,1], "u2"=mu[,1]^2+var[1],
          "u1:g"=mu[,1]*data[,"g"], "u2:g"=(mu[,1]^2+var[1])*data[,"g"])
}
ee1 <- twostage(mm1, model2=mm2, data=d, predict.fun=pred)
estimate(ee1, keep="eta2~u", regex=TRUE)
#> Estimate Std.Err 2.5% 97.5% P-value
#> eta2~u1 0.9891 0.3020 0.3971 1.5810 0.001057
#> eta2~u2 -0.3962 0.1443 -0.6791 -0.1133 0.006047
#> eta2~u1:g 0.4487 0.4620 -0.4568 1.3543 0.331409
#> eta2~u2:g 0.0441 0.2166 -0.3804 0.4686 0.838667
A formal test shows no statistically significant effect of this interaction:
summary(estimate(ee1,keep="(:g)", regex=TRUE))
#> Call: estimate.default(x = ee1, keep = "(:g)", regex = TRUE)
#> __________________________________________________
#> Estimate Std.Err 2.5% 97.5% P-value
#> eta2~u1:g 0.4487 0.4620 -0.4568 1.3543 0.3314
#> eta2~u2:g 0.0441 0.2166 -0.3804 0.4686 0.8387
#>
#> Null Hypothesis:
#> [eta2~u1:g] = 0
#> [eta2~u2:g] = 0
#>
#> chisq = 0.9441, df = 2, p-value = 0.6237
Lastly, we demonstrate how the distributional assumptions of the stage 1 model can be relaxed by letting the conditional distribution of the latent variable given covariates follow a Gaussian mixture distribution. The following code explicitly defines the identifying parameter constraints of the model by setting the intercept of the first indicator variable, \(x_{1}\), to zero and the factor loading of the same variable to one.
m1 <- baptize(m1) ## Label all parameters
intercept(m1, ~x1+eta1) <- list(0, NA) ## Set intercept of x1 to zero; remove the label of eta1
regression(m1, x1~eta1) <- 1 ## Factor loading fixed to 1
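As a quick sanity check, the free parameters of the modified model can be listed (output not shown):

coef(m1)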
The mixture model may then be estimated using the mixture method (note that this requires the mets package to be installed). Parameters sharing the same label across the different mixture components are constrained to be identical; thus, only the intercept of \(\eta_{1}\) is allowed to vary between the mixture components.
set.seed(1)
em0 <- mixture(m1, k=2, data=d)
To decrease the risk of converging to a local maximum of the likelihood, we can rerun the estimation with different random starting values:
em0 <- NULL
ll <- c()
for (i in 1:5) {
    set.seed(i)
    em <- mixture(m1, k=2, data=d, control=list(trace=0))
    ll <- c(ll, logLik(em))
    if (is.null(em0) || logLik(em0) < tail(ll, 1))
        em0 <- em
}
summary(em0)
#> Cluster 1 (n=162, Prior=0.776):
#> --------------------------------------------------
#> Estimate Std. Error Z value Pr(>|z|)
#> Measurements:
#> x1~eta1 1.00000
#> x2~eta1 0.99581 0.07940 12.54099 <1e-12
#> x3~eta1 1.06345 0.08436 12.60541 <1e-12
#> Regressions:
#> eta1~z 1.06675 0.08527 12.50989 <1e-12
#> Intercepts:
#> x1 0.00000
#> x2 0.03845 0.09890 0.38883 0.6974
#> x3 -0.02549 0.10333 -0.24667 0.8052
#> eta1 0.20925 0.13162 1.58984 0.1119
#> Residual Variances:
#> x1 0.98540 0.13316 7.40025
#> x2 0.97180 0.13156 7.38695
#> x3 1.01316 0.14294 7.08815
#> eta1 0.29046 0.11129 2.61004
#>
#> Cluster 2 (n=38, Prior=0.224):
#> --------------------------------------------------
#> Estimate Std. Error Z value Pr(>|z|)
#> Measurements:
#> x1~eta1 1.00000
#> x2~eta1 0.99581 0.07940 12.54099 <1e-12
#> x3~eta1 1.06345 0.08436 12.60541 <1e-12
#> Regressions:
#> eta1~z 1.06675 0.08527 12.50989 <1e-12
#> Intercepts:
#> x1 0.00000
#> x2 0.03845 0.09890 0.38883 0.6974
#> x3 -0.02549 0.10333 -0.24667 0.8052
#> eta1 -1.44290 0.25867 -5.57812 2.431e-08
#> Residual Variances:
#> x1 0.98540 0.13316 7.40025
#> x2 0.97180 0.13156 7.38695
#> x3 1.01316 0.14294 7.08815
#> eta1 0.29046 0.11129 2.61004
#> --------------------------------------------------
#> AIC= 1958.803
#> ||score||^2= 8.81839e-06
Measured by AIC, there is a slight improvement in model fit using the mixture model:
e0 <- estimate(m1, data=d)
AIC(e0, em0)
#> df AIC
#> e0 10 1961.839
#> em0 12 1958.803
The spline model may then be estimated as before with the twostage method
em2 <- twostage(em0, m2, data=d)
em2
#> Estimate Std. Error Z-value P-value
#> Measurements:
#> y2~eta2 0.97823 0.03469 28.19904 <1e-12
#> y3~eta2 1.04530 0.03484 30.00721 <1e-12
#> Regressions:
#> eta2~z 1.02884 0.22330 4.60752 4.075e-06
#> eta2~eta1_1 2.80413 0.65493 4.28155 1.856e-05
#> eta2~eta1_2 -0.02249 0.09996 -0.22499 0.822
#> eta2~eta1_3 -0.17333 0.28933 -0.59906 0.5491
#> eta2~eta1_4 0.38672 0.33982 1.13801 0.2551
#> Intercepts:
#> y2 -0.12171 0.10925 -1.11407 0.2653
#> y3 -0.09870 0.10546 -0.93592 0.3493
#> eta2 2.12372 1.66552 1.27511 0.2023
#> Residual Variances:
#> y1 1.31872 0.17657 7.46861
#> y2 1.09690 0.14503 7.56340
#> y3 0.81345 0.13259 6.13509
#> eta2 1.99590 0.28454 7.01453
In this example the results are very similar to those of the Gaussian model:
plot(I(eta2-z) ~ eta1, data=d, col=Col("black",0.5), pch=16,
xlab=expression(eta[1]), ylab=expression(eta[2]))
lines(Estimate ~ eta1, data=as.data.frame(p), col="darkblue", lwd=5)
confband(p[,1], lower=p[,4], upper=p[,5], polygon=TRUE,
border=NA, col=Col("darkblue",0.2))
pm <- cbind(eta1=newd$eta1,
            estimate(em2, f=function(p) predict(e2, p=p, newdata=newd))$coefmat)
lines(Estimate ~ eta1, data=as.data.frame(pm), col="darkred", lwd=5)
confband(pm[,1], lower=pm[,4], upper=pm[,5], polygon=TRUE,
border=NA, col=Col("darkred",0.2))
legend("bottomright", c("Gaussian","Mixture"),
col=c("darkblue","darkred"), lwd=2, bty="n")
[holstjoergensen_lava] Klaus Kähler Holst & Esben Budtz-Jørgensen, Linear latent variable models: the lava-package, Computational Statistics, 28(4), 1385-1452 (2013).
[lava_nlin] Klaus Kähler Holst & Esben Budtz-Jørgensen, A two-stage estimation procedure for non-linear structural equation models, Biostatistics (2020), in press.