library('sigr')
set.seed(352532)
d <- data.frame(x=1:10, z=c(4,5))
d$y <- 2*d$x + 0.1*rnorm(nrow(d))
model <- lm(y~x+z, data=d)
d$pred <- predict(model, newdata = d)
s <- summary(model)
print(s)
##
## Call:
## lm(formula = y ~ x + z, data = d)
##
## Residuals:
##       Min        1Q    Median        3Q       Max
## -0.086937 -0.032358 -0.000563  0.052421  0.065637
##
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)
## (Intercept)  0.175154   0.178216   0.983    0.358
## x            2.020521   0.006953 290.602 1.51e-15 ***
## z           -0.054716   0.039941  -1.370    0.213
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.06219 on 7 degrees of freedom
## Multiple R-squared: 0.9999, Adjusted R-squared: 0.9999
## F-statistic: 4.347e+04 on 2 and 7 DF, p-value: 4.681e-15
print(s$fstatistic)
##    value    numdf    dendf
## 43473.84     2.00     7.00
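Note that the summary object stores the F statistic and its degrees of freedom, but not the corresponding p-value; that has to be computed separately. A quick base-R check (not part of sigr) that should reproduce the 4.681e-15 shown above:

# recover the overall F-test p-value from the stored statistic
pf(s$fstatistic[['value']],
   s$fstatistic[['numdf']],
   s$fstatistic[['dendf']],
   lower.tail = FALSE)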
cat(render(wrapFTest(model),
           pSmallCutoff=0))
F Test summary: (R2=0.9999, F(2,7)=4.347e+04, p=4.681e-15).
cat(render(wrapFTest(d, 'pred', 'y', nParameters=2),
           pSmallCutoff=0))
F Test summary: (R2=0.9999, F(2,7)=4.347e+04, p=4.681e-15).
Intentionally forget to inform wrapFTest
of the true number of parameters:
cat(render(wrapFTest(d, 'pred', 'y'),
           pSmallCutoff=0))
F Test summary: (R2=0.9999, F(1,8)=9.937e+04, p=1.148e-17).
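Why the statistic inflates: the F statistic is a deterministic function of the R-squared and the two degrees of freedom, so under-counting the parameters shifts the degrees of freedom from (2, 7) to (1, 8) and overstates the evidence. A rough sketch of that relation (the textbook formula, not sigr's internal code; it assumes the R-squared is computed from the 'pred' and 'y' columns in the usual way):

# F = (R2/numdf) / ((1 - R2)/dendf), with numdf = number of model parameters
# and dendf = nrow(d) - numdf - 1
r2 <- 1 - sum((d$y - d$pred)^2) / sum((d$y - mean(d$y))^2)
n  <- nrow(d)
(r2 / 2) / ((1 - r2) / (n - 2 - 1))  # 2 parameters: F(2, 7), as reported by lm
(r2 / 1) / ((1 - r2) / (n - 1 - 1))  # 1 parameter assumed: F(1, 8), inflated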