---
title: "mfrmr Workflow"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{mfrmr Workflow}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
# Global chunk options: show code + results together, mark output lines,
# and use a consistent figure size across the vignette.
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  fig.width = 7,
  fig.height = 5
)
```

This vignette outlines a reproducible workflow for:

- loading packaged simulation data
- fitting an MFRM with flexible facets
- running diagnostics and residual PCA
- generating APA and visual summary outputs
- moving from fitted models into design simulation and fixed-calibration prediction

For a plot-first companion guide, see the separate `mfrmr-visual-diagnostics`
vignette.

For speed-sensitive work, a useful pattern is:

- start with `method = "JML"` or `quad_points = 7`
- use `diagnose_mfrm(..., residual_pca = "none")` for the first pass
- reuse the same diagnostics object in downstream reports and plots

## Load Data

```{r load-data}
library(mfrmr)

# Enumerate the datasets shipped with the package.
list_mfrmr_data()

data("ej2021_study1", package = "mfrmr")
head(ej2021_study1)

# load_mfrmr_data() is an alternative accessor; both routes return the
# same object, as the identical() check below demonstrates.
study1_alt <- load_mfrmr_data("study1")
identical(names(ej2021_study1), names(study1_alt))
```

## Minimal Runnable Example

We start with the packaged `example_core` dataset. It is intentionally
compact, category-complete, and generated from a single latent trait plus
facet main effects so that help-page examples stay fast without relying on
undersized toy data.
The same object is also available via
`data("mfrmr_example_core", package = "mfrmr")`:

```{r toy-setup}
data("mfrmr_example_core", package = "mfrmr")
toy <- mfrmr_example_core

# JML with a small iteration cap keeps this example fast.
fit_toy <- fit_mfrm(
  data = toy,
  person = "Person",
  facets = c("Rater", "Criterion"),
  score = "Score",
  method = "JML",
  model = "RSM",
  maxit = 15
)

# Skip residual PCA on the first pass; it can be added later if needed.
diag_toy <- diagnose_mfrm(fit_toy, residual_pca = "none")

summary(fit_toy)$overview
summary(diag_toy)$overview
names(plot(fit_toy, draw = FALSE))
```

## Diagnostics and Reporting

```{r diagnostics-reporting}
t4_toy <- unexpected_response_table(
  fit_toy,
  diagnostics = diag_toy,
  abs_z_min = 1.5,
  prob_max = 0.4,
  top_n = 10
)
t12_toy <- fair_average_table(fit_toy, diagnostics = diag_toy)
t13_toy <- bias_interaction_report(
  estimate_bias(
    fit_toy, diag_toy,
    facet_a = "Rater", facet_b = "Criterion", max_iter = 2
  ),
  top_n = 10
)

class(summary(t4_toy))
class(summary(t12_toy))
class(summary(t13_toy))

names(plot(t4_toy, draw = FALSE))
names(plot(t12_toy, draw = FALSE))
names(plot(t13_toy, draw = FALSE))
```

## Fit and Diagnose with Full Data

For a realistic analysis, we use the packaged Study 1 dataset:

```{r fit-full}
fit <- fit_mfrm(
  data = ej2021_study1,
  person = "Person",
  facets = c("Rater", "Criterion"),
  score = "Score",
  method = "MML",
  model = "RSM",
  quad_points = 7
)

# Named diag_full (not `diag`) to avoid masking base::diag().
diag_full <- diagnose_mfrm(
  fit,
  residual_pca = "none"
)

summary(fit)
summary(diag_full)
```

If you need dimensionality evidence for a final report, you can add residual
PCA after the initial diagnostic pass:

```{r fit-full-pca}
diag_pca <- diagnose_mfrm(
  fit,
  residual_pca = "both",
  pca_max_factors = 6
)
summary(diag_pca)
```

## Residual PCA and Reporting

```{r residual-pca}
pca <- analyze_residual_pca(diag_pca, mode = "both")
plot_residual_pca(pca, mode = "overall", plot_type = "scree")
```

```{r bias-apa}
data("mfrmr_example_bias", package = "mfrmr")
bias_df <- mfrmr_example_bias

fit_bias <- fit_mfrm(
  bias_df,
  person = "Person",
  facets = c("Rater", "Criterion"),
  score = "Score",
  method = "MML",
  model = "RSM",
  quad_points = 7
)
diag_bias <- diagnose_mfrm(fit_bias, residual_pca = "none")

bias <- estimate_bias(
  fit_bias, diag_bias,
  facet_a = "Rater", facet_b = "Criterion"
)
fixed <- build_fixed_reports(bias)
apa <- build_apa_outputs(fit_bias, diag_bias, bias_results = bias)

mfrm_threshold_profiles()
vis <- build_visual_summaries(
  fit_bias, diag_bias,
  threshold_profile = "standard"
)
vis$warning_map$residual_pca_overall
```

The same `example_bias` dataset also carries a `Group` variable so
DIF-oriented examples can show a non-null pattern instead of a fully clean
result. It can be loaded either with `load_mfrmr_data("example_bias")` or
`data("mfrmr_example_bias", package = "mfrmr")`.

## Human-Readable Reporting API

```{r reporting-api}
spec <- specifications_report(fit, title = "Study run")
data_qc <- data_quality_report(
  fit,
  data = ej2021_study1,
  person = "Person",
  facets = c("Rater", "Criterion"),
  score = "Score"
)
iter <- estimation_iteration_report(fit, max_iter = 8)
subset_rep <- subset_connectivity_report(fit, diagnostics = diag_full)
facet_stats <- facet_statistics_report(fit, diagnostics = diag_full)
cat_structure <- category_structure_report(fit, diagnostics = diag_full)
cat_curves <- category_curves_report(fit, theta_points = 101)
bias_rep <- bias_interaction_report(bias, top_n = 20)
plot_bias_interaction(bias_rep, plot = "scatter")
```

## Design Simulation and Prediction

The package also supports a separate simulation/prediction layer. The key
distinction is:

- `evaluate_mfrm_design()` and `predict_mfrm_population()` are design-level
  helpers that summarize expected operating characteristics under an explicit
  simulation specification.
- `predict_mfrm_units()` and `sample_mfrm_plausible_values()` score future or
  partially observed persons under a fixed `MML` calibration.
```{r design-prediction}
sim_spec <- build_mfrm_sim_spec(
  n_person = 30,
  n_rater = 4,
  n_criterion = 4,
  raters_per_person = 2,
  assignment = "rotating"
)

# Two replications with a fixed seed keep the example fast yet reproducible.
pred_pop <- predict_mfrm_population(
  sim_spec = sim_spec,
  reps = 2,
  maxit = 10,
  seed = 1
)
summary(pred_pop)$forecast[, c("Facet", "MeanSeparation", "McseSeparation")]

# Refit on a person subset under MML so new persons can be scored against a
# fixed calibration. suppressWarnings() keeps convergence chatter out of the
# rendered vignette; warnings are expected with maxit this small.
keep_people <- unique(toy$Person)[1:18]
toy_mml <- suppressWarnings(
  fit_mfrm(
    toy[toy$Person %in% keep_people, , drop = FALSE],
    person = "Person",
    facets = c("Rater", "Criterion"),
    score = "Score",
    method = "MML",
    quad_points = 5,
    maxit = 15
  )
)

# A previously unseen person with two observed scores; Rater is recycled
# across both rows by data.frame().
new_units <- data.frame(
  Person = c("NEW01", "NEW01"),
  Rater = unique(toy$Rater)[1],
  Criterion = unique(toy$Criterion)[1:2],
  Score = c(2, 3)
)

pred_units <- predict_mfrm_units(toy_mml, new_units, n_draws = 0)
pv_units <- sample_mfrm_plausible_values(
  toy_mml, new_units,
  n_draws = 2, seed = 1
)

summary(pred_units)$estimates[, c("Person", "Estimate", "Lower", "Upper")]
summary(pv_units)$draw_summary[, c("Person", "Draws", "MeanValue")]
```