1 /* pspp - a program for statistical analysis.
2 Copyright (C) 2012 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20   1. "Coding Logistic Regression with Newton-Raphson", James McCaffrey,
21      http://msdn.microsoft.com/en-us/magazine/jj618304.aspx
23   2. "SPSS Statistical Algorithms", chapter "LOGISTIC REGRESSION Algorithms"
26   The Newton-Raphson method finds successive approximations to $\bf b$ where
27 approximation ${\bf b}_t$ is (hopefully) better than the previous ${\bf b}_{t-1}$.
29   ${\bf b}_t = {\bf b}_{t-1} + ({\bf X}^T{\bf W}_{t-1}{\bf X})^{-1}{\bf X}^T({\bf y} - {\bf \pi}_{t-1})$
32 $\bf X$ is the $n \times p$ design matrix, $n$ being the number of cases,
33 $p$ the number of parameters, \par
34 $\bf W$ is the diagonal matrix whose diagonal elements are
35   $\hat{\pi}_0(1 - \hat{\pi}_0), \, \hat{\pi}_1(1 - \hat{\pi}_1)\dots \hat{\pi}_{n-1}(1 - \hat{\pi}_{n-1})$
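     and ${\bf \pi}_t$ is the vector of predicted probabilities under the estimate ${\bf b}_t$,
     whose $i$th element is the logistic function of the linear predictor,
     $\hat{\pi}_i = 1 / (1 + e^{-{\bf x}_i {\bf b}_t})$. \par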
42 #include <gsl/gsl_blas.h>
44 #include <gsl/gsl_linalg.h>
45 #include <gsl/gsl_cdf.h>
46 #include <gsl/gsl_matrix.h>
47 #include <gsl/gsl_vector.h>
50 #include "data/case.h"
51 #include "data/casegrouper.h"
52 #include "data/casereader.h"
53 #include "data/dataset.h"
54 #include "data/dictionary.h"
55 #include "data/format.h"
56 #include "data/value.h"
57 #include "language/command.h"
58 #include "language/dictionary/split-file.h"
59 #include "language/lexer/lexer.h"
60 #include "language/lexer/value-parser.h"
61 #include "language/lexer/variable-parser.h"
62 #include "libpspp/assertion.h"
63 #include "libpspp/ll.h"
64 #include "libpspp/message.h"
65 #include "libpspp/misc.h"
66 #include "math/categoricals.h"
67 #include "math/interaction.h"
68 #include "libpspp/hmap.h"
69 #include "libpspp/hash-functions.h"
71 #include "output/tab.h"
74 #define _(msgid) gettext (msgid)
79 #define PRINT_EACH_STEP 0x01
80 #define PRINT_SUMMARY 0x02
81 #define PRINT_CORR 0x04
82 #define PRINT_ITER 0x08
83 #define PRINT_GOODFIT 0x10
87 #define PRINT_DEFAULT (PRINT_SUMMARY | PRINT_EACH_STEP)
90 The constant parameters of the procedure.
91 That is, those which are set by the user.
95 /* The dependent variable */
96 const struct variable *dep_var;
98 /* The predictor variables (excluding categorical ones) */
99 const struct variable **predictor_vars;
100 size_t n_predictor_vars;
102 /* The categorical predictors */
103 struct interaction **cat_predictors;
104 size_t n_cat_predictors;
107 /* The union of the categorical and non-categorical variables */
108 const struct variable **indep_vars;
112 /* Which classes of missing vars are to be excluded */
113 enum mv_class exclude;
115 /* The weight variable */
116 const struct variable *wv;
118 /* The dictionary of the dataset */
119 const struct dictionary *dict;
121 /* True iff the constant (intercept) is to be included in the model */
124   /* The maximum number of iterations */
127 /* Other iteration limiting conditions */
132 /* The confidence interval (in percent) */
135 /* What results should be presented */
138   /* The cut point mapped onto the logit (linear predictor) scale; its inverse logit is the user-specified cut point */
139 double ilogit_cut_point;
143   /* The results and intermediate results of the procedure.
144 These are mutated as the procedure runs. Used for
145 temporary variables etc.
149   /* Used to indicate whether a pass should flag a warning when
150      invalid (i.e. negative or missing) weight values are encountered */
151 bool warn_bad_weight;
153 /* The two values of the dependent variable. */
158 /* The sum of caseweights */
161 /* The number of missing and nonmissing cases */
162 casenumber n_missing;
163 casenumber n_nonmissing;
168 /* The categoricals and their payload. Null if the analysis has no
169 categorical predictors */
170 struct categoricals *cats;
174 /* The estimates of the predictor coefficients */
175 gsl_vector *beta_hat;
177 /* The predicted classifications:
178 True Negative, True Positive, False Negative, False Positive */
179 double tn, tp, fn, fp;
184   Convert INPUT into a dichotomous scalar, according to how the dependent variable's values are mapped to the internal zero/one values.
186   For simple cases, this is a 1:1 mapping.
187   The return value is always either 0 or 1.
190 map_dependent_var (const struct lr_spec *cmd, const struct lr_result *res, const union value *input)
192 const int width = var_get_width (cmd->dep_var);
193 if (value_equal (input, &res->y0, width))
196 if (value_equal (input, &res->y1, width))
199 /* This should never happen. If it does, then y0 and/or y1 have probably not been set */
205 static void output_classification_table (const struct lr_spec *cmd, const struct lr_result *res);
207 static void output_categories (const struct lr_spec *cmd, const struct lr_result *res);
209 static void output_depvarmap (const struct lr_spec *cmd, const struct lr_result *);
211 static void output_variables (const struct lr_spec *cmd,
212 const struct lr_result *);
214 static void output_model_summary (const struct lr_result *,
215 double initial_likelihood, double likelihood);
217 static void case_processing_summary (const struct lr_result *);
220   /* Return the value of case C corresponding to the INDEX'th entry in the model
223 predictor_value (const struct ccase *c,
224 const struct variable **x, size_t n_x,
225 const struct categoricals *cats,
228 /* Values of the scalar predictor variables */
230 return case_data (c, x[index])->f;
232 /* Coded values of categorical predictor variables (or interactions) */
233 if (cats && index - n_x < categoricals_df_total (cats))
235 double x = categoricals_get_dummy_code_for_case (cats, index - n_x, c);
239 /* The constant term */
245   Return the estimated probability \hat\pi for case C, i.e. the inverse logit of the
246   linear predictor formed from the coefficient estimates in RES->beta_hat
249 pi_hat (const struct lr_spec *cmd,
250 const struct lr_result *res,
251 const struct variable **x, size_t n_x,
252 const struct ccase *c)
256 size_t n_coeffs = res->beta_hat->size;
260 pi += gsl_vector_get (res->beta_hat, res->beta_hat->size - 1);
264 for (v0 = 0; v0 < n_coeffs; ++v0)
266 pi += gsl_vector_get (res->beta_hat, v0) *
267 predictor_value (c, x, n_x, res->cats, v0);
270 pi = 1.0 / (1.0 + exp(-pi));
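  /* pi now holds the inverse logit (logistic function) of the linear predictor,
     i.e. the estimated probability that this case falls in the y1 category. */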
277   Calculates the Hessian matrix X' V X,
278   where: X is the n by N_X design matrix comprising the n cases in INPUT,
279   V is the diagonal matrix { (pi_hat_0)(1 - pi_hat_0), (pi_hat_1)(1 - pi_hat_1), ... (pi_hat_{n-1})(1 - pi_hat_{n-1}) },
280   the partial derivatives of the predicted values with respect to the linear predictor.
282   If ALL these derivatives are close to zero (i.e. every predicted value is essentially 0 or 1), then CONVERGED will be set to true.
286 hessian (const struct lr_spec *cmd,
287 struct lr_result *res,
288 struct casereader *input,
289 const struct variable **x, size_t n_x,
292 struct casereader *reader;
295 double max_w = -DBL_MAX;
297 gsl_matrix_set_zero (res->hessian);
299 for (reader = casereader_clone (input);
300 (c = casereader_read (reader)) != NULL; case_unref (c))
303 double pi = pi_hat (cmd, res, x, n_x, c);
305 double weight = dict_get_case_weight (cmd->dict, c, &res->warn_bad_weight);
306 double w = pi * (1 - pi);
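      /* w is this case's contribution to the diagonal of V.  If the largest
         such weight (tracked in max_w) stays below the EPS criterion, every
         predicted value is essentially 0 or 1, which is reported below. */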
311 for (v0 = 0; v0 < res->beta_hat->size; ++v0)
313 double in0 = predictor_value (c, x, n_x, res->cats, v0);
314 for (v1 = 0; v1 < res->beta_hat->size; ++v1)
316 double in1 = predictor_value (c, x, n_x, res->cats, v1);
317 double *o = gsl_matrix_ptr (res->hessian, v0, v1);
322 casereader_destroy (reader);
324 if ( max_w < cmd->min_epsilon)
327 msg (MN, _("All predicted values are either 1 or 0"));
332   /* Calculates the value X' (y - pi)
333      where X is the design matrix,
334      y is the vector of observed values of the dependent variable,
335      pi is the vector of estimates for y.
338      The log likelihood is accumulated in LLIKELIHOOD;
339      the predicted classifications are accumulated into the tn, fn, tp and fp members of RES.
342 xt_times_y_pi (const struct lr_spec *cmd,
343 struct lr_result *res,
344 struct casereader *input,
345 const struct variable **x, size_t n_x,
346 const struct variable *y_var,
349 struct casereader *reader;
351 gsl_vector *output = gsl_vector_calloc (res->beta_hat->size);
354 res->tn = res->tp = res->fn = res->fp = 0;
355 for (reader = casereader_clone (input);
356 (c = casereader_read (reader)) != NULL; case_unref (c))
360 double pi = pi_hat (cmd, res, x, n_x, c);
361 double weight = dict_get_case_weight (cmd->dict, c, &res->warn_bad_weight);
364 double y = map_dependent_var (cmd, res, case_data (c, y_var));
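      /* Accumulate the weighted Bernoulli log likelihood:
         weight * (y * log(pi) + (1 - y) * log(1 - pi)). */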
366 *llikelihood += (weight * y) * log (pi) + log (1 - pi) * weight * (1 - y);
368 for (v0 = 0; v0 < res->beta_hat->size; ++v0)
370 double in0 = predictor_value (c, x, n_x, res->cats, v0);
371 double *o = gsl_vector_ptr (output, v0);
372 *o += in0 * (y - pi) * weight;
373 pred_y += gsl_vector_get (res->beta_hat, v0) * in0;
376   /* Count the number of cases which would be correctly/incorrectly classified by this estimated model
378 if (pred_y <= cmd->ilogit_cut_point)
394 casereader_destroy (reader);
401   /* "payload" functions for the categoricals.
402      Their only job is to accumulate the frequency of each category.
407 frq_create (const void *aux1 UNUSED, void *aux2 UNUSED)
409 return xzalloc (sizeof (double));
413 frq_update (const void *aux1 UNUSED, void *aux2 UNUSED,
414 void *ud, const struct ccase *c UNUSED , double weight)
421 frq_destroy (const void *aux1 UNUSED, void *aux2 UNUSED, void *user_data UNUSED)
429   Makes an initial pass through the data, doing the following:
431   * Checks that the dependent variable is dichotomous,
432   * Creates and initialises the categoricals,
433   * Accumulates summary results,
434   * Calculates necessary initial values,
435   * Creates an initial value for \hat\beta, the vector of estimators of \beta.
437   Returns true if successful
440 initial_pass (const struct lr_spec *cmd, struct lr_result *res, struct casereader *input)
442 const int width = var_get_width (cmd->dep_var);
445 struct casereader *reader;
454 size_t n_coefficients = cmd->n_predictor_vars;
458 /* Create categoricals if appropriate */
459 if (cmd->n_cat_predictors > 0)
461 res->cp.create = frq_create;
462 res->cp.update = frq_update;
463 res->cp.calculate = NULL;
464 res->cp.destroy = frq_destroy;
466 res->cats = categoricals_create (cmd->cat_predictors, cmd->n_cat_predictors,
467 cmd->wv, cmd->exclude, MV_ANY);
469 categoricals_set_payload (res->cats, &res->cp, cmd, res);
473 for (reader = casereader_clone (input);
474 (c = casereader_read (reader)) != NULL; case_unref (c))
477 bool missing = false;
478 double weight = dict_get_case_weight (cmd->dict, c, &res->warn_bad_weight);
479 const union value *depval = case_data (c, cmd->dep_var);
481 if (var_is_value_missing (cmd->dep_var, depval, cmd->exclude))
486 for (v = 0; v < cmd->n_indep_vars; ++v)
488 const union value *val = case_data (c, cmd->indep_vars[v]);
489 if (var_is_value_missing (cmd->indep_vars[v], val, cmd->exclude))
496 /* Accumulate the missing and non-missing counts */
504 /* Find the values of the dependent variable */
507 value_clone (&res->y0, depval, width);
512 if ( !value_equal (&res->y0, depval, width))
514 value_clone (&res->y1, depval, width);
520 if (! value_equal (&res->y0, depval, width)
522 ! value_equal (&res->y1, depval, width)
525 msg (ME, _("Dependent variable's values are not dichotomous."));
531 if (v0set && value_equal (&res->y0, depval, width))
534 if (v1set && value_equal (&res->y1, depval, width))
540 categoricals_update (res->cats, c);
542 casereader_destroy (reader);
544 categoricals_done (res->cats);
548 /* Ensure that Y0 is less than Y1. Otherwise the mapping gets
549 inverted, which is confusing to users */
550 if (var_is_numeric (cmd->dep_var) && value_compare_3way (&res->y0, &res->y1, width) > 0)
553 value_clone (&tmp, &res->y0, width);
554 value_copy (&res->y0, &res->y1, width);
555 value_copy (&res->y1, &tmp, width);
556 value_destroy (&tmp, width);
560 n_coefficients += categoricals_df_total (res->cats);
561 res->beta_hat = gsl_vector_calloc (n_coefficients);
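  /* All coefficients start at zero (from gsl_vector_calloc); the constant term
     is seeded below with log (mean / (1 - mean)), the log odds of the mean
     response, which is the maximum-likelihood fit for an intercept-only model. */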
565 double mean = sum / res->cc;
566 gsl_vector_set (res->beta_hat, res->beta_hat->size - 1, log (mean / (1 - mean)));
572 casereader_destroy (reader);
578 /* Start of the logistic regression routine proper */
580 run_lr (const struct lr_spec *cmd, struct casereader *input,
581 const struct dataset *ds UNUSED)
585 bool converged = false;
587 /* Set the log likelihoods to a sentinel value */
588 double log_likelihood = SYSMIS;
589 double prev_log_likelihood = SYSMIS;
590 double initial_log_likelihood = SYSMIS;
592 struct lr_result work;
594 work.n_nonmissing = 0;
595 work.warn_bad_weight = true;
597 work.beta_hat = NULL;
600   /* Get the initial estimates of \beta and their standard errors,
601      and perform other auxiliary initialisation. */
602 if (! initial_pass (cmd, &work, input))
605 for (i = 0; i < cmd->n_cat_predictors; ++i)
607 if (1 >= categoricals_n_count (work.cats, i))
610 ds_init_empty (&str);
612 interaction_to_string (cmd->cat_predictors[i], &str);
614 msg (ME, _("Category %s does not have at least two distinct values. Logistic regression will not be run."),
621 output_depvarmap (cmd, &work);
623 case_processing_summary (&work);
626 input = casereader_create_filter_missing (input,
633 input = casereader_create_filter_missing (input,
640 work.hessian = gsl_matrix_calloc (work.beta_hat->size, work.beta_hat->size);
642   /* Start the Newton-Raphson iteration process... */
643 for( i = 0 ; i < cmd->max_iter ; ++i)
649 hessian (cmd, &work, input,
650 cmd->predictor_vars, cmd->n_predictor_vars,
653 gsl_linalg_cholesky_decomp (work.hessian);
654 gsl_linalg_cholesky_invert (work.hessian);
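      /* work.hessian now holds (X'WX)^{-1}.  Besides driving the update below,
         its diagonal supplies the variances of the coefficient estimates used
         for the standard errors and Wald statistics in the output. */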
656 v = xt_times_y_pi (cmd, &work, input,
657 cmd->predictor_vars, cmd->n_predictor_vars,
663 gsl_vector *delta = gsl_vector_alloc (v->size);
664 gsl_blas_dgemv (CblasNoTrans, 1.0, work.hessian, v, 0, delta);
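      /* delta = (X'WX)^{-1} X'(y - pi): one Newton-Raphson step, as in the
         formula at the top of this file. */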
668 gsl_vector_add (work.beta_hat, delta);
670 gsl_vector_minmax (delta, &min, &max);
672 if ( fabs (min) < cmd->bcon && fabs (max) < cmd->bcon)
674 msg (MN, _("Estimation terminated at iteration number %d because parameter estimates changed by less than %g"),
679 gsl_vector_free (delta);
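      /* Likelihood-based stopping rule: terminate when the log likelihood has
         improved by less than the proportion LCON since the previous iteration. */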
684 if (-log_likelihood > -(1.0 - cmd->lcon) * prev_log_likelihood)
686 msg (MN, _("Estimation terminated at iteration number %d because Log Likelihood decreased by less than %g%%"), i + 1, 100 * cmd->lcon);
691 initial_log_likelihood = log_likelihood;
692 prev_log_likelihood = log_likelihood;
701   msg (MW, _("Estimation terminated at iteration number %d because the maximum number of iterations has been reached"), i );
704 output_model_summary (&work, initial_log_likelihood, log_likelihood);
707 output_categories (cmd, &work);
709 output_classification_table (cmd, &work);
710 output_variables (cmd, &work);
712 casereader_destroy (input);
713 gsl_matrix_free (work.hessian);
714 gsl_vector_free (work.beta_hat);
715 categoricals_destroy (work.cats);
720 casereader_destroy (input);
721 gsl_matrix_free (work.hessian);
722 gsl_vector_free (work.beta_hat);
723 categoricals_destroy (work.cats);
730 struct hmap_node node; /* Node in hash map. */
731 const struct variable *var; /* The variable */
734 static struct variable_node *
735 lookup_variable (const struct hmap *map, const struct variable *var, unsigned int hash)
737 struct variable_node *vn = NULL;
738 HMAP_FOR_EACH_WITH_HASH (vn, struct variable_node, node, hash, map)
743 fprintf (stderr, "Warning: Hash table collision\n");
750 /* Parse the LOGISTIC REGRESSION command syntax */
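/* A purely illustrative example of the syntax accepted below (the variable
   names are hypothetical):

     LOGISTIC REGRESSION y WITH x1 x2 x3
       /CATEGORICAL = x3
       /CRITERIA = ITERATE(20) BCON(0.001)
       /PRINT = SUMMARY CI(95).
*/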
752 cmd_logistic (struct lexer *lexer, struct dataset *ds)
755 /* Temporary location for the predictor variables.
756 These may or may not include the categorical predictors */
757 const struct variable **pred_vars;
763 lr.dict = dataset_dict (ds);
764 lr.n_predictor_vars = 0;
765 lr.predictor_vars = NULL;
767 lr.wv = dict_get_weight (lr.dict);
771 lr.min_epsilon = 0.00000001;
774 lr.print = PRINT_DEFAULT;
775 lr.cat_predictors = NULL;
776 lr.n_cat_predictors = 0;
777 lr.indep_vars = NULL;
780 if (lex_match_id (lexer, "VARIABLES"))
781 lex_match (lexer, T_EQUALS);
783 if (! (lr.dep_var = parse_variable_const (lexer, lr.dict)))
786 if (! lex_force_match (lexer, T_WITH))
789 if (!parse_variables_const (lexer, lr.dict,
790 &pred_vars, &n_pred_vars,
795 while (lex_token (lexer) != T_ENDCMD)
797 lex_match (lexer, T_SLASH);
799 if (lex_match_id (lexer, "MISSING"))
801 lex_match (lexer, T_EQUALS);
802 while (lex_token (lexer) != T_ENDCMD
803 && lex_token (lexer) != T_SLASH)
805 if (lex_match_id (lexer, "INCLUDE"))
807 lr.exclude = MV_SYSTEM;
809 else if (lex_match_id (lexer, "EXCLUDE"))
815 lex_error (lexer, NULL);
820 else if (lex_match_id (lexer, "ORIGIN"))
824 else if (lex_match_id (lexer, "NOORIGIN"))
828 else if (lex_match_id (lexer, "NOCONST"))
832 else if (lex_match_id (lexer, "EXTERNAL"))
834 /* This is for compatibility. It does nothing */
836 else if (lex_match_id (lexer, "CATEGORICAL"))
838 lex_match (lexer, T_EQUALS);
841 lr.cat_predictors = xrealloc (lr.cat_predictors,
842 sizeof (*lr.cat_predictors) * ++lr.n_cat_predictors);
843 lr.cat_predictors[lr.n_cat_predictors - 1] = 0;
845 while (parse_design_interaction (lexer, lr.dict,
846 lr.cat_predictors + lr.n_cat_predictors - 1));
847 lr.n_cat_predictors--;
849 else if (lex_match_id (lexer, "PRINT"))
851 lex_match (lexer, T_EQUALS);
852 while (lex_token (lexer) != T_ENDCMD && lex_token (lexer) != T_SLASH)
854 if (lex_match_id (lexer, "DEFAULT"))
856 lr.print |= PRINT_DEFAULT;
858 else if (lex_match_id (lexer, "SUMMARY"))
860 lr.print |= PRINT_SUMMARY;
863 else if (lex_match_id (lexer, "CORR"))
865 lr.print |= PRINT_CORR;
867 else if (lex_match_id (lexer, "ITER"))
869 lr.print |= PRINT_ITER;
871 else if (lex_match_id (lexer, "GOODFIT"))
873 lr.print |= PRINT_GOODFIT;
876 else if (lex_match_id (lexer, "CI"))
878 lr.print |= PRINT_CI;
879 if (lex_force_match (lexer, T_LPAREN))
881 if (! lex_force_num (lexer))
883 lex_error (lexer, NULL);
886 lr.confidence = lex_number (lexer);
888 if ( ! lex_force_match (lexer, T_RPAREN))
890 lex_error (lexer, NULL);
895 else if (lex_match_id (lexer, "ALL"))
901 lex_error (lexer, NULL);
906 else if (lex_match_id (lexer, "CRITERIA"))
908 lex_match (lexer, T_EQUALS);
909 while (lex_token (lexer) != T_ENDCMD && lex_token (lexer) != T_SLASH)
911 if (lex_match_id (lexer, "BCON"))
913 if (lex_force_match (lexer, T_LPAREN))
915 if (! lex_force_num (lexer))
917 lex_error (lexer, NULL);
920 lr.bcon = lex_number (lexer);
922 if ( ! lex_force_match (lexer, T_RPAREN))
924 lex_error (lexer, NULL);
929 else if (lex_match_id (lexer, "ITERATE"))
931 if (lex_force_match (lexer, T_LPAREN))
933 if (! lex_force_int (lexer))
935 lex_error (lexer, NULL);
938 lr.max_iter = lex_integer (lexer);
940 if ( ! lex_force_match (lexer, T_RPAREN))
942 lex_error (lexer, NULL);
947 else if (lex_match_id (lexer, "LCON"))
949 if (lex_force_match (lexer, T_LPAREN))
951 if (! lex_force_num (lexer))
953 lex_error (lexer, NULL);
956 lr.lcon = lex_number (lexer);
958 if ( ! lex_force_match (lexer, T_RPAREN))
960 lex_error (lexer, NULL);
965 else if (lex_match_id (lexer, "EPS"))
967 if (lex_force_match (lexer, T_LPAREN))
969 if (! lex_force_num (lexer))
971 lex_error (lexer, NULL);
974 lr.min_epsilon = lex_number (lexer);
976 if ( ! lex_force_match (lexer, T_RPAREN))
978 lex_error (lexer, NULL);
983 else if (lex_match_id (lexer, "CUT"))
985 if (lex_force_match (lexer, T_LPAREN))
987 if (! lex_force_num (lexer))
989 lex_error (lexer, NULL);
992 cp = lex_number (lexer);
994 if (cp < 0 || cp > 1.0)
996 msg (ME, _("Cut point value must be in the range [0,1]"));
1000 if ( ! lex_force_match (lexer, T_RPAREN))
1002 lex_error (lexer, NULL);
1009 lex_error (lexer, NULL);
1016 lex_error (lexer, NULL);
1021 lr.ilogit_cut_point = - log (1/cp - 1);
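  /* Map the cut point from the probability scale onto the linear predictor
     (logit) scale: -log (1/cp - 1) == log (cp / (1 - cp)).  This lets it be
     compared directly with pred_y when building the classification table. */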
1024 /* Copy the predictor variables from the temporary location into the
1025 final one, dropping any categorical variables which appear there.
1026 FIXME: This is O(NxM).
1029 struct variable_node *vn, *next;
1030 struct hmap allvars;
1031 hmap_init (&allvars);
1032 for (v = x = 0; v < n_pred_vars; ++v)
1035 const struct variable *var = pred_vars[v];
1038 unsigned int hash = hash_pointer (var, 0);
1039 struct variable_node *vn = lookup_variable (&allvars, var, hash);
1042 vn = xmalloc (sizeof *vn);
1044 hmap_insert (&allvars, &vn->node, hash);
1047 for (cv = 0; cv < lr.n_cat_predictors ; ++cv)
1050 const struct interaction *iact = lr.cat_predictors[cv];
1051 for (iv = 0 ; iv < iact->n_vars ; ++iv)
1053 const struct variable *ivar = iact->vars[iv];
1054 unsigned int hash = hash_pointer (ivar, 0);
1055 struct variable_node *vn = lookup_variable (&allvars, ivar, hash);
1058 vn = xmalloc (sizeof *vn);
1061 hmap_insert (&allvars, &vn->node, hash);
1074 lr.predictor_vars = xrealloc (lr.predictor_vars, sizeof *lr.predictor_vars * (x + 1));
1075 lr.predictor_vars[x++] = var;
1076 lr.n_predictor_vars++;
1080 lr.n_indep_vars = hmap_count (&allvars);
1081 lr.indep_vars = xmalloc (lr.n_indep_vars * sizeof *lr.indep_vars);
1083   /* Iterate over each variable and push it into the array */
1085 HMAP_FOR_EACH_SAFE (vn, next, struct variable_node, node, &allvars)
1087 lr.indep_vars[x++] = vn->var;
1090 hmap_destroy (&allvars);
1094   /* Run the logistic regression for each split group */
1096 struct casegrouper *grouper;
1097 struct casereader *group;
1100 grouper = casegrouper_create_splits (proc_open (ds), lr.dict);
1101 while (casegrouper_get_next_group (grouper, &group))
1102 ok = run_lr (&lr, group, ds);
1103 ok = casegrouper_destroy (grouper);
1104 ok = proc_commit (ds) && ok;
1107 for (i = 0 ; i < lr.n_cat_predictors; ++i)
1109 interaction_destroy (lr.cat_predictors[i]);
1111 free (lr.predictor_vars);
1112 free (lr.cat_predictors);
1113 free (lr.indep_vars);
1119 for (i = 0 ; i < lr.n_cat_predictors; ++i)
1121 interaction_destroy (lr.cat_predictors[i]);
1123 free (lr.predictor_vars);
1124 free (lr.cat_predictors);
1125 free (lr.indep_vars);
1133 /* Show the Dependent Variable Encoding box.
1134 This indicates how the dependent variable
1135 is mapped to the internal zero/one values.
1138 output_depvarmap (const struct lr_spec *cmd, const struct lr_result *res)
1140 const int heading_columns = 0;
1141 const int heading_rows = 1;
1142 struct tab_table *t;
1146 int nr = heading_rows + 2;
1148 t = tab_create (nc, nr);
1149 tab_title (t, _("Dependent Variable Encoding"));
1151 tab_headers (t, heading_columns, 0, heading_rows, 0);
1153 tab_box (t, TAL_2, TAL_2, -1, TAL_1, 0, 0, nc - 1, nr - 1);
1155 tab_hline (t, TAL_2, 0, nc - 1, heading_rows);
1156 tab_vline (t, TAL_2, heading_columns, 0, nr - 1);
1158 tab_text (t, 0, 0, TAB_CENTER | TAT_TITLE, _("Original Value"));
1159 tab_text (t, 1, 0, TAB_CENTER | TAT_TITLE, _("Internal Value"));
1163 ds_init_empty (&str);
1164 var_append_value_name (cmd->dep_var, &res->y0, &str);
1165 tab_text (t, 0, 0 + heading_rows, 0, ds_cstr (&str));
1168 var_append_value_name (cmd->dep_var, &res->y1, &str);
1169 tab_text (t, 0, 1 + heading_rows, 0, ds_cstr (&str));
1172 tab_double (t, 1, 0 + heading_rows, 0, map_dependent_var (cmd, res, &res->y0), NULL, RC_INTEGER);
1173 tab_double (t, 1, 1 + heading_rows, 0, map_dependent_var (cmd, res, &res->y1), NULL, RC_INTEGER);
1180 /* Show the Variables in the Equation box */
1182 output_variables (const struct lr_spec *cmd,
1183 const struct lr_result *res)
1186 const int heading_columns = 1;
1187 int heading_rows = 1;
1188 struct tab_table *t;
1194 int idx_correction = 0;
1196 if (cmd->print & PRINT_CI)
1202 nr = heading_rows + cmd->n_predictor_vars;
1207 nr += categoricals_df_total (res->cats) + cmd->n_cat_predictors;
1209 t = tab_create (nc, nr);
1210 tab_title (t, _("Variables in the Equation"));
1212 tab_headers (t, heading_columns, 0, heading_rows, 0);
1214 tab_box (t, TAL_2, TAL_2, -1, TAL_1, 0, 0, nc - 1, nr - 1);
1216 tab_hline (t, TAL_2, 0, nc - 1, heading_rows);
1217 tab_vline (t, TAL_2, heading_columns, 0, nr - 1);
1219 tab_text (t, 0, row + 1, TAB_CENTER | TAT_TITLE, _("Step 1"));
1221 tab_text (t, 2, row, TAB_CENTER | TAT_TITLE, _("B"));
1222 tab_text (t, 3, row, TAB_CENTER | TAT_TITLE, _("S.E."));
1223 tab_text (t, 4, row, TAB_CENTER | TAT_TITLE, _("Wald"));
1224 tab_text (t, 5, row, TAB_CENTER | TAT_TITLE, _("df"));
1225 tab_text (t, 6, row, TAB_CENTER | TAT_TITLE, _("Sig."));
1226 tab_text (t, 7, row, TAB_CENTER | TAT_TITLE, _("Exp(B)"));
1228 if (cmd->print & PRINT_CI)
1230 tab_joint_text_format (t, 8, 0, 9, 0,
1231 TAB_CENTER | TAT_TITLE, _("%d%% CI for Exp(B)"), cmd->confidence);
1233 tab_text (t, 8, row, TAB_CENTER | TAT_TITLE, _("Lower"));
1234 tab_text (t, 9, row, TAB_CENTER | TAT_TITLE, _("Upper"));
1237 for (row = heading_rows ; row < nr; ++row)
1239 const int idx = row - heading_rows - idx_correction;
1241 const double b = gsl_vector_get (res->beta_hat, idx);
1242 const double sigma2 = gsl_matrix_get (res->hessian, idx, idx);
1243 const double wald = pow2 (b) / sigma2;
1244 const double df = 1;
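      /* For a single coefficient the Wald statistic reduces to (B / S.E.)^2,
         which is referred to a chi-squared distribution with 1 degree of freedom. */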
1246 if (idx < cmd->n_predictor_vars)
1248 tab_text (t, 1, row, TAB_LEFT | TAT_TITLE,
1249 var_to_string (cmd->predictor_vars[idx]));
1251 else if (i < cmd->n_cat_predictors)
1254 bool summary = false;
1256 const struct interaction *cat_predictors = cmd->cat_predictors[i];
1257 const int df = categoricals_df (res->cats, i);
1259 ds_init_empty (&str);
1260 interaction_to_string (cat_predictors, &str);
1264   /* Calculate the Wald statistic,
1265      which is \beta' C^{-1} \beta,
1266      where \beta is the vector of the coefficient estimates comprising this
1267      categorical variable, and C is the corresponding submatrix of the Hessian matrix.
1270 gsl_matrix_const_view mv =
1271 gsl_matrix_const_submatrix (res->hessian, idx, idx, df, df);
1272 gsl_matrix *subhessian = gsl_matrix_alloc (mv.matrix.size1, mv.matrix.size2);
1273 gsl_vector_const_view vv = gsl_vector_const_subvector (res->beta_hat, idx, df);
1274 gsl_vector *temp = gsl_vector_alloc (df);
1276 gsl_matrix_memcpy (subhessian, &mv.matrix);
1277 gsl_linalg_cholesky_decomp (subhessian);
1278 gsl_linalg_cholesky_invert (subhessian);
1280 gsl_blas_dgemv (CblasTrans, 1.0, subhessian, &vv.vector, 0, temp);
1281 gsl_blas_ddot (temp, &vv.vector, &wald);
1283 tab_double (t, 4, row, 0, wald, NULL, RC_OTHER);
1284 tab_double (t, 5, row, 0, df, NULL, RC_INTEGER);
1285 tab_double (t, 6, row, 0, gsl_cdf_chisq_Q (wald, df), NULL, RC_PVALUE);
1289 gsl_matrix_free (subhessian);
1290 gsl_vector_free (temp);
1294 ds_put_format (&str, "(%d)", ivar);
1297 tab_text (t, 1, row, TAB_LEFT | TAT_TITLE, ds_cstr (&str));
1300 ++i; /* next interaction */
1311 tab_text (t, 1, row, TAB_LEFT | TAT_TITLE, _("Constant"));
1314 tab_double (t, 2, row, 0, b, NULL, RC_OTHER);
1315 tab_double (t, 3, row, 0, sqrt (sigma2), NULL, RC_OTHER);
1316 tab_double (t, 4, row, 0, wald, NULL, RC_OTHER);
1317 tab_double (t, 5, row, 0, df, NULL, RC_INTEGER);
1318 tab_double (t, 6, row, 0, gsl_cdf_chisq_Q (wald, df), NULL, RC_PVALUE);
1319 tab_double (t, 7, row, 0, exp (b), NULL, RC_OTHER);
1321 if (cmd->print & PRINT_CI)
1324 double wc = gsl_cdf_ugaussian_Pinv (0.5 + cmd->confidence / 200.0);
1325 wc *= sqrt (sigma2);
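      /* wc is the half-width of the confidence interval for B: the
         (1 + confidence/100)/2 quantile of the standard normal distribution
         times the standard error.  Exponentiating the endpoints gives the
         interval for Exp(B) shown below. */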
1332 tab_double (t, 8, row, 0, exp (b - wc), NULL, RC_OTHER);
1333 tab_double (t, 9, row, 0, exp (b + wc), NULL, RC_OTHER);
1342 /* Show the model summary box */
1344 output_model_summary (const struct lr_result *res,
1345 double initial_log_likelihood, double log_likelihood)
1347 const int heading_columns = 0;
1348 const int heading_rows = 1;
1349 struct tab_table *t;
1352 int nr = heading_rows + 1;
1355 t = tab_create (nc, nr);
1356 tab_title (t, _("Model Summary"));
1358 tab_headers (t, heading_columns, 0, heading_rows, 0);
1360 tab_box (t, TAL_2, TAL_2, -1, TAL_1, 0, 0, nc - 1, nr - 1);
1362 tab_hline (t, TAL_2, 0, nc - 1, heading_rows);
1363 tab_vline (t, TAL_2, heading_columns, 0, nr - 1);
1365 tab_text (t, 0, 0, TAB_LEFT | TAT_TITLE, _("Step 1"));
1366 tab_text (t, 1, 0, TAB_CENTER | TAT_TITLE, _("-2 Log likelihood"));
1367 tab_double (t, 1, 1, 0, -2 * log_likelihood, NULL, RC_OTHER);
1370 tab_text (t, 2, 0, TAB_CENTER | TAT_TITLE, _("Cox & Snell R Square"));
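  /* Cox & Snell R^2 = 1 - exp (2 * (LL_0 - LL_1) / n), where LL_0 is the
     initial (intercept-only) log likelihood, LL_1 the final log likelihood,
     and n the weighted number of cases (res->cc). */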
1371 cox = 1.0 - exp((initial_log_likelihood - log_likelihood) * (2 / res->cc));
1372 tab_double (t, 2, 1, 0, cox, NULL, RC_OTHER);
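  /* Nagelkerke R^2 rescales Cox & Snell by its maximum attainable value,
     1 - exp (2 * LL_0 / n), so that it can reach 1. */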
1374 tab_text (t, 3, 0, TAB_CENTER | TAT_TITLE, _("Nagelkerke R Square"));
1375 tab_double (t, 3, 1, 0, cox / ( 1.0 - exp(initial_log_likelihood * (2 / res->cc))), NULL, RC_OTHER);
1381 /* Show the case processing summary box */
1383 case_processing_summary (const struct lr_result *res)
1385 const int heading_columns = 1;
1386 const int heading_rows = 1;
1387 struct tab_table *t;
1390 const int nr = heading_rows + 3;
1393 t = tab_create (nc, nr);
1394 tab_title (t, _("Case Processing Summary"));
1396 tab_headers (t, heading_columns, 0, heading_rows, 0);
1398 tab_box (t, TAL_2, TAL_2, -1, TAL_1, 0, 0, nc - 1, nr - 1);
1400 tab_hline (t, TAL_2, 0, nc - 1, heading_rows);
1401 tab_vline (t, TAL_2, heading_columns, 0, nr - 1);
1403 tab_text (t, 0, 0, TAB_LEFT | TAT_TITLE, _("Unweighted Cases"));
1404 tab_text (t, 1, 0, TAB_CENTER | TAT_TITLE, _("N"));
1405 tab_text (t, 2, 0, TAB_CENTER | TAT_TITLE, _("Percent"));
1408 tab_text (t, 0, 1, TAB_LEFT | TAT_TITLE, _("Included in Analysis"));
1409 tab_text (t, 0, 2, TAB_LEFT | TAT_TITLE, _("Missing Cases"));
1410 tab_text (t, 0, 3, TAB_LEFT | TAT_TITLE, _("Total"));
1412 tab_double (t, 1, 1, 0, res->n_nonmissing, NULL, RC_INTEGER);
1413 tab_double (t, 1, 2, 0, res->n_missing, NULL, RC_INTEGER);
1415 total = res->n_nonmissing + res->n_missing;
1416 tab_double (t, 1, 3, 0, total , NULL, RC_INTEGER);
1418 tab_double (t, 2, 1, 0, 100 * res->n_nonmissing / (double) total, NULL, RC_OTHER);
1419 tab_double (t, 2, 2, 0, 100 * res->n_missing / (double) total, NULL, RC_OTHER);
1420 tab_double (t, 2, 3, 0, 100 * total / (double) total, NULL, RC_OTHER);
1426 output_categories (const struct lr_spec *cmd, const struct lr_result *res)
1428 const struct fmt_spec *wfmt =
1429 cmd->wv ? var_get_print_format (cmd->wv) : &F_8_0;
1433 const int heading_columns = 2;
1434 const int heading_rows = 2;
1435 struct tab_table *t;
1445 for (i = 0; i < cmd->n_cat_predictors; ++i)
1447 size_t n = categoricals_n_count (res->cats, i);
1448 size_t df = categoricals_df (res->cats, i);
1454 nc = heading_columns + 1 + max_df;
1455 nr = heading_rows + total_cats;
1457 t = tab_create (nc, nr);
1458 tab_set_format (t, RC_WEIGHT, wfmt);
1460 tab_title (t, _("Categorical Variables' Codings"));
1462 tab_headers (t, heading_columns, 0, heading_rows, 0);
1464 tab_box (t, TAL_2, TAL_2, -1, TAL_1, 0, 0, nc - 1, nr - 1);
1466 tab_hline (t, TAL_2, 0, nc - 1, heading_rows);
1467 tab_vline (t, TAL_2, heading_columns, 0, nr - 1);
1470 tab_text (t, heading_columns, 1, TAB_CENTER | TAT_TITLE, _("Frequency"));
1472 tab_joint_text_format (t, heading_columns + 1, 0, nc - 1, 0,
1473 TAB_CENTER | TAT_TITLE, _("Parameter coding"));
1476 for (i = 0; i < max_df; ++i)
1478 int c = heading_columns + 1 + i;
1479 tab_text_format (t, c, 1, TAB_CENTER | TAT_TITLE, _("(%d)"), i + 1);
1483 for (v = 0; v < cmd->n_cat_predictors; ++v)
1486 const struct interaction *cat_predictors = cmd->cat_predictors[v];
1487 int df = categoricals_df (res->cats, v);
1489 ds_init_empty (&str);
1491 interaction_to_string (cat_predictors, &str);
1493 tab_text (t, 0, heading_rows + r, TAB_LEFT | TAT_TITLE, ds_cstr (&str) );
1497 for (cat = 0; cat < categoricals_n_count (res->cats, v) ; ++cat)
1500 const struct ccase *c = categoricals_get_case_by_category_real (res->cats, v, cat);
1501 const double *freq = categoricals_get_user_data_by_category_real (res->cats, v, cat);
1504 ds_init_empty (&str);
1506 for (x = 0; x < cat_predictors->n_vars; ++x)
1508 const union value *val = case_data (c, cat_predictors->vars[x]);
1509 var_append_value_name (cat_predictors->vars[x], val, &str);
1511 if (x < cat_predictors->n_vars - 1)
1512 ds_put_cstr (&str, " ");
1515 tab_text (t, 1, heading_rows + r, 0, ds_cstr (&str));
1517 tab_double (t, 2, heading_rows + r, 0, *freq, NULL, RC_WEIGHT);
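          /* Emit the indicator (dummy) coding for this category: a 1 in its own
             parameter column and 0 elsewhere.  The final category has no column
             of its own and therefore acts as the reference category. */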
1519 for (x = 0; x < df; ++x)
1521 tab_double (t, heading_columns + 1 + x, heading_rows + r, 0, (cat == x), NULL, RC_INTEGER);
1525 cumulative_df += df;
1534 output_classification_table (const struct lr_spec *cmd, const struct lr_result *res)
1536 const struct fmt_spec *wfmt =
1537 cmd->wv ? var_get_print_format (cmd->wv) : &F_8_0;
1539 const int heading_columns = 3;
1540 const int heading_rows = 3;
1542 struct string sv0, sv1;
1544 const int nc = heading_columns + 3;
1545 const int nr = heading_rows + 3;
1547 struct tab_table *t = tab_create (nc, nr);
1548 tab_set_format (t, RC_WEIGHT, wfmt);
1550 ds_init_empty (&sv0);
1551 ds_init_empty (&sv1);
1553 tab_title (t, _("Classification Table"));
1555 tab_headers (t, heading_columns, 0, heading_rows, 0);
1557 tab_box (t, TAL_2, TAL_2, -1, -1, 0, 0, nc - 1, nr - 1);
1558 tab_box (t, -1, -1, -1, TAL_1, heading_columns, 0, nc - 1, nr - 1);
1560 tab_hline (t, TAL_2, 0, nc - 1, heading_rows);
1561 tab_vline (t, TAL_2, heading_columns, 0, nr - 1);
1563 tab_text (t, 0, heading_rows, TAB_CENTER | TAT_TITLE, _("Step 1"));
1566 tab_joint_text (t, heading_columns, 0, nc - 1, 0,
1567 TAB_CENTER | TAT_TITLE, _("Predicted"));
1569 tab_joint_text (t, heading_columns, 1, heading_columns + 1, 1,
1570 0, var_to_string (cmd->dep_var) );
1572 tab_joint_text (t, 1, 2, 2, 2,
1573 TAB_LEFT | TAT_TITLE, _("Observed"));
1575 tab_text (t, 1, 3, TAB_LEFT, var_to_string (cmd->dep_var) );
1578 tab_joint_text (t, nc - 1, 1, nc - 1, 2,
1579 TAB_CENTER | TAT_TITLE, _("Percentage\nCorrect"));
1582 tab_joint_text (t, 1, nr - 1, 2, nr - 1,
1583 TAB_LEFT | TAT_TITLE, _("Overall Percentage"));
1586 tab_hline (t, TAL_1, 1, nc - 1, nr - 1);
1588 var_append_value_name (cmd->dep_var, &res->y0, &sv0);
1589 var_append_value_name (cmd->dep_var, &res->y1, &sv1);
1591 tab_text (t, 2, heading_rows, TAB_LEFT, ds_cstr (&sv0));
1592 tab_text (t, 2, heading_rows + 1, TAB_LEFT, ds_cstr (&sv1));
1594 tab_text (t, heading_columns, 2, 0, ds_cstr (&sv0));
1595 tab_text (t, heading_columns + 1, 2, 0, ds_cstr (&sv1));
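  /* Fill in the 2x2 counts: rows hold the observed categories (y0, then y1),
     columns the predicted ones, so the diagonal contains the correctly
     classified (tn and tp) totals. */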
1600 tab_double (t, heading_columns, 3, 0, res->tn, NULL, RC_WEIGHT);
1601 tab_double (t, heading_columns + 1, 4, 0, res->tp, NULL, RC_WEIGHT);
1603 tab_double (t, heading_columns + 1, 3, 0, res->fp, NULL, RC_WEIGHT);
1604 tab_double (t, heading_columns, 4, 0, res->fn, NULL, RC_WEIGHT);
1606 tab_double (t, heading_columns + 2, 3, 0, 100 * res->tn / (res->tn + res->fp), NULL, RC_OTHER);
1607 tab_double (t, heading_columns + 2, 4, 0, 100 * res->tp / (res->tp + res->fn), NULL, RC_OTHER);
1609 tab_double (t, heading_columns + 2, 5, 0,
1610 100 * (res->tp + res->tn) / (res->tp + res->tn + res->fp + res->fn), NULL, RC_OTHER);