From: John Darrington
Date: Mon, 19 Nov 2012 14:35:43 +0000 (+0100)
Subject: Logistic Regression: Added the Classification Table
X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=f7bdeb7b76a65cb2da2dd0bceea30a526dba84da;p=pspp

Logistic Regression: Added the Classification Table

Added a table to the logistic regression output to show how the
predicted values would be classified by the calculated coefficients.
---

diff --git a/doc/statistics.texi b/doc/statistics.texi
index 5723323337..a4f03ca6dd 100644
--- a/doc/statistics.texi
+++ b/doc/statistics.texi
@@ -741,7 +741,8 @@ LOGISTIC REGRESSION [VARIABLES =] @var{dependent_var} WITH @var{predictors}
      [/PRINT = [SUMMARY] [DEFAULT] [CI(@var{confidence})] [ALL]]

      [/CRITERIA = [BCON(@var{min_delta})] [ITERATE(@var{max_iterations})]
-                  [LCON(@var{min_likelihood_delta})] [EPS(@var{min_epsilon})]]
+                  [LCON(@var{min_likelihood_delta})] [EPS(@var{min_epsilon})]
+                  [CUT(@var{cut_point})]]

      [/MISSING = @{INCLUDE|EXCLUDE@}]
 @end display
@@ -773,10 +774,14 @@ If you want a model without the constant term @math{b_0}, use the keyword @subcm
 @subcmd{/NOCONST} is a synonym for @subcmd{/ORIGIN}.

 An iterative Newton-Raphson procedure is used to fit the model.
-The @subcmd{/CRITERIA} subcommand is used to specify the stopping criteria of the procedure.
+The @subcmd{/CRITERIA} subcommand is used to specify the stopping criteria of the procedure,
+and other parameters.
+The value of @var{cut_point} is used in the classification table.  It is the
+threshold above which predicted values are considered to be 1.  Values
+of @var{cut_point} must lie in the range [0,1].
 During iterations, if any one of the stopping criteria is satisfied, the procedure is considered complete.
-The criteria are:
+The stopping criteria are:
 @itemize
 @item
 The number of iterations exceeds @var{max_iterations}.
 The default value of @var{max_iterations} is 20.
@@ -790,6 +795,7 @@ In other words, the probabilities are close to zero or one.
 The default value of @var{min_epsilon} is 0.00000001.
 @end itemize

+
 The @subcmd{PRINT} subcommand controls the display of optional statistics.
 Currently there is one such option, @subcmd{CI}, which indicates that the
 confidence interval of the odds ratio should be displayed as well as its value.
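The rule the new CUT option introduces is easy to state: each case's linear
predictor is mapped through the logistic function to a fitted probability,
which is compared against the cut point, and the case weight is added to the
matching confusion-matrix cell. Below is a minimal C sketch of that rule in
isolation; it is illustrative only, not PSPP source, and the names
`struct confusion`, `classify`, and `eta` are invented for the example.

```c
#include <math.h>

/* Weighted confusion-matrix cells, mirroring the tn/tp/fn/fp
   fields the patch adds to struct lr_result.  */
struct confusion { double tn, tp, fn, fp; };

/* Classify one case.  ETA is the linear predictor x'beta, Y is the
   observed outcome (0 or 1), WEIGHT is the case weight, and
   CUT_POINT is the threshold from /CRITERIA = CUT(...).  */
void
classify (struct confusion *c, double eta, double y,
          double weight, double cut_point)
{
  double pred = 1.0 / (1.0 + exp (-eta));   /* fitted probability */

  if (pred <= cut_point)        /* predicted category 0 */
    {
      if (y == 0)
        c->tn += weight;
      else
        c->fn += weight;
    }
  else                          /* predicted category 1 */
    {
      if (y == 0)
        c->fp += weight;
      else
        c->tp += weight;
    }
}
```

Note that a fitted probability exactly equal to the cut point counts as a
prediction of the first (0) category, matching the `pred_y <= cmd->cut_point`
comparison in the patch below.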
diff --git a/src/language/stats/logistic.c b/src/language/stats/logistic.c
index 7171563032..cdd31c268d 100644
--- a/src/language/stats/logistic.c
+++ b/src/language/stats/logistic.c
@@ -172,6 +172,10 @@ struct lr_result

   /* The estimates of the predictor coefficients */
   gsl_vector *beta_hat;
+
+  /* The predicted classifications:
+     True Negative, True Positive, False Negative, False Positive */
+  double tn, tp, fn, fp;
 };
@@ -197,6 +201,7 @@ map_dependent_var (const struct lr_spec *cmd, const struct lr_result *res, const
   return SYSMIS;
 }

+static void output_classification_table (const struct lr_spec *cmd, const struct lr_result *res);

 static void output_categories (const struct lr_spec *cmd, const struct lr_result *res);
@@ -328,7 +333,9 @@ hessian (const struct lr_spec *cmd,
    y is the vector of observed values of the dependent variable
    pi is the vector of estimates for y

-   As a side effect, the likelihood is stored in LIKELIHOOD
+   Side effects:
+     the likelihood is stored in LIKELIHOOD;
+     the predicted values are placed in the respective tn, fn, tp, fp values in RES
 */
 static gsl_vector *
 xt_times_y_pi (const struct lr_spec *cmd,
@@ -343,9 +350,11 @@ xt_times_y_pi (const struct lr_spec *cmd,
   gsl_vector *output = gsl_vector_calloc (res->beta_hat->size);

   *likelihood = 1.0;
+  res->tn = res->tp = res->fn = res->fp = 0;
   for (reader = casereader_clone (input);
        (c = casereader_read (reader)) != NULL; case_unref (c))
     {
+      double pred_y = 0;
       int v0;
       double pi = pi_hat (cmd, res, x, n_x, c);
       double weight = dict_get_case_weight (cmd->dict, c, &res->warn_bad_weight);
@@ -360,6 +369,26 @@ xt_times_y_pi (const struct lr_spec *cmd,
           double in0 = predictor_value (c, x, n_x, res->cats, v0);
           double *o = gsl_vector_ptr (output, v0);
           *o += in0 * (y - pi) * weight;
+          pred_y += gsl_vector_get (res->beta_hat, v0) * in0;
+        }
+
+      pred_y = 1 / (1.0 + exp (-pred_y));
+      assert (pred_y >= 0);
+      assert (pred_y <= 1);
+
+      if (pred_y <= cmd->cut_point)
+        {
+          if (y == 0)
+            res->tn += weight;
+          else
+            res->fn += weight;
+        }
+      else
+        {
+          if (y == 0)
+            res->fp += weight;
+          else
+            res->tp += weight;
         }
     }
@@ -665,6 +694,7 @@ run_lr (const struct lr_spec *cmd, struct casereader *input,
   if (work.cats)
     output_categories (cmd, &work);

+  output_classification_table (cmd, &work);
   output_variables (cmd, &work);

   gsl_matrix_free (work.hessian);
@@ -928,6 +958,29 @@ cmd_logistic (struct lexer *lexer, struct dataset *ds)
                 }
             }
         }
+      else if (lex_match_id (lexer, "CUT"))
+        {
+          if (lex_force_match (lexer, T_LPAREN))
+            {
+              if (! lex_force_num (lexer))
+                {
+                  lex_error (lexer, NULL);
+                  goto error;
+                }
+              lr.cut_point = lex_number (lexer);
+              if (lr.cut_point < 0 || lr.cut_point > 1.0)
+                {
+                  msg (ME, _("Cut point value must be in the range [0,1]"));
+                  goto error;
+                }
+              lex_get (lexer);
+              if (! lex_force_match (lexer, T_RPAREN))
+                {
+                  lex_error (lexer, NULL);
+                  goto error;
+                }
+            }
+        }
      else
        {
          lex_error (lexer, NULL);
          goto error;
        }
@@ -1436,3 +1489,86 @@ output_categories (const struct lr_spec *cmd, const struct lr_result *res)

   tab_submit (t);
 }
+
+
+static void
+output_classification_table (const struct lr_spec *cmd, const struct lr_result *res)
+{
+  const struct fmt_spec *wfmt =
+    cmd->wv ? var_get_print_format (cmd->wv) : &F_8_0;
+
+  const int heading_columns = 3;
+  const int heading_rows = 3;
+
+  struct string sv0, sv1;
+
+  const int nc = heading_columns + 3;
+  const int nr = heading_rows + 3;
+
+  struct tab_table *t = tab_create (nc, nr);
+
+  ds_init_empty (&sv0);
+  ds_init_empty (&sv1);
+
+  tab_title (t, _("Classification Table"));
+
+  tab_headers (t, heading_columns, 0, heading_rows, 0);
+
+  tab_box (t, TAL_2, TAL_2, -1, -1, 0, 0, nc - 1, nr - 1);
+  tab_box (t, -1, -1, -1, TAL_1, heading_columns, 0, nc - 1, nr - 1);
+
+  tab_hline (t, TAL_2, 0, nc - 1, heading_rows);
+  tab_vline (t, TAL_2, heading_columns, 0, nr - 1);
+
+  tab_text (t, 0, heading_rows, TAB_CENTER | TAT_TITLE, _("Step 1"));
+
+  tab_joint_text (t, heading_columns, 0, nc - 1, 0,
+                  TAB_CENTER | TAT_TITLE, _("Predicted"));
+
+  tab_joint_text (t, heading_columns, 1, heading_columns + 1, 1,
+                  0, var_to_string (cmd->dep_var));
+
+  tab_joint_text (t, 1, 2, 2, 2,
+                  TAB_LEFT | TAT_TITLE, _("Observed"));
+
+  tab_text (t, 1, 3, TAB_LEFT, var_to_string (cmd->dep_var));
+
+  tab_joint_text (t, nc - 1, 1, nc - 1, 2,
+                  TAB_CENTER | TAT_TITLE, _("Percentage\nCorrect"));
+
+  tab_joint_text (t, 1, nr - 1, 2, nr - 1,
+                  TAB_LEFT | TAT_TITLE, _("Overall Percentage"));
+
+  tab_hline (t, TAL_1, 1, nc - 1, nr - 1);
+
+  var_append_value_name (cmd->dep_var, &res->y0, &sv0);
+  var_append_value_name (cmd->dep_var, &res->y1, &sv1);
+
+  tab_text (t, 2, heading_rows, TAB_LEFT, ds_cstr (&sv0));
+  tab_text (t, 2, heading_rows + 1, TAB_LEFT, ds_cstr (&sv1));
+
+  tab_text (t, heading_columns, 2, 0, ds_cstr (&sv0));
+  tab_text (t, heading_columns + 1, 2, 0, ds_cstr (&sv1));
+
+  ds_destroy (&sv0);
+  ds_destroy (&sv1);
+
+  tab_double (t, heading_columns, 3, 0, res->tn, wfmt);
+  tab_double (t, heading_columns + 1, 4, 0, res->tp, wfmt);
+
+  tab_double (t, heading_columns + 1, 3, 0, res->fp, wfmt);
+  tab_double (t, heading_columns, 4, 0, res->fn, wfmt);
+
+  tab_double (t, heading_columns + 2, 3, 0, 100 * res->tn / (res->tn + res->fp), 0);
+  tab_double (t, heading_columns + 2, 4, 0, 100 * res->tp / (res->tp + res->fn), 0);
+
+  tab_double (t, heading_columns + 2, 5, 0,
+              100 * (res->tp + res->tn) / (res->tp + res->tn + res->fp + res->fn), 0);
+
+  tab_submit (t);
+}
diff --git a/tests/language/stats/logistic.at b/tests/language/stats/logistic.at
index 9ab04f8cec..5f6232f7ca 100644
--- a/tests/language/stats/logistic.at
+++ b/tests/language/stats/logistic.at
@@ -112,6 +112,15 @@ Table: Model Summary
 Step 1,-2 Log likelihood,Cox & Snell R Square,Nagelkerke R Square
 ,37.323,.455,.659

+Table: Classification Table
+,,,Predicted,,
+,,,outcome,,"Percentage
+Correct"
+,Observed,,1.000,2.000,
+Step 1,outcome,1.000,43,5,89.583
+,,2.000,4,14,77.778
+,Overall Percentage,,,,86.364
+
 Table: Variables in the Equation
 ,,B,S.E.,Wald,df,Sig.,Exp(B)
 Step 1,survrate,-.081,.019,17.756,1,.000,.922
@@ -213,6 +222,12 @@ AT_CHECK([diff unweighted-result weighted-result], [1], [dnl
 < Total,66,100.000
 ---
 > Total,63,100.000
+23,24c23,24
+< Step 1,outcome,1.000,43,5,89.583
+< ,,2.000,4,14,77.778
+---
+> Step 1,outcome,1.000,43.000,5.000,89.583
+> ,,2.000,4.000,14.000,77.778
 ])
@@ -259,6 +274,15 @@ Table: Model Summary
 Step 1,-2 Log likelihood,Cox & Snell R Square,Nagelkerke R Square
 ,275.637,.008,.011

+Table: Classification Table
+,,,Predicted,,
+,,,female,,"Percentage
+Correct"
+,Observed,,.00,1.00,
+Step 1,female,.00,0,91,.000
+,,1.00,0,109,100.000
+,Overall Percentage,,,,54.500
+
 Table: Variables in the Equation
 ,,B,S.E.,Wald,df,Sig.,Exp(B)
 Step 1,constant,.180,.142,1.616,1,.204,1.198
@@ -738,6 +762,15 @@ bcat,1.000,61,1,0,0
 ,3.000,121,0,0,1
 ,4.000,67,0,0,0

+Table: Classification Table
+,,,Predicted,,
+,,,y,,"Percentage
+Correct"
+,Observed,,4.000,9.000,
+Step 1,y,4.000,254,19,93.040
+,,9.000,97,30,23.622
+,Overall Percentage,,,,71.000
+
 Table: Variables in the Equation
 ,,B,S.E.,Wald,df,Sig.,Exp(B)
 Step 1,b1,.002,.001,4.284,1,.038,1.002
@@ -997,6 +1030,15 @@ ses,a,47,1,0
 ,b,95,0,1
 ,c,58,0,0

+Table: Classification Table
+,,,Predicted,,
+,,,honcomp,,"Percentage
+Correct"
+,Observed,,.000,1.000,
+Step 1,honcomp,.000,132,15,89.796
+,,1.000,26,27,50.943
+,Overall Percentage,,,,79.500
+
 Table: Variables in the Equation
 ,,B,S.E.,Wald,df,Sig.,Exp(B)
 Step 1,read,.098,.025,15.199,1,.000,1.103
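The percentage columns in these expected tables are straight arithmetic on the
four confusion-matrix cells, exactly as computed by the tab_double calls at the
end of output_classification_table. A minimal standalone check (not part of
the patch), using the counts from the first test case above:

```c
#include <stdio.h>

int
main (void)
{
  /* Counts from the first expected Classification Table:
     observed 1.000 row -> tn = 43, fp = 5;
     observed 2.000 row -> fn = 4,  tp = 14.  */
  double tn = 43, fp = 5, fn = 4, tp = 14;

  printf ("%.3f\n", 100 * tn / (tn + fp));                   /* 89.583 */
  printf ("%.3f\n", 100 * tp / (tp + fn));                   /* 77.778 */
  printf ("%.3f\n", 100 * (tp + tn) / (tp + tn + fp + fn));  /* 86.364 */
  return 0;
}
```

The two per-row percentages are the classifier's specificity and sensitivity
at the chosen cut point; the overall percentage is the weighted proportion of
correctly classified cases.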