From: John Darrington
Date: Tue, 7 Apr 2009 11:30:23 +0000 (+0800)
Subject: Merge commit 'origin/stable'
X-Git-Tag: v0.7.3~176^2
X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=7fbfc32fc3c636959b0a25b3e76609f86519e84a;p=pspp-builds.git

Merge commit 'origin/stable'

Conflicts:
	src/language/stats/crosstabs.q
	src/language/stats/examine.q
	src/language/stats/frequencies.q
	src/language/stats/oneway.q
	tests/command/examine-extremes.sh
	tests/command/examine.sh
---

7fbfc32fc3c636959b0a25b3e76609f86519e84a
diff --cc src/language/stats/binomial.c
index b7b672ef,7a0ac722..f4344b76
--- a/src/language/stats/binomial.c
+++ b/src/language/stats/binomial.c
@@@ -153,11 -144,10 +154,12 @@@ voi
  binomial_execute (const struct dataset *ds,
                    struct casereader *input,
                    enum mv_class exclude,
-                   const struct npar_test *test)
+                   const struct npar_test *test,
+                   bool exact UNUSED,
+                   double timer UNUSED)
  {
    int v;
+   const struct dictionary *dict = dataset_dict (ds);
    const struct binomial_test *bst = (const struct binomial_test *) test;
    const struct one_sample_test *ost = (const struct one_sample_test*) test;
@@@ -185,15 -165,17 +187,19 @@@
    if ( bst->category2 != SYSMIS )
      {
+       int i;
        union value v;
        v.f = bst->category2;
-       cat2->value = value_dup (&v, 0);
+       for (i = 0; i < ost->n_vars; i++)
+         cat2[i].value = value_dup (&v, 0);
      }
-   if (do_binomial (dataset_dict(ds), input, bst, cat1, cat2, exclude))
+   if (do_binomial (dict, input, bst, cat1, cat2, exclude))
      {
+       const struct variable *wvar = dict_get_weight (dict);
+       const struct fmt_spec *wfmt = wvar ?
+         var_get_print_format (wvar) : & F_8_0;
+
        struct tab_table *table = tab_create (7, ost->n_vars * 3 + 1, 0);
        tab_dim (table, tab_natural_dimensions);
@@@ -241,19 -216,20 +247,20 @@@
              tab_text (table, 2, 2 + v * 3, TAB_NONE, ds_cstr (&catstr2));
              /* Observed N */
-             tab_float (table, 3, 1 + v * 3, TAB_NONE, cat1[v].count, 8, 0);
-             tab_float (table, 3, 2 + v * 3, TAB_NONE, cat2[v].count, 8, 0);
+             tab_double (table, 3, 1 + v * 3, TAB_NONE, cat1[v].count, wfmt);
+             tab_double (table, 3, 2 + v * 3, TAB_NONE, cat2[v].count, wfmt);
              n_total = cat1[v].count + cat2[v].count;
-             tab_float (table, 3, 3 + v * 3, TAB_NONE, n_total, 8, 0);
+             tab_double (table, 3, 3 + v * 3, TAB_NONE, n_total, wfmt);
              /* Observed Proportions */
-             tab_float (table, 4, 1 + v * 3, TAB_NONE,
-                        cat1[v].count / n_total, 8, 3);
-             tab_float (table, 4, 2 + v * 3, TAB_NONE,
-                        cat2[v].count / n_total, 8, 3);
-             tab_float (table, 4, 3 + v * 3, TAB_NONE,
-                        (cat1[v].count + cat2[v].count) / n_total, 8, 2);
+             tab_double (table, 4, 1 + v * 3, TAB_NONE,
+                         cat1[v].count / n_total, NULL);
+             tab_double (table, 4, 2 + v * 3, TAB_NONE,
+                         cat2[v].count / n_total, NULL);
+
+             tab_double (table, 4, 3 + v * 3, TAB_NONE,
-                         (cat1[v].count + cat2[v].count) / n_total, wfmt);
++                        (cat1[v].count + cat2[v].count) / n_total, NULL);
              /* Significance */
              sig = calculate_binomial (cat1[v].count, cat2[v].count, bst->p);
diff --cc src/language/stats/crosstabs.q
index 68d11e30,186ee12b..309b27fc
--- a/src/language/stats/crosstabs.q
+++ b/src/language/stats/crosstabs.q
@@@ -177,9 -177,9 +177,10 @@@ static struct pool *pl_col; /* For colu
  static int internal_cmd_crosstabs (struct lexer *lexer, struct dataset *ds);
  static void precalc (struct casereader *, const struct dataset *);
-static void calc_general (struct ccase *, const struct dataset *);
-static void calc_integer (struct ccase *, const struct dataset *);
+static void calc_general (const struct ccase *, const struct dataset *);
+static void calc_integer (const struct ccase *, const
struct dataset *); - static void postcalc (void); + static void postcalc (const struct dataset *); ++ static void submit (struct tab_table *); static void format_short (char *s, const struct fmt_spec *fp, diff --cc src/language/stats/examine.q index 2649968b,febb60fe..51f28320 --- a/src/language/stats/examine.q +++ b/src/language/stats/examine.q @@@ -90,140 -82,71 +90,141 @@@ static struct cmd_examine cmd; static const struct variable **dependent_vars; - static size_t n_dependent_vars; +/* PERCENTILES */ + +static subc_list_double percentile_list; +static enum pc_alg percentile_algorithm; -struct factor +struct factor_metrics { - /* The independent variable */ - struct variable *indep_var[2]; + struct moments1 *moments; + + struct percentile **ptl; + size_t n_ptiles; + + struct statistic *tukey_hinges; + struct statistic *box_whisker; + struct statistic *trimmed_mean; + struct statistic *histogram; + struct order_stats *np; + + /* Three quartiles indexing into PTL */ + struct percentile **quartiles; + + /* A reader sorted in ASCENDING order */ + struct casereader *up_reader; + + /* The minimum value of all the weights */ + double cmin; + + /* Sum of all weights, including those for missing values */ + double n; + + /* Sum of weights of non_missing values */ + double n_valid; + double mean; - /* Hash table of factor stats indexed by 2 values */ - struct hsh_table *fstats; + double variance; - /* The hash table after it has been crunched */ - struct factor_statistics **fs; + double skewness; - struct factor *next; + double kurtosis; + double se_mean; + + struct extrema *minima; + struct extrema *maxima; }; -/* Linked list of factors */ -static struct factor *factors = 0; +struct factor_result +{ + struct ll ll; -static struct metrics *totals = 0; + union value *value[2]; -/* Parse the clause specifying the factors */ -static int examine_parse_independent_vars (struct lexer *lexer, const struct dictionary *dict, struct cmd_examine *cmd); + /* An array of factor metrics, one for each variable */ + struct factor_metrics *metrics; +}; +struct xfactor +{ + /* We need to make a list of this structure */ + struct ll ll; + /* The independent variable */ + const struct variable const* indep_var[2]; -/* Output functions */ -static void show_summary (const struct variable **dependent_var, int n_dep_var, - const struct dictionary *dict, - const struct factor *f); + /* A list of results for this factor */ + struct ll_list result_list ; +}; -static void show_extremes (const struct variable **dependent_var, - int n_dep_var, - const struct factor *factor, - int n_extremities); -static void show_descriptives (const struct variable **dependent_var, - int n_dep_var, - struct factor *factor); +static void +factor_destroy (struct xfactor *fctr) +{ + struct ll *ll = ll_head (&fctr->result_list); + while (ll != ll_null (&fctr->result_list)) + { + int v; + struct factor_result *result = + ll_data (ll, struct factor_result, ll); -static void show_percentiles (const struct variable **dependent_var, - int n_dep_var, - struct factor *factor); + for (v = 0; v < n_dependent_vars; ++v) + { + int i; + moments1_destroy (result->metrics[v].moments); + extrema_destroy (result->metrics[v].minima); + extrema_destroy (result->metrics[v].maxima); + statistic_destroy (result->metrics[v].trimmed_mean); + statistic_destroy (result->metrics[v].tukey_hinges); + statistic_destroy (result->metrics[v].box_whisker); + statistic_destroy (result->metrics[v].histogram); + for (i = 0 ; i < result->metrics[v].n_ptiles; ++i) + statistic_destroy 
((struct statistic *) result->metrics[v].ptl[i]); + free (result->metrics[v].ptl); + free (result->metrics[v].quartiles); + casereader_destroy (result->metrics[v].up_reader); + } + + free (result->value[0]); + free (result->value[1]); + free (result->metrics); + ll = ll_next (ll); + free (result); + } +} +static struct xfactor level0_factor; +static struct ll_list factor_list; + +/* Parse the clause specifying the factors */ +static int examine_parse_independent_vars (struct lexer *lexer, + const struct dictionary *dict, + struct cmd_examine *cmd); +/* Output functions */ +static void show_summary (const struct variable **dependent_var, int n_dep_var, ++ const struct dictionary *dict, + const struct xfactor *f); -void np_plot (const struct metrics *m, const char *factorname); +static void show_descriptives (const struct variable **dependent_var, + int n_dep_var, + const struct xfactor *f); -void box_plot_group (const struct factor *fctr, - const struct variable **vars, int n_vars, - const struct variable *id - ) ; +static void show_percentiles (const struct variable **dependent_var, + int n_dep_var, + const struct xfactor *f); -void box_plot_variables (const struct factor *fctr, - const struct variable **vars, int n_vars, - const struct variable *id - ); +static void show_extremes (const struct variable **dependent_var, + int n_dep_var, + const struct xfactor *f); + @@@ -626,66 -406,36 +627,66 @@@ show_boxplot_variables (const struct va } -/* Create a hash table of percentiles and their values from the list of - percentiles */ -static struct hsh_table * -list_to_ptile_hash (const subc_list_double *l) +/* Show all the appropriate tables */ +static void - output_examine (void) ++output_examine (const struct dictionary *dict) { - int i; + struct ll *ll; + - show_summary (dependent_vars, n_dependent_vars, &level0_factor); ++ show_summary (dependent_vars, n_dependent_vars, dict, &level0_factor); - struct hsh_table *h ; + if ( cmd.a_statistics[XMN_ST_EXTREME] ) + show_extremes (dependent_vars, n_dependent_vars, &level0_factor); - h = hsh_create (subc_list_double_count (l), - (hsh_compare_func *) ptile_compare, - (hsh_hash_func *) ptile_hash, - (hsh_free_func *) free, - 0); + if ( cmd.a_statistics[XMN_ST_DESCRIPTIVES] ) + show_descriptives (dependent_vars, n_dependent_vars, &level0_factor); + if ( cmd.sbc_percentiles) + show_percentiles (dependent_vars, n_dependent_vars, &level0_factor); - for ( i = 0 ; i < subc_list_double_count (l) ; ++i ) + if ( cmd.sbc_plot) { - struct percentile *p = xmalloc (sizeof *p); - - p->p = subc_list_double_at (l,i); - p->v = SYSMIS; + if (cmd.a_plot[XMN_PLT_BOXPLOT]) + show_boxplot_groups (dependent_vars, n_dependent_vars, &level0_factor); - hsh_insert (h, p); + if (cmd.a_plot[XMN_PLT_HISTOGRAM]) + show_histogram (dependent_vars, n_dependent_vars, &level0_factor); + if (cmd.a_plot[XMN_PLT_NPPLOT]) + show_npplot (dependent_vars, n_dependent_vars, &level0_factor); } - return h; + for (ll = ll_head (&factor_list); + ll != ll_null (&factor_list); ll = ll_next (ll)) + { + struct xfactor *factor = ll_data (ll, struct xfactor, ll); - show_summary (dependent_vars, n_dependent_vars, factor); ++ show_summary (dependent_vars, n_dependent_vars, dict, factor); + + if ( cmd.a_statistics[XMN_ST_EXTREME] ) + show_extremes (dependent_vars, n_dependent_vars, factor); + if ( cmd.a_statistics[XMN_ST_DESCRIPTIVES] ) + show_descriptives (dependent_vars, n_dependent_vars, factor); + + if ( cmd.sbc_percentiles) + show_percentiles (dependent_vars, n_dependent_vars, factor); + + if 
(cmd.a_plot[XMN_PLT_BOXPLOT] && + cmd.cmp == XMN_GROUPS) + show_boxplot_groups (dependent_vars, n_dependent_vars, factor); + + + if (cmd.a_plot[XMN_PLT_BOXPLOT] && + cmd.cmp == XMN_VARIABLES) + show_boxplot_variables (dependent_vars, n_dependent_vars, + factor); + + if (cmd.a_plot[XMN_PLT_HISTOGRAM]) + show_histogram (dependent_vars, n_dependent_vars, factor); + + if (cmd.a_plot[XMN_PLT_NPPLOT]) + show_npplot (dependent_vars, n_dependent_vars, factor); + } } /* Parse the PERCENTILES subcommand */ @@@ -867,350 -620,312 +868,354 @@@ examine_parse_independent_vars (struct return success; } +static void +examine_group (struct cmd_examine *cmd, struct casereader *reader, int level, + const struct dictionary *dict, struct xfactor *factor) +{ + struct ccase *c; + const struct variable *wv = dict_get_weight (dict); + int v; + int n_extrema = 1; + struct factor_result *result = xzalloc (sizeof (*result)); + + result->metrics = xcalloc (n_dependent_vars, sizeof (*result->metrics)); + if ( cmd->a_statistics[XMN_ST_EXTREME] ) + n_extrema = cmd->st_n; -static void populate_percentiles (struct tab_table *tbl, int col, int row, - const struct metrics *m); + c = casereader_peek (reader, 0); + if (c != NULL) + { + if ( level > 0) + { + result->value[0] = + value_dup (case_data (c, factor->indep_var[0]), + var_get_width (factor->indep_var[0])); + + if ( level > 1) + result->value[1] = + value_dup (case_data (c, factor->indep_var[1]), + var_get_width (factor->indep_var[1])); + } + case_unref (c); + } -static void populate_descriptives (struct tab_table *t, int col, int row, - const struct variable *, - const struct metrics *fs); + for (v = 0; v < n_dependent_vars; ++v) + { + struct casewriter *writer; + struct casereader *input = casereader_clone (reader); + + result->metrics[v].moments = moments1_create (MOMENT_KURTOSIS); + result->metrics[v].minima = extrema_create (n_extrema, EXTREME_MINIMA); + result->metrics[v].maxima = extrema_create (n_extrema, EXTREME_MAXIMA); + result->metrics[v].cmin = DBL_MAX; + + if (cmd->a_statistics[XMN_ST_DESCRIPTIVES] || + cmd->a_plot[XMN_PLT_BOXPLOT] || + cmd->a_plot[XMN_PLT_NPPLOT] || + cmd->sbc_percentiles) + { + /* In this case, we need to sort the data, so we create a sorting + casewriter */ + struct subcase up_ordering; + subcase_init_var (&up_ordering, dependent_vars[v], SC_ASCEND); + writer = sort_create_writer (&up_ordering, + casereader_get_value_cnt (reader)); + subcase_destroy (&up_ordering); + } + else + { + /* but in this case, sorting is unnecessary, so an ordinary + casewriter is sufficient */ + writer = + autopaging_writer_create (casereader_get_value_cnt (reader)); + } -static void populate_extremes (struct tab_table *t, int col, int row, int n, - const struct variable *var, - const struct metrics *m); -static void populate_summary (struct tab_table *t, int col, int row, - const struct dictionary *dict, - const struct metrics *m); + /* Sort or just iterate, whilst calculating moments etc */ + while ((c = casereader_read (input)) != NULL) + { + const casenumber loc = + case_data_idx (c, casereader_get_value_cnt (reader) - 1)->f; + const double weight = wv ? 
case_data (c, wv)->f : 1.0; + const union value *value = case_data (c, dependent_vars[v]); + if (weight != SYSMIS) + minimize (&result->metrics[v].cmin, weight); + moments1_add (result->metrics[v].moments, + value->f, + weight); -/* Perform calculations for the sub factors */ -void -factor_calc (const struct ccase *c, int case_no, double weight, - bool case_missing) -{ - size_t v; - struct factor *fctr = factors; + result->metrics[v].n += weight; - while ( fctr) - { - struct factor_statistics **foo ; - union value *indep_vals[2] ; + if ( ! var_is_value_missing (dependent_vars[v], value, MV_ANY) ) + result->metrics[v].n_valid += weight; - indep_vals[0] = value_dup ( - case_data (c, fctr->indep_var[0]), - var_get_width (fctr->indep_var[0]) - ); + extrema_add (result->metrics[v].maxima, + value->f, + weight, + loc); - if ( fctr->indep_var[1] ) - indep_vals[1] = value_dup ( - case_data (c, fctr->indep_var[1]), - var_get_width (fctr->indep_var[1]) - ); - else - { - const union value sm = {SYSMIS}; - indep_vals[1] = value_dup (&sm, 0); + extrema_add (result->metrics[v].minima, + value->f, + weight, + loc); + + casewriter_write (writer, c); } + casereader_destroy (input); + result->metrics[v].up_reader = casewriter_make_reader (writer); + } - assert (fctr->fstats); + /* If percentiles or descriptives have been requested, then a + second pass through the data (which has now been sorted) + is necessary */ + if ( cmd->a_statistics[XMN_ST_DESCRIPTIVES] || + cmd->a_plot[XMN_PLT_BOXPLOT] || + cmd->a_plot[XMN_PLT_NPPLOT] || + cmd->sbc_percentiles) + { + for (v = 0; v < n_dependent_vars; ++v) + { + int i; + int n_os; + struct order_stats **os ; + struct factor_metrics *metric = &result->metrics[v]; - foo = ( struct factor_statistics ** ) - hsh_probe (fctr->fstats, (void *) &indep_vals); + metric->n_ptiles = percentile_list.n_data; - if ( !*foo ) - { + metric->ptl = xcalloc (metric->n_ptiles, + sizeof (struct percentile *)); - *foo = create_factor_statistics (n_dependent_vars, - indep_vals[0], - indep_vals[1]); + metric->quartiles = xcalloc (3, sizeof (*metric->quartiles)); - for ( v = 0 ; v < n_dependent_vars ; ++v ) + for (i = 0 ; i < metric->n_ptiles; ++i) { - metrics_precalc ( & (*foo)->m[v] ); + metric->ptl[i] = (struct percentile *) + percentile_create (percentile_list.data[i] / 100.0, metric->n_valid); + + if ( percentile_list.data[i] == 25) + metric->quartiles[0] = metric->ptl[i]; + else if ( percentile_list.data[i] == 50) + metric->quartiles[1] = metric->ptl[i]; + else if ( percentile_list.data[i] == 75) + metric->quartiles[2] = metric->ptl[i]; } - } - else - { - free (indep_vals[0]); - free (indep_vals[1]); - } + metric->tukey_hinges = tukey_hinges_create (metric->n, metric->cmin); + metric->trimmed_mean = trimmed_mean_create (metric->n, 0.05); - for ( v = 0 ; v < n_dependent_vars ; ++v ) - { - const struct variable *var = dependent_vars[v]; - union value *val = value_dup ( - case_data (c, var), - var_get_width (var) - ); + n_os = metric->n_ptiles + 2; - if (case_missing || var_is_value_missing (var, val, exclude_values)) + if ( cmd->a_plot[XMN_PLT_NPPLOT] ) { - free (val); - val = NULL; + metric->np = np_create (metric->moments); + n_os ++; } - metrics_calc ( & (*foo)->m[v], val, weight, case_no); + os = xcalloc (sizeof (struct order_stats *), n_os); - free (val); - } + for (i = 0 ; i < metric->n_ptiles ; ++i ) + { + os[i] = (struct order_stats *) metric->ptl[i]; + } - fctr = fctr->next; - } -} + os[i] = (struct order_stats *) metric->tukey_hinges; + os[i+1] = (struct order_stats *) 
metric->trimmed_mean; -static void -run_examine (struct cmd_examine *cmd, struct casereader *input, - struct dataset *ds) -{ - struct dictionary *dict = dataset_dict (ds); - casenumber case_no; - struct ccase c; - int v; - bool ok; + if (cmd->a_plot[XMN_PLT_NPPLOT]) + os[i+2] = metric->np; - struct factor *fctr; - - if (!casereader_peek (input, 0, &c)) - { - casereader_destroy (input); - return; + order_stats_accumulate (os, n_os, + casereader_clone (metric->up_reader), + wv, dependent_vars[v], MV_ANY); + free (os); + } } - output_split_file_values (ds, &c); - case_destroy (&c); - - input = casereader_create_filter_weight (input, dict, NULL, NULL); - input = casereader_create_counter (input, &case_no, 0); - /* Make sure we haven't got rubbish left over from a - previous split. */ - fctr = factors; - while (fctr) + /* FIXME: Do this in the above loop */ + if ( cmd->a_plot[XMN_PLT_HISTOGRAM] ) { - struct factor *next = fctr->next; + struct ccase *c; + struct casereader *input = casereader_clone (reader); - hsh_clear (fctr->fstats); + for (v = 0; v < n_dependent_vars; ++v) + { + const struct extremum *max, *min; + struct factor_metrics *metric = &result->metrics[v]; - fctr->fs = 0; + const struct ll_list *max_list = + extrema_list (result->metrics[v].maxima); - fctr = next; - } + const struct ll_list *min_list = + extrema_list (result->metrics[v].minima); - for ( v = 0 ; v < n_dependent_vars ; ++v ) - metrics_precalc (&totals[v]); + if ( ll_is_empty (max_list)) + { + msg (MW, _("Not creating plot because data set is empty.")); + continue; + } - for (; casereader_read (input, &c); case_destroy (&c)) - { - bool case_missing = false; - const double weight = dict_get_case_weight (dict, &c, NULL); + assert (! ll_is_empty (min_list)); - if ( cmd->miss == XMN_LISTWISE ) - { - for ( v = 0 ; v < n_dependent_vars ; ++v ) - { - const struct variable *var = dependent_vars[v]; - union value *val = value_dup ( - case_data (&c, var), - var_get_width (var) - ); + max = (const struct extremum *) + ll_data (ll_head(max_list), struct extremum, ll); - if ( var_is_value_missing (var, val, exclude_values)) - case_missing = true; + min = (const struct extremum *) + ll_data (ll_head (min_list), struct extremum, ll); - free (val); - } + metric->histogram = histogram_create (10, min->value, max->value); } - for ( v = 0 ; v < n_dependent_vars ; ++v ) + while ((c = casereader_read (input)) != NULL) { - const struct variable *var = dependent_vars[v]; - union value *val = value_dup ( - case_data (&c, var), - var_get_width (var) - ); - - if ( var_is_value_missing (var, val, exclude_values) - || case_missing ) + const double weight = wv ? 
case_data (c, wv)->f : 1.0; + + for (v = 0; v < n_dependent_vars; ++v) { - free (val) ; - val = NULL; + struct factor_metrics *metric = &result->metrics[v]; + if ( metric->histogram) + histogram_add ((struct histogram *) metric->histogram, + case_data (c, dependent_vars[v])->f, weight); } - - metrics_calc (&totals[v], val, weight, case_no); - - free (val); + case_unref (c); } - - factor_calc (&c, case_no, weight, case_missing); + casereader_destroy (input); } - ok = casereader_destroy (input); - for ( v = 0 ; v < n_dependent_vars ; ++v) + /* In this case, a third iteration is required */ + if (cmd->a_plot[XMN_PLT_BOXPLOT]) { - fctr = factors; - while ( fctr ) + for (v = 0; v < n_dependent_vars; ++v) { - struct hsh_iterator hi; - struct factor_statistics *fs; + struct factor_metrics *metric = &result->metrics[v]; + + metric->box_whisker = + box_whisker_create ((struct tukey_hinges *) metric->tukey_hinges, + cmd->v_id, + casereader_get_value_cnt (metric->up_reader) + - 1); + + order_stats_accumulate ((struct order_stats **) &metric->box_whisker, + 1, + casereader_clone (metric->up_reader), + wv, dependent_vars[v], MV_ANY); + } + } - for ( fs = hsh_first (fctr->fstats, &hi); - fs != 0 ; - fs = hsh_next (fctr->fstats, &hi)) - { + ll_push_tail (&factor->result_list, &result->ll); + casereader_destroy (reader); +} - fs->m[v].ptile_hash = list_to_ptile_hash (&percentile_list); - fs->m[v].ptile_alg = percentile_algorithm; - metrics_postcalc (&fs->m[v]); - } - fctr = fctr->next; - } +static void +run_examine (struct cmd_examine *cmd, struct casereader *input, + struct dataset *ds) +{ + struct ll *ll; + const struct dictionary *dict = dataset_dict (ds); + struct ccase *c; + struct casereader *level0 = casereader_clone (input); - totals[v].ptile_hash = list_to_ptile_hash (&percentile_list); - totals[v].ptile_alg = percentile_algorithm; - metrics_postcalc (&totals[v]); + c = casereader_peek (input, 0); + if (c == NULL) + { + casereader_destroy (input); + return; } + output_split_file_values (ds, c); + case_unref (c); - /* Make sure that the combination of factors are complete */ - - fctr = factors; - while ( fctr ) - { - struct hsh_iterator hi; - struct hsh_iterator hi0; - struct hsh_iterator hi1; - struct factor_statistics *fs; + ll_init (&level0_factor.result_list); - struct hsh_table *idh0 = NULL; - struct hsh_table *idh1 = NULL; - union value **val0; - union value **val1; + examine_group (cmd, level0, 0, dict, &level0_factor); - idh0 = hsh_create (4, (hsh_compare_func *) compare_ptr_values, - (hsh_hash_func *) hash_ptr_value, - 0,0); + for (ll = ll_head (&factor_list); + ll != ll_null (&factor_list); + ll = ll_next (ll)) + { + struct xfactor *factor = ll_data (ll, struct xfactor, ll); - idh1 = hsh_create (4, (hsh_compare_func *) compare_ptr_values, - (hsh_hash_func *) hash_ptr_value, - 0,0); + struct casereader *group = NULL; + struct casereader *level1; + struct casegrouper *grouper1 = NULL; + level1 = casereader_clone (input); + level1 = sort_execute_1var (level1, factor->indep_var[0]); + grouper1 = casegrouper_create_vars (level1, &factor->indep_var[0], 1); - for ( fs = hsh_first (fctr->fstats, &hi); - fs != 0 ; - fs = hsh_next (fctr->fstats, &hi)) + while (casegrouper_get_next_group (grouper1, &group)) { - hsh_insert (idh0, &fs->id[0]); - hsh_insert (idh1, &fs->id[1]); - } + struct casereader *group_copy = casereader_clone (group); - /* Ensure that the factors combination is complete */ - for ( val0 = hsh_first (idh0, &hi0); - val0 != 0 ; - val0 = hsh_next (idh0, &hi0)) - { - for ( val1 = 
hsh_first (idh1, &hi1); - val1 != 0 ; - val1 = hsh_next (idh1, &hi1)) + if ( !factor->indep_var[1]) + examine_group (cmd, group_copy, 1, dict, factor); + else { - struct factor_statistics **ffs; - union value *key[2]; - key[0] = *val0; - key[1] = *val1; - - ffs = (struct factor_statistics **) - hsh_probe (fctr->fstats, &key ); - - if ( !*ffs ) { - size_t i; - (*ffs) = create_factor_statistics (n_dependent_vars, - key[0], key[1]); - for ( i = 0 ; i < n_dependent_vars ; ++i ) - metrics_precalc ( & (*ffs)->m[i]); - } - } - } + int n_groups = 0; + struct casereader *group2 = NULL; + struct casegrouper *grouper2 = NULL; - hsh_destroy (idh0); - hsh_destroy (idh1); + group_copy = sort_execute_1var (group_copy, + factor->indep_var[1]); - fctr->fs = (struct factor_statistics **) hsh_sort_copy (fctr->fstats); + grouper2 = casegrouper_create_vars (group_copy, + &factor->indep_var[1], 1); + + while (casegrouper_get_next_group (grouper2, &group2)) + { + examine_group (cmd, group2, 2, dict, factor); + n_groups++; + } + casegrouper_destroy (grouper2); + } - fctr = fctr->next; + casereader_destroy (group); + } + casegrouper_destroy (grouper1); } - if (ok) - output_examine (dict); + casereader_destroy (input); - output_examine (); ++ output_examine (dict); + + factor_destroy (&level0_factor); + + { + struct ll *ll; + for (ll = ll_head (&factor_list); + ll != ll_null (&factor_list); + ll = ll_next (ll)) + { + struct xfactor *f = ll_data (ll, struct xfactor, ll); + factor_destroy (f); + } + } - if ( totals ) - { - size_t i; - for ( i = 0 ; i < n_dependent_vars ; ++i ) - { - metrics_destroy (&totals[i]); - } - } } static void show_summary (const struct variable **dependent_var, int n_dep_var, + const struct dictionary *dict, - const struct factor *fctr) + const struct xfactor *fctr) { ++ const struct variable *wv = dict_get_weight (dict); ++ const struct fmt_spec *wfmt = wv ? 
var_get_print_format (wv) : & F_8_0; ++ static const char *subtitle[]= { - N_ ("Valid"), - N_ ("Missing"), - N_ ("Total") + N_("Valid"), + N_("Missing"), + N_("Total") }; - int i; - int heading_columns ; + int v, j; + int heading_columns = 1; int n_cols; const int heading_rows = 3; struct tab_table *tbl; @@@ -1309,129 -1026,86 +1314,129 @@@ } } - - for ( i = 0 ; i < n_dep_var ; ++i ) + for (v = 0 ; v < n_dep_var ; ++v) { - int n_factors = 1; - if ( fctr ) - n_factors = hsh_count (fctr->fstats); + int j = 0; + struct ll *ll; + union value *last_value = NULL; - if ( i > 0 ) - tab_hline (tbl, TAL_1, 0, n_cols -1 , i * n_factors + heading_rows); + if ( v > 0 ) + tab_hline (tbl, TAL_1, 0, n_cols -1 , + v * ll_count (&fctr->result_list) + + heading_rows); tab_text (tbl, - 0, i * n_factors + heading_rows, + 0, + v * ll_count (&fctr->result_list) + heading_rows, TAB_LEFT | TAT_TITLE, - var_to_string (dependent_var[i]) + var_to_string (dependent_var[v]) ); - if ( !fctr ) - populate_summary (tbl, heading_columns, - (i * n_factors) + heading_rows, - dict, - &totals[i]); - else + + for (ll = ll_head (&fctr->result_list); + ll != ll_null (&fctr->result_list); ll = ll_next (ll)) { - struct factor_statistics **fs = fctr->fs; - int count = 0 ; - const union value *prev = NULL; + double n; + const struct factor_result *result = + ll_data (ll, struct factor_result, ll); - while (*fs) + if ( fctr->indep_var[0] ) { - if ( !prev || - 0 != compare_values (prev, (*fs)->id[0], - var_get_width (fctr->indep_var[0]))) + + if ( last_value == NULL || + compare_values_short (last_value, result->value[0], + fctr->indep_var[0])) { - struct string vstr; - ds_init_empty (&vstr); - var_append_value_name (fctr->indep_var[0], - (*fs)->id[0], &vstr); - - tab_text (tbl, - 1, - (i * n_factors ) + count + - heading_rows, + struct string str; + + last_value = result->value[0]; + ds_init_empty (&str); + + var_append_value_name (fctr->indep_var[0], result->value[0], + &str); + + tab_text (tbl, 1, + heading_rows + j + + v * ll_count (&fctr->result_list), TAB_LEFT | TAT_TITLE, - ds_cstr (&vstr) - ); + ds_cstr (&str)); - ds_destroy (&vstr); + ds_destroy (&str); - if (fctr->indep_var[1] && count > 0 ) + if ( fctr->indep_var[1] && j > 0) tab_hline (tbl, TAL_1, 1, n_cols - 1, - (i * n_factors ) + count + heading_rows); + heading_rows + j + + v * ll_count (&fctr->result_list)); } - prev = (*fs)->id[0]; - if ( fctr->indep_var[1]) { - struct string vstr; - ds_init_empty (&vstr); + struct string str; + + ds_init_empty (&str); + var_append_value_name (fctr->indep_var[1], - (*fs)->id[1], &vstr); - tab_text (tbl, - 2, - (i * n_factors ) + count + - heading_rows, + result->value[1], &str); + + tab_text (tbl, 2, + heading_rows + j + + v * ll_count (&fctr->result_list), TAB_LEFT | TAT_TITLE, - ds_cstr (&vstr) - ); - ds_destroy (&vstr); + ds_cstr (&str)); + + ds_destroy (&str); } + } - populate_summary (tbl, heading_columns, - (i * n_factors) + count - + heading_rows, - dict, - & (*fs)->m[i]); - count++ ; - fs++; - } + moments1_calculate (result->metrics[v].moments, + &n, &result->metrics[v].mean, + &result->metrics[v].variance, + &result->metrics[v].skewness, + &result->metrics[v].kurtosis); + + result->metrics[v].se_mean = sqrt (result->metrics[v].variance / n) ; + + /* Total Valid */ - tab_float (tbl, heading_columns, ++ tab_double (tbl, heading_columns, + heading_rows + j + v * ll_count (&fctr->result_list), + TAB_LEFT, - n, 8, 0); ++ n, wfmt); + + tab_text (tbl, heading_columns + 1, + heading_rows + j + v * ll_count (&fctr->result_list), + 
TAB_RIGHT | TAT_PRINTF, + "%g%%", n * 100.0 / result->metrics[v].n); + + /* Total Missing */ - tab_float (tbl, heading_columns + 2, ++ tab_double (tbl, heading_columns + 2, + heading_rows + j + v * ll_count (&fctr->result_list), + TAB_LEFT, + result->metrics[v].n - n, - 8, 0); ++ wfmt); + + tab_text (tbl, heading_columns + 3, + heading_rows + j + v * ll_count (&fctr->result_list), + TAB_RIGHT | TAT_PRINTF, + "%g%%", + (result->metrics[v].n - n) * 100.0 / result->metrics[v].n + ); + + /* Total Valid + Missing */ - tab_float (tbl, heading_columns + 4, ++ tab_double (tbl, heading_columns + 4, + heading_rows + j + v * ll_count (&fctr->result_list), + TAB_LEFT, + result->metrics[v].n, - 8, 0); ++ wfmt); + + tab_text (tbl, heading_columns + 5, + heading_rows + j + v * ll_count (&fctr->result_list), + TAB_RIGHT | TAT_PRINTF, + "%g%%", + (result->metrics[v].n) * 100.0 / result->metrics[v].n + ); + + ++j; } } @@@ -1484,272 -1195,209 +1489,272 @@@ show_descriptives (const struct variabl 0, 0, n_cols - 1, n_rows - 1); - tab_hline (tbl, TAL_2, 0, n_cols - 1, heading_rows ); - tab_title (tbl, _ ("Extreme Values")); + tab_hline (tbl, TAL_2, 0, n_cols - 1, heading_rows ); + tab_hline (tbl, TAL_2, 1, n_cols - 1, heading_rows ); - tab_vline (tbl, TAL_2, n_cols - 2, 0, n_rows -1); - tab_vline (tbl, TAL_1, n_cols - 1, 0, n_rows -1); + tab_vline (tbl, TAL_1, n_cols - 1, 0, n_rows - 1); - if ( fctr ) - { - tab_text (tbl, 1, heading_rows - 1, TAB_CENTER | TAT_TITLE, - var_to_string (fctr->indep_var[0])); - if ( fctr->indep_var[1] ) - tab_text (tbl, 2, heading_rows - 1, TAB_CENTER | TAT_TITLE, - var_to_string (fctr->indep_var[1])); - } + if ( fctr->indep_var[0]) + tab_text (tbl, 1, 0, TAT_TITLE, var_to_string (fctr->indep_var[0])); - tab_text (tbl, n_cols - 1, 0, TAB_CENTER | TAT_TITLE, _ ("Value")); - tab_text (tbl, n_cols - 2, 0, TAB_CENTER | TAT_TITLE, _ ("Case Number")); + if ( fctr->indep_var[1]) + tab_text (tbl, 2, 0, TAT_TITLE, var_to_string (fctr->indep_var[1])); - for ( i = 0 ; i < n_dep_var ; ++i ) + for (v = 0 ; v < n_dep_var ; ++v ) { + struct ll *ll; + int i = 0; - if ( i > 0 ) - tab_hline (tbl, TAL_1, 0, n_cols -1 , - i * 2 * n_extremities * n_factors + heading_rows); + const int row_var_start = + v * DESCRIPTIVE_ROWS * ll_count(&fctr->result_list); - tab_text (tbl, 0, - i * 2 * n_extremities * n_factors + heading_rows, + tab_text (tbl, + 0, + heading_rows + row_var_start, TAB_LEFT | TAT_TITLE, - var_to_string (dependent_var[i]) + var_to_string (dependent_var[v]) ); - - if ( !fctr ) - populate_extremes (tbl, heading_columns - 2, - i * 2 * n_extremities * n_factors + heading_rows, - n_extremities, - dependent_var[i], - &totals[i]); - else + for (ll = ll_head (&fctr->result_list); + ll != ll_null (&fctr->result_list); i++, ll = ll_next (ll)) { - struct factor_statistics **fs = fctr->fs; - int count = 0 ; - const union value *prev = NULL; - - while (*fs) - { - const int row = heading_rows + ( 2 * n_extremities ) * - ( ( i * n_factors ) + count ); - - - if ( !prev || 0 != compare_values (prev, (*fs)->id[0], - var_get_width (fctr->indep_var[0]))) - { - struct string vstr; - ds_init_empty (&vstr); - var_append_value_name (fctr->indep_var[0], - (*fs)->id[0], &vstr); - - if ( count > 0 ) - tab_hline (tbl, TAL_1, 1, n_cols - 1, row); - - tab_text (tbl, - 1, row, - TAB_LEFT | TAT_TITLE, - ds_cstr (&vstr) - ); - - ds_destroy (&vstr); - } - - prev = (*fs)->id[0]; - - if (fctr->indep_var[1] && count > 0 ) - tab_hline (tbl, TAL_1, 2, n_cols - 1, row); - - if ( fctr->indep_var[1]) - { - struct string vstr; - 
ds_init_empty (&vstr); - var_append_value_name (fctr->indep_var[1], (*fs)->id[1], &vstr); - - tab_text (tbl, 2, row, - TAB_LEFT | TAT_TITLE, - ds_cstr (&vstr) - ); - - ds_destroy (&vstr); - } + const struct factor_result *result = + ll_data (ll, struct factor_result, ll); - populate_extremes (tbl, heading_columns - 2, - row, n_extremities, - dependent_var[i], - & (*fs)->m[i]); + const double t = + gsl_cdf_tdist_Qinv ((1 - cmd.n_cinterval[0] / 100.0) / 2.0, + result->metrics[v].n - 1); - count++ ; - fs++; + if ( i > 0 || v > 0 ) + { + const int left_col = (i == 0) ? 0 : 1; + tab_hline (tbl, TAL_1, left_col, n_cols - 1, + heading_rows + row_var_start + i * DESCRIPTIVE_ROWS); } - } - } - - tab_submit (tbl); -} - - -/* Fill in the extremities table */ -static void -populate_extremes (struct tab_table *t, - int col, int row, int n, - const struct variable *var, - const struct metrics *m) -{ - int extremity; - int idx=0; - - tab_text (t, col, row, - TAB_RIGHT | TAT_TITLE , - _ ("Highest") - ); - - tab_text (t, col, row + n , - TAB_RIGHT | TAT_TITLE , - _ ("Lowest") - ); - - - tab_hline (t, TAL_1, col, col + 3, row + n ); - - for (extremity = 0; extremity < n ; ++extremity ) - { - /* Highest */ - tab_fixed (t, col + 1, row + extremity, - TAB_RIGHT, - extremity + 1, 8, 0); - - - /* Lowest */ - tab_fixed (t, col + 1, row + extremity + n, - TAB_RIGHT, - extremity + 1, 8, 0); - - } - - - /* Lowest */ - for (idx = 0, extremity = 0; extremity < n && idx < m->n_data ; ++idx ) - { - int j; - const struct weighted_value *wv = m->wvp[idx]; - struct case_node *cn = wv->case_nos; - - - for (j = 0 ; j < wv->w ; ++j ) - { - if ( extremity + j >= n ) - break ; - - tab_value (t, col + 3, row + extremity + j + n, - TAB_RIGHT, - &wv->v, var_get_print_format (var)); + if ( fctr->indep_var[0]) + { + struct string vstr; + ds_init_empty (&vstr); + var_append_value_name (fctr->indep_var[0], + result->value[0], &vstr); + + tab_text (tbl, 1, + heading_rows + row_var_start + i * DESCRIPTIVE_ROWS, + TAB_LEFT, + ds_cstr (&vstr) + ); - tab_fixed (t, col + 2, row + extremity + j + n, - TAB_RIGHT, - cn->num, 10, 0); + ds_destroy (&vstr); + } - if ( cn->next ) - cn = cn->next; + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + i * DESCRIPTIVE_ROWS, + TAB_LEFT, + _("Mean")); + + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + 1 + i * DESCRIPTIVE_ROWS, + TAB_LEFT | TAT_PRINTF, + _("%g%% Confidence Interval for Mean"), + cmd.n_cinterval[0]); + + tab_text (tbl, n_cols - 3, + heading_rows + row_var_start + 1 + i * DESCRIPTIVE_ROWS, + TAB_LEFT, + _("Lower Bound")); + + tab_text (tbl, n_cols - 3, + heading_rows + row_var_start + 2 + i * DESCRIPTIVE_ROWS, + TAB_LEFT, + _("Upper Bound")); + + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + 3 + i * DESCRIPTIVE_ROWS, + TAB_LEFT | TAT_PRINTF, + _("5%% Trimmed Mean")); + + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + 4 + i * DESCRIPTIVE_ROWS, + TAB_LEFT, + _("Median")); + + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + 5 + i * DESCRIPTIVE_ROWS, + TAB_LEFT, + _("Variance")); + + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + 6 + i * DESCRIPTIVE_ROWS, + TAB_LEFT, + _("Std. 
Deviation")); + + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + 7 + i * DESCRIPTIVE_ROWS, + TAB_LEFT, + _("Minimum")); + + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + 8 + i * DESCRIPTIVE_ROWS, + TAB_LEFT, + _("Maximum")); + + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + 9 + i * DESCRIPTIVE_ROWS, + TAB_LEFT, + _("Range")); + + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + 10 + i * DESCRIPTIVE_ROWS, + TAB_LEFT, + _("Interquartile Range")); + + + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + 11 + i * DESCRIPTIVE_ROWS, + TAB_LEFT, + _("Skewness")); + + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + 12 + i * DESCRIPTIVE_ROWS, + TAB_LEFT, + _("Kurtosis")); + + + /* Now the statistics ... */ + - tab_float (tbl, n_cols - 2, ++ tab_double (tbl, n_cols - 2, + heading_rows + row_var_start + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + result->metrics[v].mean, - 8, 2); ++ NULL); + - tab_float (tbl, n_cols - 1, ++ tab_double (tbl, n_cols - 1, + heading_rows + row_var_start + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + result->metrics[v].se_mean, - 8, 3); ++ NULL); + + - tab_float (tbl, n_cols - 2, ++ tab_double (tbl, n_cols - 2, + heading_rows + row_var_start + 1 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + result->metrics[v].mean - t * + result->metrics[v].se_mean, - 8, 3); ++ NULL); + - tab_float (tbl, n_cols - 2, ++ tab_double (tbl, n_cols - 2, + heading_rows + row_var_start + 2 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + result->metrics[v].mean + t * + result->metrics[v].se_mean, - 8, 3); ++ NULL); + + - tab_float (tbl, n_cols - 2, ++ tab_double (tbl, n_cols - 2, + heading_rows + row_var_start + 3 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + trimmed_mean_calculate ((struct trimmed_mean *) result->metrics[v].trimmed_mean), - 8, 2); ++ NULL); + + - tab_float (tbl, n_cols - 2, ++ tab_double (tbl, n_cols - 2, + heading_rows + row_var_start + 4 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + percentile_calculate (result->metrics[v].quartiles[1], percentile_algorithm), - 8, 2); ++ NULL); + + - tab_float (tbl, n_cols - 2, ++ tab_double (tbl, n_cols - 2, + heading_rows + row_var_start + 5 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + result->metrics[v].variance, - 8, 3); ++ NULL); + - tab_float (tbl, n_cols - 2, ++ tab_double (tbl, n_cols - 2, + heading_rows + row_var_start + 6 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + sqrt (result->metrics[v].variance), - 8, 3); ++ NULL); + - tab_float (tbl, n_cols - 2, ++ tab_double (tbl, n_cols - 2, + heading_rows + row_var_start + 10 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + percentile_calculate (result->metrics[v].quartiles[2], + percentile_algorithm) - + percentile_calculate (result->metrics[v].quartiles[0], + percentile_algorithm), - 8, 2); ++ NULL); + + - tab_float (tbl, n_cols - 2, ++ tab_double (tbl, n_cols - 2, + heading_rows + row_var_start + 11 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + result->metrics[v].skewness, - 8, 3); ++ NULL); + - tab_float (tbl, n_cols - 2, ++ tab_double (tbl, n_cols - 2, + heading_rows + row_var_start + 12 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + result->metrics[v].kurtosis, - 8, 3); ++ NULL); + - tab_float (tbl, n_cols - 1, ++ tab_double (tbl, n_cols - 1, + heading_rows + row_var_start + 11 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + calc_seskew (result->metrics[v].n), - 8, 3); ++ NULL); + - tab_float (tbl, n_cols - 1, ++ tab_double (tbl, n_cols - 1, + heading_rows + row_var_start + 12 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + calc_sekurt (result->metrics[v].n), - 8, 3); ++ NULL); + + { + struct 
extremum *minimum, *maximum ; + + struct ll *max_ll = ll_head (extrema_list (result->metrics[v].maxima)); + struct ll *min_ll = ll_head (extrema_list (result->metrics[v].minima)); + + maximum = ll_data (max_ll, struct extremum, ll); + minimum = ll_data (min_ll, struct extremum, ll); + - tab_float (tbl, n_cols - 2, ++ tab_double (tbl, n_cols - 2, + heading_rows + row_var_start + 7 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + minimum->value, - 8, 3); ++ NULL); + - tab_float (tbl, n_cols - 2, ++ tab_double (tbl, n_cols - 2, + heading_rows + row_var_start + 8 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + maximum->value, - 8, 3); ++ NULL); + - tab_float (tbl, n_cols - 2, ++ tab_double (tbl, n_cols - 2, + heading_rows + row_var_start + 9 + i * DESCRIPTIVE_ROWS, + TAB_CENTER, + maximum->value - minimum->value, - 8, 3); ++ NULL); + } } - - extremity += wv->w ; } + tab_vline (tbl, TAL_2, heading_columns, 0, n_rows - 1); - /* Highest */ - for (idx = m->n_data - 1, extremity = 0; extremity < n && idx >= 0; --idx ) - { - int j; - const struct weighted_value *wv = m->wvp[idx]; - struct case_node *cn = wv->case_nos; - - for (j = 0 ; j < wv->w ; ++j ) - { - if ( extremity + j >= n ) - break ; - - tab_value (t, col + 3, row + extremity + j, - TAB_RIGHT, - &wv->v, var_get_print_format (var)); - - tab_fixed (t, col + 2, row + extremity + j, - TAB_RIGHT, - cn->num, 10, 0); + tab_title (tbl, _("Descriptives")); - if ( cn->next ) - cn = cn->next; + tab_text (tbl, n_cols - 2, 0, TAB_CENTER | TAT_TITLE, + _("Statistic")); - } + tab_text (tbl, n_cols - 1, 0, TAB_CENTER | TAT_TITLE, + _("Std. Error")); - extremity += wv->w ; - } + tab_submit (tbl); } @@@ -1797,198 -1453,596 +1802,201 @@@ show_extremes (const struct variable ** 0, 0, n_cols - 1, n_rows - 1); - tab_hline (tbl, TAL_2, 0, n_cols - 1, heading_rows ); - tab_vline (tbl, TAL_1, heading_columns, 0, n_rows - 1); - tab_vline (tbl, TAL_2, n_cols - 2, 0, n_rows - 1); + tab_hline (tbl, TAL_2, 0, n_cols - 1, heading_rows ); + tab_hline (tbl, TAL_2, 1, n_cols - 1, heading_rows ); tab_vline (tbl, TAL_1, n_cols - 1, 0, n_rows - 1); - tab_text (tbl, n_cols - 2, 0, TAB_CENTER | TAT_TITLE, _ ("Statistic")); - tab_text (tbl, n_cols - 1, 0, TAB_CENTER | TAT_TITLE, _ ("Std. 
Error")); + if ( fctr->indep_var[0]) + tab_text (tbl, 1, 0, TAT_TITLE, var_to_string (fctr->indep_var[0])); - tab_title (tbl, _ ("Descriptives")); + if ( fctr->indep_var[1]) + tab_text (tbl, 2, 0, TAT_TITLE, var_to_string (fctr->indep_var[1])); - - for ( i = 0 ; i < n_dep_var ; ++i ) + for (v = 0 ; v < n_dep_var ; ++v ) { - const int row = heading_rows + i * n_stat_rows * n_factors ; - - if ( i > 0 ) - tab_hline (tbl, TAL_1, 0, n_cols - 1, row ); + struct ll *ll; + int i = 0; + const int row_var_start = v * cmd.st_n * 2 * ll_count(&fctr->result_list); - tab_text (tbl, 0, - i * n_stat_rows * n_factors + heading_rows, + tab_text (tbl, + 0, + heading_rows + row_var_start, TAB_LEFT | TAT_TITLE, - var_to_string (dependent_var[i]) + var_to_string (dependent_var[v]) ); - - if ( fctr ) + for (ll = ll_head (&fctr->result_list); + ll != ll_null (&fctr->result_list); i++, ll = ll_next (ll)) { - const union value *prev = NULL; + int e ; + struct ll *min_ll; + struct ll *max_ll; + const int row_result_start = i * cmd.st_n * 2; - struct factor_statistics **fs = fctr->fs; - int count = 0; + const struct factor_result *result = + ll_data (ll, struct factor_result, ll); - tab_text (tbl, 1, heading_rows - 1, TAB_CENTER | TAT_TITLE, - var_to_string (fctr->indep_var[0])); + if (i > 0 || v > 0) + tab_hline (tbl, TAL_1, 1, n_cols - 1, + heading_rows + row_var_start + row_result_start); + tab_hline (tbl, TAL_1, heading_columns - 2, n_cols - 1, + heading_rows + row_var_start + row_result_start + cmd.st_n); - if ( fctr->indep_var[1]) - tab_text (tbl, 2, heading_rows - 1, TAB_CENTER | TAT_TITLE, - var_to_string (fctr->indep_var[1])); - - while ( *fs ) + for ( e = 1; e <= cmd.st_n; ++e ) { - const int row = heading_rows + n_stat_rows * - ( ( i * n_factors ) + count ); - - - if ( !prev || 0 != compare_values (prev, (*fs)->id[0], - var_get_width (fctr->indep_var[0]))) - { - struct string vstr; - ds_init_empty (&vstr); - var_append_value_name (fctr->indep_var[0], - (*fs)->id[0], &vstr); - - if ( count > 0 ) - tab_hline (tbl, TAL_1, 1, n_cols - 1, row); - - tab_text (tbl, - 1, row, - TAB_LEFT | TAT_TITLE, - ds_cstr (&vstr) - ); - - ds_destroy (&vstr); - } + tab_text (tbl, n_cols - 3, + heading_rows + row_var_start + row_result_start + e - 1, + TAB_RIGHT | TAT_PRINTF, + _("%d"), e); + + tab_text (tbl, n_cols - 3, + heading_rows + row_var_start + row_result_start + cmd.st_n + e - 1, + TAB_RIGHT | TAT_PRINTF, + _("%d"), e); + } - prev = (*fs)->id[0]; - if (fctr->indep_var[1] && count > 0 ) - tab_hline (tbl, TAL_1, 2, n_cols - 1, row); + min_ll = ll_head (extrema_list (result->metrics[v].minima)); + for (e = 0; e < cmd.st_n;) + { + struct extremum *minimum = ll_data (min_ll, struct extremum, ll); + double weight = minimum->weight; - if ( fctr->indep_var[1]) + while (weight-- > 0 && e < cmd.st_n) { - tab_float (tbl, n_cols - 1, - struct string vstr; - ds_init_empty (&vstr); - var_append_value_name (fctr->indep_var[1], (*fs)->id[1], &vstr); - - tab_text (tbl, 2, row, - TAB_LEFT | TAT_TITLE, - ds_cstr (&vstr) - ); - - ds_destroy (&vstr); ++ tab_double (tbl, n_cols - 1, + heading_rows + row_var_start + row_result_start + cmd.st_n + e, + TAB_RIGHT, + minimum->value, - 8, 2); ++ NULL); + + - tab_float (tbl, n_cols - 2, - heading_rows + row_var_start + row_result_start + cmd.st_n + e, ++ tab_fixed (tbl, n_cols - 2, ++ heading_rows + row_var_start + ++ row_result_start + cmd.st_n + e, + TAB_RIGHT, + minimum->location, - 8, 0); ++ 10, 0); + ++e; } - populate_descriptives (tbl, heading_columns - 2, - row, - dependent_var[i], - & 
(*fs)->m[i]); - - count++ ; - fs++; + min_ll = ll_next (min_ll); } - } - - else - { - - populate_descriptives (tbl, heading_columns - 2, - i * n_stat_rows * n_factors + heading_rows, - dependent_var[i], - &totals[i]); - } - } - - tab_submit (tbl); -} - -/* Fill in the descriptives data */ -static void -populate_descriptives (struct tab_table *tbl, int col, int row, - const struct variable *var, - const struct metrics *m) -{ - const double t = gsl_cdf_tdist_Qinv ((1 - cmd.n_cinterval[0] / 100.0)/2.0, - m->n -1); - - tab_text (tbl, col, - row, - TAB_LEFT | TAT_TITLE, - _ ("Mean")); - - tab_double (tbl, col + 2, - row, - TAB_CENTER, - m->mean, - NULL); - - tab_double (tbl, col + 3, - row, - TAB_CENTER, - m->se_mean, - NULL); - - - tab_text (tbl, col, - row + 1, - TAB_LEFT | TAT_TITLE | TAT_PRINTF, - _ ("%g%% Confidence Interval for Mean"), cmd.n_cinterval[0]); - - - tab_text (tbl, col + 1, - row + 1, - TAB_LEFT | TAT_TITLE, - _ ("Lower Bound")); - - tab_double (tbl, col + 2, - row + 1, - TAB_CENTER, - m->mean - t * m->se_mean, - NULL); - - tab_text (tbl, col + 1, - row + 2, - TAB_LEFT | TAT_TITLE, - _ ("Upper Bound")); - - - tab_double (tbl, col + 2, - row + 2, - TAB_CENTER, - m->mean + t * m->se_mean, - NULL); - - tab_text (tbl, col, - row + 3, - TAB_LEFT | TAT_TITLE | TAT_PRINTF, - _ ("5%% Trimmed Mean")); - - tab_double (tbl, col + 2, - row + 3, - TAB_CENTER, - m->trimmed_mean, - NULL); - - tab_text (tbl, col, - row + 4, - TAB_LEFT | TAT_TITLE, - _ ("Median")); - - { - struct percentile *p; - double d = 50; - - p = hsh_find (m->ptile_hash, &d); - - assert (p); - - - tab_double (tbl, col + 2, - row + 4, - TAB_CENTER, - p->v, - NULL); - } - - - tab_text (tbl, col, - row + 5, - TAB_LEFT | TAT_TITLE, - _ ("Variance")); - - tab_double (tbl, col + 2, - row + 5, - TAB_CENTER, - m->var, - NULL); - - - tab_text (tbl, col, - row + 6, - TAB_LEFT | TAT_TITLE, - _ ("Std. 
Deviation")); - - - tab_double (tbl, col + 2, - row + 6, - TAB_CENTER, - m->stddev, - NULL); - - - tab_text (tbl, col, - row + 7, - TAB_LEFT | TAT_TITLE, - _ ("Minimum")); - - tab_double (tbl, col + 2, - row + 7, - TAB_CENTER, - m->min, var_get_print_format (var)); - - tab_text (tbl, col, - row + 8, - TAB_LEFT | TAT_TITLE, - _ ("Maximum")); - - tab_double (tbl, col + 2, - row + 8, - TAB_CENTER, - m->max, var_get_print_format (var)); - - tab_text (tbl, col, - row + 9, - TAB_LEFT | TAT_TITLE, - _ ("Range")); - - - tab_double (tbl, col + 2, - row + 9, - TAB_CENTER, - m->max - m->min, - NULL); - - tab_text (tbl, col, - row + 10, - TAB_LEFT | TAT_TITLE, - _ ("Interquartile Range")); - - { - struct percentile *p1; - struct percentile *p2; - - double d = 75; - p1 = hsh_find (m->ptile_hash, &d); - - d = 25; - p2 = hsh_find (m->ptile_hash, &d); - - assert (p1); - assert (p2); - - tab_double (tbl, col + 2, - row + 10, - TAB_CENTER, - p1->v - p2->v, - NULL); - } - - tab_text (tbl, col, - row + 11, - TAB_LEFT | TAT_TITLE, - _ ("Skewness")); - - - tab_double (tbl, col + 2, - row + 11, - TAB_CENTER, - m->skewness, - NULL); - - /* stderr of skewness */ - tab_double (tbl, col + 3, - row + 11, - TAB_CENTER, - calc_seskew (m->n), - NULL); - - tab_text (tbl, col, - row + 12, - TAB_LEFT | TAT_TITLE, - _ ("Kurtosis")); - - - tab_double (tbl, col + 2, - row + 12, - TAB_CENTER, - m->kurtosis, - NULL); - - /* stderr of kurtosis */ - tab_double (tbl, col + 3, - row + 12, - TAB_CENTER, - calc_sekurt (m->n), - NULL); -} - - - -void -box_plot_variables (const struct factor *fctr, - const struct variable **vars, int n_vars, - const struct variable *id) -{ - - int i; - struct factor_statistics **fs ; - - if ( ! fctr ) - { - box_plot_group (fctr, vars, n_vars, id); - return; - } - - for ( fs = fctr->fs ; *fs ; ++fs ) - { - struct string str; - double y_min = DBL_MAX; - double y_max = -DBL_MAX; - struct chart *ch = chart_create (); - ds_init_empty (&str); - factor_to_string (fctr, *fs, 0, &str ); - - chart_write_title (ch, ds_cstr (&str)); - - for ( i = 0 ; i < n_vars ; ++i ) - { - y_max = MAX (y_max, (*fs)->m[i].max); - y_min = MIN (y_min, (*fs)->m[i].min); - } - - boxplot_draw_yscale (ch, y_max, y_min); - - for ( i = 0 ; i < n_vars ; ++i ) - { - - const double box_width = (ch->data_right - ch->data_left) - / (n_vars * 2.0 ) ; - - const double box_centre = ( i * 2 + 1) * box_width - + ch->data_left; - - boxplot_draw_boxplot (ch, - box_centre, box_width, - & (*fs)->m[i], - var_to_string (vars[i])); - - - } - - chart_submit (ch); - ds_destroy (&str); - } -} - - - -/* Do a box plot, grouping all factors into one plot ; - each dependent variable has its own plot. 
-*/ -void -box_plot_group (const struct factor *fctr, - const struct variable **vars, - int n_vars, - const struct variable *id UNUSED) -{ - - int i; - - for ( i = 0 ; i < n_vars ; ++i ) - { - struct factor_statistics **fs ; - struct chart *ch; - - ch = chart_create (); + max_ll = ll_head (extrema_list (result->metrics[v].maxima)); + for (e = 0; e < cmd.st_n;) + { + struct extremum *maximum = ll_data (max_ll, struct extremum, ll); + double weight = maximum->weight; - boxplot_draw_yscale (ch, totals[i].max, totals[i].min); + while (weight-- > 0 && e < cmd.st_n) + { - tab_float (tbl, n_cols - 1, - heading_rows + row_var_start + row_result_start + e, ++ tab_double (tbl, n_cols - 1, ++ heading_rows + row_var_start + ++ row_result_start + e, + TAB_RIGHT, + maximum->value, - 8, 2); ++ NULL); + + - tab_float (tbl, n_cols - 2, - heading_rows + row_var_start + row_result_start + e, ++ tab_fixed (tbl, n_cols - 2, ++ heading_rows + row_var_start + ++ row_result_start + e, + TAB_RIGHT, + maximum->location, - 8, 0); ++ 10, 0); + ++e; + } - if ( fctr ) - { - int n_factors = 0; - int f=0; - for ( fs = fctr->fs ; *fs ; ++fs ) - ++n_factors; + max_ll = ll_next (max_ll); + } - chart_write_title (ch, _ ("Boxplot of %s vs. %s"), - var_to_string (vars[i]), var_to_string (fctr->indep_var[0]) ); - for ( fs = fctr->fs ; *fs ; ++fs ) + if ( fctr->indep_var[0]) { - struct string str; - const double box_width = (ch->data_right - ch->data_left) - / (n_factors * 2.0 ) ; - - const double box_centre = ( f++ * 2 + 1) * box_width - + ch->data_left; - - ds_init_empty (&str); - factor_to_string_concise (fctr, *fs, &str); + struct string vstr; + ds_init_empty (&vstr); + var_append_value_name (fctr->indep_var[0], + result->value[0], &vstr); + + tab_text (tbl, 1, + heading_rows + row_var_start + row_result_start, + TAB_LEFT, + ds_cstr (&vstr) + ); - boxplot_draw_boxplot (ch, - box_centre, box_width, - & (*fs)->m[i], - ds_cstr (&str)); - ds_destroy (&str); + ds_destroy (&vstr); } - } - else if ( ch ) - { - const double box_width = (ch->data_right - ch->data_left) / 3.0; - const double box_centre = (ch->data_right + ch->data_left) / 2.0; - chart_write_title (ch, _ ("Boxplot")); - boxplot_draw_boxplot (ch, - box_centre, box_width, - &totals[i], - var_to_string (vars[i]) ); + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + row_result_start, + TAB_RIGHT, + _("Highest")); + tab_text (tbl, n_cols - 4, + heading_rows + row_var_start + row_result_start + cmd.st_n, + TAB_RIGHT, + _("Lowest")); } - - chart_submit (ch); } -} - -/* Plot the normal and detrended normal plots for m - Label the plots with factorname */ -void -np_plot (const struct metrics *m, const char *factorname) -{ - int i; - double yfirst=0, ylast=0; - - /* Normal Plot */ - struct chart *np_chart; - - /* Detrended Normal Plot */ - struct chart *dnp_chart; - - /* The slope and intercept of the ideal normal probability line */ - const double slope = 1.0 / m->stddev; - const double intercept = - m->mean / m->stddev; - - /* Cowardly refuse to plot an empty data set */ - if ( m->n_data == 0 ) - return ; - - np_chart = chart_create (); - dnp_chart = chart_create (); - - if ( !np_chart || ! 
dnp_chart ) - return ; - - chart_write_title (np_chart, _ ("Normal Q-Q Plot of %s"), factorname); - chart_write_xlabel (np_chart, _ ("Observed Value")); - chart_write_ylabel (np_chart, _ ("Expected Normal")); - - - chart_write_title (dnp_chart, _ ("Detrended Normal Q-Q Plot of %s"), - factorname); - chart_write_xlabel (dnp_chart, _ ("Observed Value")); - chart_write_ylabel (dnp_chart, _ ("Dev from Normal")); - - yfirst = gsl_cdf_ugaussian_Pinv (m->wvp[0]->rank / ( m->n + 1)); - ylast = gsl_cdf_ugaussian_Pinv (m->wvp[m->n_data-1]->rank / ( m->n + 1)); - - - { - /* Need to make sure that both the scatter plot and the ideal fit into the - plot */ - double x_lower = MIN (m->min, (yfirst - intercept) / slope) ; - double x_upper = MAX (m->max, (ylast - intercept) / slope) ; - double slack = (x_upper - x_lower) * 0.05 ; - - chart_write_xscale (np_chart, x_lower - slack, x_upper + slack, 5); - - chart_write_xscale (dnp_chart, m->min, m->max, 5); - - } - - chart_write_yscale (np_chart, yfirst, ylast, 5); - - { - /* We have to cache the detrended data, beacause we need to - find its limits before we can plot it */ - double *d_data = xnmalloc (m->n_data, sizeof *d_data); - double d_max = -DBL_MAX; - double d_min = DBL_MAX; - for ( i = 0 ; i < m->n_data; ++i ) - { - const double ns = gsl_cdf_ugaussian_Pinv (m->wvp[i]->rank / ( m->n + 1)); + tab_vline (tbl, TAL_2, heading_columns, 0, n_rows - 1); - chart_datum (np_chart, 0, m->wvp[i]->v.f, ns); - d_data[i] = (m->wvp[i]->v.f - m->mean) / m->stddev - ns; + tab_title (tbl, _("Extreme Values")); - if ( d_data[i] < d_min ) d_min = d_data[i]; - if ( d_data[i] > d_max ) d_max = d_data[i]; - } - chart_write_yscale (dnp_chart, d_min, d_max, 5); - for ( i = 0 ; i < m->n_data; ++i ) - chart_datum (dnp_chart, 0, m->wvp[i]->v.f, d_data[i]); + tab_text (tbl, n_cols - 2, 0, TAB_CENTER | TAT_TITLE, + _("Case Number")); - free (d_data); - } - chart_line (np_chart, slope, intercept, yfirst, ylast , CHART_DIM_Y); - chart_line (dnp_chart, 0, 0, m->min, m->max , CHART_DIM_X); + tab_text (tbl, n_cols - 1, 0, TAB_CENTER | TAT_TITLE, + _("Value")); - chart_submit (np_chart); - chart_submit (dnp_chart); + tab_submit (tbl); } +#define PERCENTILE_ROWS 2 - - -/* Show the percentiles */ -void +static void show_percentiles (const struct variable **dependent_var, - int n_dep_var, - struct factor *fctr) + int n_dep_var, + const struct xfactor *fctr) { - struct tab_table *tbl; int i; + int v; + int heading_columns = 2; + int n_cols; + const int n_percentiles = subc_list_double_count (&percentile_list); + const int heading_rows = 2; + struct tab_table *tbl; - int n_cols, n_rows; - int n_factors; - - struct hsh_table *ptiles ; - - int n_heading_columns; - const int n_heading_rows = 2; - const int n_stat_rows = 2; + int n_rows ; + n_rows = n_dep_var; - int n_ptiles ; + assert (fctr); - if ( fctr ) + if ( fctr->indep_var[0] ) { - struct factor_statistics **fs = fctr->fs ; - n_heading_columns = 3; - n_factors = hsh_count (fctr->fstats); - - ptiles = (*fs)->m[0].ptile_hash; + heading_columns = 3; if ( fctr->indep_var[1] ) - n_heading_columns = 4; - } - else - { - n_factors = 1; - n_heading_columns = 2; - - ptiles = totals[0].ptile_hash; + { + heading_columns = 4; + } } - n_ptiles = hsh_count (ptiles); - - n_rows = n_heading_rows + n_dep_var * n_stat_rows * n_factors; + n_rows *= ll_count (&fctr->result_list) * PERCENTILE_ROWS; + n_rows += heading_rows; - n_cols = n_heading_columns + n_ptiles ; + n_cols = heading_columns + n_percentiles; tbl = tab_create (n_cols, n_rows, 0); - - tab_headers 
(tbl, n_heading_columns + 1, 0, n_heading_rows, 0); + tab_headers (tbl, heading_columns, 0, heading_rows, 0); tab_dim (tbl, tab_natural_dimensions); @@@ -1999,140 -2053,147 +2007,140 @@@ 0, 0, n_cols - 1, n_rows - 1); - tab_hline (tbl, TAL_2, 0, n_cols - 1, n_heading_rows ); - - tab_vline (tbl, TAL_2, n_heading_columns, 0, n_rows - 1); - - - tab_title (tbl, _ ("Percentiles")); - - - tab_hline (tbl, TAL_1, n_heading_columns, n_cols - 1, 1 ); - - tab_box (tbl, - -1, -1, - -1, TAL_1, - 0, n_heading_rows, - n_heading_columns - 1, n_rows - 1); - - - tab_box (tbl, - -1, -1, - -1, TAL_1, - n_heading_columns, n_heading_rows - 1, - n_cols - 1, n_rows - 1); - - tab_joint_text (tbl, n_heading_columns + 1, 0, - n_cols - 1 , 0, - TAB_CENTER | TAT_TITLE , - _ ("Percentiles")); - - - { - /* Put in the percentile break points as headings */ + tab_hline (tbl, TAL_2, 0, n_cols - 1, heading_rows ); + tab_hline (tbl, TAL_2, 1, n_cols - 1, heading_rows ); - struct percentile **p = (struct percentile **) hsh_sort (ptiles); + if ( fctr->indep_var[0]) + tab_text (tbl, 1, 1, TAT_TITLE, var_to_string (fctr->indep_var[0])); - i = 0; - while ( (*p) ) - { - tab_fixed (tbl, n_heading_columns + i++ , 1, - TAB_CENTER, - (*p)->p, - 8, 0); - p++; - } + if ( fctr->indep_var[1]) + tab_text (tbl, 2, 1, TAT_TITLE, var_to_string (fctr->indep_var[1])); - } - - for ( i = 0 ; i < n_dep_var ; ++i ) + for (v = 0 ; v < n_dep_var ; ++v ) { - const int n_stat_rows = 2; - const int row = n_heading_rows + i * n_stat_rows * n_factors ; + double hinges[3]; + struct ll *ll; + int i = 0; - if ( i > 0 ) - tab_hline (tbl, TAL_1, 0, n_cols - 1, row ); + const int row_var_start = + v * PERCENTILE_ROWS * ll_count(&fctr->result_list); - tab_text (tbl, 0, - i * n_stat_rows * n_factors + n_heading_rows, + tab_text (tbl, + 0, + heading_rows + row_var_start, TAB_LEFT | TAT_TITLE, - var_to_string (dependent_var[i]) + var_to_string (dependent_var[v]) ); - if ( fctr ) + for (ll = ll_head (&fctr->result_list); + ll != ll_null (&fctr->result_list); i++, ll = ll_next (ll)) { - const union value *prev = NULL ; - struct factor_statistics **fs = fctr->fs; - int count = 0; + int j; + const struct factor_result *result = + ll_data (ll, struct factor_result, ll); - tab_text (tbl, 1, n_heading_rows - 1, - TAB_CENTER | TAT_TITLE, - var_to_string (fctr->indep_var[0])); + if ( i > 0 || v > 0 ) + { + const int left_col = (i == 0) ? 
0 : 1; + tab_hline (tbl, TAL_1, left_col, n_cols - 1, + heading_rows + row_var_start + i * PERCENTILE_ROWS); + } + if ( fctr->indep_var[0]) + { + struct string vstr; + ds_init_empty (&vstr); + var_append_value_name (fctr->indep_var[0], + result->value[0], &vstr); + + tab_text (tbl, 1, + heading_rows + row_var_start + i * PERCENTILE_ROWS, + TAB_LEFT, + ds_cstr (&vstr) + ); - if ( fctr->indep_var[1]) - tab_text (tbl, 2, n_heading_rows - 1, TAB_CENTER | TAT_TITLE, - var_to_string (fctr->indep_var[1])); + ds_destroy (&vstr); + } - while ( *fs ) - { - const int row = n_heading_rows + n_stat_rows * - ( ( i * n_factors ) + count ); + tab_text (tbl, n_cols - n_percentiles - 1, + heading_rows + row_var_start + i * PERCENTILE_ROWS, + TAB_LEFT, + ptile_alg_desc [percentile_algorithm]); - if ( !prev || 0 != compare_values (prev, (*fs)->id[0], - var_get_width (fctr->indep_var[0]))) - { - struct string vstr; - ds_init_empty (&vstr); - var_append_value_name (fctr->indep_var[0], - (*fs)->id[0], &vstr); + tab_text (tbl, n_cols - n_percentiles - 1, + heading_rows + row_var_start + 1 + i * PERCENTILE_ROWS, + TAB_LEFT, + _("Tukey's Hinges")); - if ( count > 0 ) - tab_hline (tbl, TAL_1, 1, n_cols - 1, row); - tab_text (tbl, - 1, row, - TAB_LEFT | TAT_TITLE, - ds_cstr (&vstr) - ); + tab_vline (tbl, TAL_1, n_cols - n_percentiles -1, heading_rows, n_rows - 1); - ds_destroy (&vstr); - } + tukey_hinges_calculate ((struct tukey_hinges *) result->metrics[v].tukey_hinges, + hinges); - prev = (*fs)->id[0]; + for (j = 0; j < n_percentiles; ++j) + { + double hinge = SYSMIS; - tab_float (tbl, n_cols - n_percentiles + j, ++ tab_double (tbl, n_cols - n_percentiles + j, + heading_rows + row_var_start + i * PERCENTILE_ROWS, + TAB_CENTER, + percentile_calculate (result->metrics[v].ptl[j], + percentile_algorithm), - 8, 2 ++ NULL + ); + + if ( result->metrics[v].ptl[j]->ptile == 0.5) + hinge = hinges[1]; + else if ( result->metrics[v].ptl[j]->ptile == 0.25) + hinge = hinges[0]; + else if ( result->metrics[v].ptl[j]->ptile == 0.75) + hinge = hinges[2]; + + if ( hinge != SYSMIS) - tab_float (tbl, n_cols - n_percentiles + j, ++ tab_double (tbl, n_cols - n_percentiles + j, + heading_rows + row_var_start + 1 + i * PERCENTILE_ROWS, + TAB_CENTER, + hinge, - 8, 2 ++ NULL + ); - if (fctr->indep_var[1] && count > 0 ) - tab_hline (tbl, TAL_1, 2, n_cols - 1, row); + } + } + } - if ( fctr->indep_var[1]) - { - struct string vstr; - ds_init_empty (&vstr); - var_append_value_name (fctr->indep_var[1], (*fs)->id[1], &vstr); + tab_vline (tbl, TAL_2, heading_columns, 0, n_rows - 1); - tab_text (tbl, 2, row, - TAB_LEFT | TAT_TITLE, - ds_cstr (&vstr) - ); + tab_title (tbl, _("Percentiles")); - ds_destroy (&vstr); - } + for (i = 0 ; i < n_percentiles; ++i ) + { + tab_text (tbl, n_cols - n_percentiles + i, 1, + TAB_CENTER | TAT_TITLE | TAT_PRINTF, + _("%g"), + subc_list_double_at (&percentile_list, i) + ); - populate_percentiles (tbl, n_heading_columns - 1, - row, - & (*fs)->m[i]); + } - count++ ; - fs++; - } + tab_joint_text (tbl, + n_cols - n_percentiles, 0, + n_cols - 1, 0, + TAB_CENTER | TAT_TITLE, + _("Percentiles")); + /* Vertical lines for the data only */ + tab_box (tbl, + -1, -1, + -1, TAL_1, + n_cols - n_percentiles, 1, + n_cols - 1, n_rows - 1); - } - else - { - populate_percentiles (tbl, n_heading_columns - 1, - i * n_stat_rows * n_factors + n_heading_rows, - &totals[i]); - } - } + tab_hline (tbl, TAL_1, n_cols - n_percentiles, n_cols - 1, 1); tab_submit (tbl); diff --cc src/language/stats/frequencies.q index cd370be8,a09ecc10..25866a41 --- 
a/src/language/stats/frequencies.q +++ b/src/language/stats/frequencies.q @@@ -376,12 -376,12 +376,12 @@@ internal_cmd_frequencies (struct lexer for (; casegrouper_get_next_group (grouper, &group); casereader_destroy (group)) { - struct ccase c; + struct ccase *c; precalc (group, ds); - for (; casereader_read (group, &c); case_destroy (&c)) - calc (&c, ds); + for (; (c = casereader_read (group)) != NULL; case_unref (c)) + calc (c, ds); - postcalc (); + postcalc (ds); } ok = casegrouper_destroy (grouper); ok = proc_commit (ds) && ok; diff --cc src/language/stats/oneway.q index be57eb24,34f2a216..40107f77 --- a/src/language/stats/oneway.q +++ b/src/language/stats/oneway.q @@@ -88,19 -89,19 +89,19 @@@ static void run_oneway (struct cmd_onew /* Routines to show the output tables */ - static void show_anova_table (void); - static void show_descriptives (void); - static void show_homogeneity (void); + static void show_anova_table(void); + static void show_descriptives (const struct dictionary *dict); + static void show_homogeneity(void); -static void show_contrast_coeffs(short *); -static void show_contrast_tests(short *); +static void show_contrast_coeffs (short *); +static void show_contrast_tests (short *); enum stat_table_t {STAT_DESC = 1, STAT_HOMO = 2}; -static enum stat_table_t stat_tables ; +static enum stat_table_t stat_tables; - void output_oneway (void); + static void output_oneway (const struct dictionary *dict); int @@@ -148,11 -148,11 +149,11 @@@ cmd_oneway (struct lexer *lexer, struc } - void - output_oneway (void) + static void + output_oneway (const struct dictionary *dict) { size_t i; - short *bad_contrast ; + short *bad_contrast; bad_contrast = xnmalloc (cmd.sbc_contrast, sizeof *bad_contrast); @@@ -180,12 -180,12 +181,12 @@@ } if ( stat_tables & STAT_DESC ) - show_descriptives (); + show_descriptives (dict); if ( stat_tables & STAT_HOMO ) - show_homogeneity(); + show_homogeneity (); - show_anova_table(); + show_anova_table (); if (cmd.sbc_contrast ) { @@@ -323,24 -326,27 +324,24 @@@ show_anova_table (void /* Degrees of freedom */ - tab_float (t, 3, i * 3 + 1, 0, df1, 4, 0); - tab_float (t, 3, i * 3 + 2, 0, df2, 4, 0); - tab_float (t, 3, i * 3 + 3, 0, totals->n - 1, 4, 0); + tab_fixed (t, 3, i * 3 + 1, 0, df1, 4, 0); + tab_fixed (t, 3, i * 3 + 2, 0, df2, 4, 0); + tab_fixed (t, 3, i * 3 + 3, 0, totals->n - 1, 4, 0); /* Mean Squares */ - tab_float (t, 4, i * 3 + 1, TAB_RIGHT, msa, 8, 3); - tab_float (t, 4, i * 3 + 2, TAB_RIGHT, gp->mse, 8, 3); + tab_double (t, 4, i * 3 + 1, TAB_RIGHT, msa, NULL); + tab_double (t, 4, i * 3 + 2, TAB_RIGHT, gp->mse, NULL); - { - const double F = msa/gp->mse; + const double F = msa / gp->mse ; /* The F value */ - tab_float (t, 5, i * 3 + 1, 0, F, 8, 3); + tab_double (t, 5, i * 3 + 1, 0, F, NULL); /* The significance */ - tab_float (t, 6, i * 3 + 1, 0, gsl_cdf_fdist_Q (F, df1, df2), 8, 3); - tab_double (t, 6, i * 3 + 1, 0, gsl_cdf_fdist_Q (F, df1,df2), NULL); ++ tab_double (t, 6, i * 3 + 1, 0, gsl_cdf_fdist_Q (F, df1, df2), NULL); } - } - } @@@ -361,13 -367,15 +362,15 @@@ show_descriptives (const struct diction const double confidence = 0.95; const double q = (1.0 - confidence) / 2.0; + const struct variable *wv = dict_get_weight (dict); + const struct fmt_spec *wfmt = wv ? 
var_get_print_format (wv) : & F_8_0; - int n_rows = 2 ; + int n_rows = 2; - for ( v = 0 ; v < n_vars ; ++v ) + for ( v = 0; v < n_vars; ++v ) n_rows += group_proc_get (vars[v])->n_groups + 1; - t = tab_create (n_cols,n_rows,0); + t = tab_create (n_cols, n_rows, 0); tab_headers (t, 2, 0, 2, 0); tab_dim (t, tab_natural_dimensions); @@@ -416,9 -423,10 +419,10 @@@ struct group_statistics *totals = &gp->ugs; const char *s = var_to_string (vars[v]); + const struct fmt_spec *fmt = var_get_print_format (vars[v]); struct group_statistics *const *gs_array = - (struct group_statistics *const *) hsh_sort(gp->group_hash); + (struct group_statistics *const *) hsh_sort (gp->group_hash); int count = 0; tab_text (t, 0, row, TAB_LEFT | TAT_TITLE, s); @@@ -467,17 -476,17 +472,17 @@@ } tab_text (t, 1, row + count, - TAB_LEFT | TAT_TITLE ,_("Total")); + TAB_LEFT | TAT_TITLE, _("Total")); - tab_float (t, 2, row + count, 0, totals->n, 8, 0); + tab_double (t, 2, row + count, 0, totals->n, wfmt); - tab_float (t, 3, row + count, 0, totals->mean, 8, 2); + tab_double (t, 3, row + count, 0, totals->mean, NULL); - tab_float (t, 4, row + count, 0, totals->std_dev, 8, 2); + tab_double (t, 4, row + count, 0, totals->std_dev, NULL); - std_error = totals->std_dev/sqrt (totals->n); + std_error = totals->std_dev / sqrt (totals->n) ; - tab_float (t, 5, row + count, 0, std_error, 8, 2); + tab_double (t, 5, row + count, 0, std_error, NULL); /* Now the confidence interval */ @@@ -761,25 -773,25 +767,25 @@@ show_contrast_tests (short *bad_contras df_numerator += (coef * coef) * winv; df_denominator += pow2((coef * coef) * winv) / (gs->n - 1); } - sec_vneq = sqrt(sec_vneq); + sec_vneq = sqrt (sec_vneq); - df_numerator = pow2(df_numerator); + df_numerator = pow2 (df_numerator); - tab_float (t, 3, (v * lines_per_variable) + i + 1, - TAB_RIGHT, contrast_value, 8, 2); + tab_double (t, 3, (v * lines_per_variable) + i + 1, + TAB_RIGHT, contrast_value, NULL); - tab_float (t, 3, (v * lines_per_variable) + i + 1 + + tab_double (t, 3, (v * lines_per_variable) + i + 1 + cmd.sbc_contrast, - TAB_RIGHT, contrast_value, 8, 2); + TAB_RIGHT, contrast_value, NULL); std_error_contrast = sqrt (grp_data->mse * coef_msq); /* Std. Error */ - tab_float (t, 4, (v * lines_per_variable) + i + 1, + tab_double (t, 4, (v * lines_per_variable) + i + 1, TAB_RIGHT, std_error_contrast, - 8, 3); + NULL); - T = fabs(contrast_value / std_error_contrast) ; + T = fabs (contrast_value / std_error_contrast); /* T Statistic */ @@@ -796,41 -808,41 +802,36 @@@ /* Significance TWO TAILED !!*/ - tab_float (t, 7, (v * lines_per_variable) + i + 1, + tab_double (t, 7, (v * lines_per_variable) + i + 1, TAB_RIGHT, 2 * gsl_cdf_tdist_Q (T, df), - 8, 3); - + NULL); - /* Now for the Variances NOT Equal case */ /* Std. 
Error */ - tab_float (t, 4, + tab_double (t, 4, (v * lines_per_variable) + i + 1 + cmd.sbc_contrast, TAB_RIGHT, sec_vneq, - 8, 3); - + NULL); - T = contrast_value / sec_vneq; - tab_float (t, 5, + tab_double (t, 5, (v * lines_per_variable) + i + 1 + cmd.sbc_contrast, TAB_RIGHT, T, - 8, 3); - + NULL); - df = df_numerator / df_denominator; - tab_float (t, 6, + tab_double (t, 6, (v * lines_per_variable) + i + 1 + cmd.sbc_contrast, TAB_RIGHT, df, - 8, 3); + NULL); /* The Significance */ - tab_float (t, 7, (v * lines_per_variable) + i + 1 + cmd.sbc_contrast, - TAB_RIGHT, 2 * gsl_cdf_tdist_Q (T, df), - 8, 3); - - + tab_double (t, 7, (v * lines_per_variable) + i + 1 + cmd.sbc_contrast, + TAB_RIGHT, 2 * gsl_cdf_tdist_Q (T,df), + NULL); - - } if ( v > 0 ) diff --cc src/language/stats/reliability.q index 25aff211,00000000..0e7f91a0 mode 100644,000000..100644 --- a/src/language/stats/reliability.q +++ b/src/language/stats/reliability.q @@@ -1,812 -1,0 +1,824 @@@ +/* PSPP - a program for statistical analysis. + Copyright (C) 2008, 2009 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. */ + +#include + +#include "xalloc.h" +#include "xmalloca.h" + +#include "gettext.h" +#define _(msgid) gettext (msgid) +#define N_(msgid) msgid + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +/* (headers) */ + +/* (specification) + reliability (rel_): + *^variables=varlist("PV_NO_SCRATCH | PV_NUMERIC"); + scale=custom; + missing=miss:!exclude/include; + model=custom; + method=covariance; + +summary[sum_]=total.
+*/ +/* (declarations) */ +/* (functions) */ + + +static int rel_custom_scale (struct lexer *lexer, struct dataset *ds, + struct cmd_reliability *p, void *aux); + +static int rel_custom_model (struct lexer *, struct dataset *, + struct cmd_reliability *, void *); + +int cmd_reliability (struct lexer *lexer, struct dataset *ds); + +struct cronbach +{ + const struct variable **items; + size_t n_items; + double alpha; + double sum_of_variances; + double variance_of_sums; + int totals_idx; /* Casereader index into the totals */ + + struct moments1 **m ; /* Moments of the items */ + struct moments1 *total ; /* Moments of the totals */ +}; + +#if 0 +static void +dump_cronbach (const struct cronbach *s) +{ + int i; + printf ("N items %d\n", s->n_items); + for (i = 0 ; i < s->n_items; ++i) + { + printf ("%s\n", var_get_name (s->items[i])); + } + + printf ("Totals idx %d\n", s->totals_idx); + + printf ("scale variance %g\n", s->variance_of_sums); + printf ("alpha %g\n", s->alpha); + putchar ('\n'); +} +#endif + +enum model + { + MODEL_ALPHA, + MODEL_SPLIT + }; + + +struct reliability +{ ++ const struct dictionary *dict; + const struct variable **variables; + int n_variables; + enum mv_class exclude; + + struct cronbach *sc; + int n_sc; + + int total_start; + + struct string scale_name; + + enum model model; + int split_point; +}; + + +static double +alpha (int k, double sum_of_variances, double variance_of_sums) +{ + return k / ( k - 1.0) * ( 1 - sum_of_variances / variance_of_sums); +} + +static void reliability_summary_total (const struct reliability *rel); + +static void reliability_statistics (const struct reliability *rel); + + + +static void +run_reliability (struct casereader *group, struct dataset *ds, + struct reliability *rel); + + +int +cmd_reliability (struct lexer *lexer, struct dataset *ds) +{ + int i; + bool ok = false; + struct casegrouper *grouper; + struct casereader *group; + struct cmd_reliability cmd; + - struct reliability rel = { ++ struct reliability rel = {NULL, + NULL, 0, MV_ANY, NULL, 0, -1, + DS_EMPTY_INITIALIZER, + MODEL_ALPHA, 0}; + + cmd.v_variables = NULL; + + if ( ! parse_reliability (lexer, ds, &cmd, &rel) ) + { + goto done; + } + ++ rel.dict = dataset_dict (ds); + rel.variables = cmd.v_variables; + rel.n_variables = cmd.n_variables; + rel.exclude = MV_ANY; + + + if (NULL == rel.sc) + { + struct cronbach *c; + /* Create a default Scale */ + + rel.n_sc = 1; + rel.sc = xzalloc (sizeof (struct cronbach) * rel.n_sc); + + ds_init_cstr (&rel.scale_name, "ANY"); + + c = &rel.sc[0]; + c->n_items = cmd.n_variables; + c->items = xzalloc (sizeof (struct variable*) * c->n_items); + + for (i = 0 ; i < c->n_items ; ++i) + c->items[i] = cmd.v_variables[i]; + } + + if ( cmd.miss == REL_INCLUDE) + rel.exclude = MV_SYSTEM; + + if ( rel.model == MODEL_SPLIT) + { + int i; + const struct cronbach *s; + + rel.n_sc += 2 ; + rel.sc = xrealloc (rel.sc, sizeof (struct cronbach) * rel.n_sc); + + s = &rel.sc[0]; + + rel.sc[1].n_items = + (rel.split_point == -1) ? 
s->n_items / 2 : rel.split_point; + + rel.sc[2].n_items = s->n_items - rel.sc[1].n_items; + rel.sc[1].items = xzalloc (sizeof (struct variable *) + * rel.sc[1].n_items); + + rel.sc[2].items = xzalloc (sizeof (struct variable *) * + rel.sc[2].n_items); + + for (i = 0; i < rel.sc[1].n_items ; ++i) + rel.sc[1].items[i] = s->items[i]; + + while (i < s->n_items) + { + rel.sc[2].items[i - rel.sc[1].n_items] = s->items[i]; + i++; + } + } + + if (cmd.a_summary[REL_SUM_TOTAL]) + { + int i; + const int base_sc = rel.n_sc; + + rel.total_start = base_sc; + + rel.n_sc += rel.sc[0].n_items ; + rel.sc = xrealloc (rel.sc, sizeof (struct cronbach) * rel.n_sc); + + for (i = 0 ; i < rel.sc[0].n_items; ++i ) + { + int v_src; + int v_dest = 0; + struct cronbach *s = &rel.sc[i + base_sc]; + + s->n_items = rel.sc[0].n_items - 1; + s->items = xzalloc (sizeof (struct variable *) * s->n_items); + for (v_src = 0 ; v_src < rel.sc[0].n_items ; ++v_src) + { + if ( v_src != i) + s->items[v_dest++] = rel.sc[0].items[v_src]; + } + } + } + + /* Data pass. */ + grouper = casegrouper_create_splits (proc_open (ds), dataset_dict (ds)); + while (casegrouper_get_next_group (grouper, &group)) + { + run_reliability (group, ds, &rel); + + reliability_statistics (&rel); + + if (cmd.a_summary[REL_SUM_TOTAL]) + reliability_summary_total (&rel); + } + ok = casegrouper_destroy (grouper); + ok = proc_commit (ds) && ok; + + free_reliability (&cmd); + + done: + + /* Free all the stuff */ + for (i = 0 ; i < rel.n_sc; ++i) + { + int x; + struct cronbach *c = &rel.sc[i]; + free (c->items); + + moments1_destroy (c->total); + + if ( c->m) + for (x = 0 ; x < c->n_items; ++x) + moments1_destroy (c->m[x]); + + free (c->m); + } + + ds_destroy (&rel.scale_name); + free (rel.sc); + + if (ok) + return CMD_SUCCESS; + + return CMD_FAILURE; +} + +/* Return the sum of all the item variables in S */ +static double +append_sum (const struct ccase *c, casenumber n UNUSED, void *aux) +{ + double sum = 0; + const struct cronbach *s = aux; + + int v; + for (v = 0 ; v < s->n_items; ++v) + { + sum += case_data (c, s->items[v])->f; + } + + return sum; +}; + + - static void case_processing_summary (casenumber n_valid, casenumber n_missing); ++static void case_processing_summary (casenumber n_valid, casenumber n_missing, ++ const struct dictionary *dict); + +static void - run_reliability (struct casereader *input, struct dataset *ds UNUSED, ++run_reliability (struct casereader *input, struct dataset *ds, + struct reliability *rel) +{ + int i; + int si; + struct ccase *c; + casenumber n_missing ; + casenumber n_valid = 0; + + + for (si = 0 ; si < rel->n_sc; ++si) + { + struct cronbach *s = &rel->sc[si]; + + s->m = xzalloc (sizeof (s->m) * s->n_items); + s->total = moments1_create (MOMENT_VARIANCE); + + for (i = 0 ; i < s->n_items ; ++i ) + s->m[i] = moments1_create (MOMENT_VARIANCE); + } + + input = casereader_create_filter_missing (input, + rel->variables, + rel->n_variables, + rel->exclude, + &n_missing, + NULL); + + for (si = 0 ; si < rel->n_sc; ++si) + { + struct cronbach *s = &rel->sc[si]; + + + s->totals_idx = casereader_get_value_cnt (input); + input = + casereader_create_append_numeric (input, append_sum, + s, NULL); + } + + for (; (c = casereader_read (input)) != NULL; case_unref (c)) + { + double weight = 1.0; + n_valid ++; + + for (si = 0; si < rel->n_sc; ++si) + { + struct cronbach *s = &rel->sc[si]; + + for (i = 0 ; i < s->n_items ; ++i ) + moments1_add (s->m[i], case_data (c, s->items[i])->f, weight); + + moments1_add (s->total, case_data_idx (c, 
s->totals_idx)->f, weight); + } + } + casereader_destroy (input); + + for (si = 0; si < rel->n_sc; ++si) + { + struct cronbach *s = &rel->sc[si]; + + s->sum_of_variances = 0; + for (i = 0 ; i < s->n_items ; ++i ) + { + double weight, mean, variance; + moments1_calculate (s->m[i], &weight, &mean, &variance, NULL, NULL); + + s->sum_of_variances += variance; + } + + moments1_calculate (s->total, NULL, NULL, &s->variance_of_sums, + NULL, NULL); + + s->alpha = + alpha (s->n_items, s->sum_of_variances, s->variance_of_sums); + } + + + { + struct tab_table *tab = tab_create(1, 1, 0); + + tab_dim (tab, tab_natural_dimensions); + tab_flags (tab, SOMF_NO_TITLE ); + + tab_text(tab, 0, 0, TAT_PRINTF, "Scale: %s", ds_cstr (&rel->scale_name)); + + tab_submit(tab); + } + + - case_processing_summary (n_valid, n_missing); ++ case_processing_summary (n_valid, n_missing, dataset_dict (ds)); +} + + +static void reliability_statistics_model_alpha (struct tab_table *tbl, + const struct reliability *rel); + +static void reliability_statistics_model_split (struct tab_table *tbl, + const struct reliability *rel); + +struct reliability_output_table +{ + int n_cols; + int n_rows; + int heading_cols; + int heading_rows; - void (*populate)(struct tab_table *, const struct reliability *); ++ void (*populate) (struct tab_table *, const struct reliability *); +}; + +static struct reliability_output_table rol[2] = + { + { 2, 2, 1, 1, reliability_statistics_model_alpha}, + { 4, 9, 3, 0, reliability_statistics_model_split} + }; + +static void +reliability_statistics (const struct reliability *rel) +{ + int n_cols = rol[rel->model].n_cols; + int n_rows = rol[rel->model].n_rows; + int heading_columns = rol[rel->model].heading_cols; + int heading_rows = rol[rel->model].heading_rows; + + struct tab_table *tbl = tab_create (n_cols, n_rows, 0); + tab_headers (tbl, heading_columns, 0, heading_rows, 0); + + tab_dim (tbl, tab_natural_dimensions); + + tab_title (tbl, _("Reliability Statistics")); + + /* Vertical lines for the data only */ + tab_box (tbl, + -1, -1, + -1, TAL_1, + heading_columns, 0, + n_cols - 1, n_rows - 1); + + /* Box around table */ + tab_box (tbl, + TAL_2, TAL_2, + -1, -1, + 0, 0, + n_cols - 1, n_rows - 1); + + + tab_hline (tbl, TAL_2, 0, n_cols - 1, heading_rows); + + tab_vline (tbl, TAL_2, heading_columns, 0, n_rows - 1); + + if ( rel->model == MODEL_ALPHA ) + reliability_statistics_model_alpha (tbl, rel); + else if (rel->model == MODEL_SPLIT ) + reliability_statistics_model_split (tbl, rel); + + tab_submit (tbl); +} + +static void +reliability_summary_total (const struct reliability *rel) +{ + int i; + const int n_cols = 5; + const int heading_columns = 1; + const int heading_rows = 1; + const int n_rows = rel->sc[0].n_items + heading_rows ; + + struct tab_table *tbl = tab_create (n_cols, n_rows, 0); + tab_headers (tbl, heading_columns, 0, heading_rows, 0); + + tab_dim (tbl, tab_natural_dimensions); + + tab_title (tbl, _("Item-Total Statistics")); + + /* Vertical lines for the data only */ + tab_box (tbl, + -1, -1, + -1, TAL_1, + heading_columns, 0, + n_cols - 1, n_rows - 1); + + /* Box around table */ + tab_box (tbl, + TAL_2, TAL_2, + -1, -1, + 0, 0, + n_cols - 1, n_rows - 1); + + + tab_hline (tbl, TAL_2, 0, n_cols - 1, heading_rows); + + tab_vline (tbl, TAL_2, heading_columns, 0, n_rows - 1); + + tab_text (tbl, 1, 0, TAB_CENTER | TAT_TITLE, + _("Scale Mean if Item Deleted")); + + tab_text (tbl, 2, 0, TAB_CENTER | TAT_TITLE, + _("Scale Variance if Item Deleted")); + + tab_text (tbl, 3, 0, TAB_CENTER | 
TAT_TITLE, + _("Corrected Item-Total Correlation")); + + tab_text (tbl, 4, 0, TAB_CENTER | TAT_TITLE, + _("Cronbach's Alpha if Item Deleted")); + + + for (i = 0 ; i < rel->sc[0].n_items; ++i) + { + double cov, item_to_total_r; + double mean, weight, var; + + const struct cronbach *s = &rel->sc[rel->total_start + i]; + tab_text (tbl, 0, heading_rows + i, TAB_LEFT| TAT_TITLE, + var_to_string (rel->sc[0].items[i])); + + moments1_calculate (s->total, &weight, &mean, &var, 0, 0); + - tab_float (tbl, 1, heading_rows + i, TAB_RIGHT, - mean, 8, 3); ++ tab_double (tbl, 1, heading_rows + i, TAB_RIGHT, ++ mean, NULL); + - tab_float (tbl, 2, heading_rows + i, TAB_RIGHT, - s->variance_of_sums, 8, 3); ++ tab_double (tbl, 2, heading_rows + i, TAB_RIGHT, ++ s->variance_of_sums, NULL); + - tab_float (tbl, 4, heading_rows + i, TAB_RIGHT, - s->alpha, 8, 3); ++ tab_double (tbl, 4, heading_rows + i, TAB_RIGHT, ++ s->alpha, NULL); + + + moments1_calculate (rel->sc[0].m[i], &weight, &mean, &var, 0,0); + cov = rel->sc[0].variance_of_sums + var - s->variance_of_sums; + cov /= 2.0; + + item_to_total_r = (cov - var) / (sqrt(var) * sqrt (s->variance_of_sums)); + + - tab_float (tbl, 3, heading_rows + i, TAB_RIGHT, - item_to_total_r, 8, 3); ++ tab_double (tbl, 3, heading_rows + i, TAB_RIGHT, ++ item_to_total_r, NULL); + } + + + tab_submit (tbl); +} + + +static void +reliability_statistics_model_alpha (struct tab_table *tbl, + const struct reliability *rel) +{ ++ const struct variable *wv = dict_get_weight (rel->dict); ++ const struct fmt_spec *wfmt = wv ? var_get_print_format (wv) : & F_8_0; ++ + const struct cronbach *s = &rel->sc[0]; + + tab_text (tbl, 0, 0, TAB_CENTER | TAT_TITLE, + _("Cronbach's Alpha")); + + tab_text (tbl, 1, 0, TAB_CENTER | TAT_TITLE, + _("N of items")); + - tab_float (tbl, 0, 1, TAB_RIGHT, s->alpha, 8, 3); ++ tab_double (tbl, 0, 1, TAB_RIGHT, s->alpha, NULL); + - tab_float (tbl, 1, 1, TAB_RIGHT, s->n_items, 8, 0); ++ tab_double (tbl, 1, 1, TAB_RIGHT, s->n_items, wfmt); +} + + +static void +reliability_statistics_model_split (struct tab_table *tbl, + const struct reliability *rel) +{ ++ const struct variable *wv = dict_get_weight (rel->dict); ++ const struct fmt_spec *wfmt = wv ? 
var_get_print_format (wv) : & F_8_0; ++ + tab_text (tbl, 0, 0, TAB_LEFT, + _("Cronbach's Alpha")); + + tab_text (tbl, 1, 0, TAB_LEFT, + _("Part 1")); + + tab_text (tbl, 2, 0, TAB_LEFT, + _("Value")); + + tab_text (tbl, 2, 1, TAB_LEFT, + _("N of Items")); + + + + tab_text (tbl, 1, 2, TAB_LEFT, + _("Part 2")); + + tab_text (tbl, 2, 2, TAB_LEFT, + _("Value")); + + tab_text (tbl, 2, 3, TAB_LEFT, + _("N of Items")); + + + + tab_text (tbl, 1, 4, TAB_LEFT, + _("Total N of Items")); + + tab_text (tbl, 0, 5, TAB_LEFT, + _("Correlation Between Forms")); + + + tab_text (tbl, 0, 6, TAB_LEFT, + _("Spearman-Brown Coefficient")); + + tab_text (tbl, 1, 6, TAB_LEFT, + _("Equal Length")); + + tab_text (tbl, 1, 7, TAB_LEFT, + _("Unequal Length")); + + + tab_text (tbl, 0, 8, TAB_LEFT, + _("Guttman Split-Half Coefficient")); + + + - tab_float (tbl, 3, 0, TAB_RIGHT, rel->sc[1].alpha, 8, 3); - tab_float (tbl, 3, 2, TAB_RIGHT, rel->sc[2].alpha, 8, 3); ++ tab_double (tbl, 3, 0, TAB_RIGHT, rel->sc[1].alpha, NULL); ++ tab_double (tbl, 3, 2, TAB_RIGHT, rel->sc[2].alpha, NULL); + - tab_float (tbl, 3, 1, TAB_RIGHT, rel->sc[1].n_items, 8, 0); - tab_float (tbl, 3, 3, TAB_RIGHT, rel->sc[2].n_items, 8, 0); ++ tab_double (tbl, 3, 1, TAB_RIGHT, rel->sc[1].n_items, wfmt); ++ tab_double (tbl, 3, 3, TAB_RIGHT, rel->sc[2].n_items, wfmt); + - tab_float (tbl, 3, 4, TAB_RIGHT, - rel->sc[1].n_items + rel->sc[2].n_items, 8, 0); ++ tab_double (tbl, 3, 4, TAB_RIGHT, ++ rel->sc[1].n_items + rel->sc[2].n_items, wfmt); + + { + /* R is the correlation between the two parts */ + double r = rel->sc[0].variance_of_sums - + rel->sc[1].variance_of_sums - + rel->sc[2].variance_of_sums ; + + /* Guttman Split Half Coefficient */ + double g = 2 * r / rel->sc[0].variance_of_sums; + + /* Unequal Length Spearman Brown Coefficient, and + intermediate value used in the computation thereof */ + double uly, tmp; + + r /= sqrt (rel->sc[1].variance_of_sums); + r /= sqrt (rel->sc[2].variance_of_sums); + r /= 2.0; + - tab_float (tbl, 3, 5, TAB_RIGHT, r, 8, 3); ++ tab_double (tbl, 3, 5, TAB_RIGHT, r, NULL); + + /* Equal length Spearman-Brown Coefficient */ - tab_float (tbl, 3, 6, TAB_RIGHT, 2 * r / (1.0 + r), 8, 3); ++ tab_double (tbl, 3, 6, TAB_RIGHT, 2 * r / (1.0 + r), NULL); + - tab_float (tbl, 3, 8, TAB_RIGHT, g, 8, 3); ++ tab_double (tbl, 3, 8, TAB_RIGHT, g, NULL); + + tmp = (1.0 - r*r) * rel->sc[1].n_items * rel->sc[2].n_items / + pow2 (rel->sc[0].n_items); + + uly = sqrt( pow4 (r) + 4 * pow2 (r) * tmp); + uly -= pow2 (r); + uly /= 2 * tmp; + - tab_float (tbl, 3, 7, TAB_RIGHT, uly, 8, 3); - ++ tab_double (tbl, 3, 7, TAB_RIGHT, uly, NULL); + } +} + + + +static void - case_processing_summary (casenumber n_valid, casenumber n_missing) ++case_processing_summary (casenumber n_valid, casenumber n_missing, ++ const struct dictionary *dict) +{ ++ const struct variable *wv = dict_get_weight (dict); ++ const struct fmt_spec *wfmt = wv ? 
var_get_print_format (wv) : & F_8_0; ++ + casenumber total; + int n_cols = 4; + int n_rows = 4; + int heading_columns = 2; + int heading_rows = 1; + struct tab_table *tbl; + tbl = tab_create (n_cols, n_rows, 0); + tab_headers (tbl, heading_columns, 0, heading_rows, 0); + + tab_dim (tbl, tab_natural_dimensions); + + tab_title (tbl, _("Case Processing Summary")); + + /* Vertical lines for the data only */ + tab_box (tbl, + -1, -1, + -1, TAL_1, + heading_columns, 0, + n_cols - 1, n_rows - 1); + + /* Box around table */ + tab_box (tbl, + TAL_2, TAL_2, + -1, -1, + 0, 0, + n_cols - 1, n_rows - 1); + + + tab_hline (tbl, TAL_2, 0, n_cols - 1, heading_rows); + + tab_vline (tbl, TAL_2, heading_columns, 0, n_rows - 1); + + + tab_text (tbl, 0, heading_rows, TAB_LEFT | TAT_TITLE, + _("Cases")); + + tab_text (tbl, 1, heading_rows, TAB_LEFT | TAT_TITLE, + _("Valid")); + + tab_text (tbl, 1, heading_rows + 1, TAB_LEFT | TAT_TITLE, + _("Excluded")); + + tab_text (tbl, 1, heading_rows + 2, TAB_LEFT | TAT_TITLE, + _("Total")); + + tab_text (tbl, heading_columns, 0, TAB_CENTER | TAT_TITLE, + _("N")); + + tab_text (tbl, heading_columns + 1, 0, TAB_CENTER | TAT_TITLE | TAT_PRINTF, + _("%%")); + + total = n_missing + n_valid; + - tab_float (tbl, 2, heading_rows, TAB_RIGHT, - n_valid, 8, 0); ++ tab_double (tbl, 2, heading_rows, TAB_RIGHT, ++ n_valid, wfmt); + + - tab_float (tbl, 2, heading_rows + 1, TAB_RIGHT, - n_missing, 8, 0); ++ tab_double (tbl, 2, heading_rows + 1, TAB_RIGHT, ++ n_missing, wfmt); + + - tab_float (tbl, 2, heading_rows + 2, TAB_RIGHT, - total, 8, 0); ++ tab_double (tbl, 2, heading_rows + 2, TAB_RIGHT, ++ total, wfmt); + + - tab_float (tbl, 3, heading_rows, TAB_RIGHT, - 100 * n_valid / (double) total, 8, 1); ++ tab_double (tbl, 3, heading_rows, TAB_RIGHT, ++ 100 * n_valid / (double) total, NULL); + + - tab_float (tbl, 3, heading_rows + 1, TAB_RIGHT, - 100 * n_missing / (double) total, 8, 1); ++ tab_double (tbl, 3, heading_rows + 1, TAB_RIGHT, ++ 100 * n_missing / (double) total, NULL); + + - tab_float (tbl, 3, heading_rows + 2, TAB_RIGHT, - 100 * total / (double) total, 8, 1); ++ tab_double (tbl, 3, heading_rows + 2, TAB_RIGHT, ++ 100 * total / (double) total, NULL); + + + tab_submit (tbl); +} + +static int +rel_custom_model (struct lexer *lexer, struct dataset *ds UNUSED, + struct cmd_reliability *cmd UNUSED, void *aux) +{ + struct reliability *rel = aux; + + if (lex_match_id (lexer, "ALPHA")) + { + rel->model = MODEL_ALPHA; + } + else if (lex_match_id (lexer, "SPLIT")) + { + rel->model = MODEL_SPLIT; + rel->split_point = -1; + if ( lex_match (lexer, '(')) + { + lex_force_num (lexer); + rel->split_point = lex_number (lexer); + lex_get (lexer); + lex_force_match (lexer, ')'); + } + } + else + return 0; + + return 1; +} + + + +static int +rel_custom_scale (struct lexer *lexer, struct dataset *ds UNUSED, + struct cmd_reliability *p, void *aux) +{ + struct const_var_set *vs; + struct reliability *rel = aux; + struct cronbach *scale; + + rel->n_sc = 1; + rel->sc = xzalloc (sizeof (struct cronbach) * rel->n_sc); + scale = &rel->sc[0]; + + if ( ! lex_force_match (lexer, '(')) return 0; + + if ( ! lex_force_string (lexer) ) return 0; + + ds_init_string (&rel->scale_name, lex_tokstr (lexer)); + + lex_get (lexer); + + if ( ! 
lex_force_match (lexer, ')')) return 0; + + lex_match (lexer, '='); + + vs = const_var_set_create_from_array (p->v_variables, p->n_variables); + + if (!parse_const_var_set_vars (lexer, vs, &scale->items, &scale->n_items, 0)) + { + const_var_set_destroy (vs); + return 2; + } + + const_var_set_destroy (vs); + return 1; +} + +/* + Local Variables: + mode: c + End: +*/ diff --cc src/language/stats/wilcoxon.c index 1bdcc06d,00000000..c0329f6c mode 100644,000000..100644 --- a/src/language/stats/wilcoxon.c +++ b/src/language/stats/wilcoxon.c @@@ -1,361 -1,0 +1,369 @@@ +/* Pspp - a program for statistical analysis. + Copyright (C) 2008, 2009 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. */ + + + +#include +#include "wilcoxon.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include ++#include + +static double +append_difference (const struct ccase *c, casenumber n UNUSED, void *aux) +{ + const variable_pair *vp = aux; + + return case_data (c, (*vp)[0])->f - case_data (c, (*vp)[1])->f; +} + +static void show_ranks_box (const struct wilcoxon_state *, - const struct two_sample_test *); ++ const struct two_sample_test *, ++ const struct dictionary *); + +static void show_tests_box (const struct wilcoxon_state *, + const struct two_sample_test *, + bool exact, double timer); + + + +static void +distinct_callback (double v UNUSED, casenumber n, double w UNUSED, void *aux) +{ + struct wilcoxon_state *ws = aux; + + ws->tiebreaker += pow3 (n) - n; +} + +#define WEIGHT_IDX 2 + +void +wilcoxon_execute (const struct dataset *ds, + struct casereader *input, + enum mv_class exclude, + const struct npar_test *test, + bool exact, + double timer) +{ + int i; + bool warn = true; + const struct dictionary *dict = dataset_dict (ds); + const struct two_sample_test *t2s = (struct two_sample_test *) test; + + struct wilcoxon_state *ws = xcalloc (sizeof (*ws), t2s->n_pairs); + const struct variable *weight = dict_get_weight (dict); + struct variable *weightx = var_create_internal (WEIGHT_IDX); + + input = + casereader_create_filter_weight (input, dict, &warn, NULL); + + for (i = 0 ; i < t2s->n_pairs; ++i ) + { + struct casereader *r = casereader_clone (input); + struct casewriter *writer; + struct ccase *c; + struct subcase ordering; + variable_pair *vp = &t2s->pairs[i]; + + const int reader_width = weight ?
3 : 2; + + ws[i].sign = var_create_internal (0); + ws[i].absdiff = var_create_internal (1); + + r = casereader_create_filter_missing (r, *vp, 2, + exclude, + NULL, NULL); + + subcase_init_var (&ordering, ws[i].absdiff, SC_ASCEND); + writer = sort_create_writer (&ordering, reader_width); + subcase_destroy (&ordering); + + for (; (c = casereader_read (r)) != NULL; case_unref (c)) + { + struct ccase *output = case_create (reader_width); + double d = append_difference (c, 0, vp); + + if (d > 0) + { + case_data_rw (output, ws[i].sign)->f = 1.0; + + } + else if (d < 0) + { + case_data_rw (output, ws[i].sign)->f = -1.0; + } + else + { + double w = 1.0; + if (weight) + w = case_data (c, weight)->f; + + /* Central point values should be dropped */ + ws[i].n_zeros += w; + case_unref (output); + continue; + } + + case_data_rw (output, ws[i].absdiff)->f = fabs (d); + + if (weight) + case_data_rw (output, weightx)->f = case_data (c, weight)->f; + + casewriter_write (writer, output); + } + casereader_destroy (r); + ws[i].reader = casewriter_make_reader (writer); + } + + for (i = 0 ; i < t2s->n_pairs; ++i ) + { + struct casereader *rr ; + struct ccase *c; + enum rank_error err = 0; + + rr = casereader_create_append_rank (ws[i].reader, ws[i].absdiff, + weight ? weightx : NULL, &err, + distinct_callback, &ws[i] + ); + + for (; (c = casereader_read (rr)) != NULL; case_unref (c)) + { + double sign = case_data (c, ws[i].sign)->f; + double rank = case_data_idx (c, weight ? 3 : 2)->f; + double w = 1.0; + if (weight) + w = case_data (c, weightx)->f; + + if ( sign > 0 ) + { + ws[i].positives.sum += rank * w; + ws[i].positives.n += w; + } + else if (sign < 0) + { + ws[i].negatives.sum += rank * w; + ws[i].negatives.n += w; + } + else + NOT_REACHED (); + } + + casereader_destroy (rr); + } + + casereader_destroy (input); + + var_destroy (weightx); + - show_ranks_box (ws, t2s); ++ show_ranks_box (ws, t2s, dict); + show_tests_box (ws, t2s, exact, timer); + + for (i = 0 ; i < t2s->n_pairs; ++i ) + { + var_destroy (ws[i].sign); + var_destroy (ws[i].absdiff); + } + + free (ws); +} + + + + +#include "gettext.h" +#define _(msgid) gettext (msgid) + +static void - show_ranks_box (const struct wilcoxon_state *ws, const struct two_sample_test *t2s) ++show_ranks_box (const struct wilcoxon_state *ws, ++ const struct two_sample_test *t2s, ++ const struct dictionary *dict) +{ + size_t i; ++ ++ const struct variable *wv = dict_get_weight (dict); ++ const struct fmt_spec *wfmt = wv ? 
var_get_print_format (wv) : & F_8_0; ++ + struct tab_table *table = tab_create (5, 1 + 4 * t2s->n_pairs, 0); + + tab_dim (table, tab_natural_dimensions); + + tab_title (table, _("Ranks")); + + tab_headers (table, 2, 0, 1, 0); + + /* Vertical lines inside the box */ + tab_box (table, 0, 0, -1, TAL_1, + 1, 0, table->nc - 1, tab_nr (table) - 1 ); + + /* Box around entire table */ + tab_box (table, TAL_2, TAL_2, -1, -1, + 0, 0, table->nc - 1, tab_nr (table) - 1 ); + + + tab_text (table, 2, 0, TAB_CENTER, _("N")); + tab_text (table, 3, 0, TAB_CENTER, _("Mean Rank")); + tab_text (table, 4, 0, TAB_CENTER, _("Sum of Ranks")); + + + for (i = 0 ; i < t2s->n_pairs; ++i) + { + variable_pair *vp = &t2s->pairs[i]; + + struct string pair_name; + ds_init_cstr (&pair_name, var_to_string ((*vp)[0])); + ds_put_cstr (&pair_name, " - "); + ds_put_cstr (&pair_name, var_to_string ((*vp)[1])); + + tab_text (table, 1, 1 + i * 4, TAB_LEFT, _("Negative Ranks")); + tab_text (table, 1, 2 + i * 4, TAB_LEFT, _("Positive Ranks")); + tab_text (table, 1, 3 + i * 4, TAB_LEFT, _("Ties")); + tab_text (table, 1, 4 + i * 4, TAB_LEFT, _("Total")); + + tab_hline (table, TAL_1, 0, table->nc - 1, 1 + i * 4); + + + tab_text (table, 0, 1 + i * 4, TAB_LEFT, ds_cstr (&pair_name)); + ds_destroy (&pair_name); + + + /* N */ - tab_float (table, 2, 1 + i * 4, TAB_RIGHT, ws[i].negatives.n, 8, 0); - tab_float (table, 2, 2 + i * 4, TAB_RIGHT, ws[i].positives.n, 8, 0); - tab_float (table, 2, 3 + i * 4, TAB_RIGHT, ws[i].n_zeros, 8, 0); ++ tab_double (table, 2, 1 + i * 4, TAB_RIGHT, ws[i].negatives.n, wfmt); ++ tab_double (table, 2, 2 + i * 4, TAB_RIGHT, ws[i].positives.n, wfmt); ++ tab_double (table, 2, 3 + i * 4, TAB_RIGHT, ws[i].n_zeros, wfmt); + - tab_float (table, 2, 4 + i * 4, TAB_RIGHT, - ws[i].n_zeros + ws[i].positives.n + ws[i].negatives.n, 8, 0); ++ tab_double (table, 2, 4 + i * 4, TAB_RIGHT, ++ ws[i].n_zeros + ws[i].positives.n + ws[i].negatives.n, wfmt); + + /* Sums */ - tab_float (table, 4, 1 + i * 4, TAB_RIGHT, ws[i].negatives.sum, 8, 2); - tab_float (table, 4, 2 + i * 4, TAB_RIGHT, ws[i].positives.sum, 8, 2); ++ tab_double (table, 4, 1 + i * 4, TAB_RIGHT, ws[i].negatives.sum, NULL); ++ tab_double (table, 4, 2 + i * 4, TAB_RIGHT, ws[i].positives.sum, NULL); + + + /* Means */ - tab_float (table, 3, 1 + i * 4, TAB_RIGHT, - ws[i].negatives.sum / (double) ws[i].negatives.n, 8, 2); ++ tab_double (table, 3, 1 + i * 4, TAB_RIGHT, ++ ws[i].negatives.sum / (double) ws[i].negatives.n, NULL); + - tab_float (table, 3, 2 + i * 4, TAB_RIGHT, - ws[i].positives.sum / (double) ws[i].positives.n, 8, 2); ++ tab_double (table, 3, 2 + i * 4, TAB_RIGHT, ++ ws[i].positives.sum / (double) ws[i].positives.n, NULL); + + } + + tab_hline (table, TAL_2, 0, table->nc - 1, 1); + tab_vline (table, TAL_2, 2, 0, table->nr - 1); + + + tab_submit (table); +} + + +static void +show_tests_box (const struct wilcoxon_state *ws, + const struct two_sample_test *t2s, + bool exact, + double timer UNUSED + ) +{ + size_t i; + struct tab_table *table = tab_create (1 + t2s->n_pairs, exact ? 5 : 3, 0); + + tab_dim (table, tab_natural_dimensions); + + tab_title (table, _("Test Statistics")); + + tab_headers (table, 1, 0, 1, 0); + + /* Vertical lines inside the box */ + tab_box (table, 0, 0, -1, TAL_1, + 0, 0, table->nc - 1, tab_nr (table) - 1 ); + + /* Box around entire table */ + tab_box (table, TAL_2, TAL_2, -1, -1, + 0, 0, table->nc - 1, tab_nr (table) - 1 ); + + + tab_text (table, 0, 1, TAB_LEFT, _("Z")); + tab_text (table, 0, 2, TAB_LEFT, _("Asymp. 
Sig (2-tailed)")); + + if ( exact ) + { + tab_text (table, 0, 3, TAB_LEFT, _("Exact Sig (2-tailed)")); + tab_text (table, 0, 4, TAB_LEFT, _("Exact Sig (1-tailed)")); + +#if 0 + tab_text (table, 0, 5, TAB_LEFT, _("Point Probability")); +#endif + } + + for (i = 0 ; i < t2s->n_pairs; ++i) + { + double z; + double n = ws[i].positives.n + ws[i].negatives.n; + variable_pair *vp = &t2s->pairs[i]; + + struct string pair_name; + ds_init_cstr (&pair_name, var_to_string ((*vp)[0])); + ds_put_cstr (&pair_name, " - "); + ds_put_cstr (&pair_name, var_to_string ((*vp)[1])); + + + tab_text (table, 1 + i, 0, TAB_CENTER, ds_cstr (&pair_name)); + ds_destroy (&pair_name); + + z = MIN (ws[i].positives.sum, ws[i].negatives.sum); + z -= n * (n + 1)/ 4.0; + + z /= sqrt (n * (n + 1) * (2*n + 1)/24.0 - ws[i].tiebreaker / 48.0); + - tab_float (table, 1 + i, 1, TAB_RIGHT, z, 8, 3); ++ tab_double (table, 1 + i, 1, TAB_RIGHT, z, NULL); + - tab_float (table, 1 + i, 2, TAB_RIGHT, ++ tab_double (table, 1 + i, 2, TAB_RIGHT, + 2.0 * gsl_cdf_ugaussian_P (z), - 8, 3); ++ NULL); + + if (exact) + { + double p = LevelOfSignificanceWXMPSR (ws[i].positives.sum, n); + if (p < 0) + { + msg (MW, ("Too many pairs to calculate exact significance.")); + } + else + { - tab_float (table, 1 + i, 3, TAB_RIGHT, p, 8, 3); - tab_float (table, 1 + i, 4, TAB_RIGHT, p / 2.0, 8, 3); ++ tab_double (table, 1 + i, 3, TAB_RIGHT, p, NULL); ++ tab_double (table, 1 + i, 4, TAB_RIGHT, p / 2.0, NULL); + } + } + } + + tab_hline (table, TAL_2, 0, table->nc - 1, 1); + tab_vline (table, TAL_2, 1, 0, table->nr - 1); + + + tab_submit (table); +} diff --cc tests/command/examine-extremes.sh index 8524bf5e,6e789c7b..67dfed0a --- a/tests/command/examine-extremes.sh +++ b/tests/command/examine-extremes.sh @@@ -102,33 -102,33 +102,33 @@@ activity="compare results perl -pi -e 's/^\s*$//g' $TEMPDIR/pspp.list diff -b $TEMPDIR/pspp.list - << EOF 1.1 EXAMINE. Case Processing Summary - #==#===============================# - # # Cases # - # #----------+---------+----------# - # # Valid | Missing | Total # - # #--+-------+-+-------+--+-------# - # # N|Percent|N|Percent| N|Percent# - #==#==#=======#=#=======#==#=======# - #V1#23| 100%|0| 0%|23| 100%# - #==#==#=======#=#=======#==#=======# + #==#=======================================# + # # Cases # + # #-------------+-----------+-------------# + # # Valid | Missing | Total # + # #-----+-------+---+-------+-----+-------# + # # N |Percent| N |Percent| N |Percent# + #==#=====#=======#===#=======#=====#=======# + #V1#23.00| 100%|.00| 0%|23.00| 100%# + #==#=====#=======#===#=======#=====#=======# 1.2 EXAMINE. Extreme Values -#============#===========#========# -# #Case Number| Value # -#============#===========#========# -#V1 Highest 1# 21| 20.00# -# 2# 20| 19.00# -# 3# 19| 18.00# -# 4# 19| 18.00# -# 5# 18| 17.00# -# 6# 17| 16.00# -# ----------#-----------+--------# -# Lowest 1# 1| 1.00# -# 2# 2| 2.00# -# 3# 4| 3.00# -# 4# 3| 3.00# -# 5# 3| 3.00# -# 6# 5| 4.00# -#============#===========#========# +#============#===========#=====# +# #Case Number|Value# +#============#===========#=====# +#V1 Highest 1# 21|20.00# +# 2# 20|19.00# +# 3# 19|18.00# +# 4# 19|18.00# +# 5# 18|17.00# +# 6# 17|16.00# +# ----------#-----------+-----# +# Lowest 1# 1| 1.00# +# 2# 2| 2.00# +# 3# 3| 3.00# +# 4# 3| 3.00# +# 5# 4| 3.00# +# 6# 5| 4.00# +#============#===========#=====# EOF if [ $? 
-ne 0 ] ; then fail ; fi diff --cc tests/command/examine.sh index 095ee0fb,6468e814..11534385 --- a/tests/command/examine.sh +++ b/tests/command/examine.sh @@@ -116,147 -116,147 +116,147 @@@ diff -b $TEMPDIR/pspp.list - << EO +--------+------+ Case# QUALITY W BRAND ----- -------- -------- -------- - 1 3.00 1.00 1.00 - 2 2.00 2.00 1.00 - 3 1.00 2.00 1.00 - 4 1.00 1.00 1.00 - 5 4.00 1.00 1.00 - 6 4.00 1.00 1.00 - 7 5.00 1.00 2.00 - 8 2.00 1.00 2.00 - 9 4.00 4.00 2.00 - 10 2.00 1.00 2.00 - 11 3.00 1.00 2.00 - 12 7.00 1.00 3.00 - 13 4.00 2.00 3.00 - 14 5.00 3.00 3.00 - 15 3.00 1.00 3.00 - 16 6.00 1.00 3.00 + 1 3.00 1.00 1.00 + 2 2.00 2.00 1.00 + 3 1.00 2.00 1.00 + 4 1.00 1.00 1.00 + 5 4.00 1.00 1.00 + 6 4.00 1.00 1.00 + 7 5.00 1.00 2.00 + 8 2.00 1.00 2.00 + 9 4.00 4.00 2.00 + 10 2.00 1.00 2.00 + 11 3.00 1.00 2.00 + 12 7.00 1.00 3.00 + 13 4.00 2.00 3.00 + 14 5.00 3.00 3.00 + 15 3.00 1.00 3.00 + 16 6.00 1.00 3.00 2.1 EXAMINE. Case Processing Summary - #===============#===============================# - # # Cases # - # #----------+---------+----------# - # # Valid | Missing | Total # - # #--+-------+-+-------+--+-------# - # # N|Percent|N|Percent| N|Percent# - #===============#==#=======#=#=======#==#=======# - #Breaking Strain#24| 100%|0| 0%|24| 100%# - #===============#==#=======#=#=======#==#=======# + #===============#=======================================# + # # Cases # + # #-------------+-----------+-------------# + # # Valid | Missing | Total # + # #-----+-------+---+-------+-----+-------# + # # N |Percent| N |Percent| N |Percent# + #===============#=====#=======#===#=======#=====#=======# + #Breaking Strain#24.00| 100%|.00| 0%|24.00| 100%# + #===============#=====#=======#===#=======#=====#=======# 2.2 EXAMINE. Extreme Values -#=========================#===========#========# -# #Case Number| Value # -#=========================#===========#========# -#Breaking Strain Highest 1# 12| 7.00# -# 2# 16| 6.00# -# 3# 14| 5.00# -# ----------#-----------+--------# -# Lowest 1# 4| 1.00# -# 2# 3| 1.00# -# 3# 3| 1.00# -#=========================#===========#========# +#=========================#===========#=====# +# #Case Number|Value# +#=========================#===========#=====# +#Breaking Strain Highest 1# 12| 7.00# +# 2# 16| 6.00# +# 3# 7| 5.00# +# ----------#-----------+-----# +# Lowest 1# 3| 1.00# +# 2# 3| 1.00# +# 3# 4| 1.00# +#=========================#===========#=====# 2.3 EXAMINE. Descriptives #============================================================#=========#==========# # #Statistic|Std. Error# #============================================================#=========#==========# - #Breaking Strain Mean # 3.54 | .324 # - # 95% Confidence Interval for Mean Lower Bound# 2.871 | # - # Upper Bound# 4.212 | # + #Breaking Strain Mean # 3.54 | .32 # + # 95% Confidence Interval for Mean Lower Bound# 2.87 | # + # Upper Bound# 4.21 | # # 5% Trimmed Mean # 3.50 | # # Median # 4.00 | # - # Variance # 2.520 | # - # Std. Deviation # 1.587 | # - # Minimum # 1.000 | # - # Maximum # 7.000 | # - # Range # 6.000 | # + # Variance # 2.52 | # + # Std. Deviation # 1.59 | # + # Minimum # 1.00 | # + # Maximum # 7.00 | # + # Range # 6.00 | # # Interquartile Range # 2.75 | # - # Skewness # .059 | .472 # - # Kurtosis # -.358 | .918 # + # Skewness # .06 | .47 # + # Kurtosis # -.36 | .92 # #============================================================#=========#==========# 2.4 EXAMINE. 
Case Processing Summary - #============================#=============================# - # # Cases # - # #---------+---------+---------# - # # Valid | Missing | Total # - # #-+-------+-+-------+-+-------# - # Manufacturer#N|Percent|N|Percent|N|Percent# - #============================#=#=======#=#=======#=#=======# - #Breaking Strain Aspeger #8| 100%|0| 0%|8| 100%# - # Bloggs #8| 100%|0| 0%|8| 100%# - # Charlies #8| 100%|0| 0%|8| 100%# - #============================#=#=======#=#=======#=#=======# + #============================#=====================================# + # # Cases # + # #------------+-----------+------------# + # # Valid | Missing | Total # + # #----+-------+---+-------+----+-------# + # Manufacturer# N |Percent| N |Percent| N |Percent# + #============================#====#=======#===#=======#====#=======# + #Breaking Strain Aspeger #8.00| 100%|.00| 0%|8.00| 100%# + # Bloggs #8.00| 100%|.00| 0%|8.00| 100%# + # Charlies #8.00| 100%|.00| 0%|8.00| 100%# + #============================#====#=======#===#=======#====#=======# 2.5 EXAMINE. Extreme Values -#======================================#===========#========# -# Manufacturer #Case Number| Value # -#======================================#===========#========# -#Breaking Strain Aspeger Highest 1# 6| 4.00# -# 2# 5| 4.00# -# 3# 1| 3.00# -# ----------#-----------+--------# -# Lowest 1# 4| 1.00# -# 2# 3| 1.00# -# 3# 3| 1.00# -# -----------------------#-----------+--------# -# Bloggs Highest 1# 7| 5.00# -# 2# 9| 4.00# -# 3# 9| 4.00# -# ----------#-----------+--------# -# Lowest 1# 10| 2.00# -# 2# 8| 2.00# -# 3# 11| 3.00# -# -----------------------#-----------+--------# -# Charlies Highest 1# 12| 7.00# -# 2# 16| 6.00# -# 3# 14| 5.00# -# ----------#-----------+--------# -# Lowest 1# 15| 3.00# -# 2# 13| 4.00# -# 3# 13| 4.00# -#======================================#===========#========# +#======================================#===========#=====# +# Manufacturer #Case Number|Value# +#======================================#===========#=====# +#Breaking Strain Aspeger Highest 1# 5| 4.00# +# 2# 6| 4.00# +# 3# 1| 3.00# +# ----------#-----------+-----# +# Lowest 1# 3| 1.00# +# 2# 3| 1.00# +# 3# 4| 1.00# +# -----------------------#-----------+-----# +# Bloggs Highest 1# 7| 5.00# +# 2# 9| 4.00# +# 3# 9| 4.00# +# ----------#-----------+-----# +# Lowest 1# 8| 2.00# +# 2# 10| 2.00# +# 3# 11| 3.00# +# -----------------------#-----------+-----# +# Charlies Highest 1# 12| 7.00# +# 2# 16| 6.00# +# 3# 14| 5.00# +# ----------#-----------+-----# +# Lowest 1# 15| 3.00# +# 2# 13| 4.00# +# 3# 13| 4.00# +#======================================#===========#=====# 2.6 EXAMINE. Descriptives #=========================================================================#=========#==========# - # Manufacturer #Statistic|Std. Error# + # Manufacturer #Statistic|Std. Error# #=========================================================================#=========#==========# - #Breaking Strain Aspeger Mean # 2.25 | .453 # - # 95% Confidence Interval for Mean Lower Bound# 1.178 | # - # Upper Bound# 3.322 | # + #Breaking Strain Aspeger Mean # 2.25 | .45 # + # 95% Confidence Interval for Mean Lower Bound# 1.18 | # + # Upper Bound# 3.32 | # # 5% Trimmed Mean # 2.22 | # # Median # 2.00 | # - # Variance # 1.643 | # - # Std. Deviation # 1.282 | # - # Minimum # 1.000 | # - # Maximum # 4.000 | # - # Range # 3.000 | # + # Variance # 1.64 | # + # Std. 
Deviation # 1.28 | # + # Minimum # 1.00 | # + # Maximum # 4.00 | # + # Range # 3.00 | # # Interquartile Range # 2.75 | # - # Skewness # .475 | .752 # - # Kurtosis # -1.546 | 1.481 # + # Skewness # .47 | .75 # + # Kurtosis # -1.55 | 1.48 # # ----------------------------------------------------------#---------+----------# - # Bloggs Mean # 3.50 | .378 # - # 95% Confidence Interval for Mean Lower Bound# 2.606 | # - # Upper Bound# 4.394 | # + # Bloggs Mean # 3.50 | .38 # + # 95% Confidence Interval for Mean Lower Bound# 2.61 | # + # Upper Bound# 4.39 | # # 5% Trimmed Mean # 3.50 | # # Median # 4.00 | # - # Variance # 1.143 | # - # Std. Deviation # 1.069 | # - # Minimum # 2.000 | # - # Maximum # 5.000 | # - # Range # 3.000 | # + # Variance # 1.14 | # + # Std. Deviation # 1.07 | # + # Minimum # 2.00 | # + # Maximum # 5.00 | # + # Range # 3.00 | # # Interquartile Range # 1.75 | # - # Skewness # -.468 | .752 # - # Kurtosis # -.831 | 1.481 # + # Skewness # -.47 | .75 # + # Kurtosis # -.83 | 1.48 # # ----------------------------------------------------------#---------+----------# - # Charlies Mean # 4.88 | .441 # - # 95% Confidence Interval for Mean Lower Bound# 3.833 | # - # Upper Bound# 5.917 | # + # Charlies Mean # 4.88 | .44 # + # 95% Confidence Interval for Mean Lower Bound# 3.83 | # + # Upper Bound# 5.92 | # # 5% Trimmed Mean # 4.86 | # # Median # 5.00 | # - # Variance # 1.554 | # - # Std. Deviation # 1.246 | # - # Minimum # 3.000 | # - # Maximum # 7.000 | # - # Range # 4.000 | # + # Variance # 1.55 | # + # Std. Deviation # 1.25 | # + # Minimum # 3.00 | # + # Maximum # 7.00 | # + # Range # 4.00 | # # Interquartile Range # 1.75 | # - # Skewness # .304 | .752 # - # Kurtosis # .146 | 1.481 # + # Skewness # .30 | .75 # + # Kurtosis # .15 | 1.48 # #=========================================================================#=========#==========# EOF if [ $? -ne 0 ] ; then fail ; fi diff --cc tests/command/npar-binomial.sh index 29ad2fab,439398e2..0db048dd --- a/tests/command/npar-binomial.sh +++ b/tests/command/npar-binomial.sh @@@ -251,103 -225,85 +255,103 @@@ perl -pi -e 's/^\s*$//g' $TEMPDIR/pspp. diff -b $TEMPDIR/pspp.list - << EOF P < 0.5; N1/N2 < 1 1.1 NPAR TESTS. Binomial Test - +-+------#--------+--+--------------+----------+---------------------+ - | | #Category| N|Observed Prop.|Test Prop.|Exact Sig. (1-tailed)| - +-+------#--------+--+--------------+----------+---------------------+ - |x|Group1# 1.00| 6| .286| .300| .551| - | |Group2# 2.00|15| .714| | | - | |Total # |21| 1.00| | | - +-+------#--------+--+--------------+----------+---------------------+ + +-+------#--------+-----+--------------+----------+---------------------+ + | | #Category| N |Observed Prop.|Test Prop.|Exact Sig. (1-tailed)| + +-+------#--------+-----+--------------+----------+---------------------+ + |x|Group1# 1.00| 6.00| .286| .300| .551| + | |Group2# 2.00|15.00| .714| | | -| |Total # |21.00| 1.00| | | ++| |Total # |21.00| 1.000| | | + +-+------#--------+-----+--------------+----------+---------------------+ P < 0.5; N1/N2 > 1 2.1 NPAR TESTS. Binomial Test +-+------#--------+--+--------------+----------+---------------------+ | | #Category| N|Observed Prop.|Test Prop.|Exact Sig. 
(1-tailed)| +-+------#--------+--+--------------+----------+---------------------+ - |x|Group1# 1.00| 7| .538| .400| .229| - | |Group2# 2.00| 6| .462| | | - | |Total # |13| 1.00| | | + |x|Group1# 1| 7| .538| .400| .229| + | |Group2# 2| 6| .462| | | -| |Total # |13| 1| | | ++| |Total # |13| 1.000| | | +-+------#--------+--+--------------+----------+---------------------+ P < 0.5; N1/N2 = 1 3.1 NPAR TESTS. Binomial Test +-+------#--------+--+--------------+----------+---------------------+ | | #Category| N|Observed Prop.|Test Prop.|Exact Sig. (1-tailed)| +-+------#--------+--+--------------+----------+---------------------+ - |x|Group1# 1.00| 8| .500| .400| .284| - | |Group2# 2.00| 8| .500| | | - | |Total # |16| 1.00| | | + |x|Group1# 1| 8| .500| .400| .284| + | |Group2# 2| 8| .500| | | -| |Total # |16| 1| | | ++| |Total # |16| 1.000| | | +-+------#--------+--+--------------+----------+---------------------+ P > 0.5; N1/N2 < 1 4.1 NPAR TESTS. Binomial Test +-+------#--------+--+--------------+----------+---------------------+ | | #Category| N|Observed Prop.|Test Prop.|Exact Sig. (1-tailed)| +-+------#--------+--+--------------+----------+---------------------+ - |x|Group1# 1.00|11| .478| .600| .164| - | |Group2# 2.00|12| .522| | | - | |Total # |23| 1.00| | | + |x|Group1# 1|11| .478| .600| .164| + | |Group2# 2|12| .522| | | -| |Total # |23| 1| | | ++| |Total # |23| 1.000| | | +-+------#--------+--+--------------+----------+---------------------+ P > 0.5; N1/N2 > 1 5.1 NPAR TESTS. Binomial Test +-+------#--------+--+--------------+----------+---------------------+ | | #Category| N|Observed Prop.|Test Prop.|Exact Sig. (1-tailed)| +-+------#--------+--+--------------+----------+---------------------+ - |x|Group1# 1.00|11| .550| .600| .404| - | |Group2# 2.00| 9| .450| | | - | |Total # |20| 1.00| | | + |x|Group1# 1|11| .550| .600| .404| + | |Group2# 2| 9| .450| | | -| |Total # |20| 1| | | ++| |Total # |20| 1.000| | | +-+------#--------+--+--------------+----------+---------------------+ P > 0.5; N1/N2 == 1 6.1 NPAR TESTS. Binomial Test +-+------#--------+--+--------------+----------+---------------------+ | | #Category| N|Observed Prop.|Test Prop.|Exact Sig. (1-tailed)| +-+------#--------+--+--------------+----------+---------------------+ - |x|Group1# 1.00|11| .500| .600| .228| - | |Group2# 2.00|11| .500| | | - | |Total # |22| 1.00| | | + |x|Group1# 1|11| .500| .600| .228| + | |Group2# 2|11| .500| | | -| |Total # |22| 1| | | ++| |Total # |22| 1.000| | | +-+------#--------+--+--------------+----------+---------------------+ P == 0.5; N1/N2 < 1 7.1 NPAR TESTS. Binomial Test +-+------#--------+--+--------------+----------+---------------------+ | | #Category| N|Observed Prop.|Test Prop.|Exact Sig. (2-tailed)| +-+------#--------+--+--------------+----------+---------------------+ - |x|Group1# 1.00| 8| .348| .500| .210| - | |Group2# 2.00|15| .652| | | - | |Total # |23| 1.00| | | + |x|Group1# 1| 8| .348| .500| .210| + | |Group2# 2|15| .652| | | -| |Total # |23| 1| | | ++| |Total # |23| 1.000| | | +-+------#--------+--+--------------+----------+---------------------+ P == 0.5; N1/N2 > 1 8.1 NPAR TESTS. Binomial Test +-+------#--------+--+--------------+----------+---------------------+ | | #Category| N|Observed Prop.|Test Prop.|Exact Sig. 
(2-tailed)| +-+------#--------+--+--------------+----------+---------------------+ - |x|Group1# 1.00|12| .667| .500| .238| - | |Group2# 2.00| 6| .333| | | - | |Total # |18| 1.00| | | + |x|Group1# 1|12| .667| .500| .238| + | |Group2# 2| 6| .333| | | -| |Total # |18| 1| | | ++| |Total # |18| 1.000| | | +-+------#--------+--+--------------+----------+---------------------+ P == 0.5; N1/N2 == 1 9.1 NPAR TESTS. Binomial Test +-+------#--------+--+--------------+----------+---------------------+ | | #Category| N|Observed Prop.|Test Prop.|Exact Sig. (2-tailed)| +-+------#--------+--+--------------+----------+---------------------+ - |x|Group1# 1.00|10| .500| .500| 1.000| - | |Group2# 2.00|10| .500| | | - | |Total # |20| 1.00| | | + |x|Group1# 1|10| .500| .500| 1.000| + | |Group2# 2|10| .500| | | -| |Total # |20| 1| | | ++| |Total # |20| 1.000| | | +-+------#--------+--+--------------+----------+---------------------+ +P == 0.5; N1/N2 == 1 Cutpoint +10.1 NPAR TESTS. Binomial Test - +-+------#--------+--+--------------+----------+---------------------+ - | | #Category| N|Observed Prop.|Test Prop.|Exact Sig. (2-tailed)| - +-+------#--------+--+--------------+----------+---------------------+ - |x|Group1# <= 10|10| .385| .500| .327| - | |Group2# |16| .615| | | - | |Total # |26| 1.00| | | - +-+------#--------+--+--------------+----------+---------------------+ +++-+------#--------+------+--------------+----------+---------------------+ ++| | #Category| N |Observed Prop.|Test Prop.|Exact Sig. (2-tailed)| +++-+------#--------+------+--------------+----------+---------------------+ ++|x|Group1# <= 10|10.000| .385| .500| .327| ++| |Group2# |16.000| .615| | | ++| |Total # |26.000| 1.000| | | +++-+------#--------+------+--------------+----------+---------------------+ +P == 0.5; N1/N2 == 1 Named values +11.1 NPAR TESTS. Binomial Test - +-+------#--------+--+--------------+----------+---------------------+ - | | #Category| N|Observed Prop.|Test Prop.|Exact Sig. (2-tailed)| - +-+------#--------+--+--------------+----------+---------------------+ - |x|Group1# 10.00|10| .435| .500| .678| - | |Group2# 20.00|13| .565| | | - | |Total # |23| 1.00| | | - +-+------#--------+--+--------------+----------+---------------------+ +++-+------#--------+------+--------------+----------+---------------------+ ++| | #Category| N |Observed Prop.|Test Prop.|Exact Sig. (2-tailed)| +++-+------#--------+------+--------------+----------+---------------------+ ++|x|Group1# 10.000|10.000| .435| .500| .678| ++| |Group2# 20.000|13.000| .565| | | ++| |Total # |23.000| 1.000| | | +++-+------#--------+------+--------------+----------+---------------------+ EOF if [ $? -ne 0 ] ; then fail ; fi diff --cc tests/command/npar-wilcoxon.sh index c4a5d825,00000000..ae0d39fc mode 100755,000000..100755 --- a/tests/command/npar-wilcoxon.sh +++ b/tests/command/npar-wilcoxon.sh @@@ -1,174 -1,0 +1,173 @@@ +#!/bin/sh + +# This program tests the wilcoxon subcommand of npar tests + +TEMPDIR=/tmp/pspp-tst-$$ +TESTFILE=$TEMPDIR/`basename $0`.sps + +# ensure that top_srcdir and top_builddir are absolute +if [ -z "$top_srcdir" ] ; then top_srcdir=. ; fi +if [ -z "$top_builddir" ] ; then top_builddir=. 
; fi +top_srcdir=`cd $top_srcdir; pwd` +top_builddir=`cd $top_builddir; pwd` + +PSPP=$top_builddir/src/ui/terminal/pspp + +STAT_CONFIG_PATH=$top_srcdir/config +export STAT_CONFIG_PATH + +LANG=C +export LANG + + +cleanup() +{ + if [ x"$PSPP_TEST_NO_CLEANUP" != x ] ; then + echo "NOT cleaning $TEMPDIR" + return ; + fi + rm -rf $TEMPDIR +} + + +fail() +{ + echo $activity + echo FAILED + cleanup; + exit 1; +} + + +no_result() +{ + echo $activity + echo NO RESULT; + cleanup; + exit 2; +} + +pass() +{ + cleanup; + exit 0; +} + +mkdir -p $TEMPDIR + +cd $TEMPDIR + +activity="create program 1" +cat > $TESTFILE << EOF - data list notable list /foo * bar * w *. ++data list notable list /foo * bar * w (f8.0). +begin data. +1.00 1.00 1 +1.00 2.00 1 +2.00 1.00 1 +1.00 4.00 1 +2.00 5.00 1 +1.00 19.00 1 +2.00 7.00 1 +4.00 5.00 1 +1.00 12.00 1 +2.00 13.00 1 +2.00 2.00 1 +12.00 .00 2 +12.00 1.00 1 +13.00 1.00 1 +end data + +variable labels foo "first" bar "second". + +weight by w. + +npar test + /wilcoxon=foo with bar (paired) + /missing analysis + /method=exact. + +EOF +if [ $? -ne 0 ] ; then no_result ; fi + + +activity="run program 1" +$SUPERVISOR $PSPP --testing-mode -o raw-ascii $TESTFILE +if [ $? -ne 0 ] ; then no_result ; fi + - +activity="generate results" +cat > $TEMPDIR/results.txt < $TESTFILE << EOF +data list notable list /foo * bar * dummy *. +begin data. +1.00 1.00 1 +1.00 2.00 1 +2.00 1.00 1 +1.00 4.00 . +2.00 5.00 . +1.00 19.00 . +2.00 7.00 1 +4.00 5.00 1 +1.00 12.00 1 +2.00 13.00 1 +2.00 2.00 1 +12.00 .00 1 +12.00 .00 1 +34.2 . 1 +12.00 1.00 1 +13.00 1.00 1 +end data + +variable labels foo "first" bar "second". + +npar test + /wilcoxon=foo with bar (paired) + /missing analysis + /method=exact. + +EOF +if [ $? -ne 0 ] ; then no_result ; fi + + +activity="run program 2" +$SUPERVISOR $PSPP --testing-mode -o raw-ascii $TESTFILE +if [ $? -ne 0 ] ; then no_result ; fi + - +activity="compare output 2" +diff pspp.list $TEMPDIR/results.txt +if [ $? -ne 0 ] ; then fail ; fi + + + +pass; diff --cc tests/command/reliability.sh index 0bfa7336,00000000..ed4b4b85 mode 100755,000000..100755 --- a/tests/command/reliability.sh +++ b/tests/command/reliability.sh @@@ -1,345 -1,0 +1,345 @@@ +#!/bin/sh + +# This program tests the reliability command. + +TEMPDIR=/tmp/pspp-tst-$$ +TESTFILE=$TEMPDIR/`basename $0`.sps + +# ensure that top_srcdir and top_builddir are absolute +if [ -z "$top_srcdir" ] ; then top_srcdir=. ; fi +if [ -z "$top_builddir" ] ; then top_builddir=. ; fi +top_srcdir=`cd $top_srcdir; pwd` +top_builddir=`cd $top_builddir; pwd` + +PSPP=$top_builddir/src/ui/terminal/pspp + +STAT_CONFIG_PATH=$top_srcdir/config +export STAT_CONFIG_PATH + +LANG=C +export LANG + + +cleanup() +{ + if [ x"$PSPP_TEST_NO_CLEANUP" != x ] ; then + echo "NOT cleaning $TEMPDIR" + return ; + fi + rm -rf $TEMPDIR +} + + +fail() +{ + echo $activity + echo FAILED + cleanup; + exit 1; +} + + +no_result() +{ + echo $activity + echo NO RESULT; + cleanup; + exit 2; +} + +pass() +{ + cleanup; + exit 0; +} + +mkdir -p $TEMPDIR + +cd $TEMPDIR + + +activity="create program" +cat > $TESTFILE <