#include <language/dictionary/split-file.h>
#include <language/data-io/file-handle.h>
#include <language/lexer/lexer.h>
-#include <libpspp/alloc.h>
#include <libpspp/compiler.h>
#include <libpspp/message.h>
#include <libpspp/taint.h>
#include <math/moments.h>
#include <output/table.h>
+#include "xalloc.h"
+
#include "gettext.h"
#define _(msgid) gettext (msgid)
}
+/* Writes into NAME (a buffer of VAR_NAME_LEN bytes) a variable name of
+   the form PREFIX followed by a decimal integer, starting at 1 and
+   incrementing until try_name() reports the name is unused in DICT.
+   snprintf bounds every write to VAR_NAME_LEN, so NAME is always
+   NUL-terminated; a very long PREFIX may be truncated.
+   NOTE(review): assumes DICT has fewer than INT_MAX variables with this
+   prefix so the loop terminates -- TODO confirm upstream invariant. */
static void
-reg_get_name (const struct dictionary *dict, char name[LONG_NAME_LEN],
- const char prefix[LONG_NAME_LEN])
+reg_get_name (const struct dictionary *dict, char name[VAR_NAME_LEN],
+ const char prefix[VAR_NAME_LEN])
{
int i = 1;
- snprintf (name, LONG_NAME_LEN, "%s%d", prefix, i);
+ snprintf (name, VAR_NAME_LEN, "%s%d", prefix, i);
while (!try_name (dict, name))
{
i++;
- snprintf (name, VAR_NAME_LEN, "%s%d", prefix, i);
+ snprintf (name, VAR_NAME_LEN, "%s%d", prefix, i);
}
}
{
struct dictionary *dict = dataset_dict (ds);
static int trns_index = 1;
- char name[LONG_NAME_LEN];
+ char name[VAR_NAME_LEN];
struct variable *new_var;
struct reg_trns *t = NULL;
model_file = NULL;
else
{
+ fh_unref (model_file);
model_file = fh_parse (lexer, FH_REF_FILE);
if (model_file == NULL)
return 0;
bool ok;
size_t i;
+ model_file = NULL;
if (!parse_regression (lexer, ds, &cmd, NULL))
- return CMD_FAILURE;
+ {
+ fh_unref (model_file);
+ return CMD_FAILURE;
+ }
models = xnmalloc (cmd.n_dependent, sizeof *models);
for (i = 0; i < cmd.n_dependent; i++)
subcommand_save (ds, cmd.sbc_save, models);
free (v_variables);
free (models);
+ free_regression (&cmd);
+ fh_unref (model_file);
+
return ok ? CMD_SUCCESS : CMD_FAILURE;
}
if (!v_variables)
{
- dict_get_vars (dataset_dict (ds), &v_variables, &n_variables,
- 1u << DC_SYSTEM);
+ dict_get_vars (dataset_dict (ds), &v_variables, &n_variables, 0);
}
for (i = 0; i < cmd->n_dependent; i++)
lopts.get_indep_mean_std[i] = 1;
}
models[k] = pspp_linreg_cache_alloc (X->m->size1, X->m->size2);
- models[k]->indep_means = gsl_vector_alloc (X->m->size2);
- models[k]->indep_std = gsl_vector_alloc (X->m->size2);
models[k]->depvar = dep_var;
/*
For large data sets, use QR decomposition.