#include <uniwidth.h>
#include "language/command.h"
+#include "language/lexer/macro.h"
#include "language/lexer/scan.h"
#include "language/lexer/segment.h"
#include "language/lexer/token.h"
#include "libpspp/str.h"
#include "libpspp/u8-istream.h"
#include "output/journal.h"
-#include "output/text-item.h"
+#include "output/output-item.h"
#include "gl/c-ctype.h"
#include "gl/minmax.h"
size_t token_len; /* Length of source for token in bytes. */
size_t line_pos; /* Start of line containing token_pos. */
int first_line; /* Line number at token_pos. */
+ bool from_macro;
};
/* A source of tokens, corresponding to a syntax file.
{
struct ll ll; /* In lexer's list of sources. */
struct lex_reader *reader;
+ struct lexer *lexer;
struct segmenter segmenter;
bool eof; /* True if T_STOP was read from 'reader'. */
struct lex_token *tokens; /* Lookahead tokens for parser. */
};
/* Forward declarations for lex_source constructors/destructor. */
static struct lex_source *lex_source_create (struct lexer *,
                                             struct lex_reader *);
static void lex_source_destroy (struct lex_source *);
/* Lexer. */
struct lexer
  {
    struct ll_list sources;     /* Contains "struct lex_source"s. */
    struct macro_set *macros;   /* Macros to expand; owned by the lexer. */
  };
/* Forward declarations for internal helpers. */
static struct lex_source *lex_source__ (const struct lexer *);
static struct substring lex_source_get_syntax__ (const struct lex_source *,
                                                 int n0, int n1);
static const struct lex_token *lex_next__ (const struct lexer *, int n);
static void lex_source_push_endcmd__ (struct lex_source *);
static void lex_source_pop__ (struct lex_source *);
static bool lex_source_get (const struct lex_source *);
static void lex_source_error_valist (struct lex_source *, int n0, int n1,
                                     const char *format, va_list)
  PRINTF_FORMAT (4, 0);
const struct lex_reader_class *class)
{
reader->class = class;
- reader->syntax = LEX_SYNTAX_AUTO;
+ reader->syntax = SEG_MODE_AUTO;
reader->error = LEX_ERROR_CONTINUE;
reader->file_name = NULL;
reader->encoding = NULL;
lex_reader_set_file_name (struct lex_reader *reader, const char *file_name)
{
  /* Replaces READER's file name with a copy of FILE_NAME (which may be
     NULL).  The old name, if any, is freed. */
  free (reader->file_name);
  reader->file_name = xstrdup_if_nonnull (file_name);
}
\f
/* Creates and returns a new lexer. */
struct lexer *
lex_create (void)
{
- struct lexer *lexer = xzalloc (sizeof *lexer);
- ll_init (&lexer->sources);
+ struct lexer *lexer = xmalloc (sizeof *lexer);
+ *lexer = (struct lexer) {
+ .sources = LL_INITIALIZER (lexer->sources),
+ .macros = macro_set_create (),
+ };
return lexer;
}
ll_for_each_safe (source, next, struct lex_source, ll, &lexer->sources)
lex_source_destroy (source);
+ macro_set_destroy (lexer->macros);
free (lexer);
}
}
+/* Adds M to LEXER's set of macros. M replaces any existing macro with the
+ same name. Takes ownership of M. */
+void
+lex_define_macro (struct lexer *lexer, struct macro *m)
+{
+ macro_set_add (lexer->macros, m);
+}
+
/* Inserts READER into LEXER so that the next token read by LEXER comes from
READER. Before the caller, LEXER must either be empty or at a T_ENDCMD
token. */
lex_include (struct lexer *lexer, struct lex_reader *reader)
{
  /* Only legal when the lexer is empty or at a command boundary. */
  assert (ll_is_empty (&lexer->sources) || lex_token (lexer) == T_ENDCMD);
  ll_push_head (&lexer->sources, &lex_source_create (lexer, reader)->ll);
}
/* Appends READER to LEXER, so that it will be read after all other current
void
lex_append (struct lexer *lexer, struct lex_reader *reader)
{
  /* New sources go at the tail so they are read after all current ones. */
  ll_push_tail (&lexer->sources, &lex_source_create (lexer, reader)->ll);
}
\f
/* Advancing. */
src->tokens = deque_expand (&src->deque, src->tokens, sizeof *src->tokens);
token = &src->tokens[deque_push_front (&src->deque)];
- token_init (&token->token);
+ token->token = (struct token) { .type = T_STOP };
+ token->from_macro = false;
return token;
}
/* Discards the token at the back of SRC's deque, freeing its contents. */
static void
lex_source_pop__ (struct lex_source *src)
{
  token_uninit (&src->tokens[deque_pop_back (&src->deque)].token);
}
/* Discards the token at the front of SRC's deque (the most recently pushed
   one — see lex_push_token__), freeing its contents. */
static void
lex_source_pop_front (struct lex_source *src)
{
  token_uninit (&src->tokens[deque_pop_front (&src->deque)].token);
}
/* Advances LEXER to the next token, consuming the current token. */
lex_source_pop__ (src);
while (deque_is_empty (&src->deque))
- if (!lex_source_get__ (src))
+ if (!lex_source_get (src))
{
lex_source_destroy (src);
src = lex_source__ (lexer);
va_end (args);
}
/* Prints a syntax error message saying that one of the strings provided as
   varargs, up to the first NULL, is expected. */
void
(lex_error_expecting) (struct lexer *lexer, ...)
{
  va_list args;

  va_start (args, lexer);
  lex_error_expecting_valist (lexer, args);
  va_end (args);
}

/* Prints a syntax error message saying that one of the options provided in
   ARGS, up to the first NULL, is expected. */
void
lex_error_expecting_valist (struct lexer *lexer, va_list args)
{
  enum { MAX_OPTIONS = 9 };
  const char *options[MAX_OPTIONS];

  /* Collect up to MAX_OPTIONS strings; stop at the NULL terminator. */
  int n = 0;
  while (n < MAX_OPTIONS)
    {
      const char *option = va_arg (args, const char *);
      if (!option)
        break;
      options[n++] = option;
    }
  lex_error_expecting_array (lexer, options, n);
}

+void
+lex_error_expecting_array (struct lexer *lexer, const char **options, size_t n)
+{
switch (n)
{
case 0:
break;
default:
- NOT_REACHED ();
+ lex_error (lexer, NULL);
}
}
}
}
/* If the current token is an integer in the range MIN...MAX (inclusive), does
   nothing and returns true.  Otherwise, reports an error and returns false.
   If NAME is nonnull, then it is used in the error message. */
bool
lex_force_int_range (struct lexer *lexer, const char *name, long min, long max)
{
  bool is_integer = lex_is_integer (lexer);
  bool too_small = is_integer && lex_integer (lexer) < min;
  bool too_big = is_integer && lex_integer (lexer) > max;
  if (is_integer && !too_small && !too_big)
    return true;

  if (min > max)
    {
      /* Weird, maybe a bug in the caller.  Just report that we needed an
         integer. */
      if (name)
        lex_error (lexer, _("Integer expected for %s."), name);
      else
        lex_error (lexer, _("Integer expected."));
    }
  else if (min == max)
    {
      /* Exactly one acceptable value. */
      if (name)
        lex_error (lexer, _("Expected %ld for %s."), min, name);
      else
        lex_error (lexer, _("Expected %ld."), min);
    }
  else if (min + 1 == max)
    {
      /* Exactly two acceptable values. */
      if (name)
        lex_error (lexer, _("Expected %ld or %ld for %s."), min, min + 1, name);
      else
        lex_error (lexer, _("Expected %ld or %ld."), min, min + 1);
    }
  else
    {
      /* Mention a bound only if it is "interesting": either the actual value
         violated it, or it is not so extreme that it is effectively
         unbounded. */
      bool report_lower_bound = (min > INT_MIN / 2) || too_small;
      bool report_upper_bound = (max < INT_MAX / 2) || too_big;

      if (report_lower_bound && report_upper_bound)
        {
          if (name)
            lex_error (lexer,
                       _("Expected integer between %ld and %ld for %s."),
                       min, max, name);
          else
            lex_error (lexer, _("Expected integer between %ld and %ld."),
                       min, max);
        }
      else if (report_lower_bound)
        {
          if (min == 0)
            {
              if (name)
                lex_error (lexer, _("Expected non-negative integer for %s."),
                           name);
              else
                lex_error (lexer, _("Expected non-negative integer."));
            }
          else if (min == 1)
            {
              if (name)
                lex_error (lexer, _("Expected positive integer for %s."),
                           name);
              else
                lex_error (lexer, _("Expected positive integer."));
            }
        }
      else if (report_upper_bound)
        {
          if (name)
            lex_error (lexer,
                       _("Expected integer less than or equal to %ld for %s."),
                       max, name);
          else
            lex_error (lexer, _("Expected integer less than or equal to %ld."),
                       max);
        }
      else
        {
          if (name)
            lex_error (lexer, _("Integer expected for %s."), name);
          else
            lex_error (lexer, _("Integer expected."));
        }
    }
  return false;
}

/* If the current token is a number, does nothing and returns true.
Otherwise, reports an error and returns false. */
bool
return lex_source_next__ (src, n);
else
{
- static const struct lex_token stop_token =
- { TOKEN_INITIALIZER (T_STOP, 0.0, ""), 0, 0, 0, 0 };
-
+ static const struct lex_token stop_token = { .token = { .type = T_STOP } };
return &stop_token;
}
}
+static const struct lex_token *
+lex_source_front (const struct lex_source *src)
+{
+ return &src->tokens[deque_front (&src->deque, 0)];
+}
+
static const struct lex_token *
lex_source_next__ (const struct lex_source *src, int n)
{
{
if (!deque_is_empty (&src->deque))
{
- struct lex_token *front;
-
- front = &src->tokens[deque_front (&src->deque, 0)];
+ const struct lex_token *front = lex_source_front (src);
if (front->token.type == T_STOP || front->token.type == T_ENDCMD)
return front;
}
- lex_source_get__ (src);
+ lex_source_get (src);
}
return &src->tokens[deque_back (&src->deque, n)];
The string is null-terminated (but the null terminator is not included in
the returned substring's 'length').
   Only T_ID, T_MACRO_ID, and T_STRING tokens have meaningful strings.  For
   other tokens this function will always return NULL.
The UTF-8 encoding of the returned string is correct for variable names and
other identifiers. Use filename_to_utf8() to use it as a filename. Use
return lex_next (lexer, n)->string;
}
+struct substring
+lex_next_representation (const struct lexer *lexer, int n0, int n1)
+{
+ return lex_source_get_syntax__ (lex_source__ (lexer), n0, n1);
+}
+
+bool
+lex_next_is_from_macro (const struct lexer *lexer, int n)
+{
+ return lex_next__ (lexer, n)->from_macro;
+}
+
static bool
lex_tokens_match (const struct token *actual, const struct token *expected)
{
if (token.type != SCAN_SKIP)
{
bool match = lex_tokens_match (lex_next (lexer, i++), &token);
- token_destroy (&token);
+ token_uninit (&token);
if (!match)
return false;
}
return src == NULL ? NULL : src->reader->encoding;
}
-
/* Returns the syntax mode for the syntax file from which the current drawn is
- drawn. Returns LEX_SYNTAX_AUTO for a T_STOP token or if the command's
- source does not have line numbers.
+ drawn. Returns SEG_MODE_AUTO for a T_STOP token or if the command's source
+ does not have line numbers.
There is no version of this function that takes an N argument because
lookahead only works to the end of a command and any given command is always
within a single syntax file. */
-enum lex_syntax_mode
+enum segmenter_mode
lex_get_syntax_mode (const struct lexer *lexer)
{
struct lex_source *src = lex_source__ (lexer);
- return src == NULL ? LEX_SYNTAX_AUTO : src->reader->syntax;
+ return src == NULL ? SEG_MODE_AUTO : src->reader->syntax;
}
/* Returns the error mode for the syntax file from which the current drawn is
}
static struct substring
-lex_source_get_syntax__ (const struct lex_source *src, int n0, int n1)
+lex_tokens_get_syntax__ (const struct lex_source *src,
+ const struct lex_token *token0,
+ const struct lex_token *token1)
{
- const struct lex_token *token0 = lex_source_next__ (src, n0);
- const struct lex_token *token1 = lex_source_next__ (src, MAX (n0, n1));
size_t start = token0->token_pos;
size_t end = token1->token_pos + token1->token_len;
return ss_buffer (&src->buffer[start - src->tail], end - start);
}
+static struct substring
+lex_source_get_syntax__ (const struct lex_source *src, int n0, int n1)
+{
+ return lex_tokens_get_syntax__ (src,
+ lex_source_next__ (src, n0),
+ lex_source_next__ (src, MAX (n0, n1)));
+}
+
static void
lex_ellipsize__ (struct substring in, char *out, size_t out_size)
{
token = lex_source_next__ (src, n0);
if (token->token.type == T_ENDCMD)
ds_put_cstr (&s, _("Syntax error at end of command"));
+ else if (token->from_macro)
+ {
+ /* XXX this isn't ideal, we should get the actual syntax */
+ char *syntax = token_to_string (&token->token);
+ if (syntax)
+ ds_put_format (&s, _("Syntax error at `%s'"), syntax);
+ else
+ ds_put_cstr (&s, _("Syntax error"));
+ free (syntax);
+ }
else
{
struct substring syntax = lex_source_get_syntax__ (src, n0, n1);
ds_put_cstr (&s, ": ");
ds_put_vformat (&s, format, args);
}
- ds_put_byte (&s, '.');
+ if (ds_last (&s) != '.')
+ ds_put_byte (&s, '.');
struct msg m = {
.category = MSG_C_SYNTAX,
}
/* Attempts to append an additional token into SRC's deque, reading more from
   the underlying lex_reader if necessary.  Returns true if a new token was
   added to SRC's deque, false otherwise. */
static bool
-lex_source_get__ (const struct lex_source *src_)
+lex_source_try_get (struct lex_source *src)
{
- struct lex_source *src = CONST_CAST (struct lex_source *, src_);
- if (src->eof)
- return false;
-
/* State maintained while scanning tokens. Usually we only need a single
state, but scanner_push() can return SCAN_SAVE to indicate that the state
needs to be saved and possibly restored later with SCAN_BACK. */
copy_len--;
/* Submit the line as syntax. */
- text_item_submit (text_item_create_nocopy (TEXT_ITEM_SYNTAX,
- xmemdup0 (line, copy_len),
- NULL));
+ output_item_submit (text_item_create_nocopy (TEXT_ITEM_SYNTAX,
+ xmemdup0 (line, copy_len),
+ NULL));
src->journal_pos += line_len;
}
switch (token->token.type)
{
default:
- break;
+ return true;
case T_STOP:
token->token.type = T_ENDCMD;
src->eof = true;
- break;
+ return true;
case SCAN_BAD_HEX_LENGTH:
lex_get_error (src, _("String of hex digits has %d characters, which "
"is not a multiple of 2"),
(int) token->token.number);
- break;
+ return false;
case SCAN_BAD_HEX_DIGIT:
case SCAN_BAD_UNICODE_DIGIT:
lex_get_error (src, _("`%c' is not a valid hex digit"),
(int) token->token.number);
- break;
+ return false;
case SCAN_BAD_UNICODE_LENGTH:
lex_get_error (src, _("Unicode string contains %d bytes, which is "
"not in the valid range of 1 to 8 bytes"),
(int) token->token.number);
- break;
+ return false;
case SCAN_BAD_UNICODE_CODE_POINT:
lex_get_error (src, _("U+%04X is not a valid Unicode code point"),
(int) token->token.number);
- break;
+ return false;
case SCAN_EXPECTED_QUOTE:
lex_get_error (src, _("Unterminated string constant"));
- break;
+ return false;
case SCAN_EXPECTED_EXPONENT:
lex_get_error (src, _("Missing exponent following `%s'"),
token->token.string.string);
- break;
-
- case SCAN_UNEXPECTED_DOT:
- lex_get_error (src, _("Unexpected `.' in middle of command"));
- break;
+ return false;
case SCAN_UNEXPECTED_CHAR:
{
char c_name[16];
lex_get_error (src, _("Bad character %s in input"),
uc_name (token->token.number, c_name));
+ return false;
}
- break;
case SCAN_SKIP:
lex_source_pop_front (src);
- break;
+ return false;
}
+ NOT_REACHED ();
+}
+
+static bool
+lex_source_get__ (struct lex_source *src)
+{
+ for (;;)
+ {
+ if (src->eof)
+ return false;
+ else if (lex_source_try_get (src))
+ return true;
+ }
+}
+
+static bool
+lex_source_get (const struct lex_source *src_)
+{
+ struct lex_source *src = CONST_CAST (struct lex_source *, src_);
+
+ size_t old_count = deque_count (&src->deque);
+ if (!lex_source_get__ (src))
+ return false;
+
+ if (!settings_get_mexpand ())
+ return true;
+
+ struct macro_expander *me;
+ int retval = macro_expander_create (src->lexer->macros,
+ &lex_source_front (src)->token,
+ &me);
+ while (!retval)
+ {
+ if (!lex_source_get__ (src))
+ {
+ /* This should not be reachable because we always get a T_ENDCMD at
+ the end of an input file (transformed from T_STOP by
+ lex_source_try_get()) and the macro_expander should always
+ terminate expansion on T_ENDCMD. */
+ NOT_REACHED ();
+ }
+
+ const struct lex_token *front = lex_source_front (src);
+ const struct macro_token mt = {
+ .token = front->token,
+ .representation = lex_tokens_get_syntax__ (src, front, front)
+ };
+ retval = macro_expander_add (me, &mt);
+ }
+ if (retval < 0)
+ {
+ /* XXX handle case where there's a macro invocation starting from some
+ later token we've already obtained */
+ macro_expander_destroy (me);
+ return true;
+ }
+
+ /* XXX handle case where the macro invocation doesn't use all the tokens */
+ while (deque_count (&src->deque) > old_count)
+ lex_source_pop_front (src);
+
+ struct macro_tokens expansion = { .n = 0 };
+ macro_expander_get_expansion (me, &expansion);
+ macro_expander_destroy (me);
+
+ if (settings_get_mprint ())
+ {
+ struct string mprint = DS_EMPTY_INITIALIZER;
+ macro_tokens_to_representation (&expansion, &mprint);
+ output_item_submit (text_item_create (TEXT_ITEM_LOG, ds_cstr (&mprint),
+ _("Macro Expansion")));
+ ds_destroy (&mprint);
+ }
+
+ for (size_t i = 0; i < expansion.n; i++)
+ {
+ *lex_push_token__ (src) = (struct lex_token) {
+ .token = expansion.mts[i].token,
+ .from_macro = true,
+ /* XXX the rest */
+ };
+
+ ss_dealloc (&expansion.mts[i].representation); /* XXX should feed into lexer */
+ }
+ free (expansion.mts);
+
return true;
}
\f
/* Pushes a synthetic T_ENDCMD token onto SRC, so that a fresh source starts
   at a command boundary. */
static void
lex_source_push_endcmd__ (struct lex_source *src)
{
  *lex_push_token__ (src) = (struct lex_token) { .token = { .type = T_ENDCMD } };
}
static struct lex_source *
-lex_source_create (struct lex_reader *reader)
+lex_source_create (struct lexer *lexer, struct lex_reader *reader)
{
struct lex_source *src;
- enum segmenter_mode mode;
src = xzalloc (sizeof *src);
src->reader = reader;
-
- if (reader->syntax == LEX_SYNTAX_AUTO)
- mode = SEG_MODE_AUTO;
- else if (reader->syntax == LEX_SYNTAX_INTERACTIVE)
- mode = SEG_MODE_INTERACTIVE;
- else if (reader->syntax == LEX_SYNTAX_BATCH)
- mode = SEG_MODE_BATCH;
- else
- NOT_REACHED ();
- segmenter_init (&src->segmenter, mode);
-
+ segmenter_init (&src->segmenter, reader->syntax);
+ src->lexer = lexer;
src->tokens = deque_init (&src->deque, 4, sizeof *src->tokens);
lex_source_push_endcmd__ (src);
Returns a null pointer if FILE_NAME cannot be opened. */
struct lex_reader *
lex_reader_for_file (const char *file_name, const char *encoding,
- enum lex_syntax_mode syntax,
+ enum segmenter_mode syntax,
enum lex_error_mode error)
{
struct lex_file_reader *r;
r->reader.syntax = syntax;
r->reader.error = error;
r->reader.file_name = xstrdup (file_name);
- r->reader.encoding = encoding ? xstrdup (encoding) : NULL;
+ r->reader.encoding = xstrdup_if_nonnull (encoding);
r->reader.line_number = 1;
r->istream = istream;
r = xmalloc (sizeof *r);
lex_reader_init (&r->reader, &lex_string_reader_class);
- r->reader.syntax = LEX_SYNTAX_AUTO;
- r->reader.encoding = encoding ? xstrdup (encoding) : NULL;
+ r->reader.syntax = SEG_MODE_AUTO;
+ r->reader.encoding = xstrdup_if_nonnull (encoding);
r->s = s;
r->offset = 0;