/* PSPP - a program for statistical analysis.
- Copyright (C) 1997-9, 2000, 2006, 2009, 2010, 2011, 2013 Free Software Foundation, Inc.
+ Copyright (C) 1997-9, 2000, 2006, 2009, 2010, 2011, 2013, 2016 Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#include <unistr.h>
#include <uniwidth.h>
-#include "data/file-name.h"
#include "language/command.h"
+#include "language/lexer/macro.h"
#include "language/lexer/scan.h"
#include "language/lexer/segment.h"
#include "language/lexer/token.h"
#include "libpspp/str.h"
#include "libpspp/u8-istream.h"
#include "output/journal.h"
-#include "output/text-item.h"
+#include "output/output-item.h"
#include "gl/c-ctype.h"
#include "gl/minmax.h"
/* The regular token information. */
struct token token;
- /* Location of token in terms of the lex_source's buffer.
+ /* For a token obtained through the lexer in an ordinary way, this is the
+ location of the token in terms of the lex_source's buffer.
+
+ For a token produced through macro expansion, this is the entire macro
+ call.
+
src->tail <= line_pos <= token_pos <= src->head. */
size_t token_pos; /* Start of token. */
size_t token_len; /* Length of source for token in bytes. */
size_t line_pos; /* Start of line containing token_pos. */
int first_line; /* Line number at token_pos. */
+
+ /* For a token obtained through macro expansion, the fields below locate just this token within the expansion. */
+ char *macro_rep; /* The whole macro expansion. */
+ size_t ofs; /* Offset of this token in macro_rep. */
+ size_t len; /* Length of this token in macro_rep. */
+ size_t *ref_cnt; /* Number of lex_tokens that refer to macro_rep. */
};
+static void
+lex_token_uninit (struct lex_token *t)
+{
+ token_uninit (&t->token);
+ if (t->ref_cnt)
+ {
+ assert (*t->ref_cnt > 0);
+ if (!--*t->ref_cnt)
+ {
+ free (t->macro_rep);
+ free (t->ref_cnt);
+ }
+ }
+}
+
/* A source of tokens, corresponding to a syntax file.
This is conceptually a lex_reader wrapped with everything needed to convert
{
struct ll ll; /* In lexer's list of sources. */
struct lex_reader *reader;
+ struct lexer *lexer;
struct segmenter segmenter;
bool eof; /* True if T_STOP was read from 'reader'. */
int n_newlines; /* Number of new-lines up to seg_pos. */
bool suppress_next_newline;
- /* Tokens. */
- struct deque deque; /* Indexes into 'tokens'. */
- struct lex_token *tokens; /* Lookahead tokens for parser. */
+ /* Tokens.
+
+ This is mostly like a deque, with the conceptual invariant that back <=
+ middle <= front (modulo SIZE_MAX+1). The tokens available for parsing
+ lie between 'back' and 'middle': the token at 'back' is the current
+ token, the token at 'back + 1' is the next token, and so on. There are
+ usually no tokens between 'middle' and 'front'; if there are, then they
+ need to go through macro expansion and are not yet available for
+ parsing.
+
+ 'capacity' is the current number of elements in 'tokens'. It is always
+ a power of 2. 'front', 'middle', and 'back' refer to indexes in
+ 'tokens' modulo 'capacity'. */
+ size_t front;
+ size_t middle;
+ size_t back;
+ size_t capacity;
+ struct lex_token *tokens;
};
-static struct lex_source *lex_source_create (struct lex_reader *);
+static struct lex_source *lex_source_create (struct lexer *,
+ struct lex_reader *);
static void lex_source_destroy (struct lex_source *);
/* Lexer. */
struct lexer
{
struct ll_list sources; /* Contains "struct lex_source"s. */
+ struct macro_set *macros;
};
static struct lex_source *lex_source__ (const struct lexer *);
+static char *lex_source_get_syntax__ (const struct lex_source *,
+ int n0, int n1);
static const struct lex_token *lex_next__ (const struct lexer *, int n);
static void lex_source_push_endcmd__ (struct lex_source *);
-static void lex_source_pop__ (struct lex_source *);
-static bool lex_source_get__ (const struct lex_source *);
+static void lex_source_pop_back (struct lex_source *);
+static bool lex_source_get (const struct lex_source *);
static void lex_source_error_valist (struct lex_source *, int n0, int n1,
const char *format, va_list)
PRINTF_FORMAT (4, 0);
const struct lex_reader_class *class)
{
reader->class = class;
- reader->syntax = LEX_SYNTAX_AUTO;
+ reader->syntax = SEG_MODE_AUTO;
reader->error = LEX_ERROR_CONTINUE;
reader->file_name = NULL;
reader->encoding = NULL;
reader->line_number = 0;
+ reader->eof = false;
}
/* Frees any file name already in READER and replaces it by a copy of
lex_reader_set_file_name (struct lex_reader *reader, const char *file_name)
{
free (reader->file_name);
- reader->file_name = file_name != NULL ? xstrdup (file_name) : NULL;
+ reader->file_name = xstrdup_if_nonnull (file_name);
}
\f
/* Creates and returns a new lexer. */
struct lexer *
lex_create (void)
{
- struct lexer *lexer = xzalloc (sizeof *lexer);
- ll_init (&lexer->sources);
+ struct lexer *lexer = xmalloc (sizeof *lexer);
+ *lexer = (struct lexer) {
+ .sources = LL_INITIALIZER (lexer->sources),
+ .macros = macro_set_create (),
+ };
return lexer;
}
ll_for_each_safe (source, next, struct lex_source, ll, &lexer->sources)
lex_source_destroy (source);
+ macro_set_destroy (lexer->macros);
free (lexer);
}
}
+/* Adds M to LEXER's set of macros. M replaces any existing macro with the
+ same name. Takes ownership of M. */
+void
+lex_define_macro (struct lexer *lexer, struct macro *m)
+{
+ macro_set_add (lexer->macros, m);
+}
+
/* Inserts READER into LEXER so that the next token read by LEXER comes from
READER. Before the caller, LEXER must either be empty or at a T_ENDCMD
token. */
lex_include (struct lexer *lexer, struct lex_reader *reader)
{
assert (ll_is_empty (&lexer->sources) || lex_token (lexer) == T_ENDCMD);
- ll_push_head (&lexer->sources, &lex_source_create (reader)->ll);
+ ll_push_head (&lexer->sources, &lex_source_create (lexer, reader)->ll);
}
/* Appends READER to LEXER, so that it will be read after all other current
void
lex_append (struct lexer *lexer, struct lex_reader *reader)
{
- ll_push_tail (&lexer->sources, &lex_source_create (reader)->ll);
+ ll_push_tail (&lexer->sources, &lex_source_create (lexer, reader)->ll);
}
\f
-/* Advacning. */
+/* Advancing. */
+/* Adds a new token at the front of SRC and returns a pointer to it. The
+ caller should initialize it. Does not advance the middle pointer, so the
+ token isn't immediately available to the parser. */
static struct lex_token *
lex_push_token__ (struct lex_source *src)
{
- struct lex_token *token;
-
- if (deque_is_full (&src->deque))
- src->tokens = deque_expand (&src->deque, src->tokens, sizeof *src->tokens);
+ if (src->front - src->back >= src->capacity)
+ {
+ /* Expansion works just like a deque, so we reuse the code. */
+ struct deque deque = {
+ .capacity = src->capacity,
+ .front = src->front,
+ .back = src->back,
+ };
+ src->tokens = deque_expand (&deque, src->tokens, sizeof *src->tokens);
+ src->capacity = deque.capacity;
+ }
- token = &src->tokens[deque_push_front (&src->deque)];
- token_init (&token->token);
+ struct lex_token *token = &src->tokens[src->front++ & (src->capacity - 1)];
+ token->token = (struct token) { .type = T_STOP };
+ token->macro_rep = NULL;
+ token->ref_cnt = NULL;
return token;
}
+/* Removes the current token from SRC and uninitializes it. */
static void
-lex_source_pop__ (struct lex_source *src)
+lex_source_pop_back (struct lex_source *src)
{
- token_destroy (&src->tokens[deque_pop_back (&src->deque)].token);
+ assert (src->middle - src->back > 0);
+ lex_token_uninit (&src->tokens[src->back++ & (src->capacity - 1)]);
}
+/* Removes the token at the greatest lookahead from SRC and uninitializes
+ it. */
static void
lex_source_pop_front (struct lex_source *src)
{
- token_destroy (&src->tokens[deque_pop_front (&src->deque)].token);
+ assert (src->front - src->middle > 0);
+ lex_token_uninit (&src->tokens[--src->front & (src->capacity - 1)]);
}
/* Advances LEXER to the next token, consuming the current token. */
if (src == NULL)
return;
- if (!deque_is_empty (&src->deque))
- lex_source_pop__ (src);
+ if (src->middle - src->back > 0)
+ lex_source_pop_back (src);
- while (deque_is_empty (&src->deque))
- if (!lex_source_get__ (src))
+ while (src->back == src->middle)
+ if (!lex_source_get (src))
{
lex_source_destroy (src);
src = lex_source__ (lexer);
va_end (args);
}
-/* Prints a syntax error message saying that OPTION0 or one of the other
- strings following it, up to the first NULL, is expected. */
+/* Prints a syntax error message saying that one of the strings provided as
+ varargs, up to the first NULL, is expected. */
void
-lex_error_expecting (struct lexer *lexer, const char *option0, ...)
+(lex_error_expecting) (struct lexer *lexer, ...)
{
- enum { MAX_OPTIONS = 8 };
- const char *options[MAX_OPTIONS + 1];
va_list args;
- int n;
- va_start (args, option0);
- options[0] = option0;
- n = 0;
- while (n + 1 < MAX_OPTIONS && options[n] != NULL)
- options[++n] = va_arg (args, const char *);
+ va_start (args, lexer);
+ lex_error_expecting_valist (lexer, args);
va_end (args);
+}
+
+/* Prints a syntax error message saying that one of the options provided in
+ ARGS, up to the first NULL, is expected. */
+void
+lex_error_expecting_valist (struct lexer *lexer, va_list args)
+{
+ enum { MAX_OPTIONS = 9 };
+ const char *options[MAX_OPTIONS];
+ int n = 0;
+ while (n < MAX_OPTIONS)
+ {
+ const char *option = va_arg (args, const char *);
+ if (!option)
+ break;
+ options[n++] = option;
+ }
+ lex_error_expecting_array (lexer, options, n);
+}
+
+void
+lex_error_expecting_array (struct lexer *lexer, const char **options, size_t n)
+{
switch (n)
{
case 0:
break;
default:
- NOT_REACHED ();
+ lex_error (lexer, NULL);
}
}
/* Returns true if the current token is a number. */
bool
-lex_is_number (struct lexer *lexer)
+lex_is_number (const struct lexer *lexer)
{
return lex_next_is_number (lexer, 0);
}
/* Returns true if the current token is a string. */
bool
-lex_is_string (struct lexer *lexer)
+lex_is_string (const struct lexer *lexer)
{
return lex_next_is_string (lexer, 0);
}
/* Returns the value of the current token, which must be a
floating point number. */
double
-lex_number (struct lexer *lexer)
+lex_number (const struct lexer *lexer)
{
return lex_next_number (lexer, 0);
}
/* Returns true iff the current token is an integer. */
bool
-lex_is_integer (struct lexer *lexer)
+lex_is_integer (const struct lexer *lexer)
{
return lex_next_is_integer (lexer, 0);
}
/* Returns the value of the current token, which must be an
integer. */
long
-lex_integer (struct lexer *lexer)
+lex_integer (const struct lexer *lexer)
{
return lex_next_integer (lexer, 0);
}
/* Returns true if the token N ahead of the current token is a number. */
bool
-lex_next_is_number (struct lexer *lexer, int n)
+lex_next_is_number (const struct lexer *lexer, int n)
{
- enum token_type next_token = lex_next_token (lexer, n);
- return next_token == T_POS_NUM || next_token == T_NEG_NUM;
+ return token_is_number (lex_next (lexer, n));
}
/* Returns true if the token N ahead of the current token is a string. */
bool
-lex_next_is_string (struct lexer *lexer, int n)
+lex_next_is_string (const struct lexer *lexer, int n)
{
- return lex_next_token (lexer, n) == T_STRING;
+ return token_is_string (lex_next (lexer, n));
}
/* Returns the value of the token N ahead of the current token, which must be a
floating point number. */
double
-lex_next_number (struct lexer *lexer, int n)
+lex_next_number (const struct lexer *lexer, int n)
{
- assert (lex_next_is_number (lexer, n));
- return lex_next_tokval (lexer, n);
+ return token_number (lex_next (lexer, n));
}
/* Returns true if the token N ahead of the current token is an integer. */
bool
-lex_next_is_integer (struct lexer *lexer, int n)
+lex_next_is_integer (const struct lexer *lexer, int n)
{
- double value;
-
- if (!lex_next_is_number (lexer, n))
- return false;
-
- value = lex_next_tokval (lexer, n);
- return value > LONG_MIN && value <= LONG_MAX && floor (value) == value;
+ return token_is_integer (lex_next (lexer, n));
}
/* Returns the value of the token N ahead of the current token, which must be
an integer. */
long
-lex_next_integer (struct lexer *lexer, int n)
+lex_next_integer (const struct lexer *lexer, int n)
{
- assert (lex_next_is_integer (lexer, n));
- return lex_next_tokval (lexer, n);
+ return token_integer (lex_next (lexer, n));
}
\f
/* Token matching functions. */
return true;
else
{
- lex_error_expecting (lexer, identifier, NULL_SENTINEL);
+ lex_error_expecting (lexer, identifier);
return false;
}
}
}
else
{
- char *s = xasprintf ("`%s'", token_type_to_string (type));
- lex_error_expecting (lexer, s, NULL_SENTINEL);
- free (s);
+ const char *type_string = token_type_to_string (type);
+ if (type_string)
+ {
+ char *s = xasprintf ("`%s'", type_string);
+ lex_error_expecting (lexer, s);
+ free (s);
+ }
+ else
+ lex_error_expecting (lexer, token_type_to_name (type));
+
return false;
}
}
bool
lex_force_string_or_id (struct lexer *lexer)
{
- return lex_is_integer (lexer) || lex_force_string (lexer);
+ return lex_token (lexer) == T_ID || lex_force_string (lexer);
}
/* If the current token is an integer, does nothing and returns true.
}
}
+/* If the current token is an integer in the range MIN...MAX (inclusive), does
+ nothing and returns true. Otherwise, reports an error and returns false.
+ If NAME is nonnull, then it is used in the error message. */
+bool
+lex_force_int_range (struct lexer *lexer, const char *name, long min, long max)
+{
+ bool is_integer = lex_is_integer (lexer);
+ bool too_small = is_integer && lex_integer (lexer) < min;
+ bool too_big = is_integer && lex_integer (lexer) > max;
+ if (is_integer && !too_small && !too_big)
+ return true;
+
+ if (min > max)
+ {
+ /* Weird, maybe a bug in the caller. Just report that we needed an
+ integer. */
+ if (name)
+ lex_error (lexer, _("Integer expected for %s."), name);
+ else
+ lex_error (lexer, _("Integer expected."));
+ }
+ else if (min == max)
+ {
+ if (name)
+ lex_error (lexer, _("Expected %ld for %s."), min, name);
+ else
+ lex_error (lexer, _("Expected %ld."), min);
+ }
+ else if (min + 1 == max)
+ {
+ if (name)
+ lex_error (lexer, _("Expected %ld or %ld for %s."), min, min + 1, name);
+ else
+ lex_error (lexer, _("Expected %ld or %ld."), min, min + 1);
+ }
+ else
+ {
+ bool report_lower_bound = (min > INT_MIN / 2) || too_small;
+ bool report_upper_bound = (max < INT_MAX / 2) || too_big;
+
+ if (report_lower_bound && report_upper_bound)
+ {
+ if (name)
+ lex_error (lexer,
+ _("Expected integer between %ld and %ld for %s."),
+ min, max, name);
+ else
+ lex_error (lexer, _("Expected integer between %ld and %ld."),
+ min, max);
+ }
+ else if (report_lower_bound)
+ {
+ if (min == 0)
+ {
+ if (name)
+ lex_error (lexer, _("Expected non-negative integer for %s."),
+ name);
+ else
+ lex_error (lexer, _("Expected non-negative integer."));
+ }
+ else if (min == 1)
+ {
+ if (name)
+ lex_error (lexer, _("Expected positive integer for %s."),
+ name);
+ else
+ lex_error (lexer, _("Expected positive integer."));
+ }
+ }
+ else if (report_upper_bound)
+ {
+ if (name)
+ lex_error (lexer,
+ _("Expected integer less than or equal to %ld for %s."),
+ max, name);
+ else
+ lex_error (lexer, _("Expected integer less than or equal to %ld."),
+ max);
+ }
+ else
+ {
+ if (name)
+ lex_error (lexer, _("Integer expected for %s."), name);
+ else
+ lex_error (lexer, _("Integer expected."));
+ }
+ }
+ return false;
+}
+
/* If the current token is a number, does nothing and returns true.
Otherwise, reports an error and returns false. */
bool
return lex_source_next__ (src, n);
else
{
- static const struct lex_token stop_token =
- { TOKEN_INITIALIZER (T_STOP, 0.0, ""), 0, 0, 0, 0 };
-
+ static const struct lex_token stop_token = { .token = { .type = T_STOP } };
return &stop_token;
}
}
+/* Returns the token in SRC with the greatest lookahead. */
+static const struct lex_token *
+lex_source_middle (const struct lex_source *src)
+{
+ assert (src->middle - src->back > 0);
+ return &src->tokens[(src->middle - 1) & (src->capacity - 1)];
+}
+
static const struct lex_token *
lex_source_next__ (const struct lex_source *src, int n)
{
- while (deque_count (&src->deque) <= n)
+ while (src->middle - src->back <= n)
{
- if (!deque_is_empty (&src->deque))
+ if (src->middle - src->back > 0)
{
- struct lex_token *front;
-
- front = &src->tokens[deque_front (&src->deque, 0)];
- if (front->token.type == T_STOP || front->token.type == T_ENDCMD)
- return front;
+ const struct lex_token *middle = lex_source_middle (src);
+ if (middle->token.type == T_STOP || middle->token.type == T_ENDCMD)
+ return middle;
}
- lex_source_get__ (src);
+ lex_source_get (src);
}
- return &src->tokens[deque_back (&src->deque, n)];
+ return &src->tokens[(src->back + n) & (src->capacity - 1)];
}
/* Returns the "struct token" of the token N after the current one in LEXER.
double
lex_next_tokval (const struct lexer *lexer, int n)
{
- const struct token *token = lex_next (lexer, n);
- return token->number;
+ return token_number (lex_next (lexer, n));
}
/* Returns the null-terminated string in the token N after the current one, in
The string is null-terminated (but the null terminator is not included in
the returned substring's 'length').
- Only T_ID and T_STRING tokens have meaningful strings. For other tokens
- this functions this function will always return NULL.
+ Only T_ID, T_MACRO_ID, and T_STRING tokens have meaningful strings. For
+ other tokens this function will always return NULL.
The UTF-8 encoding of the returned string is correct for variable names and
other identifiers. Use filename_to_utf8() to use it as a filename. Use
return lex_next (lexer, n)->string;
}
+/* Returns the text of the syntax in tokens N0 ahead of the current one,
+ through N1 ahead of the current one, inclusive. (For example, if N0 and N1
+ are both zero, this requests the syntax for the current token.) The caller
+ must eventually free the returned string (with free()). The syntax is
+ encoded in UTF-8 and in the original form supplied to the lexer so that, for
+ example, it may include comments, spaces, and new-lines if it spans multiple
+ tokens. Macro expansion, however, has already been performed. */
+char *
+lex_next_representation (const struct lexer *lexer, int n0, int n1)
+{
+ return lex_source_get_syntax__ (lex_source__ (lexer), n0, n1);
+}
+
+bool
+lex_next_is_from_macro (const struct lexer *lexer, int n)
+{
+ return lex_next__ (lexer, n)->macro_rep != NULL;
+}
+
static bool
lex_tokens_match (const struct token *actual, const struct token *expected)
{
int i;
i = 0;
- string_lexer_init (&slex, s, SEG_MODE_INTERACTIVE);
+ string_lexer_init (&slex, s, strlen (s), SEG_MODE_INTERACTIVE, true);
while (string_lexer_next (&slex, &token))
if (token.type != SCAN_SKIP)
{
bool match = lex_tokens_match (lex_next (lexer, i++), &token);
- token_destroy (&token);
+ token_uninit (&token);
if (!match)
return false;
}
return src == NULL ? NULL : src->reader->file_name;
}
+/* Returns a newly allocated msg_location for the syntax that represents tokens
+ with 0-based offsets N0...N1, inclusive, from the current token. The caller
+ must eventually free the location (with msg_location_destroy()). */
+struct msg_location *
+lex_get_location (const struct lexer *lexer, int n0, int n1)
+{
+ struct msg_location *loc = lex_get_lines (lexer, n0, n1);
+ loc->first_column = lex_get_first_column (lexer, n0);
+ loc->last_column = lex_get_last_column (lexer, n1);
+ return loc;
+}
+
+/* Returns a newly allocated msg_location for the syntax that represents tokens
+ with 0-based offsets N0...N1, inclusive, from the current token. The
+ location only covers the tokens' lines, not the columns. The caller must
+ eventually free the location (with msg_location_destroy()). */
+struct msg_location *
+lex_get_lines (const struct lexer *lexer, int n0, int n1)
+{
+ struct msg_location *loc = xmalloc (sizeof *loc);
+ *loc = (struct msg_location) {
+ .file_name = xstrdup_if_nonnull (lex_get_file_name (lexer)),
+ .first_line = lex_get_first_line_number (lexer, n0),
+ .last_line = lex_get_last_line_number (lexer, n1),
+ };
+ return loc;
+}
+
const char *
lex_get_encoding (const struct lexer *lexer)
{
return src == NULL ? NULL : src->reader->encoding;
}
-
/* Returns the syntax mode for the syntax file from which the current drawn is
- drawn. Returns LEX_SYNTAX_AUTO for a T_STOP token or if the command's
- source does not have line numbers.
+ drawn. Returns SEG_MODE_AUTO for a T_STOP token or if the command's source
+ does not have line numbers.
There is no version of this function that takes an N argument because
lookahead only works to the end of a command and any given command is always
within a single syntax file. */
-enum lex_syntax_mode
+enum segmenter_mode
lex_get_syntax_mode (const struct lexer *lexer)
{
struct lex_source *src = lex_source__ (lexer);
- return src == NULL ? LEX_SYNTAX_AUTO : src->reader->syntax;
+ return src == NULL ? SEG_MODE_AUTO : src->reader->syntax;
}
/* Returns the error mode for the syntax file from which the current drawn is
src->journal_pos = src->seg_pos = src->line_pos = 0;
src->n_newlines = 0;
src->suppress_next_newline = false;
- segmenter_init (&src->segmenter, segmenter_get_mode (&src->segmenter));
- while (!deque_is_empty (&src->deque))
- lex_source_pop__ (src);
+ src->segmenter = segmenter_init (segmenter_get_mode (&src->segmenter),
+ false);
+ while (src->middle - src->back > 0)
+ lex_source_pop_back (src);
+ while (src->front - src->middle > 0)
+ lex_source_pop_front (src);
lex_source_push_endcmd__ (src);
}
}
if (src != NULL)
{
- while (!deque_is_empty (&src->deque))
- lex_source_pop__ (src);
+ while (src->middle - src->back > 0)
+ lex_source_pop_back (src);
for (; src != NULL && src->reader->error != LEX_ERROR_TERMINAL;
src = lex_source__ (lexer))
/* Use the oldest token also. (We know that src->deque cannot be empty
because we are in the process of adding a new token, which is already
initialized enough to use here.) */
- token = &src->tokens[deque_back (&src->deque, 0)];
+ token = &src->tokens[src->back & (src->capacity - 1)];
assert (token->token_pos >= token->line_pos);
max_tail = MIN (max_tail, token->line_pos);
{
do
{
- size_t head_ofs;
- size_t space;
- size_t n;
-
lex_source_expand__ (src);
- head_ofs = src->head - src->tail;
- space = src->allocated - head_ofs;
- n = src->reader->class->read (src->reader, &src->buffer[head_ofs],
- space,
- segmenter_get_prompt (&src->segmenter));
+ size_t head_ofs = src->head - src->tail;
+ size_t space = src->allocated - head_ofs;
+ enum prompt_style prompt = segmenter_get_prompt (&src->segmenter);
+ size_t n = src->reader->class->read (src->reader, &src->buffer[head_ofs],
+ space, prompt);
assert (n <= space);
if (n == 0)
{
- /* End of input.
-
- Ensure that the input always ends in a new-line followed by a null
- byte, as required by the segmenter library. */
-
- if (src->head == src->tail
- || src->buffer[src->head - src->tail - 1] != '\n')
- src->buffer[src->head++ - src->tail] = '\n';
-
+ /* End of input. */
+ src->reader->eof = true;
lex_source_expand__ (src);
- src->buffer[src->head++ - src->tail] = '\0';
-
return;
}
: ll_data (ll_head (&lexer->sources), struct lex_source, ll));
}
-static struct substring
+static char *
lex_source_get_syntax__ (const struct lex_source *src, int n0, int n1)
{
- const struct lex_token *token0 = lex_source_next__ (src, n0);
- const struct lex_token *token1 = lex_source_next__ (src, MAX (n0, n1));
- size_t start = token0->token_pos;
- size_t end = token1->token_pos + token1->token_len;
+ struct string s = DS_EMPTY_INITIALIZER;
+ for (size_t i = n0; i <= n1; )
+ {
+ /* Find [I,J) as the longest sequence of tokens not produced by macro
+ expansion, or otherwise the longest sequence expanded from a single
+ macro call. */
+ const struct lex_token *first = lex_source_next__ (src, i);
+ size_t j;
+ for (j = i + 1; j <= n1; j++)
+ {
+ const struct lex_token *cur = lex_source_next__ (src, j);
+ if ((first->macro_rep != NULL) != (cur->macro_rep != NULL)
+ || first->macro_rep != cur->macro_rep)
+ break;
+ }
+ const struct lex_token *last = lex_source_next__ (src, j - 1);
- return ss_buffer (&src->buffer[start - src->tail], end - start);
+ if (!ds_is_empty (&s))
+ ds_put_byte (&s, ' ');
+ if (!first->macro_rep)
+ {
+ size_t start = first->token_pos;
+ size_t end = last->token_pos + last->token_len;
+ ds_put_substring (&s, ss_buffer (&src->buffer[start - src->tail],
+ end - start));
+ }
+ else
+ {
+ size_t start = first->ofs;
+ size_t end = last->ofs + last->len;
+ ds_put_substring (&s, ss_buffer (first->macro_rep + start,
+ end - start));
+ }
+
+ i = j;
+ }
+ return ds_steal_cstr (&s);
}
-static void
-lex_ellipsize__ (struct substring in, char *out, size_t out_size)
+void
+lex_ellipsize (struct substring in, char *out, size_t out_size)
{
size_t out_maxlen;
size_t out_len;
int mblen;
assert (out_size >= 16);
- out_maxlen = out_size - (in.length >= out_size ? 3 : 0) - 1;
+ out_maxlen = out_size - 1;
+ if (in.length > out_maxlen - 3)
+ out_maxlen -= 3;
+
for (out_len = 0; out_len < in.length; out_len += mblen)
{
if (in.string[out_len] == '\n'
+ || in.string[out_len] == '\0'
|| (in.string[out_len] == '\r'
&& out_len + 1 < in.length
&& in.string[out_len + 1] == '\n'))
mblen = u8_mblen (CHAR_CAST (const uint8_t *, in.string + out_len),
in.length - out_len);
+
+ if (mblen < 0)
+ break;
+
if (out_len + mblen > out_maxlen)
break;
}
strcpy (&out[out_len], out_len < in.length ? "..." : "");
}
+static bool
+lex_source_contains_macro_call (struct lex_source *src, int n0, int n1)
+{
+ for (size_t i = n0; i <= n1; i++)
+ if (lex_source_next__ (src, i)->macro_rep)
+ return true;
+ return false;
+}
+
+static struct substring
+lex_source_get_macro_call (struct lex_source *src, int n0, int n1)
+{
+ if (!lex_source_contains_macro_call (src, n0, n1))
+ return ss_empty ();
+
+ const struct lex_token *token0 = lex_source_next__ (src, n0);
+ const struct lex_token *token1 = lex_source_next__ (src, MAX (n0, n1));
+ size_t start = token0->token_pos;
+ size_t end = token1->token_pos + token1->token_len;
+
+ return ss_buffer (&src->buffer[start - src->tail], end - start);
+}
+
static void
lex_source_error_valist (struct lex_source *src, int n0, int n1,
const char *format, va_list args)
{
const struct lex_token *token;
struct string s;
- struct msg m;
ds_init_empty (&s);
ds_put_cstr (&s, _("Syntax error at end of command"));
else
{
- struct substring syntax = lex_source_get_syntax__ (src, n0, n1);
- if (!ss_is_empty (syntax))
+ /* Get the syntax that caused the error. */
+ char *syntax = lex_source_get_syntax__ (src, n0, n1);
+ char syntax_cstr[64];
+ lex_ellipsize (ss_cstr (syntax), syntax_cstr, sizeof syntax_cstr);
+ free (syntax);
+
+ /* Get the macro call(s) that expanded to the syntax that caused the
+ error. */
+ char call_cstr[64];
+ struct substring call = lex_source_get_macro_call (src, n0, n1);
+ lex_ellipsize (call, call_cstr, sizeof call_cstr);
+
+ if (syntax_cstr[0])
{
- char syntax_cstr[64];
-
- lex_ellipsize__ (syntax, syntax_cstr, sizeof syntax_cstr);
- ds_put_format (&s, _("Syntax error at `%s'"), syntax_cstr);
+ if (call_cstr[0])
+ ds_put_format (&s, _("Syntax error at `%s' "
+ "(in expansion of `%s')"),
+ syntax_cstr, call_cstr);
+ else
+ ds_put_format (&s, _("Syntax error at `%s'"), syntax_cstr);
}
+ else if (call_cstr[0])
+ ds_put_format (&s, _("Syntax error in syntax expanded from `%s'"),
+ call_cstr);
else
ds_put_cstr (&s, _("Syntax error"));
}
ds_put_cstr (&s, ": ");
ds_put_vformat (&s, format, args);
}
- ds_put_byte (&s, '.');
-
- m.category = MSG_C_SYNTAX;
- m.severity = MSG_S_ERROR;
- m.file_name = src->reader->file_name;
- m.first_line = lex_source_get_first_line_number (src, n0);
- m.last_line = lex_source_get_last_line_number (src, n1);
- m.first_column = lex_source_get_first_column (src, n0);
- m.last_column = lex_source_get_last_column (src, n1);
- m.text = ds_steal_cstr (&s);
- msg_emit (&m);
+ if (ds_last (&s) != '.')
+ ds_put_byte (&s, '.');
+
+ struct msg_location *location = xmalloc (sizeof *location);
+ *location = (struct msg_location) {
+ .file_name = xstrdup_if_nonnull (src->reader->file_name),
+ .first_line = lex_source_get_first_line_number (src, n0),
+ .last_line = lex_source_get_last_line_number (src, n1),
+ .first_column = lex_source_get_first_column (src, n0),
+ .last_column = lex_source_get_last_column (src, n1),
+ };
+ struct msg *m = xmalloc (sizeof *m);
+ *m = (struct msg) {
+ .category = MSG_C_SYNTAX,
+ .severity = MSG_S_ERROR,
+ .location = location,
+ .text = ds_steal_cstr (&s),
+ };
+ msg_emit (m);
}
-static void PRINTF_FORMAT (2, 3)
-lex_get_error (struct lex_source *src, const char *format, ...)
+static void PRINTF_FORMAT (4, 5)
+lex_source_error (struct lex_source *src, int n0, int n1,
+ const char *format, ...)
{
va_list args;
- int n;
-
va_start (args, format);
+ lex_source_error_valist (src, n0, n1, format, args);
+ va_end (args);
+}
- n = deque_count (&src->deque) - 1;
- lex_source_error_valist (src, n, n, format, args);
- lex_source_pop_front (src);
+static void
+lex_get_error (struct lex_source *src, const char *s)
+{
+ size_t old_middle = src->middle;
+ src->middle = src->front;
+ size_t n = src->front - src->back - 1;
+ lex_source_error (src, n, n, "%s", s);
+ src->middle = old_middle;
- va_end (args);
+ lex_source_pop_front (src);
}
+/* Attempts to append an additional token at the front of SRC, reading more
+ from the underlying lex_reader if necessary. Returns true if a new token
+ was added to SRC's token queue, false otherwise. The caller should retry failures
+ unless SRC's 'eof' marker was set to true indicating that there will be no
+ more tokens from this source.
+
+ Does not make the new token available for lookahead yet; the caller must
+ adjust SRC's 'middle' pointer to do so. */
static bool
-lex_source_get__ (const struct lex_source *src_)
+lex_source_try_get__ (struct lex_source *src)
{
- struct lex_source *src = CONST_CAST (struct lex_source *, src_);
-
+ /* State maintained while scanning tokens. Usually we only need a single
+ state, but scanner_push() can return SCAN_SAVE to indicate that the state
+ needs to be saved and possibly restored later with SCAN_BACK. */
struct state
{
struct segmenter segmenter;
enum segment_type last_segment;
- int newlines;
+ int newlines; /* Number of newlines encountered so far. */
+ /* Maintained here so we can update lex_source's similar members when we
+ finish. */
size_t line_pos;
size_t seg_pos;
};
- struct state state, saved;
- enum scan_result result;
- struct scanner scanner;
- struct lex_token *token;
- int n_lines;
- int i;
-
- if (src->eof)
- return false;
-
- state.segmenter = src->segmenter;
- state.newlines = 0;
- state.seg_pos = src->seg_pos;
- state.line_pos = src->line_pos;
- saved = state;
+ /* Initialize state. */
+ struct state state =
+ {
+ .segmenter = src->segmenter,
+ .newlines = 0,
+ .seg_pos = src->seg_pos,
+ .line_pos = src->line_pos,
+ };
+ struct state saved = state;
- token = lex_push_token__ (src);
+ /* Append a new token to SRC and initialize it. */
+ struct lex_token *token = lex_push_token__ (src);
+ struct scanner scanner;
scanner_init (&scanner, &token->token);
token->line_pos = src->line_pos;
token->token_pos = src->seg_pos;
else
token->first_line = 0;
+ /* Extract segments and pass them through the scanner until we obtain a
+ token. */
for (;;)
{
+ /* Extract a segment. */
+ const char *segment = &src->buffer[state.seg_pos - src->tail];
+ size_t seg_maxlen = src->head - state.seg_pos;
enum segment_type type;
- const char *segment;
- size_t seg_maxlen;
- int seg_len;
-
- segment = &src->buffer[state.seg_pos - src->tail];
- seg_maxlen = src->head - state.seg_pos;
- seg_len = segmenter_push (&state.segmenter, segment, seg_maxlen, &type);
+ int seg_len = segmenter_push (&state.segmenter, segment, seg_maxlen,
+ src->reader->eof, &type);
if (seg_len < 0)
{
+ /* The segmenter needs more input to produce a segment. */
+ assert (!src->reader->eof);
lex_source_read__ (src);
continue;
}
+ /* Update state based on the segment. */
state.last_segment = type;
state.seg_pos += seg_len;
if (type == SEG_NEWLINE)
{
state.newlines++;
state.line_pos = state.seg_pos;
}
- result = scanner_push (&scanner, type, ss_buffer (segment, seg_len),
- &token->token);
+ /* Pass the segment into the scanner and try to get a token out. */
+ enum scan_result result = scanner_push (&scanner, type,
+ ss_buffer (segment, seg_len),
+ &token->token);
if (result == SCAN_SAVE)
saved = state;
else if (result == SCAN_BACK)
{
state = saved;
break;
}
else if (result == SCAN_DONE)
break;
}
- n_lines = state.newlines;
+ /* If we've reached the end of a line, or the end of a command, then pass
+ the line to the output engine as a syntax text item. */
+ int n_lines = state.newlines;
if (state.last_segment == SEG_END_COMMAND && !src->suppress_next_newline)
{
n_lines++;
src->suppress_next_newline = true;
}
else if (n_lines > 0 && src->suppress_next_newline)
{
n_lines--;
src->suppress_next_newline = false;
}
- for (i = 0; i < n_lines; i++)
+ for (int i = 0; i < n_lines; i++)
{
- const char *newline;
- const char *line;
- size_t line_len;
- char *syntax;
-
- line = &src->buffer[src->journal_pos - src->tail];
- newline = rawmemchr (line, '\n');
- line_len = newline - line;
- if (line_len > 0 && line[line_len - 1] == '\r')
- line_len--;
-
- syntax = malloc (line_len + 2);
- memcpy (syntax, line, line_len);
- syntax[line_len] = '\n';
- syntax[line_len + 1] = '\0';
-
- text_item_submit (text_item_create_nocopy (TEXT_ITEM_SYNTAX, syntax));
-
- src->journal_pos += newline - line + 1;
+ /* Beginning of line. */
+ const char *line = &src->buffer[src->journal_pos - src->tail];
+
+ /* Calculate line length, including \n or \r\n end-of-line if present.
+
+ We use src->head even though that may be beyond what we've actually
+ converted to tokens (which is only through state.line_pos). That's
+ because, if we're emitting the line due to SEG_END_COMMAND, we want to
+ take the whole line through the newline, not just through the '.'. */
+ size_t max_len = src->head - src->journal_pos;
+ const char *newline = memchr (line, '\n', max_len);
+ size_t line_len = newline ? newline - line + 1 : max_len;
+
+ /* Calculate line length excluding end-of-line. */
+ size_t copy_len = line_len;
+ if (copy_len > 0 && line[copy_len - 1] == '\n')
+ copy_len--;
+ if (copy_len > 0 && line[copy_len - 1] == '\r')
+ copy_len--;
+
+ /* Submit the line as syntax. */
+ output_item_submit (text_item_create_nocopy (TEXT_ITEM_SYNTAX,
+ xmemdup0 (line, copy_len),
+ NULL));
+
+ src->journal_pos += line_len;
}
token->token_len = state.seg_pos - src->seg_pos;
switch (token->token.type)
{
default:
- break;
+ return true;
case T_STOP:
token->token.type = T_ENDCMD;
src->eof = true;
- break;
+ return true;
case SCAN_BAD_HEX_LENGTH:
- lex_get_error (src, _("String of hex digits has %d characters, which "
- "is not a multiple of 2"),
- (int) token->token.number);
- break;
-
case SCAN_BAD_HEX_DIGIT:
case SCAN_BAD_UNICODE_DIGIT:
- lex_get_error (src, _("`%c' is not a valid hex digit"),
- (int) token->token.number);
- break;
-
case SCAN_BAD_UNICODE_LENGTH:
- lex_get_error (src, _("Unicode string contains %d bytes, which is "
- "not in the valid range of 1 to 8 bytes"),
- (int) token->token.number);
- break;
-
case SCAN_BAD_UNICODE_CODE_POINT:
- lex_get_error (src, _("U+%04X is not a valid Unicode code point"),
- (int) token->token.number);
- break;
-
case SCAN_EXPECTED_QUOTE:
- lex_get_error (src, _("Unterminated string constant"));
- break;
-
case SCAN_EXPECTED_EXPONENT:
- lex_get_error (src, _("Missing exponent following `%s'"),
- token->token.string.string);
- break;
-
- case SCAN_UNEXPECTED_DOT:
- lex_get_error (src, _("Unexpected `.' in middle of command"));
- break;
-
case SCAN_UNEXPECTED_CHAR:
- {
- char c_name[16];
- lex_get_error (src, _("Bad character %s in input"),
- uc_name (token->token.number, c_name));
- }
- break;
+ {
+ char *msg = scan_token_to_error (&token->token);
+ lex_get_error (src, "%s", msg);
+ free (msg);
+ }
+ return false;
case SCAN_SKIP:
lex_source_pop_front (src);
- break;
+ return false;
+ }
+
+ NOT_REACHED ();
+}
+
+/* Attempts to add a new token at the front of SRC. Returns true if
+ successful, false on failure. On failure, the end of SRC has been reached
+ and no more tokens will be forthcoming from it.
+
+ Does not make the new token available for lookahead yet; the caller must
+ adjust SRC's 'middle' pointer to do so. */
+static bool
+lex_source_get__ (struct lex_source *src)
+{
+ while (!src->eof)
+ if (lex_source_try_get__ (src))
+ return true;
+ return false;
+}
+
+static bool
+lex_source_get (const struct lex_source *src_)
+{
+ struct lex_source *src = CONST_CAST (struct lex_source *, src_);
+
+ if (src->front - src->middle == 0)
+ {
+ if (!lex_source_get__ (src))
+ return false;
+ }
+
+ if (!settings_get_mexpand ())
+ {
+ src->middle++;
+ return true;
+ }
+
+ struct macro_expander *me;
+ int n_call = macro_expander_create (
+ src->lexer->macros, &src->tokens[src->middle & (src->capacity - 1)].token,
+ &me);
+ for (int middle_ofs = 1; !n_call; middle_ofs++)
+ {
+ if (src->front - src->middle <= middle_ofs && !lex_source_get__ (src))
+ {
+ /* This should not be reachable because we always get a T_ENDCMD at
+ the end of an input file (transformed from T_STOP by
+ lex_source_try_get__()) and the macro_expander should always
+ terminate expansion on T_ENDCMD. */
+ NOT_REACHED ();
+ }
+
+ const struct lex_token *t = &src->tokens[(src->middle + middle_ofs)
+ & (src->capacity - 1)];
+ size_t start = t->token_pos;
+ size_t end = t->token_pos + t->token_len;
+ const struct macro_token mt = {
+ .token = t->token,
+ .representation = ss_buffer (&src->buffer[start - src->tail],
+ end - start),
+ };
+ src->middle += middle_ofs + 1;
+ n_call = macro_expander_add (me, &mt);
+ src->middle -= middle_ofs + 1;
}
+ if (n_call < 0)
+ {
+ /* False alarm: no macro expansion after all. Use first token as
+ lookahead. We'll retry macro expansion from the second token next
+ time around. */
+ macro_expander_destroy (me);
+ src->middle++;
+ return true;
+ }
+
+ /* The first 'n_call' tokens starting at 'middle' will be replaced by a
+ macro expansion. There might be more tokens after that, up to 'front'.
+
+ Figure out the boundary of the macro call in the syntax, to go into the
+ lex_tokens for the expansion so that later error messages can report what
+ macro was called. */
+ const struct lex_token *call_first
+ = &src->tokens[src->middle & (src->capacity - 1)];
+ const struct lex_token *call_last
+ = &src->tokens[(src->middle + n_call - 1) & (src->capacity - 1)];
+ size_t call_pos = call_first->token_pos;
+ size_t call_len = (call_last->token_pos + call_last->token_len) - call_pos;
+ size_t line_pos = call_first->line_pos;
+ int first_line = call_first->first_line;
+
+ /* Destroy the tokens for the call, and save any tokens following the call so
+ we can add them back later. */
+ for (size_t i = src->middle; i != src->middle + n_call; i++)
+ lex_token_uninit (&src->tokens[i & (src->capacity - 1)]);
+ size_t n_save = src->front - (src->middle + n_call);
+ struct lex_token *save_tokens = xnmalloc (n_save, sizeof *save_tokens);
+ for (size_t i = 0; i < n_save; i++)
+ save_tokens[i] = src->tokens[(src->middle + n_call + i)
+ & (src->capacity - 1)];
+ src->front = src->middle;
+
+ /* Now expand the macro. */
+ struct macro_tokens expansion = { .n = 0 };
+ macro_expander_get_expansion (me, &expansion);
+ macro_expander_destroy (me);
+
+ /* Convert the macro expansion into syntax for possible error messages later. */
+ size_t *ofs = xnmalloc (expansion.n, sizeof *ofs);
+ size_t *len = xnmalloc (expansion.n, sizeof *len);
+ struct string s = DS_EMPTY_INITIALIZER;
+ macro_tokens_to_representation (&expansion, &s, ofs, len);
+
+ if (settings_get_mprint ())
+ output_item_submit (text_item_create (TEXT_ITEM_LOG, ds_cstr (&s),
+ _("Macro Expansion")));
+
+ /* Append the macro expansion tokens to the lookahead. */
+ char *macro_rep = ds_steal_cstr (&s);
+ size_t *ref_cnt = xmalloc (sizeof *ref_cnt);
+ *ref_cnt = expansion.n;
+ for (size_t i = 0; i < expansion.n; i++)
+ {
+ *lex_push_token__ (src) = (struct lex_token) {
+ .token = expansion.mts[i].token,
+ .token_pos = call_pos,
+ .token_len = call_len,
+ .line_pos = line_pos,
+ .first_line = first_line,
+ .macro_rep = macro_rep,
+ .ofs = ofs[i],
+ .len = len[i],
+ .ref_cnt = ref_cnt,
+ };
+ src->middle++;
+
+ ss_dealloc (&expansion.mts[i].representation);
+ }
+ free (expansion.mts);
+ free (ofs);
+ free (len);
+
+ /* Finally, put the saved tokens back. */
+ for (size_t i = 0; i < n_save; i++)
+ *lex_push_token__ (src) = save_tokens[i];
+ free (save_tokens);
return true;
}
static void
lex_source_push_endcmd__ (struct lex_source *src)
{
- struct lex_token *token = lex_push_token__ (src);
- token->token.type = T_ENDCMD;
- token->token_pos = 0;
- token->token_len = 0;
- token->line_pos = 0;
- token->first_line = 0;
+ assert (src->back == src->middle && src->middle == src->front);
+ *lex_push_token__ (src) = (struct lex_token) {
+ .token = { .type = T_ENDCMD } };
+ src->middle++;
}
static struct lex_source *
-lex_source_create (struct lex_reader *reader)
+lex_source_create (struct lexer *lexer, struct lex_reader *reader)
{
- struct lex_source *src;
- enum segmenter_mode mode;
-
- src = xzalloc (sizeof *src);
- src->reader = reader;
-
- if (reader->syntax == LEX_SYNTAX_AUTO)
- mode = SEG_MODE_AUTO;
- else if (reader->syntax == LEX_SYNTAX_INTERACTIVE)
- mode = SEG_MODE_INTERACTIVE;
- else if (reader->syntax == LEX_SYNTAX_BATCH)
- mode = SEG_MODE_BATCH;
- else
- NOT_REACHED ();
- segmenter_init (&src->segmenter, mode);
-
- src->tokens = deque_init (&src->deque, 4, sizeof *src->tokens);
+ struct lex_source *src = xmalloc (sizeof *src);
+ *src = (struct lex_source) {
+ .reader = reader,
+ .segmenter = segmenter_init (reader->syntax, false),
+ .lexer = lexer,
+ };
lex_source_push_endcmd__ (src);
free (file_name);
free (encoding);
free (src->buffer);
- while (!deque_is_empty (&src->deque))
- lex_source_pop__ (src);
+ while (src->middle - src->back > 0)
+ lex_source_pop_back (src);
+ while (src->front - src->middle > 0)
+ lex_source_pop_front (src);
free (src->tokens);
ll_remove (&src->ll);
free (src);
Returns a null pointer if FILE_NAME cannot be opened. */
struct lex_reader *
lex_reader_for_file (const char *file_name, const char *encoding,
- enum lex_syntax_mode syntax,
+ enum segmenter_mode syntax,
enum lex_error_mode error)
{
struct lex_file_reader *r;
r->reader.syntax = syntax;
r->reader.error = error;
r->reader.file_name = xstrdup (file_name);
- r->reader.encoding = encoding ? xstrdup (encoding) : NULL;
+ r->reader.encoding = xstrdup_if_nonnull (encoding);
r->reader.line_number = 1;
r->istream = istream;
r = xmalloc (sizeof *r);
lex_reader_init (&r->reader, &lex_string_reader_class);
- r->reader.syntax = LEX_SYNTAX_AUTO;
- r->reader.encoding = encoding ? xstrdup (encoding) : NULL;
+ r->reader.syntax = SEG_MODE_AUTO;
+ r->reader.encoding = xstrdup_if_nonnull (encoding);
r->s = s;
r->offset = 0;