/* PSPP - a program for statistical analysis.
- Copyright (C) 1997-9, 2000, 2006, 2009, 2010, 2011 Free Software Foundation, Inc.
+ Copyright (C) 1997-9, 2000, 2006, 2009, 2010, 2011, 2013, 2016 Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#include <unictype.h>
#include <unistd.h>
#include <unistr.h>
-#include <uniwidth.h>
-#include "data/file-name.h"
#include "language/command.h"
+#include "language/lexer/macro.h"
#include "language/lexer/scan.h"
#include "language/lexer/segment.h"
#include "language/lexer/token.h"
#include "libpspp/cast.h"
#include "libpspp/deque.h"
#include "libpspp/i18n.h"
+#include "libpspp/intern.h"
#include "libpspp/ll.h"
#include "libpspp/message.h"
#include "libpspp/misc.h"
#include "libpspp/str.h"
#include "libpspp/u8-istream.h"
#include "output/journal.h"
-#include "output/text-item.h"
+#include "output/output-item.h"
#include "gl/c-ctype.h"
#include "gl/minmax.h"
/* The regular token information. */
struct token token;
- /* Location of token in terms of the lex_source's buffer.
- src->tail <= line_pos <= token_pos <= src->head. */
- size_t token_pos; /* Start of token. */
+ /* For a token obtained through the lexer in an ordinary way, this is the
+ location of the token in terms of the lex_source's buffer.
+
+ For a token produced through macro expansion, this is the entire macro
+ call. */
+ size_t token_pos; /* Offset into src->buffer of token start. */
size_t token_len; /* Length of source for token in bytes. */
- size_t line_pos; /* Start of line containing token_pos. */
- int first_line; /* Line number at token_pos. */
+
+ /* For a token obtained through macro expansion, this is just this token.
+
+ For a token obtained through the lexer in an ordinary way, these are
+ nulls and zeros. */
+ char *macro_rep; /* The whole macro expansion. */
+ size_t ofs; /* Offset of this token in macro_rep. */
+ size_t len; /* Length of this token in macro_rep. */
+ size_t *ref_cnt; /* Number of lex_tokens that refer to macro_rep. */
};
+static struct msg_point lex_token_start_point (const struct lex_source *,
+ const struct lex_token *);
+static struct msg_point lex_token_end_point (const struct lex_source *,
+ const struct lex_token *);
+
+static size_t lex_ofs_at_phrase__ (struct lexer *, int ofs, const char *s);
+
+/* Source offset of the last byte in TOKEN. */
+static size_t
+lex_token_end (const struct lex_token *token)
+{
+ return token->token_pos + MAX (token->token_len, 1) - 1;
+}
+
+static void
+lex_token_destroy (struct lex_token *t)
+{
+ token_uninit (&t->token);
+ if (t->ref_cnt)
+ {
+ assert (*t->ref_cnt > 0);
+ if (!--*t->ref_cnt)
+ {
+ free (t->macro_rep);
+ free (t->ref_cnt);
+ }
+ }
+ free (t);
+}
+\f
+/* A deque of lex_tokens that comprises one stage in the token pipeline in a
+ lex_source. */
+struct lex_stage
+ {
+ struct deque deque;
+ struct lex_token **tokens;
+ };
+
+static void lex_stage_clear (struct lex_stage *);
+static void lex_stage_uninit (struct lex_stage *);
+
+static size_t lex_stage_count (const struct lex_stage *);
+static bool lex_stage_is_empty (const struct lex_stage *);
+
+static struct lex_token *lex_stage_first (struct lex_stage *);
+static struct lex_token *lex_stage_nth (struct lex_stage *, size_t ofs);
+
+static void lex_stage_push_last (struct lex_stage *, struct lex_token *);
+static void lex_stage_pop_first (struct lex_stage *);
+
+static void lex_stage_shift (struct lex_stage *dst, struct lex_stage *src,
+ size_t n);
+
+/* Deletes all the tokens from STAGE. */
+static void
+lex_stage_clear (struct lex_stage *stage)
+{
+ while (!deque_is_empty (&stage->deque))
+ lex_stage_pop_first (stage);
+}
+
+/* Deletes all the tokens from STAGE and frees storage for the deque. */
+static void
+lex_stage_uninit (struct lex_stage *stage)
+{
+ lex_stage_clear (stage);
+ free (stage->tokens);
+}
+
+/* Returns true if STAGE contains no tokens, otherwise false. */
+static bool
+lex_stage_is_empty (const struct lex_stage *stage)
+{
+ return deque_is_empty (&stage->deque);
+}
+
+/* Returns the number of tokens in STAGE. */
+static size_t
+lex_stage_count (const struct lex_stage *stage)
+{
+ return deque_count (&stage->deque);
+}
+
+/* Returns the first token in STAGE, which must be nonempty.
+ The first token is the one accessed with the least lookahead. */
+static struct lex_token *
+lex_stage_first (struct lex_stage *stage)
+{
+ return lex_stage_nth (stage, 0);
+}
+
+/* Returns the token at the given INDEX in STAGE.  The first token (with the least
+ lookahead) is 0, the second token is 1, and so on. There must be at least
+ INDEX + 1 tokens in STAGE. */
+static struct lex_token *
+lex_stage_nth (struct lex_stage *stage, size_t index)
+{
+ return stage->tokens[deque_back (&stage->deque, index)];
+}
+
+/* Adds TOKEN so that it becomes the last token in STAGE. */
+static void
+lex_stage_push_last (struct lex_stage *stage, struct lex_token *token)
+{
+ if (deque_is_full (&stage->deque))
+ stage->tokens = deque_expand (&stage->deque, stage->tokens,
+ sizeof *stage->tokens);
+ stage->tokens[deque_push_front (&stage->deque)] = token;
+}
+
+/* Removes and returns the first token from STAGE. */
+static struct lex_token *
+lex_stage_take_first (struct lex_stage *stage)
+{
+ return stage->tokens[deque_pop_back (&stage->deque)];
+}
+
+/* Removes the first token from STAGE and uninitializes it. */
+static void
+lex_stage_pop_first (struct lex_stage *stage)
+{
+ lex_token_destroy (lex_stage_take_first (stage));
+}
+
+/* Removes the first N tokens from SRC, appending them to DST as the last
+ tokens. */
+static void
+lex_stage_shift (struct lex_stage *dst, struct lex_stage *src, size_t n)
+{
+ for (size_t i = 0; i < n; i++)
+ lex_stage_push_last (dst, lex_stage_take_first (src));
+}
+
/* A source of tokens, corresponding to a syntax file.
This is conceptually a lex_reader wrapped with everything needed to convert
struct lex_source
{
struct ll ll; /* In lexer's list of sources. */
+
+ /* Reference count:
+
+ - One for struct lexer.
+
+ - One for each struct msg_location that references this source. */
+ size_t n_refs;
+
struct lex_reader *reader;
+ struct lexer *lexer;
struct segmenter segmenter;
bool eof; /* True if T_STOP was read from 'reader'. */
/* Buffer of UTF-8 bytes. */
- char *buffer;
+ char *buffer; /* Source file contents. */
+ size_t length; /* Number of bytes filled. */
size_t allocated; /* Number of bytes allocated. */
- size_t tail; /* &buffer[0] offset into UTF-8 source. */
- size_t head; /* &buffer[head - tail] offset into source. */
- /* Positions in source file, tail <= pos <= head for each member here. */
+ /* Offsets into 'buffer'. */
size_t journal_pos; /* First byte not yet output to journal. */
size_t seg_pos; /* First byte not yet scanned as token. */
- size_t line_pos; /* First byte of line containing seg_pos. */
- int n_newlines; /* Number of new-lines up to seg_pos. */
+    /* Offsets into 'buffer' of the starts of lines. */
+ size_t *lines;
+ size_t n_lines, allocated_lines;
+
bool suppress_next_newline;
- /* Tokens. */
- struct deque deque; /* Indexes into 'tokens'. */
- struct lex_token *tokens; /* Lookahead tokens for parser. */
+ /* Tokens.
+
+ This is a pipeline with the following stages. Each token eventually
+       made available to the parser passes through each of these stages.  The stages
+ are named after the processing that happens in each one.
+
+ Initially, tokens come from the segmenter and scanner to 'pp':
+
+ - pp: Tokens that need to pass through the macro preprocessor to end up
+ in 'merge'.
+
+ - merge: Tokens that need to pass through scan_merge() to end up in
+ 'parse'.
+
+ - parse: Tokens available to the client for parsing.
+
+ 'pp' and 'merge' store tokens only temporarily until they pass into
+ 'parse'. Tokens then live in 'parse' until the command is fully
+ consumed, at which time they are freed together. */
+ struct lex_stage pp;
+ struct lex_stage merge;
+ struct lex_token **parse;
+ size_t n_parse, allocated_parse, parse_ofs;
};
-static struct lex_source *lex_source_create (struct lex_reader *);
-static void lex_source_destroy (struct lex_source *);
+static struct lex_source *lex_source_create (struct lexer *,
+ struct lex_reader *);
/* Lexer. */
struct lexer
{
struct ll_list sources; /* Contains "struct lex_source"s. */
+ struct macro_set *macros;
};
static struct lex_source *lex_source__ (const struct lexer *);
+static char *lex_source_syntax__ (const struct lex_source *,
+ int ofs0, int ofs1);
static const struct lex_token *lex_next__ (const struct lexer *, int n);
static void lex_source_push_endcmd__ (struct lex_source *);
-
-static void lex_source_pop__ (struct lex_source *);
-static bool lex_source_get__ (const struct lex_source *);
-static void lex_source_error_valist (struct lex_source *, int n0, int n1,
- const char *format, va_list)
- PRINTF_FORMAT (4, 0);
+static void lex_source_push_parse (struct lex_source *, struct lex_token *);
+static void lex_source_clear_parse (struct lex_source *);
+
+static bool lex_source_get_parse (struct lex_source *);
+static void lex_source_msg_valist (struct lex_source *, enum msg_class,
+ int ofs0, int ofs1,
+ const char *format, va_list)
+ PRINTF_FORMAT (5, 0);
static const struct lex_token *lex_source_next__ (const struct lex_source *,
int n);
\f
const struct lex_reader_class *class)
{
reader->class = class;
- reader->syntax = LEX_SYNTAX_AUTO;
- reader->error = LEX_ERROR_INTERACTIVE;
+ reader->syntax = SEG_MODE_AUTO;
+ reader->error = LEX_ERROR_CONTINUE;
reader->file_name = NULL;
+ reader->encoding = NULL;
reader->line_number = 0;
+ reader->eof = false;
}
/* Frees any file name already in READER and replaces it by a copy of
lex_reader_set_file_name (struct lex_reader *reader, const char *file_name)
{
free (reader->file_name);
- reader->file_name = file_name != NULL ? xstrdup (file_name) : NULL;
+ reader->file_name = xstrdup_if_nonnull (file_name);
}
\f
/* Creates and returns a new lexer. */
struct lexer *
lex_create (void)
{
- struct lexer *lexer = xzalloc (sizeof *lexer);
- ll_init (&lexer->sources);
+ struct lexer *lexer = xmalloc (sizeof *lexer);
+ *lexer = (struct lexer) {
+ .sources = LL_INITIALIZER (lexer->sources),
+ .macros = macro_set_create (),
+ };
return lexer;
}
struct lex_source *source, *next;
ll_for_each_safe (source, next, struct lex_source, ll, &lexer->sources)
- lex_source_destroy (source);
+ {
+ ll_remove (&source->ll);
+ lex_source_unref (source);
+ }
+ macro_set_destroy (lexer->macros);
free (lexer);
}
}
+/* Adds M to LEXER's set of macros. M replaces any existing macro with the
+ same name. Takes ownership of M. */
+void
+lex_define_macro (struct lexer *lexer, struct macro *m)
+{
+ macro_set_add (lexer->macros, m);
+}
+
/* Inserts READER into LEXER so that the next token read by LEXER comes from
READER. Before the caller, LEXER must either be empty or at a T_ENDCMD
token. */
lex_include (struct lexer *lexer, struct lex_reader *reader)
{
assert (ll_is_empty (&lexer->sources) || lex_token (lexer) == T_ENDCMD);
- ll_push_head (&lexer->sources, &lex_source_create (reader)->ll);
+ ll_push_head (&lexer->sources, &lex_source_create (lexer, reader)->ll);
}
/* Appends READER to LEXER, so that it will be read after all other current
void
lex_append (struct lexer *lexer, struct lex_reader *reader)
{
- ll_push_tail (&lexer->sources, &lex_source_create (reader)->ll);
+ ll_push_tail (&lexer->sources, &lex_source_create (lexer, reader)->ll);
}
\f
-/* Advacning. */
-
-static struct lex_token *
-lex_push_token__ (struct lex_source *src)
-{
- struct lex_token *token;
-
- if (deque_is_full (&src->deque))
- src->tokens = deque_expand (&src->deque, src->tokens, sizeof *src->tokens);
-
- token = &src->tokens[deque_push_front (&src->deque)];
- token_init (&token->token);
- return token;
-}
-
-static void
-lex_source_pop__ (struct lex_source *src)
-{
- token_destroy (&src->tokens[deque_pop_back (&src->deque)].token);
-}
-
-static void
-lex_source_pop_front (struct lex_source *src)
-{
- token_destroy (&src->tokens[deque_pop_front (&src->deque)].token);
-}
+/* Advancing. */
/* Advances LEXER to the next token, consuming the current token. */
void
if (src == NULL)
return;
- if (!deque_is_empty (&src->deque))
- lex_source_pop__ (src);
+ if (src->parse_ofs < src->n_parse)
+ {
+ if (src->parse[src->parse_ofs]->token.type == T_ENDCMD)
+ lex_source_clear_parse (src);
+ else
+ src->parse_ofs++;
+ }
- while (deque_is_empty (&src->deque))
- if (!lex_source_get__ (src))
+ while (src->parse_ofs == src->n_parse)
+ if (!lex_source_get_parse (src))
{
- lex_source_destroy (src);
+ ll_remove (&src->ll);
+ lex_source_unref (src);
src = lex_source__ (lexer);
if (src == NULL)
return;
}
}
+
+/* Advances LEXER by N tokens. */
+void
+lex_get_n (struct lexer *lexer, size_t n)
+{
+ while (n-- > 0)
+ lex_get (lexer);
+}
\f
/* Issuing errors. */
va_list args;
va_start (args, format);
- lex_next_error_valist (lexer, 0, 0, format, args);
+ lex_ofs_msg_valist (lexer, SE, lex_ofs (lexer), lex_ofs (lexer),
+ format, args);
va_end (args);
}
-/* Prints a syntax error message containing the current token and
- given message MESSAGE (if non-null). */
+/* Prints a syntax error message for the span of tokens N0 through N1,
+ inclusive, from the current token in LEXER, adding message MESSAGE (if
+ non-null). */
void
-lex_error_valist (struct lexer *lexer, const char *format, va_list args)
+lex_next_error (struct lexer *lexer, int n0, int n1, const char *format, ...)
{
- lex_next_error_valist (lexer, 0, 0, format, args);
+ va_list args;
+
+ va_start (args, format);
+ int ofs = lex_ofs (lexer);
+ lex_ofs_msg_valist (lexer, SE, n0 + ofs, n1 + ofs, format, args);
+ va_end (args);
}
-/* Prints a syntax error message containing the current token and
- given message MESSAGE (if non-null). */
+/* Prints a syntax error message for the span of tokens with offsets OFS0
+ through OFS1, inclusive, within the current command in LEXER, adding message
+ MESSAGE (if non-null). */
void
-lex_next_error (struct lexer *lexer, int n0, int n1, const char *format, ...)
+lex_ofs_error (struct lexer *lexer, int ofs0, int ofs1, const char *format, ...)
{
va_list args;
va_start (args, format);
- lex_next_error_valist (lexer, n0, n1, format, args);
+ lex_ofs_msg_valist (lexer, SE, ofs0, ofs1, format, args);
va_end (args);
}
-/* Reports an error to the effect that subcommand SBC may only be
- specified once. */
+/* Prints a message of the given CLASS containing the current token and given
+ message MESSAGE (if non-null). */
void
-lex_sbc_only_once (const char *sbc)
+lex_msg (struct lexer *lexer, enum msg_class class, const char *format, ...)
{
- msg (SE, _("Subcommand %s may only be specified once."), sbc);
+ va_list args;
+
+ va_start (args, format);
+ lex_ofs_msg_valist (lexer, class, lex_ofs (lexer), lex_ofs (lexer),
+ format, args);
+ va_end (args);
}
-/* Reports an error to the effect that subcommand SBC is
- missing. */
+/* Prints a syntax error message for the span of tokens N0 through N1,
+ inclusive, from the current token in LEXER, adding message MESSAGE (if
+ non-null). */
void
-lex_sbc_missing (struct lexer *lexer, const char *sbc)
+lex_next_msg (struct lexer *lexer, enum msg_class class, int n0, int n1,
+ const char *format, ...)
{
- lex_error (lexer, _("missing required subcommand %s"), sbc);
+ va_list args;
+
+ va_start (args, format);
+ int ofs = lex_ofs (lexer);
+ lex_ofs_msg_valist (lexer, class, n0 + ofs, n1 + ofs, format, args);
+ va_end (args);
}
-/* Prints a syntax error message containing the current token and
- given message MESSAGE (if non-null). */
+/* Prints a message of the given CLASS for the span of tokens with offsets OFS0
+ through OFS1, inclusive, within the current command in LEXER, adding message
+ MESSAGE (if non-null). */
void
-lex_next_error_valist (struct lexer *lexer, int n0, int n1,
- const char *format, va_list args)
+lex_ofs_msg (struct lexer *lexer, enum msg_class class, int ofs0, int ofs1,
+ const char *format, ...)
{
- struct lex_source *src = lex_source__ (lexer);
+ va_list args;
- if (src != NULL)
- lex_source_error_valist (src, n0, n1, format, args);
- else
+ va_start (args, format);
+ lex_ofs_msg_valist (lexer, class, ofs0, ofs1, format, args);
+ va_end (args);
+}
+
+/* Prints a syntax error message saying that one of the strings provided as
+ varargs, up to the first NULL, is expected. */
+void
+(lex_error_expecting) (struct lexer *lexer, ...)
+{
+ va_list args;
+
+ va_start (args, lexer);
+ lex_error_expecting_valist (lexer, args);
+ va_end (args);
+}
+
+/* Prints a syntax error message saying that one of the options provided in
+ ARGS, up to the first NULL, is expected. */
+void
+lex_error_expecting_valist (struct lexer *lexer, va_list args)
+{
+ enum { MAX_OPTIONS = 9 };
+ const char *options[MAX_OPTIONS];
+ int n = 0;
+ while (n < MAX_OPTIONS)
{
- struct string s;
+ const char *option = va_arg (args, const char *);
+ if (!option)
+ break;
- ds_init_empty (&s);
- ds_put_format (&s, _("Syntax error at end of input"));
- if (format != NULL)
- {
- ds_put_cstr (&s, ": ");
- ds_put_vformat (&s, format, args);
- }
- ds_put_byte (&s, '.');
- msg (SE, "%s", ds_cstr (&s));
- ds_destroy (&s);
+ options[n++] = option;
}
+ lex_error_expecting_array (lexer, options, n);
+}
+
+void
+lex_error_expecting_array (struct lexer *lexer, const char **options, size_t n)
+{
+ switch (n)
+ {
+ case 0:
+ lex_error (lexer, NULL);
+ break;
+
+ case 1:
+ lex_error (lexer, _("Syntax error expecting %s."), options[0]);
+ break;
+
+ case 2:
+ lex_error (lexer, _("Syntax error expecting %s or %s."),
+ options[0], options[1]);
+ break;
+
+ case 3:
+ lex_error (lexer, _("Syntax error expecting %s, %s, or %s."),
+ options[0], options[1], options[2]);
+ break;
+
+ case 4:
+ lex_error (lexer, _("Syntax error expecting %s, %s, %s, or %s."),
+ options[0], options[1], options[2], options[3]);
+ break;
+
+ case 5:
+ lex_error (lexer, _("Syntax error expecting %s, %s, %s, %s, or %s."),
+ options[0], options[1], options[2], options[3], options[4]);
+ break;
+
+ case 6:
+ lex_error (lexer, _("Syntax error expecting %s, %s, %s, %s, %s, or %s."),
+ options[0], options[1], options[2], options[3], options[4],
+ options[5]);
+ break;
+
+ case 7:
+ lex_error (lexer, _("Syntax error expecting %s, %s, %s, %s, %s, %s, "
+ "or %s."),
+ options[0], options[1], options[2], options[3], options[4],
+ options[5], options[6]);
+ break;
+
+ case 8:
+ lex_error (lexer, _("Syntax error expecting %s, %s, %s, %s, %s, %s, %s, "
+ "or %s."),
+ options[0], options[1], options[2], options[3], options[4],
+ options[5], options[6], options[7]);
+ break;
+
+ default:
+ {
+ struct string s = DS_EMPTY_INITIALIZER;
+ for (size_t i = 0; i < n; i++)
+ {
+ if (i > 0)
+ ds_put_cstr (&s, ", ");
+ ds_put_cstr (&s, options[i]);
+ }
+ lex_error (lexer, _("Syntax error expecting one of the following: %s."),
+ ds_cstr (&s));
+ ds_destroy (&s);
+ }
+ break;
+ }
+}
+
+/* Reports an error to the effect that subcommand SBC may only be specified
+ once. */
+void
+lex_sbc_only_once (struct lexer *lexer, const char *sbc)
+{
+ int ofs = lex_ofs (lexer) - 1;
+ if (lex_ofs_token (lexer, ofs)->type == T_EQUALS)
+ ofs--;
+
+ /* lex_ofs_at_phrase__() handles subcommand names that are keywords, such as
+ BY. */
+ if (lex_ofs_at_phrase__ (lexer, ofs, sbc))
+ lex_ofs_error (lexer, ofs, ofs,
+ _("Subcommand %s may only be specified once."), sbc);
+ else
+ msg (SE, _("Subcommand %s may only be specified once."), sbc);
+}
+
+/* Reports an error to the effect that subcommand SBC is missing.
+
+   This function does not use lex_error(), because a missing subcommand can
+   normally be detected only after the whole command has been parsed, and so
+   lex_error() would always report "Syntax error at end of command", which
+   does not help the user find the error. */
+void
+lex_sbc_missing (struct lexer *lexer, const char *sbc)
+{
+ lex_ofs_error (lexer, 0, lex_max_ofs (lexer),
+ _("Required subcommand %s was not specified."), sbc);
+}
+
+/* Reports an error to the effect that specification SPEC may only be specified
+ once within subcommand SBC. */
+void
+lex_spec_only_once (struct lexer *lexer, const char *sbc, const char *spec)
+{
+ lex_error (lexer, _("%s may only be specified once within subcommand %s."),
+ spec, sbc);
+}
+
+/* Reports an error to the effect that specification SPEC is missing within
+ subcommand SBC. */
+void
+lex_spec_missing (struct lexer *lexer, const char *sbc, const char *spec)
+{
+ lex_error (lexer, _("Required %s specification missing from %s subcommand."),
+ spec, sbc);
+}
+
+/* Prints a syntax error message for the span of tokens with offsets OFS0
+ through OFS1, inclusive, within the current command in LEXER, adding message
+ MESSAGE (if non-null) with the given ARGS. */
+void
+lex_ofs_msg_valist (struct lexer *lexer, enum msg_class class,
+ int ofs0, int ofs1, const char *format, va_list args)
+{
+ lex_source_msg_valist (lex_source__ (lexer), class, ofs0, ofs1, format, args);
}
/* Checks that we're at end of command.
{
if (lex_token (lexer) != T_ENDCMD && lex_token (lexer) != T_STOP)
{
- lex_error (lexer, _("expecting end of command"));
+ lex_error (lexer, _("Syntax error expecting end of command."));
return CMD_FAILURE;
}
else
/* Returns true if the current token is a number. */
bool
-lex_is_number (struct lexer *lexer)
+lex_is_number (const struct lexer *lexer)
{
return lex_next_is_number (lexer, 0);
}
/* Returns true if the current token is a string. */
bool
-lex_is_string (struct lexer *lexer)
+lex_is_string (const struct lexer *lexer)
{
return lex_next_is_string (lexer, 0);
}
/* Returns the value of the current token, which must be a
floating point number. */
double
-lex_number (struct lexer *lexer)
+lex_number (const struct lexer *lexer)
{
return lex_next_number (lexer, 0);
}
/* Returns true iff the current token is an integer. */
bool
-lex_is_integer (struct lexer *lexer)
+lex_is_integer (const struct lexer *lexer)
{
return lex_next_is_integer (lexer, 0);
}
/* Returns the value of the current token, which must be an
integer. */
long
-lex_integer (struct lexer *lexer)
+lex_integer (const struct lexer *lexer)
{
return lex_next_integer (lexer, 0);
}
/* Returns true if the token N ahead of the current token is a number. */
bool
-lex_next_is_number (struct lexer *lexer, int n)
+lex_next_is_number (const struct lexer *lexer, int n)
{
- enum token_type next_token = lex_next_token (lexer, n);
- return next_token == T_POS_NUM || next_token == T_NEG_NUM;
+ return token_is_number (lex_next (lexer, n));
}
/* Returns true if the token N ahead of the current token is a string. */
bool
-lex_next_is_string (struct lexer *lexer, int n)
+lex_next_is_string (const struct lexer *lexer, int n)
{
- return lex_next_token (lexer, n) == T_STRING;
+ return token_is_string (lex_next (lexer, n));
}
/* Returns the value of the token N ahead of the current token, which must be a
floating point number. */
double
-lex_next_number (struct lexer *lexer, int n)
+lex_next_number (const struct lexer *lexer, int n)
{
- assert (lex_next_is_number (lexer, n));
- return lex_next_tokval (lexer, n);
+ return token_number (lex_next (lexer, n));
}
/* Returns true if the token N ahead of the current token is an integer. */
bool
-lex_next_is_integer (struct lexer *lexer, int n)
+lex_next_is_integer (const struct lexer *lexer, int n)
{
- double value;
-
- if (!lex_next_is_number (lexer, n))
- return false;
-
- value = lex_next_tokval (lexer, n);
- return value > LONG_MIN && value <= LONG_MAX && floor (value) == value;
+ return token_is_integer (lex_next (lexer, n));
}
/* Returns the value of the token N ahead of the current token, which must be
an integer. */
long
-lex_next_integer (struct lexer *lexer, int n)
+lex_next_integer (const struct lexer *lexer, int n)
{
- assert (lex_next_is_integer (lexer, n));
- return lex_next_tokval (lexer, n);
+ return token_integer (lex_next (lexer, n));
}
\f
/* Token matching functions. */
return true;
else
{
- lex_error (lexer, _("expecting `%s'"), identifier);
+ lex_error_expecting (lexer, identifier);
return false;
}
}
}
else
{
- lex_error (lexer, _("expecting `%s'"), token_type_to_string (type));
+ const char *type_string = token_type_to_string (type);
+ if (type_string)
+ {
+ char *s = xasprintf ("`%s'", type_string);
+ lex_error_expecting (lexer, s);
+ free (s);
+ }
+ else
+ lex_error_expecting (lexer, token_type_to_name (type));
+
return false;
}
}
return true;
else
{
- lex_error (lexer, _("expecting string"));
+ lex_error (lexer, _("Syntax error expecting string."));
return false;
}
}
+/* If the current token is a string or an identifier, does nothing and returns
+ true. Otherwise, reports an error and returns false.
+
+ This is meant for use in syntactic situations where we want to encourage the
+ user to supply a quoted string, but for compatibility we also accept
+ identifiers. (One example of such a situation is file names.) Therefore,
+ the error message issued when the current token is wrong only says that a
+ string is expected and doesn't mention that an identifier would also be
+ accepted. */
+bool
+lex_force_string_or_id (struct lexer *lexer)
+{
+ return lex_token (lexer) == T_ID || lex_force_string (lexer);
+}
+
/* If the current token is an integer, does nothing and returns true.
Otherwise, reports an error and returns false. */
bool
return true;
else
{
- lex_error (lexer, _("expecting integer"));
+ lex_error (lexer, _("Syntax error expecting integer."));
return false;
}
}
+/* If the current token is an integer in the range MIN...MAX (inclusive), does
+ nothing and returns true. Otherwise, reports an error and returns false.
+ If NAME is nonnull, then it is used in the error message. */
+bool
+lex_force_int_range (struct lexer *lexer, const char *name, long min, long max)
+{
+ bool is_number = lex_is_number (lexer);
+ bool is_integer = lex_is_integer (lexer);
+ bool too_small = (is_integer ? lex_integer (lexer) < min
+ : is_number ? lex_number (lexer) < min
+ : false);
+ bool too_big = (is_integer ? lex_integer (lexer) > max
+ : is_number ? lex_number (lexer) > max
+ : false);
+ if (is_integer && !too_small && !too_big)
+ return true;
+
+ if (min > max)
+ {
+ /* Weird, maybe a bug in the caller. Just report that we needed an
+ integer. */
+ if (name)
+ lex_error (lexer, _("Syntax error expecting integer for %s."), name);
+ else
+ lex_error (lexer, _("Syntax error expecting integer."));
+ }
+ else if (min == max)
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting %ld for %s."), min, name);
+ else
+ lex_error (lexer, _("Syntax error expecting %ld."), min);
+ }
+ else if (min + 1 == max)
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting %ld or %ld for %s."),
+ min, min + 1, name);
+ else
+ lex_error (lexer, _("Syntax error expecting %ld or %ld."),
+ min, min + 1);
+ }
+ else
+ {
+ bool report_lower_bound = (min > INT_MIN / 2) || too_small;
+ bool report_upper_bound = (max < INT_MAX / 2) || too_big;
+
+ if (report_lower_bound && report_upper_bound)
+ {
+ if (name)
+ lex_error (lexer,
+ _("Syntax error expecting integer "
+ "between %ld and %ld for %s."),
+ min, max, name);
+ else
+ lex_error (lexer, _("Syntax error expecting integer "
+ "between %ld and %ld."),
+ min, max);
+ }
+ else if (report_lower_bound)
+ {
+ if (min == 0)
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting "
+ "non-negative integer for %s."),
+ name);
+ else
+ lex_error (lexer, _("Syntax error expecting "
+ "non-negative integer."));
+ }
+ else if (min == 1)
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting "
+ "positive integer for %s."),
+ name);
+ else
+ lex_error (lexer, _("Syntax error expecting "
+ "positive integer."));
+ }
+ else
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting "
+ "integer %ld or greater for %s."),
+ min, name);
+ else
+ lex_error (lexer, _("Syntax error expecting "
+ "integer %ld or greater."), min);
+ }
+ }
+ else if (report_upper_bound)
+ {
+ if (name)
+ lex_error (lexer,
+ _("Syntax error expecting integer less than or equal "
+ "to %ld for %s."),
+ max, name);
+ else
+ lex_error (lexer, _("Syntax error expecting integer less than or "
+ "equal to %ld."),
+ max);
+ }
+ else
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting integer for %s."),
+ name);
+ else
+ lex_error (lexer, _("Syntax error expecting integer."));
+ }
+ }
+ return false;
+}
+
/* If the current token is a number, does nothing and returns true.
Otherwise, reports an error and returns false. */
bool
if (lex_is_number (lexer))
return true;
- lex_error (lexer, _("expecting number"));
+ lex_error (lexer, _("Syntax error expecting number."));
+ return false;
+}
+
+/* If the current token is a number in the closed range [MIN,MAX], does
+ nothing and returns true. Otherwise, reports an error and returns false.
+ If NAME is nonnull, then it is used in the error message. */
+bool
+lex_force_num_range_closed (struct lexer *lexer, const char *name,
+ double min, double max)
+{
+ bool is_number = lex_is_number (lexer);
+ bool too_small = is_number && lex_number (lexer) < min;
+ bool too_big = is_number && lex_number (lexer) > max;
+ if (is_number && !too_small && !too_big)
+ return true;
+
+ if (min > max)
+ {
+      /* Weird, maybe a bug in the caller.  Just report that we needed a
+         number. */
+ if (name)
+ lex_error (lexer, _("Syntax error expecting number for %s."), name);
+ else
+ lex_error (lexer, _("Syntax error expecting number."));
+ }
+ else if (min == max)
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting number %g for %s."),
+ min, name);
+ else
+ lex_error (lexer, _("Syntax error expecting number %g."), min);
+ }
+ else
+ {
+ bool report_lower_bound = min > -DBL_MAX || too_small;
+ bool report_upper_bound = max < DBL_MAX || too_big;
+
+ if (report_lower_bound && report_upper_bound)
+ {
+ if (name)
+ lex_error (lexer,
+ _("Syntax error expecting number "
+ "between %g and %g for %s."),
+ min, max, name);
+ else
+ lex_error (lexer, _("Syntax error expecting number "
+ "between %g and %g."),
+ min, max);
+ }
+ else if (report_lower_bound)
+ {
+ if (min == 0)
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting "
+ "non-negative number for %s."),
+ name);
+ else
+ lex_error (lexer, _("Syntax error expecting "
+ "non-negative number."));
+ }
+ else
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting number "
+ "%g or greater for %s."),
+ min, name);
+ else
+ lex_error (lexer, _("Syntax error expecting number "
+ "%g or greater."), min);
+ }
+ }
+ else if (report_upper_bound)
+ {
+ if (name)
+ lex_error (lexer,
+ _("Syntax error expecting number "
+ "less than or equal to %g for %s."),
+ max, name);
+ else
+ lex_error (lexer, _("Syntax error expecting number "
+ "less than or equal to %g."),
+ max);
+ }
+ else
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting number for %s."), name);
+ else
+ lex_error (lexer, _("Syntax error expecting number."));
+ }
+ }
+ return false;
+}
+
+/* If the current token is a number in the half-open range [MIN,MAX), does
+ nothing and returns true. Otherwise, reports an error and returns false.
+ If NAME is nonnull, then it is used in the error message. */
+bool
+lex_force_num_range_halfopen (struct lexer *lexer, const char *name,
+ double min, double max)
+{
+ bool is_number = lex_is_number (lexer);
+ bool too_small = is_number && lex_number (lexer) < min;
+ bool too_big = is_number && lex_number (lexer) >= max;
+ if (is_number && !too_small && !too_big)
+ return true;
+
+ if (min >= max)
+ {
+      /* Weird, maybe a bug in the caller.  Just report that we needed a
+         number. */
+ if (name)
+ lex_error (lexer, _("Syntax error expecting number for %s."), name);
+ else
+ lex_error (lexer, _("Syntax error expecting number."));
+ }
+ else
+ {
+ bool report_lower_bound = min > -DBL_MAX || too_small;
+ bool report_upper_bound = max < DBL_MAX || too_big;
+
+ if (report_lower_bound && report_upper_bound)
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting number "
+ "in [%g,%g) for %s."),
+ min, max, name);
+ else
+ lex_error (lexer, _("Syntax error expecting number in [%g,%g)."),
+ min, max);
+ }
+ else if (report_lower_bound)
+ {
+ if (min == 0)
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting "
+ "non-negative number for %s."),
+ name);
+ else
+ lex_error (lexer, _("Syntax error expecting "
+ "non-negative number."));
+ }
+ else
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting "
+ "number %g or greater for %s."),
+ min, name);
+ else
+ lex_error (lexer, _("Syntax error expecting "
+ "number %g or greater."), min);
+ }
+ }
+ else if (report_upper_bound)
+ {
+ if (name)
+ lex_error (lexer,
+ _("Syntax error expecting "
+ "number less than %g for %s."), max, name);
+ else
+ lex_error (lexer, _("Syntax error expecting "
+ "number less than %g."), max);
+ }
+ else
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting number for %s."), name);
+ else
+ lex_error (lexer, _("Syntax error expecting number."));
+ }
+ }
+ return false;
+}
+
+/* If the current token is a number in the open range (MIN,MAX), does
+ nothing and returns true. Otherwise, reports an error and returns false.
+ If NAME is nonnull, then it is used in the error message. */
+bool
+lex_force_num_range_open (struct lexer *lexer, const char *name,
+ double min, double max)
+{
+ bool is_number = lex_is_number (lexer);
+ bool too_small = is_number && lex_number (lexer) <= min;
+ bool too_big = is_number && lex_number (lexer) >= max;
+ if (is_number && !too_small && !too_big)
+ return true;
+
+ if (min >= max)
+ {
+      /* Weird, maybe a bug in the caller.  Just report that we needed a
+         number. */
+ if (name)
+ lex_error (lexer, _("Syntax error expecting number for %s."), name);
+ else
+ lex_error (lexer, _("Syntax error expecting number."));
+ }
+ else
+ {
+ bool report_lower_bound = min > -DBL_MAX || too_small;
+ bool report_upper_bound = max < DBL_MAX || too_big;
+
+ if (report_lower_bound && report_upper_bound)
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting number "
+ "in (%g,%g) for %s."),
+ min, max, name);
+ else
+ lex_error (lexer, _("Syntax error expecting number "
+ "in (%g,%g)."), min, max);
+ }
+ else if (report_lower_bound)
+ {
+ if (min == 0)
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting "
+ "positive number for %s."), name);
+ else
+ lex_error (lexer, _("Syntax error expecting "
+ "positive number."));
+ }
+ else
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting number "
+ "greater than %g for %s."),
+ min, name);
+ else
+ lex_error (lexer, _("Syntax error expecting number "
+ "greater than %g."), min);
+ }
+ }
+ else if (report_upper_bound)
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting number "
+ "less than %g for %s."),
+ max, name);
+ else
+ lex_error (lexer, _("Syntax error expecting number "
+ "less than %g."), max);
+ }
+ else
+ {
+ if (name)
+ lex_error (lexer, _("Syntax error expecting number "
+ "for %s."), name);
+ else
+ lex_error (lexer, _("Syntax error expecting number."));
+ }
+ }
return false;
}
if (lex_token (lexer) == T_ID)
return true;
- lex_error (lexer, _("expecting identifier"));
+ lex_error (lexer, _("Syntax error expecting identifier."));
return false;
}
\f
return lex_source_next__ (src, n);
else
{
- static const struct lex_token stop_token =
- { TOKEN_INITIALIZER (T_STOP, 0.0, ""), 0, 0, 0, 0 };
-
+ static const struct lex_token stop_token = { .token = { .type = T_STOP } };
return &stop_token;
}
}
static const struct lex_token *
-lex_source_next__ (const struct lex_source *src, int n)
+lex_source_ofs__ (const struct lex_source *src_, int ofs)
{
- while (deque_count (&src->deque) <= n)
+ struct lex_source *src = CONST_CAST (struct lex_source *, src_);
+
+ if (ofs < 0)
{
- if (!deque_is_empty (&src->deque))
- {
- struct lex_token *front;
+ static const struct lex_token endcmd_token
+ = { .token = { .type = T_ENDCMD } };
+ return &endcmd_token;
+ }
- front = &src->tokens[deque_front (&src->deque, 0)];
- if (front->token.type == T_STOP || front->token.type == T_ENDCMD)
- return front;
+ while (ofs >= src->n_parse)
+ {
+ if (src->n_parse > 0)
+ {
+ const struct lex_token *t = src->parse[src->n_parse - 1];
+ if (t->token.type == T_STOP || t->token.type == T_ENDCMD)
+ return t;
}
- lex_source_get__ (src);
+ lex_source_get_parse (src);
}
- return &src->tokens[deque_back (&src->deque, n)];
+ return src->parse[ofs];
+}
+
+static const struct lex_token *
+lex_source_next__ (const struct lex_source *src, int n)
+{
+ return lex_source_ofs__ (src, n + src->parse_ofs);
}
/* Returns the "struct token" of the token N after the current one in LEXER.
double
lex_next_tokval (const struct lexer *lexer, int n)
{
- const struct token *token = lex_next (lexer, n);
- return token->number;
+ return token_number (lex_next (lexer, n));
}
/* Returns the null-terminated string in the token N after the current one, in
The string is null-terminated (but the null terminator is not included in
the returned substring's 'length').
- Only T_ID and T_STRING tokens have meaningful strings. For other tokens
- this functions this function will always return NULL.
+ Only T_ID, T_MACRO_ID, T_STRING tokens have meaningful strings. For other
+ tokens this function will always return NULL.
The UTF-8 encoding of the returned string is correct for variable names and
other identifiers. Use filename_to_utf8() to use it as a filename. Use
return lex_next (lexer, n)->string;
}
-/* If LEXER is positioned at the (pseudo)identifier S, skips it and returns
- true. Otherwise, returns false.
+/* Returns the offset of the current token within the command being parsed in
+ LEXER. This is 0 for the first token in a command, 1 for the second, and so
+ on. The return value is useful later for referring to this token in calls
+ to lex_ofs_*(). */
+int
+lex_ofs (const struct lexer *lexer)
+{
+ struct lex_source *src = lex_source__ (lexer);
+ /* With no current source there is no command in progress, so report
+ offset 0. */
+ return src ? src->parse_ofs : 0;
+}
- S may consist of an arbitrary number of identifiers, integers, and
- punctuation e.g. "KRUSKAL-WALLIS", "2SLS", or "END INPUT PROGRAM".
- Identifiers may be abbreviated to their first three letters. Currently only
- hyphens, slashes, and equals signs are supported as punctuation (but it
- would be easy to add more).
+/* Returns the offset of the last token in the current command. */
+int
+lex_max_ofs (const struct lexer *lexer)
+{
+ struct lex_source *src = lex_source__ (lexer);
+ if (!src)
+ return 0;
- S must be an ASCII string. */
-bool
-lex_match_phrase (struct lexer *lexer, const char *s)
+ int ofs = MAX (1, src->n_parse) - 1;
+ for (;;)
+ {
+ enum token_type type = lex_source_ofs__ (src, ofs)->token.type;
+ if (type == T_ENDCMD || type == T_STOP)
+ return ofs;
+
+ ofs++;
+ }
+}
+
+/* Returns the token within LEXER's current command with offset OFS. Use
+ lex_ofs() to find out the offset of the current token. */
+const struct token *
+lex_ofs_token (const struct lexer *lexer_, int ofs)
{
- int tok_idx;
+ struct lexer *lexer = CONST_CAST (struct lexer *, lexer_);
+ struct lex_source *src = lex_source__ (lexer);
- for (tok_idx = 0; ; tok_idx++)
+ if (src != NULL)
+ return &lex_source_next__ (src, ofs - src->parse_ofs)->token;
+ else
{
- enum token_type token;
- unsigned char c;
+ static const struct token stop_token = { .type = T_STOP };
+ return &stop_token;
+ }
+}
- while (c_isspace (*s))
- s++;
+/* Allocates and returns a new struct msg_location that spans tokens with
+ offsets OFS0 through OFS1, inclusive, within the current command in
+ LEXER. See lex_ofs() for an explanation of token offsets.
- c = *s;
- if (c == '\0')
- {
- int i;
+ The caller owns and must eventually free the returned object. */
+struct msg_location *
+lex_ofs_location (const struct lexer *lexer, int ofs0, int ofs1)
+{
+ int ofs = lex_ofs (lexer);
+ return lex_get_location (lexer, ofs0 - ofs, ofs1 - ofs);
+}
- for (i = 0; i < tok_idx; i++)
- lex_get (lexer);
- return true;
- }
+/* Returns a msg_point for the first character in the token with offset OFS,
+ where offset 0 is the first token in the command currently being parsed, 1
+ the second token, and so on. These are absolute offsets, not relative to
+ the token currently being parsed within the command.
- token = lex_next_token (lexer, tok_idx);
- switch (c)
- {
- case '-':
- if (token != T_DASH)
- return false;
- s++;
- break;
-
- case '/':
- if (token != T_SLASH)
- return false;
- s++;
- break;
-
- case '=':
- if (token != T_EQUALS)
- return false;
- s++;
- break;
-
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9':
- {
- unsigned int value;
+ Returns zeros for a T_STOP token.
+ */
+struct msg_point
+lex_ofs_start_point (const struct lexer *lexer, int ofs)
+{
+ const struct lex_source *src = lex_source__ (lexer);
+ /* A null source yields the all-zeros point, mirroring the T_STOP case. */
+ return (src
+ ? lex_token_start_point (src, lex_source_ofs__ (src, ofs))
+ : (struct msg_point) { 0, 0 });
+}
- if (token != T_POS_NUM)
- return false;
+/* Returns a msg_point for the last character, inclusive, in the token with
+ offset OFS, where offset 0 is the first token in the command currently being
+ parsed, 1 the second token, and so on. These are absolute offsets, not
+ relative to the token currently being parsed within the command.
- value = 0;
- do
- {
- value = value * 10 + (*s++ - '0');
- }
- while (c_isdigit (*s));
+ Returns zeros for a T_STOP token.
- if (lex_next_tokval (lexer, tok_idx) != value)
- return false;
- }
- break;
+ Most of the time, a single token is wholly within a single line of syntax,
+ so that the start and end point for a given offset have the same line
+ number. There are two exceptions: a T_STRING token can be made up of
+ multiple segments on adjacent lines connected with "+" punctuators, and a
+ T_NEG_NUM token can consist of a "-" on one line followed by the number on
+ the next.
+ */
+struct msg_point
+lex_ofs_end_point (const struct lexer *lexer, int ofs)
+{
+ const struct lex_source *src = lex_source__ (lexer);
+ /* A null source yields the all-zeros point, mirroring the T_STOP case. */
+ return (src
+ ? lex_token_end_point (src, lex_source_ofs__ (src, ofs))
+ : (struct msg_point) { 0, 0 });
+}
- default:
- if (lex_is_id1 (c))
- {
- int len;
+/* Returns the text of the syntax in tokens N0 ahead of the current one,
+ through N1 ahead of the current one, inclusive. (For example, if N0 and N1
+ are both zero, this requests the syntax for the current token.)
+
+ The caller must eventually free the returned string (with free()). The
+ syntax is encoded in UTF-8 and in the original form supplied to the lexer so
+ that, for example, it may include comments, spaces, and new-lines if it
+ spans multiple tokens. Macro expansion, however, has already been
+ performed. */
+char *
+lex_next_representation (const struct lexer *lexer, int n0, int n1)
+{
+ const struct lex_source *src = lex_source__ (lexer);
+ /* N0 and N1 are relative to the current token; adding src->parse_ofs
+ converts them to absolute offsets within the current command. */
+ return (src
+ ? lex_source_syntax__ (src, n0 + src->parse_ofs, n1 + src->parse_ofs)
+ : xstrdup (""));
+}
+
+
+/* Returns the text of the syntax in tokens with offsets OFS0 to OFS1,
+ inclusive. (For example, if OFS0 and OFS1 are both zero, this requests the
+ syntax for the first token in the current command.)
+
+ The caller must eventually free the returned string (with free()). The
+ syntax is encoded in UTF-8 and in the original form supplied to the lexer so
+ that, for example, it may include comments, spaces, and new-lines if it
+ spans multiple tokens. Macro expansion, however, has already been
+ performed. */
+char *
+lex_ofs_representation (const struct lexer *lexer, int ofs0, int ofs1)
+{
+ const struct lex_source *src = lex_source__ (lexer);
+ return src ? lex_source_syntax__ (src, ofs0, ofs1) : xstrdup ("");
+}
+
+/* Returns true if the token N ahead of the current one was produced by macro
+ expansion, false otherwise. */
+bool
+lex_next_is_from_macro (const struct lexer *lexer, int n)
+{
+ /* Only tokens produced by macro expansion carry a non-null macro_rep. */
+ return lex_next__ (lexer, n)->macro_rep != NULL;
+}
+
+static bool
+lex_tokens_match (const struct token *actual, const struct token *expected)
+{
+ if (actual->type != expected->type)
+ return false;
+
+ switch (actual->type)
+ {
+ case T_POS_NUM:
+ case T_NEG_NUM:
+ return actual->number == expected->number;
- if (token != T_ID)
- return false;
+ case T_ID:
+ return lex_id_match (expected->string, actual->string);
- len = lex_id_get_length (ss_cstr (s));
- if (!lex_id_match (ss_buffer (s, len),
- lex_next_tokss (lexer, tok_idx)))
- return false;
+ case T_STRING:
+ return (actual->string.length == expected->string.length
+ && !memcmp (actual->string.string, expected->string.string,
+ actual->string.length));
- s += len;
- }
- else
- NOT_REACHED ();
- }
+ default:
+ return true;
}
}
-static int
-lex_source_get_first_line_number (const struct lex_source *src, int n)
-{
- return lex_source_next__ (src, n)->first_line;
-}
-
-static int
-count_newlines (char *s, size_t length)
+static size_t
+lex_ofs_at_phrase__ (struct lexer *lexer, int ofs, const char *s)
{
- int n_newlines = 0;
- char *newline;
+ struct string_lexer slex;
+ struct token token;
- while ((newline = memchr (s, '\n', length)) != NULL)
+ size_t i = 0;
+ string_lexer_init (&slex, s, strlen (s), SEG_MODE_INTERACTIVE, true);
+ while (string_lexer_next (&slex, &token))
{
- n_newlines++;
- length -= (newline + 1) - s;
- s = newline + 1;
+ bool match = lex_tokens_match (lex_ofs_token (lexer, ofs + i++), &token);
+ token_uninit (&token);
+ if (!match)
+ return 0;
}
-
- return n_newlines;
+ return i;
}
-static int
-lex_source_get_last_line_number (const struct lex_source *src, int n)
+/* If LEXER is positioned at the sequence of tokens that may be parsed from S,
+ returns true. Otherwise, returns false.
+
+ S may consist of an arbitrary sequence of tokens, e.g. "KRUSKAL-WALLIS",
+ "2SLS", or "END INPUT PROGRAM". Identifiers may be abbreviated to their
+ first three letters. */
+bool
+lex_at_phrase (struct lexer *lexer, const char *s)
{
- const struct lex_token *token = lex_source_next__ (src, n);
+ return lex_ofs_at_phrase__ (lexer, lex_ofs (lexer), s) > 0;
+}
- if (token->first_line == 0)
- return 0;
- else
- {
- char *token_str = &src->buffer[token->token_pos - src->tail];
- return token->first_line + count_newlines (token_str, token->token_len) + 1;
- }
+/* If LEXER is positioned at the sequence of tokens that may be parsed from S,
+ skips it and returns true. Otherwise, returns false.
+
+ S may consist of an arbitrary sequence of tokens, e.g. "KRUSKAL-WALLIS",
+ "2SLS", or "END INPUT PROGRAM". Identifiers may be abbreviated to their
+ first three letters. */
+bool
+lex_match_phrase (struct lexer *lexer, const char *s)
+{
+ size_t n = lex_ofs_at_phrase__ (lexer, lex_ofs (lexer), s);
+ /* On a match, consume all N matched tokens before reporting success. */
+ if (n > 0)
+ lex_get_n (lexer, n);
+ return n > 0;
+}
+/* Returns the 1-based line number of the source text at the byte OFFSET in
+ SRC. */
static int
-count_columns (const char *s_, size_t length)
+lex_source_ofs_to_line_number (const struct lex_source *src, size_t offset)
{
- const uint8_t *s = CHAR_CAST (const uint8_t *, s_);
- int columns;
- size_t ofs;
- int mblen;
-
- columns = 0;
- for (ofs = 0; ofs < length; ofs += mblen)
+ size_t lo = 0;
+ size_t hi = src->n_lines;
+ for (;;)
{
- ucs4_t uc;
-
- mblen = u8_mbtouc (&uc, s + ofs, length - ofs);
- if (uc != '\t')
- {
- int width = uc_width (uc, "UTF-8");
- if (width > 0)
- columns += width;
- }
+ size_t mid = (lo + hi) / 2;
+ if (mid + 1 >= src->n_lines)
+ return src->n_lines;
+ else if (offset >= src->lines[mid + 1])
+ lo = mid;
+ else if (offset < src->lines[mid])
+ hi = mid;
else
- columns = ROUND_UP (columns + 1, 8);
+ return mid + 1;
}
-
- return columns + 1;
}
+/* Returns the 1-based column number of the source text at the byte OFFSET in
+ SRC. */
static int
-lex_source_get_first_column (const struct lex_source *src, int n)
+lex_source_ofs_to_column_number (const struct lex_source *src, size_t offset)
{
- const struct lex_token *token = lex_source_next__ (src, n);
- return count_columns (&src->buffer[token->line_pos - src->tail],
- token->token_pos - token->line_pos);
+ const char *newline = memrchr (src->buffer, '\n', offset);
+ size_t line_ofs = newline ? newline - src->buffer + 1 : 0;
+ return utf8_count_columns (&src->buffer[line_ofs], offset - line_ofs) + 1;
}
-static int
-lex_source_get_last_column (const struct lex_source *src, int n)
+static struct msg_point
+lex_source_ofs_to_point__ (const struct lex_source *src, size_t offset)
{
- const struct lex_token *token = lex_source_next__ (src, n);
- char *start, *end, *newline;
-
- start = &src->buffer[token->line_pos - src->tail];
- end = &src->buffer[(token->token_pos + token->token_len) - src->tail];
- newline = memrchr (start, '\n', end - start);
- if (newline != NULL)
- start = newline + 1;
- return count_columns (start, end - start);
+ return (struct msg_point) {
+ .line = lex_source_ofs_to_line_number (src, offset),
+ .column = lex_source_ofs_to_column_number (src, offset),
+ };
}
-/* Returns the 1-based line number of the start of the syntax that represents
- the token N after the current one in LEXER. Returns 0 for a T_STOP token or
- if the token is drawn from a source that does not have line numbers. */
-int
-lex_get_first_line_number (const struct lexer *lexer, int n)
+static struct msg_point
+lex_token_start_point (const struct lex_source *src,
+ const struct lex_token *token)
{
- const struct lex_source *src = lex_source__ (lexer);
- return src != NULL ? lex_source_get_first_line_number (src, n) : 0;
+ return lex_source_ofs_to_point__ (src, token->token_pos);
}
-/* Returns the 1-based line number of the end of the syntax that represents the
- token N after the current one in LEXER, plus 1. Returns 0 for a T_STOP
- token or if the token is drawn from a source that does not have line
- numbers.
-
- Most of the time, a single token is wholly within a single line of syntax,
- but there are two exceptions: a T_STRING token can be made up of multiple
- segments on adjacent lines connected with "+" punctuators, and a T_NEG_NUM
- token can consist of a "-" on one line followed by the number on the next.
- */
-int
-lex_get_last_line_number (const struct lexer *lexer, int n)
+static struct msg_point
+lex_token_end_point (const struct lex_source *src,
+ const struct lex_token *token)
{
- const struct lex_source *src = lex_source__ (lexer);
- return src != NULL ? lex_source_get_last_line_number (src, n) : 0;
+ return lex_source_ofs_to_point__ (src, lex_token_end (token));
}
-/* Returns the 1-based column number of the start of the syntax that represents
- the token N after the current one in LEXER. Returns 0 for a T_STOP
- token.
-
- Column numbers are measured according to the width of characters as shown in
- a typical fixed-width font, in which CJK characters have width 2 and
- combining characters have width 0. */
-int
-lex_get_first_column (const struct lexer *lexer, int n)
+static struct msg_location
+lex_token_location (const struct lex_source *src,
+ const struct lex_token *t0,
+ const struct lex_token *t1)
{
- const struct lex_source *src = lex_source__ (lexer);
- return src != NULL ? lex_source_get_first_column (src, n) : 0;
+ return (struct msg_location) {
+ .file_name = intern_new_if_nonnull (src->reader->file_name),
+ .start = lex_token_start_point (src, t0),
+ .end = lex_token_end_point (src, t1),
+ .src = CONST_CAST (struct lex_source *, src),
+ };
}
-/* Returns the 1-based column number of the end of the syntax that represents
- the token N after the current one in LEXER, plus 1. Returns 0 for a T_STOP
- token.
+static struct msg_location *
+lex_token_location_rw (const struct lex_source *src,
+ const struct lex_token *t0,
+ const struct lex_token *t1)
+{
+ /* Returns a heap-allocated copy that the caller owns -- presumably freed
+ with msg_location_destroy(); confirm against the msg API. */
+ struct msg_location location = lex_token_location (src, t0, t1);
+ return msg_location_dup (&location);
+}
- Column numbers are measured according to the width of characters as shown in
- a typical fixed-width font, in which CJK characters have width 2 and
- combining characters have width 0. */
-int
-lex_get_last_column (const struct lexer *lexer, int n)
+static struct msg_location *
+lex_source_get_location (const struct lex_source *src, int ofs0, int ofs1)
{
- const struct lex_source *src = lex_source__ (lexer);
- return src != NULL ? lex_source_get_last_column (src, n) : 0;
+ return lex_token_location_rw (src,
+ lex_source_ofs__ (src, ofs0),
+ lex_source_ofs__ (src, ofs1));
}
/* Returns the name of the syntax file from which the current command is drawn.
return src == NULL ? NULL : src->reader->file_name;
}
+/* Returns a newly allocated msg_location for the syntax that represents tokens
+ with 0-based offsets N0...N1, inclusive, from the current token. The caller
+ must eventually free the location (with msg_location_destroy()). */
+struct msg_location *
+lex_get_location (const struct lexer *lexer, int n0, int n1)
+{
+ struct msg_location *loc = xmalloc (sizeof *loc);
+ *loc = (struct msg_location) {
+ .file_name = intern_new_if_nonnull (lex_get_file_name (lexer)),
+ .start = lex_ofs_start_point (lexer, n0 + lex_ofs (lexer)),
+ .end = lex_ofs_end_point (lexer, n1 + lex_ofs (lexer)),
+ .src = lex_source__ (lexer),
+ };
+ /* Take a reference so the source stays alive while the location refers
+ to it. */
+ lex_source_ref (loc->src);
+ return loc;
+}
+
+const char *
+lex_get_encoding (const struct lexer *lexer)
+{
+ struct lex_source *src = lex_source__ (lexer);
+ return src == NULL ? NULL : src->reader->encoding;
+}
+
/* Returns the syntax mode for the syntax file from which the current drawn is
- drawn. Returns LEX_SYNTAX_AUTO for a T_STOP token or if the command's
- source does not have line numbers.
+ drawn. Returns SEG_MODE_AUTO for a T_STOP token or if the command's source
+ does not have line numbers.
There is no version of this function that takes an N argument because
lookahead only works to the end of a command and any given command is always
within a single syntax file. */
-enum lex_syntax_mode
+enum segmenter_mode
lex_get_syntax_mode (const struct lexer *lexer)
{
struct lex_source *src = lex_source__ (lexer);
- return src == NULL ? LEX_SYNTAX_AUTO : src->reader->syntax;
+ return src == NULL ? SEG_MODE_AUTO : src->reader->syntax;
}
/* Returns the error mode for the syntax file from which the current drawn is
- drawn. Returns LEX_ERROR_INTERACTIVE for a T_STOP token or if the command's
+ drawn. Returns LEX_ERROR_TERMINAL for a T_STOP token or if the command's
source does not have line numbers.
There is no version of this function that takes an N argument because
lex_get_error_mode (const struct lexer *lexer)
{
struct lex_source *src = lex_source__ (lexer);
- return src == NULL ? LEX_ERROR_INTERACTIVE : src->reader->error;
+ return src == NULL ? LEX_ERROR_TERMINAL : src->reader->error;
}
/* If the source that LEXER is currently reading has error mode
- LEX_ERROR_INTERACTIVE, discards all buffered input and tokens, so that the
- next token to be read comes directly from whatever is next read from the
- stream.
+ LEX_ERROR_TERMINAL, discards all buffered input and tokens, so that the next
+ token to be read comes directly from whatever is next read from the stream.
It makes sense to call this function after encountering an error in a
command entered on the console, because usually the user would prefer not to
lex_interactive_reset (struct lexer *lexer)
{
struct lex_source *src = lex_source__ (lexer);
- if (src != NULL && src->reader->error == LEX_ERROR_INTERACTIVE)
+ if (src != NULL && src->reader->error == LEX_ERROR_TERMINAL)
{
- src->head = src->tail = 0;
- src->journal_pos = src->seg_pos = src->line_pos = 0;
- src->n_newlines = 0;
+ src->length = 0;
+ src->journal_pos = src->seg_pos = 0;
+ src->n_lines = 0;
src->suppress_next_newline = false;
- segmenter_init (&src->segmenter, segmenter_get_mode (&src->segmenter));
- while (!deque_is_empty (&src->deque))
- lex_source_pop__ (src);
+ src->segmenter = segmenter_init (segmenter_get_mode (&src->segmenter),
+ false);
+ lex_stage_clear (&src->pp);
+ lex_stage_clear (&src->merge);
+ lex_source_clear_parse (src);
lex_source_push_endcmd__ (src);
}
}
}
/* Discards all lookahead tokens in LEXER, then discards all input sources
- until it encounters one with error mode LEX_ERROR_INTERACTIVE or until it
+ until it encounters one with error mode LEX_ERROR_TERMINAL or until it
runs out of input sources. */
void
lex_discard_noninteractive (struct lexer *lexer)
{
struct lex_source *src = lex_source__ (lexer);
-
if (src != NULL)
{
- while (!deque_is_empty (&src->deque))
- lex_source_pop__ (src);
+ if (src->reader->error == LEX_ERROR_IGNORE)
+ return;
- for (; src != NULL && src->reader->error != LEX_ERROR_INTERACTIVE;
+ lex_stage_clear (&src->pp);
+ lex_stage_clear (&src->merge);
+ lex_source_clear_parse (src);
+
+ for (; src != NULL && src->reader->error != LEX_ERROR_TERMINAL;
src = lex_source__ (lexer))
- lex_source_destroy (src);
+ {
+ ll_remove (&src->ll);
+ lex_source_unref (src);
+ }
}
}
\f
-static size_t
-lex_source_max_tail__ (const struct lex_source *src)
-{
- const struct lex_token *token;
- size_t max_tail;
-
- assert (src->seg_pos >= src->line_pos);
- max_tail = MIN (src->journal_pos, src->line_pos);
-
- /* Use the oldest token also. (We know that src->deque cannot be empty
- because we are in the process of adding a new token, which is already
- initialized enough to use here.) */
- token = &src->tokens[deque_back (&src->deque, 0)];
- assert (token->token_pos >= token->line_pos);
- max_tail = MIN (max_tail, token->line_pos);
-
- return max_tail;
-}
-
static void
lex_source_expand__ (struct lex_source *src)
{
- if (src->head - src->tail >= src->allocated)
- {
- size_t max_tail = lex_source_max_tail__ (src);
- if (max_tail > src->tail)
- {
- /* Advance the tail, freeing up room at the head. */
- memmove (src->buffer, src->buffer + (max_tail - src->tail),
- src->head - max_tail);
- src->tail = max_tail;
- }
- else
- {
- /* Buffer is completely full. Expand it. */
- src->buffer = x2realloc (src->buffer, &src->allocated);
- }
- }
- else
- {
- /* There's space available at the head of the buffer. Nothing to do. */
- }
+ if (src->length >= src->allocated)
+ src->buffer = x2realloc (src->buffer, &src->allocated);
}
static void
{
do
{
- size_t head_ofs;
- size_t n;
-
lex_source_expand__ (src);
- head_ofs = src->head - src->tail;
- n = src->reader->class->read (src->reader, &src->buffer[head_ofs],
- src->allocated - head_ofs,
- segmenter_get_prompt (&src->segmenter));
+ size_t space = src->allocated - src->length;
+ enum prompt_style prompt = segmenter_get_prompt (&src->segmenter);
+ size_t n = src->reader->class->read (src->reader,
+ &src->buffer[src->length],
+ space, prompt);
+ assert (n <= space);
+
if (n == 0)
{
- /* End of input.
-
- Ensure that the input always ends in a new-line followed by a null
- byte, as required by the segmenter library. */
-
- if (src->head == src->tail
- || src->buffer[src->head - src->tail - 1] != '\n')
- src->buffer[src->head++ - src->tail] = '\n';
-
- lex_source_expand__ (src);
- src->buffer[src->head++ - src->tail] = '\0';
-
+ /* End of input. */
+ src->reader->eof = true;
return;
}
- src->head += n;
+ src->length += n;
}
- while (!memchr (&src->buffer[src->seg_pos - src->tail], '\n',
- src->head - src->seg_pos));
+ while (!memchr (&src->buffer[src->seg_pos], '\n',
+ src->length - src->seg_pos));
}
static struct lex_source *
: ll_data (ll_head (&lexer->sources), struct lex_source, ll));
}
-static struct substring
-lex_source_get_syntax__ (const struct lex_source *src, int n0, int n1)
+const struct lex_source *
+lex_source (const struct lexer *lexer)
{
- const struct lex_token *token0 = lex_source_next__ (src, n0);
- const struct lex_token *token1 = lex_source_next__ (src, MAX (n0, n1));
- size_t start = token0->token_pos;
- size_t end = token1->token_pos + token1->token_len;
-
- return ss_buffer (&src->buffer[start - src->tail], end - start);
+ return lex_source__ (lexer);
}
-static void
-lex_ellipsize__ (struct substring in, char *out, size_t out_size)
+/* Returns the text of the syntax in SRC for tokens with offsets OFS0 through
+ OFS1 in the current command, inclusive. (For example, if OFS0 and OFS1 are
+ both zero, this requests the syntax for the first token in the current
+ command.) The caller must eventually free the returned string (with
+ free()). The syntax is encoded in UTF-8 and in the original form supplied
+ to the lexer so that, for example, it may include comments, spaces, and
+ new-lines if it spans multiple tokens. Macro expansion, however, has
+ already been performed. */
+static char *
+lex_source_syntax__ (const struct lex_source *src, int ofs0, int ofs1)
{
- size_t out_maxlen;
- size_t out_len;
- int mblen;
-
- assert (out_size >= 16);
- out_maxlen = out_size - (in.length >= out_size ? 3 : 0) - 1;
- for (out_len = 0; out_len < in.length; out_len += mblen)
+ struct string s = DS_EMPTY_INITIALIZER;
+ for (size_t i = ofs0; i <= ofs1; )
{
- if (in.string[out_len] == '\n'
- || (in.string[out_len] == '\r'
- && out_len + 1 < in.length
- && in.string[out_len + 1] == '\n'))
- break;
+ /* Find [I,J) as the longest sequence of tokens not produced by macro
+ expansion, or otherwise the longest sequence expanded from a single
+ macro call. */
+ const struct lex_token *first = lex_source_ofs__ (src, i);
+ size_t j;
+ for (j = i + 1; j <= ofs1; j++)
+ {
+ const struct lex_token *cur = lex_source_ofs__ (src, j);
+ if ((first->macro_rep != NULL) != (cur->macro_rep != NULL)
+ || first->macro_rep != cur->macro_rep)
+ break;
+ }
+ const struct lex_token *last = lex_source_ofs__ (src, j - 1);
- mblen = u8_mblen (CHAR_CAST (const uint8_t *, in.string + out_len),
- in.length - out_len);
- if (out_len + mblen > out_maxlen)
- break;
- }
+ /* Now add the syntax for this sequence of tokens to SRC. */
+ if (!ds_is_empty (&s))
+ ds_put_byte (&s, ' ');
+ if (!first->macro_rep)
+ {
+ size_t start = first->token_pos;
+ size_t end = last->token_pos + last->token_len;
+ ds_put_substring (&s, ss_buffer (&src->buffer[start], end - start));
+ }
+ else
+ {
+ size_t start = first->ofs;
+ size_t end = last->ofs + last->len;
+ ds_put_substring (&s, ss_buffer (first->macro_rep + start,
+ end - start));
+ }
- memcpy (out, in.string, out_len);
- strcpy (&out[out_len], out_len < in.length ? "..." : "");
+ i = j;
+ }
+ return ds_steal_cstr (&s);
}
-static void
-lex_source_error_valist (struct lex_source *src, int n0, int n1,
- const char *format, va_list args)
+static bool
+lex_source_contains_macro_call (struct lex_source *src, int ofs0, int ofs1)
{
- const struct lex_token *token;
- struct string s;
- struct msg m;
+ for (int i = ofs0; i <= ofs1; i++)
+ if (lex_source_ofs__ (src, i)->macro_rep)
+ return true;
+ return false;
+}
- ds_init_empty (&s);
+/* If tokens OFS0...OFS1 (inclusive) in SRC contain a macro call, this returns the
+ raw UTF-8 syntax for the macro call (not for the expansion) and for any
+ other tokens included in that range. The syntax is encoded in UTF-8 and in
+ the original form supplied to the lexer so that, for example, it may include
+ comments, spaces, and new-lines if it spans multiple tokens.
- token = lex_source_next__ (src, n0);
- if (token->token.type == T_ENDCMD)
- ds_put_cstr (&s, _("Syntax error at end of command"));
- else
- {
- struct substring syntax = lex_source_get_syntax__ (src, n0, n1);
- if (!ss_is_empty (syntax))
- {
- char syntax_cstr[64];
+ Returns an empty string if the token range doesn't include a macro call.
- lex_ellipsize__ (syntax, syntax_cstr, sizeof syntax_cstr);
- ds_put_format (&s, _("Syntax error at `%s'"), syntax_cstr);
- }
- else
- ds_put_cstr (&s, _("Syntax error"));
- }
+ The caller must not modify or free the returned string. */
+static struct substring
+lex_source_get_macro_call (struct lex_source *src, int ofs0, int ofs1)
+{
+ if (!lex_source_contains_macro_call (src, ofs0, ofs1))
+ return ss_empty ();
- if (format)
- {
- ds_put_cstr (&s, ": ");
- ds_put_vformat (&s, format, args);
- }
- ds_put_byte (&s, '.');
+ const struct lex_token *token0 = lex_source_ofs__ (src, ofs0);
+ const struct lex_token *token1 = lex_source_ofs__ (src, MAX (ofs0, ofs1));
+ size_t start = token0->token_pos;
+ size_t end = token1->token_pos + token1->token_len;
- m.category = MSG_C_SYNTAX;
- m.severity = MSG_S_ERROR;
- m.file_name = src->reader->file_name;
- m.first_line = lex_source_get_first_line_number (src, n0);
- m.last_line = lex_source_get_last_line_number (src, n1);
- m.first_column = lex_source_get_first_column (src, n0);
- m.last_column = lex_source_get_last_column (src, n1);
- m.text = ds_steal_cstr (&s);
- msg_emit (&m);
+ return ss_buffer (&src->buffer[start], end - start);
}
-static void PRINTF_FORMAT (2, 3)
-lex_get_error (struct lex_source *src, const char *format, ...)
+static void
+lex_source_msg_valist (struct lex_source *src, enum msg_class class,
+ int ofs0, int ofs1, const char *format, va_list args)
{
- va_list args;
- int n;
+ struct string s = DS_EMPTY_INITIALIZER;
- va_start (args, format);
+ if (src)
+ {
+ /* Get the macro call(s) that expanded to the syntax that caused the
+ error. */
+ char call[64];
+ str_ellipsize (lex_source_get_macro_call (src, ofs0, ofs1),
+ call, sizeof call);
+ if (call[0])
+ ds_put_format (&s, _("In syntax expanded from `%s'"), call);
+ }
+ else
+ ds_put_cstr (&s, _("At end of input"));
+
+ if (!ds_is_empty (&s))
+ ds_put_cstr (&s, ": ");
+ if (format)
+ ds_put_vformat (&s, format, args);
+ else
+ ds_put_cstr (&s, _("Syntax error."));
- n = deque_count (&src->deque) - 1;
- lex_source_error_valist (src, n, n, format, args);
- lex_source_pop_front (src);
+ if (ds_last (&s) != '.')
+ ds_put_byte (&s, '.');
- va_end (args);
+ struct msg *m = xmalloc (sizeof *m);
+ *m = (struct msg) {
+ .category = msg_class_to_category (class),
+ .severity = msg_class_to_severity (class),
+ .location = src ? lex_source_get_location (src, ofs0, ofs1) : NULL,
+ .text = ds_steal_cstr (&s),
+ };
+ msg_emit (m);
}
-static bool
-lex_source_get__ (const struct lex_source *src_)
+static void
+lex_get_error (struct lex_source *src, const struct lex_token *token)
{
- struct lex_source *src = CONST_CAST (struct lex_source *, src_);
+ char syntax[64];
+ str_ellipsize (ss_buffer (&src->buffer[token->token_pos], token->token_len),
+ syntax, sizeof syntax);
- struct state
- {
- struct segmenter segmenter;
- enum segment_type last_segment;
- int newlines;
- size_t line_pos;
- size_t seg_pos;
- };
-
- struct state state, saved;
- enum scan_result result;
- struct scanner scanner;
- struct lex_token *token;
- int n_lines;
- int i;
-
- if (src->eof)
- return false;
+ struct string s = DS_EMPTY_INITIALIZER;
+ ds_put_cstr (&s, token->token.string.string);
- state.segmenter = src->segmenter;
- state.newlines = 0;
- state.seg_pos = src->seg_pos;
- state.line_pos = src->line_pos;
- saved = state;
+ struct msg *m = xmalloc (sizeof *m);
+ *m = (struct msg) {
+ .category = MSG_C_SYNTAX,
+ .severity = MSG_S_ERROR,
+ .location = lex_token_location_rw (src, token, token),
+ .text = ds_steal_cstr (&s),
+ };
+ msg_emit (m);
+}
- token = lex_push_token__ (src);
- scanner_init (&scanner, &token->token);
- token->line_pos = src->line_pos;
+/* Attempts to append an additional token to 'pp' in SRC, reading more from the
+ underlying lex_reader if necessary. Returns true if a new token was added
+ to SRC's deque, false otherwise. The caller should retry failures unless
+ SRC's 'eof' marker was set to true indicating that there will be no more
+ tokens from this source. */
+static bool
+lex_source_try_get_pp (struct lex_source *src)
+{
+ /* Append a new token to SRC and initialize it. */
+ struct lex_token *token = xmalloc (sizeof *token);
+ token->token = (struct token) { .type = T_STOP };
+ token->macro_rep = NULL;
+ token->ref_cnt = NULL;
token->token_pos = src->seg_pos;
- if (src->reader->line_number > 0)
- token->first_line = src->reader->line_number + src->n_newlines;
- else
- token->first_line = 0;
+ /* Extract a segment. */
+ const char *segment;
+ enum segment_type seg_type;
+ int seg_len;
for (;;)
{
- enum segment_type type;
- const char *segment;
- size_t seg_maxlen;
- int seg_len;
-
- segment = &src->buffer[state.seg_pos - src->tail];
- seg_maxlen = src->head - state.seg_pos;
- seg_len = segmenter_push (&state.segmenter, segment, seg_maxlen, &type);
- if (seg_len < 0)
- {
- lex_source_read__ (src);
- continue;
- }
+ segment = &src->buffer[src->seg_pos];
+ seg_len = segmenter_push (&src->segmenter, segment,
+ src->length - src->seg_pos,
+ src->reader->eof, &seg_type);
+ if (seg_len >= 0)
+ break;
- state.last_segment = type;
- state.seg_pos += seg_len;
- if (type == SEG_NEWLINE)
- {
- state.newlines++;
- state.line_pos = state.seg_pos;
- }
+ /* The segmenter needs more input to produce a segment. */
+ assert (!src->reader->eof);
+ lex_source_read__ (src);
+ }
- result = scanner_push (&scanner, type, ss_buffer (segment, seg_len),
- &token->token);
- if (result == SCAN_SAVE)
- saved = state;
- else if (result == SCAN_BACK)
- {
- state = saved;
- break;
- }
- else if (result == SCAN_DONE)
- break;
+ /* Update state based on the segment. */
+ token->token_len = seg_len;
+ src->seg_pos += seg_len;
+ if (seg_type == SEG_NEWLINE)
+ {
+ if (src->n_lines >= src->allocated_lines)
+ src->lines = x2nrealloc (src->lines, &src->allocated_lines,
+ sizeof *src->lines);
+ src->lines[src->n_lines++] = src->seg_pos;
}
- n_lines = state.newlines;
- if (state.last_segment == SEG_END_COMMAND && !src->suppress_next_newline)
+ /* Get a token from the segment. */
+ enum tokenize_result result = token_from_segment (
+ seg_type, ss_buffer (segment, seg_len), &token->token);
+
+ /* If we've reached the end of a line, or the end of a command, then pass
+ the line to the output engine as a syntax text item. */
+ int n_lines = seg_type == SEG_NEWLINE;
+ /* An end-of-command line is journaled in full below, including its
+ new-line, so suppress the SEG_NEWLINE that follows it; otherwise the
+ same line would be emitted twice.  (The "else if" arm below was dropped
+ from this hunk -- without it the flag was set and cleared in the same
+ branch, a no-op -- restored here.) */
+ if (seg_type == SEG_END_COMMAND && !src->suppress_next_newline)
{
n_lines++;
src->suppress_next_newline = true;
}
else if (n_lines > 0 && src->suppress_next_newline)
{
n_lines--;
src->suppress_next_newline = false;
}
- for (i = 0; i < n_lines; i++)
+ for (int i = 0; i < n_lines; i++)
{
- const char *newline;
- const char *line;
- size_t line_len;
- char *syntax;
+ /* Beginning of line. */
+ const char *line = &src->buffer[src->journal_pos];
+
+ /* Calculate line length, including \n or \r\n end-of-line if present.
+
+ We use src->length even though that may be beyond what we've actually
+ converted to tokens. That's because, if we're emitting the line due
+ to SEG_END_COMMAND, we want to take the whole line through the
+ newline, not just through the '.'. */
+ size_t max_len = src->length - src->journal_pos;
+ const char *newline = memchr (line, '\n', max_len);
+ size_t line_len = newline ? newline - line + 1 : max_len;
+
+ /* Calculate line length excluding end-of-line. */
+ size_t copy_len = line_len;
+ if (copy_len > 0 && line[copy_len - 1] == '\n')
+ copy_len--;
+ if (copy_len > 0 && line[copy_len - 1] == '\r')
+ copy_len--;
+
+ /* Submit the line as syntax. */
+ output_item_submit (text_item_create_nocopy (TEXT_ITEM_SYNTAX,
+ xmemdup0 (line, copy_len),
+ NULL));
+
+ src->journal_pos += line_len;
+ }
- line = &src->buffer[src->journal_pos - src->tail];
- newline = rawmemchr (line, '\n');
- line_len = newline - line;
- if (line_len > 0 && line[line_len - 1] == '\r')
- line_len--;
+ switch (result)
+ {
+ case TOKENIZE_ERROR:
+ lex_get_error (src, token);
+ /* Fall through. */
+ case TOKENIZE_EMPTY:
+ lex_token_destroy (token);
+ return false;
- syntax = malloc (line_len + 2);
- memcpy (syntax, line, line_len);
- syntax[line_len] = '\n';
- syntax[line_len + 1] = '\0';
+ case TOKENIZE_TOKEN:
+ if (token->token.type == T_STOP)
+ {
+ token->token.type = T_ENDCMD;
+ src->eof = true;
+ }
+ lex_stage_push_last (&src->pp, token);
+ return true;
+ }
+ NOT_REACHED ();
+}
- text_item_submit (text_item_create_nocopy (TEXT_ITEM_SYNTAX, syntax));
+/* Attempts to append a new token to SRC. Returns true if successful, false on
+ failure. On failure, the end of SRC has been reached and no more tokens
+ will be forthcoming from it.
- src->journal_pos += newline - line + 1;
- }
+ Does not make the new token available for lookahead yet; the caller must
+ adjust SRC's 'middle' pointer to do so. */
+static bool
+lex_source_get_pp (struct lex_source *src)
+{
+ /* lex_source_try_get_pp() can fail without reaching end of input, e.g. on
+ a tokenizer error or an empty segment, so keep retrying until EOF. */
+ while (!src->eof)
+ if (lex_source_try_get_pp (src))
+ return true;
+ return false;
+}
- token->token_len = state.seg_pos - src->seg_pos;
+/* Moves tokens from SRC's 'pp' stage into its 'merge' stage, expanding any
+ macro call found at the head of 'pp' along the way.  Returns true if at
+ least one token was appended to 'merge', false otherwise (e.g. when a macro
+ call expanded to zero tokens). */
+static bool
+lex_source_try_get_merge (const struct lex_source *src_)
+{
+ struct lex_source *src = CONST_CAST (struct lex_source *, src_);
- src->segmenter = state.segmenter;
- src->seg_pos = state.seg_pos;
- src->line_pos = state.line_pos;
- src->n_newlines += state.newlines;
+ if (lex_stage_is_empty (&src->pp) && !lex_source_get_pp (src))
+ return false;
- switch (token->token.type)
+ if (!settings_get_mexpand ())
{
- default:
- break;
+ /* Macro expansion is disabled: pass everything through unchanged. */
+ lex_stage_shift (&src->merge, &src->pp, lex_stage_count (&src->pp));
+ return true;
+ }
- case T_STOP:
- token->token.type = T_ENDCMD;
- src->eof = true;
- break;
+ /* Now pass tokens one-by-one to the macro expander.
- case SCAN_BAD_HEX_LENGTH:
- lex_get_error (src, _("String of hex digits has %d characters, which "
- "is not a multiple of 2"),
- (int) token->token.number);
- break;
+ In the common case where there is no macro to expand, the loop is not
+ entered. */
+ struct macro_call *mc;
+ int n_call = macro_call_create (src->lexer->macros,
+ &lex_stage_first (&src->pp)->token, &mc);
+ for (int ofs = 1; !n_call; ofs++)
+ {
+ if (lex_stage_count (&src->pp) <= ofs && !lex_source_get_pp (src))
+ {
+ /* This should not be reachable because we always get a T_ENDCMD at
+ the end of an input file (transformed from T_STOP by
+ lex_source_try_get_pp()) and the macro_expander should always
+ terminate expansion on T_ENDCMD. */
+ NOT_REACHED ();
+ }
- case SCAN_BAD_HEX_DIGIT:
- case SCAN_BAD_UNICODE_DIGIT:
- lex_get_error (src, _("`%c' is not a valid hex digit"),
- (int) token->token.number);
- break;
+ const struct lex_token *t = lex_stage_nth (&src->pp, ofs);
+ const struct macro_token mt = {
+ .token = t->token,
+ .syntax = ss_buffer (&src->buffer[t->token_pos], t->token_len),
+ };
+ const struct msg_location loc = lex_token_location (src, t, t);
+ n_call = macro_call_add (mc, &mt, &loc);
+ }
+ if (n_call < 0)
+ {
+ /* False alarm: no macro expansion after all. Use first token as
+ lookahead. We'll retry macro expansion from the second token next
+ time around. */
+ macro_call_destroy (mc);
+ lex_stage_shift (&src->merge, &src->pp, 1);
+ return true;
+ }
- case SCAN_BAD_UNICODE_LENGTH:
- lex_get_error (src, _("Unicode string contains %d bytes, which is "
- "not in the valid range of 1 to 8 bytes"),
- (int) token->token.number);
- break;
+ /* The first 'n_call' tokens in 'pp', which we bracket as C0...C1, inclusive,
+ are a macro call. (These are likely to be the only tokens in 'pp'.)
+ Expand them. */
+ const struct lex_token *c0 = lex_stage_first (&src->pp);
+ const struct lex_token *c1 = lex_stage_nth (&src->pp, n_call - 1);
+ struct macro_tokens expansion = { .n = 0 };
+ struct msg_location loc = lex_token_location (src, c0, c1);
+ macro_call_expand (mc, src->reader->syntax, &loc, &expansion);
+ macro_call_destroy (mc);
+
+ /* Convert the macro expansion into syntax for possible error messages
+ later. */
+ size_t *ofs = xnmalloc (expansion.n, sizeof *ofs);
+ size_t *len = xnmalloc (expansion.n, sizeof *len);
+ struct string s = DS_EMPTY_INITIALIZER;
+ macro_tokens_to_syntax (&expansion, &s, ofs, len);
+
+ if (settings_get_mprint ())
+ output_item_submit (text_item_create (TEXT_ITEM_LOG, ds_cstr (&s),
+ _("Macro Expansion")));
+
+ /* Append the macro expansion tokens to the lookahead. */
+ if (expansion.n > 0)
+ {
+ /* All expansion tokens share one macro_rep string and one reference
+ count, initialized to the number of tokens that point at it. */
+ char *macro_rep = ds_steal_cstr (&s);
+ size_t *ref_cnt = xmalloc (sizeof *ref_cnt);
+ *ref_cnt = expansion.n;
+ for (size_t i = 0; i < expansion.n; i++)
+ {
+ struct lex_token *token = xmalloc (sizeof *token);
+ *token = (struct lex_token) {
+ .token = expansion.mts[i].token,
+ .token_pos = c0->token_pos,
+ .token_len = (c1->token_pos + c1->token_len) - c0->token_pos,
+ .macro_rep = macro_rep,
+ .ofs = ofs[i],
+ .len = len[i],
+ .ref_cnt = ref_cnt,
+ };
+ lex_stage_push_last (&src->merge, token);
+
+ ss_dealloc (&expansion.mts[i].syntax);
+ }
+ }
+ else
+ ds_destroy (&s);
+ free (expansion.mts);
+ free (ofs);
+ free (len);
- case SCAN_BAD_UNICODE_CODE_POINT:
- lex_get_error (src, _("U+%04X is not a valid Unicode code point"),
- (int) token->token.number);
- break;
+ /* Destroy the tokens for the call. */
+ for (size_t i = 0; i < n_call; i++)
+ lex_stage_pop_first (&src->pp);
- case SCAN_EXPECTED_QUOTE:
- lex_get_error (src, _("Unterminated string constant"));
- break;
+ return expansion.n > 0;
+}
- case SCAN_EXPECTED_EXPONENT:
- lex_get_error (src, _("Missing exponent following `%s'"),
- token->token.string.string);
- break;
+/* Attempts to obtain at least one new token into 'merge' in SRC.
- case SCAN_UNEXPECTED_DOT:
- lex_get_error (src, _("Unexpected `.' in middle of command"));
- break;
+ Returns true if successful, false on failure. In the latter case, SRC is
+ exhausted and 'src->eof' is now true. */
+static bool
+lex_source_get_merge (struct lex_source *src)
+{
+ /* lex_source_try_get_merge() can fail without exhausting SRC, e.g. when a
+ macro call expands to zero tokens, so keep retrying until EOF. */
+ while (!src->eof)
+ if (lex_source_try_get_merge (src))
+ return true;
+ return false;
+}
- case SCAN_UNEXPECTED_CHAR:
- {
- char c_name[16];
- lex_get_error (src, _("Bad character %s in input"),
- uc_name (token->token.number, c_name));
- }
- break;
+/* Attempts to obtain at least one new token into 'parse' in SRC, merging
+ adjacent 'merge' tokens (via merger_add()) into one where required.
- case SCAN_SKIP:
- lex_source_pop_front (src);
- break;
- }
+ Returns true if successful, false on failure. In the latter case, SRC is
+ exhausted and 'src->eof' is now true. */
+static bool
+lex_source_get_parse (struct lex_source *src)
+{
+ struct merger m = MERGER_INIT;
+ struct token out;
+ for (size_t i = 0; ; i++)
+ {
+ while (lex_stage_count (&src->merge) <= i && !lex_source_get_merge (src))
+ {
+ /* We always get a T_ENDCMD at the end of an input file
+ (transformed from T_STOP by lex_source_try_get_pp()) and
+ merger_add() should never return -1 on T_ENDCMD. */
+ assert (lex_stage_is_empty (&src->merge));
+ return false;
+ }
- return true;
+ int retval = merger_add (&m, &lex_stage_nth (&src->merge, i)->token,
+ &out);
+ if (!retval)
+ {
+ /* No merging: the first token passes through unchanged. */
+ lex_source_push_parse (src, lex_stage_take_first (&src->merge));
+ return true;
+ }
+ else if (retval > 0)
+ {
+ /* Add a token that merges all the tokens together. */
+ const struct lex_token *first = lex_stage_first (&src->merge);
+ const struct lex_token *last = lex_stage_nth (&src->merge,
+ retval - 1);
+ bool macro = first->macro_rep && first->macro_rep == last->macro_rep;
+ struct lex_token *t = xmalloc (sizeof *t);
+ *t = (struct lex_token) {
+ .token = out,
+ .token_pos = first->token_pos,
+ .token_len = (last->token_pos - first->token_pos) + last->token_len,
+
+ /* This works well if all the tokens were not expanded from macros,
+ or if they came from the same macro expansion. It just gives up
+ in the other (corner) cases. */
+ .macro_rep = macro ? first->macro_rep : NULL,
+ .ofs = macro ? first->ofs : 0,
+ .len = macro ? (last->ofs - first->ofs) + last->len : 0,
+ .ref_cnt = macro ? first->ref_cnt : NULL,
+ };
+ if (t->ref_cnt)
+ ++*t->ref_cnt;
+ lex_source_push_parse (src, t);
+
+ /* NOTE(review): this 'i' shadows the outer loop counter; the outer
+ loop exits via return here, so it is harmless, but easy to
+ misread. */
+ for (int i = 0; i < retval; i++)
+ lex_stage_pop_first (&src->merge);
+ return true;
+ }
+ }
}
\f
static void
lex_source_push_endcmd__ (struct lex_source *src)
{
- struct lex_token *token = lex_push_token__ (src);
- token->token.type = T_ENDCMD;
- token->token_pos = 0;
- token->token_len = 0;
- token->line_pos = 0;
- token->first_line = 0;
+ /* Only valid on a fresh source: seed 'parse' with a single T_ENDCMD,
+ presumably so that parsing starts at a command boundary -- confirm. */
+ assert (src->n_parse == 0);
+
+ struct lex_token *token = xmalloc (sizeof *token);
+ *token = (struct lex_token) { .token = { .type = T_ENDCMD } };
+ lex_source_push_parse (src, token);
}
-static struct lex_source *
-lex_source_create (struct lex_reader *reader)
+/* Appends TOKEN to SRC's 'parse' array, growing the array as needed.  SRC
+ takes ownership of TOKEN (it is destroyed by lex_source_clear_parse()). */
+static void
+lex_source_push_parse (struct lex_source *src, struct lex_token *token)
{
- struct lex_source *src;
- enum segmenter_mode mode;
-
- src = xzalloc (sizeof *src);
- src->reader = reader;
+ if (src->n_parse >= src->allocated_parse)
+ src->parse = x2nrealloc (src->parse, &src->allocated_parse,
+ sizeof *src->parse);
+ src->parse[src->n_parse++] = token;
+}
- if (reader->syntax == LEX_SYNTAX_AUTO)
- mode = SEG_MODE_AUTO;
- else if (reader->syntax == LEX_SYNTAX_INTERACTIVE)
- mode = SEG_MODE_INTERACTIVE;
- else if (reader->syntax == LEX_SYNTAX_BATCH)
- mode = SEG_MODE_BATCH;
- else
- NOT_REACHED ();
- segmenter_init (&src->segmenter, mode);
+/* Destroys every token in SRC's 'parse' array and resets the array and its
+ read offset to empty.  (The array's storage itself is retained.) */
+static void
+lex_source_clear_parse (struct lex_source *src)
+{
+ for (size_t i = 0; i < src->n_parse; i++)
+ lex_token_destroy (src->parse[i]);
+ src->n_parse = src->parse_ofs = 0;
+}
- src->tokens = deque_init (&src->deque, 4, sizeof *src->tokens);
+/* Creates and returns a new lex_source, with an initial reference count of 1,
+ that reads from READER on behalf of LEXER. */
+static struct lex_source *
+lex_source_create (struct lexer *lexer, struct lex_reader *reader)
+{
+ /* The line-offset table starts with one entry: line 1 begins at offset 0. */
+ size_t allocated_lines = 4;
+ size_t *lines = xmalloc (allocated_lines * sizeof *lines);
+ *lines = 0;
+
+ struct lex_source *src = xmalloc (sizeof *src);
+ *src = (struct lex_source) {
+ .n_refs = 1,
+ .reader = reader,
+ .segmenter = segmenter_init (reader->syntax, false),
+ .lexer = lexer,
+ .lines = lines,
+ .n_lines = 1,
+ .allocated_lines = allocated_lines,
+ };
lex_source_push_endcmd__ (src);
return src;
}
-static void
-lex_source_destroy (struct lex_source *src)
+/* Installs OUTPUT_MSG as the global message handler, with LEXER as its aux
+ data, and wires up the callbacks that let the message subsystem reference
+ lex_sources and fetch source lines for error display. */
+void
+lex_set_message_handler (struct lexer *lexer,
+ void (*output_msg) (const struct msg *,
+ struct lexer *))
+{
+ struct msg_handler msg_handler = {
+ /* NOTE(review): casting the callback from (..., struct lexer *) to
+ (..., void *) and calling it through the cast type is technically
+ undefined behavior in ISO C, though it works on common ABIs. */
+ .output_msg = (void (*)(const struct msg *, void *)) output_msg,
+ .aux = lexer,
+ .lex_source_ref = lex_source_ref,
+ .lex_source_unref = lex_source_unref,
+ .lex_source_get_line = lex_source_get_line,
+ };
+ msg_set_handler (&msg_handler);
+}
+
+/* Increments SRC's reference count and returns SRC.  A null SRC is allowed
+ and returned unchanged. */
+struct lex_source *
+lex_source_ref (const struct lex_source *src_)
+{
+ struct lex_source *src = CONST_CAST (struct lex_source *, src_);
+ if (src)
+ {
+ assert (src->n_refs > 0);
+ src->n_refs++;
+ }
+ return src;
+}
+
+/* Decrements SRC's reference count, destroying SRC, its staged tokens, and
+ its reader when the count reaches zero.  A null SRC is a no-op. */
+void
+lex_source_unref (struct lex_source *src)
{
+ if (!src)
+ return;
+
+ assert (src->n_refs > 0);
+ if (--src->n_refs > 0)
+ return;
+
+ /* Save these pointers before calling the reader's destroy function, which
+ may free the reader structure that holds them. */
char *file_name = src->reader->file_name;
- if (src->reader->class->close != NULL)
- src->reader->class->close (src->reader);
+ char *encoding = src->reader->encoding;
+ if (src->reader->class->destroy != NULL)
+ src->reader->class->destroy (src->reader);
free (file_name);
+ free (encoding);
free (src->buffer);
- while (!deque_is_empty (&src->deque))
- lex_source_pop__ (src);
- free (src->tokens);
- ll_remove (&src->ll);
+ free (src->lines);
+ lex_stage_uninit (&src->pp);
+ lex_stage_uninit (&src->merge);
+ lex_source_clear_parse (src);
+ free (src->parse);
free (src);
}
\f
{
struct lex_reader reader;
struct u8_istream *istream;
- char *file_name;
};
static struct lex_reader_class lex_file_reader_class;
Returns a null pointer if FILE_NAME cannot be opened. */
struct lex_reader *
lex_reader_for_file (const char *file_name, const char *encoding,
- enum lex_syntax_mode syntax,
+ enum segmenter_mode syntax,
enum lex_error_mode error)
{
struct lex_file_reader *r;
r->reader.syntax = syntax;
r->reader.error = error;
r->reader.file_name = xstrdup (file_name);
+ r->reader.encoding = xstrdup_if_nonnull (encoding);
r->reader.line_number = 1;
r->istream = istream;
- r->file_name = xstrdup (file_name);
return &r->reader;
}
ssize_t n_read = u8_istream_read (r->istream, buf, n);
if (n_read < 0)
{
- msg (ME, _("Error reading `%s': %s."), r->file_name, strerror (errno));
+ msg (ME, _("Error reading `%s': %s."), r_->file_name, strerror (errno));
return 0;
}
return n_read;
if (u8_istream_fileno (r->istream) != STDIN_FILENO)
{
if (u8_istream_close (r->istream) != 0)
- msg (ME, _("Error closing `%s': %s."), r->file_name, strerror (errno));
+ msg (ME, _("Error closing `%s': %s."), r_->file_name, strerror (errno));
}
else
u8_istream_free (r->istream);
- free (r->file_name);
free (r);
}
static struct lex_reader_class lex_string_reader_class;
/* Creates and returns a new lex_reader for the contents of S, which must be
- encoded in UTF-8. The new reader takes ownership of S and will free it
+ encoded in ENCODING. The new reader takes ownership of S and will free it
with ss_dealloc() when it is closed. */
struct lex_reader *
-lex_reader_for_substring_nocopy (struct substring s)
+lex_reader_for_substring_nocopy (struct substring s, const char *encoding)
{
struct lex_string_reader *r;
r = xmalloc (sizeof *r);
lex_reader_init (&r->reader, &lex_string_reader_class);
- r->reader.syntax = LEX_SYNTAX_INTERACTIVE;
+ r->reader.syntax = SEG_MODE_AUTO;
+ r->reader.encoding = xstrdup_if_nonnull (encoding);
r->s = s;
r->offset = 0;
}
/* Creates and returns a new lex_reader for a copy of null-terminated string S,
- which must be encoded in UTF-8. The caller retains ownership of S. */
+ which must be encoded in ENCODING. The caller retains ownership of S. */
struct lex_reader *
-lex_reader_for_string (const char *s)
+lex_reader_for_string (const char *s, const char *encoding)
{
- struct substring ss;
- ss_alloc_substring (&ss, ss_cstr (s));
- return lex_reader_for_substring_nocopy (ss);
+ /* ss_clone() copies S, so the new reader owns the copy and the caller
+ keeps S, as promised above. */
+ return lex_reader_for_substring_nocopy (ss_clone (ss_cstr (s)), encoding);
}
/* Formats FORMAT as a printf()-like format string and creates and returns a
new lex_reader for the formatted result. */
struct lex_reader *
-lex_reader_for_format (const char *format, ...)
+lex_reader_for_format (const char *format, const char *encoding, ...)
{
struct lex_reader *r;
va_list args;
- va_start (args, format);
- r = lex_reader_for_substring_nocopy (ss_cstr (xvasprintf (format, args)));
+ va_start (args, encoding);
+ r = lex_reader_for_substring_nocopy (ss_cstr (xvasprintf (format, args)), encoding);
va_end (args);
return r;
lex_string_read,
lex_string_close
};
+\f
+/* Returns the text of 1-based line LINE within SRC as a substring of SRC's
+ buffer, or an empty substring if LINE is out of range.  (NOTE(review):
+ interior lines include their trailing new-line -- src->lines[] holds
+ offsets just past each '\n' -- while the last line's text stops before the
+ next '\n' or at end of buffer; confirm callers expect this asymmetry.) */
+struct substring
+lex_source_get_line (const struct lex_source *src, int line)
+{
+ if (line < 1 || line > src->n_lines)
+ return ss_empty ();
+
+ size_t ofs = src->lines[line - 1];
+ size_t end;
+ if (line < src->n_lines)
+ end = src->lines[line];
+ else
+ {
+ const char *newline = memchr (src->buffer + ofs, '\n', src->length - ofs);
+ end = newline ? newline - src->buffer : src->length;
+ }
+ return ss_buffer (&src->buffer[ofs], end - ofs);
+}