/* PSPP - a program for statistical analysis.
- Copyright (C) 1997-9, 2000, 2006, 2009 Free Software Foundation, Inc.
+ Copyright (C) 1997-9, 2000, 2006, 2009, 2010, 2011, 2013, 2016 Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include <config.h>
-#include "lexer.h"
-#include <libpspp/message.h>
-#include <c-ctype.h>
-#include <c-strtod.h>
+
+#include "language/lexer/lexer.h"
+
#include <errno.h>
+#include <fcntl.h>
#include <limits.h>
#include <math.h>
#include <stdarg.h>
-#include <stdint.h>
#include <stdlib.h>
-#include <libpspp/assertion.h>
-#include <language/command.h>
-#include <libpspp/message.h>
-#include <data/settings.h>
-#include <libpspp/getl.h>
-#include <libpspp/str.h>
-#include <output/journal.h>
-
-#include "xalloc.h"
+#include <string.h>
+#include <unictype.h>
+#include <unistd.h>
+#include <unistr.h>
+
+#include "language/command.h"
+#include "language/lexer/macro.h"
+#include "language/lexer/scan.h"
+#include "language/lexer/segment.h"
+#include "language/lexer/token.h"
+#include "libpspp/assertion.h"
+#include "libpspp/cast.h"
+#include "libpspp/deque.h"
+#include "libpspp/i18n.h"
+#include "libpspp/ll.h"
+#include "libpspp/message.h"
+#include "libpspp/misc.h"
+#include "libpspp/str.h"
+#include "libpspp/u8-istream.h"
+#include "output/journal.h"
+#include "output/output-item.h"
+
+#include "gl/c-ctype.h"
+#include "gl/minmax.h"
+#include "gl/xalloc.h"
+#include "gl/xmemdup0.h"
#include "gettext.h"
#define _(msgid) gettext (msgid)
#define N_(msgid) msgid
+/* A token within a lex_source. */
+struct lex_token
+ {
+ /* The regular token information. */
+ struct token token;
-#define DUMP_TOKENS 0
+ /* For a token obtained through the lexer in an ordinary way, this is the
+ location of the token in terms of the lex_source's buffer.
+ For a token produced through macro expansion, this is the entire macro
+ call.
+ src->tail <= line_pos <= token_pos <= src->head. */
+ size_t token_pos; /* Start of token. */
+ size_t token_len; /* Length of source for token in bytes. */
+ size_t line_pos; /* Start of line containing token_pos. */
+ int first_line; /* Line number at token_pos. */
-struct lexer
-{
- struct string line_buffer;
+ /* For a token obtained through macro expansion, this is just this token.
- struct source_stream *ss;
+ For a token obtained through the lexer in an ordinary way, these are
+ nulls and zeros. */
+ char *macro_rep; /* The whole macro expansion. */
+ size_t ofs; /* Offset of this token in macro_rep. */
+ size_t len; /* Length of this token in macro_rep. */
+ size_t *ref_cnt; /* Number of lex_tokens that refer to macro_rep. */
+ };
- int token; /* Current token. */
- double tokval; /* T_POS_NUM, T_NEG_NUM: the token's value. */
+static void
+lex_token_destroy (struct lex_token *t)
+{
+ token_uninit (&t->token);
+ if (t->ref_cnt)
+ {
+ assert (*t->ref_cnt > 0);
+ if (!--*t->ref_cnt)
+ {
+ free (t->macro_rep);
+ free (t->ref_cnt);
+ }
+ }
+ free (t);
+}
+\f
+/* A deque of lex_tokens that comprises one stage in the token pipeline in a
+ lex_source. */
+struct lex_stage
+ {
+ struct deque deque;
+ struct lex_token **tokens;
+ };
- char tokid [VAR_NAME_LEN + 1]; /* T_ID: the identifier. */
+static void lex_stage_clear (struct lex_stage *);
+static void lex_stage_uninit (struct lex_stage *);
- struct string tokstr; /* T_ID, T_STRING: token string value.
- For T_ID, this is not truncated as is
- tokid. */
+static size_t lex_stage_count (const struct lex_stage *);
+static bool lex_stage_is_empty (const struct lex_stage *);
- char *prog; /* Pointer to next token in line_buffer. */
- bool dot; /* True only if this line ends with a terminal dot. */
+static struct lex_token *lex_stage_last (struct lex_stage *);
+static struct lex_token *lex_stage_first (struct lex_stage *);
+static struct lex_token *lex_stage_nth (struct lex_stage *, size_t ofs);
- int put_token ; /* If nonzero, next token returned by lex_get().
- Used only in exceptional circumstances. */
+static void lex_stage_push_last (struct lex_stage *, struct lex_token *);
+static void lex_stage_pop_first (struct lex_stage *);
- struct string put_tokstr;
- double put_tokval;
-};
+static void lex_stage_shift (struct lex_stage *dst, struct lex_stage *src,
+ size_t n);
+/* Deletes all the tokens from STAGE. */
+static void
+lex_stage_clear (struct lex_stage *stage)
+{
+ while (!deque_is_empty (&stage->deque))
+ lex_stage_pop_first (stage);
+}
-static int parse_id (struct lexer *);
+/* Deletes all the tokens from STAGE and frees storage for the deque. */
+static void
+lex_stage_uninit (struct lex_stage *stage)
+{
+ lex_stage_clear (stage);
+ free (stage->tokens);
+}
-/* How a string represents its contents. */
-enum string_type
- {
- CHARACTER_STRING, /* Characters. */
- BINARY_STRING, /* Binary digits. */
- OCTAL_STRING, /* Octal digits. */
- HEX_STRING /* Hexadecimal digits. */
- };
+/* Returns true if STAGE contains no tokens, otherwise false. */
+static bool
+lex_stage_is_empty (const struct lex_stage *stage)
+{
+ return deque_is_empty (&stage->deque);
+}
-static int parse_string (struct lexer *, enum string_type);
+/* Returns the number of tokens in STAGE. */
+static size_t
+lex_stage_count (const struct lex_stage *stage)
+{
+ return deque_count (&stage->deque);
+}
-#if DUMP_TOKENS
-static void dump_token (struct lexer *);
-#endif
-\f
-/* Initialization. */
+/* Returns the last token in STAGE, which must be nonempty. The last token is
+ the one accessed with the greatest lookahead. */
+static struct lex_token *
+lex_stage_last (struct lex_stage *stage)
+{
+ return stage->tokens[deque_front (&stage->deque, 0)];
+}
-/* Initializes the lexer. */
-struct lexer *
-lex_create (struct source_stream *ss)
+/* Returns the first token in STAGE, which must be nonempty.
+ The first token is the one accessed with the least lookahead. */
+static struct lex_token *
+lex_stage_first (struct lex_stage *stage)
{
- struct lexer *lexer = xzalloc (sizeof (*lexer));
+ return lex_stage_nth (stage, 0);
+}
- ds_init_empty (&lexer->tokstr);
- ds_init_empty (&lexer->put_tokstr);
- ds_init_empty (&lexer->line_buffer);
- lexer->ss = ss;
+/* Returns the token at the given INDEX in STAGE. The first token (with the least
+ lookahead) is 0, the second token is 1, and so on. There must be at least
+ INDEX + 1 tokens in STAGE. */
+static struct lex_token *
+lex_stage_nth (struct lex_stage *stage, size_t index)
+{
+ return stage->tokens[deque_back (&stage->deque, index)];
+}
- return lexer;
+/* Adds TOKEN so that it becomes the last token in STAGE. */
+static void
+lex_stage_push_last (struct lex_stage *stage, struct lex_token *token)
+{
+ if (deque_is_full (&stage->deque))
+ stage->tokens = deque_expand (&stage->deque, stage->tokens,
+ sizeof *stage->tokens);
+ stage->tokens[deque_push_front (&stage->deque)] = token;
}
-struct source_stream *
-lex_get_source_stream (const struct lexer *lex)
+/* Removes the first token from STAGE and uninitializes it. */
+static void
+lex_stage_pop_first (struct lex_stage *stage)
{
- return lex->ss;
+ lex_token_destroy (stage->tokens[deque_pop_back (&stage->deque)]);
}
-enum syntax_mode
-lex_current_syntax_mode (const struct lexer *lex)
+/* Removes the first N tokens from SRC, appending them to DST as the last
+ tokens. */
+static void
+lex_stage_shift (struct lex_stage *dst, struct lex_stage *src, size_t n)
{
- return source_stream_current_syntax_mode (lex->ss);
+ for (size_t i = 0; i < n; i++)
+ {
+ lex_stage_push_last (dst, lex_stage_first (src));
+ deque_pop_back (&src->deque);
+ }
}
-enum error_mode
-lex_current_error_mode (const struct lexer *lex)
+/* A source of tokens, corresponding to a syntax file.
+
+ This is conceptually a lex_reader wrapped with everything needed to convert
+ its UTF-8 bytes into tokens. */
+struct lex_source
+ {
+ struct ll ll; /* In lexer's list of sources. */
+ struct lex_reader *reader;
+ struct lexer *lexer;
+ struct segmenter segmenter;
+ bool eof; /* True if T_STOP was read from 'reader'. */
+
+ /* Buffer of UTF-8 bytes. */
+ char *buffer;
+ size_t allocated; /* Number of bytes allocated. */
+ size_t tail; /* &buffer[0] offset into UTF-8 source. */
+ size_t head; /* &buffer[head - tail] offset into source. */
+
+ /* Positions in source file, tail <= pos <= head for each member here. */
+ size_t journal_pos; /* First byte not yet output to journal. */
+ size_t seg_pos; /* First byte not yet scanned as token. */
+ size_t line_pos; /* First byte of line containing seg_pos. */
+
+ int n_newlines; /* Number of new-lines up to seg_pos. */
+ bool suppress_next_newline;
+
+ /* Tokens.
+
+ This is a pipeline with the following stages. Each token eventually
+ made available to the parser passes through all of these stages. The stages
+ are named after the processing that happens in each one.
+
+ Initially, tokens come from the segmenter and scanner to 'pp':
+
+ - pp: Tokens that need to pass through the macro preprocessor to end up
+ in 'merge'.
+
+ - merge: Tokens that need to pass through scan_merge() to end up in
+ 'lookahead'.
+
+ - lookahead: Tokens available to the client for parsing. */
+ struct lex_stage pp;
+ struct lex_stage merge;
+ struct lex_stage lookahead;
+ };
+
+static struct lex_source *lex_source_create (struct lexer *,
+ struct lex_reader *);
+static void lex_source_destroy (struct lex_source *);
+
+/* Lexer. */
+struct lexer
+ {
+ struct ll_list sources; /* Contains "struct lex_source"s. */
+ struct macro_set *macros;
+ };
+
+static struct lex_source *lex_source__ (const struct lexer *);
+static char *lex_source_get_syntax__ (const struct lex_source *,
+ int n0, int n1);
+static const struct lex_token *lex_next__ (const struct lexer *, int n);
+static void lex_source_push_endcmd__ (struct lex_source *);
+
+static bool lex_source_get_lookahead (struct lex_source *);
+static void lex_source_error_valist (struct lex_source *, int n0, int n1,
+ const char *format, va_list)
+ PRINTF_FORMAT (4, 0);
+static const struct lex_token *lex_source_next__ (const struct lex_source *,
+ int n);
+\f
+/* Initializes READER with the specified CLASS and otherwise some reasonable
+ defaults. The caller should fill in the other members as desired. */
+void
+lex_reader_init (struct lex_reader *reader,
+ const struct lex_reader_class *class)
{
- return source_stream_current_error_mode (lex->ss);
+ reader->class = class;
+ reader->syntax = SEG_MODE_AUTO;
+ reader->error = LEX_ERROR_CONTINUE;
+ reader->file_name = NULL;
+ reader->encoding = NULL;
+ reader->line_number = 0;
+ reader->eof = false;
}
+/* Frees any file name already in READER and replaces it by a copy of
+ FILE_NAME, or if FILE_NAME is null then clears any existing name. */
+void
+lex_reader_set_file_name (struct lex_reader *reader, const char *file_name)
+{
+ free (reader->file_name);
+ reader->file_name = xstrdup_if_nonnull (file_name);
+}
+\f
+/* Creates and returns a new lexer. */
+struct lexer *
+lex_create (void)
+{
+ struct lexer *lexer = xmalloc (sizeof *lexer);
+ *lexer = (struct lexer) {
+ .sources = LL_INITIALIZER (lexer->sources),
+ .macros = macro_set_create (),
+ };
+ return lexer;
+}
+/* Destroys LEXER. */
void
lex_destroy (struct lexer *lexer)
{
- if ( NULL != lexer )
+ if (lexer != NULL)
{
- ds_destroy (&lexer->put_tokstr);
- ds_destroy (&lexer->tokstr);
- ds_destroy (&lexer->line_buffer);
+ struct lex_source *source, *next;
+ ll_for_each_safe (source, next, struct lex_source, ll, &lexer->sources)
+ lex_source_destroy (source);
+ macro_set_destroy (lexer->macros);
free (lexer);
}
}
-\f
-/* Common functions. */
+/* Adds M to LEXER's set of macros. M replaces any existing macro with the
+ same name. Takes ownership of M. */
+void
+lex_define_macro (struct lexer *lexer, struct macro *m)
+{
+ macro_set_add (lexer->macros, m);
+}
-/* Copies put_token, lexer->put_tokstr, put_tokval into token, tokstr,
- tokval, respectively, and sets tokid appropriately. */
-static void
-restore_token (struct lexer *lexer)
+/* Inserts READER into LEXER so that the next token read by LEXER comes from
+ READER. Before calling, LEXER must either be empty or at a T_ENDCMD
+ token. */
+void
+lex_include (struct lexer *lexer, struct lex_reader *reader)
{
- assert (lexer->put_token != 0);
- lexer->token = lexer->put_token;
- ds_assign_string (&lexer->tokstr, &lexer->put_tokstr);
- str_copy_trunc (lexer->tokid, sizeof lexer->tokid, ds_cstr (&lexer->tokstr));
- lexer->tokval = lexer->put_tokval;
- lexer->put_token = 0;
+ assert (ll_is_empty (&lexer->sources) || lex_token (lexer) == T_ENDCMD);
+ ll_push_head (&lexer->sources, &lex_source_create (lexer, reader)->ll);
}
-/* Copies token, tokstr, lexer->tokval into lexer->put_token, put_tokstr,
- put_lexer->tokval respectively. */
-static void
-save_token (struct lexer *lexer)
+/* Appends READER to LEXER, so that it will be read after all other current
+ readers have already been read. */
+void
+lex_append (struct lexer *lexer, struct lex_reader *reader)
{
- lexer->put_token = lexer->token;
- ds_assign_string (&lexer->put_tokstr, &lexer->tokstr);
- lexer->put_tokval = lexer->tokval;
+ ll_push_tail (&lexer->sources, &lex_source_create (lexer, reader)->ll);
}
+\f
+/* Advancing. */
-/* Parses a single token, setting appropriate global variables to
- indicate the token's attributes. */
+/* Advances LEXER to the next token, consuming the current token. */
void
lex_get (struct lexer *lexer)
{
- /* Find a token. */
- for (;;)
- {
- if (NULL == lexer->prog && ! lex_get_line (lexer) )
- {
- lexer->token = T_STOP;
- return;
- }
+ struct lex_source *src;
+
+ src = lex_source__ (lexer);
+ if (src == NULL)
+ return;
+
+ if (!lex_stage_is_empty (&src->lookahead))
+ lex_stage_pop_first (&src->lookahead);
+
+ while (lex_stage_is_empty (&src->lookahead))
+ if (!lex_source_get_lookahead (src))
+ {
+ lex_source_destroy (src);
+ src = lex_source__ (lexer);
+ if (src == NULL)
+ return;
+ }
+}
+
+/* Advances LEXER by N tokens. */
+void
+lex_get_n (struct lexer *lexer, size_t n)
+{
+ while (n-- > 0)
+ lex_get (lexer);
+}
+\f
+/* Issuing errors. */
+
+/* Prints a syntax error message containing the current token and
+   the message constructed from FORMAT (if FORMAT is non-null). */
+void
+lex_error (struct lexer *lexer, const char *format, ...)
+{
+ va_list args;
+
+ va_start (args, format);
+ lex_next_error_valist (lexer, 0, 0, format, args);
+ va_end (args);
+}
+
+/* Prints a syntax error message containing the current token and
+   the message constructed from FORMAT (if FORMAT is non-null). */
+void
+lex_error_valist (struct lexer *lexer, const char *format, va_list args)
+{
+ lex_next_error_valist (lexer, 0, 0, format, args);
+}
+
+/* Prints a syntax error message containing the current token and
+   the message constructed from FORMAT (if FORMAT is non-null). */
+void
+lex_next_error (struct lexer *lexer, int n0, int n1, const char *format, ...)
+{
+ va_list args;
+
+ va_start (args, format);
+ lex_next_error_valist (lexer, n0, n1, format, args);
+ va_end (args);
+}
+
+/* Prints a syntax error message saying that one of the strings provided as
+ varargs, up to the first NULL, is expected. */
+void
+(lex_error_expecting) (struct lexer *lexer, ...)
+{
+ va_list args;
- /* If a token was pushed ahead, return it. */
- if (lexer->put_token)
+ va_start (args, lexer);
+ lex_error_expecting_valist (lexer, args);
+ va_end (args);
+}
+
+/* Prints a syntax error message saying that one of the options provided in
+ ARGS, up to the first NULL, is expected. */
+void
+lex_error_expecting_valist (struct lexer *lexer, va_list args)
+{
+ enum { MAX_OPTIONS = 9 };
+ const char *options[MAX_OPTIONS];
+ int n = 0;
+ while (n < MAX_OPTIONS)
{
- restore_token (lexer);
-#if DUMP_TOKENS
- dump_token (lexer);
-#endif
- return;
+ const char *option = va_arg (args, const char *);
+ if (!option)
+ break;
+
+ options[n++] = option;
}
+ lex_error_expecting_array (lexer, options, n);
+}
- for (;;)
+void
+lex_error_expecting_array (struct lexer *lexer, const char **options, size_t n)
+{
+ switch (n)
{
- /* Skip whitespace. */
- while (c_isspace ((unsigned char) *lexer->prog))
- lexer->prog++;
-
- if (*lexer->prog)
- break;
-
- if (lexer->dot)
- {
- lexer->dot = 0;
- lexer->token = '.';
-#if DUMP_TOKENS
- dump_token (lexer);
-#endif
- return;
- }
- else if (!lex_get_line (lexer))
- {
- lexer->prog = NULL;
- lexer->token = T_STOP;
-#if DUMP_TOKENS
- dump_token (lexer);
-#endif
- return;
- }
-
- if (lexer->put_token)
- {
- restore_token (lexer);
-#if DUMP_TOKENS
- dump_token (lexer);
-#endif
- return;
- }
- }
+ case 0:
+ lex_error (lexer, NULL);
+ break;
+ case 1:
+ lex_error (lexer, _("expecting %s"), options[0]);
+ break;
- /* Actually parse the token. */
- ds_clear (&lexer->tokstr);
+ case 2:
+ lex_error (lexer, _("expecting %s or %s"), options[0], options[1]);
+ break;
- switch (*lexer->prog)
- {
- case '-': case '.':
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9':
- {
- char *tail;
-
- /* `-' can introduce a negative number, or it can be a
- token by itself. If it is not followed by a digit or a
- decimal point, it is definitely not a number.
- Otherwise, it might be either, but most of the time we
- want it as a number. When the syntax calls for a `-'
- token, lex_negative_to_dash() must be used to break
- negative numbers into two tokens. */
- if (*lexer->prog == '-')
- {
- ds_put_char (&lexer->tokstr, *lexer->prog++);
- while (c_isspace ((unsigned char) *lexer->prog))
- lexer->prog++;
-
- if (!c_isdigit ((unsigned char) *lexer->prog) && *lexer->prog != '.')
- {
- lexer->token = '-';
- break;
- }
- lexer->token = T_NEG_NUM;
- }
- else
- lexer->token = T_POS_NUM;
-
- /* Parse the number, copying it into tokstr. */
- while (c_isdigit ((unsigned char) *lexer->prog))
- ds_put_char (&lexer->tokstr, *lexer->prog++);
- if (*lexer->prog == '.')
- {
- ds_put_char (&lexer->tokstr, *lexer->prog++);
- while (c_isdigit ((unsigned char) *lexer->prog))
- ds_put_char (&lexer->tokstr, *lexer->prog++);
- }
- if (*lexer->prog == 'e' || *lexer->prog == 'E')
- {
- ds_put_char (&lexer->tokstr, *lexer->prog++);
- if (*lexer->prog == '+' || *lexer->prog == '-')
- ds_put_char (&lexer->tokstr, *lexer->prog++);
- while (c_isdigit ((unsigned char) *lexer->prog))
- ds_put_char (&lexer->tokstr, *lexer->prog++);
- }
-
- /* Parse as floating point. */
- lexer->tokval = c_strtod (ds_cstr (&lexer->tokstr), &tail);
- if (*tail)
- {
- msg (SE, _("%s does not form a valid number."),
- ds_cstr (&lexer->tokstr));
- lexer->tokval = 0.0;
-
- ds_clear (&lexer->tokstr);
- ds_put_char (&lexer->tokstr, '0');
- }
-
- break;
- }
-
- case '\'': case '"':
- lexer->token = parse_string (lexer, CHARACTER_STRING);
- break;
-
- case '(': case ')': case ',': case '=': case '+': case '/':
- case '[': case ']':
- lexer->token = *lexer->prog++;
- break;
-
- case '*':
- if (*++lexer->prog == '*')
- {
- lexer->prog++;
- lexer->token = T_EXP;
- }
- else
- lexer->token = '*';
- break;
-
- case '<':
- if (*++lexer->prog == '=')
- {
- lexer->prog++;
- lexer->token = T_LE;
- }
- else if (*lexer->prog == '>')
- {
- lexer->prog++;
- lexer->token = T_NE;
- }
- else
- lexer->token = T_LT;
- break;
-
- case '>':
- if (*++lexer->prog == '=')
- {
- lexer->prog++;
- lexer->token = T_GE;
- }
- else
- lexer->token = T_GT;
- break;
-
- case '~':
- if (*++lexer->prog == '=')
- {
- lexer->prog++;
- lexer->token = T_NE;
- }
- else
- lexer->token = T_NOT;
- break;
-
- case '&':
- lexer->prog++;
- lexer->token = T_AND;
- break;
-
- case '|':
- lexer->prog++;
- lexer->token = T_OR;
- break;
-
- case 'b': case 'B':
- if (lexer->prog[1] == '\'' || lexer->prog[1] == '"')
- lexer->token = parse_string (lexer, BINARY_STRING);
- else
- lexer->token = parse_id (lexer);
- break;
+ case 3:
+ lex_error (lexer, _("expecting %s, %s, or %s"), options[0], options[1],
+ options[2]);
+ break;
- case 'o': case 'O':
- if (lexer->prog[1] == '\'' || lexer->prog[1] == '"')
- lexer->token = parse_string (lexer, OCTAL_STRING);
- else
- lexer->token = parse_id (lexer);
- break;
+ case 4:
+ lex_error (lexer, _("expecting %s, %s, %s, or %s"),
+ options[0], options[1], options[2], options[3]);
+ break;
- case 'x': case 'X':
- if (lexer->prog[1] == '\'' || lexer->prog[1] == '"')
- lexer->token = parse_string (lexer, HEX_STRING);
- else
- lexer->token = parse_id (lexer);
- break;
+ case 5:
+ lex_error (lexer, _("expecting %s, %s, %s, %s, or %s"),
+ options[0], options[1], options[2], options[3], options[4]);
+ break;
- default:
- if (lex_is_id1 (*lexer->prog))
- {
- lexer->token = parse_id (lexer);
- break;
- }
- else
- {
- unsigned char c = *lexer->prog++;
- char *c_name = xasprintf (c_isgraph (c) ? "%c" : "\\%o", c);
- msg (SE, _("Bad character in input: `%s'."), c_name);
- free (c_name);
- continue;
- }
- }
+ case 6:
+ lex_error (lexer, _("expecting %s, %s, %s, %s, %s, or %s"),
+ options[0], options[1], options[2], options[3], options[4],
+ options[5]);
break;
- }
-#if DUMP_TOKENS
- dump_token (lexer);
-#endif
-}
+ case 7:
+ lex_error (lexer, _("expecting %s, %s, %s, %s, %s, %s, or %s"),
+ options[0], options[1], options[2], options[3], options[4],
+ options[5], options[6]);
+ break;
-/* Parses an identifier at the current position into tokid and
- tokstr.
- Returns the correct token type. */
-static int
-parse_id (struct lexer *lexer)
-{
- struct substring rest_of_line
- = ss_substr (ds_ss (&lexer->line_buffer),
- ds_pointer_to_position (&lexer->line_buffer, lexer->prog),
- SIZE_MAX);
- struct substring id = ss_head (rest_of_line,
- lex_id_get_length (rest_of_line));
- lexer->prog += ss_length (id);
+ case 8:
+ lex_error (lexer, _("expecting %s, %s, %s, %s, %s, %s, %s, or %s"),
+ options[0], options[1], options[2], options[3], options[4],
+ options[5], options[6], options[7]);
+ break;
- ds_assign_substring (&lexer->tokstr, id);
- str_copy_trunc (lexer->tokid, sizeof lexer->tokid, ds_cstr (&lexer->tokstr));
- return lex_id_to_token (id);
+ default:
+ lex_error (lexer, NULL);
+ }
}
-/* Reports an error to the effect that subcommand SBC may only be
- specified once. */
+/* Reports an error to the effect that subcommand SBC may only be specified
+ once.
+
+ This function does not take a lexer as an argument or use lex_error(),
+ because the result would ordinarily just be redundant: "Syntax error at
+ SUBCOMMAND: Subcommand SUBCOMMAND may only be specified once.", which does
+ not help the user find the error. */
void
lex_sbc_only_once (const char *sbc)
{
msg (SE, _("Subcommand %s may only be specified once."), sbc);
}
-/* Reports an error to the effect that subcommand SBC is
- missing. */
+/* Reports an error to the effect that subcommand SBC is missing.
+
+ This function does not take a lexer as an argument or use lex_error(),
+ because a missing subcommand can normally be detected only after the whole
+ command has been parsed, and so lex_error() would always report "Syntax
+ error at end of command", which does not help the user find the error. */
+void
+lex_sbc_missing (const char *sbc)
+{
+ msg (SE, _("Required subcommand %s was not specified."), sbc);
+}
+
+/* Reports an error to the effect that specification SPEC may only be specified
+ once within subcommand SBC. */
+void
+lex_spec_only_once (struct lexer *lexer, const char *sbc, const char *spec)
+{
+ lex_error (lexer, _("%s may only be specified once within subcommand %s"),
+ spec, sbc);
+}
+
+/* Reports an error to the effect that specification SPEC is missing within
+ subcommand SBC. */
void
-lex_sbc_missing (struct lexer *lexer, const char *sbc)
+lex_spec_missing (struct lexer *lexer, const char *sbc, const char *spec)
{
- lex_error (lexer, _("missing required subcommand %s"), sbc);
+ lex_error (lexer, _("Required %s specification missing from %s subcommand"),
+            spec, sbc);
}
/* Prints a syntax error message containing the current token and
given message MESSAGE (if non-null). */
void
-lex_error (struct lexer *lexer, const char *message, ...)
+lex_next_error_valist (struct lexer *lexer, int n0, int n1,
+ const char *format, va_list args)
{
- char *token_rep;
- char where[128];
+ struct lex_source *src = lex_source__ (lexer);
- token_rep = lex_token_representation (lexer);
- if (lexer->token == T_STOP)
- strcpy (where, "end of file");
- else if (lexer->token == '.')
- strcpy (where, "end of command");
+ if (src != NULL)
+ lex_source_error_valist (src, n0, n1, format, args);
else
- snprintf (where, sizeof where, "`%s'", token_rep);
- free (token_rep);
-
- if (message)
{
- char buf[1024];
- va_list args;
+ struct string s;
- va_start (args, message);
- vsnprintf (buf, 1024, message, args);
- va_end (args);
-
- msg (SE, _("Syntax error %s at %s."), buf, where);
+ ds_init_empty (&s);
+ ds_put_format (&s, _("Syntax error at end of input"));
+ if (format != NULL)
+ {
+ ds_put_cstr (&s, ": ");
+ ds_put_vformat (&s, format, args);
+ }
+ if (ds_last (&s) != '.')
+ ds_put_byte (&s, '.');
+ msg (SE, "%s", ds_cstr (&s));
+ ds_destroy (&s);
}
- else
- msg (SE, _("Syntax error at %s."), where);
}
/* Checks that we're at end of command.
int
lex_end_of_command (struct lexer *lexer)
{
- if (lexer->token != '.')
+ if (lex_token (lexer) != T_ENDCMD && lex_token (lexer) != T_STOP)
{
lex_error (lexer, _("expecting end of command"));
return CMD_FAILURE;
/* Returns true if the current token is a number. */
bool
-lex_is_number (struct lexer *lexer)
+lex_is_number (const struct lexer *lexer)
{
- return lexer->token == T_POS_NUM || lexer->token == T_NEG_NUM;
+ return lex_next_is_number (lexer, 0);
}
-
/* Returns true if the current token is a string. */
bool
-lex_is_string (struct lexer *lexer)
+lex_is_string (const struct lexer *lexer)
{
- return lexer->token == T_STRING;
+ return lex_next_is_string (lexer, 0);
}
-
/* Returns the value of the current token, which must be a
floating point number. */
double
-lex_number (struct lexer *lexer)
+lex_number (const struct lexer *lexer)
{
- assert (lex_is_number (lexer));
- return lexer->tokval;
+ return lex_next_number (lexer, 0);
}
/* Returns true iff the current token is an integer. */
bool
-lex_is_integer (struct lexer *lexer)
+lex_is_integer (const struct lexer *lexer)
{
- return (lex_is_number (lexer)
- && lexer->tokval > LONG_MIN
- && lexer->tokval <= LONG_MAX
- && floor (lexer->tokval) == lexer->tokval);
+ return lex_next_is_integer (lexer, 0);
}
/* Returns the value of the current token, which must be an
integer. */
long
-lex_integer (struct lexer *lexer)
+lex_integer (const struct lexer *lexer)
+{
+ return lex_next_integer (lexer, 0);
+}
+\f
+/* Token testing functions with lookahead.
+
+ A value of 0 for N as an argument to any of these functions refers to the
+ current token. Lookahead is limited to the current command. Any N greater
+ than the number of tokens remaining in the current command will be treated
+ as referring to a T_ENDCMD token. */
+
+/* Returns true if the token N ahead of the current token is a number. */
+bool
+lex_next_is_number (const struct lexer *lexer, int n)
+{
+ return token_is_number (lex_next (lexer, n));
+}
+
+/* Returns true if the token N ahead of the current token is a string. */
+bool
+lex_next_is_string (const struct lexer *lexer, int n)
+{
+ return token_is_string (lex_next (lexer, n));
+}
+
+/* Returns the value of the token N ahead of the current token, which must be a
+ floating point number. */
+double
+lex_next_number (const struct lexer *lexer, int n)
+{
+ return token_number (lex_next (lexer, n));
+}
+
+/* Returns true if the token N ahead of the current token is an integer. */
+bool
+lex_next_is_integer (const struct lexer *lexer, int n)
+{
+ return token_is_integer (lex_next (lexer, n));
+}
+
+/* Returns the value of the token N ahead of the current token, which must be
+ an integer. */
+long
+lex_next_integer (const struct lexer *lexer, int n)
{
- assert (lex_is_integer (lexer));
- return lexer->tokval;
+ return token_integer (lex_next (lexer, n));
}
\f
/* Token matching functions. */
-/* If TOK is the current token, skips it and returns true
+/* If the current token has the specified TYPE, skips it and returns true.
Otherwise, returns false. */
bool
-lex_match (struct lexer *lexer, int t)
+lex_match (struct lexer *lexer, enum token_type type)
{
- if (lexer->token == t)
+ if (lex_token (lexer) == type)
{
lex_get (lexer);
return true;
return false;
}
-/* If the current token is the identifier S, skips it and returns
- true. The identifier may be abbreviated to its first three
- letters.
- Otherwise, returns false. */
+/* If the current token matches IDENTIFIER, skips it and returns true.
+ IDENTIFIER may be abbreviated to its first three letters. Otherwise,
+ returns false.
+
+ IDENTIFIER must be an ASCII string. */
bool
-lex_match_id (struct lexer *lexer, const char *s)
+lex_match_id (struct lexer *lexer, const char *identifier)
{
- return lex_match_id_n (lexer, s, 3);
+ return lex_match_id_n (lexer, identifier, 3);
}
-/* If the current token is the identifier S, skips it and returns
- true. The identifier may be abbreviated to its first N
- letters.
- Otherwise, returns false. */
+/* If the current token is IDENTIFIER, skips it and returns true. IDENTIFIER
+ may be abbreviated to its first N letters. Otherwise, returns false.
+
+ IDENTIFIER must be an ASCII string. */
bool
-lex_match_id_n (struct lexer *lexer, const char *s, size_t n)
+lex_match_id_n (struct lexer *lexer, const char *identifier, size_t n)
{
- if (lexer->token == T_ID
- && lex_id_match_n (ss_cstr (s), ss_cstr (lexer->tokid), n))
+ if (lex_token (lexer) == T_ID
+ && lex_id_match_n (ss_cstr (identifier), lex_tokss (lexer), n))
{
lex_get (lexer);
return true;
return false;
}
-/* If the current token is integer N, skips it and returns true.
- Otherwise, returns false. */
+/* If the current token is integer X, skips it and returns true. Otherwise,
+ returns false. */
bool
lex_match_int (struct lexer *lexer, int x)
{
\f
/* Forced matches. */
-/* If this token is identifier S, fetches the next token and returns
- nonzero.
- Otherwise, reports an error and returns zero. */
+/* If this token is IDENTIFIER, skips it and returns true. IDENTIFIER may be
+ abbreviated to its first 3 letters. Otherwise, reports an error and returns
+ false.
+
+ IDENTIFIER must be an ASCII string. */
bool
-lex_force_match_id (struct lexer *lexer, const char *s)
+lex_force_match_id (struct lexer *lexer, const char *identifier)
{
- if (lex_match_id (lexer, s))
+ if (lex_match_id (lexer, identifier))
return true;
else
{
- lex_error (lexer, _("expecting `%s'"), s);
+ lex_error_expecting (lexer, identifier);
return false;
}
}
-/* If the current token is T, skips the token. Otherwise, reports an
- error and returns from the current function with return value false. */
+/* If the current token has the specified TYPE, skips it and returns true.
+ Otherwise, reports an error and returns false. */
bool
-lex_force_match (struct lexer *lexer, int t)
+lex_force_match (struct lexer *lexer, enum token_type type)
{
- if (lexer->token == t)
+ if (lex_token (lexer) == type)
{
lex_get (lexer);
return true;
}
else
{
- lex_error (lexer, _("expecting `%s'"), lex_token_name (t));
+ const char *type_string = token_type_to_string (type);
+ if (type_string)
+ {
+ char *s = xasprintf ("`%s'", type_string);
+ lex_error_expecting (lexer, s);
+ free (s);
+ }
+ else
+ lex_error_expecting (lexer, token_type_to_name (type));
+
return false;
}
}
-/* If this token is a string, does nothing and returns true.
+/* If the current token is a string, does nothing and returns true.
Otherwise, reports an error and returns false. */
bool
lex_force_string (struct lexer *lexer)
{
- if (lexer->token == T_STRING)
+ if (lex_is_string (lexer))
return true;
else
{
}
}
-/* If this token is an integer, does nothing and returns true.
+/* If the current token is a string or an identifier, does nothing and returns
+ true. Otherwise, reports an error and returns false.
+
+ This is meant for use in syntactic situations where we want to encourage the
+ user to supply a quoted string, but for compatibility we also accept
+ identifiers. (One example of such a situation is file names.) Therefore,
+ the error message issued when the current token is wrong only says that a
+ string is expected and doesn't mention that an identifier would also be
+ accepted. */
+bool
+lex_force_string_or_id (struct lexer *lexer)
+{
+ return lex_token (lexer) == T_ID || lex_force_string (lexer);
+}
+
+/* If the current token is an integer, does nothing and returns true.
Otherwise, reports an error and returns false. */
bool
lex_force_int (struct lexer *lexer)
}
}
-/* If this token is a number, does nothing and returns true.
+/* If the current token is an integer in the range MIN...MAX (inclusive), does
+ nothing and returns true. Otherwise, reports an error and returns false.
+ If NAME is nonnull, then it is used in the error message. */
+bool
+lex_force_int_range (struct lexer *lexer, const char *name, long min, long max)
+{
+ bool is_integer = lex_is_integer (lexer);
+ bool too_small = is_integer && lex_integer (lexer) < min;
+ bool too_big = is_integer && lex_integer (lexer) > max;
+ if (is_integer && !too_small && !too_big)
+ return true;
+
+ if (min > max)
+ {
+ /* Weird, maybe a bug in the caller. Just report that we needed an
+ integer. */
+ if (name)
+ lex_error (lexer, _("Integer expected for %s."), name);
+ else
+ lex_error (lexer, _("Integer expected."));
+ }
+ else if (min == max)
+ {
+ if (name)
+ lex_error (lexer, _("Expected %ld for %s."), min, name);
+ else
+ lex_error (lexer, _("Expected %ld."), min);
+ }
+ else if (min + 1 == max)
+ {
+ if (name)
+ lex_error (lexer, _("Expected %ld or %ld for %s."), min, min + 1, name);
+ else
+ lex_error (lexer, _("Expected %ld or %ld."), min, min + 1);
+ }
+ else
+ {
+ bool report_lower_bound = (min > INT_MIN / 2) || too_small;
+ bool report_upper_bound = (max < INT_MAX / 2) || too_big;
+
+ if (report_lower_bound && report_upper_bound)
+ {
+ if (name)
+ lex_error (lexer,
+ _("Expected integer between %ld and %ld for %s."),
+ min, max, name);
+ else
+ lex_error (lexer, _("Expected integer between %ld and %ld."),
+ min, max);
+ }
+ else if (report_lower_bound)
+ {
+ if (min == 0)
+ {
+ if (name)
+ lex_error (lexer, _("Expected non-negative integer for %s."),
+ name);
+ else
+ lex_error (lexer, _("Expected non-negative integer."));
+ }
+ else if (min == 1)
+ {
+ if (name)
+ lex_error (lexer, _("Expected positive integer for %s."),
+ name);
+ else
+ lex_error (lexer, _("Expected positive integer."));
+ }
+ }
+ else if (report_upper_bound)
+ {
+ if (name)
+ lex_error (lexer,
+ _("Expected integer less than or equal to %ld for %s."),
+ max, name);
+ else
+ lex_error (lexer, _("Expected integer less than or equal to %ld."),
+ max);
+ }
+ else
+ {
+ if (name)
+ lex_error (lexer, _("Integer expected for %s."), name);
+ else
+ lex_error (lexer, _("Integer expected."));
+ }
+ }
+ return false;
+}
+
+/* If the current token is a number, does nothing and returns true.
Otherwise, reports an error and returns false. */
bool
lex_force_num (struct lexer *lexer)
return false;
}
-/* If this token is an identifier, does nothing and returns true.
+/* If the current token is an identifier, does nothing and returns true.
Otherwise, reports an error and returns false. */
bool
lex_force_id (struct lexer *lexer)
{
- if (lexer->token == T_ID)
+ if (lex_token (lexer) == T_ID)
return true;
lex_error (lexer, _("expecting identifier"));
return false;
}
+\f
+/* Token accessors. */
-/* Weird token functions. */
-
-/* Returns the first character of the next token, except that if the
- next token is not an identifier, the character returned will not be
- a character that can begin an identifier. Specifically, the
- hexstring lead-in X' causes lookahead() to return '. Note that an
- alphanumeric return value doesn't guarantee an ID token, it could
- also be a reserved-word token. */
-int
-lex_look_ahead (struct lexer *lexer)
+/* Returns the type of LEXER's current token. */
+enum token_type
+lex_token (const struct lexer *lexer)
{
- if (lexer->put_token)
- return lexer->put_token;
+ return lex_next_token (lexer, 0);
+}
- for (;;)
- {
- if (NULL == lexer->prog && ! lex_get_line (lexer) )
- return 0;
+/* Returns the number in LEXER's current token.
- for (;;)
- {
- while (c_isspace ((unsigned char) *lexer->prog))
- lexer->prog++;
- if (*lexer->prog)
- break;
-
- if (lexer->dot)
- return '.';
- else if (!lex_get_line (lexer))
- return 0;
-
- if (lexer->put_token)
- return lexer->put_token;
- }
+ Only T_NEG_NUM and T_POS_NUM tokens have meaningful values. For other
+ tokens this function will always return zero. */
+double
+lex_tokval (const struct lexer *lexer)
+{
+ return lex_next_tokval (lexer, 0);
+}
+
+/* Returns the null-terminated string in LEXER's current token, UTF-8 encoded.
+
+ Only T_ID and T_STRING tokens have meaningful strings. For other tokens
+   this function will always return NULL.
+
+ The UTF-8 encoding of the returned string is correct for variable names and
+ other identifiers. Use filename_to_utf8() to use it as a filename. Use
+ data_in() to use it in a "union value". */
+const char *
+lex_tokcstr (const struct lexer *lexer)
+{
+ return lex_next_tokcstr (lexer, 0);
+}
+
+/* Returns the string in LEXER's current token, UTF-8 encoded. The string is
+ null-terminated (but the null terminator is not included in the returned
+ substring's 'length').
- if ((toupper ((unsigned char) *lexer->prog) == 'X'
- || toupper ((unsigned char) *lexer->prog) == 'B'
- || toupper ((unsigned char) *lexer->prog) == 'O')
- && (lexer->prog[1] == '\'' || lexer->prog[1] == '"'))
- return '\'';
+ Only T_ID and T_STRING tokens have meaningful strings. For other tokens
+   this function will always return NULL.
- return *lexer->prog;
+ The UTF-8 encoding of the returned string is correct for variable names and
+ other identifiers. Use filename_to_utf8() to use it as a filename. Use
+ data_in() to use it in a "union value". */
+struct substring
+lex_tokss (const struct lexer *lexer)
+{
+ return lex_next_tokss (lexer, 0);
+}
+\f
+/* Looking ahead.
+
+ A value of 0 for N as an argument to any of these functions refers to the
+ current token. Lookahead is limited to the current command. Any N greater
+ than the number of tokens remaining in the current command will be treated
+ as referring to a T_ENDCMD token. */
+
+static const struct lex_token *
+lex_next__ (const struct lexer *lexer_, int n)
+{
+ struct lexer *lexer = CONST_CAST (struct lexer *, lexer_);
+ struct lex_source *src = lex_source__ (lexer);
+
+ if (src != NULL)
+ return lex_source_next__ (src, n);
+ else
+ {
+ static const struct lex_token stop_token = { .token = { .type = T_STOP } };
+ return &stop_token;
}
}
-/* Makes the current token become the next token to be read; the
- current token is set to T. */
-void
-lex_put_back (struct lexer *lexer, int t)
+static const struct lex_token *
+lex_source_next__ (const struct lex_source *src_, int n)
{
- save_token (lexer);
- lexer->token = t;
+ struct lex_source *src = CONST_CAST (struct lex_source *, src_);
+ while (lex_stage_count (&src->lookahead) <= n)
+ {
+ if (!lex_stage_is_empty (&src->lookahead))
+ {
+ const struct lex_token *t = lex_stage_last (&src->lookahead);
+ if (t->token.type == T_STOP || t->token.type == T_ENDCMD)
+ return t;
+ }
+
+ lex_source_get_lookahead (src);
+ }
+
+ return lex_stage_nth (&src->lookahead, n);
}
-/* Makes the current token become the next token to be read; the
- current token is set to the identifier ID. */
-void
-lex_put_back_id (struct lexer *lexer, const char *id)
+/* Returns the "struct token" of the token N after the current one in LEXER.
+ The returned pointer can be invalidated by pretty much any succeeding call
+ into the lexer, although the string pointer within the returned token is
+ only invalidated by consuming the token (e.g. with lex_get()). */
+const struct token *
+lex_next (const struct lexer *lexer, int n)
{
- assert (lex_id_to_token (ss_cstr (id)) == T_ID);
- save_token (lexer);
- lexer->token = T_ID;
- ds_assign_cstr (&lexer->tokstr, id);
- str_copy_trunc (lexer->tokid, sizeof lexer->tokid, ds_cstr (&lexer->tokstr));
+ return &lex_next__ (lexer, n)->token;
}
-\f
-/* Weird line processing functions. */
-/* Returns the entire contents of the current line. */
-const char *
-lex_entire_line (const struct lexer *lexer)
+/* Returns the type of the token N after the current one in LEXER. */
+enum token_type
+lex_next_token (const struct lexer *lexer, int n)
{
- return ds_cstr (&lexer->line_buffer);
+ return lex_next (lexer, n)->type;
}
-const struct string *
-lex_entire_line_ds (const struct lexer *lexer)
+/* Returns the number in the token N after the current one in LEXER.
+
+ Only T_NEG_NUM and T_POS_NUM tokens have meaningful values. For other
+ tokens this function will always return zero. */
+double
+lex_next_tokval (const struct lexer *lexer, int n)
{
- return &lexer->line_buffer;
+ return token_number (lex_next (lexer, n));
}
-/* As lex_entire_line(), but only returns the part of the current line
- that hasn't already been tokenized. */
+/* Returns the null-terminated string in the token N after the current one, in
+ UTF-8 encoding.
+
+ Only T_ID and T_STRING tokens have meaningful strings. For other tokens
+   this function will always return NULL.
+
+ The UTF-8 encoding of the returned string is correct for variable names and
+ other identifiers. Use filename_to_utf8() to use it as a filename. Use
+ data_in() to use it in a "union value". */
const char *
-lex_rest_of_line (const struct lexer *lexer)
+lex_next_tokcstr (const struct lexer *lexer, int n)
{
- return lexer->prog;
+ return lex_next_tokss (lexer, n).string;
}
-/* Returns true if the current line ends in a terminal dot,
- false otherwise. */
-bool
-lex_end_dot (const struct lexer *lexer)
+/* Returns the string in the token N after the current one, in UTF-8 encoding.
+ The string is null-terminated (but the null terminator is not included in
+ the returned substring's 'length').
+
+   Only T_ID, T_MACRO_ID, and T_STRING tokens have meaningful strings.  For
+   other tokens this function will always return NULL.
+
+ The UTF-8 encoding of the returned string is correct for variable names and
+ other identifiers. Use filename_to_utf8() to use it as a filename. Use
+ data_in() to use it in a "union value". */
+struct substring
+lex_next_tokss (const struct lexer *lexer, int n)
{
- return lexer->dot;
+ return lex_next (lexer, n)->string;
}
-/* Causes the rest of the current input line to be ignored for
- tokenization purposes. */
-void
-lex_discard_line (struct lexer *lexer)
+/* Returns the text of the syntax in tokens N0 ahead of the current one,
+ through N1 ahead of the current one, inclusive. (For example, if N0 and N1
+ are both zero, this requests the syntax for the current token.) The caller
+ must eventually free the returned string (with free()). The syntax is
+ encoded in UTF-8 and in the original form supplied to the lexer so that, for
+ example, it may include comments, spaces, and new-lines if it spans multiple
+ tokens. Macro expansion, however, has already been performed. */
+char *
+lex_next_representation (const struct lexer *lexer, int n0, int n1)
{
- ds_cstr (&lexer->line_buffer); /* Ensures ds_end points to something valid */
- lexer->prog = ds_end (&lexer->line_buffer);
- lexer->dot = false;
- lexer->put_token = 0;
+ return lex_source_get_syntax__ (lex_source__ (lexer), n0, n1);
}
-
-/* Discards the rest of the current command.
- When we're reading commands from a file, we skip tokens until
- a terminal dot or EOF.
- When we're reading commands interactively from the user,
- that's just discarding the current line, because presumably
- the user doesn't want to finish typing a command that will be
- ignored anyway. */
-void
-lex_discard_rest_of_command (struct lexer *lexer)
+/* Returns true if the token N ahead of the current one was produced by macro
+ expansion, false otherwise. */
+bool
+lex_next_is_from_macro (const struct lexer *lexer, int n)
{
- if (!getl_is_interactive (lexer->ss))
- {
- while (lexer->token != T_STOP && lexer->token != '.')
- lex_get (lexer);
- }
- else
- lex_discard_line (lexer);
+ return lex_next__ (lexer, n)->macro_rep != NULL;
}
-\f
-/* Weird line reading functions. */
-/* Remove C-style comments in STRING, begun by slash-star and
- terminated by star-slash or newline. */
-static void
-strip_comments (struct string *string)
+static bool
+lex_tokens_match (const struct token *actual, const struct token *expected)
{
- char *cp;
- int quote;
- bool in_comment;
+ if (actual->type != expected->type)
+ return false;
- in_comment = false;
- quote = EOF;
- for (cp = ds_cstr (string); *cp; )
+ switch (actual->type)
{
- /* If we're not in a comment, check for quote marks. */
- if (!in_comment)
- {
- if (*cp == quote)
- quote = EOF;
- else if (*cp == '\'' || *cp == '"')
- quote = *cp;
- }
+ case T_POS_NUM:
+ case T_NEG_NUM:
+ return actual->number == expected->number;
- /* If we're not inside a quotation, check for comment. */
- if (quote == EOF)
- {
- if (cp[0] == '/' && cp[1] == '*')
- {
- in_comment = true;
- *cp++ = ' ';
- *cp++ = ' ';
- continue;
- }
- else if (in_comment && cp[0] == '*' && cp[1] == '/')
- {
- in_comment = false;
- *cp++ = ' ';
- *cp++ = ' ';
- continue;
- }
- }
+ case T_ID:
+ return lex_id_match (expected->string, actual->string);
+
+ case T_STRING:
+ return (actual->string.length == expected->string.length
+ && !memcmp (actual->string.string, expected->string.string,
+ actual->string.length));
- /* Check commenting. */
- if (in_comment)
- *cp = ' ';
- cp++;
+ default:
+ return true;
}
}
-/* Prepares LINE, which is subject to the given SYNTAX rules, for
- tokenization by stripping comments and determining whether it
- is the beginning or end of a command and storing into
- *LINE_STARTS_COMMAND and *LINE_ENDS_COMMAND appropriately. */
-void
-lex_preprocess_line (struct string *line,
- enum syntax_mode syntax,
- bool *line_starts_command,
- bool *line_ends_command)
-{
- strip_comments (line);
- ds_rtrim (line, ss_cstr (CC_SPACES));
- *line_ends_command = (ds_chomp (line, settings_get_endcmd ())
- || (ds_is_empty (line) && settings_get_nulline ()));
- *line_starts_command = false;
- if (syntax == GETL_BATCH)
+static size_t
+lex_at_phrase__ (struct lexer *lexer, const char *s)
+{
+ struct string_lexer slex;
+ struct token token;
+
+ size_t i = 0;
+ string_lexer_init (&slex, s, strlen (s), SEG_MODE_INTERACTIVE, true);
+ while (string_lexer_next (&slex, &token))
{
- int first = ds_first (line);
- *line_starts_command = !c_isspace (first);
- if (first == '+' || first == '-')
- *ds_data (line) = ' ';
+ bool match = lex_tokens_match (lex_next (lexer, i++), &token);
+ token_uninit (&token);
+ if (!match)
+ return 0;
}
+ return i;
}
-/* Reads a line, without performing any preprocessing.
- Sets *SYNTAX, if SYNTAX is non-null, to the line's syntax
- mode. */
+/* If LEXER is positioned at the sequence of tokens that may be parsed from S,
+ returns true. Otherwise, returns false.
+
+ S may consist of an arbitrary sequence of tokens, e.g. "KRUSKAL-WALLIS",
+ "2SLS", or "END INPUT PROGRAM". Identifiers may be abbreviated to their
+ first three letters. */
bool
-lex_get_line_raw (struct lexer *lexer)
+lex_at_phrase (struct lexer *lexer, const char *s)
{
- bool ok = getl_read_line (lexer->ss, &lexer->line_buffer);
- enum syntax_mode mode = lex_current_syntax_mode (lexer);
- journal_write (mode == GETL_BATCH, ds_cstr (&lexer->line_buffer));
-
- return ok;
+ return lex_at_phrase__ (lexer, s) > 0;
}
-/* Reads a line for use by the tokenizer, and preprocesses it by
- removing comments, stripping trailing whitespace and the
- terminal dot, and removing leading indentors. */
+/* If LEXER is positioned at the sequence of tokens that may be parsed from S,
+ skips it and returns true. Otherwise, returns false.
+
+ S may consist of an arbitrary sequence of tokens, e.g. "KRUSKAL-WALLIS",
+ "2SLS", or "END INPUT PROGRAM". Identifiers may be abbreviated to their
+ first three letters. */
bool
-lex_get_line (struct lexer *lexer)
+lex_match_phrase (struct lexer *lexer, const char *s)
{
- bool line_starts_command;
+ size_t n = lex_at_phrase__ (lexer, s);
+ if (n > 0)
+ lex_get_n (lexer, n);
+ return n > 0;
+}
+
+static int
+count_newlines (char *s, size_t length)
+{
+ int n_newlines = 0;
+ char *newline;
- if (!lex_get_line_raw (lexer))
+ while ((newline = memchr (s, '\n', length)) != NULL)
{
- lexer->prog = NULL;
- return false;
+ n_newlines++;
+ length -= (newline + 1) - s;
+ s = newline + 1;
}
- lex_preprocess_line (&lexer->line_buffer,
- lex_current_syntax_mode (lexer),
- &line_starts_command, &lexer->dot);
+ return n_newlines;
+}
- if (line_starts_command)
- lexer->put_token = '.';
+static int
+lex_token_get_last_line_number (const struct lex_source *src,
+ const struct lex_token *token)
+{
+ if (token->first_line == 0)
+ return 0;
+ else
+ {
+ char *token_str = &src->buffer[token->token_pos - src->tail];
+ return token->first_line + count_newlines (token_str, token->token_len) + 1;
+ }
+}
- lexer->prog = ds_cstr (&lexer->line_buffer);
- return true;
+static int
+lex_token_get_first_column (const struct lex_source *src,
+ const struct lex_token *token)
+{
+ return utf8_count_columns (&src->buffer[token->line_pos - src->tail],
+ token->token_pos - token->line_pos) + 1;
+}
+
+static int
+lex_token_get_last_column (const struct lex_source *src,
+ const struct lex_token *token)
+{
+ char *start, *end, *newline;
+
+ start = &src->buffer[token->line_pos - src->tail];
+ end = &src->buffer[(token->token_pos + token->token_len) - src->tail];
+ newline = memrchr (start, '\n', end - start);
+ if (newline != NULL)
+ start = newline + 1;
+ return utf8_count_columns (start, end - start) + 1;
+}
+
+static struct msg_location
+lex_token_location (const struct lex_source *src,
+ const struct lex_token *t0,
+ const struct lex_token *t1)
+{
+ return (struct msg_location) {
+ .file_name = src->reader->file_name,
+ .first_line = t0->first_line,
+ .last_line = lex_token_get_last_line_number (src, t1),
+ .first_column = lex_token_get_first_column (src, t0),
+ .last_column = lex_token_get_last_column (src, t1),
+ };
}
-\f
-/* Token names. */
-/* Returns the name of a token. */
+static struct msg_location *
+lex_token_location_rw (const struct lex_source *src,
+ const struct lex_token *t0,
+ const struct lex_token *t1)
+{
+ struct msg_location location = lex_token_location (src, t0, t1);
+ return msg_location_dup (&location);
+}
+
+static struct msg_location *
+lex_source_get_location (const struct lex_source *src, int n0, int n1)
+{
+ return lex_token_location_rw (src,
+ lex_source_next__ (src, n0),
+ lex_source_next__ (src, n1));
+}
+
+/* Returns the 1-based line number of the start of the syntax that represents
+ the token N after the current one in LEXER. Returns 0 for a T_STOP token or
+ if the token is drawn from a source that does not have line numbers. */
+int
+lex_get_first_line_number (const struct lexer *lexer, int n)
+{
+ const struct lex_source *src = lex_source__ (lexer);
+ return src ? lex_source_next__ (src, n)->first_line : 0;
+}
+
+/* Returns the 1-based line number of the end of the syntax that represents the
+ token N after the current one in LEXER, plus 1. Returns 0 for a T_STOP
+ token or if the token is drawn from a source that does not have line
+ numbers.
+
+ Most of the time, a single token is wholly within a single line of syntax,
+ but there are two exceptions: a T_STRING token can be made up of multiple
+ segments on adjacent lines connected with "+" punctuators, and a T_NEG_NUM
+ token can consist of a "-" on one line followed by the number on the next.
+ */
+int
+lex_get_last_line_number (const struct lexer *lexer, int n)
+{
+ const struct lex_source *src = lex_source__ (lexer);
+ return src ? lex_token_get_last_line_number (src,
+ lex_source_next__ (src, n)) : 0;
+}
+
+/* Returns the 1-based column number of the start of the syntax that represents
+ the token N after the current one in LEXER. Returns 0 for a T_STOP
+ token.
+
+ Column numbers are measured according to the width of characters as shown in
+ a typical fixed-width font, in which CJK characters have width 2 and
+ combining characters have width 0. */
+int
+lex_get_first_column (const struct lexer *lexer, int n)
+{
+ const struct lex_source *src = lex_source__ (lexer);
+ return src ? lex_token_get_first_column (src, lex_source_next__ (src, n)) : 0;
+}
+
+/* Returns the 1-based column number of the end of the syntax that represents
+ the token N after the current one in LEXER, plus 1. Returns 0 for a T_STOP
+ token.
+
+ Column numbers are measured according to the width of characters as shown in
+ a typical fixed-width font, in which CJK characters have width 2 and
+ combining characters have width 0. */
+int
+lex_get_last_column (const struct lexer *lexer, int n)
+{
+ const struct lex_source *src = lex_source__ (lexer);
+ return src ? lex_token_get_last_column (src, lex_source_next__ (src, n)) : 0;
+}
+
+/* Returns the name of the syntax file from which the current command is drawn.
+ Returns NULL for a T_STOP token or if the command's source does not have
+ line numbers.
+
+ There is no version of this function that takes an N argument because
+ lookahead only works to the end of a command and any given command is always
+ within a single syntax file. */
const char *
-lex_token_name (int token)
+lex_get_file_name (const struct lexer *lexer)
{
- if (lex_is_keyword (token))
- return lex_id_name (token);
- else if (token < 256)
- {
- static char t[256][2];
- char *s = t[token];
- s[0] = token;
- s[1] = '\0';
- return s;
- }
- else
- NOT_REACHED ();
+ struct lex_source *src = lex_source__ (lexer);
+ return src == NULL ? NULL : src->reader->file_name;
}
-/* Returns an ASCII representation of the current token as a
- malloc()'d string. */
-char *
-lex_token_representation (struct lexer *lexer)
+/* Returns a newly allocated msg_location for the syntax that represents tokens
+ with 0-based offsets N0...N1, inclusive, from the current token. The caller
+ must eventually free the location (with msg_location_destroy()). */
+struct msg_location *
+lex_get_location (const struct lexer *lexer, int n0, int n1)
{
- char *token_rep;
+ struct msg_location *loc = lex_get_lines (lexer, n0, n1);
+ loc->first_column = lex_get_first_column (lexer, n0);
+ loc->last_column = lex_get_last_column (lexer, n1);
+ return loc;
+}
- switch (lexer->token)
- {
- case T_ID:
- case T_POS_NUM:
- case T_NEG_NUM:
- return ds_xstrdup (&lexer->tokstr);
- break;
+/* Returns a newly allocated msg_location for the syntax that represents tokens
+ with 0-based offsets N0...N1, inclusive, from the current token. The
+ location only covers the tokens' lines, not the columns. The caller must
+ eventually free the location (with msg_location_destroy()). */
+struct msg_location *
+lex_get_lines (const struct lexer *lexer, int n0, int n1)
+{
+ struct msg_location *loc = xmalloc (sizeof *loc);
+ *loc = (struct msg_location) {
+ .file_name = xstrdup_if_nonnull (lex_get_file_name (lexer)),
+ .first_line = lex_get_first_line_number (lexer, n0),
+ .last_line = lex_get_last_line_number (lexer, n1),
+ };
+ return loc;
+}
- case T_STRING:
- {
- int hexstring = 0;
- char *sp, *dp;
-
- for (sp = ds_cstr (&lexer->tokstr); sp < ds_end (&lexer->tokstr); sp++)
- if (!c_isprint ((unsigned char) *sp))
- {
- hexstring = 1;
- break;
- }
-
- token_rep = xmalloc (2 + ds_length (&lexer->tokstr) * 2 + 1 + 1);
-
- dp = token_rep;
- if (hexstring)
- *dp++ = 'X';
- *dp++ = '\'';
-
- if (!hexstring)
- for (sp = ds_cstr (&lexer->tokstr); *sp; )
- {
- if (*sp == '\'')
- *dp++ = '\'';
- *dp++ = (unsigned char) *sp++;
- }
- else
- for (sp = ds_cstr (&lexer->tokstr); sp < ds_end (&lexer->tokstr); sp++)
- {
- *dp++ = (((unsigned char) *sp) >> 4)["0123456789ABCDEF"];
- *dp++ = (((unsigned char) *sp) & 15)["0123456789ABCDEF"];
- }
- *dp++ = '\'';
- *dp = '\0';
-
- return token_rep;
- }
- break;
+const char *
+lex_get_encoding (const struct lexer *lexer)
+{
+ struct lex_source *src = lex_source__ (lexer);
+ return src == NULL ? NULL : src->reader->encoding;
+}
- case T_STOP:
- token_rep = xmalloc (1);
- *token_rep = '\0';
- return token_rep;
+/* Returns the syntax mode for the syntax file from which the current command
+   is drawn.  Returns SEG_MODE_AUTO for a T_STOP token or if the command's source
+ does not have line numbers.
- case T_EXP:
- return xstrdup ("**");
+ There is no version of this function that takes an N argument because
+ lookahead only works to the end of a command and any given command is always
+ within a single syntax file. */
+enum segmenter_mode
+lex_get_syntax_mode (const struct lexer *lexer)
+{
+ struct lex_source *src = lex_source__ (lexer);
+ return src == NULL ? SEG_MODE_AUTO : src->reader->syntax;
+}
- default:
- return xstrdup (lex_token_name (lexer->token));
- }
+/* Returns the error mode for the syntax file from which the current command
+   is drawn.  Returns LEX_ERROR_TERMINAL for a T_STOP token or if the command's
+   source does not have line numbers.
- NOT_REACHED ();
+ There is no version of this function that takes an N argument because
+ lookahead only works to the end of a command and any given command is always
+ within a single syntax file. */
+enum lex_error_mode
+lex_get_error_mode (const struct lexer *lexer)
+{
+ struct lex_source *src = lex_source__ (lexer);
+ return src == NULL ? LEX_ERROR_TERMINAL : src->reader->error;
}
-\f
-/* Really weird functions. */
-/* Most of the time, a `-' is a lead-in to a negative number. But
- sometimes it's actually part of the syntax. If a dash can be part
- of syntax then this function is called to rip it off of a
- number. */
+/* If the source that LEXER is currently reading has error mode
+ LEX_ERROR_TERMINAL, discards all buffered input and tokens, so that the next
+ token to be read comes directly from whatever is next read from the stream.
+
+ It makes sense to call this function after encountering an error in a
+ command entered on the console, because usually the user would prefer not to
+ have cascading errors. */
void
-lex_negative_to_dash (struct lexer *lexer)
+lex_interactive_reset (struct lexer *lexer)
{
- if (lexer->token == T_NEG_NUM)
+ struct lex_source *src = lex_source__ (lexer);
+ if (src != NULL && src->reader->error == LEX_ERROR_TERMINAL)
{
- lexer->token = T_POS_NUM;
- lexer->tokval = -lexer->tokval;
- ds_assign_substring (&lexer->tokstr, ds_substr (&lexer->tokstr, 1, SIZE_MAX));
- save_token (lexer);
- lexer->token = '-';
+ src->head = src->tail = 0;
+ src->journal_pos = src->seg_pos = src->line_pos = 0;
+ src->n_newlines = 0;
+ src->suppress_next_newline = false;
+ src->segmenter = segmenter_init (segmenter_get_mode (&src->segmenter),
+ false);
+ lex_stage_clear (&src->pp);
+ lex_stage_clear (&src->merge);
+ lex_stage_clear (&src->lookahead);
+ lex_source_push_endcmd__ (src);
}
}
-/* Skip a COMMENT command. */
+/* Advances past any tokens in LEXER up to a T_ENDCMD or T_STOP. */
void
-lex_skip_comment (struct lexer *lexer)
+lex_discard_rest_of_command (struct lexer *lexer)
{
- for (;;)
- {
- if (!lex_get_line (lexer))
- {
- lexer->put_token = T_STOP;
- lexer->prog = NULL;
- return;
- }
+ while (lex_token (lexer) != T_STOP && lex_token (lexer) != T_ENDCMD)
+ lex_get (lexer);
+}
- if (lexer->put_token == '.')
- break;
+/* Discards all lookahead tokens in LEXER, then discards all input sources
+ until it encounters one with error mode LEX_ERROR_TERMINAL or until it
+ runs out of input sources. */
+void
+lex_discard_noninteractive (struct lexer *lexer)
+{
+ struct lex_source *src = lex_source__ (lexer);
+
+ if (src != NULL)
+ {
+ lex_stage_clear (&src->pp);
+ lex_stage_clear (&src->merge);
+ lex_stage_clear (&src->lookahead);
- ds_cstr (&lexer->line_buffer); /* Ensures ds_end will point to a valid char */
- lexer->prog = ds_end (&lexer->line_buffer);
- if (lexer->dot)
- break;
+ for (; src != NULL && src->reader->error != LEX_ERROR_TERMINAL;
+ src = lex_source__ (lexer))
+ lex_source_destroy (src);
}
}
\f
-/* Private functions. */
+static size_t
+lex_source_max_tail__ (const struct lex_source *src_)
+{
+ struct lex_source *src = CONST_CAST (struct lex_source *, src_);
+
+ assert (src->seg_pos >= src->line_pos);
+ size_t max_tail = MIN (src->journal_pos, src->line_pos);
+
+ /* Use the oldest token also. */
+ struct lex_stage *stages[] = { &src->lookahead, &src->merge, &src->pp };
+ for (size_t i = 0; i < sizeof stages / sizeof *stages; i++)
+ if (!lex_stage_is_empty (stages[i]))
+ {
+ struct lex_token *first = lex_stage_first (stages[i]);
+ assert (first->token_pos >= first->line_pos);
+ return MIN (max_tail, first->line_pos);
+ }
+
+ return max_tail;
+}
-/* When invoked, tokstr contains a string of binary, octal, or
- hex digits, according to TYPE. The string is converted to
- characters having the specified values. */
static void
-convert_numeric_string_to_char_string (struct lexer *lexer,
- enum string_type type)
+lex_source_expand__ (struct lex_source *src)
{
- const char *base_name;
- int base;
- int chars_per_byte;
- size_t byte_cnt;
- size_t i;
- char *p;
+ if (src->head - src->tail >= src->allocated)
+ {
+ size_t max_tail = lex_source_max_tail__ (src);
+ if (max_tail > src->tail)
+ {
+ /* Advance the tail, freeing up room at the head. */
+ memmove (src->buffer, src->buffer + (max_tail - src->tail),
+ src->head - max_tail);
+ src->tail = max_tail;
+ }
+ else
+ {
+ /* Buffer is completely full. Expand it. */
+ src->buffer = x2realloc (src->buffer, &src->allocated);
+ }
+ }
+ else
+ {
+ /* There's space available at the head of the buffer. Nothing to do. */
+ }
+}
- switch (type)
+static void
+lex_source_read__ (struct lex_source *src)
+{
+ do
{
- case BINARY_STRING:
- base_name = _("binary");
- base = 2;
- chars_per_byte = 8;
- break;
- case OCTAL_STRING:
- base_name = _("octal");
- base = 8;
- chars_per_byte = 3;
- break;
- case HEX_STRING:
- base_name = _("hex");
- base = 16;
- chars_per_byte = 2;
- break;
- default:
- NOT_REACHED ();
+ lex_source_expand__ (src);
+
+ size_t head_ofs = src->head - src->tail;
+ size_t space = src->allocated - head_ofs;
+ enum prompt_style prompt = segmenter_get_prompt (&src->segmenter);
+ size_t n = src->reader->class->read (src->reader, &src->buffer[head_ofs],
+ space, prompt);
+ assert (n <= space);
+
+ if (n == 0)
+ {
+ /* End of input. */
+ src->reader->eof = true;
+ lex_source_expand__ (src);
+ return;
+ }
+
+ src->head += n;
}
+ while (!memchr (&src->buffer[src->seg_pos - src->tail], '\n',
+ src->head - src->seg_pos));
+}
- byte_cnt = ds_length (&lexer->tokstr) / chars_per_byte;
- if (ds_length (&lexer->tokstr) % chars_per_byte)
- msg (SE, _("String of %s digits has %zu characters, which is not a "
- "multiple of %d."),
- base_name, ds_length (&lexer->tokstr), chars_per_byte);
+static struct lex_source *
+lex_source__ (const struct lexer *lexer)
+{
+ return (ll_is_empty (&lexer->sources) ? NULL
+ : ll_data (ll_head (&lexer->sources), struct lex_source, ll));
+}
- p = ds_cstr (&lexer->tokstr);
- for (i = 0; i < byte_cnt; i++)
+/* Returns the text of the syntax in SRC for tokens N0 ahead of the current
+ one, through N1 ahead of the current one, inclusive. (For example, if N0
+ and N1 are both zero, this requests the syntax for the current token.) The
+ caller must eventually free the returned string (with free()). The syntax
+ is encoded in UTF-8 and in the original form supplied to the lexer so that,
+ for example, it may include comments, spaces, and new-lines if it spans
+ multiple tokens. Macro expansion, however, has already been performed. */
+static char *
+lex_source_get_syntax__ (const struct lex_source *src, int n0, int n1)
+{
+ struct string s = DS_EMPTY_INITIALIZER;
+ for (size_t i = n0; i <= n1; )
{
- int value;
- int j;
+ /* Find [I,J) as the longest sequence of tokens not produced by macro
+ expansion, or otherwise the longest sequence expanded from a single
+ macro call. */
+ const struct lex_token *first = lex_source_next__ (src, i);
+ size_t j;
+ for (j = i + 1; j <= n1; j++)
+ {
+ const struct lex_token *cur = lex_source_next__ (src, j);
+ if ((first->macro_rep != NULL) != (cur->macro_rep != NULL)
+ || first->macro_rep != cur->macro_rep)
+ break;
+ }
+ const struct lex_token *last = lex_source_next__ (src, j - 1);
- value = 0;
- for (j = 0; j < chars_per_byte; j++, p++)
- {
- int v;
+ /* Now add the syntax for this sequence of tokens to S. */
+ if (!ds_is_empty (&s))
+ ds_put_byte (&s, ' ');
+ if (!first->macro_rep)
+ {
+ size_t start = first->token_pos;
+ size_t end = last->token_pos + last->token_len;
+ ds_put_substring (&s, ss_buffer (&src->buffer[start - src->tail],
+ end - start));
+ }
+ else
+ {
+ size_t start = first->ofs;
+ size_t end = last->ofs + last->len;
+ ds_put_substring (&s, ss_buffer (first->macro_rep + start,
+ end - start));
+ }
+
+ i = j;
+ }
+ return ds_steal_cstr (&s);
+}
+
+static bool
+lex_source_contains_macro_call (struct lex_source *src, int n0, int n1)
+{
+ for (size_t i = n0; i <= n1; i++)
+ if (lex_source_next__ (src, i)->macro_rep)
+ return true;
+ return false;
+}
- if (*p >= '0' && *p <= '9')
- v = *p - '0';
- else
- {
- static const char alpha[] = "abcdef";
- const char *q = strchr (alpha, tolower ((unsigned char) *p));
+/* If tokens N0...N1 (inclusive) in SRC contains a macro call, this returns the
+ raw UTF-8 syntax for the macro call (not for the expansion) and for any
+ other tokens included in that range. The syntax is encoded in UTF-8 and in
+ the original form supplied to the lexer so that, for example, it may include
+ comments, spaces, and new-lines if it spans multiple tokens.
- if (q)
- v = q - alpha + 10;
- else
- v = base;
- }
+ Returns an empty string if the token range doesn't include a macro call.
- if (v >= base)
- msg (SE, _("`%c' is not a valid %s digit."), *p, base_name);
+ The caller must not modify or free the returned string. */
+static struct substring
+lex_source_get_macro_call (struct lex_source *src, int n0, int n1)
+{
+ if (!lex_source_contains_macro_call (src, n0, n1))
+ return ss_empty ();
- value = value * base + v;
- }
+ const struct lex_token *token0 = lex_source_next__ (src, n0);
+ const struct lex_token *token1 = lex_source_next__ (src, MAX (n0, n1));
+ size_t start = token0->token_pos;
+ size_t end = token1->token_pos + token1->token_len;
+
+ return ss_buffer (&src->buffer[start - src->tail], end - start);
+}
+
+static void
+lex_source_error_valist (struct lex_source *src, int n0, int n1,
+ const char *format, va_list args)
+{
+ const struct lex_token *token;
+ struct string s;
+
+ ds_init_empty (&s);
+
+ token = lex_source_next__ (src, n0);
+ if (token->token.type == T_ENDCMD)
+ ds_put_cstr (&s, _("Syntax error at end of command"));
+ else
+ {
+ /* Get the syntax that caused the error. */
+ char *raw_syntax = lex_source_get_syntax__ (src, n0, n1);
+ char syntax[64];
+ str_ellipsize (ss_cstr (raw_syntax), syntax, sizeof syntax);
+ free (raw_syntax);
+
+ /* Get the macro call(s) that expanded to the syntax that caused the
+ error. */
+ char call[64];
+ str_ellipsize (lex_source_get_macro_call (src, n0, n1),
+ call, sizeof call);
+
+ if (syntax[0])
+ {
+ if (call[0])
+ ds_put_format (&s,
+ _("Syntax error at `%s' (in expansion of `%s')"),
+ syntax, call);
+ else
+ ds_put_format (&s, _("Syntax error at `%s'"), syntax);
+ }
+ else
+ {
+ if (call[0])
+ ds_put_format (&s, _("Syntax error in syntax expanded from `%s'"),
+ call);
+ else
+ ds_put_cstr (&s, _("Syntax error"));
+ }
+ }
- ds_cstr (&lexer->tokstr)[i] = (unsigned char) value;
+ if (format)
+ {
+ ds_put_cstr (&s, ": ");
+ ds_put_vformat (&s, format, args);
}
+ if (ds_last (&s) != '.')
+ ds_put_byte (&s, '.');
+
+ struct msg *m = xmalloc (sizeof *m);
+ *m = (struct msg) {
+ .category = MSG_C_SYNTAX,
+ .severity = MSG_S_ERROR,
+ .location = lex_source_get_location (src, n0, n1),
+ .text = ds_steal_cstr (&s),
+ };
+ msg_emit (m);
+}
- ds_truncate (&lexer->tokstr, byte_cnt);
+static void
+lex_get_error (struct lex_source *src, const struct lex_token *token)
+{
+ char syntax[64];
+ str_ellipsize (ss_buffer (&src->buffer[token->token_pos - src->tail],
+ token->token_len),
+ syntax, sizeof syntax);
+
+ struct string s = DS_EMPTY_INITIALIZER;
+ ds_put_format (&s, _("Syntax error at `%s'"), syntax);
+ ds_put_format (&s, ": %s", token->token.string.string);
+
+ struct msg *m = xmalloc (sizeof *m);
+ *m = (struct msg) {
+ .category = MSG_C_SYNTAX,
+ .severity = MSG_S_ERROR,
+ .location = lex_token_location_rw (src, token, token),
+ .text = ds_steal_cstr (&s),
+ };
+ msg_emit (m);
}
-/* Parses a string from the input buffer into tokstr. The input
- buffer pointer lexer->prog must point to the initial single or double
- quote. TYPE indicates the type of string to be parsed.
- Returns token type. */
-static int
-parse_string (struct lexer *lexer, enum string_type type)
+/* Attempts to append an additional token to 'pp' in SRC, reading more from the
+ underlying lex_reader if necessary. Returns true if a new token was added
+ to SRC's 'pp' stage, false otherwise. The caller should retry failures unless
+ SRC's 'eof' marker was set to true indicating that there will be no more
+ tokens from this source. */
+static bool
+lex_source_try_get_pp (struct lex_source *src)
{
- if (type != CHARACTER_STRING)
- lexer->prog++;
+ /* Append a new token to SRC and initialize it. */
+ struct lex_token *token = xmalloc (sizeof *token);
+ token->token = (struct token) { .type = T_STOP };
+ token->macro_rep = NULL;
+ token->ref_cnt = NULL;
+ token->line_pos = src->line_pos;
+ token->token_pos = src->seg_pos;
+ if (src->reader->line_number > 0)
+ token->first_line = src->reader->line_number + src->n_newlines;
+ else
+ token->first_line = 0;
- /* Accumulate the entire string, joining sections indicated by +
- signs. */
+ /* Extract a segment. */
+ const char *segment;
+ enum segment_type seg_type;
+ int seg_len;
for (;;)
{
- /* Single or double quote. */
- int c = *lexer->prog++;
+ segment = &src->buffer[src->seg_pos - src->tail];
+ seg_len = segmenter_push (&src->segmenter, segment,
+ src->head - src->seg_pos,
+ src->reader->eof, &seg_type);
+ if (seg_len >= 0)
+ break;
+
+ /* The segmenter needs more input to produce a segment. */
+ assert (!src->reader->eof);
+ lex_source_read__ (src);
+ }
- /* Accumulate section. */
- for (;;)
- {
- /* Check end of line. */
- if (*lexer->prog == '\0')
- {
- msg (SE, _("Unterminated string constant."));
- goto finish;
- }
-
- /* Double quote characters to embed them in strings. */
- if (*lexer->prog == c)
- {
- if (lexer->prog[1] == c)
- lexer->prog++;
- else
- break;
- }
-
- ds_put_char (&lexer->tokstr, *lexer->prog++);
- }
- lexer->prog++;
+ /* Update state based on the segment. */
+ token->token_len = seg_len;
+ src->seg_pos += seg_len;
+ if (seg_type == SEG_NEWLINE)
+ {
+ src->line_pos = src->seg_pos;
+ src->n_newlines++;
+ }
- /* Skip whitespace after final quote mark. */
- if (lexer->prog == NULL)
- break;
- for (;;)
- {
- while (c_isspace ((unsigned char) *lexer->prog))
- lexer->prog++;
- if (*lexer->prog)
- break;
+ /* Get a token from the segment. */
+ enum tokenize_result result = token_from_segment (
+ seg_type, ss_buffer (segment, seg_len), &token->token);
- if (lexer->dot)
- goto finish;
+ /* If we've reached the end of a line, or the end of a command, then pass
+ the line to the output engine as a syntax text item. */
+ int n_lines = seg_type == SEG_NEWLINE;
+ if (seg_type == SEG_END_COMMAND && !src->suppress_next_newline)
+ {
+ n_lines++;
+ src->suppress_next_newline = true;
+ }
+ else if (n_lines > 0 && src->suppress_next_newline)
+ {
+ n_lines--;
+ src->suppress_next_newline = false;
+ }
+ for (int i = 0; i < n_lines; i++)
+ {
+ /* Beginning of line. */
+ const char *line = &src->buffer[src->journal_pos - src->tail];
+
+ /* Calculate line length, including \n or \r\n end-of-line if present.
+
+ We use src->head even though that may be beyond what we've actually
+ converted to tokens (which is only through line_pos). That's because,
+ if we're emitting the line due to SEG_END_COMMAND, we want to take the
+ whole line through the newline, not just through the '.'. */
+ size_t max_len = src->head - src->journal_pos;
+ const char *newline = memchr (line, '\n', max_len);
+ size_t line_len = newline ? newline - line + 1 : max_len;
+
+ /* Calculate line length excluding end-of-line. */
+ size_t copy_len = line_len;
+ if (copy_len > 0 && line[copy_len - 1] == '\n')
+ copy_len--;
+ if (copy_len > 0 && line[copy_len - 1] == '\r')
+ copy_len--;
+
+ /* Submit the line as syntax. */
+ output_item_submit (text_item_create_nocopy (TEXT_ITEM_SYNTAX,
+ xmemdup0 (line, copy_len),
+ NULL));
+
+ src->journal_pos += line_len;
+ }
- if (!lex_get_line (lexer))
- goto finish;
- }
+ switch (result)
+ {
+ case TOKENIZE_ERROR:
+ lex_get_error (src, token);
+ /* Fall through. */
+ case TOKENIZE_EMPTY:
+ lex_token_destroy (token);
+ return false;
- /* Skip plus sign. */
- if (*lexer->prog != '+')
- break;
- lexer->prog++;
+ case TOKENIZE_TOKEN:
+ if (token->token.type == T_STOP)
+ {
+ token->token.type = T_ENDCMD;
+ src->eof = true;
+ }
+ lex_stage_push_last (&src->pp, token);
+ return true;
+ }
+ NOT_REACHED ();
+}
- /* Skip whitespace after plus sign. */
- if (lexer->prog == NULL)
- break;
- for (;;)
- {
- while (c_isspace ((unsigned char) *lexer->prog))
- lexer->prog++;
- if (*lexer->prog)
- break;
+/* Attempts to append a new token to SRC. Returns true if successful, false on
+ failure. On failure, the end of SRC has been reached and no more tokens
+ will be forthcoming from it.
- if (lexer->dot)
- goto finish;
+ The new token is appended to SRC's 'pp' stage; it is not yet made
+ available for lookahead. */
+static bool
+lex_source_get_pp (struct lex_source *src)
+{
+ while (!src->eof)
+ if (lex_source_try_get_pp (src))
+ return true;
+ return false;
+}
- if (!lex_get_line (lexer))
- {
- msg (SE, _("Unexpected end of file in string concatenation."));
- goto finish;
- }
- }
+static bool
+lex_source_try_get_merge (const struct lex_source *src_)
+{
+ struct lex_source *src = CONST_CAST (struct lex_source *, src_);
- /* Ensure that a valid string follows. */
- if (*lexer->prog != '\'' && *lexer->prog != '"')
- {
- msg (SE, _("String expected following `+'."));
- goto finish;
- }
+ if (lex_stage_is_empty (&src->pp) && !lex_source_get_pp (src))
+ return false;
+
+ if (!settings_get_mexpand ())
+ {
+ lex_stage_shift (&src->merge, &src->pp, lex_stage_count (&src->pp));
+ return true;
}
- /* We come here when we've finished concatenating all the string sections
- into one large string. */
-finish:
- if (type != CHARACTER_STRING)
- convert_numeric_string_to_char_string (lexer, type);
+ /* Now pass tokens one-by-one to the macro expander.
- if (ds_length (&lexer->tokstr) > 255)
+ In the common case where there is no macro to expand, the loop is not
+ entered. */
+ struct macro_call *mc;
+ int n_call = macro_call_create (src->lexer->macros,
+ &lex_stage_first (&src->pp)->token, &mc);
+ for (int ofs = 1; !n_call; ofs++)
{
- msg (SE, _("String exceeds 255 characters in length (%zu characters)."),
- ds_length (&lexer->tokstr));
- ds_truncate (&lexer->tokstr, 255);
+ if (lex_stage_count (&src->pp) <= ofs && !lex_source_get_pp (src))
+ {
+ /* This should not be reachable because we always get a T_ENDCMD at
+ the end of an input file (transformed from T_STOP by
+ lex_source_try_get_pp()) and the macro_expander should always
+ terminate expansion on T_ENDCMD. */
+ NOT_REACHED ();
+ }
+
+ const struct lex_token *t = lex_stage_nth (&src->pp, ofs);
+ size_t start = t->token_pos;
+ size_t end = t->token_pos + t->token_len;
+ const struct macro_token mt = {
+ .token = t->token,
+ .syntax = ss_buffer (&src->buffer[start - src->tail], end - start),
+ };
+ const struct msg_location loc = lex_token_location (src, t, t);
+ n_call = macro_call_add (mc, &mt, &loc);
+ }
+ if (n_call < 0)
+ {
+ /* False alarm: no macro expansion after all. Use first token as
+ lookahead. We'll retry macro expansion from the second token next
+ time around. */
+ macro_call_destroy (mc);
+ lex_stage_shift (&src->merge, &src->pp, 1);
+ return true;
}
- return T_STRING;
+ /* The first 'n_call' tokens in 'pp', which we bracket as C0...C1, inclusive,
+ are a macro call. (These are likely to be the only tokens in 'pp'.)
+ Expand them. */
+ const struct lex_token *c0 = lex_stage_first (&src->pp);
+ const struct lex_token *c1 = lex_stage_nth (&src->pp, n_call - 1);
+ struct macro_tokens expansion = { .n = 0 };
+ struct msg_location loc = lex_token_location (src, c0, c1);
+ macro_call_expand (mc, src->reader->syntax, &loc, &expansion);
+ macro_call_destroy (mc);
+
+ /* Convert the macro expansion into syntax for possible error messages
+ later. */
+ size_t *ofs = xnmalloc (expansion.n, sizeof *ofs);
+ size_t *len = xnmalloc (expansion.n, sizeof *len);
+ struct string s = DS_EMPTY_INITIALIZER;
+ macro_tokens_to_syntax (&expansion, &s, ofs, len);
+
+ if (settings_get_mprint ())
+ output_item_submit (text_item_create (TEXT_ITEM_LOG, ds_cstr (&s),
+ _("Macro Expansion")));
+
+ /* Append the macro expansion tokens to the lookahead. */
+ if (expansion.n > 0)
+ {
+ char *macro_rep = ds_steal_cstr (&s);
+ size_t *ref_cnt = xmalloc (sizeof *ref_cnt);
+ *ref_cnt = expansion.n;
+ for (size_t i = 0; i < expansion.n; i++)
+ {
+ struct lex_token *token = xmalloc (sizeof *token);
+ *token = (struct lex_token) {
+ .token = expansion.mts[i].token,
+ .token_pos = c0->token_pos,
+ .token_len = (c1->token_pos + c1->token_len) - c0->token_pos,
+ .line_pos = c0->line_pos,
+ .first_line = c0->first_line,
+ .macro_rep = macro_rep,
+ .ofs = ofs[i],
+ .len = len[i],
+ .ref_cnt = ref_cnt,
+ };
+ lex_stage_push_last (&src->merge, token);
+
+ ss_dealloc (&expansion.mts[i].syntax);
+ }
+ }
+ else
+ ds_destroy (&s);
+ free (expansion.mts);
+ free (ofs);
+ free (len);
+
+ /* Destroy the tokens for the call. */
+ for (size_t i = 0; i < n_call; i++)
+ lex_stage_pop_first (&src->pp);
+
+ return expansion.n > 0;
+}
+
+/* Attempts to obtain at least one new token into 'merge' in SRC.
+
+ Returns true if successful, false on failure. In the latter case, SRC is
+ exhausted and 'src->eof' is now true. */
+static bool
+lex_source_get_merge (struct lex_source *src)
+{
+ while (!src->eof)
+ if (lex_source_try_get_merge (src))
+ return true;
+ return false;
+}
+
+/* Attempts to obtain at least one new token into 'lookahead' in SRC.
+
+ Returns true if successful, false on failure. In the latter case, SRC is
+ exhausted and 'src->eof' is now true. */
+static bool
+lex_source_get_lookahead (struct lex_source *src)
+{
+ struct merger m = MERGER_INIT;
+ struct token out;
+ for (size_t i = 0; ; i++)
+ {
+ while (lex_stage_count (&src->merge) <= i && !lex_source_get_merge (src))
+ {
+ /* We always get a T_ENDCMD at the end of an input file
+ (transformed from T_STOP by lex_source_try_get_pp()) and
+ merger_add() should never return -1 on T_ENDCMD. */
+ assert (lex_stage_is_empty (&src->merge));
+ return false;
+ }
+
+ int retval = merger_add (&m, &lex_stage_nth (&src->merge, i)->token,
+ &out);
+ if (!retval)
+ {
+ lex_stage_shift (&src->lookahead, &src->merge, 1);
+ return true;
+ }
+ else if (retval > 0)
+ {
+ /* Add a token that merges all the tokens together. */
+ const struct lex_token *first = lex_stage_first (&src->merge);
+ const struct lex_token *last = lex_stage_nth (&src->merge,
+ retval - 1);
+ bool macro = first->macro_rep && first->macro_rep == last->macro_rep;
+ struct lex_token *t = xmalloc (sizeof *t);
+ *t = (struct lex_token) {
+ .token = out,
+ .token_pos = first->token_pos,
+ .token_len = (last->token_pos - first->token_pos) + last->token_len,
+ .line_pos = first->line_pos,
+ .first_line = first->first_line,
+
+ /* This works well if all the tokens were not expanded from macros,
+ or if they came from the same macro expansion. It just gives up
+ in the other (corner) cases. */
+ .macro_rep = macro ? first->macro_rep : NULL,
+ .ofs = macro ? first->ofs : 0,
+ .len = macro ? (last->ofs - first->ofs) + last->len : 0,
+ .ref_cnt = macro ? first->ref_cnt : NULL,
+ };
+ if (t->ref_cnt)
+ ++*t->ref_cnt;
+ lex_stage_push_last (&src->lookahead, t);
+
+ for (int i = 0; i < retval; i++)
+ lex_stage_pop_first (&src->merge);
+ return true;
+ }
+ }
}
\f
-#if DUMP_TOKENS
-/* Reads one token from the lexer and writes a textual representation
- on stdout for debugging purposes. */
static void
-dump_token (struct lexer *lexer)
+lex_source_push_endcmd__ (struct lex_source *src)
+{
+ assert (lex_stage_is_empty (&src->lookahead));
+ struct lex_token *token = xmalloc (sizeof *token);
+ *token = (struct lex_token) { .token = { .type = T_ENDCMD } };
+ lex_stage_push_last (&src->lookahead, token);
+}
+
+static struct lex_source *
+lex_source_create (struct lexer *lexer, struct lex_reader *reader)
{
+ struct lex_source *src = xmalloc (sizeof *src);
+ *src = (struct lex_source) {
+ .reader = reader,
+ .segmenter = segmenter_init (reader->syntax, false),
+ .lexer = lexer,
+ };
+
+ lex_source_push_endcmd__ (src);
+
+ return src;
+}
+
+static void
+lex_source_destroy (struct lex_source *src)
+{
+ char *file_name = src->reader->file_name;
+ char *encoding = src->reader->encoding;
+ if (src->reader->class->destroy != NULL)
+ src->reader->class->destroy (src->reader);
+ free (file_name);
+ free (encoding);
+ free (src->buffer);
+ lex_stage_uninit (&src->pp);
+ lex_stage_uninit (&src->merge);
+ lex_stage_uninit (&src->lookahead);
+ ll_remove (&src->ll);
+ free (src);
+}
+\f
+struct lex_file_reader
{
- const char *curfn;
- int curln;
+ struct lex_reader reader;
+ struct u8_istream *istream;
+ };
+
+static struct lex_reader_class lex_file_reader_class;
- curln = getl_source_location (lexer->ss);
- curfn = getl_source_name (lexer->ss);
- if (curfn)
- fprintf (stderr, "%s:%d\t", curfn, curln);
- }
+/* Creates and returns a new lex_reader that will read from file FILE_NAME (or
+ from stdin if FILE_NAME is "-"). The file is expected to be encoded with
+ ENCODING, which should take one of the forms accepted by
+ u8_istream_for_file(). SYNTAX and ERROR become the syntax mode and error
+ mode of the new reader, respectively.
- switch (lexer->token)
+ Returns a null pointer if FILE_NAME cannot be opened. */
+struct lex_reader *
+lex_reader_for_file (const char *file_name, const char *encoding,
+ enum segmenter_mode syntax,
+ enum lex_error_mode error)
+{
+ struct lex_file_reader *r;
+ struct u8_istream *istream;
+
+ istream = (!strcmp(file_name, "-")
+ ? u8_istream_for_fd (encoding, STDIN_FILENO)
+ : u8_istream_for_file (encoding, file_name, O_RDONLY));
+ if (istream == NULL)
{
- case T_ID:
- fprintf (stderr, "ID\t%s\n", lexer->tokid);
- break;
+ msg (ME, _("Opening `%s': %s."), file_name, strerror (errno));
+ return NULL;
+ }
- case T_POS_NUM:
- case T_NEG_NUM:
- fprintf (stderr, "NUM\t%f\n", lexer->tokval);
- break;
+ r = xmalloc (sizeof *r);
+ lex_reader_init (&r->reader, &lex_file_reader_class);
+ r->reader.syntax = syntax;
+ r->reader.error = error;
+ r->reader.file_name = xstrdup (file_name);
+ r->reader.encoding = xstrdup_if_nonnull (encoding);
+ r->reader.line_number = 1;
+ r->istream = istream;
- case T_STRING:
- fprintf (stderr, "STRING\t\"%s\"\n", ds_cstr (&lexer->tokstr));
- break;
+ return &r->reader;
+}
- case T_STOP:
- fprintf (stderr, "STOP\n");
- break;
+static struct lex_file_reader *
+lex_file_reader_cast (struct lex_reader *r)
+{
+ return UP_CAST (r, struct lex_file_reader, reader);
+}
- case T_EXP:
- fprintf (stderr, "MISC\tEXP\"");
- break;
+static size_t
+lex_file_read (struct lex_reader *r_, char *buf, size_t n,
+ enum prompt_style prompt_style UNUSED)
+{
+ struct lex_file_reader *r = lex_file_reader_cast (r_);
+ ssize_t n_read = u8_istream_read (r->istream, buf, n);
+ if (n_read < 0)
+ {
+ msg (ME, _("Error reading `%s': %s."), r_->file_name, strerror (errno));
+ return 0;
+ }
+ return n_read;
+}
- case 0:
- fprintf (stderr, "MISC\tEOF\n");
- break;
+static void
+lex_file_close (struct lex_reader *r_)
+{
+ struct lex_file_reader *r = lex_file_reader_cast (r_);
- default:
- if (lex_is_keyword (lexer->token))
- fprintf (stderr, "KEYWORD\t%s\n", lex_token_name (lexer->token));
- else
- fprintf (stderr, "PUNCT\t%c\n", lexer->token);
- break;
+ if (u8_istream_fileno (r->istream) != STDIN_FILENO)
+ {
+ if (u8_istream_close (r->istream) != 0)
+ msg (ME, _("Error closing `%s': %s."), r_->file_name, strerror (errno));
}
+ else
+ u8_istream_free (r->istream);
+
+ free (r);
}
-#endif /* DUMP_TOKENS */
+static struct lex_reader_class lex_file_reader_class =
+ {
+ lex_file_read,
+ lex_file_close
+ };
+\f
+struct lex_string_reader
+ {
+ struct lex_reader reader;
+ struct substring s;
+ size_t offset;
+ };
-/* Token Accessor Functions */
+static struct lex_reader_class lex_string_reader_class;
-int
-lex_token (const struct lexer *lexer)
+/* Creates and returns a new lex_reader for the contents of S, which must be
+ encoded in the given ENCODING. The new reader takes ownership of S and will free it
+ with ss_dealloc() when it is closed. */
+struct lex_reader *
+lex_reader_for_substring_nocopy (struct substring s, const char *encoding)
{
- return lexer->token;
+ struct lex_string_reader *r;
+
+ r = xmalloc (sizeof *r);
+ lex_reader_init (&r->reader, &lex_string_reader_class);
+ r->reader.syntax = SEG_MODE_AUTO;
+ r->reader.encoding = xstrdup_if_nonnull (encoding);
+ r->s = s;
+ r->offset = 0;
+
+ return &r->reader;
}
-double
-lex_tokval (const struct lexer *lexer)
+/* Creates and returns a new lex_reader for a copy of null-terminated string S,
+ which must be encoded in ENCODING. The caller retains ownership of S. */
+struct lex_reader *
+lex_reader_for_string (const char *s, const char *encoding)
{
- return lexer->tokval;
+ struct substring ss;
+ ss_alloc_substring (&ss, ss_cstr (s));
+ return lex_reader_for_substring_nocopy (ss, encoding);
}
-const char *
-lex_tokid (const struct lexer *lexer)
+/* Formats FORMAT as a printf()-like format string and creates and returns a
+ new lex_reader for the formatted result. */
+struct lex_reader *
+lex_reader_for_format (const char *format, const char *encoding, ...)
{
- return lexer->tokid;
+ struct lex_reader *r;
+ va_list args;
+
+ va_start (args, encoding);
+ r = lex_reader_for_substring_nocopy (ss_cstr (xvasprintf (format, args)), encoding);
+ va_end (args);
+
+ return r;
}
-const struct string *
-lex_tokstr (const struct lexer *lexer)
+static struct lex_string_reader *
+lex_string_reader_cast (struct lex_reader *r)
{
- return &lexer->tokstr;
+ return UP_CAST (r, struct lex_string_reader, reader);
}
-/* If the lexer is positioned at the (pseudo)identifier S, which
- may contain a hyphen ('-'), skips it and returns true. Each
- half of the identifier may be abbreviated to its first three
- letters.
- Otherwise, returns false. */
-bool
-lex_match_hyphenated_word (struct lexer *lexer, const char *s)
-{
- const char *hyphen = strchr (s, '-');
- if (hyphen == NULL)
- return lex_match_id (lexer, s);
- else if (lexer->token != T_ID
- || !lex_id_match (ss_buffer (s, hyphen - s), ss_cstr (lexer->tokid))
- || lex_look_ahead (lexer) != '-')
- return false;
- else
- {
- lex_get (lexer);
- lex_force_match (lexer, '-');
- lex_force_match_id (lexer, hyphen + 1);
- return true;
- }
+static size_t
+lex_string_read (struct lex_reader *r_, char *buf, size_t n,
+ enum prompt_style prompt_style UNUSED)
+{
+ struct lex_string_reader *r = lex_string_reader_cast (r_);
+ size_t chunk;
+
+ chunk = MIN (n, r->s.length - r->offset);
+ memcpy (buf, r->s.string + r->offset, chunk);
+ r->offset += chunk;
+
+ return chunk;
}
+static void
+lex_string_close (struct lex_reader *r_)
+{
+ struct lex_string_reader *r = lex_string_reader_cast (r_);
+
+ ss_dealloc (&r->s);
+ free (r);
+}
+
+static struct lex_reader_class lex_string_reader_class =
+ {
+ lex_string_read,
+ lex_string_close
+ };