X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=src%2Flanguage%2Flexer%2Flexer.c;h=a3642f8a6c6f7e7f5fc5b9376ee31db8308e9efa;hb=b6d66ec3f328d0e8bf35b71f29332695121f7173;hp=3d2e29ac92aac6ca2bfd51aa9edfd1caccae7753;hpb=b990f5c31bc831e588a86f9f4826387c6843c989;p=pspp diff --git a/src/language/lexer/lexer.c b/src/language/lexer/lexer.c index 3d2e29ac92..a3642f8a6c 100644 --- a/src/language/lexer/lexer.c +++ b/src/language/lexer/lexer.c @@ -1,444 +1,410 @@ -/* PSPP - computes sample statistics. - Copyright (C) 1997-9, 2000 Free Software Foundation, Inc. - Written by Ben Pfaff . +/* PSPP - a program for statistical analysis. + Copyright (C) 1997-9, 2000, 2006, 2009, 2010, 2011, 2013, 2016 Free Software Foundation, Inc. - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with this program. If not, see . */ #include -#include "lexer.h" -#include -#include + +#include "language/lexer/lexer.h" + #include +#include #include #include #include #include -#include -#include -#include -#include -#include -#include -#include - -#include "size_max.h" +#include +#include +#include +#include +#include + +#include "language/command.h" +#include "language/lexer/scan.h" +#include "language/lexer/segment.h" +#include "language/lexer/token.h" +#include "libpspp/assertion.h" +#include "libpspp/cast.h" +#include "libpspp/deque.h" +#include "libpspp/i18n.h" +#include "libpspp/ll.h" +#include "libpspp/message.h" +#include "libpspp/misc.h" +#include "libpspp/str.h" +#include "libpspp/u8-istream.h" +#include "output/journal.h" +#include "output/text-item.h" + +#include "gl/c-ctype.h" +#include "gl/minmax.h" +#include "gl/xalloc.h" +#include "gl/xmemdup0.h" #include "gettext.h" #define _(msgid) gettext (msgid) #define N_(msgid) msgid -/* -#define DUMP_TOKENS 1 -*/ - - -/* Global variables. */ +/* A token within a lex_source. */ +struct lex_token + { + /* The regular token information. */ + struct token token; + + /* Location of token in terms of the lex_source's buffer. + src->tail <= line_pos <= token_pos <= src->head. */ + size_t token_pos; /* Start of token. */ + size_t token_len; /* Length of source for token in bytes. */ + size_t line_pos; /* Start of line containing token_pos. */ + int first_line; /* Line number at token_pos. */ + }; -extern const char *keywords[T_N_KEYWORDS + 1]; +/* A source of tokens, corresponding to a syntax file. 
+ This is conceptually a lex_reader wrapped with everything needed to convert + its UTF-8 bytes into tokens. */ +struct lex_source + { + struct ll ll; /* In lexer's list of sources. */ + struct lex_reader *reader; + struct segmenter segmenter; + bool eof; /* True if T_STOP was read from 'reader'. */ + + /* Buffer of UTF-8 bytes. */ + char *buffer; + size_t allocated; /* Number of bytes allocated. */ + size_t tail; /* &buffer[0] offset into UTF-8 source. */ + size_t head; /* &buffer[head - tail] offset into source. */ + + /* Positions in source file, tail <= pos <= head for each member here. */ + size_t journal_pos; /* First byte not yet output to journal. */ + size_t seg_pos; /* First byte not yet scanned as token. */ + size_t line_pos; /* First byte of line containing seg_pos. */ + + int n_newlines; /* Number of new-lines up to seg_pos. */ + bool suppress_next_newline; + + /* Tokens. */ + struct deque deque; /* Indexes into 'tokens'. */ + struct lex_token *tokens; /* Lookahead tokens for parser. */ + }; -/* Current token. */ -int token; +static struct lex_source *lex_source_create (struct lex_reader *); +static void lex_source_destroy (struct lex_source *); -/* T_POS_NUM, T_NEG_NUM: the token's value. */ -double tokval; +/* Lexer. */ +struct lexer + { + struct ll_list sources; /* Contains "struct lex_source"s. */ + }; -/* T_ID: the identifier. */ -char tokid[LONG_NAME_LEN + 1]; +static struct lex_source *lex_source__ (const struct lexer *); +static const struct lex_token *lex_next__ (const struct lexer *, int n); +static void lex_source_push_endcmd__ (struct lex_source *); + +static void lex_source_pop__ (struct lex_source *); +static bool lex_source_get__ (const struct lex_source *); +static void lex_source_error_valist (struct lex_source *, int n0, int n1, + const char *format, va_list) + PRINTF_FORMAT (4, 0); +static const struct lex_token *lex_source_next__ (const struct lex_source *, + int n); + +/* Initializes READER with the specified CLASS and otherwise some reasonable + defaults. The caller should fill in the others members as desired. */ +void +lex_reader_init (struct lex_reader *reader, + const struct lex_reader_class *class) +{ + reader->class = class; + reader->syntax = LEX_SYNTAX_AUTO; + reader->error = LEX_ERROR_CONTINUE; + reader->file_name = NULL; + reader->encoding = NULL; + reader->line_number = 0; +} -/* T_ID, T_STRING: token string value. - For T_ID, this is not truncated as is tokid. */ -struct string tokstr; +/* Frees any file name already in READER and replaces it by a copy of + FILE_NAME, or if FILE_NAME is null then clears any existing name. */ +void +lex_reader_set_file_name (struct lex_reader *reader, const char *file_name) +{ + free (reader->file_name); + reader->file_name = file_name != NULL ? xstrdup (file_name) : NULL; +} -/* Static variables. */ +/* Creates and returns a new lexer. */ +struct lexer * +lex_create (void) +{ + struct lexer *lexer = xzalloc (sizeof *lexer); + ll_init (&lexer->sources); + return lexer; +} -/* Pointer to next token in getl_buf. */ -static char *prog; +/* Destroys LEXER. */ +void +lex_destroy (struct lexer *lexer) +{ + if (lexer != NULL) + { + struct lex_source *source, *next; -/* Nonzero only if this line ends with a terminal dot. */ -static int dot; + ll_for_each_safe (source, next, struct lex_source, ll, &lexer->sources) + lex_source_destroy (source); + free (lexer); + } +} -/* Nonzero only if the last token returned was T_STOP. 
*/ -static int eof; +/* Inserts READER into LEXER so that the next token read by LEXER comes from + READER. Before the caller, LEXER must either be empty or at a T_ENDCMD + token. */ +void +lex_include (struct lexer *lexer, struct lex_reader *reader) +{ + assert (ll_is_empty (&lexer->sources) || lex_token (lexer) == T_ENDCMD); + ll_push_head (&lexer->sources, &lex_source_create (reader)->ll); +} -/* If nonzero, next token returned by lex_get(). - Used only in exceptional circumstances. */ -static int put_token; -static struct string put_tokstr; -static double put_tokval; +/* Appends READER to LEXER, so that it will be read after all other current + readers have already been read. */ +void +lex_append (struct lexer *lexer, struct lex_reader *reader) +{ + ll_push_tail (&lexer->sources, &lex_source_create (reader)->ll); +} + +/* Advancing. */ -static int parse_id (void); +static struct lex_token * +lex_push_token__ (struct lex_source *src) +{ + struct lex_token *token; -/* How a string represents its contents. */ -enum string_type - { - CHARACTER_STRING, /* Characters. */ - BINARY_STRING, /* Binary digits. */ - OCTAL_STRING, /* Octal digits. */ - HEX_STRING /* Hexadecimal digits. */ - }; + if (deque_is_full (&src->deque)) + src->tokens = deque_expand (&src->deque, src->tokens, sizeof *src->tokens); -static void convert_numeric_string_to_char_string (enum string_type); -static int parse_string (enum string_type); + token = &src->tokens[deque_push_front (&src->deque)]; + token_init (&token->token); + return token; +} -#if DUMP_TOKENS -static void dump_token (void); -#endif - -/* Initialization. */ +static void +lex_source_pop__ (struct lex_source *src) +{ + token_destroy (&src->tokens[deque_pop_back (&src->deque)].token); +} -/* Initializes the lexer. */ -void -lex_init (void) +static void +lex_source_pop_front (struct lex_source *src) { - ds_init (&tokstr, 64); - ds_init (&put_tokstr, 64); - if (!lex_get_line ()) - eof = true; + token_destroy (&src->tokens[deque_pop_front (&src->deque)].token); } +/* Advances LEXER to the next token, consuming the current token. */ void -lex_done (void) +lex_get (struct lexer *lexer) { - ds_destroy (&put_tokstr); - ds_destroy (&tokstr); -} + struct lex_source *src; + + src = lex_source__ (lexer); + if (src == NULL) + return; + if (!deque_is_empty (&src->deque)) + lex_source_pop__ (src); + + while (deque_is_empty (&src->deque)) + if (!lex_source_get__ (src)) + { + lex_source_destroy (src); + src = lex_source__ (lexer); + if (src == NULL) + return; + } +} -/* Common functions. */ +/* Issuing errors. */ -/* Copies put_token, put_tokstr, put_tokval into token, tokstr, - tokval, respectively, and sets tokid appropriately. */ -static void -restore_token (void) +/* Prints a syntax error message containing the current token and + given message MESSAGE (if non-null). */ +void +lex_error (struct lexer *lexer, const char *format, ...) { - assert (put_token != 0); - token = put_token; - ds_assign_string (&tokstr, &put_tokstr); - str_copy_trunc (tokid, sizeof tokid, ds_c_str (&tokstr)); - tokval = put_tokval; - put_token = 0; + va_list args; + + va_start (args, format); + lex_next_error_valist (lexer, 0, 0, format, args); + va_end (args); } -/* Copies token, tokstr, tokval into put_token, put_tokstr, - put_tokval respectively. */ -static void -save_token (void) +/* Prints a syntax error message containing the current token and + given message MESSAGE (if non-null). 
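
   [Illustrative sketch, not part of this change: the error helpers here are
   how a command parser ordinarily complains about the token it is looking
   at.  The fragment assumes the same PSPP headers that lexer.c itself
   includes; "DEPTH" is only a stand-in setting name, and a real parser
   would wrap the message in _() for translation.]

       static bool
       parse_depth_setting (struct lexer *lexer, int *depth)
       {
         if (!lex_force_int (lexer))
           return false;            /* "expecting integer" already reported. */
         if (lex_integer (lexer) <= 0)
           {
             /* Complain about the current token; lex_next_error() could name
                a wider token range instead. */
             lex_error (lexer, "%s must be positive", "DEPTH");
             return false;
           }
         *depth = lex_integer (lexer);
         lex_get (lexer);           /* Consume the number. */
         return true;
       }
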
*/ +void +lex_error_valist (struct lexer *lexer, const char *format, va_list args) { - put_token = token; - ds_assign_string (&put_tokstr, &tokstr); - put_tokval = tokval; + lex_next_error_valist (lexer, 0, 0, format, args); } -/* Parses a single token, setting appropriate global variables to - indicate the token's attributes. */ +/* Prints a syntax error message containing the current token and + given message MESSAGE (if non-null). */ void -lex_get (void) +lex_next_error (struct lexer *lexer, int n0, int n1, const char *format, ...) { - /* If a token was pushed ahead, return it. */ - if (put_token) - { - restore_token (); -#if DUMP_TOKENS - dump_token (); -#endif - return; - } + va_list args; - /* Find a token. */ - for (;;) + va_start (args, format); + lex_next_error_valist (lexer, n0, n1, format, args); + va_end (args); +} + +/* Prints a syntax error message saying that OPTION0 or one of the other + strings following it, up to the first NULL, is expected. */ +void +lex_error_expecting (struct lexer *lexer, const char *option0, ...) +{ + enum { MAX_OPTIONS = 8 }; + const char *options[MAX_OPTIONS + 1]; + va_list args; + int n; + + va_start (args, option0); + options[0] = option0; + n = 0; + while (n + 1 < MAX_OPTIONS && options[n] != NULL) + options[++n] = va_arg (args, const char *); + va_end (args); + + switch (n) { - /* Skip whitespace. */ - if (eof) - { - token = T_STOP; - return; - } + case 0: + lex_error (lexer, NULL); + break; - for (;;) - { - while (isspace ((unsigned char) *prog)) - prog++; - if (*prog) - break; - - if (dot) - { - dot = 0; - token = '.'; -#if DUMP_TOKENS - dump_token (); -#endif - return; - } - else if (!lex_get_line ()) - { - eof = 1; - token = T_STOP; -#if DUMP_TOKENS - dump_token (); -#endif - return; - } - - if (put_token) - { - restore_token (); -#if DUMP_TOKENS - dump_token (); -#endif - return; - } - } + case 1: + lex_error (lexer, _("expecting %s"), options[0]); + break; + case 2: + lex_error (lexer, _("expecting %s or %s"), options[0], options[1]); + break; - /* Actually parse the token. */ - ds_clear (&tokstr); - - switch (*prog) - { - case '-': case '.': - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - { - char *tail; - - /* `-' can introduce a negative number, or it can be a - token by itself. If it is not followed by a digit or a - decimal point, it is definitely not a number. - Otherwise, it might be either, but most of the time we - want it as a number. When the syntax calls for a `-' - token, lex_negative_to_dash() must be used to break - negative numbers into two tokens. */ - if (*prog == '-') - { - ds_putc (&tokstr, *prog++); - while (isspace ((unsigned char) *prog)) - prog++; - - if (!isdigit ((unsigned char) *prog) && *prog != '.') - { - token = '-'; - break; - } - token = T_NEG_NUM; - } - else - token = T_POS_NUM; - - /* Parse the number, copying it into tokstr. */ - while (isdigit ((unsigned char) *prog)) - ds_putc (&tokstr, *prog++); - if (*prog == '.') - { - ds_putc (&tokstr, *prog++); - while (isdigit ((unsigned char) *prog)) - ds_putc (&tokstr, *prog++); - } - if (*prog == 'e' || *prog == 'E') - { - ds_putc (&tokstr, *prog++); - if (*prog == '+' || *prog == '-') - ds_putc (&tokstr, *prog++); - while (isdigit ((unsigned char) *prog)) - ds_putc (&tokstr, *prog++); - } - - /* Parse as floating point. 
*/ - tokval = strtod (ds_c_str (&tokstr), &tail); - if (*tail) - { - msg (SE, _("%s does not form a valid number."), - ds_c_str (&tokstr)); - tokval = 0.0; - - ds_clear (&tokstr); - ds_putc (&tokstr, '0'); - } - - break; - } - - case '\'': case '"': - token = parse_string (CHARACTER_STRING); - break; - - case '(': case ')': case ',': case '=': case '+': case '/': - token = *prog++; - break; - - case '*': - if (*++prog == '*') - { - prog++; - token = T_EXP; - } - else - token = '*'; - break; - - case '<': - if (*++prog == '=') - { - prog++; - token = T_LE; - } - else if (*prog == '>') - { - prog++; - token = T_NE; - } - else - token = T_LT; - break; - - case '>': - if (*++prog == '=') - { - prog++; - token = T_GE; - } - else - token = T_GT; - break; - - case '~': - if (*++prog == '=') - { - prog++; - token = T_NE; - } - else - token = T_NOT; - break; - - case '&': - prog++; - token = T_AND; - break; - - case '|': - prog++; - token = T_OR; - break; - - case 'b': case 'B': - if (prog[1] == '\'' || prog[1] == '"') - token = parse_string (BINARY_STRING); - else - token = parse_id (); - break; - - case 'o': case 'O': - if (prog[1] == '\'' || prog[1] == '"') - token = parse_string (OCTAL_STRING); - else - token = parse_id (); - break; - - case 'x': case 'X': - if (prog[1] == '\'' || prog[1] == '"') - token = parse_string (HEX_STRING); - else - token = parse_id (); - break; - - default: - if (lex_is_id1 (*prog)) - { - token = parse_id (); - break; - } - else - { - if (isgraph ((unsigned char) *prog)) - msg (SE, _("Bad character in input: `%c'."), *prog++); - else - msg (SE, _("Bad character in input: `\\%o'."), *prog++); - continue; - } - } + case 3: + lex_error (lexer, _("expecting %s, %s, or %s"), options[0], options[1], + options[2]); + break; + + case 4: + lex_error (lexer, _("expecting %s, %s, %s, or %s"), + options[0], options[1], options[2], options[3]); + break; + + case 5: + lex_error (lexer, _("expecting %s, %s, %s, %s, or %s"), + options[0], options[1], options[2], options[3], options[4]); + break; + + case 6: + lex_error (lexer, _("expecting %s, %s, %s, %s, %s, or %s"), + options[0], options[1], options[2], options[3], options[4], + options[5]); break; - } -#if DUMP_TOKENS - dump_token (); -#endif + case 7: + lex_error (lexer, _("expecting %s, %s, %s, %s, %s, %s, or %s"), + options[0], options[1], options[2], options[3], options[4], + options[5], options[6]); + break; + + case 8: + lex_error (lexer, _("expecting %s, %s, %s, %s, %s, %s, %s, or %s"), + options[0], options[1], options[2], options[3], options[4], + options[5], options[6], options[7]); + break; + + default: + NOT_REACHED (); + } } -/* Parses an identifier at the current position into tokid and - tokstr. - Returns the correct token type. */ -static int -parse_id (void) +/* Reports an error to the effect that subcommand SBC may only be specified + once. + + This function does not take a lexer as an argument or use lex_error(), + because the result would ordinarily just be redundant: "Syntax error at + SUBCOMMAND: Subcommand SUBCOMMAND may only be specified once.", which does + not help the user find the error. */ +void +lex_sbc_only_once (const char *sbc) { - const char *start = prog; - prog = lex_skip_identifier (start); + msg (SE, _("Subcommand %s may only be specified once."), sbc); +} + +/* Reports an error to the effect that subcommand SBC is missing. 
- ds_concat (&tokstr, start, prog - start); - str_copy_trunc (tokid, sizeof tokid, ds_c_str (&tokstr)); - return lex_id_to_token (ds_c_str (&tokstr), ds_length (&tokstr)); + This function does not take a lexer as an argument or use lex_error(), + because a missing subcommand can normally be detected only after the whole + command has been parsed, and so lex_error() would always report "Syntax + error at end of command", which does not help the user find the error. */ +void +lex_sbc_missing (const char *sbc) +{ + msg (SE, _("Required subcommand %s was not specified."), sbc); } -/* Reports an error to the effect that subcommand SBC may only be - specified once. */ +/* Reports an error to the effect that specification SPEC may only be specified + once within subcommand SBC. */ void -lex_sbc_only_once (const char *sbc) +lex_spec_only_once (struct lexer *lexer, const char *sbc, const char *spec) { - msg (SE, _("Subcommand %s may only be specified once."), sbc); + lex_error (lexer, _("%s may only be specified once within subcommand %s"), + spec, sbc); } -/* Reports an error to the effect that subcommand SBC is - missing. */ +/* Reports an error to the effect that specification SPEC is missing within + subcommand SBC. */ void -lex_sbc_missing (const char *sbc) +lex_spec_missing (struct lexer *lexer, const char *sbc, const char *spec) { - lex_error (_("missing required subcommand %s"), sbc); + lex_error (lexer, _("Required %s specification missing from %s subcommand"), + sbc, spec); } /* Prints a syntax error message containing the current token and given message MESSAGE (if non-null). */ void -lex_error (const char *message, ...) +lex_next_error_valist (struct lexer *lexer, int n0, int n1, + const char *format, va_list args) { - char *token_rep; - char where[128]; + struct lex_source *src = lex_source__ (lexer); - token_rep = lex_token_representation (); - if (token == T_STOP) - strcpy (where, "end of file"); - else if (token == '.') - strcpy (where, "end of command"); + if (src != NULL) + lex_source_error_valist (src, n0, n1, format, args); else - snprintf (where, sizeof where, "`%s'", token_rep); - free (token_rep); - - if (message) { - char buf[1024]; - va_list args; - - va_start (args, message); - vsnprintf (buf, 1024, message, args); - va_end (args); - - msg (SE, _("Syntax error %s at %s."), buf, where); + struct string s; + + ds_init_empty (&s); + ds_put_format (&s, _("Syntax error at end of input")); + if (format != NULL) + { + ds_put_cstr (&s, ": "); + ds_put_vformat (&s, format, args); + } + ds_put_byte (&s, '.'); + msg (SE, "%s", ds_cstr (&s)); + ds_destroy (&s); } - else - msg (SE, _("Syntax error at %s."), where); } /* Checks that we're at end of command. @@ -446,12 +412,12 @@ lex_error (const char *message, ...) If not, flags a syntax error and returns an error command completion code. */ int -lex_end_of_command (void) +lex_end_of_command (struct lexer *lexer) { - if (token != '.') + if (lex_token (lexer) != T_ENDCMD && lex_token (lexer) != T_STOP) { - lex_error (_("expecting end of command")); - return CMD_TRAILING_GARBAGE; + lex_error (lexer, _("expecting end of command")); + return CMD_FAILURE; } else return CMD_SUCCESS; @@ -461,759 +427,1325 @@ lex_end_of_command (void) /* Returns true if the current token is a number. */ bool -lex_is_number (void) +lex_is_number (struct lexer *lexer) +{ + return lex_next_is_number (lexer, 0); +} + +/* Returns true if the current token is a string. 
*/ +bool +lex_is_string (struct lexer *lexer) { - return token == T_POS_NUM || token == T_NEG_NUM; + return lex_next_is_string (lexer, 0); } /* Returns the value of the current token, which must be a floating point number. */ double -lex_number (void) +lex_number (struct lexer *lexer) { - assert (lex_is_number ()); - return tokval; + return lex_next_number (lexer, 0); } /* Returns true iff the current token is an integer. */ bool -lex_is_integer (void) +lex_is_integer (struct lexer *lexer) { - return (lex_is_number () - && tokval != NOT_LONG - && tokval >= LONG_MIN - && tokval <= LONG_MAX - && floor (tokval) == tokval); + return lex_next_is_integer (lexer, 0); } /* Returns the value of the current token, which must be an integer. */ long -lex_integer (void) +lex_integer (struct lexer *lexer) +{ + return lex_next_integer (lexer, 0); +} + +/* Token testing functions with lookahead. + + A value of 0 for N as an argument to any of these functions refers to the + current token. Lookahead is limited to the current command. Any N greater + than the number of tokens remaining in the current command will be treated + as referring to a T_ENDCMD token. */ + +/* Returns true if the token N ahead of the current token is a number. */ +bool +lex_next_is_number (struct lexer *lexer, int n) +{ + enum token_type next_token = lex_next_token (lexer, n); + return next_token == T_POS_NUM || next_token == T_NEG_NUM; +} + +/* Returns true if the token N ahead of the current token is a string. */ +bool +lex_next_is_string (struct lexer *lexer, int n) +{ + return lex_next_token (lexer, n) == T_STRING; +} + +/* Returns the value of the token N ahead of the current token, which must be a + floating point number. */ +double +lex_next_number (struct lexer *lexer, int n) +{ + assert (lex_next_is_number (lexer, n)); + return lex_next_tokval (lexer, n); +} + +/* Returns true if the token N ahead of the current token is an integer. */ +bool +lex_next_is_integer (struct lexer *lexer, int n) +{ + double value; + + if (!lex_next_is_number (lexer, n)) + return false; + + value = lex_next_tokval (lexer, n); + return value > LONG_MIN && value <= LONG_MAX && floor (value) == value; +} + +/* Returns the value of the token N ahead of the current token, which must be + an integer. */ +long +lex_next_integer (struct lexer *lexer, int n) { - assert (lex_is_integer ()); - return tokval; + assert (lex_next_is_integer (lexer, n)); + return lex_next_tokval (lexer, n); } - + /* Token matching functions. */ -/* If TOK is the current token, skips it and returns nonzero. - Otherwise, returns zero. */ -int -lex_match (int t) +/* If the current token has the specified TYPE, skips it and returns true. + Otherwise, returns false. */ +bool +lex_match (struct lexer *lexer, enum token_type type) { - if (token == t) + if (lex_token (lexer) == type) { - lex_get (); - return 1; + lex_get (lexer); + return true; } else - return 0; + return false; } -/* If the current token is the identifier S, skips it and returns - nonzero. The identifier may be abbreviated to its first three - letters. - Otherwise, returns zero. */ -int -lex_match_id (const char *s) +/* If the current token matches IDENTIFIER, skips it and returns true. + IDENTIFIER may be abbreviated to its first three letters. Otherwise, + returns false. + + IDENTIFIER must be an ASCII string. */ +bool +lex_match_id (struct lexer *lexer, const char *identifier) +{ + return lex_match_id_n (lexer, identifier, 3); +} + +/* If the current token is IDENTIFIER, skips it and returns true. 
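
   [Illustrative sketch, not part of this change: lex_match_id() and
   lex_error_expecting() combine naturally into the keyword-dispatch loops
   that command parsers are built from.  The subcommand names here are
   hypothetical.]

       static bool
       parse_my_options (struct lexer *lexer)
       {
         while (lex_token (lexer) != T_ENDCMD)
           {
             if (lex_match_id (lexer, "VERBOSE"))
               {
                 /* "VERBOSE", or any abbreviation down to "VER", consumed. */
               }
             else if (lex_match_id (lexer, "QUIET"))
               {
                 /* Likewise for "QUIET". */
               }
             else
               {
                 lex_error_expecting (lexer, "VERBOSE", "QUIET",
                                      NULL_SENTINEL);
                 return false;
               }
           }
         return true;
       }
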
IDENTIFIER + may be abbreviated to its first N letters. Otherwise, returns false. + + IDENTIFIER must be an ASCII string. */ +bool +lex_match_id_n (struct lexer *lexer, const char *identifier, size_t n) { - if (token == T_ID && lex_id_match (s, tokid)) + if (lex_token (lexer) == T_ID + && lex_id_match_n (ss_cstr (identifier), lex_tokss (lexer), n)) { - lex_get (); - return 1; + lex_get (lexer); + return true; } else - return 0; + return false; } -/* If the current token is integer N, skips it and returns nonzero. - Otherwise, returns zero. */ -int -lex_match_int (int x) +/* If the current token is integer X, skips it and returns true. Otherwise, + returns false. */ +bool +lex_match_int (struct lexer *lexer, int x) { - if (lex_is_integer () && lex_integer () == x) + if (lex_is_integer (lexer) && lex_integer (lexer) == x) { - lex_get (); - return 1; + lex_get (lexer); + return true; } else - return 0; + return false; } /* Forced matches. */ -/* If this token is identifier S, fetches the next token and returns - nonzero. - Otherwise, reports an error and returns zero. */ -int -lex_force_match_id (const char *s) +/* If this token is IDENTIFIER, skips it and returns true. IDENTIFIER may be + abbreviated to its first 3 letters. Otherwise, reports an error and returns + false. + + IDENTIFIER must be an ASCII string. */ +bool +lex_force_match_id (struct lexer *lexer, const char *identifier) { - if (token == T_ID && lex_id_match (s, tokid)) - { - lex_get (); - return 1; - } + if (lex_match_id (lexer, identifier)) + return true; else { - lex_error (_("expecting `%s'"), s); - return 0; + lex_error_expecting (lexer, identifier, NULL_SENTINEL); + return false; } } -/* If the current token is T, skips the token. Otherwise, reports an - error and returns from the current function with return value 0. */ -int -lex_force_match (int t) +/* If the current token has the specified TYPE, skips it and returns true. + Otherwise, reports an error and returns false. */ +bool +lex_force_match (struct lexer *lexer, enum token_type type) { - if (token == t) + if (lex_token (lexer) == type) { - lex_get (); - return 1; + lex_get (lexer); + return true; } else { - lex_error (_("expecting `%s'"), lex_token_name (t)); - return 0; - } -} + const char *type_string = token_type_to_string (type); + if (type_string) + { + char *s = xasprintf ("`%s'", type_string); + lex_error_expecting (lexer, s, NULL_SENTINEL); + free (s); + } + else + lex_error_expecting (lexer, token_type_to_name (type), NULL_SENTINEL); -/* If this token is a string, does nothing and returns nonzero. - Otherwise, reports an error and returns zero. */ -int -lex_force_string (void) -{ - if (token == T_STRING) - return 1; - else - { - lex_error (_("expecting string")); - return 0; + return false; } } -/* If this token is an integer, does nothing and returns nonzero. - Otherwise, reports an error and returns zero. */ -int -lex_force_int (void) +/* If the current token is a string, does nothing and returns true. + Otherwise, reports an error and returns false. */ +bool +lex_force_string (struct lexer *lexer) { - if (lex_is_integer ()) - return 1; + if (lex_is_string (lexer)) + return true; else { - lex_error (_("expecting integer")); - return 0; + lex_error (lexer, _("expecting string")); + return false; } } - -/* If this token is a number, does nothing and returns nonzero. - Otherwise, reports an error and returns zero. */ -int -lex_force_num (void) + +/* If the current token is a string or an identifier, does nothing and returns + true. 
Otherwise, reports an error and returns false. + + This is meant for use in syntactic situations where we want to encourage the + user to supply a quoted string, but for compatibility we also accept + identifiers. (One example of such a situation is file names.) Therefore, + the error message issued when the current token is wrong only says that a + string is expected and doesn't mention that an identifier would also be + accepted. */ +bool +lex_force_string_or_id (struct lexer *lexer) { - if (lex_is_number ()) - return 1; - else - { - lex_error (_("expecting number")); - return 0; - } + return lex_token (lexer) == T_ID || lex_force_string (lexer); } - -/* If this token is an identifier, does nothing and returns nonzero. - Otherwise, reports an error and returns zero. */ -int -lex_force_id (void) + +/* If the current token is an integer, does nothing and returns true. + Otherwise, reports an error and returns false. */ +bool +lex_force_int (struct lexer *lexer) { - if (token == T_ID) - return 1; + if (lex_is_integer (lexer)) + return true; else { - lex_error (_("expecting identifier")); - return 0; + lex_error (lexer, _("expecting integer")); + return false; } } -/* Weird token functions. */ -/* Returns the first character of the next token, except that if the - next token is not an identifier, the character returned will not be - a character that can begin an identifier. Specifically, the - hexstring lead-in X' causes lookahead() to return '. Note that an - alphanumeric return value doesn't guarantee an ID token, it could - also be a reserved-word token. */ -int -lex_look_ahead (void) +/* If the current token is a number, does nothing and returns true. + Otherwise, reports an error and returns false. */ +bool +lex_force_num (struct lexer *lexer) { - if (put_token) - return put_token; - - for (;;) - { - if (eof) - return 0; - - for (;;) - { - while (isspace ((unsigned char) *prog)) - prog++; - if (*prog) - break; - - if (dot) - return '.'; - else if (!lex_get_line ()) - return 0; - - if (put_token) - return put_token; - } - - if ((toupper ((unsigned char) *prog) == 'X' - || toupper ((unsigned char) *prog) == 'B' - || toupper ((unsigned char) *prog) == 'O') - && (prog[1] == '\'' || prog[1] == '"')) - return '\''; + if (lex_is_number (lexer)) + return true; - return *prog; - } + lex_error (lexer, _("expecting number")); + return false; } -/* Makes the current token become the next token to be read; the - current token is set to T. */ -void -lex_put_back (int t) +/* If the current token is an identifier, does nothing and returns true. + Otherwise, reports an error and returns false. */ +bool +lex_force_id (struct lexer *lexer) { - save_token (); - token = t; -} + if (lex_token (lexer) == T_ID) + return true; -/* Makes the current token become the next token to be read; the - current token is set to the identifier ID. */ -void -lex_put_back_id (const char *id) -{ - assert (lex_id_to_token (id, strlen (id)) == T_ID); - save_token (); - token = T_ID; - ds_assign_c_str (&tokstr, id); - str_copy_trunc (tokid, sizeof tokid, ds_c_str (&tokstr)); + lex_error (lexer, _("expecting identifier")); + return false; } -/* Weird line processing functions. */ +/* Token accessors. */ -/* Returns the entire contents of the current line. */ -const char * -lex_entire_line (void) +/* Returns the type of LEXER's current token. 
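
   [Illustrative sketch, not part of this change: a typical use of the
   function below is accepting a file name that the user may give either as
   a quoted string or as a bare identifier.  xstrdup() is the gnulib helper
   already used elsewhere in this file.]

       static char *
       parse_output_file_name (struct lexer *lexer)
       {
         char *name;

         if (!lex_force_string_or_id (lexer))
           return NULL;             /* "expecting string" already reported. */
         name = xstrdup (lex_tokcstr (lexer));   /* UTF-8; see lex_tokcstr(). */
         lex_get (lexer);
         return name;
       }
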
*/ +enum token_type +lex_token (const struct lexer *lexer) { - return ds_c_str (&getl_buf); + return lex_next_token (lexer, 0); } -/* As lex_entire_line(), but only returns the part of the current line - that hasn't already been tokenized. - If END_DOT is non-null, stores nonzero into *END_DOT if the line - ends with a terminal dot, or zero if it doesn't. */ -const char * -lex_rest_of_line (int *end_dot) +/* Returns the number in LEXER's current token. + + Only T_NEG_NUM and T_POS_NUM tokens have meaningful values. For other + tokens this function will always return zero. */ +double +lex_tokval (const struct lexer *lexer) { - if (end_dot) - *end_dot = dot; - return prog; + return lex_next_tokval (lexer, 0); } -/* Causes the rest of the current input line to be ignored for - tokenization purposes. */ -void -lex_discard_line (void) +/* Returns the null-terminated string in LEXER's current token, UTF-8 encoded. + + Only T_ID and T_STRING tokens have meaningful strings. For other tokens + this functions this function will always return NULL. + + The UTF-8 encoding of the returned string is correct for variable names and + other identifiers. Use filename_to_utf8() to use it as a filename. Use + data_in() to use it in a "union value". */ +const char * +lex_tokcstr (const struct lexer *lexer) { - prog = ds_end (&getl_buf); - dot = put_token = 0; + return lex_next_tokcstr (lexer, 0); } -/* Sets the current position in the current line to P, which must be - in getl_buf. */ -void -lex_set_prog (char *p) +/* Returns the string in LEXER's current token, UTF-8 encoded. The string is + null-terminated (but the null terminator is not included in the returned + substring's 'length'). + + Only T_ID and T_STRING tokens have meaningful strings. For other tokens + this functions this function will always return NULL. + + The UTF-8 encoding of the returned string is correct for variable names and + other identifiers. Use filename_to_utf8() to use it as a filename. Use + data_in() to use it in a "union value". */ +struct substring +lex_tokss (const struct lexer *lexer) { - prog = p; + return lex_next_tokss (lexer, 0); } -/* Weird line reading functions. */ +/* Looking ahead. -/* Remove C-style comments in STRING, begun by slash-star and - terminated by star-slash or newline. */ -static void -strip_comments (struct string *string) + A value of 0 for N as an argument to any of these functions refers to the + current token. Lookahead is limited to the current command. Any N greater + than the number of tokens remaining in the current command will be treated + as referring to a T_ENDCMD token. */ + +static const struct lex_token * +lex_next__ (const struct lexer *lexer_, int n) { - char *cp; - int quote; - bool in_comment; + struct lexer *lexer = CONST_CAST (struct lexer *, lexer_); + struct lex_source *src = lex_source__ (lexer); - in_comment = false; - quote = EOF; - for (cp = ds_c_str (string); *cp; ) + if (src != NULL) + return lex_source_next__ (src, n); + else { - /* If we're not in a comment, check for quote marks. */ - if (!in_comment) - { - if (*cp == quote) - quote = EOF; - else if (*cp == '\'' || *cp == '"') - quote = *cp; - } - - /* If we're not inside a quotation, check for comment. 
*/ - if (quote == EOF) + static const struct lex_token stop_token = + { TOKEN_INITIALIZER (T_STOP, 0.0, ""), 0, 0, 0, 0 }; + + return &stop_token; + } +} + +static const struct lex_token * +lex_source_next__ (const struct lex_source *src, int n) +{ + while (deque_count (&src->deque) <= n) + { + if (!deque_is_empty (&src->deque)) { - if (cp[0] == '/' && cp[1] == '*') - { - in_comment = true; - *cp++ = ' '; - *cp++ = ' '; - continue; - } - else if (in_comment && cp[0] == '*' && cp[1] == '/') - { - in_comment = false; - *cp++ = ' '; - *cp++ = ' '; - continue; - } + struct lex_token *front; + + front = &src->tokens[deque_front (&src->deque, 0)]; + if (front->token.type == T_STOP || front->token.type == T_ENDCMD) + return front; } - - /* Check commenting. */ - if (in_comment) - *cp = ' '; - cp++; + + lex_source_get__ (src); } + + return &src->tokens[deque_back (&src->deque, n)]; } -/* Reads a line for use by the tokenizer, and preprocesses it by - removing comments, stripping trailing whitespace and the - terminal dot, and removing leading indentors. */ -bool -lex_get_line (void) +/* Returns the "struct token" of the token N after the current one in LEXER. + The returned pointer can be invalidated by pretty much any succeeding call + into the lexer, although the string pointer within the returned token is + only invalidated by consuming the token (e.g. with lex_get()). */ +const struct token * +lex_next (const struct lexer *lexer, int n) { - struct string *line = &getl_buf; - bool interactive; + return &lex_next__ (lexer, n)->token; +} - if (!getl_read_line (&interactive)) - return false; +/* Returns the type of the token N after the current one in LEXER. */ +enum token_type +lex_next_token (const struct lexer *lexer, int n) +{ + return lex_next (lexer, n)->type; +} - strip_comments (line); - ds_rtrim_spaces (line); - - /* Check for and remove terminal dot. */ - dot = (ds_chomp (line, get_endcmd ()) - || (ds_is_empty (line) && get_nulline ())); - - /* Strip leading indentors or insert a terminal dot (unless the - line was obtained interactively). */ - if (!interactive) - { - int first = ds_first (line); +/* Returns the number in the tokn N after the current one in LEXER. - if (first == '+' || first == '-') - *ds_data (line) = ' '; - else if (first != EOF && !isspace (first)) - put_token = '.'; - } + Only T_NEG_NUM and T_POS_NUM tokens have meaningful values. For other + tokens this function will always return zero. */ +double +lex_next_tokval (const struct lexer *lexer, int n) +{ + const struct token *token = lex_next (lexer, n); + return token->number; +} - prog = ds_c_str (line); +/* Returns the null-terminated string in the token N after the current one, in + UTF-8 encoding. - return true; -} - -/* Token names. */ + Only T_ID and T_STRING tokens have meaningful strings. For other tokens + this functions this function will always return NULL. -/* Returns the name of a token in a static buffer. */ + The UTF-8 encoding of the returned string is correct for variable names and + other identifiers. Use filename_to_utf8() to use it as a filename. Use + data_in() to use it in a "union value". */ const char * -lex_token_name (int token) +lex_next_tokcstr (const struct lexer *lexer, int n) { - if (token >= T_FIRST_KEYWORD && token <= T_LAST_KEYWORD) - return keywords[token - T_FIRST_KEYWORD]; + return lex_next_tokss (lexer, n).string; +} - if (token < 256) - { - static char t[2]; - t[0] = token; - return t; - } +/* Returns the string in the token N after the current one, in UTF-8 encoding. 
+ The string is null-terminated (but the null terminator is not included in + the returned substring's 'length'). + + Only T_ID and T_STRING tokens have meaningful strings. For other tokens + this functions this function will always return NULL. - return _(""); + The UTF-8 encoding of the returned string is correct for variable names and + other identifiers. Use filename_to_utf8() to use it as a filename. Use + data_in() to use it in a "union value". */ +struct substring +lex_next_tokss (const struct lexer *lexer, int n) +{ + return lex_next (lexer, n)->string; } -/* Returns an ASCII representation of the current token as a - malloc()'d string. */ -char * -lex_token_representation (void) +static bool +lex_tokens_match (const struct token *actual, const struct token *expected) { - char *token_rep; - - switch (token) + if (actual->type != expected->type) + return false; + + switch (actual->type) { - case T_ID: case T_POS_NUM: case T_NEG_NUM: - return xstrdup (ds_c_str (&tokstr)); - break; + return actual->number == expected->number; + + case T_ID: + return lex_id_match (expected->string, actual->string); case T_STRING: - { - int hexstring = 0; - char *sp, *dp; - - for (sp = ds_c_str (&tokstr); sp < ds_end (&tokstr); sp++) - if (!isprint ((unsigned char) *sp)) - { - hexstring = 1; - break; - } - - token_rep = xmalloc (2 + ds_length (&tokstr) * 2 + 1 + 1); - - dp = token_rep; - if (hexstring) - *dp++ = 'X'; - *dp++ = '\''; - - if (!hexstring) - for (sp = ds_c_str (&tokstr); *sp; ) - { - if (*sp == '\'') - *dp++ = '\''; - *dp++ = (unsigned char) *sp++; - } - else - for (sp = ds_c_str (&tokstr); sp < ds_end (&tokstr); sp++) - { - *dp++ = (((unsigned char) *sp) >> 4)["0123456789ABCDEF"]; - *dp++ = (((unsigned char) *sp) & 15)["0123456789ABCDEF"]; - } - *dp++ = '\''; - *dp = '\0'; - - return token_rep; + return (actual->string.length == expected->string.length + && !memcmp (actual->string.string, expected->string.string, + actual->string.length)); + + default: + return true; + } +} + +/* If LEXER is positioned at the sequence of tokens that may be parsed from S, + skips it and returns true. Otherwise, returns false. + + S may consist of an arbitrary sequence of tokens, e.g. "KRUSKAL-WALLIS", + "2SLS", or "END INPUT PROGRAM". Identifiers may be abbreviated to their + first three letters. 
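
   [Illustrative fragment, not part of this change: the multi-token matcher
   defined just below lets a parser consume a fixed phrase in one call, and
   consumes nothing if the phrase is not there.  It assumes a "struct lexer
   *lexer" in scope.]

       if (lex_match_phrase (lexer, "END INPUT PROGRAM"))
         {
           /* All three tokens matched and were consumed. */
         }
       else
         {
           /* No match: the lexer's position is unchanged. */
         }
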
*/ +bool +lex_match_phrase (struct lexer *lexer, const char *s) +{ + struct string_lexer slex; + struct token token; + int i; + + i = 0; + string_lexer_init (&slex, s, SEG_MODE_INTERACTIVE); + while (string_lexer_next (&slex, &token)) + if (token.type != SCAN_SKIP) + { + bool match = lex_tokens_match (lex_next (lexer, i++), &token); + token_destroy (&token); + if (!match) + return false; } - break; - case T_STOP: - token_rep = xmalloc (1); - *token_rep = '\0'; - return token_rep; + while (i-- > 0) + lex_get (lexer); + return true; +} - case T_EXP: - return xstrdup ("**"); +static int +lex_source_get_first_line_number (const struct lex_source *src, int n) +{ + return lex_source_next__ (src, n)->first_line; +} - default: - if (token >= T_FIRST_KEYWORD && token <= T_LAST_KEYWORD) - return xstrdup (keywords [token - T_FIRST_KEYWORD]); +static int +count_newlines (char *s, size_t length) +{ + int n_newlines = 0; + char *newline; + + while ((newline = memchr (s, '\n', length)) != NULL) + { + n_newlines++; + length -= (newline + 1) - s; + s = newline + 1; + } + + return n_newlines; +} + +static int +lex_source_get_last_line_number (const struct lex_source *src, int n) +{ + const struct lex_token *token = lex_source_next__ (src, n); + + if (token->first_line == 0) + return 0; + else + { + char *token_str = &src->buffer[token->token_pos - src->tail]; + return token->first_line + count_newlines (token_str, token->token_len) + 1; + } +} + +static int +count_columns (const char *s_, size_t length) +{ + const uint8_t *s = CHAR_CAST (const uint8_t *, s_); + int columns; + size_t ofs; + int mblen; + + columns = 0; + for (ofs = 0; ofs < length; ofs += mblen) + { + ucs4_t uc; + + mblen = u8_mbtouc (&uc, s + ofs, length - ofs); + if (uc != '\t') + { + int width = uc_width (uc, "UTF-8"); + if (width > 0) + columns += width; + } else - { - token_rep = xmalloc (2); - token_rep[0] = token; - token_rep[1] = '\0'; - return token_rep; - } + columns = ROUND_UP (columns + 1, 8); } - - assert (0); + + return columns + 1; +} + +static int +lex_source_get_first_column (const struct lex_source *src, int n) +{ + const struct lex_token *token = lex_source_next__ (src, n); + return count_columns (&src->buffer[token->line_pos - src->tail], + token->token_pos - token->line_pos); +} + +static int +lex_source_get_last_column (const struct lex_source *src, int n) +{ + const struct lex_token *token = lex_source_next__ (src, n); + char *start, *end, *newline; + + start = &src->buffer[token->line_pos - src->tail]; + end = &src->buffer[(token->token_pos + token->token_len) - src->tail]; + newline = memrchr (start, '\n', end - start); + if (newline != NULL) + start = newline + 1; + return count_columns (start, end - start); +} + +/* Returns the 1-based line number of the start of the syntax that represents + the token N after the current one in LEXER. Returns 0 for a T_STOP token or + if the token is drawn from a source that does not have line numbers. */ +int +lex_get_first_line_number (const struct lexer *lexer, int n) +{ + const struct lex_source *src = lex_source__ (lexer); + return src != NULL ? lex_source_get_first_line_number (src, n) : 0; } - -/* Really weird functions. */ -/* Most of the time, a `-' is a lead-in to a negative number. But - sometimes it's actually part of the syntax. If a dash can be part - of syntax then this function is called to rip it off of a - number. */ +/* Returns the 1-based line number of the end of the syntax that represents the + token N after the current one in LEXER, plus 1. 
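
   [Illustrative sketch, not part of this change: the position accessors in
   this area are what callers use to attach source locations to their own
   diagnostics.  The sketch also needs <stdio.h>.]

       static void
       print_current_token_location (const struct lexer *lexer)
       {
         const char *file = lex_get_file_name (lexer);
         int line = lex_get_first_line_number (lexer, 0);
         int column = lex_get_first_column (lexer, 0);

         if (file != NULL && line > 0)
           printf ("%s:%d.%d\n", file, line, column);
       }
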
Returns 0 for a T_STOP + token or if the token is drawn from a source that does not have line + numbers. + + Most of the time, a single token is wholly within a single line of syntax, + but there are two exceptions: a T_STRING token can be made up of multiple + segments on adjacent lines connected with "+" punctuators, and a T_NEG_NUM + token can consist of a "-" on one line followed by the number on the next. + */ +int +lex_get_last_line_number (const struct lexer *lexer, int n) +{ + const struct lex_source *src = lex_source__ (lexer); + return src != NULL ? lex_source_get_last_line_number (src, n) : 0; +} + +/* Returns the 1-based column number of the start of the syntax that represents + the token N after the current one in LEXER. Returns 0 for a T_STOP + token. + + Column numbers are measured according to the width of characters as shown in + a typical fixed-width font, in which CJK characters have width 2 and + combining characters have width 0. */ +int +lex_get_first_column (const struct lexer *lexer, int n) +{ + const struct lex_source *src = lex_source__ (lexer); + return src != NULL ? lex_source_get_first_column (src, n) : 0; +} + +/* Returns the 1-based column number of the end of the syntax that represents + the token N after the current one in LEXER, plus 1. Returns 0 for a T_STOP + token. + + Column numbers are measured according to the width of characters as shown in + a typical fixed-width font, in which CJK characters have width 2 and + combining characters have width 0. */ +int +lex_get_last_column (const struct lexer *lexer, int n) +{ + const struct lex_source *src = lex_source__ (lexer); + return src != NULL ? lex_source_get_last_column (src, n) : 0; +} + +/* Returns the name of the syntax file from which the current command is drawn. + Returns NULL for a T_STOP token or if the command's source does not have + line numbers. + + There is no version of this function that takes an N argument because + lookahead only works to the end of a command and any given command is always + within a single syntax file. */ +const char * +lex_get_file_name (const struct lexer *lexer) +{ + struct lex_source *src = lex_source__ (lexer); + return src == NULL ? NULL : src->reader->file_name; +} + +const char * +lex_get_encoding (const struct lexer *lexer) +{ + struct lex_source *src = lex_source__ (lexer); + return src == NULL ? NULL : src->reader->encoding; +} + + +/* Returns the syntax mode for the syntax file from which the current drawn is + drawn. Returns LEX_SYNTAX_AUTO for a T_STOP token or if the command's + source does not have line numbers. + + There is no version of this function that takes an N argument because + lookahead only works to the end of a command and any given command is always + within a single syntax file. */ +enum lex_syntax_mode +lex_get_syntax_mode (const struct lexer *lexer) +{ + struct lex_source *src = lex_source__ (lexer); + return src == NULL ? LEX_SYNTAX_AUTO : src->reader->syntax; +} + +/* Returns the error mode for the syntax file from which the current drawn is + drawn. Returns LEX_ERROR_TERMINAL for a T_STOP token or if the command's + source does not have line numbers. + + There is no version of this function that takes an N argument because + lookahead only works to the end of a command and any given command is always + within a single syntax file. */ +enum lex_error_mode +lex_get_error_mode (const struct lexer *lexer) +{ + struct lex_source *src = lex_source__ (lexer); + return src == NULL ? 
LEX_ERROR_TERMINAL : src->reader->error; +} + +/* If the source that LEXER is currently reading has error mode + LEX_ERROR_TERMINAL, discards all buffered input and tokens, so that the next + token to be read comes directly from whatever is next read from the stream. + + It makes sense to call this function after encountering an error in a + command entered on the console, because usually the user would prefer not to + have cascading errors. */ void -lex_negative_to_dash (void) +lex_interactive_reset (struct lexer *lexer) { - if (token == T_NEG_NUM) + struct lex_source *src = lex_source__ (lexer); + if (src != NULL && src->reader->error == LEX_ERROR_TERMINAL) { - token = T_POS_NUM; - tokval = -tokval; - ds_assign_substring (&tokstr, &tokstr, 1, SIZE_MAX); - save_token (); - token = '-'; + src->head = src->tail = 0; + src->journal_pos = src->seg_pos = src->line_pos = 0; + src->n_newlines = 0; + src->suppress_next_newline = false; + segmenter_init (&src->segmenter, segmenter_get_mode (&src->segmenter)); + while (!deque_is_empty (&src->deque)) + lex_source_pop__ (src); + lex_source_push_endcmd__ (src); } } - -/* We're not at eof any more. */ + +/* Advances past any tokens in LEXER up to a T_ENDCMD or T_STOP. */ void -lex_reset_eof (void) +lex_discard_rest_of_command (struct lexer *lexer) { - eof = 0; + while (lex_token (lexer) != T_STOP && lex_token (lexer) != T_ENDCMD) + lex_get (lexer); } -/* Skip a COMMENT command. */ +/* Discards all lookahead tokens in LEXER, then discards all input sources + until it encounters one with error mode LEX_ERROR_TERMINAL or until it + runs out of input sources. */ void -lex_skip_comment (void) +lex_discard_noninteractive (struct lexer *lexer) { - for (;;) + struct lex_source *src = lex_source__ (lexer); + + if (src != NULL) { - if (!lex_get_line ()) + while (!deque_is_empty (&src->deque)) + lex_source_pop__ (src); + + for (; src != NULL && src->reader->error != LEX_ERROR_TERMINAL; + src = lex_source__ (lexer)) + lex_source_destroy (src); + } +} + +static size_t +lex_source_max_tail__ (const struct lex_source *src) +{ + const struct lex_token *token; + size_t max_tail; + + assert (src->seg_pos >= src->line_pos); + max_tail = MIN (src->journal_pos, src->line_pos); + + /* Use the oldest token also. (We know that src->deque cannot be empty + because we are in the process of adding a new token, which is already + initialized enough to use here.) */ + token = &src->tokens[deque_back (&src->deque, 0)]; + assert (token->token_pos >= token->line_pos); + max_tail = MIN (max_tail, token->line_pos); + + return max_tail; +} + +static void +lex_source_expand__ (struct lex_source *src) +{ + if (src->head - src->tail >= src->allocated) + { + size_t max_tail = lex_source_max_tail__ (src); + if (max_tail > src->tail) { - put_token = T_STOP; - eof = 1; + /* Advance the tail, freeing up room at the head. */ + memmove (src->buffer, src->buffer + (max_tail - src->tail), + src->head - max_tail); + src->tail = max_tail; + } + else + { + /* Buffer is completely full. Expand it. */ + src->buffer = x2realloc (src->buffer, &src->allocated); + } + } + else + { + /* There's space available at the head of the buffer. Nothing to do. 
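
   [A distilled restatement of the buffer discipline used above, illustrative
   only; "window" and its members are not names used in this file.  Bytes at
   stream offsets [tail, head) live in buf[0 ... head - tail); the tail may
   advance only past data that no journal line, pending token, or current
   line still needs (MAX_TAIL below); the buffer grows only when nothing at
   all may be discarded.]

       struct window
         {
           char *buf;
           size_t allocated;        /* Bytes allocated in 'buf'. */
           size_t tail, head;       /* Stream offsets; tail <= head. */
         };

       static void
       window_make_room (struct window *w, size_t max_tail)
       {
         if (w->head - w->tail < w->allocated)
           return;                          /* Still room at the head. */
         if (max_tail > w->tail)
           {
             /* Slide the still-needed bytes down, advancing the tail. */
             memmove (w->buf, w->buf + (max_tail - w->tail),
                      w->head - max_tail);
             w->tail = max_tail;
           }
         else
           w->buf = x2realloc (w->buf, &w->allocated);   /* Full: grow. */
       }
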
*/ + } +} + +static void +lex_source_read__ (struct lex_source *src) +{ + do + { + lex_source_expand__ (src); + + size_t head_ofs = src->head - src->tail; + size_t space = src->allocated - head_ofs; + enum prompt_style prompt = segmenter_get_prompt (&src->segmenter); + size_t n = src->reader->class->read (src->reader, &src->buffer[head_ofs], + space, prompt); + assert (n <= space); + + for (char *p = &src->buffer[head_ofs]; p < &src->buffer[head_ofs + n]; + p++) + if (*p == '\0') + { + struct msg m; + m.category = MSG_C_SYNTAX; + m.severity = MSG_S_ERROR; + m.file_name = src->reader->file_name; + m.first_line = 0; + m.last_line = 0; + m.first_column = 0; + m.last_column = 0; + m.text = xstrdup ("Bad character U+0000 in input."); + msg_emit (&m); + + *p = ' '; + } + + if (n == 0) + { + /* End of input. + + Ensure that the input always ends in a new-line followed by a null + byte, as required by the segmenter library. */ + + if (src->head == src->tail + || src->buffer[src->head - src->tail - 1] != '\n') + src->buffer[src->head++ - src->tail] = '\n'; + + lex_source_expand__ (src); + src->buffer[src->head++ - src->tail] = '\0'; + return; } - - if (put_token == '.') - break; - prog = ds_end (&getl_buf); - if (dot) - break; + src->head += n; } + while (!memchr (&src->buffer[src->seg_pos - src->tail], '\n', + src->head - src->seg_pos)); +} + +static struct lex_source * +lex_source__ (const struct lexer *lexer) +{ + return (ll_is_empty (&lexer->sources) ? NULL + : ll_data (ll_head (&lexer->sources), struct lex_source, ll)); +} + +static struct substring +lex_source_get_syntax__ (const struct lex_source *src, int n0, int n1) +{ + const struct lex_token *token0 = lex_source_next__ (src, n0); + const struct lex_token *token1 = lex_source_next__ (src, MAX (n0, n1)); + size_t start = token0->token_pos; + size_t end = token1->token_pos + token1->token_len; + + return ss_buffer (&src->buffer[start - src->tail], end - start); } - -/* Private functions. */ -/* When invoked, tokstr contains a string of binary, octal, or - hex digits, according to TYPE. The string is converted to - characters having the specified values. */ static void -convert_numeric_string_to_char_string (enum string_type type) +lex_ellipsize__ (struct substring in, char *out, size_t out_size) { - const char *base_name; - int base; - int chars_per_byte; - size_t byte_cnt; - size_t i; - char *p; + size_t out_maxlen; + size_t out_len; + int mblen; - switch (type) + assert (out_size >= 16); + out_maxlen = out_size - (in.length >= out_size ? 
3 : 0) - 1; + for (out_len = 0; out_len < in.length; out_len += mblen) { - case BINARY_STRING: - base_name = _("binary"); - base = 2; - chars_per_byte = 8; - break; - case OCTAL_STRING: - base_name = _("octal"); - base = 8; - chars_per_byte = 3; - break; - case HEX_STRING: - base_name = _("hex"); - base = 16; - chars_per_byte = 2; - break; - default: - abort (); + if (in.string[out_len] == '\n' + || (in.string[out_len] == '\r' + && out_len + 1 < in.length + && in.string[out_len + 1] == '\n')) + break; + + mblen = u8_mblen (CHAR_CAST (const uint8_t *, in.string + out_len), + in.length - out_len); + if (out_len + mblen > out_maxlen) + break; } - - byte_cnt = ds_length (&tokstr) / chars_per_byte; - if (ds_length (&tokstr) % chars_per_byte) - msg (SE, _("String of %s digits has %d characters, which is not a " - "multiple of %d."), - base_name, ds_length (&tokstr), chars_per_byte); - - p = ds_c_str (&tokstr); - for (i = 0; i < byte_cnt; i++) - { - int value; - int j; - - value = 0; - for (j = 0; j < chars_per_byte; j++, p++) - { - int v; - if (*p >= '0' && *p <= '9') - v = *p - '0'; - else - { - static const char alpha[] = "abcdef"; - const char *q = strchr (alpha, tolower ((unsigned char) *p)); + memcpy (out, in.string, out_len); + strcpy (&out[out_len], out_len < in.length ? "..." : ""); +} - if (q) - v = q - alpha + 10; - else - v = base; - } +static void +lex_source_error_valist (struct lex_source *src, int n0, int n1, + const char *format, va_list args) +{ + const struct lex_token *token; + struct string s; + struct msg m; - if (v >= base) - msg (SE, _("`%c' is not a valid %s digit."), *p, base_name); + ds_init_empty (&s); - value = value * base + v; - } + token = lex_source_next__ (src, n0); + if (token->token.type == T_ENDCMD) + ds_put_cstr (&s, _("Syntax error at end of command")); + else + { + struct substring syntax = lex_source_get_syntax__ (src, n0, n1); + if (!ss_is_empty (syntax)) + { + char syntax_cstr[64]; + + lex_ellipsize__ (syntax, syntax_cstr, sizeof syntax_cstr); + ds_put_format (&s, _("Syntax error at `%s'"), syntax_cstr); + } + else + ds_put_cstr (&s, _("Syntax error")); + } - ds_c_str (&tokstr)[i] = (unsigned char) value; + if (format) + { + ds_put_cstr (&s, ": "); + ds_put_vformat (&s, format, args); } + ds_put_byte (&s, '.'); + + m.category = MSG_C_SYNTAX; + m.severity = MSG_S_ERROR; + m.file_name = src->reader->file_name; + m.first_line = lex_source_get_first_line_number (src, n0); + m.last_line = lex_source_get_last_line_number (src, n1); + m.first_column = lex_source_get_first_column (src, n0); + m.last_column = lex_source_get_last_column (src, n1); + m.text = ds_steal_cstr (&s); + msg_emit (&m); +} + +static void PRINTF_FORMAT (2, 3) +lex_get_error (struct lex_source *src, const char *format, ...) +{ + va_list args; + int n; + + va_start (args, format); + + n = deque_count (&src->deque) - 1; + lex_source_error_valist (src, n, n, format, args); + lex_source_pop_front (src); - ds_truncate (&tokstr, byte_cnt); + va_end (args); } -/* Parses a string from the input buffer into tokstr. The input - buffer pointer prog must point to the initial single or double - quote. TYPE indicates the type of string to be parsed. - Returns token type. */ -static int -parse_string (enum string_type type) +/* Attempts to append an additional token into SRC's deque, reading more from + the underlying lex_reader if necessary.. 
Returns true if successful, false + if the deque already represents (a suffix of) the whole lex_reader's + contents, */ +static bool +lex_source_get__ (const struct lex_source *src_) { - /* Accumulate the entire string, joining sections indicated by + - signs. */ + struct lex_source *src = CONST_CAST (struct lex_source *, src_); + if (src->eof) + return false; + + /* State maintained while scanning tokens. Usually we only need a single + state, but scanner_push() can return SCAN_SAVE to indicate that the state + needs to be saved and possibly restored later with SCAN_BACK. */ + struct state + { + struct segmenter segmenter; + enum segment_type last_segment; + int newlines; /* Number of newlines encountered so far. */ + /* Maintained here so we can update lex_source's similar members when we + finish. */ + size_t line_pos; + size_t seg_pos; + }; + + /* Initialize state. */ + struct state state = + { + .segmenter = src->segmenter, + .newlines = 0, + .seg_pos = src->seg_pos, + .line_pos = src->line_pos, + }; + struct state saved = state; + + /* Append a new token to SRC and initialize it. */ + struct lex_token *token = lex_push_token__ (src); + struct scanner scanner; + scanner_init (&scanner, &token->token); + token->line_pos = src->line_pos; + token->token_pos = src->seg_pos; + if (src->reader->line_number > 0) + token->first_line = src->reader->line_number + src->n_newlines; + else + token->first_line = 0; + + /* Extract segments and pass them through the scanner until we obtain a + token. */ for (;;) { - /* Single or double quote. */ - int c = *prog++; - - /* Accumulate section. */ - for (;;) - { - /* Check end of line. */ - if (*prog == '\0') - { - msg (SE, _("Unterminated string constant.")); - goto finish; - } - - /* Double quote characters to embed them in strings. */ - if (*prog == c) - { - if (prog[1] == c) - prog++; - else - break; - } - - ds_putc (&tokstr, *prog++); - } - prog++; + /* Extract a segment. */ + const char *segment = &src->buffer[state.seg_pos - src->tail]; + size_t seg_maxlen = src->head - state.seg_pos; + enum segment_type type; + int seg_len = segmenter_push (&state.segmenter, segment, seg_maxlen, + &type); + if (seg_len < 0) + { + /* The segmenter needs more input to produce a segment. */ + lex_source_read__ (src); + continue; + } - /* Skip whitespace after final quote mark. */ - if (eof) - break; - for (;;) - { - while (isspace ((unsigned char) *prog)) - prog++; - if (*prog) - break; + /* Update state based on the segment. */ + state.last_segment = type; + state.seg_pos += seg_len; + if (type == SEG_NEWLINE) + { + state.newlines++; + state.line_pos = state.seg_pos; + } - if (dot) - goto finish; + /* Pass the segment into the scanner and try to get a token out. */ + enum scan_result result = scanner_push (&scanner, type, + ss_buffer (segment, seg_len), + &token->token); + if (result == SCAN_SAVE) + saved = state; + else if (result == SCAN_BACK) + { + state = saved; + break; + } + else if (result == SCAN_DONE) + break; + } - if (!lex_get_line ()) - goto finish; - } + /* If we've reached the end of a line, or the end of a command, then pass + the line to the output engine as a syntax text item. 
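
   [Illustrative sketch, not part of this change: the segment-then-scan
   pipeline that this function implements is also available in a
   self-contained form through the string_lexer used by lex_match_phrase()
   above.  The sketch lists the tokens of a syntax string and assumes
   well-formed input, since malformed input yields scan-error pseudo-tokens
   instead; it also needs <stdio.h>.]

       static void
       list_tokens (const char *syntax)
       {
         struct string_lexer slex;
         struct token token;

         string_lexer_init (&slex, syntax, SEG_MODE_INTERACTIVE);
         while (string_lexer_next (&slex, &token))
           if (token.type != SCAN_SKIP)
             {
               printf ("%s\n", token_type_to_name (token.type));
               token_destroy (&token);
             }
       }
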
*/ + int n_lines = state.newlines; + if (state.last_segment == SEG_END_COMMAND && !src->suppress_next_newline) + { + n_lines++; + src->suppress_next_newline = true; + } + else if (n_lines > 0 && src->suppress_next_newline) + { + n_lines--; + src->suppress_next_newline = false; + } + for (int i = 0; i < n_lines; i++) + { + const char *line = &src->buffer[src->journal_pos - src->tail]; + const char *newline = rawmemchr (line, '\n'); + size_t line_len = newline - line; + if (line_len > 0 && line[line_len - 1] == '\r') + line_len--; - /* Skip plus sign. */ - if (*prog != '+') - break; - prog++; + char *syntax = malloc (line_len + 2); + memcpy (syntax, line, line_len); + syntax[line_len] = '\n'; + syntax[line_len + 1] = '\0'; - /* Skip whitespace after plus sign. */ - if (eof) - break; - for (;;) - { - while (isspace ((unsigned char) *prog)) - prog++; - if (*prog) - break; - - if (dot) - goto finish; - - if (!lex_get_line ()) - { - msg (SE, _("Unexpected end of file in string concatenation.")); - goto finish; - } - } + text_item_submit (text_item_create_nocopy (TEXT_ITEM_SYNTAX, syntax)); - /* Ensure that a valid string follows. */ - if (*prog != '\'' && *prog != '"') - { - msg (SE, _("String expected following `+'.")); - goto finish; - } + src->journal_pos += newline - line + 1; } - /* We come here when we've finished concatenating all the string sections - into one large string. */ -finish: - if (type != CHARACTER_STRING) - convert_numeric_string_to_char_string (type); + token->token_len = state.seg_pos - src->seg_pos; - if (ds_length (&tokstr) > 255) + src->segmenter = state.segmenter; + src->seg_pos = state.seg_pos; + src->line_pos = state.line_pos; + src->n_newlines += state.newlines; + + switch (token->token.type) { - msg (SE, _("String exceeds 255 characters in length (%d characters)."), - ds_length (&tokstr)); - ds_truncate (&tokstr, 255); - } - - { - /* FIXME. */ - size_t i; - int warned = 0; + default: + break; - for (i = 0; i < ds_length (&tokstr); i++) - if (ds_c_str (&tokstr)[i] == 0) - { - if (!warned) - { - msg (SE, _("Sorry, literal strings may not contain null " - "characters. Replacing with spaces.")); - warned = 1; - } - ds_c_str (&tokstr)[i] = ' '; - } - } + case T_STOP: + token->token.type = T_ENDCMD; + src->eof = true; + break; - return T_STRING; -} - -#if DUMP_TOKENS -/* Reads one token from the lexer and writes a textual representation - on stdout for debugging purposes. 
*/ -static void -dump_token (void) -{ - { - const char *curfn; - int curln; - - getl_location (&curfn, &curln); - if (curfn) - fprintf (stderr, "%s:%d\t", curfn, curln); - } - - switch (token) - { - case T_ID: - fprintf (stderr, "ID\t%s\n", tokid); + case SCAN_BAD_HEX_LENGTH: + lex_get_error (src, _("String of hex digits has %d characters, which " + "is not a multiple of 2"), + (int) token->token.number); break; - case T_POS_NUM: - case T_NEG_NUM: - fprintf (stderr, "NUM\t%f\n", tokval); + case SCAN_BAD_HEX_DIGIT: + case SCAN_BAD_UNICODE_DIGIT: + lex_get_error (src, _("`%c' is not a valid hex digit"), + (int) token->token.number); break; - case T_STRING: - fprintf (stderr, "STRING\t\"%s\"\n", ds_c_str (&tokstr)); + case SCAN_BAD_UNICODE_LENGTH: + lex_get_error (src, _("Unicode string contains %d bytes, which is " + "not in the valid range of 1 to 8 bytes"), + (int) token->token.number); break; - case T_STOP: - fprintf (stderr, "STOP\n"); + case SCAN_BAD_UNICODE_CODE_POINT: + lex_get_error (src, _("U+%04X is not a valid Unicode code point"), + (int) token->token.number); break; - case T_EXP: - fprintf (stderr, "MISC\tEXP\""); + case SCAN_EXPECTED_QUOTE: + lex_get_error (src, _("Unterminated string constant")); break; - case 0: - fprintf (stderr, "MISC\tEOF\n"); + case SCAN_EXPECTED_EXPONENT: + lex_get_error (src, _("Missing exponent following `%s'"), + token->token.string.string); break; - default: - if (token >= T_FIRST_KEYWORD && token <= T_LAST_KEYWORD) - fprintf (stderr, "KEYWORD\t%s\n", lex_token_name (token)); - else - fprintf (stderr, "PUNCT\t%c\n", token); + case SCAN_UNEXPECTED_DOT: + lex_get_error (src, _("Unexpected `.' in middle of command")); + break; + + case SCAN_UNEXPECTED_CHAR: + { + char c_name[16]; + lex_get_error (src, _("Bad character %s in input"), + uc_name (token->token.number, c_name)); + } + break; + + case SCAN_SKIP: + lex_source_pop_front (src); break; } + + return true; } -#endif /* DUMP_TOKENS */ + +static void +lex_source_push_endcmd__ (struct lex_source *src) +{ + struct lex_token *token = lex_push_token__ (src); + token->token.type = T_ENDCMD; + token->token_pos = 0; + token->token_len = 0; + token->line_pos = 0; + token->first_line = 0; +} + +static struct lex_source * +lex_source_create (struct lex_reader *reader) +{ + struct lex_source *src; + enum segmenter_mode mode; + + src = xzalloc (sizeof *src); + src->reader = reader; + + if (reader->syntax == LEX_SYNTAX_AUTO) + mode = SEG_MODE_AUTO; + else if (reader->syntax == LEX_SYNTAX_INTERACTIVE) + mode = SEG_MODE_INTERACTIVE; + else if (reader->syntax == LEX_SYNTAX_BATCH) + mode = SEG_MODE_BATCH; + else + NOT_REACHED (); + segmenter_init (&src->segmenter, mode); + + src->tokens = deque_init (&src->deque, 4, sizeof *src->tokens); + + lex_source_push_endcmd__ (src); + + return src; +} + +static void +lex_source_destroy (struct lex_source *src) +{ + char *file_name = src->reader->file_name; + char *encoding = src->reader->encoding; + if (src->reader->class->destroy != NULL) + src->reader->class->destroy (src->reader); + free (file_name); + free (encoding); + free (src->buffer); + while (!deque_is_empty (&src->deque)) + lex_source_pop__ (src); + free (src->tokens); + ll_remove (&src->ll); + free (src); +} + +struct lex_file_reader + { + struct lex_reader reader; + struct u8_istream *istream; + }; + +static struct lex_reader_class lex_file_reader_class; + +/* Creates and returns a new lex_reader that will read from file FILE_NAME (or + from stdin if FILE_NAME is "-"). 
The file is expected to be encoded with + ENCODING, which should take one of the forms accepted by + u8_istream_for_file(). SYNTAX and ERROR become the syntax mode and error + mode of the new reader, respectively. + + Returns a null pointer if FILE_NAME cannot be opened. */ +struct lex_reader * +lex_reader_for_file (const char *file_name, const char *encoding, + enum lex_syntax_mode syntax, + enum lex_error_mode error) +{ + struct lex_file_reader *r; + struct u8_istream *istream; + + istream = (!strcmp(file_name, "-") + ? u8_istream_for_fd (encoding, STDIN_FILENO) + : u8_istream_for_file (encoding, file_name, O_RDONLY)); + if (istream == NULL) + { + msg (ME, _("Opening `%s': %s."), file_name, strerror (errno)); + return NULL; + } + + r = xmalloc (sizeof *r); + lex_reader_init (&r->reader, &lex_file_reader_class); + r->reader.syntax = syntax; + r->reader.error = error; + r->reader.file_name = xstrdup (file_name); + r->reader.encoding = encoding ? xstrdup (encoding) : NULL; + r->reader.line_number = 1; + r->istream = istream; + + return &r->reader; +} + +static struct lex_file_reader * +lex_file_reader_cast (struct lex_reader *r) +{ + return UP_CAST (r, struct lex_file_reader, reader); +} + +static size_t +lex_file_read (struct lex_reader *r_, char *buf, size_t n, + enum prompt_style prompt_style UNUSED) +{ + struct lex_file_reader *r = lex_file_reader_cast (r_); + ssize_t n_read = u8_istream_read (r->istream, buf, n); + if (n_read < 0) + { + msg (ME, _("Error reading `%s': %s."), r_->file_name, strerror (errno)); + return 0; + } + return n_read; +} + +static void +lex_file_close (struct lex_reader *r_) +{ + struct lex_file_reader *r = lex_file_reader_cast (r_); + + if (u8_istream_fileno (r->istream) != STDIN_FILENO) + { + if (u8_istream_close (r->istream) != 0) + msg (ME, _("Error closing `%s': %s."), r_->file_name, strerror (errno)); + } + else + u8_istream_free (r->istream); + + free (r); +} + +static struct lex_reader_class lex_file_reader_class = + { + lex_file_read, + lex_file_close + }; + +struct lex_string_reader + { + struct lex_reader reader; + struct substring s; + size_t offset; + }; + +static struct lex_reader_class lex_string_reader_class; + +/* Creates and returns a new lex_reader for the contents of S, which must be + encoded in the given ENCODING. The new reader takes ownership of S and will free it + with ss_dealloc() when it is closed. */ +struct lex_reader * +lex_reader_for_substring_nocopy (struct substring s, const char *encoding) +{ + struct lex_string_reader *r; + + r = xmalloc (sizeof *r); + lex_reader_init (&r->reader, &lex_string_reader_class); + r->reader.syntax = LEX_SYNTAX_AUTO; + r->reader.encoding = encoding ? xstrdup (encoding) : NULL; + r->s = s; + r->offset = 0; + + return &r->reader; +} + +/* Creates and returns a new lex_reader for a copy of null-terminated string S, + which must be encoded in ENCODING. The caller retains ownership of S. */ +struct lex_reader * +lex_reader_for_string (const char *s, const char *encoding) +{ + struct substring ss; + ss_alloc_substring (&ss, ss_cstr (s)); + return lex_reader_for_substring_nocopy (ss, encoding); +} + +/* Formats FORMAT as a printf()-like format string and creates and returns a + new lex_reader for the formatted result. */ +struct lex_reader * +lex_reader_for_format (const char *format, const char *encoding, ...) 
+{ + struct lex_reader *r; + va_list args; + + va_start (args, encoding); + r = lex_reader_for_substring_nocopy (ss_cstr (xvasprintf (format, args)), encoding); + va_end (args); + + return r; +} + +static struct lex_string_reader * +lex_string_reader_cast (struct lex_reader *r) +{ + return UP_CAST (r, struct lex_string_reader, reader); +} + +static size_t +lex_string_read (struct lex_reader *r_, char *buf, size_t n, + enum prompt_style prompt_style UNUSED) +{ + struct lex_string_reader *r = lex_string_reader_cast (r_); + size_t chunk; + + chunk = MIN (n, r->s.length - r->offset); + memcpy (buf, r->s.string + r->offset, chunk); + r->offset += chunk; + + return chunk; +} + +static void +lex_string_close (struct lex_reader *r_) +{ + struct lex_string_reader *r = lex_string_reader_cast (r_); + + ss_dealloc (&r->s); + free (r); +} + +static struct lex_reader_class lex_string_reader_class = + { + lex_string_read, + lex_string_close + };
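
The string- and file-based readers added above are the usual entry points into the new lexer. As a rough illustration -- a sketch, not part of this commit -- the code below shows how a caller might plug one of these readers into a lexer created with lex_create(). It assumes the lex_append(), lex_get(), and lex_token() declarations from the companion language/lexer/lexer.h header, which this hunk does not show; treat those names as assumptions rather than guarantees of the exact API.

/* Minimal usage sketch.  Assumes lex_append(), lex_get(), and lex_token()
   from language/lexer/lexer.h (not shown in this hunk). */

#include "language/lexer/lexer.h"
#include "language/lexer/token.h"

static void
run_syntax_string (const char *syntax, const char *encoding)
{
  struct lexer *lexer = lex_create ();

  /* lex_reader_for_string() copies SYNTAX, so the caller keeps ownership;
     lex_reader_for_substring_nocopy() would take ownership instead. */
  lex_append (lexer, lex_reader_for_string (syntax, encoding));

  /* Pull tokens until the reader is exhausted (T_STOP). */
  for (lex_get (lexer); lex_token (lexer) != T_STOP; lex_get (lexer))
    {
      /* ...hand the current token to a parser here... */
    }

  lex_destroy (lexer);
}

A file-backed source would be set up the same way, substituting lex_reader_for_file (file_name, encoding, LEX_SYNTAX_AUTO, LEX_ERROR_CONTINUE), which returns a null pointer if the file cannot be opened, so the caller should check the result before appending it.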