X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=src%2Flanguage%2Flexer%2Flexer.c;h=b69bc9a5d0df1c12772afa777b4fb43a4d10bb1c;hb=0a18cc59b123294641d8e2bafc3d579be1f59e77;hp=bbe16cbe17a2c02305f631e8a04cfed5355a805b;hpb=693ac90cdac91d29870f303b88763a3685b3f341;p=pspp diff --git a/src/language/lexer/lexer.c b/src/language/lexer/lexer.c index bbe16cbe17..b69bc9a5d0 100644 --- a/src/language/lexer/lexer.c +++ b/src/language/lexer/lexer.c @@ -1,5 +1,5 @@ /* PSPP - a program for statistical analysis. - Copyright (C) 1997-9, 2000, 2006, 2009, 2010, 2011, 2013 Free Software Foundation, Inc. + Copyright (C) 1997-9, 2000, 2006, 2009, 2010, 2011, 2013, 2016 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -30,7 +30,6 @@ #include #include -#include "data/file-name.h" #include "language/command.h" #include "language/lexer/scan.h" #include "language/lexer/segment.h" @@ -45,7 +44,7 @@ #include "libpspp/str.h" #include "libpspp/u8-istream.h" #include "output/journal.h" -#include "output/text-item.h" +#include "output/output-item.h" #include "gl/c-ctype.h" #include "gl/minmax.h" @@ -131,7 +130,9 @@ lex_reader_init (struct lex_reader *reader, reader->syntax = LEX_SYNTAX_AUTO; reader->error = LEX_ERROR_CONTINUE; reader->file_name = NULL; + reader->encoding = NULL; reader->line_number = 0; + reader->eof = false; } /* Frees any file name already in READER and replaces it by a copy of @@ -140,7 +141,7 @@ void lex_reader_set_file_name (struct lex_reader *reader, const char *file_name) { free (reader->file_name); - reader->file_name = file_name != NULL ? xstrdup (file_name) : NULL; + reader->file_name = xstrdup_if_nonnull (file_name); } /* Creates and returns a new lexer. */ @@ -184,7 +185,7 @@ lex_append (struct lexer *lexer, struct lex_reader *reader) ll_push_tail (&lexer->sources, &lex_source_create (reader)->ll); } -/* Advacning. */ +/* Advancing. */ static struct lex_token * lex_push_token__ (struct lex_source *src) @@ -268,23 +269,40 @@ lex_next_error (struct lexer *lexer, int n0, int n1, const char *format, ...) va_end (args); } -/* Prints a syntax error message saying that OPTION0 or one of the other - strings following it, up to the first NULL, is expected. */ +/* Prints a syntax error message saying that one of the strings provided as + varargs, up to the first NULL, is expected. */ void -lex_error_expecting (struct lexer *lexer, const char *option0, ...) +(lex_error_expecting) (struct lexer *lexer, ...) { - enum { MAX_OPTIONS = 8 }; - const char *options[MAX_OPTIONS + 1]; va_list args; - int n; - va_start (args, option0); - options[0] = option0; - n = 0; - while (n + 1 < MAX_OPTIONS && options[n] != NULL) - options[++n] = va_arg (args, const char *); + va_start (args, lexer); + lex_error_expecting_valist (lexer, args); va_end (args); +} + +/* Prints a syntax error message saying that one of the options provided in + ARGS, up to the first NULL, is expected. 
*/ +void +lex_error_expecting_valist (struct lexer *lexer, va_list args) +{ + enum { MAX_OPTIONS = 9 }; + const char *options[MAX_OPTIONS]; + int n = 0; + while (n < MAX_OPTIONS) + { + const char *option = va_arg (args, const char *); + if (!option) + break; + options[n++] = option; + } + lex_error_expecting_array (lexer, options, n); +} + +void +lex_error_expecting_array (struct lexer *lexer, const char **options, size_t n) +{ switch (n) { case 0: @@ -333,7 +351,7 @@ lex_error_expecting (struct lexer *lexer, const char *option0, ...) break; default: - NOT_REACHED (); + lex_error (lexer, NULL); } } @@ -427,14 +445,14 @@ lex_end_of_command (struct lexer *lexer) /* Returns true if the current token is a number. */ bool -lex_is_number (struct lexer *lexer) +lex_is_number (const struct lexer *lexer) { return lex_next_is_number (lexer, 0); } /* Returns true if the current token is a string. */ bool -lex_is_string (struct lexer *lexer) +lex_is_string (const struct lexer *lexer) { return lex_next_is_string (lexer, 0); } @@ -442,14 +460,14 @@ lex_is_string (struct lexer *lexer) /* Returns the value of the current token, which must be a floating point number. */ double -lex_number (struct lexer *lexer) +lex_number (const struct lexer *lexer) { return lex_next_number (lexer, 0); } /* Returns true iff the current token is an integer. */ bool -lex_is_integer (struct lexer *lexer) +lex_is_integer (const struct lexer *lexer) { return lex_next_is_integer (lexer, 0); } @@ -457,7 +475,7 @@ lex_is_integer (struct lexer *lexer) /* Returns the value of the current token, which must be an integer. */ long -lex_integer (struct lexer *lexer) +lex_integer (const struct lexer *lexer) { return lex_next_integer (lexer, 0); } @@ -471,7 +489,7 @@ lex_integer (struct lexer *lexer) /* Returns true if the token N ahead of the current token is a number. */ bool -lex_next_is_number (struct lexer *lexer, int n) +lex_next_is_number (const struct lexer *lexer, int n) { enum token_type next_token = lex_next_token (lexer, n); return next_token == T_POS_NUM || next_token == T_NEG_NUM; @@ -479,7 +497,7 @@ lex_next_is_number (struct lexer *lexer, int n) /* Returns true if the token N ahead of the current token is a string. */ bool -lex_next_is_string (struct lexer *lexer, int n) +lex_next_is_string (const struct lexer *lexer, int n) { return lex_next_token (lexer, n) == T_STRING; } @@ -487,7 +505,7 @@ lex_next_is_string (struct lexer *lexer, int n) /* Returns the value of the token N ahead of the current token, which must be a floating point number. */ double -lex_next_number (struct lexer *lexer, int n) +lex_next_number (const struct lexer *lexer, int n) { assert (lex_next_is_number (lexer, n)); return lex_next_tokval (lexer, n); @@ -495,7 +513,7 @@ lex_next_number (struct lexer *lexer, int n) /* Returns true if the token N ahead of the current token is an integer. */ bool -lex_next_is_integer (struct lexer *lexer, int n) +lex_next_is_integer (const struct lexer *lexer, int n) { double value; @@ -509,7 +527,7 @@ lex_next_is_integer (struct lexer *lexer, int n) /* Returns the value of the token N ahead of the current token, which must be an integer. 
*/ long -lex_next_integer (struct lexer *lexer, int n) +lex_next_integer (const struct lexer *lexer, int n) { assert (lex_next_is_integer (lexer, n)); return lex_next_tokval (lexer, n); @@ -587,7 +605,7 @@ lex_force_match_id (struct lexer *lexer, const char *identifier) return true; else { - lex_error_expecting (lexer, identifier, NULL_SENTINEL); + lex_error_expecting (lexer, identifier); return false; } } @@ -604,9 +622,16 @@ lex_force_match (struct lexer *lexer, enum token_type type) } else { - char *s = xasprintf ("`%s'", token_type_to_string (type)); - lex_error_expecting (lexer, s, NULL_SENTINEL); - free (s); + const char *type_string = token_type_to_string (type); + if (type_string) + { + char *s = xasprintf ("`%s'", type_string); + lex_error_expecting (lexer, s); + free (s); + } + else + lex_error_expecting (lexer, token_type_to_name (type)); + return false; } } @@ -637,7 +662,7 @@ lex_force_string (struct lexer *lexer) bool lex_force_string_or_id (struct lexer *lexer) { - return lex_is_integer (lexer) || lex_force_string (lexer); + return lex_token (lexer) == T_ID || lex_force_string (lexer); } /* If the current token is an integer, does nothing and returns true. @@ -869,7 +894,7 @@ lex_match_phrase (struct lexer *lexer, const char *s) int i; i = 0; - string_lexer_init (&slex, s, SEG_MODE_INTERACTIVE); + string_lexer_init (&slex, s, strlen (s), SEG_MODE_INTERACTIVE); while (string_lexer_next (&slex, &token)) if (token.type != SCAN_SKIP) { @@ -1038,6 +1063,14 @@ lex_get_file_name (const struct lexer *lexer) return src == NULL ? NULL : src->reader->file_name; } +const char * +lex_get_encoding (const struct lexer *lexer) +{ + struct lex_source *src = lex_source__ (lexer); + return src == NULL ? NULL : src->reader->encoding; +} + + /* Returns the syntax mode for the syntax file from which the current drawn is drawn. Returns LEX_SYNTAX_AUTO for a T_STOP token or if the command's source does not have line numbers. @@ -1166,33 +1199,20 @@ lex_source_read__ (struct lex_source *src) { do { - size_t head_ofs; - size_t space; - size_t n; - lex_source_expand__ (src); - head_ofs = src->head - src->tail; - space = src->allocated - head_ofs; - n = src->reader->class->read (src->reader, &src->buffer[head_ofs], - space, - segmenter_get_prompt (&src->segmenter)); + size_t head_ofs = src->head - src->tail; + size_t space = src->allocated - head_ofs; + enum prompt_style prompt = segmenter_get_prompt (&src->segmenter); + size_t n = src->reader->class->read (src->reader, &src->buffer[head_ofs], + space, prompt); assert (n <= space); if (n == 0) { - /* End of input. - - Ensure that the input always ends in a new-line followed by a null - byte, as required by the segmenter library. */ - - if (src->head == src->tail - || src->buffer[src->head - src->tail - 1] != '\n') - src->buffer[src->head++ - src->tail] = '\n'; - + /* End of input. */ + src->reader->eof = true; lex_source_expand__ (src); - src->buffer[src->head++ - src->tail] = '\0'; - return; } @@ -1228,10 +1248,14 @@ lex_ellipsize__ (struct substring in, char *out, size_t out_size) int mblen; assert (out_size >= 16); - out_maxlen = out_size - (in.length >= out_size ? 
3 : 0) - 1; + out_maxlen = out_size - 1; + if (in.length > out_maxlen - 3) + out_maxlen -= 3; + for (out_len = 0; out_len < in.length; out_len += mblen) { if (in.string[out_len] == '\n' + || in.string[out_len] == '\0' || (in.string[out_len] == '\r' && out_len + 1 < in.length && in.string[out_len + 1] == '\n')) @@ -1239,6 +1263,10 @@ lex_ellipsize__ (struct substring in, char *out, size_t out_size) mblen = u8_mblen (CHAR_CAST (const uint8_t *, in.string + out_len), in.length - out_len); + + if (mblen < 0) + break; + if (out_len + mblen > out_maxlen) break; } @@ -1253,7 +1281,6 @@ lex_source_error_valist (struct lex_source *src, int n0, int n1, { const struct lex_token *token; struct string s; - struct msg m; ds_init_empty (&s); @@ -1281,14 +1308,16 @@ lex_source_error_valist (struct lex_source *src, int n0, int n1, } ds_put_byte (&s, '.'); - m.category = MSG_C_SYNTAX; - m.severity = MSG_S_ERROR; - m.file_name = src->reader->file_name; - m.first_line = lex_source_get_first_line_number (src, n0); - m.last_line = lex_source_get_last_line_number (src, n1); - m.first_column = lex_source_get_first_column (src, n0); - m.last_column = lex_source_get_last_column (src, n1); - m.text = ds_steal_cstr (&s); + struct msg m = { + .category = MSG_C_SYNTAX, + .severity = MSG_S_ERROR, + .file_name = src->reader->file_name, + .first_line = lex_source_get_first_line_number (src, n0), + .last_line = lex_source_get_last_line_number (src, n1), + .first_column = lex_source_get_first_column (src, n0), + .last_column = lex_source_get_last_column (src, n1), + .text = ds_steal_cstr (&s), + }; msg_emit (&m); } @@ -1307,37 +1336,44 @@ lex_get_error (struct lex_source *src, const char *format, ...) va_end (args); } +/* Attempts to append an additional token into SRC's deque, reading more from + the underlying lex_reader if necessary.. Returns true if successful, false + if the deque already represents (a suffix of) the whole lex_reader's + contents, */ static bool lex_source_get__ (const struct lex_source *src_) { struct lex_source *src = CONST_CAST (struct lex_source *, src_); + if (src->eof) + return false; + /* State maintained while scanning tokens. Usually we only need a single + state, but scanner_push() can return SCAN_SAVE to indicate that the state + needs to be saved and possibly restored later with SCAN_BACK. */ struct state { struct segmenter segmenter; enum segment_type last_segment; - int newlines; + int newlines; /* Number of newlines encountered so far. */ + /* Maintained here so we can update lex_source's similar members when we + finish. */ size_t line_pos; size_t seg_pos; }; - struct state state, saved; - enum scan_result result; - struct scanner scanner; - struct lex_token *token; - int n_lines; - int i; - - if (src->eof) - return false; - - state.segmenter = src->segmenter; - state.newlines = 0; - state.seg_pos = src->seg_pos; - state.line_pos = src->line_pos; - saved = state; + /* Initialize state. */ + struct state state = + { + .segmenter = src->segmenter, + .newlines = 0, + .seg_pos = src->seg_pos, + .line_pos = src->line_pos, + }; + struct state saved = state; - token = lex_push_token__ (src); + /* Append a new token to SRC and initialize it. 
*/ + struct lex_token *token = lex_push_token__ (src); + struct scanner scanner; scanner_init (&scanner, &token->token); token->line_pos = src->line_pos; token->token_pos = src->seg_pos; @@ -1346,22 +1382,25 @@ lex_source_get__ (const struct lex_source *src_) else token->first_line = 0; + /* Extract segments and pass them through the scanner until we obtain a + token. */ for (;;) { + /* Extract a segment. */ + const char *segment = &src->buffer[state.seg_pos - src->tail]; + size_t seg_maxlen = src->head - state.seg_pos; enum segment_type type; - const char *segment; - size_t seg_maxlen; - int seg_len; - - segment = &src->buffer[state.seg_pos - src->tail]; - seg_maxlen = src->head - state.seg_pos; - seg_len = segmenter_push (&state.segmenter, segment, seg_maxlen, &type); + int seg_len = segmenter_push (&state.segmenter, segment, seg_maxlen, + src->reader->eof, &type); if (seg_len < 0) { + /* The segmenter needs more input to produce a segment. */ + assert (!src->reader->eof); lex_source_read__ (src); continue; } + /* Update state based on the segment. */ state.last_segment = type; state.seg_pos += seg_len; if (type == SEG_NEWLINE) @@ -1370,8 +1409,10 @@ lex_source_get__ (const struct lex_source *src_) state.line_pos = state.seg_pos; } - result = scanner_push (&scanner, type, ss_buffer (segment, seg_len), - &token->token); + /* Pass the segment into the scanner and try to get a token out. */ + enum scan_result result = scanner_push (&scanner, type, + ss_buffer (segment, seg_len), + &token->token); if (result == SCAN_SAVE) saved = state; else if (result == SCAN_BACK) @@ -1383,7 +1424,9 @@ lex_source_get__ (const struct lex_source *src_) break; } - n_lines = state.newlines; + /* If we've reached the end of a line, or the end of a command, then pass + the line to the output engine as a syntax text item. */ + int n_lines = state.newlines; if (state.last_segment == SEG_END_COMMAND && !src->suppress_next_newline) { n_lines++; @@ -1394,27 +1437,34 @@ lex_source_get__ (const struct lex_source *src_) n_lines--; src->suppress_next_newline = false; } - for (i = 0; i < n_lines; i++) + for (int i = 0; i < n_lines; i++) { - const char *newline; - const char *line; - size_t line_len; - char *syntax; - - line = &src->buffer[src->journal_pos - src->tail]; - newline = rawmemchr (line, '\n'); - line_len = newline - line; - if (line_len > 0 && line[line_len - 1] == '\r') - line_len--; - - syntax = malloc (line_len + 2); - memcpy (syntax, line, line_len); - syntax[line_len] = '\n'; - syntax[line_len + 1] = '\0'; - - text_item_submit (text_item_create_nocopy (TEXT_ITEM_SYNTAX, syntax)); - - src->journal_pos += newline - line + 1; + /* Beginning of line. */ + const char *line = &src->buffer[src->journal_pos - src->tail]; + + /* Calculate line length, including \n or \r\n end-of-line if present. + + We use src->head even though that may be beyond what we've actually + converted to tokens (which is only through state.line_pos). That's + because, if we're emitting the line due to SEG_END_COMMAND, we want to + take the whole line through the newline, not just through the '.'. */ + size_t max_len = src->head - src->journal_pos; + const char *newline = memchr (line, '\n', max_len); + size_t line_len = newline ? newline - line + 1 : max_len; + + /* Calculate line length excluding end-of-line. */ + size_t copy_len = line_len; + if (copy_len > 0 && line[copy_len - 1] == '\n') + copy_len--; + if (copy_len > 0 && line[copy_len - 1] == '\r') + copy_len--; + + /* Submit the line as syntax. 
*/ + output_item_submit (text_item_create_nocopy (TEXT_ITEM_SYNTAX, + xmemdup0 (line, copy_len), + NULL)); + + src->journal_pos += line_len; } token->token_len = state.seg_pos - src->seg_pos; @@ -1527,9 +1577,11 @@ static void lex_source_destroy (struct lex_source *src) { char *file_name = src->reader->file_name; + char *encoding = src->reader->encoding; if (src->reader->class->destroy != NULL) src->reader->class->destroy (src->reader); free (file_name); + free (encoding); free (src->buffer); while (!deque_is_empty (&src->deque)) lex_source_pop__ (src); @@ -1542,7 +1594,6 @@ struct lex_file_reader { struct lex_reader reader; struct u8_istream *istream; - char *file_name; }; static struct lex_reader_class lex_file_reader_class; @@ -1576,9 +1627,9 @@ lex_reader_for_file (const char *file_name, const char *encoding, r->reader.syntax = syntax; r->reader.error = error; r->reader.file_name = xstrdup (file_name); + r->reader.encoding = xstrdup_if_nonnull (encoding); r->reader.line_number = 1; r->istream = istream; - r->file_name = xstrdup (file_name); return &r->reader; } @@ -1597,7 +1648,7 @@ lex_file_read (struct lex_reader *r_, char *buf, size_t n, ssize_t n_read = u8_istream_read (r->istream, buf, n); if (n_read < 0) { - msg (ME, _("Error reading `%s': %s."), r->file_name, strerror (errno)); + msg (ME, _("Error reading `%s': %s."), r_->file_name, strerror (errno)); return 0; } return n_read; @@ -1611,12 +1662,11 @@ lex_file_close (struct lex_reader *r_) if (u8_istream_fileno (r->istream) != STDIN_FILENO) { if (u8_istream_close (r->istream) != 0) - msg (ME, _("Error closing `%s': %s."), r->file_name, strerror (errno)); + msg (ME, _("Error closing `%s': %s."), r_->file_name, strerror (errno)); } else u8_istream_free (r->istream); - free (r->file_name); free (r); } @@ -1636,16 +1686,17 @@ struct lex_string_reader static struct lex_reader_class lex_string_reader_class; /* Creates and returns a new lex_reader for the contents of S, which must be - encoded in UTF-8. The new reader takes ownership of S and will free it + encoded in the given ENCODING. The new reader takes ownership of S and will free it with ss_dealloc() when it is closed. */ struct lex_reader * -lex_reader_for_substring_nocopy (struct substring s) +lex_reader_for_substring_nocopy (struct substring s, const char *encoding) { struct lex_string_reader *r; r = xmalloc (sizeof *r); lex_reader_init (&r->reader, &lex_string_reader_class); r->reader.syntax = LEX_SYNTAX_AUTO; + r->reader.encoding = xstrdup_if_nonnull (encoding); r->s = s; r->offset = 0; @@ -1653,25 +1704,25 @@ lex_reader_for_substring_nocopy (struct substring s) } /* Creates and returns a new lex_reader for a copy of null-terminated string S, - which must be encoded in UTF-8. The caller retains ownership of S. */ + which must be encoded in ENCODING. The caller retains ownership of S. */ struct lex_reader * -lex_reader_for_string (const char *s) +lex_reader_for_string (const char *s, const char *encoding) { struct substring ss; ss_alloc_substring (&ss, ss_cstr (s)); - return lex_reader_for_substring_nocopy (ss); + return lex_reader_for_substring_nocopy (ss, encoding); } /* Formats FORMAT as a printf()-like format string and creates and returns a new lex_reader for the formatted result. */ struct lex_reader * -lex_reader_for_format (const char *format, ...) +lex_reader_for_format (const char *format, const char *encoding, ...) 
{ struct lex_reader *r; va_list args; - va_start (args, format); - r = lex_reader_for_substring_nocopy (ss_cstr (xvasprintf (format, args))); + va_start (args, encoding); + r = lex_reader_for_substring_nocopy (ss_cstr (xvasprintf (format, args)), encoding); va_end (args); return r;
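
For readers following the interface changes in the hunks above, here is a small caller-side sketch of how the reworked lexer API is used: lex_reader_for_string() now takes the encoding of the syntax string, and lex_error_expecting() call sites no longer write a NULL_SENTINEL terminator (the diff's own updated callers, e.g. lex_force_match_id(), call it the same way). This sketch is written for this page, not part of the commit; the helpers parse_my_subcommand() and run_inline_syntax() and the "UTF-8" literal are illustrative assumptions, and only the lex_* calls themselves come from the diff.

#include <stdbool.h>
#include "language/lexer/lexer.h"

/* Hypothetical subcommand parser: accepts ASCENDING or DESCENDING and
   reports an "expecting ..." syntax error otherwise. */
static bool
parse_my_subcommand (struct lexer *lexer)
{
  if (lex_match_id (lexer, "ASCENDING") || lex_match_id (lexer, "DESCENDING"))
    return true;

  /* Before this commit the call ended in a sentinel:
       lex_error_expecting (lexer, "ASCENDING", "DESCENDING", NULL_SENTINEL);
     After it, the sentinel is no longer written at call sites. */
  lex_error_expecting (lexer, "ASCENDING", "DESCENDING");
  return false;
}

/* Hypothetical driver: queues an inline syntax string, now tagged with an
   explicit source encoding. */
static void
run_inline_syntax (struct lexer *lexer)
{
  lex_append (lexer, lex_reader_for_string ("MYCOMMAND ASCENDING.", "UTF-8"));
}

The encoding moves into struct lex_reader itself (copied with xstrdup_if_nonnull() and freed in lex_source_destroy()), which is what lets the new lex_get_encoding() report it for the current source and lets the file reader drop its duplicate file_name field.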