X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=src%2Flanguage%2Flexer%2Flexer.c;h=21309f5f975f688693e0ea09534cd750cf29dab9;hb=5581c901aba8df3b31f6406d7fff09e26a9e7fc1;hp=cde5f58ac46d95a7c727a3e639ddf2cd2ce8f95a;hpb=7c3ed885759bae94d80064c24135083e597873e1;p=pspp

diff --git a/src/language/lexer/lexer.c b/src/language/lexer/lexer.c
index cde5f58ac4..21309f5f97 100644
--- a/src/language/lexer/lexer.c
+++ b/src/language/lexer/lexer.c
@@ -109,6 +109,8 @@ struct lexer
   };
 
 static struct lex_source *lex_source__ (const struct lexer *);
+static struct substring lex_source_get_syntax__ (const struct lex_source *,
+                                                 int n0, int n1);
 static const struct lex_token *lex_next__ (const struct lexer *, int n);
 static void lex_source_push_endcmd__ (struct lex_source *);
 
@@ -196,7 +198,7 @@ lex_push_token__ (struct lex_source *src)
     src->tokens = deque_expand (&src->deque, src->tokens,
                                 sizeof *src->tokens);
 
   token = &src->tokens[deque_push_front (&src->deque)];
-  token_init (&token->token);
+  token->token = (struct token) { .type = T_STOP };
   return token;
 }
@@ -850,9 +852,7 @@ lex_next__ (const struct lexer *lexer_, int n)
     return lex_source_next__ (src, n);
   else
     {
-      static const struct lex_token stop_token =
-        { TOKEN_INITIALIZER (T_STOP, 0.0, ""), 0, 0, 0, 0 };
-
+      static const struct lex_token stop_token = { .token = { .type = T_STOP } };
       return &stop_token;
     }
 }
@@ -935,6 +935,18 @@ lex_next_tokss (const struct lexer *lexer, int n)
   return lex_next (lexer, n)->string;
 }
 
+/* Returns the text of the syntax in tokens N0 ahead of the current one,
+   through N1 ahead of the current one, inclusive. (For example, if N0 and N1
+   are both zero, this requests the syntax for the current token.) The caller
+   must not modify or free the returned string. The syntax is encoded in UTF-8
+   and in the original form supplied to the lexer so that, for example, it may
+   include comments, spaces, and new-lines if it spans multiple tokens. */
+struct substring
+lex_next_representation (const struct lexer *lexer, int n0, int n1)
+{
+  return lex_source_get_syntax__ (lex_source__ (lexer), n0, n1);
+}
+
 static bool
 lex_tokens_match (const struct token *actual, const struct token *expected)
 {
@@ -974,7 +986,7 @@ lex_match_phrase (struct lexer *lexer, const char *s)
   int i;
 
   i = 0;
-  string_lexer_init (&slex, s, strlen (s), SEG_MODE_INTERACTIVE);
+  string_lexer_init (&slex, s, strlen (s), SEG_MODE_INTERACTIVE, true);
   while (string_lexer_next (&slex, &token))
     if (token.type != SCAN_SKIP)
       {
@@ -1143,6 +1155,34 @@ lex_get_file_name (const struct lexer *lexer)
   return src == NULL ? NULL : src->reader->file_name;
 }
 
+/* Returns a newly allocated msg_location for the syntax that represents tokens
+   with 0-based offsets N0...N1, inclusive, from the current token. The caller
+   must eventually free the location (with msg_location_destroy()). */
+struct msg_location *
+lex_get_location (const struct lexer *lexer, int n0, int n1)
+{
+  struct msg_location *loc = lex_get_lines (lexer, n0, n1);
+  loc->first_column = lex_get_first_column (lexer, n0);
+  loc->last_column = lex_get_last_column (lexer, n1);
+  return loc;
+}
+
+/* Returns a newly allocated msg_location for the syntax that represents tokens
+   with 0-based offsets N0...N1, inclusive, from the current token. The
+   location only covers the tokens' lines, not the columns. The caller must
+   eventually free the location (with msg_location_destroy()). */
+struct msg_location *
+lex_get_lines (const struct lexer *lexer, int n0, int n1)
+{
+  struct msg_location *loc = xmalloc (sizeof *loc);
+  *loc = (struct msg_location) {
+    .file_name = xstrdup_if_nonnull (lex_get_file_name (lexer)),
+    .first_line = lex_get_first_line_number (lexer, n0),
+    .last_line = lex_get_last_line_number (lexer, n1),
+  };
+  return loc;
+}
+
 const char *
 lex_get_encoding (const struct lexer *lexer)
 {
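
Note: the new public entry points above, lex_next_representation() and lex_get_location()/lex_get_lines(), recover a token range's raw syntax and build a freshly allocated message location for it. Below is a minimal sketch of how a caller inside the PSPP tree might combine them; the dump_current_syntax() helper, the printf() formatting, and the include paths are illustrative assumptions rather than part of this change, and only the lexer calls themselves come from the hunks above.

    #include <stdio.h>

    #include "language/lexer/lexer.h"
    #include "libpspp/message.h"
    #include "libpspp/str.h"

    /* Hypothetical helper, for illustration only: print the raw syntax and
       the source location of the current token and the one after it. */
    static void
    dump_current_syntax (const struct lexer *lexer)
    {
      /* Raw UTF-8 syntax for tokens 0..1, exactly as supplied to the lexer;
         it is a view into the lexer's buffer, so do not modify or free it. */
      struct substring syntax = lex_next_representation (lexer, 0, 1);

      /* Newly allocated location covering the same token range. */
      struct msg_location *loc = lex_get_location (lexer, 0, 1);

      printf ("%s:%d.%d-%d.%d: `%.*s'\n",
              loc->file_name ? loc->file_name : "-",
              loc->first_line, loc->first_column,
              loc->last_line, loc->last_column,
              (int) syntax.length, syntax.string);

      /* The caller owns the location and must free it. */
      msg_location_destroy (loc);
    }
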
@@ -1196,7 +1236,8 @@ lex_interactive_reset (struct lexer *lexer)
       src->journal_pos = src->seg_pos = src->line_pos = 0;
       src->n_newlines = 0;
       src->suppress_next_newline = false;
-      segmenter_init (&src->segmenter, segmenter_get_mode (&src->segmenter));
+      src->segmenter = segmenter_init (segmenter_get_mode (&src->segmenter),
+                                       false);
       while (!deque_is_empty (&src->deque))
         lex_source_pop__ (src);
       lex_source_push_endcmd__ (src);
@@ -1310,49 +1351,22 @@ lex_source__ (const struct lexer *lexer)
 }
 
 static struct substring
-lex_source_get_syntax__ (const struct lex_source *src, int n0, int n1)
+lex_tokens_get_syntax__ (const struct lex_source *src,
+                         const struct lex_token *token0,
+                         const struct lex_token *token1)
 {
-  const struct lex_token *token0 = lex_source_next__ (src, n0);
-  const struct lex_token *token1 = lex_source_next__ (src, MAX (n0, n1));
   size_t start = token0->token_pos;
   size_t end = token1->token_pos + token1->token_len;
 
   return ss_buffer (&src->buffer[start - src->tail], end - start);
 }
 
-static void
-lex_ellipsize__ (struct substring in, char *out, size_t out_size)
+static struct substring
+lex_source_get_syntax__ (const struct lex_source *src, int n0, int n1)
 {
-  size_t out_maxlen;
-  size_t out_len;
-  int mblen;
-
-  assert (out_size >= 16);
-  out_maxlen = out_size - 1;
-  if (in.length > out_maxlen - 3)
-    out_maxlen -= 3;
-
-  for (out_len = 0; out_len < in.length; out_len += mblen)
-    {
-      if (in.string[out_len] == '\n'
-          || in.string[out_len] == '\0'
-          || (in.string[out_len] == '\r'
-              && out_len + 1 < in.length
-              && in.string[out_len + 1] == '\n'))
-        break;
-
-      mblen = u8_mblen (CHAR_CAST (const uint8_t *, in.string + out_len),
-                        in.length - out_len);
-
-      if (mblen < 0)
-        break;
-
-      if (out_len + mblen > out_maxlen)
-        break;
-    }
-
-  memcpy (out, in.string, out_len);
-  strcpy (&out[out_len], out_len < in.length ? "..." : "");
+  return lex_tokens_get_syntax__ (src,
+                                  lex_source_next__ (src, n0),
+                                  lex_source_next__ (src, MAX (n0, n1)));
 }
 
 static void
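
Note: the refactoring above splits syntax extraction in two: lex_tokens_get_syntax__() slices the source buffer between a pair of tokens, and lex_source_get_syntax__() maps the offsets n0/n1 onto that pair (using MAX so a reversed range still covers both). The standalone sketch below restates only the slicing rule, with a made-up token type and offsets; it ignores the src->tail adjustment that the real code applies because its buffer does not start at offset 0.

    #include <stdio.h>

    /* Toy stand-in for the token fields used above; not PSPP's real type. */
    struct toy_token { size_t pos, len; };

    /* Same slicing rule as lex_tokens_get_syntax__(): everything from the
       start of T0 through the end of T1, including whatever lies between. */
    static void
    print_syntax (const char *buffer, struct toy_token t0, struct toy_token t1)
    {
      size_t start = t0.pos;
      size_t end = t1.pos + t1.len;
      printf ("`%.*s'\n", (int) (end - start), buffer + start);
    }

    int
    main (void)
    {
      const char *buffer = "LIST /* all */ VARIABLES.";
      struct toy_token list = { .pos = 0, .len = 4 };   /* "LIST" */
      struct toy_token vars = { .pos = 15, .len = 9 };  /* "VARIABLES" */

      print_syntax (buffer, list, list);   /* Just the first token. */
      print_syntax (buffer, list, vars);   /* Both tokens, comment included. */
      return 0;
    }
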
@@ -1374,7 +1388,7 @@ lex_source_error_valist (struct lex_source *src, int n0, int n1,
     {
       char syntax_cstr[64];
 
-      lex_ellipsize__ (syntax, syntax_cstr, sizeof syntax_cstr);
+      str_ellipsize (syntax, syntax_cstr, sizeof syntax_cstr);
       ds_put_format (&s, _("Syntax error at `%s'"), syntax_cstr);
     }
   else
@@ -1389,32 +1403,40 @@ lex_source_error_valist (struct lex_source *src, int n0, int n1,
   if (ds_last (&s) != '.')
     ds_put_byte (&s, '.');
 
-  struct msg m = {
-    .category = MSG_C_SYNTAX,
-    .severity = MSG_S_ERROR,
-    .file_name = src->reader->file_name,
+  struct msg_location *location = xmalloc (sizeof *location);
+  *location = (struct msg_location) {
+    .file_name = xstrdup_if_nonnull (src->reader->file_name),
     .first_line = lex_source_get_first_line_number (src, n0),
     .last_line = lex_source_get_last_line_number (src, n1),
     .first_column = lex_source_get_first_column (src, n0),
     .last_column = lex_source_get_last_column (src, n1),
+  };
+  struct msg *m = xmalloc (sizeof *m);
+  *m = (struct msg) {
+    .category = MSG_C_SYNTAX,
+    .severity = MSG_S_ERROR,
+    .location = location,
     .text = ds_steal_cstr (&s),
   };
-  msg_emit (&m);
+  msg_emit (m);
 }
 
-static void PRINTF_FORMAT (2, 3)
-lex_get_error (struct lex_source *src, const char *format, ...)
+static void PRINTF_FORMAT (4, 5)
+lex_source_error (struct lex_source *src, int n0, int n1,
+                  const char *format, ...)
 {
   va_list args;
-  int n;
-
   va_start (args, format);
+  lex_source_error_valist (src, n0, n1, format, args);
+  va_end (args);
+}
 
-  n = deque_count (&src->deque) - 1;
-  lex_source_error_valist (src, n, n, format, args);
+static void
+lex_get_error (struct lex_source *src, const char *s)
+{
+  int n = deque_count (&src->deque) - 1;
+  lex_source_error (src, n, n, "%s", s);
   lex_source_pop_front (src);
-
-  va_end (args);
 }
 
 /* Attempts to append an additional token into SRC's deque, reading more from
@@ -1566,43 +1588,16 @@ lex_source_get__ (const struct lex_source *src_)
       break;
 
     case SCAN_BAD_HEX_LENGTH:
-      lex_get_error (src, _("String of hex digits has %d characters, which "
-                            "is not a multiple of 2"),
-                     (int) token->token.number);
-      break;
-
     case SCAN_BAD_HEX_DIGIT:
     case SCAN_BAD_UNICODE_DIGIT:
-      lex_get_error (src, _("`%c' is not a valid hex digit"),
-                     (int) token->token.number);
-      break;
-
    case SCAN_BAD_UNICODE_LENGTH:
-      lex_get_error (src, _("Unicode string contains %d bytes, which is "
-                            "not in the valid range of 1 to 8 bytes"),
-                     (int) token->token.number);
-      break;
-
    case SCAN_BAD_UNICODE_CODE_POINT:
-      lex_get_error (src, _("U+%04X is not a valid Unicode code point"),
-                     (int) token->token.number);
-      break;
-
    case SCAN_EXPECTED_QUOTE:
-      lex_get_error (src, _("Unterminated string constant"));
-      break;
-
    case SCAN_EXPECTED_EXPONENT:
-      lex_get_error (src, _("Missing exponent following `%s'"),
-                     token->token.string.string);
-      break;
-
    case SCAN_UNEXPECTED_CHAR:
-      {
-        char c_name[16];
-        lex_get_error (src, _("Bad character %s in input"),
-                       uc_name (token->token.number, c_name));
-      }
+      char *msg = scan_token_to_error (&token->token);
+      lex_get_error (src, msg);
+      free (msg);
       break;
 
     case SCAN_SKIP:
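
Note: the error-path changes above move to heap-allocated msg and msg_location objects that are handed off to msg_emit(), and funnel every scanner error through scan_token_to_error(). The sketch below isolates that ownership convention; emit_demo_error() is hypothetical and the include paths are assumptions, so treat it as an illustration of the pattern in lex_source_error_valist() rather than a real PSPP function.

    #include "libpspp/message.h"
    #include "libpspp/str.h"
    #include "gl/xalloc.h"

    /* Hypothetical emitter, shown only to restate the ownership convention
       used by lex_source_error_valist() above: the msg, its location, and
       the text are all heap-allocated and handed off to msg_emit(). */
    static void
    emit_demo_error (const char *file_name, int first_line, int last_line,
                     char *text)                /* Takes ownership of TEXT. */
    {
      struct msg_location *location = xmalloc (sizeof *location);
      *location = (struct msg_location) {
        .file_name = xstrdup_if_nonnull (file_name),
        .first_line = first_line,
        .last_line = last_line,
      };

      struct msg *m = xmalloc (sizeof *m);
      *m = (struct msg) {
        .category = MSG_C_SYNTAX,
        .severity = MSG_S_ERROR,
        .location = location,
        .text = text,
      };
      msg_emit (m);               /* msg_emit() now owns M and LOCATION. */
    }
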
@@ -1627,12 +1622,12 @@ lex_source_push_endcmd__ (struct lex_source *src)
 static struct lex_source *
 lex_source_create (struct lex_reader *reader)
 {
-  struct lex_source *src;
-
-  src = xzalloc (sizeof *src);
-  src->reader = reader;
-  segmenter_init (&src->segmenter, reader->syntax);
-  src->tokens = deque_init (&src->deque, 4, sizeof *src->tokens);
+  struct lex_source *src = xmalloc (sizeof *src);
+  *src = (struct lex_source) {
+    .reader = reader,
+    .segmenter = segmenter_init (reader->syntax, false),
+    .tokens = deque_init (&src->deque, 4, sizeof *src->tokens),
+  };
   lex_source_push_endcmd__ (src);
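
Note: the final hunk replaces xzalloc() plus field-by-field assignment with xmalloc() plus a designated-initializer compound literal, with segmenter_init() now returning the segmenter by value. The standalone sketch below shows why the explicit zeroing step can be dropped: members omitted from a compound literal are zero-initialized by the language. It uses plain malloc()/calloc() and a toy struct so that it compiles outside the PSPP tree.

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-in for struct lex_source, only to show the construction idiom. */
    struct toy_source
      {
        const char *name;
        int head, tail;
        double weight;
      };

    int
    main (void)
    {
      /* Old idiom: zero the whole object, then fill in fields one by one. */
      struct toy_source *a = calloc (1, sizeof *a);
      a->name = "old";

      /* New idiom from lex_source_create(): plain allocation plus assignment
         of a compound literal.  Members not named (head, tail, weight) are
         zero-initialized by the language, so no separate zeroing is needed. */
      struct toy_source *b = malloc (sizeof *b);
      *b = (struct toy_source) { .name = "new" };

      printf ("%s %d %g / %s %d %g\n",
              a->name, a->head, a->weight, b->name, b->head, b->weight);
      free (a);
      free (b);
      return 0;
    }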