1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 1997-9, 2000, 2006, 2009, 2010, 2011, 2013, 2016 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 #include "language/lexer/lexer.h"
33 #include "language/command.h"
34 #include "language/lexer/macro.h"
35 #include "language/lexer/scan.h"
36 #include "language/lexer/segment.h"
37 #include "language/lexer/token.h"
38 #include "libpspp/assertion.h"
39 #include "libpspp/cast.h"
40 #include "libpspp/deque.h"
41 #include "libpspp/i18n.h"
42 #include "libpspp/ll.h"
43 #include "libpspp/message.h"
44 #include "libpspp/misc.h"
45 #include "libpspp/str.h"
46 #include "libpspp/u8-istream.h"
47 #include "output/journal.h"
48 #include "output/output-item.h"
50 #include "gl/c-ctype.h"
51 #include "gl/minmax.h"
52 #include "gl/xalloc.h"
53 #include "gl/xmemdup0.h"
56 #define _(msgid) gettext (msgid)
57 #define N_(msgid) msgid
/* A token within a lex_source. */

    /* The regular token information. */
    /* NOTE(review): the 'struct lex_token' header and its 'struct token token;'
       member are elided in this excerpt. */

    /* Location of token in terms of the lex_source's buffer.
       src->tail <= line_pos <= token_pos <= src->head. */
    size_t token_pos;           /* Start of token. */
    size_t token_len;           /* Length of source for token in bytes. */
    size_t line_pos;            /* Start of line containing token_pos. */
    int first_line;             /* Line number at token_pos. */
    /* NOTE(review): a 'from_macro' flag also belongs to this struct (it is
       assigned in lex_push_token__ and read by lex_next_is_from_macro) but its
       declaration is not visible here. */
/* A source of tokens, corresponding to a syntax file.

   This is conceptually a lex_reader wrapped with everything needed to convert
   its UTF-8 bytes into tokens. */
    struct ll ll;               /* In lexer's list of sources. */
    struct lex_reader *reader;  /* Provides the raw UTF-8 bytes. */
    struct segmenter segmenter; /* Splits the byte stream into segments. */
    bool eof;                   /* True if T_STOP was read from 'reader'. */

    /* Buffer of UTF-8 bytes. */
    /* NOTE(review): the 'char *buffer;' member itself is elided in this
       excerpt; 'buffer' is indexed as buffer[pos - tail] throughout. */
    size_t allocated;           /* Number of bytes allocated. */
    size_t tail;                /* &buffer[0] offset into UTF-8 source. */
    size_t head;                /* &buffer[head - tail] offset into source. */

    /* Positions in source file, tail <= pos <= head for each member here. */
    size_t journal_pos;         /* First byte not yet output to journal. */
    size_t seg_pos;             /* First byte not yet scanned as token. */
    size_t line_pos;            /* First byte of line containing seg_pos. */

    int n_newlines;             /* Number of new-lines up to seg_pos. */
    bool suppress_next_newline; /* Skip the next newline in journal output
                                   (presumably; confirm against callers). */

    /* Parsed tokens, kept in a ring buffer for lookahead. */
    struct deque deque;         /* Indexes into 'tokens'. */
    struct lex_token *tokens;   /* Lookahead tokens for parser. */
/* Forward declarations for the lex_source helpers defined below. */
static struct lex_source *lex_source_create (struct lexer *,
                                             struct lex_reader *);
static void lex_source_destroy (struct lex_source *);

/* Members of 'struct lexer', the manager of all sources and macros.
   NOTE(review): the 'struct lexer' header is elided in this excerpt. */
    struct ll_list sources;     /* Contains "struct lex_source"s. */
    struct macro_set *macros;   /* Macros visible to all sources. */

static struct lex_source *lex_source__ (const struct lexer *);
/* NOTE(review): the remaining parameters of this prototype are elided. */
static struct substring lex_source_get_syntax__ (const struct lex_source *,
static const struct lex_token *lex_next__ (const struct lexer *, int n);
static void lex_source_push_endcmd__ (struct lex_source *);
static void lex_source_pop__ (struct lex_source *);
static bool lex_source_get (const struct lex_source *);
static void lex_source_error_valist (struct lex_source *, int n0, int n1,
                                     const char *format, va_list)
     PRINTF_FORMAT (4, 0);
/* NOTE(review): the remaining parameters of this prototype are elided. */
static const struct lex_token *lex_source_next__ (const struct lex_source *,
130 /* Initializes READER with the specified CLASS and otherwise some reasonable
131 defaults. The caller should fill in the others members as desired. */
133 lex_reader_init (struct lex_reader *reader,
134 const struct lex_reader_class *class)
136 reader->class = class;
137 reader->syntax = SEG_MODE_AUTO;
138 reader->error = LEX_ERROR_CONTINUE;
139 reader->file_name = NULL;
140 reader->encoding = NULL;
141 reader->line_number = 0;
145 /* Frees any file name already in READER and replaces it by a copy of
146 FILE_NAME, or if FILE_NAME is null then clears any existing name. */
148 lex_reader_set_file_name (struct lex_reader *reader, const char *file_name)
150 free (reader->file_name);
151 reader->file_name = xstrdup_if_nonnull (file_name);
154 /* Creates and returns a new lexer. */
158 struct lexer *lexer = xmalloc (sizeof *lexer);
159 *lexer = (struct lexer) {
160 .sources = LL_INITIALIZER (lexer->sources),
161 .macros = macro_set_create (),
166 /* Destroys LEXER. */
168 lex_destroy (struct lexer *lexer)
172 struct lex_source *source, *next;
174 ll_for_each_safe (source, next, struct lex_source, ll, &lexer->sources)
175 lex_source_destroy (source);
176 macro_set_destroy (lexer->macros);
181 /* Adds M to LEXER's set of macros. M replaces any existing macro with the
182 same name. Takes ownership of M. */
184 lex_define_macro (struct lexer *lexer, struct macro *m)
186 macro_set_add (lexer->macros, m);
189 /* Inserts READER into LEXER so that the next token read by LEXER comes from
190 READER. Before the caller, LEXER must either be empty or at a T_ENDCMD
193 lex_include (struct lexer *lexer, struct lex_reader *reader)
195 assert (ll_is_empty (&lexer->sources) || lex_token (lexer) == T_ENDCMD);
196 ll_push_head (&lexer->sources, &lex_source_create (lexer, reader)->ll);
199 /* Appends READER to LEXER, so that it will be read after all other current
200 readers have already been read. */
202 lex_append (struct lexer *lexer, struct lex_reader *reader)
204 ll_push_tail (&lexer->sources, &lex_source_create (lexer, reader)->ll);
209 static struct lex_token *
210 lex_push_token__ (struct lex_source *src)
212 struct lex_token *token;
214 if (deque_is_full (&src->deque))
215 src->tokens = deque_expand (&src->deque, src->tokens, sizeof *src->tokens);
217 token = &src->tokens[deque_push_front (&src->deque)];
218 token->token = (struct token) { .type = T_STOP };
219 token->from_macro = false;
224 lex_source_pop__ (struct lex_source *src)
226 token_uninit (&src->tokens[deque_pop_back (&src->deque)].token);
230 lex_source_pop_front (struct lex_source *src)
232 token_uninit (&src->tokens[deque_pop_front (&src->deque)].token);
235 /* Advances LEXER to the next token, consuming the current token. */
237 lex_get (struct lexer *lexer)
239 struct lex_source *src;
241 src = lex_source__ (lexer);
245 if (!deque_is_empty (&src->deque))
246 lex_source_pop__ (src);
248 while (deque_is_empty (&src->deque))
249 if (!lex_source_get (src))
251 lex_source_destroy (src);
252 src = lex_source__ (lexer);
/* Issuing errors. */

/* Prints a syntax error message containing the current token and
   given message FORMAT (if non-null). */
void
lex_error (struct lexer *lexer, const char *format, ...)
{
  va_list args;

  va_start (args, format);
  lex_next_error_valist (lexer, 0, 0, format, args);
  va_end (args);
}

/* Prints a syntax error message containing the current token and
   given message FORMAT (if non-null), taking arguments from ARGS. */
void
lex_error_valist (struct lexer *lexer, const char *format, va_list args)
{
  lex_next_error_valist (lexer, 0, 0, format, args);
}

/* Prints a syntax error message containing the tokens N0 through N1 ahead of
   the current one and given message FORMAT (if non-null). */
void
lex_next_error (struct lexer *lexer, int n0, int n1, const char *format, ...)
{
  va_list args;

  va_start (args, format);
  lex_next_error_valist (lexer, n0, n1, format, args);
  va_end (args);
}

/* Prints a syntax error message saying that one of the strings provided as
   varargs, up to the first NULL, is expected.  (Parenthesized name defeats
   any function-like macro of the same name.) */
void
(lex_error_expecting) (struct lexer *lexer, ...)
{
  va_list args;

  va_start (args, lexer);
  lex_error_expecting_valist (lexer, args);
  va_end (args);
}
/* Prints a syntax error message saying that one of the options provided in
   ARGS, up to the first NULL, is expected. */
void
lex_error_expecting_valist (struct lexer *lexer, va_list args)
{
  enum { MAX_OPTIONS = 9 };
  const char *options[MAX_OPTIONS];
  size_t n = 0;

  /* Collect up to MAX_OPTIONS strings; a null argument terminates the
     list. */
  while (n < MAX_OPTIONS)
    {
      const char *option = va_arg (args, const char *);
      if (!option)
        break;

      options[n++] = option;
    }
  lex_error_expecting_array (lexer, options, n);
}
/* Prints a syntax error message saying that one of the N strings in OPTIONS
   is expected.  English list phrasing is selected per count because the
   translatable messages cannot be assembled piecewise. */
void
lex_error_expecting_array (struct lexer *lexer, const char **options, size_t n)
{
  switch (n)
    {
    case 0:
      lex_error (lexer, NULL);
      break;

    case 1:
      lex_error (lexer, _("expecting %s"), options[0]);
      break;

    case 2:
      lex_error (lexer, _("expecting %s or %s"), options[0], options[1]);
      break;

    case 3:
      lex_error (lexer, _("expecting %s, %s, or %s"), options[0], options[1],
                 options[2]);
      break;

    case 4:
      lex_error (lexer, _("expecting %s, %s, %s, or %s"),
                 options[0], options[1], options[2], options[3]);
      break;

    case 5:
      lex_error (lexer, _("expecting %s, %s, %s, %s, or %s"),
                 options[0], options[1], options[2], options[3], options[4]);
      break;

    case 6:
      lex_error (lexer, _("expecting %s, %s, %s, %s, %s, or %s"),
                 options[0], options[1], options[2], options[3], options[4],
                 options[5]);
      break;

    case 7:
      lex_error (lexer, _("expecting %s, %s, %s, %s, %s, %s, or %s"),
                 options[0], options[1], options[2], options[3], options[4],
                 options[5], options[6]);
      break;

    case 8:
      lex_error (lexer, _("expecting %s, %s, %s, %s, %s, %s, %s, or %s"),
                 options[0], options[1], options[2], options[3], options[4],
                 options[5], options[6], options[7]);
      break;

    default:
      /* Too many options to enumerate usefully. */
      lex_error (lexer, NULL);
      break;
    }
}
378 /* Reports an error to the effect that subcommand SBC may only be specified
381 This function does not take a lexer as an argument or use lex_error(),
382 because the result would ordinarily just be redundant: "Syntax error at
383 SUBCOMMAND: Subcommand SUBCOMMAND may only be specified once.", which does
384 not help the user find the error. */
386 lex_sbc_only_once (const char *sbc)
388 msg (SE, _("Subcommand %s may only be specified once."), sbc);
391 /* Reports an error to the effect that subcommand SBC is missing.
393 This function does not take a lexer as an argument or use lex_error(),
394 because a missing subcommand can normally be detected only after the whole
395 command has been parsed, and so lex_error() would always report "Syntax
396 error at end of command", which does not help the user find the error. */
398 lex_sbc_missing (const char *sbc)
400 msg (SE, _("Required subcommand %s was not specified."), sbc);
403 /* Reports an error to the effect that specification SPEC may only be specified
404 once within subcommand SBC. */
406 lex_spec_only_once (struct lexer *lexer, const char *sbc, const char *spec)
408 lex_error (lexer, _("%s may only be specified once within subcommand %s"),
412 /* Reports an error to the effect that specification SPEC is missing within
415 lex_spec_missing (struct lexer *lexer, const char *sbc, const char *spec)
417 lex_error (lexer, _("Required %s specification missing from %s subcommand"),
421 /* Prints a syntax error message containing the current token and
422 given message MESSAGE (if non-null). */
424 lex_next_error_valist (struct lexer *lexer, int n0, int n1,
425 const char *format, va_list args)
427 struct lex_source *src = lex_source__ (lexer);
430 lex_source_error_valist (src, n0, n1, format, args);
436 ds_put_format (&s, _("Syntax error at end of input"));
439 ds_put_cstr (&s, ": ");
440 ds_put_vformat (&s, format, args);
442 ds_put_byte (&s, '.');
443 msg (SE, "%s", ds_cstr (&s));
448 /* Checks that we're at end of command.
449 If so, returns a successful command completion code.
450 If not, flags a syntax error and returns an error command
453 lex_end_of_command (struct lexer *lexer)
455 if (lex_token (lexer) != T_ENDCMD && lex_token (lexer) != T_STOP)
457 lex_error (lexer, _("expecting end of command"));
/* Token testing functions. */

/* Returns true if the current token is a number. */
bool
lex_is_number (const struct lexer *lexer)
{
  return lex_next_is_number (lexer, 0);
}

/* Returns true if the current token is a string. */
bool
lex_is_string (const struct lexer *lexer)
{
  return lex_next_is_string (lexer, 0);
}

/* Returns the value of the current token, which must be a
   floating point number. */
double
lex_number (const struct lexer *lexer)
{
  return lex_next_number (lexer, 0);
}

/* Returns true iff the current token is an integer. */
bool
lex_is_integer (const struct lexer *lexer)
{
  return lex_next_is_integer (lexer, 0);
}

/* Returns the value of the current token, which must be an
   integer. */
long
lex_integer (const struct lexer *lexer)
{
  return lex_next_integer (lexer, 0);
}
/* Token testing functions with lookahead.

   A value of 0 for N as an argument to any of these functions refers to the
   current token.  Lookahead is limited to the current command.  Any N greater
   than the number of tokens remaining in the current command will be treated
   as referring to a T_ENDCMD token. */

/* Returns true if the token N ahead of the current token is a number. */
bool
lex_next_is_number (const struct lexer *lexer, int n)
{
  return token_is_number (lex_next (lexer, n));
}

/* Returns true if the token N ahead of the current token is a string. */
bool
lex_next_is_string (const struct lexer *lexer, int n)
{
  return token_is_string (lex_next (lexer, n));
}

/* Returns the value of the token N ahead of the current token, which must be
   a floating point number. */
double
lex_next_number (const struct lexer *lexer, int n)
{
  return token_number (lex_next (lexer, n));
}

/* Returns true if the token N ahead of the current token is an integer. */
bool
lex_next_is_integer (const struct lexer *lexer, int n)
{
  return token_is_integer (lex_next (lexer, n));
}

/* Returns the value of the token N ahead of the current token, which must be
   an integer. */
long
lex_next_integer (const struct lexer *lexer, int n)
{
  return token_integer (lex_next (lexer, n));
}
547 /* Token matching functions. */
549 /* If the current token has the specified TYPE, skips it and returns true.
550 Otherwise, returns false. */
552 lex_match (struct lexer *lexer, enum token_type type)
554 if (lex_token (lexer) == type)
563 /* If the current token matches IDENTIFIER, skips it and returns true.
564 IDENTIFIER may be abbreviated to its first three letters. Otherwise,
567 IDENTIFIER must be an ASCII string. */
569 lex_match_id (struct lexer *lexer, const char *identifier)
571 return lex_match_id_n (lexer, identifier, 3);
574 /* If the current token is IDENTIFIER, skips it and returns true. IDENTIFIER
575 may be abbreviated to its first N letters. Otherwise, returns false.
577 IDENTIFIER must be an ASCII string. */
579 lex_match_id_n (struct lexer *lexer, const char *identifier, size_t n)
581 if (lex_token (lexer) == T_ID
582 && lex_id_match_n (ss_cstr (identifier), lex_tokss (lexer), n))
591 /* If the current token is integer X, skips it and returns true. Otherwise,
594 lex_match_int (struct lexer *lexer, int x)
596 if (lex_is_integer (lexer) && lex_integer (lexer) == x)
605 /* Forced matches. */
607 /* If this token is IDENTIFIER, skips it and returns true. IDENTIFIER may be
608 abbreviated to its first 3 letters. Otherwise, reports an error and returns
611 IDENTIFIER must be an ASCII string. */
613 lex_force_match_id (struct lexer *lexer, const char *identifier)
615 if (lex_match_id (lexer, identifier))
619 lex_error_expecting (lexer, identifier);
624 /* If the current token has the specified TYPE, skips it and returns true.
625 Otherwise, reports an error and returns false. */
627 lex_force_match (struct lexer *lexer, enum token_type type)
629 if (lex_token (lexer) == type)
636 const char *type_string = token_type_to_string (type);
639 char *s = xasprintf ("`%s'", type_string);
640 lex_error_expecting (lexer, s);
644 lex_error_expecting (lexer, token_type_to_name (type));
650 /* If the current token is a string, does nothing and returns true.
651 Otherwise, reports an error and returns false. */
653 lex_force_string (struct lexer *lexer)
655 if (lex_is_string (lexer))
659 lex_error (lexer, _("expecting string"));
664 /* If the current token is a string or an identifier, does nothing and returns
665 true. Otherwise, reports an error and returns false.
667 This is meant for use in syntactic situations where we want to encourage the
668 user to supply a quoted string, but for compatibility we also accept
669 identifiers. (One example of such a situation is file names.) Therefore,
670 the error message issued when the current token is wrong only says that a
671 string is expected and doesn't mention that an identifier would also be
674 lex_force_string_or_id (struct lexer *lexer)
676 return lex_token (lexer) == T_ID || lex_force_string (lexer);
679 /* If the current token is an integer, does nothing and returns true.
680 Otherwise, reports an error and returns false. */
682 lex_force_int (struct lexer *lexer)
684 if (lex_is_integer (lexer))
688 lex_error (lexer, _("expecting integer"));
/* If the current token is an integer in the range MIN...MAX (inclusive), does
   nothing and returns true.  Otherwise, reports an error and returns false.
   If NAME is nonnull, then it is used in the error message.

   NOTE(review): several lines of this function (braces, 'if (name)' guards,
   and some branches) are elided in this excerpt; only the visible statements
   are reproduced below. */
lex_force_int_range (struct lexer *lexer, const char *name, long min, long max)
  /* Classify the current token once, up front. */
  bool is_integer = lex_is_integer (lexer);
  bool too_small = is_integer && lex_integer (lexer) < min;
  bool too_big = is_integer && lex_integer (lexer) > max;
  if (is_integer && !too_small && !too_big)
  /* Everything below selects the most helpful error message for the
     combination of range shape and NAME availability. */
      /* Weird, maybe a bug in the caller.  Just report that we needed an
         integer. */
        lex_error (lexer, _("Integer expected for %s."), name);
        lex_error (lexer, _("Integer expected."));
  /* Degenerate range: exactly one acceptable value. */
        lex_error (lexer, _("Expected %ld for %s."), min, name);
        lex_error (lexer, _("Expected %ld."), min);
  else if (min + 1 == max)
    /* Two acceptable values. */
        lex_error (lexer, _("Expected %ld or %ld for %s."), min, min + 1, name);
        lex_error (lexer, _("Expected %ld or %ld."), min, min + 1);
      /* Mention a bound only if it is "interesting" (not near the extremes of
         'int') or if the token actually violated it. */
      bool report_lower_bound = (min > INT_MIN / 2) || too_small;
      bool report_upper_bound = (max < INT_MAX / 2) || too_big;

      if (report_lower_bound && report_upper_bound)
                     _("Expected integer between %ld and %ld for %s."),
            lex_error (lexer, _("Expected integer between %ld and %ld."),
      else if (report_lower_bound)
          /* min == 0 reads better as "non-negative". */
            lex_error (lexer, _("Expected non-negative integer for %s."),
            lex_error (lexer, _("Expected non-negative integer."));
          /* min == 1 reads better as "positive". */
            lex_error (lexer, _("Expected positive integer for %s."),
            lex_error (lexer, _("Expected positive integer."));
      else if (report_upper_bound)
                     _("Expected integer less than or equal to %ld for %s."),
            lex_error (lexer, _("Expected integer less than or equal to %ld."),
      /* Neither bound worth mentioning: generic message. */
        lex_error (lexer, _("Integer expected for %s."), name);
        lex_error (lexer, _("Integer expected."));
783 /* If the current token is a number, does nothing and returns true.
784 Otherwise, reports an error and returns false. */
786 lex_force_num (struct lexer *lexer)
788 if (lex_is_number (lexer))
791 lex_error (lexer, _("expecting number"));
795 /* If the current token is an identifier, does nothing and returns true.
796 Otherwise, reports an error and returns false. */
798 lex_force_id (struct lexer *lexer)
800 if (lex_token (lexer) == T_ID)
803 lex_error (lexer, _("expecting identifier"));
807 /* Token accessors. */
809 /* Returns the type of LEXER's current token. */
811 lex_token (const struct lexer *lexer)
813 return lex_next_token (lexer, 0);
816 /* Returns the number in LEXER's current token.
818 Only T_NEG_NUM and T_POS_NUM tokens have meaningful values. For other
819 tokens this function will always return zero. */
821 lex_tokval (const struct lexer *lexer)
823 return lex_next_tokval (lexer, 0);
826 /* Returns the null-terminated string in LEXER's current token, UTF-8 encoded.
828 Only T_ID and T_STRING tokens have meaningful strings. For other tokens
829 this functions this function will always return NULL.
831 The UTF-8 encoding of the returned string is correct for variable names and
832 other identifiers. Use filename_to_utf8() to use it as a filename. Use
833 data_in() to use it in a "union value". */
835 lex_tokcstr (const struct lexer *lexer)
837 return lex_next_tokcstr (lexer, 0);
840 /* Returns the string in LEXER's current token, UTF-8 encoded. The string is
841 null-terminated (but the null terminator is not included in the returned
842 substring's 'length').
844 Only T_ID and T_STRING tokens have meaningful strings. For other tokens
845 this functions this function will always return NULL.
847 The UTF-8 encoding of the returned string is correct for variable names and
848 other identifiers. Use filename_to_utf8() to use it as a filename. Use
849 data_in() to use it in a "union value". */
851 lex_tokss (const struct lexer *lexer)
853 return lex_next_tokss (lexer, 0);
858 A value of 0 for N as an argument to any of these functions refers to the
859 current token. Lookahead is limited to the current command. Any N greater
860 than the number of tokens remaining in the current command will be treated
861 as referring to a T_ENDCMD token. */
863 static const struct lex_token *
864 lex_next__ (const struct lexer *lexer_, int n)
866 struct lexer *lexer = CONST_CAST (struct lexer *, lexer_);
867 struct lex_source *src = lex_source__ (lexer);
870 return lex_source_next__ (src, n);
873 static const struct lex_token stop_token = { .token = { .type = T_STOP } };
878 static const struct lex_token *
879 lex_source_front (const struct lex_source *src)
881 return &src->tokens[deque_front (&src->deque, 0)];
884 static const struct lex_token *
885 lex_source_next__ (const struct lex_source *src, int n)
887 while (deque_count (&src->deque) <= n)
889 if (!deque_is_empty (&src->deque))
891 const struct lex_token *front = lex_source_front (src);
892 if (front->token.type == T_STOP || front->token.type == T_ENDCMD)
896 lex_source_get (src);
899 return &src->tokens[deque_back (&src->deque, n)];
902 /* Returns the "struct token" of the token N after the current one in LEXER.
903 The returned pointer can be invalidated by pretty much any succeeding call
904 into the lexer, although the string pointer within the returned token is
905 only invalidated by consuming the token (e.g. with lex_get()). */
907 lex_next (const struct lexer *lexer, int n)
909 return &lex_next__ (lexer, n)->token;
912 /* Returns the type of the token N after the current one in LEXER. */
914 lex_next_token (const struct lexer *lexer, int n)
916 return lex_next (lexer, n)->type;
919 /* Returns the number in the tokn N after the current one in LEXER.
921 Only T_NEG_NUM and T_POS_NUM tokens have meaningful values. For other
922 tokens this function will always return zero. */
924 lex_next_tokval (const struct lexer *lexer, int n)
926 return token_number (lex_next (lexer, n));
929 /* Returns the null-terminated string in the token N after the current one, in
932 Only T_ID and T_STRING tokens have meaningful strings. For other tokens
933 this functions this function will always return NULL.
935 The UTF-8 encoding of the returned string is correct for variable names and
936 other identifiers. Use filename_to_utf8() to use it as a filename. Use
937 data_in() to use it in a "union value". */
939 lex_next_tokcstr (const struct lexer *lexer, int n)
941 return lex_next_tokss (lexer, n).string;
944 /* Returns the string in the token N after the current one, in UTF-8 encoding.
945 The string is null-terminated (but the null terminator is not included in
946 the returned substring's 'length').
948 Only T_ID, T_MACRO_ID, T_STRING tokens have meaningful strings. For other
949 tokens this functions this function will always return NULL.
951 The UTF-8 encoding of the returned string is correct for variable names and
952 other identifiers. Use filename_to_utf8() to use it as a filename. Use
953 data_in() to use it in a "union value". */
955 lex_next_tokss (const struct lexer *lexer, int n)
957 return lex_next (lexer, n)->string;
961 lex_next_representation (const struct lexer *lexer, int n0, int n1)
963 return lex_source_get_syntax__ (lex_source__ (lexer), n0, n1);
967 lex_next_is_from_macro (const struct lexer *lexer, int n)
969 return lex_next__ (lexer, n)->from_macro;
973 lex_tokens_match (const struct token *actual, const struct token *expected)
975 if (actual->type != expected->type)
978 switch (actual->type)
982 return actual->number == expected->number;
985 return lex_id_match (expected->string, actual->string);
988 return (actual->string.length == expected->string.length
989 && !memcmp (actual->string.string, expected->string.string,
990 actual->string.length));
997 /* If LEXER is positioned at the sequence of tokens that may be parsed from S,
998 skips it and returns true. Otherwise, returns false.
1000 S may consist of an arbitrary sequence of tokens, e.g. "KRUSKAL-WALLIS",
1001 "2SLS", or "END INPUT PROGRAM". Identifiers may be abbreviated to their
1002 first three letters. */
1004 lex_match_phrase (struct lexer *lexer, const char *s)
1006 struct string_lexer slex;
1011 string_lexer_init (&slex, s, strlen (s), SEG_MODE_INTERACTIVE);
1012 while (string_lexer_next (&slex, &token))
1013 if (token.type != SCAN_SKIP)
1015 bool match = lex_tokens_match (lex_next (lexer, i++), &token);
1016 token_uninit (&token);
1027 lex_source_get_first_line_number (const struct lex_source *src, int n)
1029 return lex_source_next__ (src, n)->first_line;
/* Returns the number of new-line characters in the LENGTH bytes starting at
   S. */
static int
count_newlines (char *s, size_t length)
{
  int n_newlines = 0;

  for (;;)
    {
      char *newline = memchr (s, '\n', length);
      if (newline == NULL)
        break;

      n_newlines++;
      /* Resume the scan just past the new-line found. */
      length -= (newline + 1) - s;
      s = newline + 1;
    }

  return n_newlines;
}
1049 lex_source_get_last_line_number (const struct lex_source *src, int n)
1051 const struct lex_token *token = lex_source_next__ (src, n);
1053 if (token->first_line == 0)
1057 char *token_str = &src->buffer[token->token_pos - src->tail];
1058 return token->first_line + count_newlines (token_str, token->token_len) + 1;
1063 count_columns (const char *s_, size_t length)
1065 const uint8_t *s = CHAR_CAST (const uint8_t *, s_);
1071 for (ofs = 0; ofs < length; ofs += mblen)
1075 mblen = u8_mbtouc (&uc, s + ofs, length - ofs);
1078 int width = uc_width (uc, "UTF-8");
1083 columns = ROUND_UP (columns + 1, 8);
1090 lex_source_get_first_column (const struct lex_source *src, int n)
1092 const struct lex_token *token = lex_source_next__ (src, n);
1093 return count_columns (&src->buffer[token->line_pos - src->tail],
1094 token->token_pos - token->line_pos);
1098 lex_source_get_last_column (const struct lex_source *src, int n)
1100 const struct lex_token *token = lex_source_next__ (src, n);
1101 char *start, *end, *newline;
1103 start = &src->buffer[token->line_pos - src->tail];
1104 end = &src->buffer[(token->token_pos + token->token_len) - src->tail];
1105 newline = memrchr (start, '\n', end - start);
1106 if (newline != NULL)
1107 start = newline + 1;
1108 return count_columns (start, end - start);
/* Returns the 1-based line number of the start of the syntax that represents
   the token N after the current one in LEXER.  Returns 0 for a T_STOP token
   or if the token is drawn from a source that does not have line numbers. */
int
lex_get_first_line_number (const struct lexer *lexer, int n)
{
  const struct lex_source *src = lex_source__ (lexer);
  return src != NULL ? lex_source_get_first_line_number (src, n) : 0;
}

/* Returns the 1-based line number of the end of the syntax that represents
   the token N after the current one in LEXER, plus 1.  Returns 0 for a T_STOP
   token or if the token is drawn from a source that does not have line
   numbers.

   Most of the time, a single token is wholly within a single line of syntax,
   but there are two exceptions: a T_STRING token can be made up of multiple
   segments on adjacent lines connected with "+" punctuators, and a T_NEG_NUM
   token can consist of a "-" on one line followed by the number on the
   next. */
int
lex_get_last_line_number (const struct lexer *lexer, int n)
{
  const struct lex_source *src = lex_source__ (lexer);
  return src != NULL ? lex_source_get_last_line_number (src, n) : 0;
}

/* Returns the 1-based column number of the start of the syntax that
   represents the token N after the current one in LEXER.  Returns 0 for a
   T_STOP token.

   Column numbers are measured according to the width of characters as shown
   in a typical fixed-width font, in which CJK characters have width 2 and
   combining characters have width 0. */
int
lex_get_first_column (const struct lexer *lexer, int n)
{
  const struct lex_source *src = lex_source__ (lexer);
  return src != NULL ? lex_source_get_first_column (src, n) : 0;
}

/* Returns the 1-based column number of the end of the syntax that represents
   the token N after the current one in LEXER, plus 1.  Returns 0 for a T_STOP
   token.

   Column numbers are measured according to the width of characters as shown
   in a typical fixed-width font, in which CJK characters have width 2 and
   combining characters have width 0. */
int
lex_get_last_column (const struct lexer *lexer, int n)
{
  const struct lex_source *src = lex_source__ (lexer);
  return src != NULL ? lex_source_get_last_column (src, n) : 0;
}
1166 /* Returns the name of the syntax file from which the current command is drawn.
1167 Returns NULL for a T_STOP token or if the command's source does not have
1170 There is no version of this function that takes an N argument because
1171 lookahead only works to the end of a command and any given command is always
1172 within a single syntax file. */
1174 lex_get_file_name (const struct lexer *lexer)
1176 struct lex_source *src = lex_source__ (lexer);
1177 return src == NULL ? NULL : src->reader->file_name;
1181 lex_get_encoding (const struct lexer *lexer)
1183 struct lex_source *src = lex_source__ (lexer);
1184 return src == NULL ? NULL : src->reader->encoding;
1187 /* Returns the syntax mode for the syntax file from which the current drawn is
1188 drawn. Returns SEG_MODE_AUTO for a T_STOP token or if the command's source
1189 does not have line numbers.
1191 There is no version of this function that takes an N argument because
1192 lookahead only works to the end of a command and any given command is always
1193 within a single syntax file. */
1195 lex_get_syntax_mode (const struct lexer *lexer)
1197 struct lex_source *src = lex_source__ (lexer);
1198 return src == NULL ? SEG_MODE_AUTO : src->reader->syntax;
1201 /* Returns the error mode for the syntax file from which the current drawn is
1202 drawn. Returns LEX_ERROR_TERMINAL for a T_STOP token or if the command's
1203 source does not have line numbers.
1205 There is no version of this function that takes an N argument because
1206 lookahead only works to the end of a command and any given command is always
1207 within a single syntax file. */
1209 lex_get_error_mode (const struct lexer *lexer)
1211 struct lex_source *src = lex_source__ (lexer);
1212 return src == NULL ? LEX_ERROR_TERMINAL : src->reader->error;
1215 /* If the source that LEXER is currently reading has error mode
1216 LEX_ERROR_TERMINAL, discards all buffered input and tokens, so that the next
1217 token to be read comes directly from whatever is next read from the stream.
1219 It makes sense to call this function after encountering an error in a
1220 command entered on the console, because usually the user would prefer not to
1221 have cascading errors. */
1223 lex_interactive_reset (struct lexer *lexer)
1225 struct lex_source *src = lex_source__ (lexer);
1226 if (src != NULL && src->reader->error == LEX_ERROR_TERMINAL)
1228 src->head = src->tail = 0;
1229 src->journal_pos = src->seg_pos = src->line_pos = 0;
1230 src->n_newlines = 0;
1231 src->suppress_next_newline = false;
1232 src->segmenter = segmenter_init (segmenter_get_mode (&src->segmenter),
1234 while (!deque_is_empty (&src->deque))
1235 lex_source_pop__ (src);
1236 lex_source_push_endcmd__ (src);
1240 /* Advances past any tokens in LEXER up to a T_ENDCMD or T_STOP. */
1242 lex_discard_rest_of_command (struct lexer *lexer)
1244 while (lex_token (lexer) != T_STOP && lex_token (lexer) != T_ENDCMD)
1248 /* Discards all lookahead tokens in LEXER, then discards all input sources
1249 until it encounters one with error mode LEX_ERROR_TERMINAL or until it
1250 runs out of input sources. */
1252 lex_discard_noninteractive (struct lexer *lexer)
1254 struct lex_source *src = lex_source__ (lexer);
/* Drop every buffered lookahead token in the current source. */
1258 while (!deque_is_empty (&src->deque))
1259 lex_source_pop__ (src);
/* Pop whole sources until an interactive (LEX_ERROR_TERMINAL) source is
   found or none remain; lex_source__() re-reads the head after each pop. */
1261 for (; src != NULL && src->reader->error != LEX_ERROR_TERMINAL;
1262 src = lex_source__ (lexer))
1263 lex_source_destroy (src);
/* Returns the furthest position that SRC's tail could safely advance to:
   nothing at or after the journal position, the current line start, or the
   oldest buffered token's line start may be discarded. */
1268 lex_source_max_tail__ (const struct lex_source *src)
1270 const struct lex_token *token;
1273 assert (src->seg_pos >= src->line_pos);
/* Retain everything the journal has not yet consumed and the current line. */
1274 max_tail = MIN (src->journal_pos, src->line_pos);
1276 /* Use the oldest token also.  (We know that src->deque cannot be empty
1277 because we are in the process of adding a new token, which is already
1278 initialized enough to use here.) */
1279 token = &src->tokens[deque_back (&src->deque, 0)];
1280 assert (token->token_pos >= token->line_pos);
1281 max_tail = MIN (max_tail, token->line_pos);
/* Ensures there is room at the head of SRC's buffer for more input: first by
   sliding the window forward past data that is no longer needed, and only if
   that is impossible by growing the allocation. */
1287 lex_source_expand__ (struct lex_source *src)
1289 if (src->head - src->tail >= src->allocated)
1291 size_t max_tail = lex_source_max_tail__ (src);
1292 if (max_tail > src->tail)
1294 /* Advance the tail, freeing up room at the head. */
1295 memmove (src->buffer, src->buffer + (max_tail - src->tail),
1296 src->head - max_tail);
1297 src->tail = max_tail;
1301 /* Buffer is completely full. Expand it. */
1302 src->buffer = x2realloc (src->buffer, &src->allocated);
1307 /* There's space available at the head of the buffer. Nothing to do. */
/* Reads more input from SRC's underlying reader into its buffer, repeating
   until the not-yet-segmented region contains at least one newline (or the
   reader reports end of input). */
1312 lex_source_read__ (struct lex_source *src)
1316 lex_source_expand__ (src);
1318 size_t head_ofs = src->head - src->tail;
1319 size_t space = src->allocated - head_ofs;
/* The segmenter knows what kind of prompt an interactive reader should
   display for the next line. */
1320 enum prompt_style prompt = segmenter_get_prompt (&src->segmenter);
1321 size_t n = src->reader->class->read (src->reader, &src->buffer[head_ofs],
1323 assert (n <= space);
/* A zero-length read marks end of input on this reader. */
1328 src->reader->eof = true;
1329 lex_source_expand__ (src);
/* Loop until a complete line is available past the segmentation point. */
1335 while (!memchr (&src->buffer[src->seg_pos - src->tail], '\n',
1336 src->head - src->seg_pos));
/* Returns LEXER's current source, which is the head of its source list, or a
   null pointer if it has no sources. */
1339 static struct lex_source *
1340 lex_source__ (const struct lexer *lexer)
1342 return (ll_is_empty (&lexer->sources) ? NULL
1343 : ll_data (ll_head (&lexer->sources), struct lex_source, ll));
/* Returns the raw syntax text spanning TOKEN0 through TOKEN1 as a substring
   into SRC's buffer.  Both tokens must still lie within the buffered window
   (src->tail <= start and end <= src->head). */
1346 static struct substring
1347 lex_tokens_get_syntax__ (const struct lex_source *src,
1348 const struct lex_token *token0,
1349 const struct lex_token *token1)
1351 size_t start = token0->token_pos;
1352 size_t end = token1->token_pos + token1->token_len;
1354 return ss_buffer (&src->buffer[start - src->tail], end - start);
/* Returns the syntax text for lookahead tokens N0 through N1 in SRC.  The
   MAX guards against a caller passing N1 < N0. */
1357 static struct substring
1358 lex_source_get_syntax__ (const struct lex_source *src, int n0, int n1)
1360 return lex_tokens_get_syntax__ (src,
1361 lex_source_next__ (src, n0),
1362 lex_source_next__ (src, MAX (n0, n1)));
/* Copies IN into OUT (which has OUT_SIZE bytes, at least 16), truncating at
   end-of-line ('\n', NUL, or "\r\n"), never splitting a UTF-8 character, and
   appending "..." when the copy is shorter than IN. */
1366 lex_ellipsize__ (struct substring in, char *out, size_t out_size)
1372 assert (out_size >= 16);
1373 out_maxlen = out_size - 1;
/* Reserve room for the "..." suffix if IN cannot fit whole. */
1374 if (in.length > out_maxlen - 3)
1377 for (out_len = 0; out_len < in.length; out_len += mblen)
/* Stop copying at end of line or end of string. */
1379 if (in.string[out_len] == '\n'
1380 || in.string[out_len] == '\0'
1381 || (in.string[out_len] == '\r'
1382 && out_len + 1 < in.length
1383 && in.string[out_len + 1] == '\n'))
/* Advance by whole multibyte characters so none is ever split. */
1386 mblen = u8_mblen (CHAR_CAST (const uint8_t *, in.string + out_len),
1387 in.length - out_len);
1392 if (out_len + mblen > out_maxlen)
1396 memcpy (out, in.string, out_len);
/* Append "..." only when something was actually cut off. */
1397 strcpy (&out[out_len], out_len < in.length ? "..." : "");
/* Formats and emits a syntax error message for lookahead tokens N0 through N1
   in SRC, quoting the offending syntax when available and locating the error
   by file name, line, and column. */
1401 lex_source_error_valist (struct lex_source *src, int n0, int n1,
1402 const char *format, va_list args)
1404 const struct lex_token *token;
1409 token = lex_source_next__ (src, n0);
1410 if (token->token.type == T_ENDCMD)
1411 ds_put_cstr (&s, _("Syntax error at end of command"));
1412 else if (token->from_macro)
1414 /* XXX this isn't ideal, we should get the actual syntax */
1415 char *syntax = token_to_string (&token->token);
1417 ds_put_format (&s, _("Syntax error at `%s'"), syntax);
1419 ds_put_cstr (&s, _("Syntax error"));
/* Not a macro token: quote the literal source text, ellipsized to fit. */
1424 struct substring syntax = lex_source_get_syntax__ (src, n0, n1);
1425 if (!ss_is_empty (syntax))
1427 char syntax_cstr[64];
1429 lex_ellipsize__ (syntax, syntax_cstr, sizeof syntax_cstr);
1430 ds_put_format (&s, _("Syntax error at `%s'"), syntax_cstr);
1433 ds_put_cstr (&s, _("Syntax error"));
/* Append the caller's detail message, ensuring a trailing period. */
1438 ds_put_cstr (&s, ": ");
1439 ds_put_vformat (&s, format, args);
1441 if (ds_last (&s) != '.')
1442 ds_put_byte (&s, '.');
/* Build the message with the source span's full location information. */
1445 .category = MSG_C_SYNTAX,
1446 .severity = MSG_S_ERROR,
1447 .file_name = src->reader->file_name,
1448 .first_line = lex_source_get_first_line_number (src, n0),
1449 .last_line = lex_source_get_last_line_number (src, n1),
1450 .first_column = lex_source_get_first_column (src, n0),
1451 .last_column = lex_source_get_last_column (src, n1),
1452 .text = ds_steal_cstr (&s),
1457 static void PRINTF_FORMAT (2, 3)
/* Reports a printf()-style scan error against the newest token in SRC's
   lookahead queue, then removes that token. */
1458 lex_get_error (struct lex_source *src, const char *format, ...)
1463 va_start (args, format);
/* The newest token is at offset count-1 in the lookahead queue. */
1465 n = deque_count (&src->deque) - 1;
1466 lex_source_error_valist (src, n, n, format, args);
/* NOTE(review): presumably this discards the just-reported bad token so it
   is never returned to the parser -- confirm against lex_source_pop_front. */
1467 lex_source_pop_front (src);
1472 /* Attempts to append an additional token into SRC's deque, reading more from
1473 the underlying lex_reader if necessary. Returns true if a new token was
1474 added to SRC's deque, false otherwise. */
1476 lex_source_try_get (struct lex_source *src)
1478 /* State maintained while scanning tokens. Usually we only need a single
1479 state, but scanner_push() can return SCAN_SAVE to indicate that the state
1480 needs to be saved and possibly restored later with SCAN_BACK. */
1483 struct segmenter segmenter;
1484 enum segment_type last_segment;
1485 int newlines; /* Number of newlines encountered so far. */
1486 /* Maintained here so we can update lex_source's similar members when we
1492 /* Initialize state. */
1493 struct state state =
1495 .segmenter = src->segmenter,
1497 .seg_pos = src->seg_pos,
1498 .line_pos = src->line_pos,
1500 struct state saved = state;
1502 /* Append a new token to SRC and initialize it. */
1503 struct lex_token *token = lex_push_token__ (src);
1504 struct scanner scanner;
1505 scanner_init (&scanner, &token->token);
1506 token->line_pos = src->line_pos;
1507 token->token_pos = src->seg_pos;
/* Record a 1-based line number only when the reader tracks line numbers;
   zero means "no line number available". */
1508 if (src->reader->line_number > 0)
1509 token->first_line = src->reader->line_number + src->n_newlines;
1511 token->first_line = 0;
1513 /* Extract segments and pass them through the scanner until we obtain a
1517 /* Extract a segment. */
1518 const char *segment = &src->buffer[state.seg_pos - src->tail];
1519 size_t seg_maxlen = src->head - state.seg_pos;
1520 enum segment_type type;
1521 int seg_len = segmenter_push (&state.segmenter, segment, seg_maxlen,
1522 src->reader->eof, &type);
1525 /* The segmenter needs more input to produce a segment. */
1526 assert (!src->reader->eof);
1527 lex_source_read__ (src);
1531 /* Update state based on the segment. */
1532 state.last_segment = type;
1533 state.seg_pos += seg_len;
1534 if (type == SEG_NEWLINE)
1537 state.line_pos = state.seg_pos;
1540 /* Pass the segment into the scanner and try to get a token out. */
1541 enum scan_result result = scanner_push (&scanner, type,
1542 ss_buffer (segment, seg_len),
/* SCAN_SAVE: checkpoint the state; SCAN_BACK: restore the checkpoint;
   SCAN_DONE: a complete token has been produced. */
1544 if (result == SCAN_SAVE)
1546 else if (result == SCAN_BACK)
1551 else if (result == SCAN_DONE)
1555 /* If we've reached the end of a line, or the end of a command, then pass
1556 the line to the output engine as a syntax text item. */
1557 int n_lines = state.newlines;
1558 if (state.last_segment == SEG_END_COMMAND && !src->suppress_next_newline)
1561 src->suppress_next_newline = true;
1563 else if (n_lines > 0 && src->suppress_next_newline)
1566 src->suppress_next_newline = false;
1568 for (int i = 0; i < n_lines; i++)
1570 /* Beginning of line. */
1571 const char *line = &src->buffer[src->journal_pos - src->tail];
1573 /* Calculate line length, including \n or \r\n end-of-line if present.
1575 We use src->head even though that may be beyond what we've actually
1576 converted to tokens (which is only through state.line_pos). That's
1577 because, if we're emitting the line due to SEG_END_COMMAND, we want to
1578 take the whole line through the newline, not just through the '.'. */
1579 size_t max_len = src->head - src->journal_pos;
1580 const char *newline = memchr (line, '\n', max_len);
1581 size_t line_len = newline ? newline - line + 1 : max_len;
1583 /* Calculate line length excluding end-of-line. */
1584 size_t copy_len = line_len;
1585 if (copy_len > 0 && line[copy_len - 1] == '\n')
1587 if (copy_len > 0 && line[copy_len - 1] == '\r')
1590 /* Submit the line as syntax. */
1591 output_item_submit (text_item_create_nocopy (TEXT_ITEM_SYNTAX,
1592 xmemdup0 (line, copy_len),
1595 src->journal_pos += line_len;
/* Commit the scan: record the token's source length and fold the local
   scanning state back into SRC. */
1598 token->token_len = state.seg_pos - src->seg_pos;
1600 src->segmenter = state.segmenter;
1601 src->seg_pos = state.seg_pos;
1602 src->line_pos = state.line_pos;
1603 src->n_newlines += state.newlines;
/* Post-process the scanned token: translate sentinel types and report
   scan-level errors (each error also discards the bad token). */
1605 switch (token->token.type)
1611 token->token.type = T_ENDCMD;
1615 case SCAN_BAD_HEX_LENGTH:
1616 lex_get_error (src, _("String of hex digits has %d characters, which "
1617 "is not a multiple of 2"),
1618 (int) token->token.number);
1621 case SCAN_BAD_HEX_DIGIT:
1622 case SCAN_BAD_UNICODE_DIGIT:
1623 lex_get_error (src, _("`%c' is not a valid hex digit"),
1624 (int) token->token.number);
1627 case SCAN_BAD_UNICODE_LENGTH:
1628 lex_get_error (src, _("Unicode string contains %d bytes, which is "
1629 "not in the valid range of 1 to 8 bytes"),
1630 (int) token->token.number);
1633 case SCAN_BAD_UNICODE_CODE_POINT:
1634 lex_get_error (src, _("U+%04X is not a valid Unicode code point"),
1635 (int) token->token.number);
1638 case SCAN_EXPECTED_QUOTE:
1639 lex_get_error (src, _("Unterminated string constant"));
1642 case SCAN_EXPECTED_EXPONENT:
1643 lex_get_error (src, _("Missing exponent following `%s'"),
1644 token->token.string.string);
1647 case SCAN_UNEXPECTED_CHAR:
/* uc_name() yields a printable name for the offending character. */
1650 lex_get_error (src, _("Bad character %s in input"),
1651 uc_name (token->token.number, c_name));
1656 lex_source_pop_front (src);
/* Obtains one more token for SRC, returning true on success.  Delegates the
   actual scanning to lex_source_try_get(). */
1664 lex_source_get__ (struct lex_source *src)
1670 else if (lex_source_try_get (src))
/* Obtains the next token for SRC, expanding a macro call in place when the
   newly read token begins one.  The CONST_CAST reflects that lookahead
   mutates internal buffering state even through a const pointer. */
1676 lex_source_get (const struct lex_source *src_)
1678 struct lex_source *src = CONST_CAST (struct lex_source *, src_);
1680 size_t old_count = deque_count (&src->deque);
1681 if (!lex_source_get__ (src))
/* Macro expansion disabled: return the token as-is. */
1684 if (!settings_get_mexpand ())
/* Check whether the new token starts a macro invocation. */
1687 struct macro_expander *me;
1688 int retval = macro_expander_create (src->lexer->macros,
1689 &lex_source_front (src)->token,
/* Keep feeding tokens to the expander until it decides the invocation is
   complete or is not actually a macro call. */
1693 if (!lex_source_get__ (src))
1695 /* This should not be reachable because we always get a T_ENDCMD at
1696 the end of an input file (transformed from T_STOP by
1697 lex_source_try_get()) and the macro_expander should always
1698 terminate expansion on T_ENDCMD. */
1702 const struct lex_token *front = lex_source_front (src);
1703 const struct macro_token mt = {
1704 .token = front->token,
1705 .representation = lex_tokens_get_syntax__ (src, front, front)
1707 retval = macro_expander_add (me, &mt);
1711 /* XXX handle case where there's a macro invocation starting from some
1712 later token we've already obtained */
1713 macro_expander_destroy (me);
1717 /* XXX handle case where the macro invocation doesn't use all the tokens */
/* Remove the tokens consumed by the macro call... */
1718 while (deque_count (&src->deque) > old_count)
1719 lex_source_pop_front (src);
/* ...and replace them with the expansion. */
1721 struct macro_tokens expansion = { .n = 0 };
1722 macro_expander_get_expansion (me, &expansion);
1723 macro_expander_destroy (me);
/* MPRINT: log the expansion text for the user. */
1725 if (settings_get_mprint ())
1727 struct string mprint = DS_EMPTY_INITIALIZER;
1728 macro_tokens_to_representation (&expansion, &mprint);
1729 output_item_submit (text_item_create (TEXT_ITEM_LOG, ds_cstr (&mprint),
1730 _("Macro Expansion")));
1731 ds_destroy (&mprint);
/* Push each expanded token onto the lookahead queue. */
1734 for (size_t i = 0; i < expansion.n; i++)
1736 *lex_push_token__ (src) = (struct lex_token) {
1737 .token = expansion.mts[i].token,
1742 ss_dealloc (&expansion.mts[i].representation); /* XXX should feed into lexer */
1744 free (expansion.mts);
/* Appends a sentinel T_ENDCMD token to SRC's lookahead queue.  A new or
   reset source always starts with this token. */
1750 lex_source_push_endcmd__ (struct lex_source *src)
1752 *lex_push_token__ (src) = (struct lex_token) { .token = { .type = T_ENDCMD } };
/* Creates and returns a new lex_source that wraps READER, taking ownership
   of it, and attaches it to LEXER. */
1755 static struct lex_source *
1756 lex_source_create (struct lexer *lexer, struct lex_reader *reader)
1758 struct lex_source *src;
1760 src = xzalloc (sizeof *src);
1761 src->reader = reader;
1762 src->segmenter = segmenter_init (reader->syntax, false);
/* Start with a small token deque; it grows on demand. */
1764 src->tokens = deque_init (&src->deque, 4, sizeof *src->tokens);
/* Prime the queue with the sentinel T_ENDCMD. */
1766 lex_source_push_endcmd__ (src);
/* Destroys SRC: closes its reader, frees its buffered tokens, and unlinks it
   from the owning lexer's source list. */
1772 lex_source_destroy (struct lex_source *src)
/* Save the strings before the reader's destroy callback may free the reader
   itself. */
1774 char *file_name = src->reader->file_name;
1775 char *encoding = src->reader->encoding;
1776 if (src->reader->class->destroy != NULL)
1777 src->reader->class->destroy (src->reader);
1781 while (!deque_is_empty (&src->deque))
1782 lex_source_pop__ (src);
1784 ll_remove (&src->ll);
/* A lex_reader backed by a file (or stdin), decoded through a u8_istream. */
1788 struct lex_file_reader
1790 struct lex_reader reader;      /* Embedded common reader part. */
1791 struct u8_istream *istream;    /* UTF-8 converting input stream. */
/* Defined below, after the class's member functions. */
1794 static struct lex_reader_class lex_file_reader_class;
1796 /* Creates and returns a new lex_reader that will read from file FILE_NAME (or
1797 from stdin if FILE_NAME is "-"). The file is expected to be encoded with
1798 ENCODING, which should take one of the forms accepted by
1799 u8_istream_for_file(). SYNTAX and ERROR become the syntax mode and error
1800 mode of the new reader, respectively.
1802 Returns a null pointer if FILE_NAME cannot be opened. */
1804 lex_reader_for_file (const char *file_name, const char *encoding,
1805 enum segmenter_mode syntax,
1806 enum lex_error_mode error)
1808 struct lex_file_reader *r;
1809 struct u8_istream *istream;
/* "-" conventionally means standard input. */
1811 istream = (!strcmp(file_name, "-")
1812 ? u8_istream_for_fd (encoding, STDIN_FILENO)
1813 : u8_istream_for_file (encoding, file_name, O_RDONLY));
1814 if (istream == NULL)
1816 msg (ME, _("Opening `%s': %s."), file_name, strerror (errno));
1820 r = xmalloc (sizeof *r);
1821 lex_reader_init (&r->reader, &lex_file_reader_class);
1822 r->reader.syntax = syntax;
1823 r->reader.error = error;
1824 r->reader.file_name = xstrdup (file_name);
1825 r->reader.encoding = xstrdup_if_nonnull (encoding);
/* File readers track line numbers, starting from 1. */
1826 r->reader.line_number = 1;
1827 r->istream = istream;
/* Downcasts abstract reader R to its containing lex_file_reader. */
1832 static struct lex_file_reader *
1833 lex_file_reader_cast (struct lex_reader *r)
1835 return UP_CAST (r, struct lex_file_reader, reader);
/* lex_reader_class "read" callback: reads up to N bytes into BUF from the
   underlying stream.  The prompt style is irrelevant for file input. */
1839 lex_file_read (struct lex_reader *r_, char *buf, size_t n,
1840 enum prompt_style prompt_style UNUSED)
1842 struct lex_file_reader *r = lex_file_reader_cast (r_);
1843 ssize_t n_read = u8_istream_read (r->istream, buf, n);
/* Report read errors to the user; the caller only sees a short count. */
1846 msg (ME, _("Error reading `%s': %s."), r_->file_name, strerror (errno));
/* lex_reader_class "close" callback: closes the underlying stream, except
   that a stream on stdin is only freed, not closed, so that stdin itself
   stays open for the rest of the process. */
1853 lex_file_close (struct lex_reader *r_)
1855 struct lex_file_reader *r = lex_file_reader_cast (r_);
1857 if (u8_istream_fileno (r->istream) != STDIN_FILENO)
1859 if (u8_istream_close (r->istream) != 0)
1860 msg (ME, _("Error closing `%s': %s."), r_->file_name, strerror (errno));
1863 u8_istream_free (r->istream);
/* Method table for file-backed readers. */
1868 static struct lex_reader_class lex_file_reader_class =
/* A lex_reader that serves tokens from an in-memory string. */
1874 struct lex_string_reader
1876 struct lex_reader reader;      /* Embedded common reader part. */
/* Defined below, after the class's member functions. */
1881 static struct lex_reader_class lex_string_reader_class;
1883 /* Creates and returns a new lex_reader for the contents of S, which must be
1884 encoded in the given ENCODING. The new reader takes ownership of S and will free it
1885 with ss_dealloc() when it is closed. */
1887 lex_reader_for_substring_nocopy (struct substring s, const char *encoding)
1889 struct lex_string_reader *r;
1891 r = xmalloc (sizeof *r);
1892 lex_reader_init (&r->reader, &lex_string_reader_class);
/* String input always uses automatic syntax-mode detection. */
1893 r->reader.syntax = SEG_MODE_AUTO;
1894 r->reader.encoding = xstrdup_if_nonnull (encoding);
1901 /* Creates and returns a new lex_reader for a copy of null-terminated string S,
1902 which must be encoded in ENCODING. The caller retains ownership of S. */
1904 lex_reader_for_string (const char *s, const char *encoding)
1906 struct substring ss;
/* Copy S so the nocopy variant can take ownership of the copy. */
1907 ss_alloc_substring (&ss, ss_cstr (s));
1908 return lex_reader_for_substring_nocopy (ss, encoding);
1911 /* Formats FORMAT as a printf()-like format string and creates and returns a
1912 new lex_reader for the formatted result. */
1914 lex_reader_for_format (const char *format, const char *encoding, ...)
1916 struct lex_reader *r;
1919 va_start (args, encoding);
/* xvasprintf() allocates the formatted string; the nocopy reader takes
   ownership and frees it on close. */
1920 r = lex_reader_for_substring_nocopy (ss_cstr (xvasprintf (format, args)), encoding);
/* Downcasts abstract reader R to its containing lex_string_reader. */
1926 static struct lex_string_reader *
1927 lex_string_reader_cast (struct lex_reader *r)
1929 return UP_CAST (r, struct lex_string_reader, reader);
/* lex_reader_class "read" callback for string readers: copies up to N bytes
   of the remaining string into BUF. */
1933 lex_string_read (struct lex_reader *r_, char *buf, size_t n,
1934 enum prompt_style prompt_style UNUSED)
1936 struct lex_string_reader *r = lex_string_reader_cast (r_);
/* Never copy past the end of the stored string. */
1939 chunk = MIN (n, r->s.length - r->offset);
1940 memcpy (buf, r->s.string + r->offset, chunk);
/* lex_reader_class "close" callback for string readers: releases the owned
   string and the reader itself. */
1947 lex_string_close (struct lex_reader *r_)
1949 struct lex_string_reader *r = lex_string_reader_cast (r_);
/* Method table for in-memory string readers. */
1955 static struct lex_reader_class lex_string_reader_class =