1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 1997-9, 2000, 2006, 2009, 2010, 2011, 2013, 2016 Free Software Foundation, Inc.
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 #include "language/lexer/lexer.h"
33 #include "language/command.h"
34 #include "language/lexer/macro.h"
35 #include "language/lexer/scan.h"
36 #include "language/lexer/segment.h"
37 #include "language/lexer/token.h"
38 #include "libpspp/assertion.h"
39 #include "libpspp/cast.h"
40 #include "libpspp/deque.h"
41 #include "libpspp/i18n.h"
42 #include "libpspp/ll.h"
43 #include "libpspp/message.h"
44 #include "libpspp/misc.h"
45 #include "libpspp/str.h"
46 #include "libpspp/u8-istream.h"
47 #include "output/journal.h"
48 #include "output/output-item.h"
50 #include "gl/c-ctype.h"
51 #include "gl/minmax.h"
52 #include "gl/xalloc.h"
53 #include "gl/xmemdup0.h"
56 #define _(msgid) gettext (msgid)
57 #define N_(msgid) msgid
/* NOTE(review): this excerpt is a numbered listing with lines dropped.  The
   enclosing "struct lex_token {" header, its "struct token token;" member,
   the "bool from_macro;" flag referenced later (lex_push_token__,
   lex_next_is_from_macro), and the closing "};" are not visible here — confirm
   against the full file before editing. */
59 /* A token within a lex_source. */
62 /* The regular token information. */
65 /* Location of token in terms of the lex_source's buffer.
66 src->tail <= line_pos <= token_pos <= src->head. */
67 size_t token_pos; /* Start of token. */
68 size_t token_len; /* Length of source for token in bytes. */
69 size_t line_pos; /* Start of line containing token_pos. */
70 int first_line; /* Line number at token_pos. */
/* NOTE(review): fragment of "struct lex_source" — the "struct lex_source {"
   header, the "char *buffer;" member (used throughout as src->buffer), the
   "struct lexer *lexer;" back-pointer, and the closing "};" were dropped by
   the listing; verify against the full file. */
74 /* A source of tokens, corresponding to a syntax file.
76 This is conceptually a lex_reader wrapped with everything needed to convert
77 its UTF-8 bytes into tokens. */
80 struct ll ll; /* In lexer's list of sources. */
81 struct lex_reader *reader;
83 struct segmenter segmenter;
84 bool eof; /* True if T_STOP was read from 'reader'. */
86 /* Buffer of UTF-8 bytes. */
88 size_t allocated; /* Number of bytes allocated. */
89 size_t tail; /* &buffer[0] offset into UTF-8 source. */
90 size_t head; /* &buffer[head - tail] offset into source. */
92 /* Positions in source file, tail <= pos <= head for each member here. */
93 size_t journal_pos; /* First byte not yet output to journal. */
94 size_t seg_pos; /* First byte not yet scanned as token. */
95 size_t line_pos; /* First byte of line containing seg_pos. */
97 int n_newlines; /* Number of new-lines up to seg_pos. */
98 bool suppress_next_newline;
/* Lookahead token ring buffer: 'deque' holds indexes into 'tokens'. */
101 struct deque deque; /* Indexes into 'tokens'. */
102 struct lex_token *tokens; /* Lookahead tokens for parser. */
/* Forward declarations for lex_source helpers defined later in the file.
   NOTE(review): several declarations are truncated by the listing — e.g. the
   "struct lexer {" definition that should precede its members at lines
   112-113, and the trailing "int n0, int n1);" continuation lines of
   lex_source_get_syntax__ and lex_source_next__. */
105 static struct lex_source *lex_source_create (struct lexer *,
106 struct lex_reader *);
107 static void lex_source_destroy (struct lex_source *);
112 struct ll_list sources; /* Contains "struct lex_source"s. */
113 struct macro_set *macros;
116 static struct lex_source *lex_source__ (const struct lexer *);
117 static struct substring lex_source_get_syntax__ (const struct lex_source *,
119 static const struct lex_token *lex_next__ (const struct lexer *, int n);
120 static void lex_source_push_endcmd__ (struct lex_source *);
122 static void lex_source_pop__ (struct lex_source *);
123 static bool lex_source_get (const struct lex_source *);
124 static void lex_source_error_valist (struct lex_source *, int n0, int n1,
125 const char *format, va_list)
126 PRINTF_FORMAT (4, 0);
127 static const struct lex_token *lex_source_next__ (const struct lex_source *,
130 /* Initializes READER with the specified CLASS and otherwise some reasonable
131 defaults. The caller should fill in the others members as desired. */
133 lex_reader_init (struct lex_reader *reader,
134 const struct lex_reader_class *class)
136 reader->class = class;
137 reader->syntax = SEG_MODE_AUTO;
138 reader->error = LEX_ERROR_CONTINUE;
139 reader->file_name = NULL;
140 reader->encoding = NULL;
141 reader->line_number = 0;
145 /* Frees any file name already in READER and replaces it by a copy of
146 FILE_NAME, or if FILE_NAME is null then clears any existing name. */
148 lex_reader_set_file_name (struct lex_reader *reader, const char *file_name)
150 free (reader->file_name);
151 reader->file_name = xstrdup_if_nonnull (file_name);
154 /* Creates and returns a new lexer. */
158 struct lexer *lexer = xmalloc (sizeof *lexer);
159 *lexer = (struct lexer) {
160 .sources = LL_INITIALIZER (lexer->sources),
161 .macros = macro_set_create (),
166 /* Destroys LEXER. */
168 lex_destroy (struct lexer *lexer)
172 struct lex_source *source, *next;
174 ll_for_each_safe (source, next, struct lex_source, ll, &lexer->sources)
175 lex_source_destroy (source);
176 macro_set_destroy (lexer->macros);
181 /* Adds M to LEXER's set of macros. M replaces any existing macro with the
182 same name. Takes ownership of M. */
184 lex_define_macro (struct lexer *lexer, struct macro *m)
186 macro_set_add (lexer->macros, m);
189 /* Inserts READER into LEXER so that the next token read by LEXER comes from
190 READER. Before the caller, LEXER must either be empty or at a T_ENDCMD
193 lex_include (struct lexer *lexer, struct lex_reader *reader)
195 assert (ll_is_empty (&lexer->sources) || lex_token (lexer) == T_ENDCMD);
196 ll_push_head (&lexer->sources, &lex_source_create (lexer, reader)->ll);
199 /* Appends READER to LEXER, so that it will be read after all other current
200 readers have already been read. */
202 lex_append (struct lexer *lexer, struct lex_reader *reader)
204 ll_push_tail (&lexer->sources, &lex_source_create (lexer, reader)->ll);
209 static struct lex_token *
210 lex_push_token__ (struct lex_source *src)
212 struct lex_token *token;
214 if (deque_is_full (&src->deque))
215 src->tokens = deque_expand (&src->deque, src->tokens, sizeof *src->tokens);
217 token = &src->tokens[deque_push_front (&src->deque)];
218 token->token = (struct token) { .type = T_STOP };
219 token->from_macro = false;
224 lex_source_pop__ (struct lex_source *src)
226 token_uninit (&src->tokens[deque_pop_back (&src->deque)].token);
230 lex_source_pop_front (struct lex_source *src)
232 token_uninit (&src->tokens[deque_pop_front (&src->deque)].token);
235 /* Advances LEXER to the next token, consuming the current token. */
237 lex_get (struct lexer *lexer)
239 struct lex_source *src;
241 src = lex_source__ (lexer);
245 if (!deque_is_empty (&src->deque))
246 lex_source_pop__ (src);
248 while (deque_is_empty (&src->deque))
249 if (!lex_source_get (src))
251 lex_source_destroy (src);
252 src = lex_source__ (lexer);
/* Issuing errors. */

/* Prints a syntax error message containing the current token and
   given message FORMAT (if non-null). */
void
lex_error (struct lexer *lexer, const char *format, ...)
{
  va_list args;

  va_start (args, format);
  lex_next_error_valist (lexer, 0, 0, format, args);
  va_end (args);
}

/* Prints a syntax error message containing the current token and
   given message FORMAT (if non-null), taking arguments from ARGS. */
void
lex_error_valist (struct lexer *lexer, const char *format, va_list args)
{
  lex_next_error_valist (lexer, 0, 0, format, args);
}

/* Prints a syntax error message containing the tokens N0 through N1 after
   the current token and given message FORMAT (if non-null). */
void
lex_next_error (struct lexer *lexer, int n0, int n1, const char *format, ...)
{
  va_list args;

  va_start (args, format);
  lex_next_error_valist (lexer, n0, n1, format, args);
  va_end (args);
}

/* Prints a syntax error message saying that one of the strings provided as
   varargs, up to the first NULL, is expected. */
void
(lex_error_expecting) (struct lexer *lexer, ...)
{
  va_list args;

  va_start (args, lexer);
  lex_error_expecting_valist (lexer, args);
  va_end (args);
}

/* Prints a syntax error message saying that one of the options provided in
   ARGS, up to the first NULL, is expected. */
void
lex_error_expecting_valist (struct lexer *lexer, va_list args)
{
  enum { MAX_OPTIONS = 9 };
  const char *options[MAX_OPTIONS];
  size_t n = 0;

  /* Collect up to MAX_OPTIONS strings; the list is NULL-terminated. */
  while (n < MAX_OPTIONS)
    {
      const char *option = va_arg (args, const char *);
      if (!option)
        break;

      options[n++] = option;
    }
  lex_error_expecting_array (lexer, options, n);
}
/* Prints a syntax error message saying that one of the N strings in OPTIONS
   is expected.  With zero options, or more than 8, falls back to a plain
   syntax error message. */
void
lex_error_expecting_array (struct lexer *lexer, const char **options, size_t n)
{
  switch (n)
    {
    case 0:
      lex_error (lexer, NULL);
      break;

    case 1:
      lex_error (lexer, _("expecting %s"), options[0]);
      break;

    case 2:
      lex_error (lexer, _("expecting %s or %s"), options[0], options[1]);
      break;

    case 3:
      lex_error (lexer, _("expecting %s, %s, or %s"), options[0], options[1],
                 options[2]);
      break;

    case 4:
      lex_error (lexer, _("expecting %s, %s, %s, or %s"),
                 options[0], options[1], options[2], options[3]);
      break;

    case 5:
      lex_error (lexer, _("expecting %s, %s, %s, %s, or %s"),
                 options[0], options[1], options[2], options[3], options[4]);
      break;

    case 6:
      lex_error (lexer, _("expecting %s, %s, %s, %s, %s, or %s"),
                 options[0], options[1], options[2], options[3], options[4],
                 options[5]);
      break;

    case 7:
      lex_error (lexer, _("expecting %s, %s, %s, %s, %s, %s, or %s"),
                 options[0], options[1], options[2], options[3], options[4],
                 options[5], options[6]);
      break;

    case 8:
      lex_error (lexer, _("expecting %s, %s, %s, %s, %s, %s, %s, or %s"),
                 options[0], options[1], options[2], options[3], options[4],
                 options[5], options[6], options[7]);
      break;

    default:
      /* Too many options to list usefully. */
      lex_error (lexer, NULL);
    }
}
378 /* Reports an error to the effect that subcommand SBC may only be specified
381 This function does not take a lexer as an argument or use lex_error(),
382 because the result would ordinarily just be redundant: "Syntax error at
383 SUBCOMMAND: Subcommand SUBCOMMAND may only be specified once.", which does
384 not help the user find the error. */
386 lex_sbc_only_once (const char *sbc)
388 msg (SE, _("Subcommand %s may only be specified once."), sbc);
391 /* Reports an error to the effect that subcommand SBC is missing.
393 This function does not take a lexer as an argument or use lex_error(),
394 because a missing subcommand can normally be detected only after the whole
395 command has been parsed, and so lex_error() would always report "Syntax
396 error at end of command", which does not help the user find the error. */
398 lex_sbc_missing (const char *sbc)
400 msg (SE, _("Required subcommand %s was not specified."), sbc);
403 /* Reports an error to the effect that specification SPEC may only be specified
404 once within subcommand SBC. */
406 lex_spec_only_once (struct lexer *lexer, const char *sbc, const char *spec)
408 lex_error (lexer, _("%s may only be specified once within subcommand %s"),
412 /* Reports an error to the effect that specification SPEC is missing within
415 lex_spec_missing (struct lexer *lexer, const char *sbc, const char *spec)
417 lex_error (lexer, _("Required %s specification missing from %s subcommand"),
421 /* Prints a syntax error message containing the current token and
422 given message MESSAGE (if non-null). */
424 lex_next_error_valist (struct lexer *lexer, int n0, int n1,
425 const char *format, va_list args)
427 struct lex_source *src = lex_source__ (lexer);
430 lex_source_error_valist (src, n0, n1, format, args);
436 ds_put_format (&s, _("Syntax error at end of input"));
439 ds_put_cstr (&s, ": ");
440 ds_put_vformat (&s, format, args);
442 ds_put_byte (&s, '.');
443 msg (SE, "%s", ds_cstr (&s));
448 /* Checks that we're at end of command.
449 If so, returns a successful command completion code.
450 If not, flags a syntax error and returns an error command
453 lex_end_of_command (struct lexer *lexer)
455 if (lex_token (lexer) != T_ENDCMD && lex_token (lexer) != T_STOP)
457 lex_error (lexer, _("expecting end of command"));
/* Token testing functions. */

/* Returns true if the current token is a number. */
bool
lex_is_number (const struct lexer *lexer)
{
  return lex_next_is_number (lexer, 0);
}

/* Returns true if the current token is a string. */
bool
lex_is_string (const struct lexer *lexer)
{
  return lex_next_is_string (lexer, 0);
}

/* Returns the value of the current token, which must be a
   floating point number. */
double
lex_number (const struct lexer *lexer)
{
  return lex_next_number (lexer, 0);
}

/* Returns true iff the current token is an integer. */
bool
lex_is_integer (const struct lexer *lexer)
{
  return lex_next_is_integer (lexer, 0);
}

/* Returns the value of the current token, which must be an
   integer. */
long
lex_integer (const struct lexer *lexer)
{
  return lex_next_integer (lexer, 0);
}
/* Token testing functions with lookahead.

   A value of 0 for N as an argument to any of these functions refers to the
   current token.  Lookahead is limited to the current command.  Any N greater
   than the number of tokens remaining in the current command will be treated
   as referring to a T_ENDCMD token. */

/* Returns true if the token N ahead of the current token is a number. */
bool
lex_next_is_number (const struct lexer *lexer, int n)
{
  return token_is_number (lex_next (lexer, n));
}

/* Returns true if the token N ahead of the current token is a string. */
bool
lex_next_is_string (const struct lexer *lexer, int n)
{
  return token_is_string (lex_next (lexer, n));
}

/* Returns the value of the token N ahead of the current token, which must be a
   floating point number. */
double
lex_next_number (const struct lexer *lexer, int n)
{
  return token_number (lex_next (lexer, n));
}

/* Returns true if the token N ahead of the current token is an integer. */
bool
lex_next_is_integer (const struct lexer *lexer, int n)
{
  return token_is_integer (lex_next (lexer, n));
}

/* Returns the value of the token N ahead of the current token, which must be
   an integer. */
long
lex_next_integer (const struct lexer *lexer, int n)
{
  return token_integer (lex_next (lexer, n));
}
547 /* Token matching functions. */
549 /* If the current token has the specified TYPE, skips it and returns true.
550 Otherwise, returns false. */
552 lex_match (struct lexer *lexer, enum token_type type)
554 if (lex_token (lexer) == type)
563 /* If the current token matches IDENTIFIER, skips it and returns true.
564 IDENTIFIER may be abbreviated to its first three letters. Otherwise,
567 IDENTIFIER must be an ASCII string. */
569 lex_match_id (struct lexer *lexer, const char *identifier)
571 return lex_match_id_n (lexer, identifier, 3);
574 /* If the current token is IDENTIFIER, skips it and returns true. IDENTIFIER
575 may be abbreviated to its first N letters. Otherwise, returns false.
577 IDENTIFIER must be an ASCII string. */
579 lex_match_id_n (struct lexer *lexer, const char *identifier, size_t n)
581 if (lex_token (lexer) == T_ID
582 && lex_id_match_n (ss_cstr (identifier), lex_tokss (lexer), n))
591 /* If the current token is integer X, skips it and returns true. Otherwise,
594 lex_match_int (struct lexer *lexer, int x)
596 if (lex_is_integer (lexer) && lex_integer (lexer) == x)
605 /* Forced matches. */
607 /* If this token is IDENTIFIER, skips it and returns true. IDENTIFIER may be
608 abbreviated to its first 3 letters. Otherwise, reports an error and returns
611 IDENTIFIER must be an ASCII string. */
613 lex_force_match_id (struct lexer *lexer, const char *identifier)
615 if (lex_match_id (lexer, identifier))
619 lex_error_expecting (lexer, identifier);
624 /* If the current token has the specified TYPE, skips it and returns true.
625 Otherwise, reports an error and returns false. */
627 lex_force_match (struct lexer *lexer, enum token_type type)
629 if (lex_token (lexer) == type)
636 const char *type_string = token_type_to_string (type);
639 char *s = xasprintf ("`%s'", type_string);
640 lex_error_expecting (lexer, s);
644 lex_error_expecting (lexer, token_type_to_name (type));
650 /* If the current token is a string, does nothing and returns true.
651 Otherwise, reports an error and returns false. */
653 lex_force_string (struct lexer *lexer)
655 if (lex_is_string (lexer))
659 lex_error (lexer, _("expecting string"));
664 /* If the current token is a string or an identifier, does nothing and returns
665 true. Otherwise, reports an error and returns false.
667 This is meant for use in syntactic situations where we want to encourage the
668 user to supply a quoted string, but for compatibility we also accept
669 identifiers. (One example of such a situation is file names.) Therefore,
670 the error message issued when the current token is wrong only says that a
671 string is expected and doesn't mention that an identifier would also be
674 lex_force_string_or_id (struct lexer *lexer)
676 return lex_token (lexer) == T_ID || lex_force_string (lexer);
679 /* If the current token is an integer, does nothing and returns true.
680 Otherwise, reports an error and returns false. */
682 lex_force_int (struct lexer *lexer)
684 if (lex_is_integer (lexer))
688 lex_error (lexer, _("expecting integer"));
/* NOTE(review): fragmentary listing of lex_force_int_range() — the function's
   return type, braces, else-arms, and several argument lines were dropped.
   The surviving lines show the message-selection logic: a separate wording is
   chosen for an empty range (min > max, "weird, maybe a bug in the caller"),
   a single-value range, a two-value range, and otherwise bounded/unbounded
   ranges, with special wording for non-negative (min == 0) and positive
   (min == 1) lower bounds.  Reconstruct only from the full file. */
693 /* If the current token is an integer in the range MIN...MAX (inclusive), does
694 nothing and returns true. Otherwise, reports an error and returns false.
695 If NAME is nonnull, then it is used in the error message. */
697 lex_force_int_range (struct lexer *lexer, const char *name, long min, long max)
699 bool is_integer = lex_is_integer (lexer);
700 bool too_small = is_integer && lex_integer (lexer) < min;
701 bool too_big = is_integer && lex_integer (lexer) > max;
702 if (is_integer && !too_small && !too_big)
707 /* Weird, maybe a bug in the caller. Just report that we needed an
710 lex_error (lexer, _("Integer expected for %s."), name);
712 lex_error (lexer, _("Integer expected."));
717 lex_error (lexer, _("Expected %ld for %s."), min, name);
719 lex_error (lexer, _("Expected %ld."), min);
721 else if (min + 1 == max)
724 lex_error (lexer, _("Expected %ld or %ld for %s."), min, min + 1, name);
726 lex_error (lexer, _("Expected %ld or %ld."), min, min + 1);
/* A bound is only worth reporting if it is "interesting": far from the
   extremes of 'int', or actually violated by the token's value. */
730 bool report_lower_bound = (min > INT_MIN / 2) || too_small;
731 bool report_upper_bound = (max < INT_MAX / 2) || too_big;
733 if (report_lower_bound && report_upper_bound)
737 _("Expected integer between %ld and %ld for %s."),
740 lex_error (lexer, _("Expected integer between %ld and %ld."),
743 else if (report_lower_bound)
748 lex_error (lexer, _("Expected non-negative integer for %s."),
751 lex_error (lexer, _("Expected non-negative integer."));
756 lex_error (lexer, _("Expected positive integer for %s."),
759 lex_error (lexer, _("Expected positive integer."));
762 else if (report_upper_bound)
766 _("Expected integer less than or equal to %ld for %s."),
769 lex_error (lexer, _("Expected integer less than or equal to %ld."),
775 lex_error (lexer, _("Integer expected for %s."), name);
777 lex_error (lexer, _("Integer expected."));
783 /* If the current token is a number, does nothing and returns true.
784 Otherwise, reports an error and returns false. */
786 lex_force_num (struct lexer *lexer)
788 if (lex_is_number (lexer))
791 lex_error (lexer, _("expecting number"));
795 /* If the current token is an identifier, does nothing and returns true.
796 Otherwise, reports an error and returns false. */
798 lex_force_id (struct lexer *lexer)
800 if (lex_token (lexer) == T_ID)
803 lex_error (lexer, _("expecting identifier"));
807 /* Token accessors. */
809 /* Returns the type of LEXER's current token. */
811 lex_token (const struct lexer *lexer)
813 return lex_next_token (lexer, 0);
816 /* Returns the number in LEXER's current token.
818 Only T_NEG_NUM and T_POS_NUM tokens have meaningful values. For other
819 tokens this function will always return zero. */
821 lex_tokval (const struct lexer *lexer)
823 return lex_next_tokval (lexer, 0);
826 /* Returns the null-terminated string in LEXER's current token, UTF-8 encoded.
828 Only T_ID and T_STRING tokens have meaningful strings. For other tokens
829 this functions this function will always return NULL.
831 The UTF-8 encoding of the returned string is correct for variable names and
832 other identifiers. Use filename_to_utf8() to use it as a filename. Use
833 data_in() to use it in a "union value". */
835 lex_tokcstr (const struct lexer *lexer)
837 return lex_next_tokcstr (lexer, 0);
840 /* Returns the string in LEXER's current token, UTF-8 encoded. The string is
841 null-terminated (but the null terminator is not included in the returned
842 substring's 'length').
844 Only T_ID and T_STRING tokens have meaningful strings. For other tokens
845 this functions this function will always return NULL.
847 The UTF-8 encoding of the returned string is correct for variable names and
848 other identifiers. Use filename_to_utf8() to use it as a filename. Use
849 data_in() to use it in a "union value". */
851 lex_tokss (const struct lexer *lexer)
853 return lex_next_tokss (lexer, 0);
/* NOTE(review): fragment.  The section comment below belongs to the
   "lookahead" group; lex_next__'s body lost its braces and null-source
   branch, and the placement of 'stop_token' (file scope here, but numbered
   after lex_next__'s body) suggests it may actually be a static local inside
   lex_next__'s else-branch — confirm against the full file. */
858 A value of 0 for N as an argument to any of these functions refers to the
859 current token. Lookahead is limited to the current command. Any N greater
860 than the number of tokens remaining in the current command will be treated
861 as referring to a T_ENDCMD token. */
863 static const struct lex_token *
864 lex_next__ (const struct lexer *lexer_, int n)
866 struct lexer *lexer = CONST_CAST (struct lexer *, lexer_);
867 struct lex_source *src = lex_source__ (lexer);
870 return lex_source_next__ (src, n);
873 static const struct lex_token stop_token = { .token = { .type = T_STOP } };
878 static const struct lex_token *
879 lex_source_front (const struct lex_source *src)
881 return &src->tokens[deque_front (&src->deque, 0)];
884 static const struct lex_token *
885 lex_source_next__ (const struct lex_source *src, int n)
887 while (deque_count (&src->deque) <= n)
889 if (!deque_is_empty (&src->deque))
891 const struct lex_token *front = lex_source_front (src);
892 if (front->token.type == T_STOP || front->token.type == T_ENDCMD)
896 lex_source_get (src);
899 return &src->tokens[deque_back (&src->deque, n)];
902 /* Returns the "struct token" of the token N after the current one in LEXER.
903 The returned pointer can be invalidated by pretty much any succeeding call
904 into the lexer, although the string pointer within the returned token is
905 only invalidated by consuming the token (e.g. with lex_get()). */
907 lex_next (const struct lexer *lexer, int n)
909 return &lex_next__ (lexer, n)->token;
912 /* Returns the type of the token N after the current one in LEXER. */
914 lex_next_token (const struct lexer *lexer, int n)
916 return lex_next (lexer, n)->type;
919 /* Returns the number in the tokn N after the current one in LEXER.
921 Only T_NEG_NUM and T_POS_NUM tokens have meaningful values. For other
922 tokens this function will always return zero. */
924 lex_next_tokval (const struct lexer *lexer, int n)
926 return token_number (lex_next (lexer, n));
929 /* Returns the null-terminated string in the token N after the current one, in
932 Only T_ID and T_STRING tokens have meaningful strings. For other tokens
933 this functions this function will always return NULL.
935 The UTF-8 encoding of the returned string is correct for variable names and
936 other identifiers. Use filename_to_utf8() to use it as a filename. Use
937 data_in() to use it in a "union value". */
939 lex_next_tokcstr (const struct lexer *lexer, int n)
941 return lex_next_tokss (lexer, n).string;
944 /* Returns the string in the token N after the current one, in UTF-8 encoding.
945 The string is null-terminated (but the null terminator is not included in
946 the returned substring's 'length').
948 Only T_ID, T_MACRO_ID, T_STRING tokens have meaningful strings. For other
949 tokens this functions this function will always return NULL.
951 The UTF-8 encoding of the returned string is correct for variable names and
952 other identifiers. Use filename_to_utf8() to use it as a filename. Use
953 data_in() to use it in a "union value". */
955 lex_next_tokss (const struct lexer *lexer, int n)
957 return lex_next (lexer, n)->string;
961 lex_next_representation (const struct lexer *lexer, int n0, int n1)
963 return lex_source_get_syntax__ (lex_source__ (lexer), n0, n1);
967 lex_next_is_from_macro (const struct lexer *lexer, int n)
969 return lex_next__ (lexer, n)->from_macro;
973 lex_tokens_match (const struct token *actual, const struct token *expected)
975 if (actual->type != expected->type)
978 switch (actual->type)
982 return actual->number == expected->number;
985 return lex_id_match (expected->string, actual->string);
988 return (actual->string.length == expected->string.length
989 && !memcmp (actual->string.string, expected->string.string,
990 actual->string.length));
/* NOTE(review): fragmentary listing of lex_match_phrase() — the return type,
   braces, local declarations of 'token'/'i', the non-match early return, and
   the final loop that consumes the matched tokens were dropped.  The exact
   string_lexer_init() signature varies between PSPP versions; reconstruct
   only from the full file. */
997 /* If LEXER is positioned at the sequence of tokens that may be parsed from S,
998 skips it and returns true. Otherwise, returns false.
1000 S may consist of an arbitrary sequence of tokens, e.g. "KRUSKAL-WALLIS",
1001 "2SLS", or "END INPUT PROGRAM". Identifiers may be abbreviated to their
1002 first three letters. */
1004 lex_match_phrase (struct lexer *lexer, const char *s)
1006 struct string_lexer slex;
1011 string_lexer_init (&slex, s, strlen (s), SEG_MODE_INTERACTIVE);
1012 while (string_lexer_next (&slex, &token))
1013 if (token.type != SCAN_SKIP)
1015 bool match = lex_tokens_match (lex_next (lexer, i++), &token);
1016 token_uninit (&token);
1027 lex_source_get_first_line_number (const struct lex_source *src, int n)
1029 return lex_source_next__ (src, n)->first_line;
/* Returns the number of new-line bytes in the LENGTH bytes starting at S. */
static int
count_newlines (char *s, size_t length)
{
  int n_newlines = 0;
  char *newline;

  while ((newline = memchr (s, '\n', length)) != NULL)
    {
      n_newlines++;
      length -= (newline + 1) - s;
      s = newline + 1;
    }

  return n_newlines;
}
1049 lex_source_get_last_line_number (const struct lex_source *src, int n)
1051 const struct lex_token *token = lex_source_next__ (src, n);
1053 if (token->first_line == 0)
1057 char *token_str = &src->buffer[token->token_pos - src->tail];
1058 return token->first_line + count_newlines (token_str, token->token_len) + 1;
1063 count_columns (const char *s_, size_t length)
1065 const uint8_t *s = CHAR_CAST (const uint8_t *, s_);
1071 for (ofs = 0; ofs < length; ofs += mblen)
1075 mblen = u8_mbtouc (&uc, s + ofs, length - ofs);
1078 int width = uc_width (uc, "UTF-8");
1083 columns = ROUND_UP (columns + 1, 8);
1090 lex_source_get_first_column (const struct lex_source *src, int n)
1092 const struct lex_token *token = lex_source_next__ (src, n);
1093 return count_columns (&src->buffer[token->line_pos - src->tail],
1094 token->token_pos - token->line_pos);
1098 lex_source_get_last_column (const struct lex_source *src, int n)
1100 const struct lex_token *token = lex_source_next__ (src, n);
1101 char *start, *end, *newline;
1103 start = &src->buffer[token->line_pos - src->tail];
1104 end = &src->buffer[(token->token_pos + token->token_len) - src->tail];
1105 newline = memrchr (start, '\n', end - start);
1106 if (newline != NULL)
1107 start = newline + 1;
1108 return count_columns (start, end - start);
1111 /* Returns the 1-based line number of the start of the syntax that represents
1112 the token N after the current one in LEXER. Returns 0 for a T_STOP token or
1113 if the token is drawn from a source that does not have line numbers. */
1115 lex_get_first_line_number (const struct lexer *lexer, int n)
1117 const struct lex_source *src = lex_source__ (lexer);
1118 return src != NULL ? lex_source_get_first_line_number (src, n) : 0;
1121 /* Returns the 1-based line number of the end of the syntax that represents the
1122 token N after the current one in LEXER, plus 1. Returns 0 for a T_STOP
1123 token or if the token is drawn from a source that does not have line
1126 Most of the time, a single token is wholly within a single line of syntax,
1127 but there are two exceptions: a T_STRING token can be made up of multiple
1128 segments on adjacent lines connected with "+" punctuators, and a T_NEG_NUM
1129 token can consist of a "-" on one line followed by the number on the next.
1132 lex_get_last_line_number (const struct lexer *lexer, int n)
1134 const struct lex_source *src = lex_source__ (lexer);
1135 return src != NULL ? lex_source_get_last_line_number (src, n) : 0;
1138 /* Returns the 1-based column number of the start of the syntax that represents
1139 the token N after the current one in LEXER. Returns 0 for a T_STOP
1142 Column numbers are measured according to the width of characters as shown in
1143 a typical fixed-width font, in which CJK characters have width 2 and
1144 combining characters have width 0. */
1146 lex_get_first_column (const struct lexer *lexer, int n)
1148 const struct lex_source *src = lex_source__ (lexer);
1149 return src != NULL ? lex_source_get_first_column (src, n) : 0;
1152 /* Returns the 1-based column number of the end of the syntax that represents
1153 the token N after the current one in LEXER, plus 1. Returns 0 for a T_STOP
1156 Column numbers are measured according to the width of characters as shown in
1157 a typical fixed-width font, in which CJK characters have width 2 and
1158 combining characters have width 0. */
1160 lex_get_last_column (const struct lexer *lexer, int n)
1162 const struct lex_source *src = lex_source__ (lexer);
1163 return src != NULL ? lex_source_get_last_column (src, n) : 0;
1166 /* Returns the name of the syntax file from which the current command is drawn.
1167 Returns NULL for a T_STOP token or if the command's source does not have
1170 There is no version of this function that takes an N argument because
1171 lookahead only works to the end of a command and any given command is always
1172 within a single syntax file. */
1174 lex_get_file_name (const struct lexer *lexer)
1176 struct lex_source *src = lex_source__ (lexer);
1177 return src == NULL ? NULL : src->reader->file_name;
1181 lex_get_encoding (const struct lexer *lexer)
1183 struct lex_source *src = lex_source__ (lexer);
1184 return src == NULL ? NULL : src->reader->encoding;
1187 /* Returns the syntax mode for the syntax file from which the current drawn is
1188 drawn. Returns SEG_MODE_AUTO for a T_STOP token or if the command's source
1189 does not have line numbers.
1191 There is no version of this function that takes an N argument because
1192 lookahead only works to the end of a command and any given command is always
1193 within a single syntax file. */
1195 lex_get_syntax_mode (const struct lexer *lexer)
1197 struct lex_source *src = lex_source__ (lexer);
1198 return src == NULL ? SEG_MODE_AUTO : src->reader->syntax;
1201 /* Returns the error mode for the syntax file from which the current drawn is
1202 drawn. Returns LEX_ERROR_TERMINAL for a T_STOP token or if the command's
1203 source does not have line numbers.
1205 There is no version of this function that takes an N argument because
1206 lookahead only works to the end of a command and any given command is always
1207 within a single syntax file. */
1209 lex_get_error_mode (const struct lexer *lexer)
1211 struct lex_source *src = lex_source__ (lexer);
1212 return src == NULL ? LEX_ERROR_TERMINAL : src->reader->error;
1215 /* If the source that LEXER is currently reading has error mode
1216 LEX_ERROR_TERMINAL, discards all buffered input and tokens, so that the next
1217 token to be read comes directly from whatever is next read from the stream.
1219 It makes sense to call this function after encountering an error in a
1220 command entered on the console, because usually the user would prefer not to
1221 have cascading errors. */
1223 lex_interactive_reset (struct lexer *lexer)
1225 struct lex_source *src = lex_source__ (lexer);
1226 if (src != NULL && src->reader->error == LEX_ERROR_TERMINAL)
1228 src->head = src->tail = 0;
1229 src->journal_pos = src->seg_pos = src->line_pos = 0;
1230 src->n_newlines = 0;
1231 src->suppress_next_newline = false;
1232 segmenter_init (&src->segmenter, segmenter_get_mode (&src->segmenter));
1233 while (!deque_is_empty (&src->deque))
1234 lex_source_pop__ (src);
1235 lex_source_push_endcmd__ (src);
1239 /* Advances past any tokens in LEXER up to a T_ENDCMD or T_STOP. */
1241 lex_discard_rest_of_command (struct lexer *lexer)
1243 while (lex_token (lexer) != T_STOP && lex_token (lexer) != T_ENDCMD)
1247 /* Discards all lookahead tokens in LEXER, then discards all input sources
1248 until it encounters one with error mode LEX_ERROR_TERMINAL or until it
1249 runs out of input sources. */
1251 lex_discard_noninteractive (struct lexer *lexer)
1253 struct lex_source *src = lex_source__ (lexer);
/* First drop every buffered token in the current source. */
1257 while (!deque_is_empty (&src->deque))
1258 lex_source_pop__ (src);
/* Then destroy sources until one with error mode LEX_ERROR_TERMINAL is
   reached or no sources remain. */
1260 for (; src != NULL && src->reader->error != LEX_ERROR_TERMINAL;
1261 src = lex_source__ (lexer))
1262 lex_source_destroy (src);
/* Returns the largest buffer offset that SRC's tail may advance to without
   discarding data that is still needed: the minimum of the journal position,
   the current line start, and the oldest buffered token's line start. */
1267 lex_source_max_tail__ (const struct lex_source *src)
1269 const struct lex_token *token;
1272 assert (src->seg_pos >= src->line_pos);
1273 max_tail = MIN (src->journal_pos, src->line_pos);
1275 /* Use the oldest token also. (We know that src->deque cannot be empty
1276 because we are in the process of adding a new token, which is already
1277 initialized enough to use here.) */
1278 token = &src->tokens[deque_back (&src->deque, 0)];
1279 assert (token->token_pos >= token->line_pos);
1280 max_tail = MIN (max_tail, token->line_pos);
/* Makes room for more input at the head of SRC's buffer, either by advancing
   the tail past data that is no longer needed or, failing that, by growing
   the buffer's allocation. */
1286 lex_source_expand__ (struct lex_source *src)
1288 if (src->head - src->tail >= src->allocated)
1290 size_t max_tail = lex_source_max_tail__ (src)
1291 if (max_tail > src->tail)
1293 /* Advance the tail, freeing up room at the head. */
1294 memmove (src->buffer, src->buffer + (max_tail - src->tail),
1295 src->head - max_tail);
1296 src->tail = max_tail;
1300 /* Buffer is completely full. Expand it. */
1301 src->buffer = x2realloc (src->buffer, &src->allocated);
1306 /* There's space available at the head of the buffer. Nothing to do. */
/* Reads more input from SRC's reader into SRC's buffer, looping until the
   not-yet-segmented part of the buffer contains a new-line or end of input
   is reached. */
1311 lex_source_read__ (struct lex_source *src)
1315 lex_source_expand__ (src);
1317 size_t head_ofs = src->head - src->tail;
1318 size_t space = src->allocated - head_ofs;
1319 enum prompt_style prompt = segmenter_get_prompt (&src->segmenter);
1320 size_t n = src->reader->class->read (src->reader, &src->buffer[head_ofs],
1322 assert (n <= space);
/* NOTE(review): presumably reached on a zero-byte read — record EOF and
   make sure the buffer still has space at its head.  Confirm against the
   missing lines. */
1327 src->reader->eof = true;
1328 lex_source_expand__ (src);
/* Keep reading until the unsegmented input contains a new-line. */
1334 while (!memchr (&src->buffer[src->seg_pos - src->tail], '\n',
1335 src->head - src->seg_pos));
/* Returns the source that LEXER is currently reading from (the head of its
   source list), or NULL if LEXER has no sources. */
1338 static struct lex_source *
1339 lex_source__ (const struct lexer *lexer)
1341 return (ll_is_empty (&lexer->sources) ? NULL
1342 : ll_data (ll_head (&lexer->sources), struct lex_source, ll));
/* Returns the syntax text in SRC's buffer that runs from the start of TOKEN0
   through the end of TOKEN1.  Both tokens' source must still be inside SRC's
   buffer (token_pos offsets are relative to the buffer's tail). */
1345 static struct substring
1346 lex_tokens_get_syntax__ (const struct lex_source *src,
1347 const struct lex_token *token0,
1348 const struct lex_token *token1)
1350 size_t start = token0->token_pos;
1351 size_t end = token1->token_pos + token1->token_len;
1353 return ss_buffer (&src->buffer[start - src->tail], end - start);
/* Returns the syntax text for the tokens with lookahead offsets N0 through
   N1 in SRC (N1 is clamped to at least N0). */
1356 static struct substring
1357 lex_source_get_syntax__ (const struct lex_source *src, int n0, int n1)
1359 return lex_tokens_get_syntax__ (src,
1360 lex_source_next__ (src, n0),
1361 lex_source_next__ (src, MAX (n0, n1)));
/* Copies UTF-8 text IN into OUT, which must have room for OUT_SIZE bytes
   (at least 16).  Copying stops at a new-line, a null byte, or a CR-LF pair,
   or when OUT would overflow; "..." is appended if IN was truncated.
   Truncation never splits a multibyte character. */
1365 lex_ellipsize__ (struct substring in, char *out, size_t out_size)
1371 assert (out_size >= 16);
1372 out_maxlen = out_size - 1;
/* Reserve room for the "..." suffix if IN cannot fit whole. */
1373 if (in.length > out_maxlen - 3)
1376 for (out_len = 0; out_len < in.length; out_len += mblen)
/* Stop at a new-line, a null byte, or a CR-LF pair. */
1378 if (in.string[out_len] == '\n'
1379 || in.string[out_len] == '\0'
1380 || (in.string[out_len] == '\r'
1381 && out_len + 1 < in.length
1382 && in.string[out_len + 1] == '\n'))
/* Measure the multibyte character that starts here; stop before it if
   including it would exceed the output limit. */
1385 mblen = u8_mblen (CHAR_CAST (const uint8_t *, in.string + out_len),
1386 in.length - out_len);
1391 if (out_len + mblen > out_maxlen)
1395 memcpy (out, in.string, out_len);
/* Append "..." only if some of IN was left out. */
1396 strcpy (&out[out_len], out_len < in.length ? "..." : "");
/* Reports a syntax error for the tokens with lookahead offsets N0 through N1
   in SRC.  The message quotes the offending syntax (ellipsized) where
   possible, then appends text formatted from printf-style FORMAT and ARGS,
   and is emitted with the source's file name and line/column range. */
1400 lex_source_error_valist (struct lex_source *src, int n0, int n1,
1401 const char *format, va_list args)
1403 const struct lex_token *token;
1408 token = lex_source_next__ (src, n0);
1409 if (token->token.type == T_ENDCMD)
1410 ds_put_cstr (&s, _("Syntax error at end of command"));
1411 else if (token->from_macro)
1413 /* XXX this isn't ideal, we should get the actual syntax */
1414 char *syntax = token_to_string (&token->token);
1416 ds_put_format (&s, _("Syntax error at `%s'"), syntax);
1418 ds_put_cstr (&s, _("Syntax error"));
/* Not a macro token: quote the actual source text for the token range. */
1423 struct substring syntax = lex_source_get_syntax__ (src, n0, n1);
1424 if (!ss_is_empty (syntax))
1426 char syntax_cstr[64];
1428 lex_ellipsize__ (syntax, syntax_cstr, sizeof syntax_cstr);
1429 ds_put_format (&s, _("Syntax error at `%s'"), syntax_cstr);
1432 ds_put_cstr (&s, _("Syntax error"));
1437 ds_put_cstr (&s, ": ");
1438 ds_put_vformat (&s, format, args);
/* Make sure the message ends in a period. */
1440 if (ds_last (&s) != '.')
1441 ds_put_byte (&s, '.');
/* Emit the finished message, attributing it to the token range's location
   in the source file. */
1444 .category = MSG_C_SYNTAX,
1445 .severity = MSG_S_ERROR,
1446 .file_name = src->reader->file_name,
1447 .first_line = lex_source_get_first_line_number (src, n0),
1448 .last_line = lex_source_get_last_line_number (src, n1),
1449 .first_column = lex_source_get_first_column (src, n0),
1450 .last_column = lex_source_get_last_column (src, n1),
1451 .text = ds_steal_cstr (&s),
/* Reports an error, formatted from printf-style FORMAT and the following
   arguments, for the newest token in SRC (lookahead offset
   deque_count - 1), then discards that token. */
1456 static void PRINTF_FORMAT (2, 3)
1457 lex_get_error (struct lex_source *src, const char *format, ...)
1462 va_start (args, format);
1464 n = deque_count (&src->deque) - 1;
1465 lex_source_error_valist (src, n, n, format, args);
1466 lex_source_pop_front (src);
1471 /* Attempts to append an additional token into SRC's deque, reading more from
1472 the underlying lex_reader if necessary. Returns true if a new token was
1473 added to SRC's deque, false otherwise. */
1475 lex_source_try_get (struct lex_source *src)
1477 /* State maintained while scanning tokens. Usually we only need a single
1478 state, but scanner_push() can return SCAN_SAVE to indicate that the state
1479 needs to be saved and possibly restored later with SCAN_BACK. */
1482 struct segmenter segmenter;
1483 enum segment_type last_segment;
1484 int newlines; /* Number of newlines encountered so far. */
1485 /* Maintained here so we can update lex_source's similar members when we
1491 /* Initialize state. */
1492 struct state state =
1494 .segmenter = src->segmenter,
1496 .seg_pos = src->seg_pos,
1497 .line_pos = src->line_pos,
1499 struct state saved = state;
1501 /* Append a new token to SRC and initialize it. */
1502 struct lex_token *token = lex_push_token__ (src);
1503 struct scanner scanner;
1504 scanner_init (&scanner, &token->token);
1505 token->line_pos = src->line_pos;
1506 token->token_pos = src->seg_pos;
/* Record the 1-based line number of the token's start if the reader tracks
   line numbers; 0 means "no line number available". */
1507 if (src->reader->line_number > 0)
1508 token->first_line = src->reader->line_number + src->n_newlines;
1510 token->first_line = 0;
1512 /* Extract segments and pass them through the scanner until we obtain a
1516 /* Extract a segment. */
1517 const char *segment = &src->buffer[state.seg_pos - src->tail];
1518 size_t seg_maxlen = src->head - state.seg_pos;
1519 enum segment_type type;
1520 int seg_len = segmenter_push (&state.segmenter, segment, seg_maxlen,
1521 src->reader->eof, &type);
1524 /* The segmenter needs more input to produce a segment. */
1525 assert (!src->reader->eof);
1526 lex_source_read__ (src);
1530 /* Update state based on the segment. */
1531 state.last_segment = type;
1532 state.seg_pos += seg_len;
1533 if (type == SEG_NEWLINE)
1536 state.line_pos = state.seg_pos;
1539 /* Pass the segment into the scanner and try to get a token out. */
1540 enum scan_result result = scanner_push (&scanner, type,
1541 ss_buffer (segment, seg_len),
1543 if (result == SCAN_SAVE)
1545 else if (result == SCAN_BACK)
1550 else if (result == SCAN_DONE)
1554 /* If we've reached the end of a line, or the end of a command, then pass
1555 the line to the output engine as a syntax text item. */
1556 int n_lines = state.newlines;
1557 if (state.last_segment == SEG_END_COMMAND && !src->suppress_next_newline)
1560 src->suppress_next_newline = true;
1562 else if (n_lines > 0 && src->suppress_next_newline)
1565 src->suppress_next_newline = false;
1567 for (int i = 0; i < n_lines; i++)
1569 /* Beginning of line. */
1570 const char *line = &src->buffer[src->journal_pos - src->tail];
1572 /* Calculate line length, including \n or \r\n end-of-line if present.
1574 We use src->head even though that may be beyond what we've actually
1575 converted to tokens (which is only through state.line_pos). That's
1576 because, if we're emitting the line due to SEG_END_COMMAND, we want to
1577 take the whole line through the newline, not just through the '.'. */
1578 size_t max_len = src->head - src->journal_pos;
1579 const char *newline = memchr (line, '\n', max_len);
1580 size_t line_len = newline ? newline - line + 1 : max_len;
1582 /* Calculate line length excluding end-of-line. */
1583 size_t copy_len = line_len;
1584 if (copy_len > 0 && line[copy_len - 1] == '\n')
1586 if (copy_len > 0 && line[copy_len - 1] == '\r')
1589 /* Submit the line as syntax. */
1590 output_item_submit (text_item_create_nocopy (TEXT_ITEM_SYNTAX,
1591 xmemdup0 (line, copy_len),
1594 src->journal_pos += line_len;
/* The token's source length is how far the segmenter advanced. */
1597 token->token_len = state.seg_pos - src->seg_pos;
/* Commit the scanning state back into SRC. */
1599 src->segmenter = state.segmenter;
1600 src->seg_pos = state.seg_pos;
1601 src->line_pos = state.line_pos;
1602 src->n_newlines += state.newlines;
/* Post-process the scanned token: translate special token types and turn
   scan errors into user-visible messages (each error path reports via
   lex_get_error, which also discards the bad token). */
1604 switch (token->token.type)
1610 token->token.type = T_ENDCMD;
1614 case SCAN_BAD_HEX_LENGTH:
1615 lex_get_error (src, _("String of hex digits has %d characters, which "
1616 "is not a multiple of 2"),
1617 (int) token->token.number);
1620 case SCAN_BAD_HEX_DIGIT:
1621 case SCAN_BAD_UNICODE_DIGIT:
1622 lex_get_error (src, _("`%c' is not a valid hex digit"),
1623 (int) token->token.number);
1626 case SCAN_BAD_UNICODE_LENGTH:
1627 lex_get_error (src, _("Unicode string contains %d bytes, which is "
1628 "not in the valid range of 1 to 8 bytes"),
1629 (int) token->token.number);
1632 case SCAN_BAD_UNICODE_CODE_POINT:
1633 lex_get_error (src, _("U+%04X is not a valid Unicode code point"),
1634 (int) token->token.number);
1637 case SCAN_EXPECTED_QUOTE:
1638 lex_get_error (src, _("Unterminated string constant"));
1641 case SCAN_EXPECTED_EXPONENT:
1642 lex_get_error (src, _("Missing exponent following `%s'"),
1643 token->token.string.string);
1646 case SCAN_UNEXPECTED_CHAR:
1649 lex_get_error (src, _("Bad character %s in input"),
1650 uc_name (token->token.number, c_name));
1655 lex_source_pop_front (src);
/* NOTE(review): only a fragment of this function is visible here; it appears
   to retry lex_source_try_get() until a token is obtained or input is
   exhausted — confirm against the full source. */
1663 lex_source_get__ (struct lex_source *src)
1669 else if (lex_source_try_get (src))
/* Obtains the next token for SRC, expanding macro invocations as they are
   recognized: tokens are fed one by one into a macro expander, and when a
   complete invocation has been consumed the buffered tokens are replaced by
   the macro's expansion.  (SRC is logically non-const; the const qualifier
   is cast away.) */
1675 lex_source_get (const struct lex_source *src_)
1677 struct lex_source *src = CONST_CAST (struct lex_source *, src_);
1679 size_t old_count = deque_count (&src->deque);
1680 if (!lex_source_get__ (src))
/* If macro expansion is disabled, use the token as-is. */
1683 if (!settings_get_mexpand ())
/* Try to start a macro invocation with the newly read token. */
1686 struct macro_expander *me;
1687 int retval = macro_expander_create (src->lexer->macros,
1688 &lex_source_front (src)->token,
/* Feed additional tokens into the expander until it finishes or fails. */
1692 if (!lex_source_get__ (src))
1694 /* This should not be reachable because we always get a T_ENDCMD at
1695 the end of an input file (transformed from T_STOP by
1696 lex_source_try_get()) and the macro_expander should always
1697 terminate expansion on T_ENDCMD. */
1701 const struct lex_token *front = lex_source_front (src);
1702 const struct macro_token mt = {
1703 .token = front->token,
1704 .representation = lex_tokens_get_syntax__ (src, front, front)
1706 retval = macro_expander_add (me, &mt);
1710 /* XXX handle case where there's a macro invocation starting from some
1711 later token we've already obtained */
1712 macro_expander_destroy (me);
1716 /* XXX handle case where the macro invocation doesn't use all the tokens */
/* Discard the tokens that made up the invocation... */
1717 while (deque_count (&src->deque) > old_count)
1718 lex_source_pop_front (src);
/* ...and replace them by the macro's expansion. */
1720 struct macro_tokens expansion = { .n = 0 };
1721 macro_expander_get_expansion (me, &expansion);
1722 macro_expander_destroy (me);
/* With SET MPRINT on, log the expansion's text for the user. */
1724 if (settings_get_mprint ())
1726 struct string mprint = DS_EMPTY_INITIALIZER;
1727 macro_tokens_to_representation (&expansion, &mprint);
1728 output_item_submit (text_item_create (TEXT_ITEM_LOG, ds_cstr (&mprint),
1729 _("Macro Expansion")));
1730 ds_destroy (&mprint);
/* Push each expanded token onto the deque in place of the invocation. */
1733 for (size_t i = 0; i < expansion.n; i++)
1735 *lex_push_token__ (src) = (struct lex_token) {
1736 .token = expansion.mts[i].token,
1741 ss_dealloc (&expansion.mts[i].representation); /* XXX should feed into lexer */
1743 free (expansion.mts);
/* Pushes a sentinel T_ENDCMD token onto SRC's token deque. */
1749 lex_source_push_endcmd__ (struct lex_source *src)
1751 *lex_push_token__ (src) = (struct lex_token) { .token = { .type = T_ENDCMD } };
/* Creates and returns a new lex_source that reads tokens from READER, with
   the segmenter initialized to READER's syntax mode.  The deque starts with
   a sentinel T_ENDCMD token so it is never empty. */
1754 static struct lex_source *
1755 lex_source_create (struct lexer *lexer, struct lex_reader *reader)
1757 struct lex_source *src;
1759 src = xzalloc (sizeof *src);
1760 src->reader = reader;
1761 segmenter_init (&src->segmenter, reader->syntax);
1763 src->tokens = deque_init (&src->deque, 4, sizeof *src->tokens);
1765 lex_source_push_endcmd__ (src);
/* Destroys SRC: invokes its reader's destroy function, drops all buffered
   tokens, and unlinks SRC from its lexer's source list. */
1771 lex_source_destroy (struct lex_source *src)
/* Save these before the reader's destroy function may free the reader. */
1773 char *file_name = src->reader->file_name;
1774 char *encoding = src->reader->encoding;
1775 if (src->reader->class->destroy != NULL)
1776 src->reader->class->destroy (src->reader);
1780 while (!deque_is_empty (&src->deque))
1781 lex_source_pop__ (src);
1783 ll_remove (&src->ll);
/* A lex_reader backed by a file, read through a u8_istream (which handles
   character-encoding conversion — see libpspp/u8-istream.h). */
1787 struct lex_file_reader
1789 struct lex_reader reader;
1790 struct u8_istream *istream;
/* vtable for file-backed readers, defined near the end of this file. */
1793 static struct lex_reader_class lex_file_reader_class;
1795 /* Creates and returns a new lex_reader that will read from file FILE_NAME (or
1796 from stdin if FILE_NAME is "-"). The file is expected to be encoded with
1797 ENCODING, which should take one of the forms accepted by
1798 u8_istream_for_file(). SYNTAX and ERROR become the syntax mode and error
1799 mode of the new reader, respectively.
1801 Returns a null pointer if FILE_NAME cannot be opened. */
1803 lex_reader_for_file (const char *file_name, const char *encoding,
1804 enum segmenter_mode syntax,
1805 enum lex_error_mode error)
1807 struct lex_file_reader *r;
1808 struct u8_istream *istream;
/* "-" means standard input, by convention. */
1810 istream = (!strcmp(file_name, "-")
1811 ? u8_istream_for_fd (encoding, STDIN_FILENO)
1812 : u8_istream_for_file (encoding, file_name, O_RDONLY));
1813 if (istream == NULL)
1815 msg (ME, _("Opening `%s': %s."), file_name, strerror (errno));
1819 r = xmalloc (sizeof *r);
1820 lex_reader_init (&r->reader, &lex_file_reader_class);
1821 r->reader.syntax = syntax;
1822 r->reader.error = error;
1823 r->reader.file_name = xstrdup (file_name);
1824 r->reader.encoding = xstrdup_if_nonnull (encoding);
1825 r->reader.line_number = 1;
1826 r->istream = istream;
/* Converts abstract lex_reader R into the lex_file_reader that contains it. */
1831 static struct lex_file_reader *
1832 lex_file_reader_cast (struct lex_reader *r)
1834 return UP_CAST (r, struct lex_file_reader, reader);
/* lex_reader "read" implementation for files: reads up to N bytes from the
   underlying u8_istream into BUF.  PROMPT_STYLE is unused for files. */
1838 lex_file_read (struct lex_reader *r_, char *buf, size_t n,
1839 enum prompt_style prompt_style UNUSED)
1841 struct lex_file_reader *r = lex_file_reader_cast (r_);
1842 ssize_t n_read = u8_istream_read (r->istream, buf, n);
/* NOTE(review): presumably reached when n_read < 0 — report the read error
   to the user.  Confirm against the missing lines. */
1845 msg (ME, _("Error reading `%s': %s."), r_->file_name, strerror (errno));
/* lex_reader "close" implementation for files: closes the u8_istream, except
   that a stream on stdin is only freed, not closed. */
1852 lex_file_close (struct lex_reader *r_)
1854 struct lex_file_reader *r = lex_file_reader_cast (r_);
/* Don't close standard input; the rest of the process may still use it. */
1856 if (u8_istream_fileno (r->istream) != STDIN_FILENO)
1858 if (u8_istream_close (r->istream) != 0)
1859 msg (ME, _("Error closing `%s': %s."), r_->file_name, strerror (errno));
1862 u8_istream_free (r->istream);
1867 static struct lex_reader_class lex_file_reader_class =
/* A lex_reader backed by an in-memory string. */
1873 struct lex_string_reader
1875 struct lex_reader reader;
/* vtable for string-backed readers, defined near the end of this file. */
1880 static struct lex_reader_class lex_string_reader_class;
1882 /* Creates and returns a new lex_reader for the contents of S, which must be
1883 encoded in the given ENCODING. The new reader takes ownership of S and
1884 will free it with ss_dealloc() when it is closed. */
1886 lex_reader_for_substring_nocopy (struct substring s, const char *encoding)
1888 struct lex_string_reader *r;
1890 r = xmalloc (sizeof *r);
1891 lex_reader_init (&r->reader, &lex_string_reader_class);
/* String readers always use automatic syntax-mode detection. */
1892 r->reader.syntax = SEG_MODE_AUTO;
1893 r->reader.encoding = xstrdup_if_nonnull (encoding);
1900 /* Creates and returns a new lex_reader for a copy of null-terminated string S,
1901 which must be encoded in ENCODING. The caller retains ownership of S. */
1903 lex_reader_for_string (const char *s, const char *encoding)
1905 struct substring ss;
/* Copy S so that the caller keeps ownership of the original. */
1906 ss_alloc_substring (&ss, ss_cstr (s));
1907 return lex_reader_for_substring_nocopy (ss, encoding);
1910 /* Formats FORMAT as a printf()-like format string and creates and returns a
1911 new lex_reader for the formatted result, which must be encoded in
ENCODING. */
1913 lex_reader_for_format (const char *format, const char *encoding, ...)
1915 struct lex_reader *r;
1918 va_start (args, encoding);
/* xvasprintf() allocates the formatted string; the new reader takes
   ownership of it via the _nocopy constructor. */
1919 r = lex_reader_for_substring_nocopy (ss_cstr (xvasprintf (format, args)), encoding);
/* Converts abstract lex_reader R into the lex_string_reader that contains
   it. */
1925 static struct lex_string_reader *
1926 lex_string_reader_cast (struct lex_reader *r)
1928 return UP_CAST (r, struct lex_string_reader, reader);
/* lex_reader "read" implementation for strings: copies up to N bytes of the
   string's remaining contents into BUF.  PROMPT_STYLE is unused. */
1932 lex_string_read (struct lex_reader *r_, char *buf, size_t n,
1933 enum prompt_style prompt_style UNUSED)
1935 struct lex_string_reader *r = lex_string_reader_cast (r_);
/* Copy the smaller of N and the number of bytes left past the offset. */
1938 chunk = MIN (n, r->s.length - r->offset);
1939 memcpy (buf, r->s.string + r->offset, chunk);
/* lex_reader "close" implementation for strings: frees the owned string and
   the reader itself. */
1946 lex_string_close (struct lex_reader *r_)
1948 struct lex_string_reader *r = lex_string_reader_cast (r_);
1954 static struct lex_reader_class lex_string_reader_class =