2 * Copyright 2006-2007 Adrian Thurston <thurston@cs.queensu.ca>
5 /* This file is part of Ragel.
7 * Ragel is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * Ragel is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Ragel; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 * The Scanner for Importing
50 machine inline_token_scan;
54 # Import scanner tokens.
# Recognize "#define NAME 123" token sequences queued from an imported host
# file and replay them to the including parser as the Ragel assignment
# "NAME = 123;".  The offsets nameOff/numOff/litOff are defined in lines
# elided from this extract -- presumably indexes into the matched token run.
59 IMP_Define IMP_Word IMP_UInt => {
60 int base = tok_tokstart - token_data;
64 directToParser( inclToParser, fileName, line, column, TK_Word,
65 token_strings[base+nameOff], token_lens[base+nameOff] );
66 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
67 directToParser( inclToParser, fileName, line, column, TK_UInt,
68 token_strings[base+numOff], token_lens[base+numOff] );
69 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
72 # Assignment of number.
# "NAME = 123" in the imported file is forwarded verbatim as "NAME = 123;".
73 IMP_Word '=' IMP_UInt => {
74 int base = tok_tokstart - token_data;
78 directToParser( inclToParser, fileName, line, column, TK_Word,
79 token_strings[base+nameOff], token_lens[base+nameOff] );
80 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
81 directToParser( inclToParser, fileName, line, column, TK_UInt,
82 token_strings[base+numOff], token_lens[base+numOff] );
83 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
# "#define NAME 'lit'" becomes the assignment "NAME = 'lit';".
87 IMP_Define IMP_Word IMP_Literal => {
88 int base = tok_tokstart - token_data;
92 directToParser( inclToParser, fileName, line, column, TK_Word,
93 token_strings[base+nameOff], token_lens[base+nameOff] );
94 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
95 directToParser( inclToParser, fileName, line, column, TK_Literal,
96 token_strings[base+litOff], token_lens[base+litOff] );
97 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
100 # Assignment of literal.
# "NAME = 'lit'" forwarded verbatim, same shape as the numeric case above.
101 IMP_Word '=' IMP_Literal => {
102 int base = tok_tokstart - token_data;
106 directToParser( inclToParser, fileName, line, column, TK_Word,
107 token_strings[base+nameOff], token_lens[base+nameOff] );
108 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
109 directToParser( inclToParser, fileName, line, column, TK_Literal,
110 token_strings[base+litOff], token_lens[base+litOff] );
111 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
114 # Catch everything else.
/* Run the inline_token_scan machine over the queue of buffered import
 * tokens (the %% write exec lines are elided from this extract), then shift
 * the unconsumed tail of tokens down to the front of the parallel arrays
 * token_data/token_strings/token_lens so scanning can resume later. */
121 void Scanner::flushImport()
/* One-past-the-end of the buffered token run. */
124 int *pe = token_data + cur_token;
/* No token match in progress: nothing needs preserving (the elided branch
 * presumably resets cur_token to 0 -- cannot confirm from this extract). */
129 if ( tok_tokstart == 0 )
/* Tokens from tok_tokstart onward were not consumed by the machine; keep
 * them for the next flush. */
132 cur_token = pe - tok_tokstart;
133 int ts_offset = tok_tokstart - token_data;
/* Shift the three parallel arrays in lock-step so indexes stay aligned. */
134 memmove( token_data, token_data+ts_offset, cur_token*sizeof(token_data[0]) );
135 memmove( token_strings, token_strings+ts_offset, cur_token*sizeof(token_strings[0]) );
136 memmove( token_lens, token_lens+ts_offset, cur_token*sizeof(token_lens[0]) );
/* Hand a single token straight to the given parser with an explicit source
 * location (file/line/column).  tokdata may be null; toklen gives its
 * length.  The cerr lines below are a debug trace -- presumably guarded by
 * a logging condition in lines elided from this extract. */
140 void Scanner::directToParser( Parser *toParser, char *tokFileName, int tokLine,
141 int tokColumn, int type, char *tokdata, int toklen )
146 cerr << "scanner:" << tokLine << ":" << tokColumn <<
147 ": sending token to the parser " << Parser_lelNames[type];
148 cerr << " " << toklen;
150 cerr << " " << tokdata;
/* Build the location record the parser attaches to the token. */
154 loc.fileName = tokFileName;
158 toParser->token( loc, type, tokdata, toklen );
/* Append one token to the import buffers.  When the buffer is full the
 * elided branch after the check below presumably flushes it first.  The
 * token text [start,end) is deep-copied and NUL-terminated; a token with no
 * text gets null data and zero length. */
161 void Scanner::importToken( int token, char *start, char *end )
163 if ( cur_token == max_tokens )
166 token_data[cur_token] = token;
/* Defaults for a data-less token. */
168 token_strings[cur_token] = 0;
169 token_lens[cur_token] = 0;
172 int toklen = end-start;
173 token_lens[cur_token] = toklen;
/* Own a NUL-terminated copy of the token text. */
174 token_strings[cur_token] = new char[toklen+1];
175 memcpy( token_strings[cur_token], start, toklen );
176 token_strings[cur_token][toklen] = 0;
/* Pass a token of outside (host) code through.  When importing, the token
 * is queued for the import machine; otherwise, at the bottom of the include
 * stack, the raw text is XML-escaped into the output stream. */
181 void Scanner::pass( int token, char *start, char *end )
183 if ( importMachines )
184 importToken( token, start, end );
192 /* If no errors and we are at the bottom of the include stack (the
193 * source file listed on the command line) then write out the data. */
194 if ( includeDepth == 0 && machineSpec == 0 && machineName == 0 )
195 xmlEscapeHost( output, tokstart, tokend-tokstart );
199 * The scanner for processing sections, includes, imports, etc.
203 machine section_parse;
/* Initialize the section parser state (body elided from this extract). */
209 void Scanner::init( )
/* Return whether this scanner currently has a parser to send tokens to.
 * Reports the "no previous specification name" error at most once. */
214 bool Scanner::active()
219 if ( parser == 0 && ! parserExistsError ) {
220 scan_error() << "there is no previous specification name" << endl;
221 parserExistsError = true;
/* Emit the standard "file:line:col: " error prefix to cerr and return the
 * stream so callers can append the message. */
230 ostream &Scanner::scan_error()
232 /* Maintain the error count. */
234 cerr << fileName << ":" << line << ":" << column << ": ";
/* True if the given file/section pair is already on the include stack,
 * i.e. including it again would recurse forever. */
238 bool Scanner::recursiveInclude( char *inclFileName, char *inclSectionName )
240 for ( IncludeStack::Iter si = includeStack; si.lte(); si++ ) {
241 if ( strcmp( si->fileName, inclFileName ) == 0 &&
242 strcmp( si->sectionName, inclSectionName ) == 0 )
/* Advance the column count over the token just matched. */
250 void Scanner::updateCol()
255 //cerr << "adding " << tokend - from << " to column" << endl;
256 column += tokend - from;
261 machine section_parse;
263 # Need the defines representing tokens.
# Scratch state: word/lit capture the last TK_Word / TK_Literal seen.
266 action clear_words { word = lit = 0; word_len = lit_len = 0; }
267 action store_word { word = tokdata; word_len = toklen; }
268 action store_lit { lit = tokdata; lit_len = toklen; }
# Error reporters for each malformed statement form.
270 action mach_err { scan_error() << "bad machine statement" << endl; }
271 action incl_err { scan_error() << "bad include statement" << endl; }
272 action import_err { scan_error() << "bad import statement" << endl; }
273 action write_err { scan_error() << "bad write statement" << endl; }
# Handle "machine NAME;": select (creating if needed) the parser for NAME,
# unless this section is being skipped (wrong include target / importing).
275 action handle_machine
277 /* Assign a name to the machine. */
278 char *machine = word;
280 if ( !importMachines && inclSectionTarg == 0 ) {
281 ignoreSection = false;
283 ParserDictEl *pdEl = parserDict.find( machine );
285 pdEl = new ParserDictEl( machine );
286 pdEl->value = new Parser( fileName, machine, sectionLoc );
288 parserDict.insert( pdEl );
291 parser = pdEl->value;
293 else if ( !importMachines && strcmp( inclSectionTarg, machine ) == 0 ) {
294 /* found include target */
295 ignoreSection = false;
296 parser = inclToParser;
299 /* ignoring section */
300 ignoreSection = true;
306 ( KW_Machine TK_Word @store_word ';' ) @handle_machine
307 <>err mach_err <>eof mach_err;
# Handle "include [NAME] ['file'];": recursion-check, then scan the named
# file/section with a nested Scanner targeting the current parser.
309 action handle_include
312 char *inclSectionName = word;
313 char *inclFileName = 0;
315 /* Implement defaults for the input file and section name. */
316 if ( inclSectionName == 0 )
317 inclSectionName = parser->sectionName;
320 inclFileName = prepareFileName( lit, lit_len );
322 inclFileName = fileName;
324 /* Check for a recursive include structure. Add the current file/section
325 * name then check if what we are including is already in the stack. */
326 includeStack.append( IncludeStackItem( fileName, parser->sectionName ) );
328 if ( recursiveInclude( inclFileName, inclSectionName ) )
329 scan_error() << "include: this is a recursive include operation" << endl;
331 /* Open the input file for reading. */
332 ifstream *inFile = new ifstream( inclFileName );
333 if ( ! inFile->is_open() ) {
334 scan_error() << "include: could not open " <<
335 inclFileName << " for reading" << endl;
338 Scanner scanner( inclFileName, *inFile, output, parser,
339 inclSectionName, includeDepth+1, false );
344 /* Remove the last element (len-1) */
345 includeStack.remove( -1 );
# Either "NAME ['file']" or just "'file'" may follow the include keyword.
350 TK_Word @store_word ( TK_Literal @store_lit )? |
351 TK_Literal @store_lit
355 ( KW_Include include_names ';' ) @handle_include
356 <>err incl_err <>eof incl_err;
# Handle "import 'file';": scan the file in import mode, then flush the
# queued tokens through the inline_token_scan machine (a sentinel 0 token
# marks end of input for the flush).
361 char *importFileName = prepareFileName( lit, lit_len );
363 /* Open the input file for reading. */
364 ifstream *inFile = new ifstream( importFileName );
365 if ( ! inFile->is_open() ) {
366 scan_error() << "import: could not open " <<
367 importFileName << " for reading" << endl;
370 Scanner scanner( importFileName, *inFile, output, parser,
371 0, includeDepth+1, true );
373 scanner.importToken( 0, 0, 0 );
374 scanner.flushImport();
380 ( KW_Import TK_Literal @store_lit ';' ) @handle_import
381 <>err import_err <>eof import_err;
# Handle "write ARG+;": emit a <write ...> element (opening tag attributes
# visible below; the tag name line is elided from this extract).
385 if ( active() && machineSpec == 0 && machineName == 0 ) {
387 " def_name=\"" << parser->sectionName << "\""
388 " line=\"" << line << "\""
389 " col=\"" << column << "\""
396 if ( active() && machineSpec == 0 && machineName == 0 )
397 output << "<arg>" << tokdata << "</arg>";
402 if ( active() && machineSpec == 0 && machineName == 0 )
403 output << "</write>\n";
407 ( KW_Write @write_command
408 ( TK_Word @write_arg )+ ';' @write_close )
409 <>err write_err <>eof write_err;
413 /* Send the token off to the parser. */
415 directToParser( parser, fileName, line, column, type, tokdata, toklen );
418 # Catch everything else.
420 ^( KW_Machine | KW_Include | KW_Import | KW_Write ) @handle_token;
/* Single-character token: forward as a one-byte [start,end) range. */
431 void Scanner::token( int type, char c )
433 token( type, &c, &c + 1 );
/* Data-less token (body elided; presumably forwards with null data). */
436 void Scanner::token( int type )
/* General form: deep-copy the token text [start,end) with a trailing NUL,
 * then hand it to processToken for section-level handling. */
441 void Scanner::token( int type, char *start, char *end )
447 tokdata = new char[toklen+1];
448 memcpy( tokdata, start, toklen );
452 processToken( type, tokdata, toklen );
461 machine section_parse;
467 /* Record the last token for use in controlling the scan of subsequent
472 void Scanner::startSection( )
474 parserExistsError = false;
476 if ( includeDepth == 0 ) {
477 if ( machineSpec == 0 && machineName == 0 )
478 output << "</host>\n";
481 sectionLoc.fileName = fileName;
482 sectionLoc.line = line;
486 void Scanner::endSection( )
488 /* Execute the eof actions for the section parser. */
490 machine section_parse;
494 /* Close off the section with the parser. */
497 loc.fileName = fileName;
501 parser->token( loc, TK_EndSection, 0, 0 );
504 if ( includeDepth == 0 ) {
505 if ( machineSpec == 0 && machineName == 0 ) {
506 /* The end section may include a newline on the end, so
507 * we use the last line, which will count the newline. */
508 output << "<host line=\"" << line << "\">";
516 # This is sent by the driver code.
526 # Identifiers, numbers, comments, and other common things.
527 ident = ( alpha | '_' ) ( alpha |digit |'_' )*;
529 hex_number = '0x' [0-9a-fA-F]+;
# C block comment; :>> stops at the first closing "*/".
532 '/*' ( any | NL )* :>> '*/';
537 c_cpp_comment = c_comment | cpp_comment;
539 ruby_comment = '#' [^\n]* NL;
541 # These literal forms are common to host code and ragel.
# Quoted literals admit escaped characters and embedded newlines.
542 s_literal = "'" ([^'\\] | NL | '\\' (any | NL))* "'";
543 d_literal = '"' ([^"\\] | NL | '\\' (any | NL))* '"';
544 host_re_literal = '/' ([^/\\] | NL | '\\' (any | NL))* '/';
546 whitespace = [ \t] | NL;
547 pound_comment = '#' [^\n]* NL;
549 # An inline block of code for Ruby.
# Longest-match scanner for Ruby host-code blocks embedded in a spec.
# Keywords become typed tokens; everything else streams out as IL_Symbol.
550 inline_code_ruby := |*
551 # Inline expression keywords.
552 "fpc" => { token( KW_PChar ); };
553 "fc" => { token( KW_Char ); };
554 "fcurs" => { token( KW_CurState ); };
555 "ftargs" => { token( KW_TargState ); };
# Several keyword rules below turn whitespace passing off until the
# statement's terminator is seen (the keyword rule heads are elided
# from this extract).
557 whitespaceOn = false;
561 # Inline statement keywords.
563 whitespaceOn = false;
566 "fexec" => { token( KW_Exec, 0, 0 ); };
568 whitespaceOn = false;
572 whitespaceOn = false;
576 whitespaceOn = false;
580 whitespaceOn = false;
584 whitespaceOn = false;
# Ordinary host tokens.
588 ident => { token( TK_Word, tokstart, tokend ); };
590 number => { token( TK_UInt, tokstart, tokend ); };
591 hex_number => { token( TK_Hex, tokstart, tokend ); };
593 ( s_literal | d_literal | host_re_literal )
594 => { token( IL_Literal, tokstart, tokend ); };
598 token( IL_WhiteSpace, tokstart, tokend );
601 ruby_comment => { token( IL_Comment, tokstart, tokend ); };
603 "::" => { token( TK_NameSep, tokstart, tokend ); };
605 # Some symbols need to go to the parser as with their cardinal value as
606 # the token type (as opposed to being sent as anonymous symbols)
607 # because they are part of the sequences which we interpret. The * ) ;
608 # symbols cause whitespace parsing to come back on. This gets turned
609 # off by some keywords.
613 token( *tokstart, tokstart, tokend );
614 if ( inlineBlockType == SemiTerminated )
620 token( *tokstart, tokstart, tokend );
623 [,(] => { token( *tokstart, tokstart, tokend ); };
# Opening brace tracking (count maintenance elided from this extract).
626 token( IL_Symbol, tokstart, tokend );
# Closing brace: if it balances a curly-delimited block, the block ends
# (return handled in elided lines); otherwise it is ordinary host code.
631 if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
632 /* Inline code block ends. */
637 /* Either a semi terminated inline block or only the closing
638 * brace of some inner scope, not the block's closing brace. */
639 token( IL_Symbol, tokstart, tokend );
644 scan_error() << "unterminated code block" << endl;
647 # Send every other character as a symbol.
648 any => { token( IL_Symbol, tokstart, tokend ); };
652 # An inline block of code for languages other than Ruby.
# Mirrors inline_code_ruby above, with C/C++-style comments instead of
# ruby_comment and no host_re_literal form.
654 # Inline expression keywords.
655 "fpc" => { token( KW_PChar ); };
656 "fc" => { token( KW_Char ); };
657 "fcurs" => { token( KW_CurState ); };
658 "ftargs" => { token( KW_TargState ); };
# Keyword rules below disable whitespace passing (rule heads elided
# from this extract).
660 whitespaceOn = false;
664 # Inline statement keywords.
666 whitespaceOn = false;
669 "fexec" => { token( KW_Exec, 0, 0 ); };
671 whitespaceOn = false;
675 whitespaceOn = false;
679 whitespaceOn = false;
683 whitespaceOn = false;
687 whitespaceOn = false;
# Ordinary host tokens.
691 ident => { token( TK_Word, tokstart, tokend ); };
693 number => { token( TK_UInt, tokstart, tokend ); };
694 hex_number => { token( TK_Hex, tokstart, tokend ); };
696 ( s_literal | d_literal )
697 => { token( IL_Literal, tokstart, tokend ); };
701 token( IL_WhiteSpace, tokstart, tokend );
704 c_cpp_comment => { token( IL_Comment, tokstart, tokend ); };
706 "::" => { token( TK_NameSep, tokstart, tokend ); };
708 # Some symbols need to go to the parser as with their cardinal value as
709 # the token type (as opposed to being sent as anonymous symbols)
710 # because they are part of the sequences which we interpret. The * ) ;
711 # symbols cause whitespace parsing to come back on. This gets turned
712 # off by some keywords.
716 token( *tokstart, tokstart, tokend );
717 if ( inlineBlockType == SemiTerminated )
723 token( *tokstart, tokstart, tokend );
726 [,(] => { token( *tokstart, tokstart, tokend ); };
# Brace tracking, same scheme as the Ruby machine above.
729 token( IL_Symbol, tokstart, tokend );
734 if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
735 /* Inline code block ends. */
740 /* Either a semi terminated inline block or only the closing
741 * brace of some inner scope, not the block's closing brace. */
742 token( IL_Symbol, tokstart, tokend );
747 scan_error() << "unterminated code block" << endl;
750 # Send every other character as a symbol.
751 any => { token( IL_Symbol, tokstart, tokend ); };
# Scanner for the inside of a [...] OR literal.  Entered via fcall, so
# RE_SqClose returns with fret.
755 # Escape sequences in OR expressions.
756 '\\0' => { token( RE_Char, '\0' ); };
757 '\\a' => { token( RE_Char, '\a' ); };
758 '\\b' => { token( RE_Char, '\b' ); };
759 '\\t' => { token( RE_Char, '\t' ); };
760 '\\n' => { token( RE_Char, '\n' ); };
761 '\\v' => { token( RE_Char, '\v' ); };
762 '\\f' => { token( RE_Char, '\f' ); };
763 '\\r' => { token( RE_Char, '\r' ); };
# A backslash-newline is a line continuation: track position only.
764 '\\\n' => { updateCol(); };
# Any other escaped character stands for itself (skip the backslash).
765 '\\' any => { token( RE_Char, tokstart+1, tokend ); };
767 # Range dash in an OR expression.
768 '-' => { token( RE_Dash, 0, 0 ); };
770 # Terminate an OR expression.
771 ']' => { token( RE_SqClose ); fret; };
774 scan_error() << "unterminated OR literal" << endl;
777 # Characters in an OR expression.
778 [^\]] => { token( RE_Char, tokstart, tokend ); };
# Scanner for the inside of a /.../ regular-expression literal; same escape
# handling as or_literal, plus the regex metacharacters below.
782 ragel_re_literal := |*
783 # Escape sequences in regular expressions.
784 '\\0' => { token( RE_Char, '\0' ); };
785 '\\a' => { token( RE_Char, '\a' ); };
786 '\\b' => { token( RE_Char, '\b' ); };
787 '\\t' => { token( RE_Char, '\t' ); };
788 '\\n' => { token( RE_Char, '\n' ); };
789 '\\v' => { token( RE_Char, '\v' ); };
790 '\\f' => { token( RE_Char, '\f' ); };
791 '\\r' => { token( RE_Char, '\r' ); };
792 '\\\n' => { updateCol(); };
793 '\\' any => { token( RE_Char, tokstart+1, tokend ); };
795 # Terminate an OR expression.
797 token( RE_Slash, tokstart, tokend );
801 # Special characters.
802 '.' => { token( RE_Dot ); };
803 '*' => { token( RE_Star ); };
# Bracket classes nest into the or_literal scanner above.
805 '[' => { token( RE_SqOpen ); fcall or_literal; };
806 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
809 scan_error() << "unterminated regular expression" << endl;
812 # Characters in an OR expression.
813 [^\/] => { token( RE_Char, tokstart, tokend ); };
816 # We need a separate token space here to avoid the ragel keywords.
# Words after "write" are plain TK_Word tokens (not keywords); the
# terminating ';' returns control to the parser_def scanner.
817 write_statement := |*
818 ident => { token( TK_Word, tokstart, tokend ); } ;
819 [ \t\n]+ => { updateCol(); };
820 ';' => { token( ';' ); fgoto parser_def; };
823 scan_error() << "unterminated write statement" << endl;
827 # Parser definitions.
# Main scanner for the inside of a ragel specification: keywords, operator
# tokens, literals, and transitions into the sub-scanners defined above.
829 'machine' => { token( KW_Machine ); };
830 'include' => { token( KW_Include ); };
831 'import' => { token( KW_Import ); };
# 'write' (rule head elided from this extract) switches to the
# write_statement scanner so its arguments are not treated as keywords.
834 fgoto write_statement;
836 'action' => { token( KW_Action ); };
837 'alphtype' => { token( KW_AlphType ); };
839 # FIXME: Enable this post 5.17.
840 # 'range' => { token( KW_Range ); };
# The next three keyword rules (heads elided) start a semicolon-terminated
# inline host-code block, choosing the Ruby or generic code scanner.
844 inlineBlockType = SemiTerminated;
845 if ( hostLangType == RubyCode )
846 fcall inline_code_ruby;
852 inlineBlockType = SemiTerminated;
853 if ( hostLangType == RubyCode )
854 fcall inline_code_ruby;
859 token( KW_Variable );
860 inlineBlockType = SemiTerminated;
861 if ( hostLangType == RubyCode )
862 fcall inline_code_ruby;
866 'when' => { token( KW_When ); };
867 'eof' => { token( KW_Eof ); };
868 'err' => { token( KW_Err ); };
869 'lerr' => { token( KW_Lerr ); };
870 'to' => { token( KW_To ); };
871 'from' => { token( KW_From ); };
872 'export' => { token( KW_Export ); };
875 ident => { token( TK_Word, tokstart, tokend ); } ;
878 number => { token( TK_UInt, tokstart, tokend ); };
879 hex_number => { token( TK_Hex, tokstart, tokend ); };
881 # Literals, with optionals.
882 ( s_literal | d_literal ) [i]?
883 => { token( TK_Literal, tokstart, tokend ); };
885 '[' => { token( RE_SqOpen ); fcall or_literal; };
886 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
888 '/' => { token( RE_Slash ); fgoto ragel_re_literal; };
891 pound_comment => { updateCol(); };
893 ':=' => { token( TK_ColonEquals ); };
# To-state actions.
896 ">~" => { token( TK_StartToState ); };
897 "$~" => { token( TK_AllToState ); };
898 "%~" => { token( TK_FinalToState ); };
899 "<~" => { token( TK_NotStartToState ); };
900 "@~" => { token( TK_NotFinalToState ); };
901 "<>~" => { token( TK_MiddleToState ); };
# From-state actions.
904 ">*" => { token( TK_StartFromState ); };
905 "$*" => { token( TK_AllFromState ); };
906 "%*" => { token( TK_FinalFromState ); };
907 "<*" => { token( TK_NotStartFromState ); };
908 "@*" => { token( TK_NotFinalFromState ); };
909 "<>*" => { token( TK_MiddleFromState ); };
# EOF actions.
912 ">/" => { token( TK_StartEOF ); };
913 "$/" => { token( TK_AllEOF ); };
914 "%/" => { token( TK_FinalEOF ); };
915 "</" => { token( TK_NotStartEOF ); };
916 "@/" => { token( TK_NotFinalEOF ); };
917 "<>/" => { token( TK_MiddleEOF ); };
919 # Global Error actions.
920 ">!" => { token( TK_StartGblError ); };
921 "$!" => { token( TK_AllGblError ); };
922 "%!" => { token( TK_FinalGblError ); };
923 "<!" => { token( TK_NotStartGblError ); };
924 "@!" => { token( TK_NotFinalGblError ); };
925 "<>!" => { token( TK_MiddleGblError ); };
927 # Local error actions.
928 ">^" => { token( TK_StartLocalError ); };
929 "$^" => { token( TK_AllLocalError ); };
930 "%^" => { token( TK_FinalLocalError ); };
931 "<^" => { token( TK_NotStartLocalError ); };
932 "@^" => { token( TK_NotFinalLocalError ); };
933 "<>^" => { token( TK_MiddleLocalError ); };
936 "<>" => { token( TK_Middle ); };
# Condition embedding operators.
939 '>?' => { token( TK_StartCond ); };
940 '$?' => { token( TK_AllCond ); };
941 '%?' => { token( TK_LeavingCond ); };
943 '..' => { token( TK_DotDot ); };
944 '**' => { token( TK_StarStar ); };
945 '--' => { token( TK_DashDash ); };
946 '->' => { token( TK_Arrow ); };
947 '=>' => { token( TK_DoubleArrow ); };
949 ":>" => { token( TK_ColonGt ); };
950 ":>>" => { token( TK_ColonGtGt ); };
951 "<:" => { token( TK_LtColon ); };
953 # Opening of longest match.
954 "|*" => { token( TK_BarStar ); };
956 # Separator for name references.
957 "::" => { token( TK_NameSep, tokstart, tokend ); };
965 [ \t\r]+ => { updateCol(); };
967 # If we are in a single line machine then newline may end the spec.
970 if ( singleLineSpec ) {
# After 'export'/'entry' (rule context elided), the next '{' starts a
# curly-delimited inline code block rather than a machine expression.
977 if ( lastToken == KW_Export || lastToken == KW_Entry )
982 inlineBlockType = CurlyDelimited;
983 if ( hostLangType == RubyCode )
984 fcall inline_code_ruby;
991 scan_error() << "unterminated ragel section" << endl;
# Anything else is sent with its character value as the token type.
994 any => { token( *tokstart ); } ;
997 # Outside code scanner. These tokens get passed through.
# Ruby host-code variant: tokens outside ragel sections go to pass(),
# which emits them (or queues them when importing).
999 ident => { pass( IMP_Word, tokstart, tokend ); };
1000 number => { pass( IMP_UInt, tokstart, tokend ); };
1001 ruby_comment => { pass(); };
1002 ( s_literal | d_literal | host_re_literal )
1003 => { pass( IMP_Literal, tokstart, tokend ); };
# Section openers (rule heads elided): a multi-line section clears the
# single-line flag, a single-line section sets it.
1007 singleLineSpec = false;
1013 singleLineSpec = true;
1017 whitespace+ => { pass(); };
1019 any => { pass( *tokstart, 0, 0 ); };
1022 # Outside code scanner. These tokens get passed through.
# Non-Ruby host-code variant; additionally recognizes 'define' so that
# imported C headers' #define lines can be replayed (see inline_token_scan).
1024 'define' => { pass( IMP_Define, 0, 0 ); };
1025 ident => { pass( IMP_Word, tokstart, tokend ); };
1026 number => { pass( IMP_UInt, tokstart, tokend ); };
1027 c_cpp_comment => { pass(); };
1028 ( s_literal | d_literal ) => { pass( IMP_Literal, tokstart, tokend ); };
1032 singleLineSpec = false;
1038 singleLineSpec = true;
1042 whitespace+ => { pass(); };
1044 any => { pass( *tokstart, 0, 0 ); };
/* Main scan loop: read the input in growable chunks and drive the rlscan
 * machine over it, preserving any partially-matched token across buffer
 * refills.  The %% write init/exec lines are elided from this extract. */
1050 void Scanner::do_scan()
1053 char *buf = new char[bufsize];
/* NUL sentinel appended at end of input so the machine sees an EOF char. */
1054 const char last_char = 0;
1055 int cs, act, have = 0;
1058 /* The stack is two deep, one level for going into ragel defs from the main
1059 * machines which process outside code, and another for going into or literals
1060 * from either a ragel spec, or a regular expression. */
1062 int curly_count = 0;
1063 bool execute = true;
1064 bool singleLineSpec = false;
1065 InlineBlockType inlineBlockType = CurlyDelimited;
1067 /* Init the section parser and the character scanner. */
1071 /* Set up the start state. FIXME: After 5.20 is released the nocs write
1072 * init option should be used, the main machine eliminated and this statement moved
1073 * above the write init. */
1074 if ( hostLangType == RubyCode )
1075 cs = rlscan_en_main_ruby;
1077 cs = rlscan_en_main;
/* Fill from where the preserved prefix ends. */
1080 char *p = buf + have;
1081 int space = bufsize - have;
1084 /* We filled up the buffer trying to scan a token. Grow it. */
1085 bufsize = bufsize * 2;
1086 char *newbuf = new char[bufsize];
1088 /* Recompute p and space. */
1090 space = bufsize - have;
1092 /* Patch up pointers possibly in use. */
1093 if ( tokstart != 0 )
1094 tokstart = newbuf + ( tokstart - buf );
1095 tokend = newbuf + ( tokend - buf );
1097 /* Copy the new buffer in. */
1098 memcpy( newbuf, buf, have );
1103 input.read( p, space );
1104 int len = input.gcount();
1106 /* If we see eof then append the EOF char. */
1108 p[0] = last_char, len = 1;
1115 /* Check if we failed. */
1116 if ( cs == rlscan_error ) {
1117 /* Machine failed before finding a token. I'm not yet sure if this
1119 scan_error() << "scanner error" << endl;
1123 /* Decide if we need to preserve anything. */
1124 char *preserve = tokstart;
1126 /* Now set up the prefix. */
1127 if ( preserve == 0 )
1130 /* There is data that needs to be shifted over. */
/* Keep the in-progress token's bytes at the front of the buffer and
 * rebase tokstart/tokend accordingly for the next iteration. */
1131 have = pe - preserve;
1132 memmove( buf, preserve, have );
1133 unsigned int shiftback = preserve - buf;
1134 if ( tokstart != 0 )
1135 tokstart -= shiftback;
1136 tokend -= shiftback;