2 * Copyright 2006-2007 Adrian Thurston <thurston@cs.queensu.ca>
5 /* This file is part of Ragel.
7 * Ragel is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * Ragel is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Ragel; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 * The Scanner for Importing
50 machine inline_token_scan;
54 # Import scanner tokens.
# NOTE(review): this file is a sampled extract of a Ragel scanner spec;
# the stray leading numbers are original line numbers and intervening
# lines (machine delimiters, pattern comments, closing braces) are
# missing from this view.
#
# Each pattern below matches a short sequence of buffered import tokens
# and replays it to inclToParser as a Ragel constant definition of the
# form:  <word> = <value> ;
# nameOff/numOff/litOff are presumably offsets of the word/value within
# the matched token run -- their definitions are not visible here.
#
# C-style define of a number: define <word> <uint>.
59 IMP_Define IMP_Word IMP_UInt => {
60 int base = tok_tokstart - token_data;
64 directToParser( inclToParser, fileName, line, column, TK_Word,
65 token_strings[base+nameOff], token_lens[base+nameOff] );
66 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
67 directToParser( inclToParser, fileName, line, column, TK_UInt,
68 token_strings[base+numOff], token_lens[base+numOff] );
69 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
72 # Assignment of number.
73 IMP_Word '=' IMP_UInt => {
74 int base = tok_tokstart - token_data;
78 directToParser( inclToParser, fileName, line, column, TK_Word,
79 token_strings[base+nameOff], token_lens[base+nameOff] );
80 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
81 directToParser( inclToParser, fileName, line, column, TK_UInt,
82 token_strings[base+numOff], token_lens[base+numOff] );
83 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
# C-style define of a literal: define <word> <literal>.
87 IMP_Define IMP_Word IMP_Literal => {
88 int base = tok_tokstart - token_data;
92 directToParser( inclToParser, fileName, line, column, TK_Word,
93 token_strings[base+nameOff], token_lens[base+nameOff] );
94 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
95 directToParser( inclToParser, fileName, line, column, TK_Literal,
96 token_strings[base+litOff], token_lens[base+litOff] );
97 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
100 # Assignment of literal.
101 IMP_Word '=' IMP_Literal => {
102 int base = tok_tokstart - token_data;
106 directToParser( inclToParser, fileName, line, column, TK_Word,
107 token_strings[base+nameOff], token_lens[base+nameOff] );
108 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
109 directToParser( inclToParser, fileName, line, column, TK_Literal,
110 token_strings[base+litOff], token_lens[base+litOff] );
111 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
114 # Catch everything else.
/* flushImport(): run the inline_token_scan machine over the tokens
 * buffered by importToken() and compact any unconsumed tail to the
 * front of the parallel arrays token_data / token_strings / token_lens.
 * NOTE(review): sampled extract -- the machine exec statements, braces
 * and the tok_tokstart == 0 branch body are not visible here. */
121 void Scanner::flushImport()
124 int *pe = token_data + cur_token;
129 if ( tok_tokstart == 0 )
/* A token run was left partially matched: keep everything from
 * tok_tokstart onward and shift it down to index 0. */
132 cur_token = pe - tok_tokstart;
133 int ts_offset = tok_tokstart - token_data;
134 memmove( token_data, token_data+ts_offset, cur_token*sizeof(token_data[0]) );
135 memmove( token_strings, token_strings+ts_offset, cur_token*sizeof(token_strings[0]) );
136 memmove( token_lens, token_lens+ts_offset, cur_token*sizeof(token_lens[0]) );
/* directToParser(): hand one token, tagged with a source location,
 * straight to the given parser (bypassing the section machine).
 * tokdata/toklen may be 0 for tokens that carry no text.
 * NOTE(review): the cerr statements look like debug logging, probably
 * wrapped in a logging conditional not visible in this extract, as is
 * the declaration of loc -- confirm against the full source. */
140 void Scanner::directToParser( Parser *toParser, char *tokFileName, int tokLine,
141 int tokColumn, int type, char *tokdata, int toklen )
146 cerr << "scanner:" << tokLine << ":" << tokColumn <<
147 ": sending token to the parser " << Parser_lelNames[type];
148 cerr << " " << toklen;
150 cerr << " " << tokdata;
154 loc.fileName = tokFileName;
158 toParser->token( loc, type, tokdata, toklen );
/* importToken(): append one token to the import buffer consumed later
 * by flushImport(). When start/end are non-null the token text is
 * copied into a fresh NUL-terminated heap buffer; otherwise the string
 * and length slots are zeroed.
 * NOTE(review): the action taken when the buffer is full
 * (cur_token == max_tokens) is not visible in this extract. */
161 void Scanner::importToken( int token, char *start, char *end )
163 if ( cur_token == max_tokens )
166 token_data[cur_token] = token;
168 token_strings[cur_token] = 0;
169 token_lens[cur_token] = 0;
172 int toklen = end-start;
173 token_lens[cur_token] = toklen;
/* Own a NUL-terminated copy of [start, end). */
174 token_strings[cur_token] = new char[toklen+1];
175 memcpy( token_strings[cur_token], start, toklen );
176 token_strings[cur_token][toklen] = 0;
/* pass(): forward a host-code token through to the output. When import
 * machines are being gathered the token is also buffered via
 * importToken().
 * NOTE(review): the second if-statement below appears to belong to a
 * separate no-argument pass() overload whose signature line is missing
 * from this extract -- confirm against the full source. */
181 void Scanner::pass( int token, char *start, char *end )
183 if ( importMachines )
184 importToken( token, start, end );
192 /* If no errors and we are at the bottom of the include stack (the
193 * source file listed on the command line) then write out the data. */
194 if ( includeDepth == 0 && machineSpec == 0 && machineName == 0 )
195 xmlEscapeHost( output, tokstart, tokend-tokstart );
199 * The scanner for processing sections, includes, imports, etc.
203 machine section_parse;
/* init(): set up the section parser state; the body is not visible in
 * this extract. */
209 void Scanner::init( )
/* active(): report whether tokens should be processed. Emits the
 * "no name" diagnostic only once by latching parserExistsError.
 * NOTE(review): the return statements are not visible here. */
214 bool Scanner::active()
219 if ( parser == 0 && ! parserExistsError ) {
220 scan_error() << "this specification has no name, nor does any previous"
221 " specification" << endl;
222 parserExistsError = true;
/* scan_error(): bump the error count (per the comment below, in lines
 * not visible here) and return a stream primed with "file:line:col: "
 * for the caller to append the message to. */
231 ostream &Scanner::scan_error()
233 /* Maintain the error count. */
235 cerr << fileName << ":" << line << ":" << column << ": ";
/* recursiveInclude(): true when the (file, section) pair is already on
 * the include stack, i.e. including it again would recurse forever.
 * NOTE(review): the return statements are not visible in this
 * extract. */
239 bool Scanner::recursiveInclude( char *inclFileName, char *inclSectionName )
241 for ( IncludeStack::Iter si = includeStack; si.lte(); si++ ) {
242 if ( strcmp( si->fileName, inclFileName ) == 0 &&
243 strcmp( si->sectionName, inclSectionName ) == 0 )
/* updateCol(): advance the column counter by the width of the last
 * token. NOTE(review): `from` is set in lines missing from this
 * extract -- confirm what it is anchored to in the full source. */
251 void Scanner::updateCol()
256 //cerr << "adding " << tokend - from << " to column" << endl;
257 column += tokend - from;
262 machine section_parse;
264 # Need the defines representing tokens.
# Helper actions shared by the statement patterns below: `word`/`lit`
# (and their lengths) latch the most recent TK_Word / TK_Literal token
# data so the handle_* actions can consume them.
267 action clear_words { word = lit = 0; word_len = lit_len = 0; }
268 action store_word { word = tokdata; word_len = toklen; }
269 action store_lit { lit = tokdata; lit_len = toklen; }
# Error actions attached to each statement pattern via <>err / <>eof.
271 action mach_err { scan_error() << "bad machine statement" << endl; }
272 action incl_err { scan_error() << "bad include statement" << endl; }
273 action import_err { scan_error() << "bad import statement" << endl; }
274 action write_err { scan_error() << "bad write statement" << endl; }
# handle_machine: process "machine <name>;". Selects (or creates) the
# parser for the named machine, honouring include targets and import
# mode. NOTE(review): sampled extract -- braces and parts of the
# if/else chain are missing from view.
276 action handle_machine
278 /* Assign a name to the machine. */
279 char *machine = word;
281 if ( !importMachines && inclSectionTarg == 0 ) {
282 ignoreSection = false;
/* Look up the machine in the dictionary, creating a new parser on
 * first sight of this name. */
284 ParserDictEl *pdEl = parserDict.find( machine );
286 pdEl = new ParserDictEl( machine );
287 pdEl->value = new Parser( fileName, machine, sectionLoc );
289 parserDict.insert( pdEl );
292 parser = pdEl->value;
294 else if ( !importMachines && strcmp( inclSectionTarg, machine ) == 0 ) {
295 /* found include target */
296 ignoreSection = false;
297 parser = inclToParser;
300 /* ignoring section */
301 ignoreSection = true;
# The machine statement: "machine <word> ;".
307 ( KW_Machine TK_Word @store_word ';' ) @handle_machine
308 <>err mach_err <>eof mach_err;
# handle_include: process an include statement. Defaults the section
# name to the current section and the file to the current file, guards
# against recursive includes, then runs a nested Scanner over the
# included file. NOTE(review): sampled extract -- braces, the tests
# selecting between the literal/current file and the nested scanner's
# do_scan call are partially missing from view.
310 action handle_include
313 char *inclSectionName = word;
314 char *inclFileName = 0;
316 /* Implement defaults for the input file and section name. */
317 if ( inclSectionName == 0 )
318 inclSectionName = parser->sectionName;
321 inclFileName = prepareFileName( lit, lit_len );
323 inclFileName = fileName;
325 /* Check for a recursive include structure. Add the current file/section
326 * name then check if what we are including is already in the stack. */
327 includeStack.append( IncludeStackItem( fileName, parser->sectionName ) );
329 if ( recursiveInclude( inclFileName, inclSectionName ) )
330 scan_error() << "include: this is a recursive include operation" << endl;
332 /* Open the input file for reading. */
333 ifstream *inFile = new ifstream( inclFileName );
334 if ( ! inFile->is_open() ) {
335 scan_error() << "include: could not open " <<
336 inclFileName << " for reading" << endl;
/* Scan the included file at includeDepth+1, feeding the current
 * parser; the final `false` marks this as a non-import scan. */
339 Scanner scanner( inclFileName, *inFile, output, parser,
340 inclSectionName, includeDepth+1, false );
345 /* Remove the last element (len-1) */
346 includeStack.remove( -1 );
# Include statements may name a section word, a file literal, or both.
351 TK_Word @store_word ( TK_Literal @store_lit )? |
352 TK_Literal @store_lit
356 ( KW_Include include_names ';' ) @handle_include
357 <>err incl_err <>eof incl_err;
# handle_import: process "import <literal>;". Scans the named file in
# import mode (final constructor argument true), then pushes a final
# null token and flushes so the import token buffer is fully drained.
# NOTE(review): sampled extract -- the action header line, braces and
# the nested scanner's do_scan call are missing from view.
362 char *importFileName = prepareFileName( lit, lit_len );
364 /* Open the input file for reading. */
365 ifstream *inFile = new ifstream( importFileName );
366 if ( ! inFile->is_open() ) {
367 scan_error() << "import: could not open " <<
368 importFileName << " for reading" << endl;
371 Scanner scanner( importFileName, *inFile, output, parser,
372 0, includeDepth+1, true );
/* Terminate the token stream and drain the import buffer. */
374 scanner.importToken( 0, 0, 0 );
375 scanner.flushImport();
381 ( KW_Import TK_Literal @store_lit ';' ) @handle_import
382 <>err import_err <>eof import_err;
# Write statement handling: emits a <write> XML element and its <arg>
# children when this scanner is at the outermost, unfiltered level
# (includeDepth/machineSpec/machineName all clear via active() checks).
# NOTE(review): sampled extract -- the action headers (write_command,
# write_arg, write_close per the pattern below) and the opening
# "<write" output line are missing from view.
386 if ( active() && machineSpec == 0 && machineName == 0 ) {
388 " def_name=\"" << parser->sectionName << "\""
389 " line=\"" << line << "\""
390 " col=\"" << column << "\""
397 if ( active() && machineSpec == 0 && machineName == 0 )
398 output << "<arg>" << tokdata << "</arg>";
403 if ( active() && machineSpec == 0 && machineName == 0 )
404 output << "</write>\n";
408 ( KW_Write @write_command
409 ( TK_Word @write_arg )+ ';' @write_close )
410 <>err write_err <>eof write_err;
# handle_token: default route -- forward the token to the section's
# parser via directToParser().
414 /* Send the token off to the parser. */
416 directToParser( parser, fileName, line, column, type, tokdata, toklen );
419 # Catch everything else.
421 ^( KW_Machine | KW_Include | KW_Import | KW_Write ) @handle_token;
/* token() overloads: single-char, data-less and [start,end) forms all
 * funnel into processToken(), which drives the section_parse machine.
 * NOTE(review): sampled extract -- bodies are only partially visible
 * (e.g. the toklen computation and NUL-termination of the copied
 * tokdata are missing from view). */
432 void Scanner::token( int type, char c )
434 token( type, &c, &c + 1 );
437 void Scanner::token( int type )
442 void Scanner::token( int type, char *start, char *end )
/* Copy the token text so the parser owns stable storage. */
448 tokdata = new char[toklen+1];
449 memcpy( tokdata, start, toklen );
453 processToken( type, tokdata, toklen );
/* processToken(): feed one token through the section_parse machine
 * (the exec block is not visible here) and record the last token. */
456 void Scanner::processToken( int type, char *tokdata, int toklen )
462 machine section_parse;
468 /* Record the last token for use in controlling the scan of subsequent
/* startSection(): begin a ragel section. Resets the once-only "no
 * name" error latch, closes the surrounding <host> element when at the
 * outermost unfiltered level, and records the section's source
 * location for later parser construction. */
473 void Scanner::startSection( )
475 parserExistsError = false;
477 if ( includeDepth == 0 ) {
478 if ( machineSpec == 0 && machineName == 0 )
479 output << "</host>\n";
482 sectionLoc.fileName = fileName;
483 sectionLoc.line = line;
/* endSection(): finish a ragel section: run the section machine's eof
 * actions, send TK_EndSection to the parser, and reopen the <host>
 * element when at the outermost unfiltered level.
 * NOTE(review): sampled extract -- the eof exec block, the loc
 * declaration and several braces are missing from view. */
487 void Scanner::endSection( )
489 /* Execute the eof actions for the section parser. */
491 machine section_parse;
495 /* Close off the section with the parser. */
498 loc.fileName = fileName;
502 parser->token( loc, TK_EndSection, 0, 0 );
505 if ( includeDepth == 0 ) {
506 if ( machineSpec == 0 && machineName == 0 ) {
507 /* The end section may include a newline on the end, so
508 * we use the last line, which will count the newline. */
509 output << "<host line=\"" << line << "\">";
517 # This is sent by the driver code.
527 # Identifiers, numbers, comments, and other common things.
528 ident = ( alpha | '_' ) ( alpha |digit |'_' )*;
530 hex_number = '0x' [0-9a-fA-F]+;
# Comment forms. NOTE(review): line 533 is the tail of the c_comment
# definition -- its "c_comment =" head is missing from this extract.
533 '/*' ( any | NL )* :>> '*/';
538 c_cpp_comment = c_comment | cpp_comment;
540 ruby_comment = '#' [^\n]* NL;
542 # These literal forms are common to host code and ragel.
543 s_literal = "'" ([^'\\] | NL | '\\' (any | NL))* "'";
544 d_literal = '"' ([^"\\] | NL | '\\' (any | NL))* '"';
545 host_re_literal = '/' ([^/\\] | NL | '\\' (any | NL))* '/';
547 whitespace = [ \t] | NL;
548 pound_comment = '#' [^\n]* NL;
550 # An inline block of code for Ruby.
# Scanner for host-language inline code blocks in Ruby mode. Keywords,
# identifiers, numbers, literals, comments and whitespace go out as
# typed tokens; everything else is IL_Symbol. Curly braces are counted
# so the scanner knows when a CurlyDelimited block ends.
# NOTE(review): sampled extract -- many keyword pattern heads are
# missing, leaving bare "whitespaceOn = false;" action fragments below.
551 inline_code_ruby := |*
552 # Inline expression keywords.
553 "fpc" => { token( KW_PChar ); };
554 "fc" => { token( KW_Char ); };
555 "fcurs" => { token( KW_CurState ); };
556 "ftargs" => { token( KW_TargState ); };
558 whitespaceOn = false;
562 # Inline statement keywords.
564 whitespaceOn = false;
567 "fexec" => { token( KW_Exec, 0, 0 ); };
569 whitespaceOn = false;
573 whitespaceOn = false;
577 whitespaceOn = false;
581 whitespaceOn = false;
585 whitespaceOn = false;
589 ident => { token( TK_Word, tokstart, tokend ); };
591 number => { token( TK_UInt, tokstart, tokend ); };
592 hex_number => { token( TK_Hex, tokstart, tokend ); };
594 ( s_literal | d_literal | host_re_literal )
595 => { token( IL_Literal, tokstart, tokend ); };
599 token( IL_WhiteSpace, tokstart, tokend );
602 ruby_comment => { token( IL_Comment, tokstart, tokend ); };
604 "::" => { token( TK_NameSep, tokstart, tokend ); };
606 # Some symbols need to go to the parser as with their cardinal value as
607 # the token type (as opposed to being sent as anonymous symbols)
608 # because they are part of the sequences which we interpret. The * ) ;
609 # symbols cause whitespace parsing to come back on. This gets turned
610 # off by some keywords.
614 token( *tokstart, tokstart, tokend );
615 if ( inlineBlockType == SemiTerminated )
621 token( *tokstart, tokstart, tokend );
624 [,(] => { token( *tokstart, tokstart, tokend ); };
627 token( IL_Symbol, tokstart, tokend );
/* Closing brace: may terminate a curly-delimited inline block. */
632 if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
633 /* Inline code block ends. */
638 /* Either a semi terminated inline block or only the closing
639 * brace of some inner scope, not the block's closing brace. */
640 token( IL_Symbol, tokstart, tokend );
645 scan_error() << "unterminated code block" << endl;
648 # Send every other character as a symbol.
649 any => { token( IL_Symbol, tokstart, tokend ); };
653 # An inline block of code for languages other than Ruby.
# Same structure as inline_code_ruby above, but with C/C++ comment
# forms and without the host regex literal.
# NOTE(review): sampled extract -- the machine header and many keyword
# pattern heads are missing, leaving bare "whitespaceOn = false;"
# action fragments below.
655 # Inline expression keywords.
656 "fpc" => { token( KW_PChar ); };
657 "fc" => { token( KW_Char ); };
658 "fcurs" => { token( KW_CurState ); };
659 "ftargs" => { token( KW_TargState ); };
661 whitespaceOn = false;
665 # Inline statement keywords.
667 whitespaceOn = false;
670 "fexec" => { token( KW_Exec, 0, 0 ); };
672 whitespaceOn = false;
676 whitespaceOn = false;
680 whitespaceOn = false;
684 whitespaceOn = false;
688 whitespaceOn = false;
692 ident => { token( TK_Word, tokstart, tokend ); };
694 number => { token( TK_UInt, tokstart, tokend ); };
695 hex_number => { token( TK_Hex, tokstart, tokend ); };
697 ( s_literal | d_literal )
698 => { token( IL_Literal, tokstart, tokend ); };
702 token( IL_WhiteSpace, tokstart, tokend );
705 c_cpp_comment => { token( IL_Comment, tokstart, tokend ); };
707 "::" => { token( TK_NameSep, tokstart, tokend ); };
709 # Some symbols need to go to the parser as with their cardinal value as
710 # the token type (as opposed to being sent as anonymous symbols)
711 # because they are part of the sequences which we interpret. The * ) ;
712 # symbols cause whitespace parsing to come back on. This gets turned
713 # off by some keywords.
717 token( *tokstart, tokstart, tokend );
718 if ( inlineBlockType == SemiTerminated )
724 token( *tokstart, tokstart, tokend );
727 [,(] => { token( *tokstart, tokstart, tokend ); };
730 token( IL_Symbol, tokstart, tokend );
/* Closing brace: may terminate a curly-delimited inline block. */
735 if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
736 /* Inline code block ends. */
741 /* Either a semi terminated inline block or only the closing
742 * brace of some inner scope, not the block's closing brace. */
743 token( IL_Symbol, tokstart, tokend );
748 scan_error() << "unterminated code block" << endl;
751 # Send every other character as a symbol.
752 any => { token( IL_Symbol, tokstart, tokend ); };
# Scanner for the interior of a '[...]' OR literal: named escapes map
# to their character codes, '-' is the range dash, ']' returns (fret)
# to the calling machine, and any other character is an RE_Char.
# NOTE(review): sampled extract -- the machine header and the EOF
# pattern around the error message are missing from view.
756 # Escape sequences in OR expressions.
757 '\\0' => { token( RE_Char, '\0' ); };
758 '\\a' => { token( RE_Char, '\a' ); };
759 '\\b' => { token( RE_Char, '\b' ); };
760 '\\t' => { token( RE_Char, '\t' ); };
761 '\\n' => { token( RE_Char, '\n' ); };
762 '\\v' => { token( RE_Char, '\v' ); };
763 '\\f' => { token( RE_Char, '\f' ); };
764 '\\r' => { token( RE_Char, '\r' ); };
765 '\\\n' => { updateCol(); };
766 '\\' any => { token( RE_Char, tokstart+1, tokend ); };
768 # Range dash in an OR expression.
769 '-' => { token( RE_Dash, 0, 0 ); };
771 # Terminate an OR expression.
772 ']' => { token( RE_SqClose ); fret; };
775 scan_error() << "unterminated OR literal" << endl;
778 # Characters in an OR expression.
779 [^\]] => { token( RE_Char, tokstart, tokend ); };
# Scanner for the interior of a '/.../' regular expression literal:
# named escapes map to their character codes, '.' and '*' are special,
# '[' / '[^' call into or_literal, and the closing slash (pattern head
# missing from this extract) emits RE_Slash.
783 ragel_re_literal := |*
784 # Escape sequences in regular expressions.
785 '\\0' => { token( RE_Char, '\0' ); };
786 '\\a' => { token( RE_Char, '\a' ); };
787 '\\b' => { token( RE_Char, '\b' ); };
788 '\\t' => { token( RE_Char, '\t' ); };
789 '\\n' => { token( RE_Char, '\n' ); };
790 '\\v' => { token( RE_Char, '\v' ); };
791 '\\f' => { token( RE_Char, '\f' ); };
792 '\\r' => { token( RE_Char, '\r' ); };
793 '\\\n' => { updateCol(); };
794 '\\' any => { token( RE_Char, tokstart+1, tokend ); };
796 # Terminate an OR expression.
798 token( RE_Slash, tokstart, tokend );
802 # Special characters.
803 '.' => { token( RE_Dot ); };
804 '*' => { token( RE_Star ); };
806 '[' => { token( RE_SqOpen ); fcall or_literal; };
807 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
810 scan_error() << "unterminated regular expression" << endl;
813 # Characters in an OR expression.
814 [^\/] => { token( RE_Char, tokstart, tokend ); };
# Scanner used after the 'write' keyword so write arguments are not
# mistaken for ragel keywords; the terminating ';' returns control to
# parser_def. NOTE(review): the EOF pattern around the error message
# is missing from this extract.
817 # We need a separate token space here to avoid the ragel keywords.
818 write_statement := |*
819 ident => { token( TK_Word, tokstart, tokend ); } ;
820 [ \t\n]+ => { updateCol(); };
821 ';' => { token( ';' ); fgoto parser_def; };
824 scan_error() << "unterminated write statement" << endl;
828 # Parser definitions.
# The main scanner for the interior of a ragel specification: keyword
# tokens, identifiers, numbers, literals, the operator vocabulary, and
# transitions into the inline-code / literal sub-scanners.
# NOTE(review): sampled extract -- the machine header line and many
# keyword pattern heads (e.g. for the bare "inlineBlockType =
# SemiTerminated;" fragments and the fcall/else arms choosing between
# inline_code_ruby and inline_code) are missing from view.
830 'machine' => { token( KW_Machine ); };
831 'include' => { token( KW_Include ); };
832 'import' => { token( KW_Import ); };
835 fgoto write_statement;
837 'action' => { token( KW_Action ); };
838 'alphtype' => { token( KW_AlphType ); };
839 'prepush' => { token( KW_PrePush ); };
840 'postpop' => { token( KW_PostPop ); };
842 # FIXME: Enable this post 5.17.
843 # 'range' => { token( KW_Range ); };
847 inlineBlockType = SemiTerminated;
848 if ( hostLang->lang == HostLang::Ruby )
849 fcall inline_code_ruby;
855 inlineBlockType = SemiTerminated;
856 if ( hostLang->lang == HostLang::Ruby )
857 fcall inline_code_ruby;
862 token( KW_Variable );
863 inlineBlockType = SemiTerminated;
864 if ( hostLang->lang == HostLang::Ruby )
865 fcall inline_code_ruby;
869 'when' => { token( KW_When ); };
870 'inwhen' => { token( KW_InWhen ); };
871 'outwhen' => { token( KW_OutWhen ); };
872 'eof' => { token( KW_Eof ); };
873 'err' => { token( KW_Err ); };
874 'lerr' => { token( KW_Lerr ); };
875 'to' => { token( KW_To ); };
876 'from' => { token( KW_From ); };
877 'export' => { token( KW_Export ); };
880 ident => { token( TK_Word, tokstart, tokend ); } ;
883 number => { token( TK_UInt, tokstart, tokend ); };
884 hex_number => { token( TK_Hex, tokstart, tokend ); };
886 # Literals, with optionals.
887 ( s_literal | d_literal ) [i]?
888 => { token( TK_Literal, tokstart, tokend ); };
890 '[' => { token( RE_SqOpen ); fcall or_literal; };
891 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
893 '/' => { token( RE_Slash ); fgoto ragel_re_literal; };
896 pound_comment => { updateCol(); };
898 ':=' => { token( TK_ColonEquals ); };
# To-state action embedding operators.
901 ">~" => { token( TK_StartToState ); };
902 "$~" => { token( TK_AllToState ); };
903 "%~" => { token( TK_FinalToState ); };
904 "<~" => { token( TK_NotStartToState ); };
905 "@~" => { token( TK_NotFinalToState ); };
906 "<>~" => { token( TK_MiddleToState ); };
# From-state action embedding operators.
909 ">*" => { token( TK_StartFromState ); };
910 "$*" => { token( TK_AllFromState ); };
911 "%*" => { token( TK_FinalFromState ); };
912 "<*" => { token( TK_NotStartFromState ); };
913 "@*" => { token( TK_NotFinalFromState ); };
914 "<>*" => { token( TK_MiddleFromState ); };
# EOF action embedding operators.
917 ">/" => { token( TK_StartEOF ); };
918 "$/" => { token( TK_AllEOF ); };
919 "%/" => { token( TK_FinalEOF ); };
920 "</" => { token( TK_NotStartEOF ); };
921 "@/" => { token( TK_NotFinalEOF ); };
922 "<>/" => { token( TK_MiddleEOF ); };
924 # Global Error actions.
925 ">!" => { token( TK_StartGblError ); };
926 "$!" => { token( TK_AllGblError ); };
927 "%!" => { token( TK_FinalGblError ); };
928 "<!" => { token( TK_NotStartGblError ); };
929 "@!" => { token( TK_NotFinalGblError ); };
930 "<>!" => { token( TK_MiddleGblError ); };
932 # Local error actions.
933 ">^" => { token( TK_StartLocalError ); };
934 "$^" => { token( TK_AllLocalError ); };
935 "%^" => { token( TK_FinalLocalError ); };
936 "<^" => { token( TK_NotStartLocalError ); };
937 "@^" => { token( TK_NotFinalLocalError ); };
938 "<>^" => { token( TK_MiddleLocalError ); };
941 "<>" => { token( TK_Middle ); };
# Condition embedding operators.
944 '>?' => { token( TK_StartCond ); };
945 '$?' => { token( TK_AllCond ); };
946 '%?' => { token( TK_LeavingCond ); };
948 '..' => { token( TK_DotDot ); };
949 '**' => { token( TK_StarStar ); };
950 '--' => { token( TK_DashDash ); };
951 '->' => { token( TK_Arrow ); };
952 '=>' => { token( TK_DoubleArrow ); };
954 ":>" => { token( TK_ColonGt ); };
955 ":>>" => { token( TK_ColonGtGt ); };
956 "<:" => { token( TK_LtColon ); };
958 # Opening of longest match.
959 "|*" => { token( TK_BarStar ); };
961 # Separator for name references.
962 "::" => { token( TK_NameSep, tokstart, tokend ); };
970 [ \t\r]+ => { updateCol(); };
972 # If we are in a single line machine then newline may end the spec.
975 if ( singleLineSpec ) {
982 if ( lastToken == KW_Export || lastToken == KW_Entry )
# '{' opens a curly-delimited inline code block in the host language.
987 inlineBlockType = CurlyDelimited;
988 if ( hostLang->lang == HostLang::Ruby )
989 fcall inline_code_ruby;
996 scan_error() << "unterminated ragel section" << endl;
999 any => { token( *tokstart ); } ;
1002 # Outside code scanner. These tokens get passed through.
# Top-level scanner (Ruby host mode): host code outside ragel sections
# is passed through via pass(); section-opener patterns (heads missing
# from this extract) set singleLineSpec before entering the spec.
1004 ident => { pass( IMP_Word, tokstart, tokend ); };
1005 number => { pass( IMP_UInt, tokstart, tokend ); };
1006 ruby_comment => { pass(); };
1007 ( s_literal | d_literal | host_re_literal )
1008 => { pass( IMP_Literal, tokstart, tokend ); };
1012 singleLineSpec = false;
1018 singleLineSpec = true;
1022 whitespace+ => { pass(); };
1024 any => { pass( *tokstart, 0, 0 ); };
1027 # Outside code scanner. These tokens get passed through.
# Top-level scanner (non-Ruby hosts): same pass-through structure as
# main_ruby, plus C-style 'define' and C/C++ comments.
1029 'define' => { pass( IMP_Define, 0, 0 ); };
1030 ident => { pass( IMP_Word, tokstart, tokend ); };
1031 number => { pass( IMP_UInt, tokstart, tokend ); };
1032 c_cpp_comment => { pass(); };
1033 ( s_literal | d_literal ) => { pass( IMP_Literal, tokstart, tokend ); };
1037 singleLineSpec = false;
1043 singleLineSpec = true;
1047 whitespace+ => { pass(); };
1049 any => { pass( *tokstart, 0, 0 ); };
/* do_scan(): the scanner driver loop. Reads the input in chunks into a
 * growable heap buffer, runs the generated rlscan machine over each
 * chunk, and shifts any partially-matched token (from tokstart on) to
 * the buffer front so it can be completed on the next read. An
 * explicit NUL (last_char) is appended when the stream is exhausted so
 * the machine can observe end of input.
 * NOTE(review): sampled extract -- the outer loop construct, the call
 * stack declaration, the write init/exec block and several branches
 * are missing from view. */
1055 void Scanner::do_scan()
1058 char *buf = new char[bufsize];
1059 const char last_char = 0;
1060 int cs, act, have = 0;
1063 /* The stack is two deep, one level for going into ragel defs from the main
1064 * machines which process outside code, and another for going into or literals
1065 * from either a ragel spec, or a regular expression. */
1067 int curly_count = 0;
1068 bool execute = true;
1069 bool singleLineSpec = false;
1070 InlineBlockType inlineBlockType = CurlyDelimited;
1072 /* Init the section parser and the character scanner. */
1076 /* Set up the start state. FIXME: After 5.20 is released the nocs write
1077 * init option should be used, the main machine eliminated and this statement moved
1078 * above the write init. */
1079 if ( hostLang->lang == HostLang::Ruby )
1080 cs = rlscan_en_main_ruby;
1082 cs = rlscan_en_main;
1085 char *p = buf + have;
1086 int space = bufsize - have;
1089 /* We filled up the buffer trying to scan a token. Grow it. */
1090 bufsize = bufsize * 2;
1091 char *newbuf = new char[bufsize];
1093 /* Recompute p and space. */
1095 space = bufsize - have;
1097 /* Patch up pointers possibly in use. */
1098 if ( tokstart != 0 )
1099 tokstart = newbuf + ( tokstart - buf );
1100 tokend = newbuf + ( tokend - buf );
1102 /* Copy the new buffer in. */
1103 memcpy( newbuf, buf, have );
1108 input.read( p, space );
1109 int len = input.gcount();
1111 /* If we see eof then append the EOF char. */
1113 p[0] = last_char, len = 1;
1120 /* Check if we failed. */
1121 if ( cs == rlscan_error ) {
1122 /* Machine failed before finding a token. I'm not yet sure if this
1124 scan_error() << "scanner error" << endl;
1128 /* Decide if we need to preserve anything. */
1129 char *preserve = tokstart;
1131 /* Now set up the prefix. */
1132 if ( preserve == 0 )
1135 /* There is data that needs to be shifted over. */
1136 have = pe - preserve;
1137 memmove( buf, preserve, have );
1138 unsigned int shiftback = preserve - buf;
1139 if ( tokstart != 0 )
1140 tokstart -= shiftback;
1141 tokend -= shiftback;