2 * Copyright 2006-2007 Adrian Thurston <thurston@cs.queensu.ca>
5 /* This file is part of Ragel.
7 * Ragel is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * Ragel is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Ragel; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
52 * The Scanner for Importing
56 machine inline_token_scan;
60 # Import scanner tokens.
# Each rule below recognizes a buffered token sequence of the form
# "define NAME value" or "NAME = value" and replays it to inclToParser
# as the statement: NAME = value ;
# NOTE(review): nameOff/numOff/litOff are assigned on lines elided from
# this view — presumably the offsets of the word/number/literal tokens
# within the match; confirm against the full source.
# Define of a number.
65 IMP_Define IMP_Word IMP_UInt => {
66 int base = tok_ts - token_data;
70 directToParser( inclToParser, fileName, line, column, TK_Word,
71 token_strings[base+nameOff], token_lens[base+nameOff] );
72 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
73 directToParser( inclToParser, fileName, line, column, TK_UInt,
74 token_strings[base+numOff], token_lens[base+numOff] );
75 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
78 # Assignment of number.
79 IMP_Word '=' IMP_UInt => {
80 int base = tok_ts - token_data;
84 directToParser( inclToParser, fileName, line, column, TK_Word,
85 token_strings[base+nameOff], token_lens[base+nameOff] );
86 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
87 directToParser( inclToParser, fileName, line, column, TK_UInt,
88 token_strings[base+numOff], token_lens[base+numOff] );
89 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
# Define of a literal.
93 IMP_Define IMP_Word IMP_Literal => {
94 int base = tok_ts - token_data;
98 directToParser( inclToParser, fileName, line, column, TK_Word,
99 token_strings[base+nameOff], token_lens[base+nameOff] );
100 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
101 directToParser( inclToParser, fileName, line, column, TK_Literal,
102 token_strings[base+litOff], token_lens[base+litOff] );
103 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
106 # Assignment of literal.
107 IMP_Word '=' IMP_Literal => {
108 int base = tok_ts - token_data;
112 directToParser( inclToParser, fileName, line, column, TK_Word,
113 token_strings[base+nameOff], token_lens[base+nameOff] );
114 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
115 directToParser( inclToParser, fileName, line, column, TK_Literal,
116 token_strings[base+litOff], token_lens[base+litOff] );
117 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
120 # Catch everything else.
/* flushImport: run the inline_token_scan machine over the queued import
 * tokens and shift any unconsumed tail of the queue to the front of the
 * parallel arrays (token_data/token_strings/token_lens). tok_ts marks the
 * first token the scan did not consume; cur_token entries are kept.
 * NOTE(review): the machine setup/exec lines between the pointer setup and
 * the memmoves are elided from this view. */
127 void Scanner::flushImport()
130 int *pe = token_data + cur_token;
134 machine inline_token_scan;
142 cur_token = pe - tok_ts;
143 int ts_offset = tok_ts - token_data;
144 memmove( token_data, token_data+ts_offset, cur_token*sizeof(token_data[0]) );
145 memmove( token_strings, token_strings+ts_offset, cur_token*sizeof(token_strings[0]) );
146 memmove( token_lens, token_lens+ts_offset, cur_token*sizeof(token_lens[0]) );
/* directToParser: hand one token, tagged with its source location, straight
 * to the given parser, bypassing the scanner's own buffering.
 * NOTE(review): the cerr lines are debug tracing, presumably inside an
 * elided conditional; loc is declared on an elided line. */
150 void Scanner::directToParser( Parser *toParser, char *tokFileName, int tokLine,
151 int tokColumn, int type, char *tokdata, int toklen )
156 cerr << "scanner:" << tokLine << ":" << tokColumn <<
157 ": sending token to the parser " << Parser_lelNames[type];
158 cerr << " " << toklen;
160 cerr << " " << tokdata;
164 loc.fileName = tokFileName;
168 toParser->token( loc, type, tokdata, toklen );
/* importToken: append one token to the import queue, storing the token type
 * plus either null/0 or a NUL-terminated copy of its text (the branch
 * structure between the two is elided from this view). Presumably flushes
 * when the queue reaches max_tokens — confirm against the full source. */
171 void Scanner::importToken( int token, char *start, char *end )
173 if ( cur_token == max_tokens )
176 token_data[cur_token] = token;
178 token_strings[cur_token] = 0;
179 token_lens[cur_token] = 0;
182 int toklen = end-start;
183 token_lens[cur_token] = toklen;
184 token_strings[cur_token] = new char[toklen+1];
185 memcpy( token_strings[cur_token], start, toklen );
186 token_strings[cur_token][toklen] = 0;
/* pass( token, start, end ): forward a host-code token into the import
 * queue when scanning for import targets. */
191 void Scanner::pass( int token, char *start, char *end )
193 if ( importMachines )
194 importToken( token, start, end );
/* NOTE(review): the lines below appear to belong to a second pass()
 * overload whose signature falls in an elided span — verify. */
202 /* If no errors and we are at the bottom of the include stack (the
203 * source file listed on the command line) then write out the data. */
204 if ( includeDepth == 0 && machineSpec == 0 && machineName == 0 )
205 xmlEscapeHost( output, ts, te-ts );
209 * The scanner for processing sections, includes, imports, etc.
213 machine section_parse;
219 void Scanner::init( )
/* active: whether the scanner currently has a parser to send tokens to.
 * Emits the "no name" diagnostic only once, latched by parserExistsError. */
224 bool Scanner::active()
229 if ( parser == 0 && ! parserExistsError ) {
230 scan_error() << "this specification has no name, nor does any previous"
231 " specification" << endl;
232 parserExistsError = true;
/* scan_error: bump the error count (line elided here) and return a stream,
 * prefixed with file:line:column, for the caller to append a message to. */
241 ostream &Scanner::scan_error()
243 /* Maintain the error count. */
245 cerr << makeInputLoc( fileName, line, column ) << ": ";
249 /* An approximate check for duplicate includes. Due to aliasing of files it's
250 * possible for duplicates to creep in. */
/* Returns true when this (file, section) pair is already recorded in the
 * current parser's include history. */
251 bool Scanner::duplicateInclude( char *inclFileName, char *inclSectionName )
253 for ( IncludeHistory::Iter hi = parser->includeHistory; hi.lte(); hi++ ) {
254 if ( strcmp( hi->fileName, inclFileName ) == 0 &&
255 strcmp( hi->sectionName, inclSectionName ) == 0 )
/* updateCol: advance the column counter over the text just scanned
 * (body largely elided from this view). */
263 void Scanner::updateCol()
268 //cerr << "adding " << te - from << " to column" << endl;
/* handleMachine: process a "machine NAME;" statement. Outside of an
 * include/import, look the name up in parserDict (creating a new Parser on
 * a miss) and make it the current parser; when scanning an include, only
 * activate if the name matches the include target, otherwise flag the
 * section to be ignored. */
273 void Scanner::handleMachine()
275 /* Assign a name to the machine. */
276 char *machine = word;
278 if ( !importMachines && inclSectionTarg == 0 ) {
279 ignoreSection = false;
281 ParserDictEl *pdEl = parserDict.find( machine );
283 pdEl = new ParserDictEl( machine );
284 pdEl->value = new Parser( fileName, machine, sectionLoc );
286 parserDict.insert( pdEl );
289 parser = pdEl->value;
291 else if ( !importMachines && strcmp( inclSectionTarg, machine ) == 0 ) {
292 /* found include target */
293 ignoreSection = false;
294 parser = inclToParser;
297 /* ignoring section */
298 ignoreSection = true;
/* handleInclude: process an include statement. Defaults the section name to
 * the current section, builds a list of candidate paths, opens the first
 * that exists, reports every attempted path on failure, and recursively
 * scans the file with a nested Scanner at includeDepth+1 unless the
 * (file, section) pair was already included. */
303 void Scanner::handleInclude()
306 char *inclSectionName = word;
307 char **includeChecks = 0;
309 /* Implement defaults for the input file and section name. */
310 if ( inclSectionName == 0 )
311 inclSectionName = parser->sectionName;
314 includeChecks = makeIncludePathChecks( fileName, lit, lit_len );
/* Presumably the no-file-literal branch: fall back to re-reading the
 * current file — the guarding condition is elided from this view. */
316 char *test = new char[strlen(fileName)+1];
317 strcpy( test, fileName );
319 includeChecks = new char*[2];
321 includeChecks[0] = test;
322 includeChecks[1] = 0;
326 ifstream *inFile = tryOpenInclude( includeChecks, found );
328 scan_error() << "include: failed to locate file" << endl;
329 char **tried = includeChecks;
330 while ( *tried != 0 )
331 scan_error() << "include: attempted: \"" << *tried++ << '\"' << endl;
334 /* Don't include anything that's already been included. */
335 if ( !duplicateInclude( includeChecks[found], inclSectionName ) ) {
336 parser->includeHistory.append( IncludeHistoryItem(
337 includeChecks[found], inclSectionName ) );
339 Scanner scanner( includeChecks[found], *inFile, output, parser,
340 inclSectionName, includeDepth+1, false );
/* handleImport: resolve the import file path, open it, report attempted
 * paths on failure, then scan it with a nested Scanner in import mode.
 * A trailing importToken(0,0,0) is appended before flushImport() —
 * presumably a terminator forcing the last queued pattern to complete. */
348 void Scanner::handleImport()
351 char **importChecks = makeIncludePathChecks( fileName, lit, lit_len );
353 /* Open the input file for reading. */
355 ifstream *inFile = tryOpenInclude( importChecks, found );
357 scan_error() << "import: could not open import file " <<
358 "for reading" << endl;
359 char **tried = importChecks;
360 while ( *tried != 0 )
361 scan_error() << "import: attempted: \"" << *tried++ << '\"' << endl;
364 Scanner scanner( importChecks[found], *inFile, output, parser,
365 0, includeDepth+1, true );
367 scanner.importToken( 0, 0, 0 );
368 scanner.flushImport();
# section_parse: grammar over scanner tokens for the statements the scanner
# itself must interpret (machine/include/import/write); all other tokens go
# to the current parser via handle_token.
374 machine section_parse;
376 # Need the defines representing tokens.
379 action clear_words { word = lit = 0; word_len = lit_len = 0; }
380 action store_word { word = tokdata; word_len = toklen; }
381 action store_lit { lit = tokdata; lit_len = toklen; }
383 action mach_err { scan_error() << "bad machine statement" << endl; }
384 action incl_err { scan_error() << "bad include statement" << endl; }
385 action import_err { scan_error() << "bad import statement" << endl; }
386 action write_err { scan_error() << "bad write statement" << endl; }
388 action handle_machine { handleMachine(); }
389 action handle_include { handleInclude(); }
390 action handle_import { handleImport(); }
# machine statement: machine NAME ;
393 ( KW_Machine TK_Word @store_word ';' ) @handle_machine
394 <>err mach_err <>eof mach_err;
# include operands: optional section word and/or file literal.
397 TK_Word @store_word ( TK_Literal @store_lit )? |
398 TK_Literal @store_lit
402 ( KW_Include include_names ';' ) @handle_include
403 <>err incl_err <>eof incl_err;
406 ( KW_Import TK_Literal @store_lit ';' ) @handle_import
407 <>err import_err <>eof import_err;
411 if ( active() && machineSpec == 0 && machineName == 0 ) {
413 " def_name=\"" << parser->sectionName << "\""
414 " line=\"" << line << "\""
415 " col=\"" << column << "\""
422 if ( active() && machineSpec == 0 && machineName == 0 )
423 output << "<arg>" << tokdata << "</arg>";
428 if ( active() && machineSpec == 0 && machineName == 0 )
429 output << "</write>\n";
# write statement: emits <write ...> element with one <arg> per word.
433 ( KW_Write @write_command
434 ( TK_Word @write_arg )+ ';' @write_close )
435 <>err write_err <>eof write_err;
439 /* Send the token off to the parser. */
441 directToParser( parser, fileName, line, column, type, tokdata, toklen );
444 # Catch everything else.
446 ^( KW_Machine | KW_Include | KW_Import | KW_Write ) @handle_token;
/* token( type, c ): single-character token; forwards a one-char range. */
457 void Scanner::token( int type, char c )
459 token( type, &c, &c + 1 );
/* token( type ): token with no text (body elided in this view). */
462 void Scanner::token( int type )
/* token( type, start, end ): copy the token text (when present; branch
 * structure elided) into a toklen+1 buffer — the NUL terminator is
 * presumably written on an elided line — then hand off to processToken. */
467 void Scanner::token( int type, char *start, char *end )
473 tokdata = new char[toklen+1];
474 memcpy( tokdata, start, toklen );
478 processToken( type, tokdata, toklen );
/* processToken: feed one token through the section_parse machine
 * (the machine execution lines are elided from this view). */
481 void Scanner::processToken( int type, char *tokdata, int toklen )
495 machine section_parse;
501 /* Record the last token for use in controlling the scan of subsequent
/* startSection: reset per-section state and record the section's location.
 * At the bottom of the include stack (and with no machine spec/name filter)
 * this also closes the surrounding <host> element in the XML output. */
506 void Scanner::startSection( )
508 parserExistsError = false;
510 if ( includeDepth == 0 ) {
511 if ( machineSpec == 0 && machineName == 0 )
512 output << "</host>\n";
515 sectionLoc.fileName = fileName;
516 sectionLoc.line = line;
/* endSection: flush the section parser with an EOF token, send TK_EndSection
 * to the current parser, and at the bottom of the include stack reopen the
 * <host> element for the host code that follows. */
520 void Scanner::endSection( )
522 /* Execute the eof actions for the section parser. */
523 processToken( -1, 0, 0 );
525 /* Close off the section with the parser. */
528 loc.fileName = fileName;
532 parser->token( loc, TK_EndSection, 0, 0 );
535 if ( includeDepth == 0 ) {
536 if ( machineSpec == 0 && machineName == 0 ) {
537 /* The end section may include a newline on the end, so
538 * we use the last line, which will count the newline. */
539 output << "<host line=\"" << line << "\">";
/* isAbsolutePath: true when path is absolute. The drive-letter test is
 * presumably under an elided Windows-only preprocessor guard, with the
 * leading-slash test as the portable case — confirm in the full source. */
544 bool isAbsolutePath( const char *path )
547 return isalpha( path[0] ) && path[1] == ':' && path[2] == '\\';
549 return path[0] == '/';
/* makeIncludePathChecks: build a NULL-terminated list of candidate paths
 * for an include/import literal: the literal itself when absolute, the
 * current file's directory (or the bare name when this file has no
 * directory part), then each include path given on the command line.
 * NOTE(review): the branch structure around the checks allocations is
 * elided from this view; verify sizing against the full source. */
553 char **Scanner::makeIncludePathChecks( char *thisFileName, char *fileName, int fnlen )
555 char **checks = new char*[2];
558 bool caseInsensitive = false;
560 char *data = prepareLitString( InputLoc(), fileName, fnlen,
561 length, caseInsensitive );
564 if ( isAbsolutePath( data ) )
565 checks[nextCheck++] = data;
567 /* Search from the location of the current file. */
568 const char *lastSlash = strrchr( thisFileName, PATH_SEP );
569 if ( lastSlash == 0 )
570 checks[nextCheck++] = data;
572 long givenPathLen = (lastSlash - thisFileName) + 1;
573 long checklen = givenPathLen + length;
574 char *check = new char[checklen+1];
575 memcpy( check, thisFileName, givenPathLen );
576 memcpy( check+givenPathLen, data, length );
578 checks[nextCheck++] = check;
581 /* Search from the include paths given on the command line. */
582 for ( ArgsVector::Iter incp = includePaths; incp.lte(); incp++ ) {
583 long pathLen = strlen( *incp );
584 long checkLen = pathLen + 1 + length;
585 char *check = new char[checkLen+1];
586 memcpy( check, *incp, pathLen );
587 check[pathLen] = PATH_SEP;
588 memcpy( check+pathLen+1, data, length );
590 checks[nextCheck++] = check;
594 checks[nextCheck] = 0;
/* tryOpenInclude: try each candidate path in order; on success set found to
 * the index of the path that opened and return the stream (caller owns the
 * new ifstream). Loop close and failure path are elided from this view. */
598 ifstream *Scanner::tryOpenInclude( char **pathChecks, long &found )
600 char **check = pathChecks;
601 ifstream *inFile = new ifstream;
603 while ( *check != 0 ) {
604 inFile->open( *check );
605 if ( inFile->is_open() ) {
606 found = check - pathChecks;
# Common lexical definitions shared by the scanner machines below.
620 # This is sent by the driver code.
630 # Identifiers, numbers, comments, and other common things.
631 ident = ( alpha | '_' ) ( alpha |digit |'_' )*;
633 hex_number = '0x' [0-9a-fA-F]+;
636 '/*' ( any | NL )* :>> '*/';
641 c_cpp_comment = c_comment | cpp_comment;
643 ruby_comment = '#' [^\n]* NL;
645 # These literal forms are common to host code and ragel.
646 s_literal = "'" ([^'\\] | NL | '\\' (any | NL))* "'";
647 d_literal = '"' ([^"\\] | NL | '\\' (any | NL))* '"';
648 host_re_literal = '/' ([^/\\] | NL | '\\' (any | NL))* '/';
650 whitespace = [ \t] | NL;
651 pound_comment = '#' [^\n]* NL;
653 # An inline block of code for Ruby.
# Tokenizes host (Ruby) code inside an action/expression block: keywords,
# identifiers, numbers, literals and comments become typed tokens, anything
# unrecognized is sent as IL_Symbol. Curly-brace counting and the
# SemiTerminated/CurlyDelimited block type decide where the block ends.
654 inline_code_ruby := |*
655 # Inline expression keywords.
656 "fpc" => { token( KW_PChar ); };
657 "fc" => { token( KW_Char ); };
658 "fcurs" => { token( KW_CurState ); };
659 "ftargs" => { token( KW_TargState ); };
661 whitespaceOn = false;
665 # Inline statement keywords.
667 whitespaceOn = false;
670 "fexec" => { token( KW_Exec, 0, 0 ); };
672 whitespaceOn = false;
676 whitespaceOn = false;
680 whitespaceOn = false;
684 whitespaceOn = false;
688 whitespaceOn = false;
692 ident => { token( TK_Word, ts, te ); };
694 number => { token( TK_UInt, ts, te ); };
695 hex_number => { token( TK_Hex, ts, te ); };
697 ( s_literal | d_literal | host_re_literal )
698 => { token( IL_Literal, ts, te ); };
702 token( IL_WhiteSpace, ts, te );
705 ruby_comment => { token( IL_Comment, ts, te ); };
707 "::" => { token( TK_NameSep, ts, te ); };
709 # Some symbols need to go to the parser as with their cardinal value as
710 # the token type (as opposed to being sent as anonymous symbols)
711 # because they are part of the sequences which we interpret. The * ) ;
712 # symbols cause whitespace parsing to come back on. This gets turned
713 # off by some keywords.
717 token( *ts, ts, te );
718 if ( inlineBlockType == SemiTerminated )
724 token( *ts, ts, te );
727 [,(] => { token( *ts, ts, te ); };
730 token( IL_Symbol, ts, te );
735 if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
736 /* Inline code block ends. */
741 /* Either a semi terminated inline block or only the closing
742 * brace of some inner scope, not the block's closing brace. */
743 token( IL_Symbol, ts, te );
748 scan_error() << "unterminated code block" << endl;
751 # Send every other character as a symbol.
752 any => { token( IL_Symbol, ts, te ); };
756 # An inline block of code for languages other than Ruby.
# Same structure as inline_code_ruby above, but uses C/C++ comment and
# literal forms (no host regex literals) for non-Ruby host languages.
758 # Inline expression keywords.
759 "fpc" => { token( KW_PChar ); };
760 "fc" => { token( KW_Char ); };
761 "fcurs" => { token( KW_CurState ); };
762 "ftargs" => { token( KW_TargState ); };
764 whitespaceOn = false;
768 # Inline statement keywords.
770 whitespaceOn = false;
773 "fexec" => { token( KW_Exec, 0, 0 ); };
775 whitespaceOn = false;
779 whitespaceOn = false;
783 whitespaceOn = false;
787 whitespaceOn = false;
791 whitespaceOn = false;
795 ident => { token( TK_Word, ts, te ); };
797 number => { token( TK_UInt, ts, te ); };
798 hex_number => { token( TK_Hex, ts, te ); };
800 ( s_literal | d_literal )
801 => { token( IL_Literal, ts, te ); };
805 token( IL_WhiteSpace, ts, te );
808 c_cpp_comment => { token( IL_Comment, ts, te ); };
810 "::" => { token( TK_NameSep, ts, te ); };
812 # Some symbols need to go to the parser as with their cardinal value as
813 # the token type (as opposed to being sent as anonymous symbols)
814 # because they are part of the sequences which we interpret. The * ) ;
815 # symbols cause whitespace parsing to come back on. This gets turned
816 # off by some keywords.
820 token( *ts, ts, te );
821 if ( inlineBlockType == SemiTerminated )
827 token( *ts, ts, te );
830 [,(] => { token( *ts, ts, te ); };
833 token( IL_Symbol, ts, te );
838 if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
839 /* Inline code block ends. */
844 /* Either a semi terminated inline block or only the closing
845 * brace of some inner scope, not the block's closing brace. */
846 token( IL_Symbol, ts, te );
851 scan_error() << "unterminated code block" << endl;
854 # Send every other character as a symbol.
855 any => { token( IL_Symbol, ts, te ); };
# or_literal: scans the inside of a [...] bracket (OR) expression; the
# closing ']' emits RE_SqClose and fret returns to the calling machine.
859 # Escape sequences in OR expressions.
860 '\\0' => { token( RE_Char, '\0' ); };
861 '\\a' => { token( RE_Char, '\a' ); };
862 '\\b' => { token( RE_Char, '\b' ); };
863 '\\t' => { token( RE_Char, '\t' ); };
864 '\\n' => { token( RE_Char, '\n' ); };
865 '\\v' => { token( RE_Char, '\v' ); };
866 '\\f' => { token( RE_Char, '\f' ); };
867 '\\r' => { token( RE_Char, '\r' ); };
868 '\\\n' => { updateCol(); };
869 '\\' any => { token( RE_Char, ts+1, te ); };
871 # Range dash in an OR expression.
872 '-' => { token( RE_Dash, 0, 0 ); };
874 # Terminate an OR expression.
875 ']' => { token( RE_SqClose ); fret; };
878 scan_error() << "unterminated OR literal" << endl;
881 # Characters in an OR expression.
882 [^\]] => { token( RE_Char, ts, te ); };
886 ragel_re_literal := |*
# Scans a /regex/ literal; '[' descends into the or_literal machine, and
# the closing slash (pattern elided here) emits RE_Slash.
887 # Escape sequences in regular expressions.
888 '\\0' => { token( RE_Char, '\0' ); };
889 '\\a' => { token( RE_Char, '\a' ); };
890 '\\b' => { token( RE_Char, '\b' ); };
891 '\\t' => { token( RE_Char, '\t' ); };
892 '\\n' => { token( RE_Char, '\n' ); };
893 '\\v' => { token( RE_Char, '\v' ); };
894 '\\f' => { token( RE_Char, '\f' ); };
895 '\\r' => { token( RE_Char, '\r' ); };
896 '\\\n' => { updateCol(); };
897 '\\' any => { token( RE_Char, ts+1, te ); };
899 # Terminate a regular expression.
901 token( RE_Slash, ts, te );
905 # Special characters.
906 '.' => { token( RE_Dot ); };
907 '*' => { token( RE_Star ); };
909 '[' => { token( RE_SqOpen ); fcall or_literal; };
910 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
913 scan_error() << "unterminated regular expression" << endl;
916 # Characters in a regular expression.
917 [^\/] => { token( RE_Char, ts, te ); };
920 # We need a separate token space here to avoid the ragel keywords.
# Scans the arguments of a "write" statement; the terminating ';' returns
# control to parser_def.
921 write_statement := |*
922 ident => { token( TK_Word, ts, te ); } ;
923 [ \t\n]+ => { updateCol(); };
924 ';' => { token( ';' ); fgoto parser_def; };
927 scan_error() << "unterminated write statement" << endl;
931 # Parser definitions.
# parser_def: the main tokenizer for the inside of a ragel section.
# Keywords, operators, literals and action-embedding symbols become typed
# tokens for the section parser; inline host-code blocks are scanned by
# the inline_code machines.
933 'machine' => { token( KW_Machine ); };
934 'include' => { token( KW_Include ); };
935 'import' => { token( KW_Import ); };
938 fgoto write_statement;
940 'action' => { token( KW_Action ); };
941 'alphtype' => { token( KW_AlphType ); };
942 'prepush' => { token( KW_PrePush ); };
943 'postpop' => { token( KW_PostPop ); };
945 # FIXME: Enable this post 5.17.
946 # 'range' => { token( KW_Range ); };
950 inlineBlockType = SemiTerminated;
951 if ( hostLang->lang == HostLang::Ruby )
952 fcall inline_code_ruby;
958 inlineBlockType = SemiTerminated;
959 if ( hostLang->lang == HostLang::Ruby )
960 fcall inline_code_ruby;
965 token( KW_Variable );
966 inlineBlockType = SemiTerminated;
967 if ( hostLang->lang == HostLang::Ruby )
968 fcall inline_code_ruby;
972 'when' => { token( KW_When ); };
973 'inwhen' => { token( KW_InWhen ); };
974 'outwhen' => { token( KW_OutWhen ); };
975 'eof' => { token( KW_Eof ); };
976 'err' => { token( KW_Err ); };
977 'lerr' => { token( KW_Lerr ); };
978 'to' => { token( KW_To ); };
979 'from' => { token( KW_From ); };
980 'export' => { token( KW_Export ); };
983 ident => { token( TK_Word, ts, te ); } ;
986 number => { token( TK_UInt, ts, te ); };
987 hex_number => { token( TK_Hex, ts, te ); };
989 # Literals, with optionals.
990 ( s_literal | d_literal ) [i]?
991 => { token( TK_Literal, ts, te ); };
993 '[' => { token( RE_SqOpen ); fcall or_literal; };
994 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
996 '/' => { token( RE_Slash ); fgoto ragel_re_literal; };
999 pound_comment => { updateCol(); };
1001 ':=' => { token( TK_ColonEquals ); };
# To state actions.
1004 ">~" => { token( TK_StartToState ); };
1005 "$~" => { token( TK_AllToState ); };
1006 "%~" => { token( TK_FinalToState ); };
1007 "<~" => { token( TK_NotStartToState ); };
1008 "@~" => { token( TK_NotFinalToState ); };
1009 "<>~" => { token( TK_MiddleToState ); };
1011 # From State actions
1012 ">*" => { token( TK_StartFromState ); };
1013 "$*" => { token( TK_AllFromState ); };
1014 "%*" => { token( TK_FinalFromState ); };
1015 "<*" => { token( TK_NotStartFromState ); };
1016 "@*" => { token( TK_NotFinalFromState ); };
1017 "<>*" => { token( TK_MiddleFromState ); };
# EOF actions.
1020 ">/" => { token( TK_StartEOF ); };
1021 "$/" => { token( TK_AllEOF ); };
1022 "%/" => { token( TK_FinalEOF ); };
1023 "</" => { token( TK_NotStartEOF ); };
1024 "@/" => { token( TK_NotFinalEOF ); };
1025 "<>/" => { token( TK_MiddleEOF ); };
1027 # Global Error actions.
1028 ">!" => { token( TK_StartGblError ); };
1029 "$!" => { token( TK_AllGblError ); };
1030 "%!" => { token( TK_FinalGblError ); };
1031 "<!" => { token( TK_NotStartGblError ); };
1032 "@!" => { token( TK_NotFinalGblError ); };
1033 "<>!" => { token( TK_MiddleGblError ); };
1035 # Local error actions.
1036 ">^" => { token( TK_StartLocalError ); };
1037 "$^" => { token( TK_AllLocalError ); };
1038 "%^" => { token( TK_FinalLocalError ); };
1039 "<^" => { token( TK_NotStartLocalError ); };
1040 "@^" => { token( TK_NotFinalLocalError ); };
1041 "<>^" => { token( TK_MiddleLocalError ); };
1044 "<>" => { token( TK_Middle ); };
# Conditions.
1047 '>?' => { token( TK_StartCond ); };
1048 '$?' => { token( TK_AllCond ); };
1049 '%?' => { token( TK_LeavingCond ); };
1051 '..' => { token( TK_DotDot ); };
1052 '**' => { token( TK_StarStar ); };
1053 '--' => { token( TK_DashDash ); };
1054 '->' => { token( TK_Arrow ); };
1055 '=>' => { token( TK_DoubleArrow ); };
1057 ":>" => { token( TK_ColonGt ); };
1058 ":>>" => { token( TK_ColonGtGt ); };
1059 "<:" => { token( TK_LtColon ); };
1061 # Opening of longest match.
1062 "|*" => { token( TK_BarStar ); };
1064 # Separator for name references.
1065 "::" => { token( TK_NameSep, ts, te ); };
1073 [ \t\r]+ => { updateCol(); };
1075 # If we are in a single line machine then newline may end the spec.
1078 if ( singleLineSpec ) {
1085 if ( lastToken == KW_Export || lastToken == KW_Entry )
1090 inlineBlockType = CurlyDelimited;
1091 if ( hostLang->lang == HostLang::Ruby )
1092 fcall inline_code_ruby;
1099 scan_error() << "unterminated ragel section" << endl;
1102 any => { token( *ts ); } ;
1105 # Outside code scanner. These tokens get passed through.
# Presumably the main_ruby machine (see rlscan_en_main_ruby in do_scan):
# scans Ruby host code outside ragel sections and passes tokens through
# via pass(). The section-opening patterns that set singleLineSpec are
# elided from this view.
1107 ident => { pass( IMP_Word, ts, te ); };
1108 number => { pass( IMP_UInt, ts, te ); };
1109 ruby_comment => { pass(); };
1110 ( s_literal | d_literal | host_re_literal )
1111 => { pass( IMP_Literal, ts, te ); };
1115 singleLineSpec = false;
1121 singleLineSpec = true;
1125 whitespace+ => { pass(); };
1127 any => { pass( *ts, 0, 0 ); };
1130 # Outside code scanner. These tokens get passed through.
# Presumably the main machine (see rlscan_en_main in do_scan): scans
# non-Ruby host code outside ragel sections, including 'define' for the
# import scanner. Section-opening patterns are elided from this view.
1132 'define' => { pass( IMP_Define, 0, 0 ); };
1133 ident => { pass( IMP_Word, ts, te ); };
1134 number => { pass( IMP_UInt, ts, te ); };
1135 c_cpp_comment => { pass(); };
1136 ( s_literal | d_literal ) => { pass( IMP_Literal, ts, te ); };
1140 singleLineSpec = false;
1146 singleLineSpec = true;
1150 whitespace+ => { pass(); };
1152 any => { pass( *ts, 0, 0 ); };
/* do_scan: the scanner driver. Reads the input in chunks into a growable
 * buffer, runs the generated rlscan machine over it (exec lines elided
 * here), and between reads shifts any partially-matched token prefix to
 * the front of the buffer so matching can resume. Doubles the buffer when
 * a single token overflows it, patching ts/te into the new buffer. */
1158 void Scanner::do_scan()
1161 char *buf = new char[bufsize];
1162 int cs, act, have = 0;
1165 /* The stack is two deep, one level for going into ragel defs from the main
1166 * machines which process outside code, and another for going into or literals
1167 * from either a ragel spec, or a regular expression. */
1169 int curly_count = 0;
1170 bool execute = true;
1171 bool singleLineSpec = false;
1172 InlineBlockType inlineBlockType = CurlyDelimited;
1174 /* Init the section parser and the character scanner. */
1178 /* Set up the start state. FIXME: After 5.20 is released the nocs write
1179 * init option should be used, the main machine eliminated and this statement moved
1180 * above the write init. */
1181 if ( hostLang->lang == HostLang::Ruby )
1182 cs = rlscan_en_main_ruby;
1184 cs = rlscan_en_main;
1187 char *p = buf + have;
1188 int space = bufsize - have;
1191 /* We filled up the buffer trying to scan a token. Grow it. */
1192 bufsize = bufsize * 2;
1193 char *newbuf = new char[bufsize];
1195 /* Recompute p and space. */
1197 space = bufsize - have;
1199 /* Patch up pointers possibly in use. */
1201 ts = newbuf + ( ts - buf );
1202 te = newbuf + ( te - buf );
1204 /* Copy the new buffer in. */
1205 memcpy( newbuf, buf, have );
1210 input.read( p, space );
1211 int len = input.gcount();
1214 /* If we see eof then append the eof var. */
1223 /* Check if we failed. */
1224 if ( cs == rlscan_error ) {
1225 /* Machine failed before finding a token. I'm not yet sure if this
1227 scan_error() << "scanner error" << endl;
1231 /* Decide if we need to preserve anything. */
1232 char *preserve = ts;
1234 /* Now set up the prefix. */
1235 if ( preserve == 0 )
1238 /* There is data that needs to be shifted over. */
1239 have = pe - preserve;
1240 memmove( buf, preserve, have );
1241 unsigned int shiftback = preserve - buf;