 * Copyright 2006-2007 Adrian Thurston <thurston@complang.org>

/* This file is part of Ragel.
 *
 * Ragel is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Ragel is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Ragel; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "inputdata.h"
 * The Scanner for Importing

    machine inline_token_scan;
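    # This scanner runs over the tokens buffered by importToken() below. It
    # recognizes definition forms in imported host code and forwards each one
    # to the parser as a Ragel constant definition. Illustrative example: an
    # imported C header line "#define FOO 42" is sent on as the tokens for
    # "FOO = 42;".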
    # Import scanner tokens.

        IMP_Define IMP_Word IMP_UInt => {
            int base = tok_ts - token_data;

            directToParser( inclToParser, fileName, line, column, TK_Word,
                    token_strings[base+nameOff], token_lens[base+nameOff] );
            directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
            directToParser( inclToParser, fileName, line, column, TK_UInt,
                    token_strings[base+numOff], token_lens[base+numOff] );
            directToParser( inclToParser, fileName, line, column, ';', 0, 0 );

        # Assignment of number.
        IMP_Word '=' IMP_UInt => {
            int base = tok_ts - token_data;

            directToParser( inclToParser, fileName, line, column, TK_Word,
                    token_strings[base+nameOff], token_lens[base+nameOff] );
            directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
            directToParser( inclToParser, fileName, line, column, TK_UInt,
                    token_strings[base+numOff], token_lens[base+numOff] );
            directToParser( inclToParser, fileName, line, column, ';', 0, 0 );

        IMP_Define IMP_Word IMP_Literal => {
            int base = tok_ts - token_data;

            directToParser( inclToParser, fileName, line, column, TK_Word,
                    token_strings[base+nameOff], token_lens[base+nameOff] );
            directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
            directToParser( inclToParser, fileName, line, column, TK_Literal,
                    token_strings[base+litOff], token_lens[base+litOff] );
            directToParser( inclToParser, fileName, line, column, ';', 0, 0 );

        # Assignment of literal.
        IMP_Word '=' IMP_Literal => {
            int base = tok_ts - token_data;

            directToParser( inclToParser, fileName, line, column, TK_Word,
                    token_strings[base+nameOff], token_lens[base+nameOff] );
            directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
            directToParser( inclToParser, fileName, line, column, TK_Literal,
                    token_strings[base+litOff], token_lens[base+litOff] );
            directToParser( inclToParser, fileName, line, column, ';', 0, 0 );

        # Catch everything else.
void Scanner::flushImport()
    int *pe = token_data + cur_token;

        machine inline_token_scan;

    cur_token = pe - tok_ts;
    int ts_offset = tok_ts - token_data;
    memmove( token_data, token_data+ts_offset, cur_token*sizeof(token_data[0]) );
    memmove( token_strings, token_strings+ts_offset, cur_token*sizeof(token_strings[0]) );
    memmove( token_lens, token_lens+ts_offset, cur_token*sizeof(token_lens[0]) );
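    /* The memmoves above shift any tokens that the pattern scanner did not
     * consume to the front of the buffers, so scanning can resume from them
     * on the next flush. */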
void Scanner::directToParser( Parser *toParser, const char *tokFileName, int tokLine,
        int tokColumn, int type, char *tokdata, int toklen )
    cerr << "scanner:" << tokLine << ":" << tokColumn <<
            ": sending token to the parser " << Parser_lelNames[type];
    cerr << " " << toklen;
    cerr << " " << tokdata;

    loc.fileName = tokFileName;

    toParser->token( loc, type, tokdata, toklen );

void Scanner::importToken( int token, char *start, char *end )
    if ( cur_token == max_tokens )

    token_data[cur_token] = token;

    token_strings[cur_token] = 0;
    token_lens[cur_token] = 0;

    int toklen = end-start;
    token_lens[cur_token] = toklen;
    token_strings[cur_token] = new char[toklen+1];
    memcpy( token_strings[cur_token], start, toklen );
    token_strings[cur_token][toklen] = 0;

void Scanner::pass( int token, char *start, char *end )
    if ( importMachines )
        importToken( token, start, end );

    /* If no errors and we are at the bottom of the include stack (the
     * source file listed on the command line) then write out the data. */
    if ( includeDepth == 0 && machineSpec == 0 && machineName == 0 )
        id.inputItems.tail->data.write( ts, te-ts );
 * The scanner for processing sections, includes, imports, etc.

    machine section_parse;

void Scanner::init( )

bool Scanner::active()
    if ( parser == 0 && ! parserExistsError ) {
        scan_error() << "this specification has no name, nor does any previous"
            " specification" << endl;
        parserExistsError = true;

ostream &Scanner::scan_error()
    /* Maintain the error count. */
    cerr << makeInputLoc( fileName, line, column ) << ": ";
/* An approximate check for duplicate includes. Due to aliasing of files it's
 * possible for duplicates to creep in. */
bool Scanner::duplicateInclude( char *inclFileName, char *inclSectionName )
    for ( IncludeHistory::Iter hi = parser->includeHistory; hi.lte(); hi++ ) {
        if ( strcmp( hi->fileName, inclFileName ) == 0 &&
                strcmp( hi->sectionName, inclSectionName ) == 0 )

void Scanner::updateCol()
    //cerr << "adding " << te - from << " to column" << endl;

void Scanner::handleMachine()
    /* Assign a name to the machine. */
    char *machine = word;

    if ( !importMachines && inclSectionTarg == 0 ) {
        ignoreSection = false;

        ParserDictEl *pdEl = id.parserDict.find( machine );
            pdEl = new ParserDictEl( machine );
            pdEl->value = new Parser( fileName, machine, sectionLoc );

            id.parserDict.insert( pdEl );
            id.parserList.append( pdEl->value );

        parser = pdEl->value;
    else if ( !importMachines && strcmp( inclSectionTarg, machine ) == 0 ) {
        /* found include target */
        ignoreSection = false;
        parser = inclToParser;

        /* ignoring section */
        ignoreSection = true;
void Scanner::handleInclude()
    char *inclSectionName = word;
    char **includeChecks = 0;

    /* Implement defaults for the input file and section name. */
    if ( inclSectionName == 0 )
        inclSectionName = parser->sectionName;

        includeChecks = makeIncludePathChecks( fileName, lit, lit_len );

        char *test = new char[strlen(fileName)+1];
        strcpy( test, fileName );

        includeChecks = new char*[2];

        includeChecks[0] = test;
        includeChecks[1] = 0;

    ifstream *inFile = tryOpenInclude( includeChecks, found );
        scan_error() << "include: failed to locate file" << endl;
        char **tried = includeChecks;
        while ( *tried != 0 )
            scan_error() << "include: attempted: \"" << *tried++ << '\"' << endl;

    /* Don't include anything that's already been included. */
    if ( !duplicateInclude( includeChecks[found], inclSectionName ) ) {
        parser->includeHistory.append( IncludeHistoryItem(
            includeChecks[found], inclSectionName ) );

        Scanner scanner( id, includeChecks[found], *inFile, parser,
                inclSectionName, includeDepth+1, false );
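        /* The nested scanner walks the included file at the next include
         * depth; handleMachine() only activates the parser for the section
         * named by inclSectionTarg, so unrelated sections in the included
         * file are ignored. */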
void Scanner::handleImport()
    char **importChecks = makeIncludePathChecks( fileName, lit, lit_len );

    /* Open the input file for reading. */
    ifstream *inFile = tryOpenInclude( importChecks, found );
        scan_error() << "import: could not open import file " <<
                "for reading" << endl;
        char **tried = importChecks;
        while ( *tried != 0 )
            scan_error() << "import: attempted: \"" << *tried++ << '\"' << endl;

    Scanner scanner( id, importChecks[found], *inFile, parser,
            0, includeDepth+1, true );

    scanner.importToken( 0, 0, 0 );
    scanner.flushImport();
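    /* A final null token is queued before flushing so that the tokens still
     * buffered from the imported file can be run through the import pattern
     * scanner one last time. */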
    machine section_parse;

    # Need the defines representing tokens.

    action clear_words { word = lit = 0; word_len = lit_len = 0; }
    action store_word { word = tokdata; word_len = toklen; }
    action store_lit { lit = tokdata; lit_len = toklen; }

    action mach_err { scan_error() << "bad machine statement" << endl; }
    action incl_err { scan_error() << "bad include statement" << endl; }
    action import_err { scan_error() << "bad import statement" << endl; }
    action write_err { scan_error() << "bad write statement" << endl; }

    action handle_machine { handleMachine(); }
    action handle_include { handleInclude(); }
    action handle_import { handleImport(); }

        ( KW_Machine TK_Word @store_word ';' ) @handle_machine
        <>err mach_err <>eof mach_err;

        TK_Word @store_word ( TK_Literal @store_lit )? |
        TK_Literal @store_lit

        ( KW_Include include_names ';' ) @handle_include
        <>err incl_err <>eof incl_err;

        ( KW_Import TK_Literal @store_lit ';' ) @handle_import
        <>err import_err <>eof import_err;

        if ( active() && machineSpec == 0 && machineName == 0 ) {
            InputItem *inputItem = new InputItem;
            inputItem->type = InputItem::Write;
            inputItem->loc.line = line;
            inputItem->loc.col = column;
            inputItem->name = parser->sectionName;
            inputItem->pd = parser->pd;
            id.inputItems.append( inputItem );

        if ( active() && machineSpec == 0 && machineName == 0 )
            id.inputItems.tail->writeArgs.append( strdup(tokdata) );

        if ( active() && machineSpec == 0 && machineName == 0 )
            id.inputItems.tail->writeArgs.append( 0 );

        ( KW_Write @write_command
            ( TK_Word @write_arg )+ ';' @write_close )
        <>err write_err <>eof write_err;
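    # Illustrative forms of the statements handled above:
    #   machine fsm;
    #   include other_fsm "other.rl";
    #   import "defs.h";
    #   write data;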
        /* Send the token off to the parser. */
        directToParser( parser, fileName, line, column, type, tokdata, toklen );

    # Catch everything else.
        ^( KW_Machine | KW_Include | KW_Import | KW_Write ) @handle_token;
void Scanner::token( int type, char c )
    token( type, &c, &c + 1 );

void Scanner::token( int type )

void Scanner::token( int type, char *start, char *end )
        tokdata = new char[toklen+1];
        memcpy( tokdata, start, toklen );

    processToken( type, tokdata, toklen );

void Scanner::processToken( int type, char *tokdata, int toklen )

        machine section_parse;

    /* Record the last token for use in controlling the scan of subsequent

void Scanner::startSection( )
    parserExistsError = false;

    sectionLoc.fileName = fileName;
    sectionLoc.line = line;
    sectionLoc.col = column;

void Scanner::endSection( )
    /* Execute the eof actions for the section parser. */
    processToken( -1, 0, 0 );

    /* Close off the section with the parser. */
        loc.fileName = fileName;

        parser->token( loc, TK_EndSection, 0, 0 );

    if ( includeDepth == 0 ) {
        if ( machineSpec == 0 && machineName == 0 ) {
            /* The end section may include a newline on the end, so
             * we use the last line, which will count the newline. */
            InputItem *inputItem = new InputItem;
            inputItem->type = InputItem::HostData;
            inputItem->loc.line = line;
            inputItem->loc.col = column;
            id.inputItems.append( inputItem );

bool isAbsolutePath( const char *path )
    return isalpha( path[0] ) && path[1] == ':' && path[2] == '\\';
    return path[0] == '/';
char **Scanner::makeIncludePathChecks( const char *thisFileName,
        const char *fileName, int fnlen )
    char **checks = new char*[2];

    bool caseInsensitive = false;

    char *data = prepareLitString( InputLoc(), fileName, fnlen,
            length, caseInsensitive );

    if ( isAbsolutePath( data ) )
        checks[nextCheck++] = data;

    /* Search from the location of the current file. */
    const char *lastSlash = strrchr( thisFileName, PATH_SEP );
    if ( lastSlash == 0 )
        checks[nextCheck++] = data;

        long givenPathLen = (lastSlash - thisFileName) + 1;
        long checklen = givenPathLen + length;
        char *check = new char[checklen+1];
        memcpy( check, thisFileName, givenPathLen );
        memcpy( check+givenPathLen, data, length );

        checks[nextCheck++] = check;

    /* Search from the include paths given on the command line. */
    for ( ArgsVector::Iter incp = id.includePaths; incp.lte(); incp++ ) {
        long pathLen = strlen( *incp );
        long checkLen = pathLen + 1 + length;
        char *check = new char[checkLen+1];
        memcpy( check, *incp, pathLen );
        check[pathLen] = PATH_SEP;
        memcpy( check+pathLen+1, data, length );

        checks[nextCheck++] = check;

    checks[nextCheck] = 0;
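    /* Illustrative result: including "foo.rl" from "src/main.rl" with
     * "-I /usr/share/ragel" on the command line yields the candidate paths
     * "src/foo.rl" and "/usr/share/ragel/foo.rl", followed by the null
     * terminator. */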
ifstream *Scanner::tryOpenInclude( char **pathChecks, long &found )
    char **check = pathChecks;
    ifstream *inFile = new ifstream;

    while ( *check != 0 ) {
        inFile->open( *check );
        if ( inFile->is_open() ) {
            found = check - pathChecks;
    # This is sent by the driver code.

    # Identifiers, numbers, comments, and other common things.
    ident = ( alpha | '_' ) ( alpha | digit | '_' )*;
    hex_number = '0x' [0-9a-fA-F]+;

        '/*' ( any | NL )* :>> '*/';

    c_cpp_comment = c_comment | cpp_comment;

    ruby_comment = '#' [^\n]* NL;

    # These literal forms are common to host code and ragel.
    s_literal = "'" ([^'\\] | NL | '\\' (any | NL))* "'";
    d_literal = '"' ([^"\\] | NL | '\\' (any | NL))* '"';
    host_re_literal = '/' ([^/\\] | NL | '\\' (any | NL))* '/';

    whitespace = [ \t] | NL;
    pound_comment = '#' [^\n]* NL;
    # An inline block of code for Ruby.
    inline_code_ruby := |*
        # Inline expression keywords.
        "fpc" => { token( KW_PChar ); };
        "fc" => { token( KW_Char ); };
        "fcurs" => { token( KW_CurState ); };
        "ftargs" => { token( KW_TargState ); };
            whitespaceOn = false;

        # Inline statement keywords.
            whitespaceOn = false;

        "fexec" => { token( KW_Exec, 0, 0 ); };
            whitespaceOn = false;
            whitespaceOn = false;
            whitespaceOn = false;
            whitespaceOn = false;
            whitespaceOn = false;

        ident => { token( TK_Word, ts, te ); };

        number => { token( TK_UInt, ts, te ); };
        hex_number => { token( TK_Hex, ts, te ); };

        ( s_literal | d_literal | host_re_literal )
            => { token( IL_Literal, ts, te ); };

            token( IL_WhiteSpace, ts, te );

        ruby_comment => { token( IL_Comment, ts, te ); };

        "::" => { token( TK_NameSep, ts, te ); };
        # Some symbols need to go to the parser with their cardinal value as
        # the token type (as opposed to being sent as anonymous symbols),
        # because they are part of sequences that we interpret. The * ) ;
        # symbols turn whitespace parsing back on; it gets turned
        # off by some keywords.
            token( *ts, ts, te );
            if ( inlineBlockType == SemiTerminated )

            token( *ts, ts, te );

        [,(] => { token( *ts, ts, te ); };

            token( IL_Symbol, ts, te );

            if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
                /* Inline code block ends. */
                /* Either a semi-terminated inline block or only the closing
                 * brace of some inner scope, not the block's closing brace. */
                token( IL_Symbol, ts, te );
            scan_error() << "unterminated code block" << endl;

        # Send every other character as a symbol.
        any => { token( IL_Symbol, ts, te ); };
    # An inline block of code for languages other than Ruby.
        # Inline expression keywords.
        "fpc" => { token( KW_PChar ); };
        "fc" => { token( KW_Char ); };
        "fcurs" => { token( KW_CurState ); };
        "ftargs" => { token( KW_TargState ); };
            whitespaceOn = false;

        # Inline statement keywords.
            whitespaceOn = false;

        "fexec" => { token( KW_Exec, 0, 0 ); };
            whitespaceOn = false;
            whitespaceOn = false;
            whitespaceOn = false;
            whitespaceOn = false;
            whitespaceOn = false;

        ident => { token( TK_Word, ts, te ); };

        number => { token( TK_UInt, ts, te ); };
        hex_number => { token( TK_Hex, ts, te ); };

        ( s_literal | d_literal )
            => { token( IL_Literal, ts, te ); };

            token( IL_WhiteSpace, ts, te );

        c_cpp_comment => { token( IL_Comment, ts, te ); };

        "::" => { token( TK_NameSep, ts, te ); };
        # Some symbols need to go to the parser with their cardinal value as
        # the token type (as opposed to being sent as anonymous symbols),
        # because they are part of sequences that we interpret. The * ) ;
        # symbols turn whitespace parsing back on; it gets turned
        # off by some keywords.
            token( *ts, ts, te );
            if ( inlineBlockType == SemiTerminated )

            token( *ts, ts, te );

        [,(] => { token( *ts, ts, te ); };

            token( IL_Symbol, ts, te );

            if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
                /* Inline code block ends. */
                /* Either a semi-terminated inline block or only the closing
                 * brace of some inner scope, not the block's closing brace. */
                token( IL_Symbol, ts, te );
854 scan_error() << "unterminated code block" << endl;
857 # Send every other character as a symbol.
858 any => { token( IL_Symbol, ts, te ); };
        # Escape sequences in OR expressions.
        '\\0' => { token( RE_Char, '\0' ); };
        '\\a' => { token( RE_Char, '\a' ); };
        '\\b' => { token( RE_Char, '\b' ); };
        '\\t' => { token( RE_Char, '\t' ); };
        '\\n' => { token( RE_Char, '\n' ); };
        '\\v' => { token( RE_Char, '\v' ); };
        '\\f' => { token( RE_Char, '\f' ); };
        '\\r' => { token( RE_Char, '\r' ); };
        '\\\n' => { updateCol(); };
        '\\' any => { token( RE_Char, ts+1, te ); };

        # Range dash in an OR expression.
        '-' => { token( RE_Dash, 0, 0 ); };

        # Terminate an OR expression.
        ']' => { token( RE_SqClose ); fret; };

            scan_error() << "unterminated OR literal" << endl;

        # Characters in an OR expression.
        [^\]] => { token( RE_Char, ts, te ); };
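        # Illustrative example: the source text "[a-z\n]" is scanned as
        # RE_SqOpen (from the '[' rule in parser_def), then RE_Char 'a',
        # RE_Dash, RE_Char 'z', RE_Char '\n', and finally RE_SqClose, which
        # returns to the calling machine.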
    ragel_re_literal := |*
        # Escape sequences in regular expressions.
        '\\0' => { token( RE_Char, '\0' ); };
        '\\a' => { token( RE_Char, '\a' ); };
        '\\b' => { token( RE_Char, '\b' ); };
        '\\t' => { token( RE_Char, '\t' ); };
        '\\n' => { token( RE_Char, '\n' ); };
        '\\v' => { token( RE_Char, '\v' ); };
        '\\f' => { token( RE_Char, '\f' ); };
        '\\r' => { token( RE_Char, '\r' ); };
        '\\\n' => { updateCol(); };
        '\\' any => { token( RE_Char, ts+1, te ); };
        # Terminate a regular expression.
            token( RE_Slash, ts, te );

        # Special characters.
        '.' => { token( RE_Dot ); };
        '*' => { token( RE_Star ); };

        '[' => { token( RE_SqOpen ); fcall or_literal; };
        '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };

            scan_error() << "unterminated regular expression" << endl;

        # Characters in a regular expression.
        [^\/] => { token( RE_Char, ts, te ); };
    # We need a separate token space here to avoid the ragel keywords.
    write_statement := |*
        ident => { token( TK_Word, ts, te ); };
        [ \t\n]+ => { updateCol(); };
        ';' => { token( ';' ); fgoto parser_def; };

            scan_error() << "unterminated write statement" << endl;
    # Parser definitions.
        'length_cond' => { token( KW_Length ); };
        'machine' => { token( KW_Machine ); };
        'include' => { token( KW_Include ); };
        'import' => { token( KW_Import ); };

            fgoto write_statement;

        'action' => { token( KW_Action ); };
        'alphtype' => { token( KW_AlphType ); };
        'prepush' => { token( KW_PrePush ); };
        'postpop' => { token( KW_PostPop ); };

        # FIXME: Enable this post 5.17.
        # 'range' => { token( KW_Range ); };

            inlineBlockType = SemiTerminated;
            if ( hostLang->lang == HostLang::Ruby )
                fcall inline_code_ruby;

            inlineBlockType = SemiTerminated;
            if ( hostLang->lang == HostLang::Ruby )
                fcall inline_code_ruby;

            token( KW_Variable );
            inlineBlockType = SemiTerminated;
            if ( hostLang->lang == HostLang::Ruby )
                fcall inline_code_ruby;

        'when' => { token( KW_When ); };
        'inwhen' => { token( KW_InWhen ); };
        'outwhen' => { token( KW_OutWhen ); };
        'eof' => { token( KW_Eof ); };
        'err' => { token( KW_Err ); };
        'lerr' => { token( KW_Lerr ); };
        'to' => { token( KW_To ); };
        'from' => { token( KW_From ); };
        'export' => { token( KW_Export ); };

        ident => { token( TK_Word, ts, te ); };

        number => { token( TK_UInt, ts, te ); };
        hex_number => { token( TK_Hex, ts, te ); };

        # Literals, with optionals.
        ( s_literal | d_literal ) [i]?
            => { token( TK_Literal, ts, te ); };

        '[' => { token( RE_SqOpen ); fcall or_literal; };
        '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };

        '/' => { token( RE_Slash ); fgoto ragel_re_literal; };

        pound_comment => { updateCol(); };

        ':=' => { token( TK_ColonEquals ); };

        ">~" => { token( TK_StartToState ); };
        "$~" => { token( TK_AllToState ); };
        "%~" => { token( TK_FinalToState ); };
        "<~" => { token( TK_NotStartToState ); };
        "@~" => { token( TK_NotFinalToState ); };
        "<>~" => { token( TK_MiddleToState ); };

        # From State actions
        ">*" => { token( TK_StartFromState ); };
        "$*" => { token( TK_AllFromState ); };
        "%*" => { token( TK_FinalFromState ); };
        "<*" => { token( TK_NotStartFromState ); };
        "@*" => { token( TK_NotFinalFromState ); };
        "<>*" => { token( TK_MiddleFromState ); };

        ">/" => { token( TK_StartEOF ); };
        "$/" => { token( TK_AllEOF ); };
        "%/" => { token( TK_FinalEOF ); };
        "</" => { token( TK_NotStartEOF ); };
        "@/" => { token( TK_NotFinalEOF ); };
        "<>/" => { token( TK_MiddleEOF ); };

        # Global Error actions.
        ">!" => { token( TK_StartGblError ); };
        "$!" => { token( TK_AllGblError ); };
        "%!" => { token( TK_FinalGblError ); };
        "<!" => { token( TK_NotStartGblError ); };
        "@!" => { token( TK_NotFinalGblError ); };
        "<>!" => { token( TK_MiddleGblError ); };

        # Local error actions.
        ">^" => { token( TK_StartLocalError ); };
        "$^" => { token( TK_AllLocalError ); };
        "%^" => { token( TK_FinalLocalError ); };
        "<^" => { token( TK_NotStartLocalError ); };
        "@^" => { token( TK_NotFinalLocalError ); };
        "<>^" => { token( TK_MiddleLocalError ); };

        "<>" => { token( TK_Middle ); };

        '>?' => { token( TK_StartCond ); };
        '$?' => { token( TK_AllCond ); };
        '%?' => { token( TK_LeavingCond ); };

        '..' => { token( TK_DotDot ); };
        '**' => { token( TK_StarStar ); };
        '--' => { token( TK_DashDash ); };
        '->' => { token( TK_Arrow ); };
        '=>' => { token( TK_DoubleArrow ); };

        ":>" => { token( TK_ColonGt ); };
        ":>>" => { token( TK_ColonGtGt ); };
        "<:" => { token( TK_LtColon ); };

        # Opening of longest match.
        "|*" => { token( TK_BarStar ); };
        # Separator for name references.
        "::" => { token( TK_NameSep, ts, te ); };
        [ \t\r]+ => { updateCol(); };

        # If we are in a single-line machine spec, then a newline may end the spec.
            if ( singleLineSpec ) {

            if ( lastToken == KW_Export || lastToken == KW_Entry )

            inlineBlockType = CurlyDelimited;
            if ( hostLang->lang == HostLang::Ruby )
                fcall inline_code_ruby;

            scan_error() << "unterminated ragel section" << endl;

        any => { token( *ts ); };
    # Outside code scanner. These tokens get passed through.
        ident => { pass( IMP_Word, ts, te ); };
        number => { pass( IMP_UInt, ts, te ); };
        ruby_comment => { pass(); };
        ( s_literal | d_literal | host_re_literal )
            => { pass( IMP_Literal, ts, te ); };

            singleLineSpec = false;

            singleLineSpec = true;

        whitespace+ => { pass(); };
        any => { pass( *ts, 0, 0 ); };
    # Outside code scanner. These tokens get passed through.
        'define' => { pass( IMP_Define, 0, 0 ); };
        ident => { pass( IMP_Word, ts, te ); };
        number => { pass( IMP_UInt, ts, te ); };
        c_cpp_comment => { pass(); };
        ( s_literal | d_literal ) => { pass( IMP_Literal, ts, te ); };

            singleLineSpec = false;

            singleLineSpec = true;

        whitespace+ => { pass(); };
        any => { pass( *ts, 0, 0 ); };
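    # Note: the singleLineSpec assignments above belong to the rules that
    # recognize the section delimiters. A "%%{" opens a multi-line Ragel
    # section that runs to "}%%" (singleLineSpec = false), while a bare "%%"
    # opens a single-line section that ends at the newline
    # (singleLineSpec = true).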
void Scanner::do_scan()
    char *buf = new char[bufsize];
    int cs, act, have = 0;

    /* The stack is two deep, one level for going into ragel defs from the main
     * machines which process outside code, and another for going into or literals
     * from either a ragel spec, or a regular expression. */

    int curly_count = 0;
    bool execute = true;
    bool singleLineSpec = false;
    InlineBlockType inlineBlockType = CurlyDelimited;
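    /* CurlyDelimited marks host-code blocks wrapped in { ... }, whose end is
     * found by balancing curly braces. SemiTerminated marks single-statement
     * forms (for example alphtype and variable declarations) that run to the
     * terminating semicolon. */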
    /* Init the section parser and the character scanner. */

    /* Set up the start state. FIXME: After 5.20 is released the nocs write
     * init option should be used, the main machine eliminated and this statement moved
     * above the write init. */
    if ( hostLang->lang == HostLang::Ruby )
        cs = rlscan_en_main_ruby;
        cs = rlscan_en_main;

        char *p = buf + have;
        int space = bufsize - have;

            /* We filled up the buffer trying to scan a token. Grow it. */
            bufsize = bufsize * 2;
            char *newbuf = new char[bufsize];

            /* Recompute p and space. */
            space = bufsize - have;

            /* Patch up pointers possibly in use. */
                ts = newbuf + ( ts - buf );
                te = newbuf + ( te - buf );
            /* Copy the existing data into the new buffer. */
            memcpy( newbuf, buf, have );

        input.read( p, space );
        int len = input.gcount();
        /* If we see eof then append the eof var. */

        /* Check if we failed. */
        if ( cs == rlscan_error ) {
            /* Machine failed before finding a token. I'm not yet sure if this
            scan_error() << "scanner error" << endl;

        /* Decide if we need to preserve anything. */
        char *preserve = ts;

        /* Now set up the prefix. */
        if ( preserve == 0 )

            /* There is data that needs to be shifted over. */
            have = pe - preserve;
            memmove( buf, preserve, have );
            unsigned int shiftback = preserve - buf;
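            /* shiftback records how far the preserved text moved toward the
             * front of the buffer; any pointers into the old location (such
             * as ts and te) must be pulled back by this amount so they keep
             * referring to the token in progress. */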