2 * Copyright 2006-2007 Adrian Thurston <thurston@complang.org>
5 /* This file is part of Ragel.
7 * Ragel is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * Ragel is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Ragel; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 #include "inputdata.h"
53 * The Scanner for Importing
57 machine inline_token_scan;
61 # Import scanner tokens.
# NOTE(review): intervening lines are elided in this extraction; the rules
# below are fragments of the full machine.
# A define of a number (IMP_Define IMP_Word IMP_UInt): re-emit the pair to
# the include-target parser as the Ragel assignment "name = number ;".
66 IMP_Define IMP_Word IMP_UInt => {
67 int base = tok_ts - token_data;
71 directToParser( inclToParser, fileName, line, column, TK_Word,
72 token_strings[base+nameOff], token_lens[base+nameOff] );
73 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
74 directToParser( inclToParser, fileName, line, column, TK_UInt,
75 token_strings[base+numOff], token_lens[base+numOff] );
76 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
79 # Assignment of number.
80 IMP_Word '=' IMP_UInt => {
81 int base = tok_ts - token_data;
85 directToParser( inclToParser, fileName, line, column, TK_Word,
86 token_strings[base+nameOff], token_lens[base+nameOff] );
87 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
88 directToParser( inclToParser, fileName, line, column, TK_UInt,
89 token_strings[base+numOff], token_lens[base+numOff] );
90 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
# Define of a literal: forwarded as "name = literal ;".
94 IMP_Define IMP_Word IMP_Literal => {
95 int base = tok_ts - token_data;
99 directToParser( inclToParser, fileName, line, column, TK_Word,
100 token_strings[base+nameOff], token_lens[base+nameOff] );
101 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
102 directToParser( inclToParser, fileName, line, column, TK_Literal,
103 token_strings[base+litOff], token_lens[base+litOff] );
104 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
107 # Assignment of literal.
108 IMP_Word '=' IMP_Literal => {
109 int base = tok_ts - token_data;
113 directToParser( inclToParser, fileName, line, column, TK_Word,
114 token_strings[base+nameOff], token_lens[base+nameOff] );
115 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
116 directToParser( inclToParser, fileName, line, column, TK_Literal,
117 token_strings[base+litOff], token_lens[base+litOff] );
118 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
121 # Catch everything else.
/* Run the inline_token_scan machine over the queued import tokens, then
 * shift the unconsumed tail of the parallel token arrays (token_data /
 * token_strings / token_lens) down to index 0 so scanning can resume.
 * NOTE(review): the machine-execution lines are elided in this extraction. */
128 void Scanner::flushImport()
131 int *pe = token_data + cur_token;
135 machine inline_token_scan;
/* tok_ts points at the first token the scan did not consume. */
143 cur_token = pe - tok_ts;
144 int ts_offset = tok_ts - token_data;
145 memmove( token_data, token_data+ts_offset, cur_token*sizeof(token_data[0]) );
146 memmove( token_strings, token_strings+ts_offset, cur_token*sizeof(token_strings[0]) );
147 memmove( token_lens, token_lens+ts_offset, cur_token*sizeof(token_lens[0]) );
/* Send a single token, with optional data, directly to the given parser.
 * The cerr statements are a debug trace of the token being sent.
 * NOTE(review): the trace is presumably guarded by a debug flag on an
 * elided line — confirm against the full source. */
151 void Scanner::directToParser( Parser *toParser, const char *tokFileName, int tokLine,
152 int tokColumn, int type, char *tokdata, int toklen )
157 cerr << "scanner:" << tokLine << ":" << tokColumn <<
158 ": sending token to the parser " << Parser_lelNames[type];
159 cerr << " " << toklen;
161 cerr << " " << tokdata;
/* Build the token location from the caller-supplied position. */
165 loc.fileName = tokFileName;
169 toParser->token( loc, type, tokdata, toklen );
/* Queue one token for the import scanner: the type goes into token_data,
 * and (when text is present) a NUL-terminated heap copy of [start,end)
 * goes into token_strings with its length in token_lens.
 * NOTE(review): the action taken when the queue is full (max_tokens) is
 * on elided lines — presumably a flush; confirm against the full source. */
172 void Scanner::importToken( int token, char *start, char *end )
174 if ( cur_token == max_tokens )
177 token_data[cur_token] = token;
/* No text attached to this token. */
179 token_strings[cur_token] = 0;
180 token_lens[cur_token] = 0;
183 int toklen = end-start;
184 token_lens[cur_token] = toklen;
185 token_strings[cur_token] = new char[toklen+1];
186 memcpy( token_strings[cur_token], start, toklen );
187 token_strings[cur_token][toklen] = 0;
/* Pass a host-code token through: feed it to the import machinery when
 * importing, and echo the matched text to the current output item when
 * scanning the top-level input file outside any machine spec. */
192 void Scanner::pass( int token, char *start, char *end )
194 if ( importMachines )
195 importToken( token, start, end );
203 /* If no errors and we are at the bottom of the include stack (the
204 * source file listed on the command line) then write out the data. */
205 if ( includeDepth == 0 && machineSpec == 0 && machineName == 0 )
206 id.inputItems.tail->data.write( ts, te-ts );
210 * The scanner for processing sections, includes, imports, etc.
214 machine section_parse;
/* Initialize scanner state. NOTE(review): body elided in this extraction. */
220 void Scanner::init( )
/* True when there is a parser to send tokens to. Reports the "no name"
 * error at most once, via the parserExistsError latch. */
225 bool Scanner::active()
230 if ( parser == 0 && ! parserExistsError ) {
231 scan_error() << "this specification has no name, nor does any previous"
232 " specification" << endl;
233 parserExistsError = true;
/* Emit the standard "file:line:col: " prefix to cerr and return the
 * stream so the caller can append the message text. */
242 ostream &Scanner::scan_error()
244 /* Maintain the error count. */
246 cerr << makeInputLoc( fileName, line, column ) << ": ";
250 /* An approximate check for duplicate includes. Due to aliasing of files it's
251 * possible for duplicates to creep in. */
252 bool Scanner::duplicateInclude( char *inclFileName, char *inclSectionName )
/* Linear scan of the include history for a matching (file, section) pair. */
254 for ( IncludeHistory::Iter hi = parser->includeHistory; hi.lte(); hi++ ) {
255 if ( strcmp( hi->fileName, inclFileName ) == 0 &&
256 strcmp( hi->sectionName, inclSectionName ) == 0 )
/* Advance the column counter past the most recently matched text.
 * NOTE(review): body elided in this extraction. */
264 void Scanner::updateCol()
269 //cerr << "adding " << te - from << " to column" << endl;
/* Process a "machine <name>;" statement: select (or create) the parser
 * for the named machine, honour an include target when one is active,
 * and decide whether the section that follows should be ignored. */
274 void Scanner::handleMachine()
276 /* Assign a name to the machine. */
277 char *machine = word;
279 if ( !importMachines && inclSectionTarg == 0 ) {
280 ignoreSection = false;
/* Look up the machine name; create a parser for it on first sight. */
282 ParserDictEl *pdEl = id.parserDict.find( machine );
284 pdEl = new ParserDictEl( machine );
285 pdEl->value = new Parser( fileName, machine, sectionLoc );
287 id.parserDict.insert( pdEl );
288 id.parserList.append( pdEl->value );
291 parser = pdEl->value;
293 else if ( !importMachines && strcmp( inclSectionTarg, machine ) == 0 ) {
294 /* found include target */
295 ignoreSection = false;
296 parser = inclToParser;
299 /* ignoring section */
300 ignoreSection = true;
/* Process an "include" statement: resolve the file name (defaulting to
 * the current file and section name when omitted), try each candidate
 * path, and recursively scan the included file unless the same
 * (file, section) pair was already included. */
305 void Scanner::handleInclude()
308 char *inclSectionName = word;
309 char **includeChecks = 0;
311 /* Implement defaults for the input file and section name. */
312 if ( inclSectionName == 0 )
313 inclSectionName = parser->sectionName;
316 includeChecks = makeIncludePathChecks( fileName, lit, lit_len );
/* No file literal given: include from the current file itself. */
318 char *test = new char[strlen(fileName)+1];
319 strcpy( test, fileName );
321 includeChecks = new char*[2];
323 includeChecks[0] = test;
324 includeChecks[1] = 0;
328 ifstream *inFile = tryOpenInclude( includeChecks, found );
/* On failure, report every path that was attempted. */
330 scan_error() << "include: failed to locate file" << endl;
331 char **tried = includeChecks;
332 while ( *tried != 0 )
333 scan_error() << "include: attempted: \"" << *tried++ << '\"' << endl;
336 /* Don't include anything that's already been included. */
337 if ( !duplicateInclude( includeChecks[found], inclSectionName ) ) {
338 parser->includeHistory.append( IncludeHistoryItem(
339 includeChecks[found], inclSectionName ) );
/* Recurse with a nested scanner at includeDepth+1 (not importing). */
341 Scanner scanner( id, includeChecks[found], *inFile, parser,
342 inclSectionName, includeDepth+1, false );
/* Process an "import" statement: locate and open the named file, scan it
 * with the import machinery enabled, then flush any queued tokens. */
350 void Scanner::handleImport()
353 char **importChecks = makeIncludePathChecks( fileName, lit, lit_len );
355 /* Open the input file for reading. */
357 ifstream *inFile = tryOpenInclude( importChecks, found );
/* On failure, report every path that was attempted. */
359 scan_error() << "import: could not open import file " <<
360 "for reading" << endl;
361 char **tried = importChecks;
362 while ( *tried != 0 )
363 scan_error() << "import: attempted: \"" << *tried++ << '\"' << endl;
/* Nested scanner with importing enabled (last argument true). */
366 Scanner scanner( id, importChecks[found], *inFile, parser,
367 0, includeDepth+1, true );
/* Push a sentinel token, then shift out anything still queued. */
369 scanner.importToken( 0, 0, 0 );
370 scanner.flushImport();
376 machine section_parse;
378 # Need the defines representing tokens.
# Actions that capture the word/literal arguments of a statement.
381 action clear_words { word = lit = 0; word_len = lit_len = 0; }
382 action store_word { word = tokdata; word_len = toklen; }
383 action store_lit { lit = tokdata; lit_len = toklen; }
# One error action per malformed statement form.
385 action mach_err { scan_error() << "bad machine statement" << endl; }
386 action incl_err { scan_error() << "bad include statement" << endl; }
387 action import_err { scan_error() << "bad import statement" << endl; }
388 action write_err { scan_error() << "bad write statement" << endl; }
390 action handle_machine { handleMachine(); }
391 action handle_include { handleInclude(); }
392 action handle_import { handleImport(); }
# machine statement: "machine <word> ;"
395 ( KW_Machine TK_Word @store_word ';' ) @handle_machine
396 <>err mach_err <>eof mach_err;
# include arguments: optional section word and/or file literal.
399 TK_Word @store_word ( TK_Literal @store_lit )? |
400 TK_Literal @store_lit
404 ( KW_Include include_names ';' ) @handle_include
405 <>err incl_err <>eof incl_err;
# import statement: "import <literal> ;"
408 ( KW_Import TK_Literal @store_lit ';' ) @handle_import
409 <>err import_err <>eof import_err;
/* write command: queue a Write InputItem when at the top level. */
413 if ( active() && machineSpec == 0 && machineName == 0 ) {
414 InputItem *inputItem = new InputItem;
415 inputItem->type = InputItem::Write;
416 inputItem->loc.fileName = fileName;
417 inputItem->loc.line = line;
418 inputItem->loc.col = column;
419 inputItem->name = parser->sectionName;
420 inputItem->pd = parser->pd;
421 id.inputItems.append( inputItem );
/* write arguments accumulate on the queued item; the list is closed
 * with a null entry. */
427 if ( active() && machineSpec == 0 && machineName == 0 )
428 id.inputItems.tail->writeArgs.append( strdup(tokdata) );
433 if ( active() && machineSpec == 0 && machineName == 0 )
434 id.inputItems.tail->writeArgs.append( 0 );
438 ( KW_Write @write_command
439 ( TK_Word @write_arg )+ ';' @write_close )
440 <>err write_err <>eof write_err;
444 /* Send the token off to the parser. */
446 directToParser( parser, fileName, line, column, type, tokdata, toklen );
449 # Catch everything else.
451 ^( KW_Machine | KW_Include | KW_Import | KW_Write ) @handle_token;
/* token( type, c ): single-character token; forwards to the range form. */
462 void Scanner::token( int type, char c )
464 token( type, &c, &c + 1 );
/* token( type ): token with no attached data. */
467 void Scanner::token( int type )
/* token( type, start, end ): copy the matched text [start,end) into a
 * fresh NUL-terminated buffer and hand it to processToken(). */
472 void Scanner::token( int type, char *start, char *end )
478 tokdata = new char[toklen+1];
479 memcpy( tokdata, start, toklen );
483 processToken( type, tokdata, toklen );
/* Feed one token through the section_parse machine; endSection() calls
 * this with type == -1 to trigger the EOF actions.
 * NOTE(review): the machine-execution body is elided in this extraction. */
486 void Scanner::processToken( int type, char *tokdata, int toklen )
499 machine section_parse;
505 /* Record the last token for use in controlling the scan of subsequent
/* Mark the beginning of a ragel section: reset the one-shot error latch
 * and record where the section starts. */
510 void Scanner::startSection( )
512 parserExistsError = false;
514 sectionLoc.fileName = fileName;
515 sectionLoc.line = line;
516 sectionLoc.col = column;
/* Close a ragel section: run the section parser's EOF actions, send
 * TK_EndSection to the parser, and at the bottom of the include stack
 * queue a HostData input item for the host text that follows. */
519 void Scanner::endSection( )
521 /* Execute the eof actions for the section parser. */
522 processToken( -1, 0, 0 );
524 /* Close off the section with the parser. */
527 loc.fileName = fileName;
531 parser->token( loc, TK_EndSection, 0, 0 );
534 if ( includeDepth == 0 ) {
535 if ( machineSpec == 0 && machineName == 0 ) {
536 /* The end section may include a newline on the end, so
537 * we use the last line, which will count the newline. */
538 InputItem *inputItem = new InputItem;
539 inputItem->type = InputItem::HostData;
540 inputItem->loc.line = line;
541 inputItem->loc.col = column;
542 id.inputItems.append( inputItem );
/* Is the path absolute: drive-letter form ("C:\") on Windows, a leading
 * '/' elsewhere. NOTE(review): the platform #if lines selecting between
 * the two returns are elided in this extraction. */
547 bool isAbsolutePath( const char *path )
550 return isalpha( path[0] ) && path[1] == ':' && path[2] == '\\';
552 return path[0] == '/';
/* Build the NULL-terminated list of candidate paths for an include or
 * import: the literal as given when absolute, otherwise relative to the
 * directory of the current file, then each include path supplied on the
 * command line. The caller owns the returned array and its strings. */
556 char **Scanner::makeIncludePathChecks( const char *thisFileName,
557 const char *fileName, int fnlen )
562 bool caseInsensitive = false;
/* Strip the quoting/escapes from the literal form of the file name. */
563 char *data = prepareLitString( InputLoc(), fileName, fnlen,
564 length, caseInsensitive );
567 if ( isAbsolutePath( data ) ) {
568 checks = new char*[2];
569 checks[nextCheck++] = data;
572 checks = new char*[2 + id.includePaths.length()];
574 /* Search from the location of the current file. */
575 const char *lastSlash = strrchr( thisFileName, PATH_SEP );
576 if ( lastSlash == 0 )
577 checks[nextCheck++] = data;
/* Prefix the name with the directory portion of the current file. */
579 long givenPathLen = (lastSlash - thisFileName) + 1;
580 long checklen = givenPathLen + length;
581 char *check = new char[checklen+1];
582 memcpy( check, thisFileName, givenPathLen );
583 memcpy( check+givenPathLen, data, length );
585 checks[nextCheck++] = check;
588 /* Search from the include paths given on the command line. */
589 for ( ArgsVector::Iter incp = id.includePaths; incp.lte(); incp++ ) {
590 long pathLen = strlen( *incp );
591 long checkLen = pathLen + 1 + length;
592 char *check = new char[checkLen+1];
593 memcpy( check, *incp, pathLen );
594 check[pathLen] = PATH_SEP;
595 memcpy( check+pathLen+1, data, length );
597 checks[nextCheck++] = check;
/* Terminate the candidate list. */
601 checks[nextCheck] = 0;
/* Try each candidate path in turn; on success set `found` to the index
 * of the path that opened and return the stream. */
605 ifstream *Scanner::tryOpenInclude( char **pathChecks, long &found )
607 char **check = pathChecks;
608 ifstream *inFile = new ifstream;
610 while ( *check != 0 ) {
611 inFile->open( *check );
612 if ( inFile->is_open() ) {
613 found = check - pathChecks;
627 # This is sent by the driver code.
637 # Identifiers, numbers, comments, and other common things.
638 ident = ( alpha | '_' ) ( alpha |digit |'_' )*;
640 hex_number = '0x' [0-9a-fA-F]+;
# C comments may span lines; :>> stops at the first terminator.
643 '/*' ( any | NL )* :>> '*/';
648 c_cpp_comment = c_comment | cpp_comment;
650 ruby_comment = '#' [^\n]* NL;
652 # These literal forms are common to host code and ragel.
653 s_literal = "'" ([^'\\] | NL | '\\' (any | NL))* "'";
654 d_literal = '"' ([^"\\] | NL | '\\' (any | NL))* '"';
655 host_re_literal = '/' ([^/\\] | NL | '\\' (any | NL))* '/';
657 whitespace = [ \t] | NL;
658 pound_comment = '#' [^\n]* NL;
660 # An inline block of code for Ruby.
# Longest-match scanner for host code embedded in a Ruby-host spec.
# NOTE(review): many rules are elided in this extraction; the bare
# "whitespaceOn = false;" lines are action-body fragments of fXXX keywords.
661 inline_code_ruby := |*
662 # Inline expression keywords.
663 "fpc" => { token( KW_PChar ); };
664 "fc" => { token( KW_Char ); };
665 "fcurs" => { token( KW_CurState ); };
666 "ftargs" => { token( KW_TargState ); };
668 whitespaceOn = false;
672 # Inline statement keywords.
674 whitespaceOn = false;
677 "fexec" => { token( KW_Exec, 0, 0 ); };
679 whitespaceOn = false;
683 whitespaceOn = false;
687 whitespaceOn = false;
691 whitespaceOn = false;
695 whitespaceOn = false;
# Identifiers, numbers and literals are passed on with their text.
699 ident => { token( TK_Word, ts, te ); };
701 number => { token( TK_UInt, ts, te ); };
702 hex_number => { token( TK_Hex, ts, te ); };
704 ( s_literal | d_literal | host_re_literal )
705 => { token( IL_Literal, ts, te ); };
709 token( IL_WhiteSpace, ts, te );
712 ruby_comment => { token( IL_Comment, ts, te ); };
714 "::" => { token( TK_NameSep, ts, te ); };
716 # Some symbols need to go to the parser as with their cardinal value as
717 # the token type (as opposed to being sent as anonymous symbols)
718 # because they are part of the sequences which we interpret. The * ) ;
719 # symbols cause whitespace parsing to come back on. This gets turned
720 # off by some keywords.
724 token( *ts, ts, te );
725 if ( inlineBlockType == SemiTerminated )
731 token( *ts, ts, te );
734 [,(] => { token( *ts, ts, te ); };
737 token( IL_Symbol, ts, te );
/* Closing brace: only ends the block when it balances the open count. */
742 if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
743 /* Inline code block ends. */
748 /* Either a semi terminated inline block or only the closing
749 * brace of some inner scope, not the block's closing brace. */
750 token( IL_Symbol, ts, te );
755 scan_error() << "unterminated code block" << endl;
758 # Send every other character as a symbol.
759 any => { token( IL_Symbol, ts, te ); };
763 # An inline block of code for languages other than Ruby.
# Same structure as inline_code_ruby but with C/C++-style comments and
# no host regex literal. NOTE(review): rules elided in this extraction;
# the bare "whitespaceOn = false;" lines are fXXX action-body fragments.
765 # Inline expression keywords.
766 "fpc" => { token( KW_PChar ); };
767 "fc" => { token( KW_Char ); };
768 "fcurs" => { token( KW_CurState ); };
769 "ftargs" => { token( KW_TargState ); };
771 whitespaceOn = false;
775 # Inline statement keywords.
777 whitespaceOn = false;
780 "fexec" => { token( KW_Exec, 0, 0 ); };
782 whitespaceOn = false;
786 whitespaceOn = false;
790 whitespaceOn = false;
794 whitespaceOn = false;
798 whitespaceOn = false;
# Identifiers, numbers and literals are passed on with their text.
802 ident => { token( TK_Word, ts, te ); };
804 number => { token( TK_UInt, ts, te ); };
805 hex_number => { token( TK_Hex, ts, te ); };
807 ( s_literal | d_literal )
808 => { token( IL_Literal, ts, te ); };
812 token( IL_WhiteSpace, ts, te );
815 c_cpp_comment => { token( IL_Comment, ts, te ); };
817 "::" => { token( TK_NameSep, ts, te ); };
819 # Some symbols need to go to the parser as with their cardinal value as
820 # the token type (as opposed to being sent as anonymous symbols)
821 # because they are part of the sequences which we interpret. The * ) ;
822 # symbols cause whitespace parsing to come back on. This gets turned
823 # off by some keywords.
827 token( *ts, ts, te );
828 if ( inlineBlockType == SemiTerminated )
834 token( *ts, ts, te );
837 [,(] => { token( *ts, ts, te ); };
840 token( IL_Symbol, ts, te );
/* Closing brace: only ends the block when it balances the open count. */
845 if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
846 /* Inline code block ends. */
851 /* Either a semi terminated inline block or only the closing
852 * brace of some inner scope, not the block's closing brace. */
853 token( IL_Symbol, ts, te );
858 scan_error() << "unterminated code block" << endl;
861 # Send every other character as a symbol.
862 any => { token( IL_Symbol, ts, te ); };
# Scanner for the inside of a bracketed OR literal "[...]"; entered via
# fcall from the spec scanners and left with fret on ']'.
866 # Escape sequences in OR expressions.
867 '\\0' => { token( RE_Char, '\0' ); };
868 '\\a' => { token( RE_Char, '\a' ); };
869 '\\b' => { token( RE_Char, '\b' ); };
870 '\\t' => { token( RE_Char, '\t' ); };
871 '\\n' => { token( RE_Char, '\n' ); };
872 '\\v' => { token( RE_Char, '\v' ); };
873 '\\f' => { token( RE_Char, '\f' ); };
874 '\\r' => { token( RE_Char, '\r' ); };
# A backslash-newline is a line continuation, not a character.
875 '\\\n' => { updateCol(); };
876 '\\' any => { token( RE_Char, ts+1, te ); };
878 # Range dash in an OR expression.
879 '-' => { token( RE_Dash, 0, 0 ); };
881 # Terminate an OR expression.
882 ']' => { token( RE_SqClose ); fret; };
885 scan_error() << "unterminated OR literal" << endl;
888 # Characters in an OR expression.
889 [^\]] => { token( RE_Char, ts, te ); };
# Scanner for the inside of a /.../ regular-expression literal.
893 ragel_re_literal := |*
894 # Escape sequences in regular expressions.
895 '\\0' => { token( RE_Char, '\0' ); };
896 '\\a' => { token( RE_Char, '\a' ); };
897 '\\b' => { token( RE_Char, '\b' ); };
898 '\\t' => { token( RE_Char, '\t' ); };
899 '\\n' => { token( RE_Char, '\n' ); };
900 '\\v' => { token( RE_Char, '\v' ); };
901 '\\f' => { token( RE_Char, '\f' ); };
902 '\\r' => { token( RE_Char, '\r' ); };
# A backslash-newline is a line continuation, not a character.
903 '\\\n' => { updateCol(); };
904 '\\' any => { token( RE_Char, ts+1, te ); };
906 # Terminate an OR expression.
908 token( RE_Slash, ts, te );
912 # Special characters.
913 '.' => { token( RE_Dot ); };
914 '*' => { token( RE_Star ); };
# Bracket class inside the regex: recurse into or_literal.
916 '[' => { token( RE_SqOpen ); fcall or_literal; };
917 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
920 scan_error() << "unterminated regular expression" << endl;
923 # Characters in an OR expression.
924 [^\/] => { token( RE_Char, ts, te ); };
927 # We need a separate token space here to avoid the ragel keywords.
# Words after "write" are sent as TK_Word; ';' returns to parser_def.
928 write_statement := |*
929 ident => { token( TK_Word, ts, te ); } ;
930 [ \t\n]+ => { updateCol(); };
931 ';' => { token( ';' ); fgoto parser_def; };
934 scan_error() << "unterminated write statement" << endl;
938 # Parser definitions.
# The main token scanner for the inside of a ragel spec section.
# NOTE(review): many rules are elided in this extraction; bare statement
# lines below are fragments of keyword action bodies.
940 #'length_cond' => { token( KW_Length ); };
941 'machine' => { token( KW_Machine ); };
942 'include' => { token( KW_Include ); };
943 'import' => { token( KW_Import ); };
944 fgoto write_statement;
948 'action' => { token( KW_Action ); };
949 'alphtype' => { token( KW_AlphType ); };
950 'prepush' => { token( KW_PrePush ); };
951 'postpop' => { token( KW_PostPop ); };
953 # FIXME: Enable this post 5.17.
954 # 'range' => { token( KW_Range ); };
# Keywords followed by a semi-terminated inline block of host code;
# the Ruby host language uses a separate inline scanner.
958 inlineBlockType = SemiTerminated;
959 if ( hostLang->lang == HostLang::Ruby )
960 fcall inline_code_ruby;
966 inlineBlockType = SemiTerminated;
967 if ( hostLang->lang == HostLang::Ruby )
968 fcall inline_code_ruby;
973 token( KW_Variable );
974 inlineBlockType = SemiTerminated;
975 if ( hostLang->lang == HostLang::Ruby )
976 fcall inline_code_ruby;
980 'when' => { token( KW_When ); };
981 'inwhen' => { token( KW_InWhen ); };
982 'outwhen' => { token( KW_OutWhen ); };
983 'eof' => { token( KW_Eof ); };
984 'err' => { token( KW_Err ); };
985 'lerr' => { token( KW_Lerr ); };
986 'to' => { token( KW_To ); };
987 'from' => { token( KW_From ); };
988 'export' => { token( KW_Export ); };
991 ident => { token( TK_Word, ts, te ); } ;
994 number => { token( TK_UInt, ts, te ); };
995 hex_number => { token( TK_Hex, ts, te ); };
997 # Literals, with optionals.
998 ( s_literal | d_literal ) [i]?
999 => { token( TK_Literal, ts, te ); };
# Bracket classes and regexes hand off to the dedicated scanners.
1001 '[' => { token( RE_SqOpen ); fcall or_literal; };
1002 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
1004 '/' => { token( RE_Slash ); fgoto ragel_re_literal; };
1007 pound_comment => { updateCol(); };
1009 ':=' => { token( TK_ColonEquals ); };
# To-state actions.
1012 ">~" => { token( TK_StartToState ); };
1013 "$~" => { token( TK_AllToState ); };
1014 "%~" => { token( TK_FinalToState ); };
1015 "<~" => { token( TK_NotStartToState ); };
1016 "@~" => { token( TK_NotFinalToState ); };
1017 "<>~" => { token( TK_MiddleToState ); };
1019 # From State actions
1020 ">*" => { token( TK_StartFromState ); };
1021 "$*" => { token( TK_AllFromState ); };
1022 "%*" => { token( TK_FinalFromState ); };
1023 "<*" => { token( TK_NotStartFromState ); };
1024 "@*" => { token( TK_NotFinalFromState ); };
1025 "<>*" => { token( TK_MiddleFromState ); };
# EOF actions.
1028 ">/" => { token( TK_StartEOF ); };
1029 "$/" => { token( TK_AllEOF ); };
1030 "%/" => { token( TK_FinalEOF ); };
1031 "</" => { token( TK_NotStartEOF ); };
1032 "@/" => { token( TK_NotFinalEOF ); };
1033 "<>/" => { token( TK_MiddleEOF ); };
1035 # Global Error actions.
1036 ">!" => { token( TK_StartGblError ); };
1037 "$!" => { token( TK_AllGblError ); };
1038 "%!" => { token( TK_FinalGblError ); };
1039 "<!" => { token( TK_NotStartGblError ); };
1040 "@!" => { token( TK_NotFinalGblError ); };
1041 "<>!" => { token( TK_MiddleGblError ); };
1043 # Local error actions.
1044 ">^" => { token( TK_StartLocalError ); };
1045 "$^" => { token( TK_AllLocalError ); };
1046 "%^" => { token( TK_FinalLocalError ); };
1047 "<^" => { token( TK_NotStartLocalError ); };
1048 "@^" => { token( TK_NotFinalLocalError ); };
1049 "<>^" => { token( TK_MiddleLocalError ); };
1052 "<>" => { token( TK_Middle ); };
# Condition embedding operators.
1055 '>?' => { token( TK_StartCond ); };
1056 '$?' => { token( TK_AllCond ); };
1057 '%?' => { token( TK_LeavingCond ); };
# Multi-character machine-construction operators.
1059 '..' => { token( TK_DotDot ); };
1060 '**' => { token( TK_StarStar ); };
1061 '--' => { token( TK_DashDash ); };
1062 '->' => { token( TK_Arrow ); };
1063 '=>' => { token( TK_DoubleArrow ); };
1065 ":>" => { token( TK_ColonGt ); };
1066 ":>>" => { token( TK_ColonGtGt ); };
1067 "<:" => { token( TK_LtColon ); };
1069 # Opening of longest match.
1070 "|*" => { token( TK_BarStar ); };
1072 # Separater for name references.
1073 "::" => { token( TK_NameSep, ts, te ); };
1081 [ \t\r]+ => { updateCol(); };
1083 # If we are in a single line machine then newline may end the spec.
1086 if ( singleLineSpec ) {
1093 if ( lastToken == KW_Export || lastToken == KW_Entry )
# '{' opens a curly-delimited inline code block.
1098 inlineBlockType = CurlyDelimited;
1099 if ( hostLang->lang == HostLang::Ruby )
1100 fcall inline_code_ruby;
1107 scan_error() << "unterminated ragel section" << endl;
1110 any => { token( *ts ); } ;
1113 # Outside code scanner. These tokens get passed through.
# Host-code scanner for Ruby hosts: everything is pass()ed through,
# with special handling for literals, comments, and spec delimiters.
1115 ident => { pass( IMP_Word, ts, te ); };
1116 number => { pass( IMP_UInt, ts, te ); };
1117 ruby_comment => { pass(); };
1118 ( s_literal | d_literal | host_re_literal )
1119 => { pass( IMP_Literal, ts, te ); };
# Multi-line vs single-line spec delimiters set singleLineSpec.
1123 singleLineSpec = false;
1129 singleLineSpec = true;
1133 whitespace+ => { pass(); };
1135 any => { pass( *ts, 0, 0 ); };
1138 # Outside code scanner. These tokens get passed through.
# Host-code scanner for non-Ruby hosts; 'define' is recognized so that
# imported constant definitions can be picked up.
1140 'define' => { pass( IMP_Define, 0, 0 ); };
1141 ident => { pass( IMP_Word, ts, te ); };
1142 number => { pass( IMP_UInt, ts, te ); };
1143 c_cpp_comment => { pass(); };
1144 ( s_literal | d_literal ) => { pass( IMP_Literal, ts, te ); };
# Multi-line vs single-line spec delimiters set singleLineSpec.
1148 singleLineSpec = false;
1154 singleLineSpec = true;
1158 whitespace+ => { pass(); };
1160 any => { pass( *ts, 0, 0 ); };
/* Main scan driver: reads the input in chunks into a growable buffer,
 * runs the character scanner over it, and shifts any partially-matched
 * token to the front of the buffer between reads.
 * NOTE(review): this function continues past the end of this extraction
 * and many lines within it are elided. */
1166 void Scanner::do_scan()
1169 char *buf = new char[bufsize];
1170 int cs, act, have = 0;
1173 /* The stack is two deep, one level for going into ragel defs from the main
1174 * machines which process outside code, and another for going into or literals
1175 * from either a ragel spec, or a regular expression. */
1177 int curly_count = 0;
1178 bool execute = true;
1179 bool singleLineSpec = false;
1180 InlineBlockType inlineBlockType = CurlyDelimited;
1182 /* Init the section parser and the character scanner. */
1186 /* Set up the start state. FIXME: After 5.20 is released the nocs write
1187 * init option should be used, the main machine eliminated and this statement moved
1188 * above the write init. */
1189 if ( hostLang->lang == HostLang::Ruby )
1190 cs = rlscan_en_main_ruby;
1192 cs = rlscan_en_main;
/* p/space: where to read into and how much room remains. */
1195 char *p = buf + have;
1196 int space = bufsize - have;
1199 /* We filled up the buffer trying to scan a token. Grow it. */
1200 bufsize = bufsize * 2;
1201 char *newbuf = new char[bufsize];
1203 /* Recompute p and space. */
1205 space = bufsize - have;
1207 /* Patch up pointers possibly in use. */
1209 ts = newbuf + ( ts - buf );
1210 te = newbuf + ( te - buf );
1212 /* Copy the new buffer in. */
1213 memcpy( newbuf, buf, have );
/* Fill the free space from the input stream. */
1218 input.read( p, space );
1219 int len = input.gcount();
1222 /* If we see eof then append the eof var. */
1231 /* Check if we failed. */
1232 if ( cs == rlscan_error ) {
1233 /* Machine failed before finding a token. I'm not yet sure if this
1235 scan_error() << "scanner error" << endl;
1239 /* Decide if we need to preserve anything. */
1240 char *preserve = ts;
1242 /* Now set up the prefix. */
1243 if ( preserve == 0 )
1246 /* There is data that needs to be shifted over. */
1247 have = pe - preserve;
1248 memmove( buf, preserve, have );
1249 unsigned int shiftback = preserve - buf;