2 * Copyright 2006-2007 Adrian Thurston <thurston@cs.queensu.ca>
5 /* This file is part of Ragel.
7 * Ragel is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * Ragel is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Ragel; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 * The Scanner for Importing
50 #define IMP_Literal 129
52 #define IMP_Define 131
55 machine inline_token_scan;
66 IMP_Define IMP_Word IMP_UInt => {
67 int base = tok_tokstart - token_data;
71 directToParser( inclToParser, fileName, line, column, TK_Word,
72 token_strings[base+nameOff], token_lens[base+nameOff] );
73 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
74 directToParser( inclToParser, fileName, line, column, TK_UInt,
75 token_strings[base+numOff], token_lens[base+numOff] );
76 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
79 # Assignment of number.
80 IMP_Word '=' IMP_UInt => {
81 int base = tok_tokstart - token_data;
85 directToParser( inclToParser, fileName, line, column, TK_Word,
86 token_strings[base+nameOff], token_lens[base+nameOff] );
87 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
88 directToParser( inclToParser, fileName, line, column, TK_UInt,
89 token_strings[base+numOff], token_lens[base+numOff] );
90 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
94 IMP_Define IMP_Word IMP_Literal => {
95 int base = tok_tokstart - token_data;
99 directToParser( inclToParser, fileName, line, column, TK_Word,
100 token_strings[base+nameOff], token_lens[base+nameOff] );
101 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
102 directToParser( inclToParser, fileName, line, column, TK_Literal,
103 token_strings[base+litOff], token_lens[base+litOff] );
104 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
107 # Assignment of literal.
108 IMP_Word '=' IMP_Literal => {
109 int base = tok_tokstart - token_data;
113 directToParser( inclToParser, fileName, line, column, TK_Word,
114 token_strings[base+nameOff], token_lens[base+nameOff] );
115 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
116 directToParser( inclToParser, fileName, line, column, TK_Literal,
117 token_strings[base+litOff], token_lens[base+litOff] );
118 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
121 # Catch everything else.
128 void Scanner::flushImport()
131 int *pe = token_data + cur_token;
136 if ( tok_tokstart == 0 )
139 cerr << "BLOCK BREAK" << endl;
140 cur_token = pe - tok_tokstart;
141 int ts_offset = tok_tokstart - token_data;
142 memmove( token_data, token_data+ts_offset, cur_token*sizeof(token_data[0]) );
143 memmove( token_strings, token_strings+ts_offset, cur_token*sizeof(token_strings[0]) );
144 memmove( token_lens, token_lens+ts_offset, cur_token*sizeof(token_lens[0]) );
148 void Scanner::directToParser( Parser *toParser, char *tokFileName, int tokLine,
149 int tokColumn, int type, char *tokdata, int toklen )
154 cerr << "scanner:" << tokLine << ":" << tokColumn <<
155 ": sending token to the parser " << Parser_lelNames[type];
156 cerr << " " << toklen;
158 cerr << " " << tokdata;
162 loc.fileName = tokFileName;
166 toParser->token( loc, type, tokdata, toklen );
169 void Scanner::importToken( int token, char *start, char *end )
171 if ( cur_token == max_tokens )
174 token_data[cur_token] = token;
176 token_strings[cur_token] = 0;
177 token_lens[cur_token] = 0;
180 int toklen = end-start;
181 token_lens[cur_token] = toklen;
182 token_strings[cur_token] = new char[toklen+1];
183 memcpy( token_strings[cur_token], start, toklen );
184 token_strings[cur_token][toklen] = 0;
189 void Scanner::pass( int token, char *start, char *end )
191 if ( importMachines )
192 importToken( token, start, end );
200 /* If no errors and we are at the bottom of the include stack (the
201 * source file listed on the command line) then write out the data. */
202 if ( includeDepth == 0 && machineSpec == 0 && machineName == 0 )
203 xmlEscapeHost( output, tokstart, tokend-tokstart );
207 * The scanner for processing sections, includes, imports, etc.
211 machine section_parse;
217 void Scanner::init( )
222 bool Scanner::active()
227 if ( parser == 0 && ! parserExistsError ) {
228 scan_error() << "there is no previous specification name" << endl;
229 parserExistsError = true;
238 ostream &Scanner::scan_error()
240 /* Maintain the error count. */
242 cerr << fileName << ":" << line << ":" << column << ": ";
246 bool Scanner::recursiveInclude( char *inclFileName, char *inclSectionName )
248 for ( IncludeStack::Iter si = includeStack; si.lte(); si++ ) {
249 if ( strcmp( si->fileName, inclFileName ) == 0 &&
250 strcmp( si->sectionName, inclSectionName ) == 0 )
258 void Scanner::updateCol()
263 //cerr << "adding " << tokend - from << " to column" << endl;
264 column += tokend - from;
269 machine section_parse;
271 # This relies on the kelbt implementation and the order
272 # that tokens are declared.
280 action clear_words { word = lit = 0; word_len = lit_len = 0; }
281 action store_word { word = tokdata; word_len = toklen; }
282 action store_lit { lit = tokdata; lit_len = toklen; }
284 action mach_err { scan_error() << "bad machine statement" << endl; }
285 action incl_err { scan_error() << "bad include statement" << endl; }
286 action import_err { scan_error() << "bad import statement" << endl; }
287 action write_err { scan_error() << "bad write statement" << endl; }
289 action handle_machine
291 /* Assign a name to the machine. */
292 char *machine = word;
294 if ( !importMachines && inclSectionTarg == 0 ) {
295 ignoreSection = false;
297 ParserDictEl *pdEl = parserDict.find( machine );
299 pdEl = new ParserDictEl( machine );
300 pdEl->value = new Parser( fileName, machine, sectionLoc );
302 parserDict.insert( pdEl );
305 parser = pdEl->value;
307 else if ( !importMachines && strcmp( inclSectionTarg, machine ) == 0 ) {
308 /* found include target */
309 ignoreSection = false;
310 parser = inclToParser;
313 /* ignoring section */
314 ignoreSection = true;
320 ( KW_Machine TK_Word @store_word ';' ) @handle_machine
321 <>err mach_err <>eof mach_err;
323 action handle_include
326 char *inclSectionName = word;
327 char *inclFileName = 0;
329 /* Implement defaults for the input file and section name. */
330 if ( inclSectionName == 0 )
331 inclSectionName = parser->sectionName;
334 inclFileName = prepareFileName( lit, lit_len );
336 inclFileName = fileName;
338 /* Check for a recursive include structure. Add the current file/section
339 * name then check if what we are including is already in the stack. */
340 includeStack.append( IncludeStackItem( fileName, parser->sectionName ) );
342 if ( recursiveInclude( inclFileName, inclSectionName ) )
343 scan_error() << "include: this is a recursive include operation" << endl;
345 /* Open the input file for reading. */
346 ifstream *inFile = new ifstream( inclFileName );
347 if ( ! inFile->is_open() ) {
348 scan_error() << "include: could not open " <<
349 inclFileName << " for reading" << endl;
352 Scanner scanner( inclFileName, *inFile, output, parser,
353 inclSectionName, includeDepth+1, false );
358 /* Remove the last element (len-1) */
359 includeStack.remove( -1 );
364 TK_Word @store_word ( TK_Literal @store_lit )? |
365 TK_Literal @store_lit
369 ( KW_Include include_names ';' ) @handle_include
370 <>err incl_err <>eof incl_err;
375 char *importFileName = prepareFileName( lit, lit_len );
377 /* Open the input file for reading. */
378 ifstream *inFile = new ifstream( importFileName );
379 if ( ! inFile->is_open() ) {
380 scan_error() << "import: could not open " <<
381 importFileName << " for reading" << endl;
384 Scanner scanner( importFileName, *inFile, output, parser,
385 0, includeDepth+1, true );
387 scanner.importToken( 0, 0, 0 );
388 scanner.flushImport();
394 ( KW_Import TK_Literal @store_lit ';' ) @handle_import
395 <>err import_err <>eof import_err;
399 if ( active() && machineSpec == 0 && machineName == 0 ) {
401 " def_name=\"" << parser->sectionName << "\""
402 " line=\"" << line << "\""
403 " col=\"" << column << "\""
410 if ( active() && machineSpec == 0 && machineName == 0 )
411 output << "<arg>" << tokdata << "</arg>";
416 if ( active() && machineSpec == 0 && machineName == 0 )
417 output << "</write>\n";
421 ( KW_Write @write_command
422 ( TK_Word @write_arg )+ ';' @write_close )
423 <>err write_err <>eof write_err;
427 /* Send the token off to the parser. */
429 directToParser( parser, fileName, line, column, type, tokdata, toklen );
432 # Catch everything else.
434 ^( KW_Machine | KW_Include | KW_Import | KW_Write ) @handle_token;
445 void Scanner::token( int type, char c )
447 token( type, &c, &c + 1 );
450 void Scanner::token( int type )
455 void Scanner::token( int type, char *start, char *end )
461 tokdata = new char[toklen+1];
462 memcpy( tokdata, start, toklen );
466 processToken( type, tokdata, toklen );
469 void Scanner::processToken( int type, char *tokdata, int toklen )
475 machine section_parse;
481 /* Record the last token for use in controlling the scan of subsequent
486 void Scanner::startSection( )
488 parserExistsError = false;
490 if ( includeDepth == 0 ) {
491 if ( machineSpec == 0 && machineName == 0 )
492 output << "</host>\n";
495 sectionLoc.fileName = fileName;
496 sectionLoc.line = line;
500 void Scanner::endSection( )
502 /* Execute the eof actions for the section parser. */
504 machine section_parse;
508 /* Close off the section with the parser. */
511 loc.fileName = fileName;
515 parser->token( loc, TK_EndSection, 0, 0 );
518 if ( includeDepth == 0 ) {
519 if ( machineSpec == 0 && machineName == 0 ) {
520 /* The end section may include a newline on the end, so
521 * we use the last line, which will count the newline. */
522 output << "<host line=\"" << line << "\">";
530 # This is sent by the driver code.
540 # Identifiers, numbers, comments, and other common things.
541 ident = ( alpha | '_' ) ( alpha |digit |'_' )*;
543 hex_number = '0x' [0-9a-fA-F]+;
546 '/*' ( any | NL )* :>> '*/';
551 c_cpp_comment = c_comment | cpp_comment;
553 # These literal forms are common to C-like host code and ragel.
554 s_literal = "'" ([^'\\] | NL | '\\' (any | NL))* "'";
555 d_literal = '"' ([^"\\] | NL | '\\' (any | NL))* '"';
557 whitespace = [ \t] | NL;
558 pound_comment = '#' [^\n]* NL;
560 # An inline block of code. This is specified as a scanner, but is sent to
561 # the parser as one long block. The inline_block pointer is used to handle
562 # the preservation of the data.
564 # Inline expression keywords.
565 "fpc" => { token( KW_PChar ); };
566 "fc" => { token( KW_Char ); };
567 "fcurs" => { token( KW_CurState ); };
568 "ftargs" => { token( KW_TargState ); };
570 whitespaceOn = false;
574 # Inline statement keywords.
576 whitespaceOn = false;
579 "fexec" => { token( KW_Exec, 0, 0 ); };
581 whitespaceOn = false;
585 whitespaceOn = false;
589 whitespaceOn = false;
593 whitespaceOn = false;
597 whitespaceOn = false;
601 ident => { token( TK_Word, tokstart, tokend ); };
603 number => { token( TK_UInt, tokstart, tokend ); };
604 hex_number => { token( TK_Hex, tokstart, tokend ); };
606 ( s_literal | d_literal )
607 => { token( IL_Literal, tokstart, tokend ); };
611 token( IL_WhiteSpace, tokstart, tokend );
613 c_cpp_comment => { token( IL_Comment, tokstart, tokend ); };
615 "::" => { token( TK_NameSep, tokstart, tokend ); };
617 # Some symbols need to go to the parser with their cardinal value as
618 # the token type (as opposed to being sent as anonymous symbols)
619 # because they are part of the sequences which we interpret. The * ) ;
620 # symbols cause whitespace parsing to come back on. This gets turned
621 # off by some keywords.
625 token( *tokstart, tokstart, tokend );
626 if ( inlineBlockType == SemiTerminated )
632 token( *tokstart, tokstart, tokend );
635 [,(] => { token( *tokstart, tokstart, tokend ); };
638 token( IL_Symbol, tokstart, tokend );
643 if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
644 /* Inline code block ends. */
649 /* Either a semi terminated inline block or only the closing
650 * brace of some inner scope, not the block's closing brace. */
651 token( IL_Symbol, tokstart, tokend );
656 scan_error() << "unterminated code block" << endl;
659 # Send every other character as a symbol.
660 any => { token( IL_Symbol, tokstart, tokend ); };
664 # Escape sequences in OR expressions.
665 '\\0' => { token( RE_Char, '\0' ); };
666 '\\a' => { token( RE_Char, '\a' ); };
667 '\\b' => { token( RE_Char, '\b' ); };
668 '\\t' => { token( RE_Char, '\t' ); };
669 '\\n' => { token( RE_Char, '\n' ); };
670 '\\v' => { token( RE_Char, '\v' ); };
671 '\\f' => { token( RE_Char, '\f' ); };
672 '\\r' => { token( RE_Char, '\r' ); };
673 '\\\n' => { updateCol(); };
674 '\\' any => { token( RE_Char, tokstart+1, tokend ); };
676 # Range dash in an OR expression.
677 '-' => { token( RE_Dash, 0, 0 ); };
679 # Terminate an OR expression.
680 ']' => { token( RE_SqClose ); fret; };
683 scan_error() << "unterminated OR literal" << endl;
686 # Characters in an OR expression.
687 [^\]] => { token( RE_Char, tokstart, tokend ); };
692 # Escape sequences in regular expressions.
693 '\\0' => { token( RE_Char, '\0' ); };
694 '\\a' => { token( RE_Char, '\a' ); };
695 '\\b' => { token( RE_Char, '\b' ); };
696 '\\t' => { token( RE_Char, '\t' ); };
697 '\\n' => { token( RE_Char, '\n' ); };
698 '\\v' => { token( RE_Char, '\v' ); };
699 '\\f' => { token( RE_Char, '\f' ); };
700 '\\r' => { token( RE_Char, '\r' ); };
701 '\\\n' => { updateCol(); };
702 '\\' any => { token( RE_Char, tokstart+1, tokend ); };
704 # Terminate an OR expression.
706 token( RE_Slash, tokstart, tokend );
710 # Special characters.
711 '.' => { token( RE_Dot ); };
712 '*' => { token( RE_Star ); };
714 '[' => { token( RE_SqOpen ); fcall or_literal; };
715 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
718 scan_error() << "unterminated regular expression" << endl;
721 # Characters in an OR expression.
722 [^\/] => { token( RE_Char, tokstart, tokend ); };
725 # We need a separate token space here to avoid the ragel keywords.
726 write_statement := |*
727 ident => { token( TK_Word, tokstart, tokend ); } ;
728 [ \t\n]+ => { updateCol(); };
729 ';' => { token( ';' ); fgoto parser_def; };
732 scan_error() << "unterminated write statement" << endl;
736 # Parser definitions.
738 'machine' => { token( KW_Machine ); };
739 'include' => { token( KW_Include ); };
740 'import' => { token( KW_Import ); };
743 fgoto write_statement;
745 'action' => { token( KW_Action ); };
746 'alphtype' => { token( KW_AlphType ); };
748 # FIXME: Enable this post 5.17.
749 # 'range' => { token( KW_Range ); };
753 inlineBlockType = SemiTerminated;
758 inlineBlockType = SemiTerminated;
762 token( KW_Variable );
763 inlineBlockType = SemiTerminated;
766 'when' => { token( KW_When ); };
767 'eof' => { token( KW_Eof ); };
768 'err' => { token( KW_Err ); };
769 'lerr' => { token( KW_Lerr ); };
770 'to' => { token( KW_To ); };
771 'from' => { token( KW_From ); };
772 'export' => { token( KW_Export ); };
775 ident => { token( TK_Word, tokstart, tokend ); } ;
778 number => { token( TK_UInt, tokstart, tokend ); };
779 hex_number => { token( TK_Hex, tokstart, tokend ); };
781 # Literals, with optionals.
782 ( s_literal | d_literal ) [i]?
783 => { token( TK_Literal, tokstart, tokend ); };
785 '[' => { token( RE_SqOpen ); fcall or_literal; };
786 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
788 '/' => { token( RE_Slash ); fgoto re_literal; };
791 pound_comment => { updateCol(); };
793 ':=' => { token( TK_ColonEquals ); };
796 ">~" => { token( TK_StartToState ); };
797 "$~" => { token( TK_AllToState ); };
798 "%~" => { token( TK_FinalToState ); };
799 "<~" => { token( TK_NotStartToState ); };
800 "@~" => { token( TK_NotFinalToState ); };
801 "<>~" => { token( TK_MiddleToState ); };
804 ">*" => { token( TK_StartFromState ); };
805 "$*" => { token( TK_AllFromState ); };
806 "%*" => { token( TK_FinalFromState ); };
807 "<*" => { token( TK_NotStartFromState ); };
808 "@*" => { token( TK_NotFinalFromState ); };
809 "<>*" => { token( TK_MiddleFromState ); };
812 ">/" => { token( TK_StartEOF ); };
813 "$/" => { token( TK_AllEOF ); };
814 "%/" => { token( TK_FinalEOF ); };
815 "</" => { token( TK_NotStartEOF ); };
816 "@/" => { token( TK_NotFinalEOF ); };
817 "<>/" => { token( TK_MiddleEOF ); };
819 # Global Error actions.
820 ">!" => { token( TK_StartGblError ); };
821 "$!" => { token( TK_AllGblError ); };
822 "%!" => { token( TK_FinalGblError ); };
823 "<!" => { token( TK_NotStartGblError ); };
824 "@!" => { token( TK_NotFinalGblError ); };
825 "<>!" => { token( TK_MiddleGblError ); };
827 # Local error actions.
828 ">^" => { token( TK_StartLocalError ); };
829 "$^" => { token( TK_AllLocalError ); };
830 "%^" => { token( TK_FinalLocalError ); };
831 "<^" => { token( TK_NotStartLocalError ); };
832 "@^" => { token( TK_NotFinalLocalError ); };
833 "<>^" => { token( TK_MiddleLocalError ); };
836 "<>" => { token( TK_Middle ); };
839 '>?' => { token( TK_StartCond ); };
840 '$?' => { token( TK_AllCond ); };
841 '%?' => { token( TK_LeavingCond ); };
843 '..' => { token( TK_DotDot ); };
844 '**' => { token( TK_StarStar ); };
845 '--' => { token( TK_DashDash ); };
846 '->' => { token( TK_Arrow ); };
847 '=>' => { token( TK_DoubleArrow ); };
849 ":>" => { token( TK_ColonGt ); };
850 ":>>" => { token( TK_ColonGtGt ); };
851 "<:" => { token( TK_LtColon ); };
853 # Opening of longest match.
854 "|*" => { token( TK_BarStar ); };
856 # Separator for name references.
857 "::" => { token( TK_NameSep, tokstart, tokend ); };
865 [ \t\r]+ => { updateCol(); };
867 # If we are in a single line machine then newline may end the spec.
870 if ( singleLineSpec ) {
877 if ( lastToken == KW_Export || lastToken == KW_Entry )
882 inlineBlockType = CurlyDelimited;
888 scan_error() << "unterminated ragel section" << endl;
891 any => { token( *tokstart ); } ;
894 # Outside code scanner. These tokens get passed through.
896 'define' => { pass( IMP_Define, 0, 0 ); };
897 ident => { pass( IMP_Word, tokstart, tokend ); };
898 number => { pass( IMP_UInt, tokstart, tokend ); };
899 c_cpp_comment => { pass(); };
900 ( s_literal | d_literal ) => { pass( IMP_Literal, tokstart, tokend ); };
904 singleLineSpec = false;
910 singleLineSpec = true;
914 whitespace+ => { pass(); };
916 any => { pass( *tokstart, 0, 0 ); };
922 void Scanner::do_scan()
925 char *buf = new char[bufsize];
926 const char last_char = 0;
927 int cs, act, have = 0;
931 bool singleLineSpec = false;
932 InlineBlockType inlineBlockType = CurlyDelimited;
934 /* Init the section parser and the character scanner. */
939 char *p = buf + have;
940 int space = bufsize - have;
943 /* We filled up the buffer trying to scan a token. Grow it. */
944 bufsize = bufsize * 2;
945 char *newbuf = new char[bufsize];
947 /* Recompute p and space. */
949 space = bufsize - have;
951 /* Patch up pointers possibly in use. */
953 tokstart = newbuf + ( tokstart - buf );
954 tokend = newbuf + ( tokend - buf );
956 /* Copy the new buffer in. */
957 memcpy( newbuf, buf, have );
962 input.read( p, space );
963 int len = input.gcount();
965 /* If we see eof then append the EOF char. */
967 p[0] = last_char, len = 1;
974 /* Check if we failed. */
975 if ( cs == rlscan_error ) {
976 /* Machine failed before finding a token. I'm not yet sure if this
978 scan_error() << "scanner error" << endl;
982 /* Decide if we need to preserve anything. */
983 char *preserve = tokstart;
985 /* Now set up the prefix. */
989 /* There is data that needs to be shifted over. */
990 have = pe - preserve;
991 memmove( buf, preserve, have );
992 unsigned int shiftback = preserve - buf;
994 tokstart -= shiftback;
1004 void scan( char *fileName, istream &input, ostream &output )