2 * Copyright 2006-2007 Adrian Thurston <thurston@cs.queensu.ca>
5 /* This file is part of Ragel.
7 * Ragel is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * Ragel is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Ragel; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
46 * The Scanner for Importing
# Token type codes for the import scanner; presumably chosen above the
# char range so they don't collide with literal symbols — TODO confirm
# against the missing surrounding defines.
50 #define IMP_Literal 129
52 #define IMP_Define 131
# Pattern scanner run over tokens buffered by importToken(): recognizes
# short token sequences in imported host code and re-emits each match to
# inclToParser as a Ragel assignment "NAME = VALUE;".
# NOTE(review): elided listing — interior lines are missing from view.
55 machine inline_token_scan;
# Preprocessor-style definition of a number: define NAME UINT.
66 IMP_Define IMP_Word IMP_UInt => {
67 int base = tok_tokstart - token_data;
/* Re-emit the match as: NAME = UINT ; (nameOff/numOff index the
 * buffered token arrays relative to the match start). */
71 directToParser( inclToParser, fileName, line, column, TK_Word,
72 token_strings[base+nameOff], token_lens[base+nameOff] );
73 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
74 directToParser( inclToParser, fileName, line, column, TK_UInt,
75 token_strings[base+numOff], token_lens[base+numOff] );
76 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
79 # Assignment of number.
80 IMP_Word '=' IMP_UInt => {
81 int base = tok_tokstart - token_data;
/* Same re-emission as the define form: NAME = UINT ; */
85 directToParser( inclToParser, fileName, line, column, TK_Word,
86 token_strings[base+nameOff], token_lens[base+nameOff] );
87 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
88 directToParser( inclToParser, fileName, line, column, TK_UInt,
89 token_strings[base+numOff], token_lens[base+numOff] );
90 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
# Preprocessor-style definition of a literal: define NAME "LIT".
94 IMP_Define IMP_Word IMP_Literal => {
95 int base = tok_tokstart - token_data;
/* Re-emit the match as: NAME = LITERAL ; */
99 directToParser( inclToParser, fileName, line, column, TK_Word,
100 token_strings[base+nameOff], token_lens[base+nameOff] );
101 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
102 directToParser( inclToParser, fileName, line, column, TK_Literal,
103 token_strings[base+litOff], token_lens[base+litOff] );
104 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
107 # Assignment of literal.
108 IMP_Word '=' IMP_Literal => {
109 int base = tok_tokstart - token_data;
/* Same re-emission as the define form: NAME = LITERAL ; */
113 directToParser( inclToParser, fileName, line, column, TK_Word,
114 token_strings[base+nameOff], token_lens[base+nameOff] );
115 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
116 directToParser( inclToParser, fileName, line, column, TK_Literal,
117 token_strings[base+litOff], token_lens[base+litOff] );
118 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
121 # Catch everything else.
// Run the inline_token_scan machine over the buffered import tokens,
// then shift any tokens left unconsumed (from tok_tokstart onward) back
// to the front of the parallel token_data/token_strings/token_lens
// arrays so the buffer can keep filling.
128 void Scanner::flushImport()
131 int *pe = token_data + cur_token;
// No partial match pending: nothing to preserve.
136 if ( tok_tokstart == 0 )
139 cur_token = pe - tok_tokstart;
140 int ts_offset = tok_tokstart - token_data;
// memmove (not memcpy): source and destination ranges may overlap.
141 memmove( token_data, token_data+ts_offset, cur_token*sizeof(token_data[0]) );
142 memmove( token_strings, token_strings+ts_offset, cur_token*sizeof(token_strings[0]) );
143 memmove( token_lens, token_lens+ts_offset, cur_token*sizeof(token_lens[0]) );
// Send a single token straight to the given parser, bypassing the
// scanner's own token buffering. Also emits a trace line to cerr
// (presumably guarded by a logging flag in the elided lines — TODO
// confirm).
147 void Scanner::directToParser( Parser *toParser, char *tokFileName, int tokLine,
148 int tokColumn, int type, char *tokdata, int toklen )
153 cerr << "scanner:" << tokLine << ":" << tokColumn <<
154 ": sending token to the parser " << Parser_lelNames[type];
155 cerr << " " << toklen;
157 cerr << " " << tokdata;
// Build the input location handed to the parser alongside the token.
161 loc.fileName = tokFileName;
165 toParser->token( loc, type, tokdata, toklen );
// Buffer one token for later processing by flushImport(). The token's
// text [start,end) is copied into a fresh NUL-terminated string; a
// null start presumably means "no data" and leaves string/len zeroed.
168 void Scanner::importToken( int token, char *start, char *end )
// Buffer full: flush before appending (flush call is in elided lines —
// TODO confirm).
170 if ( cur_token == max_tokens )
173 token_data[cur_token] = token;
175 token_strings[cur_token] = 0;
176 token_lens[cur_token] = 0;
// Copy the token text and NUL-terminate it.
179 int toklen = end-start;
180 token_lens[cur_token] = toklen;
181 token_strings[cur_token] = new char[toklen+1];
182 memcpy( token_strings[cur_token], start, toklen );
183 token_strings[cur_token][toklen] = 0;
// Pass-through handler for host-code tokens outside ragel sections:
// when importing, buffer the token; otherwise (second overload below,
// partially elided) echo the raw text to the XML output.
188 void Scanner::pass( int token, char *start, char *end )
190 if ( importMachines )
191 importToken( token, start, end );
199 /* If no errors and we are at the bottom of the include stack (the
200 * source file listed on the command line) then write out the data. */
201 if ( includeDepth == 0 && machineSpec == 0 && machineName == 0 )
202 xmlEscapeHost( output, tokstart, tokend-tokstart );
206 * The scanner for processing sections, includes, imports, etc.
210 machine section_parse;
// Reset scanner state (body elided from this view).
216 void Scanner::init( )
// Report whether tokens should be processed; emits the "no previous
// specification name" error at most once per missing parser.
221 bool Scanner::active()
226 if ( parser == 0 && ! parserExistsError ) {
227 scan_error() << "there is no previous specification name" << endl;
228 parserExistsError = true;
// Begin an error message: prints the file:line:column prefix to cerr
// and returns the stream so callers can append the message text.
237 ostream &Scanner::scan_error()
239 /* Maintain the error count. */
241 cerr << fileName << ":" << line << ":" << column << ": ";
// Return true if the given file/section pair is already on the include
// stack, i.e. including it again would recurse forever.
245 bool Scanner::recursiveInclude( char *inclFileName, char *inclSectionName )
247 for ( IncludeStack::Iter si = includeStack; si.lte(); si++ ) {
248 if ( strcmp( si->fileName, inclFileName ) == 0 &&
249 strcmp( si->sectionName, inclSectionName ) == 0 )
// Advance the column counter by the width of the current token
// (tokend - from); used for tokens that contain no newlines.
257 void Scanner::updateCol()
262 //cerr << "adding " << tokend - from << " to column" << endl;
263 column += tokend - from;
268 machine section_parse;
270 # This relies on the kelbt implementation and the order
271 # that tokens are declared.
# Scratch state for statement parsing: word/lit capture the most recent
# TK_Word / TK_Literal token text and lengths.
279 action clear_words { word = lit = 0; word_len = lit_len = 0; }
280 action store_word { word = tokdata; word_len = toklen; }
281 action store_lit { lit = tokdata; lit_len = toklen; }
# Error reporters for each malformed statement form.
283 action mach_err { scan_error() << "bad machine statement" << endl; }
284 action incl_err { scan_error() << "bad include statement" << endl; }
285 action import_err { scan_error() << "bad import statement" << endl; }
286 action write_err { scan_error() << "bad write statement" << endl; }
# Handle "machine NAME;": select (creating if needed) the parser for
# the named machine, or decide to ignore the section.
288 action handle_machine
290 /* Assign a name to the machine. */
291 char *machine = word;
293 if ( !importMachines && inclSectionTarg == 0 ) {
294 ignoreSection = false;
# Look up or create the dictionary entry holding this machine's parser.
296 ParserDictEl *pdEl = parserDict.find( machine );
298 pdEl = new ParserDictEl( machine );
299 pdEl->value = new Parser( fileName, machine, sectionLoc );
301 parserDict.insert( pdEl );
304 parser = pdEl->value;
306 else if ( !importMachines && strcmp( inclSectionTarg, machine ) == 0 ) {
307 /* found include target */
308 ignoreSection = false;
309 parser = inclToParser;
312 /* ignoring section */
313 ignoreSection = true;
# Grammar for the machine statement, with error/eof diagnostics.
319 ( KW_Machine TK_Word @store_word ';' ) @handle_machine
320 <>err mach_err <>eof mach_err;
# Handle "include [NAME] [FILE];": defaults missing parts to the current
# section/file, guards against recursive inclusion, then scans the
# included input with a nested Scanner.
322 action handle_include
325 char *inclSectionName = word;
326 char *inclFileName = 0;
328 /* Implement defaults for the input file and section name. */
329 if ( inclSectionName == 0 )
330 inclSectionName = parser->sectionName;
333 inclFileName = prepareFileName( lit, lit_len );
335 inclFileName = fileName;
337 /* Check for a recursive include structure. Add the current file/section
338 * name then check if what we are including is already in the stack. */
339 includeStack.append( IncludeStackItem( fileName, parser->sectionName ) );
341 if ( recursiveInclude( inclFileName, inclSectionName ) )
342 scan_error() << "include: this is a recursive include operation" << endl;
344 /* Open the input file for reading. */
345 ifstream *inFile = new ifstream( inclFileName );
346 if ( ! inFile->is_open() ) {
347 scan_error() << "include: could not open " <<
348 inclFileName << " for reading" << endl;
# Nested scanner targets the current parser; includeDepth+1 marks it as
# an include, importMachines=false.
351 Scanner scanner( inclFileName, *inFile, output, parser,
352 inclSectionName, includeDepth+1, false );
357 /* Remove the last element (len-1) */
358 includeStack.remove( -1 );
# Include statement name forms: word, word+literal, or literal alone.
363 TK_Word @store_word ( TK_Literal @store_lit )? |
364 TK_Literal @store_lit
368 ( KW_Include include_names ';' ) @handle_include
369 <>err incl_err <>eof incl_err;
# Body of the import handler (action header elided): scan a host-language
# file with importMachines=true so its defines/assignments are buffered
# and flushed to the parser.
374 char *importFileName = prepareFileName( lit, lit_len );
376 /* Open the input file for reading. */
377 ifstream *inFile = new ifstream( importFileName );
378 if ( ! inFile->is_open() ) {
379 scan_error() << "import: could not open " <<
380 importFileName << " for reading" << endl;
# Nested scanner in import mode (last arg true, no section target).
383 Scanner scanner( importFileName, *inFile, output, parser,
384 0, includeDepth+1, true );
# Sentinel token forces the last real pattern to be matched, then flush
# any remaining buffered tokens.
386 scanner.importToken( 0, 0, 0 );
387 scanner.flushImport();
393 ( KW_Import TK_Literal @store_lit ';' ) @handle_import
394 <>err import_err <>eof import_err;
# Write-statement actions: emit a <write ...> XML element (opening tag,
# one <arg> per word, closing tag) when this scanner's output is live.
398 if ( active() && machineSpec == 0 && machineName == 0 ) {
400 " def_name=\"" << parser->sectionName << "\""
401 " line=\"" << line << "\""
402 " col=\"" << column << "\""
409 if ( active() && machineSpec == 0 && machineName == 0 )
410 output << "<arg>" << tokdata << "</arg>";
415 if ( active() && machineSpec == 0 && machineName == 0 )
416 output << "</write>\n";
420 ( KW_Write @write_command
421 ( TK_Word @write_arg )+ ';' @write_close )
422 <>err write_err <>eof write_err;
# Default token handler: forward anything unrecognized to the parser.
426 /* Send the token off to the parser. */
428 directToParser( parser, fileName, line, column, type, tokdata, toklen );
431 # Catch everything else.
433 ^( KW_Machine | KW_Include | KW_Import | KW_Write ) @handle_token;
// token() overloads: single-char, dataless, and [start,end) range forms
// all funnel into processToken(). The range form copies the text into a
// fresh NUL-terminated buffer first.
444 void Scanner::token( int type, char c )
446 token( type, &c, &c + 1 );
449 void Scanner::token( int type )
454 void Scanner::token( int type, char *start, char *end )
460 tokdata = new char[toklen+1];
461 memcpy( tokdata, start, toklen );
465 processToken( type, tokdata, toklen );
// Feed one token through the section_parse machine (machine execution
// lines elided from this view).
468 void Scanner::processToken( int type, char *tokdata, int toklen )
474 machine section_parse;
480 /* Record the last token for use in controlling the scan of subsequent
// Begin a %%{ ... }%% ragel section: at the outermost file, close the
// surrounding <host> element in the XML output and remember where the
// section starts.
485 void Scanner::startSection( )
487 parserExistsError = false;
489 if ( includeDepth == 0 ) {
490 if ( machineSpec == 0 && machineName == 0 )
491 output << "</host>\n";
494 sectionLoc.fileName = fileName;
495 sectionLoc.line = line;
// End a ragel section: run the section parser's eof actions, send
// TK_EndSection to the parser, and reopen <host> in the XML output.
499 void Scanner::endSection( )
501 /* Execute the eof actions for the section parser. */
503 machine section_parse;
507 /* Close off the section with the parser. */
510 loc.fileName = fileName;
514 parser->token( loc, TK_EndSection, 0, 0 );
517 if ( includeDepth == 0 ) {
518 if ( machineSpec == 0 && machineName == 0 ) {
519 /* The end section may include a newline on the end, so
520 * we use the last line, which will count the newline. */
521 output << "<host line=\"" << line << "\">";
529 # This is sent by the driver code.
539 # Identifiers, numbers, comments, and other common things.
540 ident = ( alpha | '_' ) ( alpha |digit |'_' )*;
542 hex_number = '0x' [0-9a-fA-F]+;
# C block comment; :>> ends at the first '*/'.
545 '/*' ( any | NL )* :>> '*/';
550 c_cpp_comment = c_comment | cpp_comment;
552 # These literal forms are common to C-like host code and ragel.
553 s_literal = "'" ([^'\\] | NL | '\\' (any | NL))* "'";
554 d_literal = '"' ([^"\\] | NL | '\\' (any | NL))* '"';
556 whitespace = [ \t] | NL;
557 pound_comment = '#' [^\n]* NL;
559 # An inline block of code. This is specified as a scanned, but is sent to
560 # the parser as one long block. The inline_block pointer is used to handle
561 # the preservation of the data.
563 # Inline expression keywords.
564 "fpc" => { token( KW_PChar ); };
565 "fc" => { token( KW_Char ); };
566 "fcurs" => { token( KW_CurState ); };
567 "ftargs" => { token( KW_TargState ); };
# Keywords below disable whitespace forwarding; it is re-enabled by the
# * ) ; symbols (see comment further down).
569 whitespaceOn = false;
573 # Inline statement keywords.
575 whitespaceOn = false;
578 "fexec" => { token( KW_Exec, 0, 0 ); };
580 whitespaceOn = false;
584 whitespaceOn = false;
588 whitespaceOn = false;
592 whitespaceOn = false;
596 whitespaceOn = false;
# Generic inline tokens: identifiers, numbers, literals, whitespace,
# comments.
600 ident => { token( TK_Word, tokstart, tokend ); };
602 number => { token( TK_UInt, tokstart, tokend ); };
603 hex_number => { token( TK_Hex, tokstart, tokend ); };
605 ( s_literal | d_literal )
606 => { token( IL_Literal, tokstart, tokend ); };
610 token( IL_WhiteSpace, tokstart, tokend );
612 c_cpp_comment => { token( IL_Comment, tokstart, tokend ); };
614 "::" => { token( TK_NameSep, tokstart, tokend ); };
616 # Some symbols need to go to the parser as with their cardinal value as
617 # the token type (as opposed to being sent as anonymous symbols)
618 # because they are part of the sequences which we interpret. The * ) ;
619 # symbols cause whitespace parsing to come back on. This gets turned
620 # off by some keywords.
624 token( *tokstart, tokstart, tokend );
625 if ( inlineBlockType == SemiTerminated )
631 token( *tokstart, tokstart, tokend );
634 [,(] => { token( *tokstart, tokstart, tokend ); };
637 token( IL_Symbol, tokstart, tokend );
# Closing brace: either ends the curly-delimited inline block or is an
# inner-scope brace forwarded as a plain symbol.
642 if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
643 /* Inline code block ends. */
648 /* Either a semi terminated inline block or only the closing
649 * brace of some inner scope, not the block's closing brace. */
650 token( IL_Symbol, tokstart, tokend );
655 scan_error() << "unterminated code block" << endl;
658 # Send every other character as a symbol.
659 any => { token( IL_Symbol, tokstart, tokend ); };
# Scanner for [...] OR-literal expressions: translates C-style escapes,
# forwards the range dash, and returns (fret) on the closing bracket.
663 # Escape sequences in OR expressions.
664 '\\0' => { token( RE_Char, '\0' ); };
665 '\\a' => { token( RE_Char, '\a' ); };
666 '\\b' => { token( RE_Char, '\b' ); };
667 '\\t' => { token( RE_Char, '\t' ); };
668 '\\n' => { token( RE_Char, '\n' ); };
669 '\\v' => { token( RE_Char, '\v' ); };
670 '\\f' => { token( RE_Char, '\f' ); };
671 '\\r' => { token( RE_Char, '\r' ); };
# Escaped newline is a line continuation: no token, just track position.
672 '\\\n' => { updateCol(); };
# Any other escaped character stands for itself.
673 '\\' any => { token( RE_Char, tokstart+1, tokend ); };
675 # Range dash in an OR expression.
676 '-' => { token( RE_Dash, 0, 0 ); };
678 # Terminate an OR expression.
679 ']' => { token( RE_SqClose ); fret; };
682 scan_error() << "unterminated OR literal" << endl;
685 # Characters in an OR expression.
686 [^\]] => { token( RE_Char, tokstart, tokend ); };
# Scanner for /.../ regular-expression literals: same escape handling as
# or_literal, plus ., *, and nested [...] classes via fcall.
691 # Escape sequences in regular expressions.
692 '\\0' => { token( RE_Char, '\0' ); };
693 '\\a' => { token( RE_Char, '\a' ); };
694 '\\b' => { token( RE_Char, '\b' ); };
695 '\\t' => { token( RE_Char, '\t' ); };
696 '\\n' => { token( RE_Char, '\n' ); };
697 '\\v' => { token( RE_Char, '\v' ); };
698 '\\f' => { token( RE_Char, '\f' ); };
699 '\\r' => { token( RE_Char, '\r' ); };
700 '\\\n' => { updateCol(); };
701 '\\' any => { token( RE_Char, tokstart+1, tokend ); };
703 # Terminate an OR expression.
705 token( RE_Slash, tokstart, tokend );
709 # Special characters.
710 '.' => { token( RE_Dot ); };
711 '*' => { token( RE_Star ); };
# Character classes recurse into the or_literal scanner.
713 '[' => { token( RE_SqOpen ); fcall or_literal; };
714 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
717 scan_error() << "unterminated regular expression" << endl;
720 # Characters in an OR expression.
721 [^\/] => { token( RE_Char, tokstart, tokend ); };
724 # We need a separate token space here to avoid the ragel keywords.
725 write_statement := |*
726 ident => { token( TK_Word, tokstart, tokend ); } ;
727 [ \t\n]+ => { updateCol(); };
# The terminating ';' returns control to the main parser_def scanner.
728 ';' => { token( ';' ); fgoto parser_def; };
731 scan_error() << "unterminated write statement" << endl;
735 # Parser definitions.
# Main scanner for the interior of a ragel section: keywords, literals,
# regexes, and the full operator vocabulary.
737 'machine' => { token( KW_Machine ); };
738 'include' => { token( KW_Include ); };
739 'import' => { token( KW_Import ); };
# 'write' switches to the dedicated write_statement token space.
742 fgoto write_statement;
744 'action' => { token( KW_Action ); };
745 'alphtype' => { token( KW_AlphType ); };
747 # FIXME: Enable this post 5.17.
748 # 'range' => { token( KW_Range ); };
# These keywords introduce a semicolon-terminated inline code block.
752 inlineBlockType = SemiTerminated;
757 inlineBlockType = SemiTerminated;
761 token( KW_Variable );
762 inlineBlockType = SemiTerminated;
765 'when' => { token( KW_When ); };
766 'eof' => { token( KW_Eof ); };
767 'err' => { token( KW_Err ); };
768 'lerr' => { token( KW_Lerr ); };
769 'to' => { token( KW_To ); };
770 'from' => { token( KW_From ); };
771 'export' => { token( KW_Export ); };
774 ident => { token( TK_Word, tokstart, tokend ); } ;
777 number => { token( TK_UInt, tokstart, tokend ); };
778 hex_number => { token( TK_Hex, tokstart, tokend ); };
780 # Literals, with optionals.
781 ( s_literal | d_literal ) [i]?
782 => { token( TK_Literal, tokstart, tokend ); };
784 '[' => { token( RE_SqOpen ); fcall or_literal; };
785 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
787 '/' => { token( RE_Slash ); fgoto re_literal; };
790 pound_comment => { updateCol(); };
792 ':=' => { token( TK_ColonEquals ); };
# To-state action embeddings.
795 ">~" => { token( TK_StartToState ); };
796 "$~" => { token( TK_AllToState ); };
797 "%~" => { token( TK_FinalToState ); };
798 "<~" => { token( TK_NotStartToState ); };
799 "@~" => { token( TK_NotFinalToState ); };
800 "<>~" => { token( TK_MiddleToState ); };
# From-state action embeddings.
803 ">*" => { token( TK_StartFromState ); };
804 "$*" => { token( TK_AllFromState ); };
805 "%*" => { token( TK_FinalFromState ); };
806 "<*" => { token( TK_NotStartFromState ); };
807 "@*" => { token( TK_NotFinalFromState ); };
808 "<>*" => { token( TK_MiddleFromState ); };
# EOF action embeddings.
811 ">/" => { token( TK_StartEOF ); };
812 "$/" => { token( TK_AllEOF ); };
813 "%/" => { token( TK_FinalEOF ); };
814 "</" => { token( TK_NotStartEOF ); };
815 "@/" => { token( TK_NotFinalEOF ); };
816 "<>/" => { token( TK_MiddleEOF ); };
818 # Global Error actions.
819 ">!" => { token( TK_StartGblError ); };
820 "$!" => { token( TK_AllGblError ); };
821 "%!" => { token( TK_FinalGblError ); };
822 "<!" => { token( TK_NotStartGblError ); };
823 "@!" => { token( TK_NotFinalGblError ); };
824 "<>!" => { token( TK_MiddleGblError ); };
826 # Local error actions.
827 ">^" => { token( TK_StartLocalError ); };
828 "$^" => { token( TK_AllLocalError ); };
829 "%^" => { token( TK_FinalLocalError ); };
830 "<^" => { token( TK_NotStartLocalError ); };
831 "@^" => { token( TK_NotFinalLocalError ); };
832 "<>^" => { token( TK_MiddleLocalError ); };
835 "<>" => { token( TK_Middle ); };
# Condition embeddings.
838 '>?' => { token( TK_StartCond ); };
839 '$?' => { token( TK_AllCond ); };
840 '%?' => { token( TK_LeavingCond ); };
# Binary operators.
842 '..' => { token( TK_DotDot ); };
843 '**' => { token( TK_StarStar ); };
844 '--' => { token( TK_DashDash ); };
845 '->' => { token( TK_Arrow ); };
846 '=>' => { token( TK_DoubleArrow ); };
848 ":>" => { token( TK_ColonGt ); };
849 ":>>" => { token( TK_ColonGtGt ); };
850 "<:" => { token( TK_LtColon ); };
852 # Opening of longest match.
853 "|*" => { token( TK_BarStar ); };
855 # Separator for name references.
856 "::" => { token( TK_NameSep, tokstart, tokend ); };
864 [ \t\r]+ => { updateCol(); };
866 # If we are in a single line machine then newline may end the spec.
869 if ( singleLineSpec ) {
# After 'export'/'entry' an opening brace starts a curly-delimited
# inline block rather than a new scope.
876 if ( lastToken == KW_Export || lastToken == KW_Entry )
881 inlineBlockType = CurlyDelimited;
887 scan_error() << "unterminated ragel section" << endl;
# Any other single character is its own token type.
890 any => { token( *tokstart ); } ;
893 # Outside code scanner. These tokens get passed through.
# Host code outside ragel sections: everything goes through pass(),
# which buffers when importing and otherwise echoes to the output.
895 'define' => { pass( IMP_Define, 0, 0 ); };
896 ident => { pass( IMP_Word, tokstart, tokend ); };
897 number => { pass( IMP_UInt, tokstart, tokend ); };
898 c_cpp_comment => { pass(); };
899 ( s_literal | d_literal ) => { pass( IMP_Literal, tokstart, tokend ); };
# Section openers (elided patterns): %%{ starts a multi-line spec,
# %% a single-line spec.
903 singleLineSpec = false;
909 singleLineSpec = true;
913 whitespace+ => { pass(); };
915 any => { pass( *tokstart, 0, 0 ); };
// Main scanning loop: read the input into a growable buffer and run the
// generated rlscan machine over it, preserving any partially-matched
// token across refills.
921 void Scanner::do_scan()
924 char *buf = new char[bufsize];
// Sentinel appended at eof so the machine can terminate the last token.
925 const char last_char = 0;
926 int cs, act, have = 0;
930 bool singleLineSpec = false;
931 InlineBlockType inlineBlockType = CurlyDelimited;
933 /* Init the section parser and the character scanner. */
938 char *p = buf + have;
939 int space = bufsize - have;
942 /* We filled up the buffer trying to scan a token. Grow it. */
943 bufsize = bufsize * 2;
944 char *newbuf = new char[bufsize];
946 /* Recompute p and space. */
948 space = bufsize - have;
950 /* Patch up pointers possibly in use. */
// tokstart/tokend point into the old buffer; rebase them before it is
// replaced.
952 tokstart = newbuf + ( tokstart - buf );
953 tokend = newbuf + ( tokend - buf );
955 /* Copy the new buffer in. */
956 memcpy( newbuf, buf, have );
961 input.read( p, space );
962 int len = input.gcount();
964 /* If we see eof then append the EOF char. */
966 p[0] = last_char, len = 1;
973 /* Check if we failed. */
974 if ( cs == rlscan_error ) {
975 /* Machine failed before finding a token. I'm not yet sure if this
977 scan_error() << "scanner error" << endl;
981 /* Decide if we need to preserve anything. */
982 char *preserve = tokstart;
984 /* Now set up the prefix. */
// Shift the unconsumed tail (a partially-scanned token) to the buffer
// front and rebase tokstart for the next iteration.
988 /* There is data that needs to be shifted over. */
989 have = pe - preserve;
990 memmove( buf, preserve, have );
991 unsigned int shiftback = preserve - buf;
993 tokstart -= shiftback;
1003 void scan( char *fileName, istream &input, ostream &output )