2 * Copyright 2006-2007 Adrian Thurston <thurston@cs.queensu.ca>
5 /* This file is part of Ragel.
7 * Ragel is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * Ragel is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Ragel; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 * The Scanner for Importing
# Scanner that re-parses buffered import tokens (filled by importToken) and
# forwards recognized definition/assignment patterns to the include parser.
# NOTE(review): this listing elides source lines; gaps exist between rules.
50 machine inline_token_scan;
54 # Import scanner tokens.
# Pattern: `define <name> <uint>` — forwarded as `<name> = <uint> ;`.
# `base` is the index of the first token of this match in the token buffer;
# nameOff/numOff are presumably offsets of the name/number tokens within the
# match — TODO confirm, their definitions are elided here.
59 IMP_Define IMP_Word IMP_UInt => {
60 int base = tok_tokstart - token_data;
64 directToParser( inclToParser, fileName, line, column, TK_Word,
65 token_strings[base+nameOff], token_lens[base+nameOff] );
66 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
67 directToParser( inclToParser, fileName, line, column, TK_UInt,
68 token_strings[base+numOff], token_lens[base+numOff] );
69 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
72 # Assignment of number.
# Pattern: `<name> = <uint>` — forwarded in the same normalized form.
73 IMP_Word '=' IMP_UInt => {
74 int base = tok_tokstart - token_data;
78 directToParser( inclToParser, fileName, line, column, TK_Word,
79 token_strings[base+nameOff], token_lens[base+nameOff] );
80 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
81 directToParser( inclToParser, fileName, line, column, TK_UInt,
82 token_strings[base+numOff], token_lens[base+numOff] );
83 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
# Pattern: `define <name> <literal>` — forwarded as `<name> = <literal> ;`.
87 IMP_Define IMP_Word IMP_Literal => {
88 int base = tok_tokstart - token_data;
92 directToParser( inclToParser, fileName, line, column, TK_Word,
93 token_strings[base+nameOff], token_lens[base+nameOff] );
94 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
95 directToParser( inclToParser, fileName, line, column, TK_Literal,
96 token_strings[base+litOff], token_lens[base+litOff] );
97 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
100 # Assignment of literal.
# Pattern: `<name> = <literal>` — forwarded in the same normalized form.
101 IMP_Word '=' IMP_Literal => {
102 int base = tok_tokstart - token_data;
106 directToParser( inclToParser, fileName, line, column, TK_Word,
107 token_strings[base+nameOff], token_lens[base+nameOff] );
108 directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
109 directToParser( inclToParser, fileName, line, column, TK_Literal,
110 token_strings[base+litOff], token_lens[base+litOff] );
111 directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
114 # Catch everything else.
/* Run the inline_token_scan machine over the buffered import tokens, then
 * shift any unconsumed suffix (from tok_tokstart onward) to the front of the
 * parallel token_data/token_strings/token_lens arrays so the next flush can
 * continue from it. NOTE(review): lines are elided in this listing — the
 * machine-exec statements between these lines are not visible here. */
121 void Scanner::flushImport()
124 int *pe = token_data + cur_token;
/* No partially-matched token pending: nothing to preserve. */
129 if ( tok_tokstart == 0 )
/* Keep only the unconsumed tokens; memmove (not memcpy) because the
 * source and destination ranges may overlap. */
132 cur_token = pe - tok_tokstart;
133 int ts_offset = tok_tokstart - token_data;
134 memmove( token_data, token_data+ts_offset, cur_token*sizeof(token_data[0]) );
135 memmove( token_strings, token_strings+ts_offset, cur_token*sizeof(token_strings[0]) );
136 memmove( token_lens, token_lens+ts_offset, cur_token*sizeof(token_lens[0]) );
/* Hand one token straight to the given parser, tagged with its source
 * location (file/line/column). The cerr output is debug tracing —
 * presumably guarded by an elided conditional; TODO confirm. */
140 void Scanner::directToParser( Parser *toParser, char *tokFileName, int tokLine,
141 int tokColumn, int type, char *tokdata, int toklen )
146 cerr << "scanner:" << tokLine << ":" << tokColumn <<
147 ": sending token to the parser " << Parser_lelNames[type];
148 cerr << " " << toklen;
/* tokdata may be null for symbol tokens; the guard is elided here. */
150 cerr << " " << tokdata;
/* Build the location record and deliver the token. */
154 loc.fileName = tokFileName;
158 toParser->token( loc, type, tokdata, toklen );
/* Append one token to the import buffer consumed later by flushImport().
 * Stores the token id plus an owned, NUL-terminated copy of its text
 * (null/zero when start is 0 — the no-data branch between lines 166 and
 * 172 is elided in this listing). */
161 void Scanner::importToken( int token, char *start, char *end )
/* Buffer full: presumably flushed before appending — TODO confirm,
 * the statement under this condition is elided. */
163 if ( cur_token == max_tokens )
166 token_data[cur_token] = token;
168 token_strings[cur_token] = 0;
169 token_lens[cur_token] = 0;
/* Copy the token text and NUL-terminate it for downstream C-string use. */
172 int toklen = end-start;
173 token_lens[cur_token] = toklen;
174 token_strings[cur_token] = new char[toklen+1];
175 memcpy( token_strings[cur_token], start, toklen );
176 token_strings[cur_token][toklen] = 0;
/* Pass-through for host (outside-section) tokens. When importing machines,
 * the token is also buffered for the import scanner. NOTE(review): lines
 * 185-191 are elided — lines 192-195 below appear to belong to a second
 * pass() overload whose signature is not visible here; confirm against the
 * full source. */
181 void Scanner::pass( int token, char *start, char *end )
183 if ( importMachines )
184 importToken( token, start, end );
192 /* If no errors and we are at the bottom of the include stack (the
193 * source file listed on the command line) then write out the data. */
194 if ( includeDepth == 0 && machineSpec == 0 && machineName == 0 )
195 xmlEscapeHost( output, tokstart, tokend-tokstart );
/* Section-parser region: handles %%{ ... }%% sections, include/import/write
 * statements. Scanner::init's body is elided in this listing — presumably
 * it runs the section_parse init block; TODO confirm. */
199 * The scanner for processing sections, includes, imports, etc.
203 machine section_parse;
209 void Scanner::init( )
/* Whether tokens should be processed: reports (once) the error of having
 * no parser (no machine name seen yet). Other early-return conditions are
 * elided from this listing. */
214 bool Scanner::active()
/* parserExistsError latches so the error is emitted only once. */
219 if ( parser == 0 && ! parserExistsError ) {
220 scan_error() << "there is no previous specification name" << endl;
221 parserExistsError = true;
/* Emit the standard "file:line:column: " error prefix on cerr and return
 * the stream so callers can append the message. Also bumps the global
 * error count (statement elided in this listing). */
230 ostream &Scanner::scan_error()
232 /* Maintain the error count. */
234 cerr << fileName << ":" << line << ":" << column << ": ";
/* True if the given file/section pair is already on the include stack,
 * i.e. including it again would recurse. Both the file name AND the
 * section name must match for it to count as recursion. */
238 bool Scanner::recursiveInclude( char *inclFileName, char *inclSectionName )
240 for ( IncludeStack::Iter si = includeStack; si.lte(); si++ ) {
241 if ( strcmp( si->fileName, inclFileName ) == 0 &&
242 strcmp( si->sectionName, inclSectionName ) == 0 )
/* Advance the column counter by the width of the current token
 * (`from` is presumably the token start — its assignment is elided). */
250 void Scanner::updateCol()
255 //cerr << "adding " << tokend - from << " to column" << endl;
256 column += tokend - from;
# Grammar for statements inside a ragel section: machine naming, include,
# import, write, and a pass-through for every other token.
# NOTE(review): this listing elides source lines; gaps exist between rules.
261 machine section_parse;
263 # Need the defines representing tokens.
# Scratch state shared by the statement rules below: the last word and
# literal tokens seen, with their lengths.
266 action clear_words { word = lit = 0; word_len = lit_len = 0; }
267 action store_word { word = tokdata; word_len = toklen; }
268 action store_lit { lit = tokdata; lit_len = toklen; }
270 action mach_err { scan_error() << "bad machine statement" << endl; }
271 action incl_err { scan_error() << "bad include statement" << endl; }
272 action import_err { scan_error() << "bad import statement" << endl; }
273 action write_err { scan_error() << "bad write statement" << endl; }
# `machine <name>;` — select (or create) the parser for this machine name.
275 action handle_machine
277 /* Assign a name to the machine. */
278 char *machine = word;
# Normal compile: look up or create the named parser and make it current.
280 if ( !importMachines && inclSectionTarg == 0 ) {
281 ignoreSection = false;
283 ParserDictEl *pdEl = parserDict.find( machine );
285 pdEl = new ParserDictEl( machine );
286 pdEl->value = new Parser( fileName, machine, sectionLoc );
288 parserDict.insert( pdEl );
291 parser = pdEl->value;
# Scanning an included file: only the targeted section is parsed.
293 else if ( !importMachines && strcmp( inclSectionTarg, machine ) == 0 ) {
294 /* found include target */
295 ignoreSection = false;
296 parser = inclToParser;
299 /* ignoring section */
300 ignoreSection = true;
306 ( KW_Machine TK_Word @store_word ';' ) @handle_machine
307 <>err mach_err <>eof mach_err;
# `include [section] [file];` — recursively scan another file/section
# into the current parser.
309 action handle_include
312 char *inclSectionName = word;
313 char *inclFileName = 0;
315 /* Implement defaults for the input file and section name. */
316 if ( inclSectionName == 0 )
317 inclSectionName = parser->sectionName;
320 inclFileName = prepareFileName( lit, lit_len );
322 inclFileName = fileName;
324 /* Check for a recursive include structure. Add the current file/section
325 * name then check if what we are including is already in the stack. */
326 includeStack.append( IncludeStackItem( fileName, parser->sectionName ) );
328 if ( recursiveInclude( inclFileName, inclSectionName ) )
329 scan_error() << "include: this is a recursive include operation" << endl;
331 /* Open the input file for reading. */
332 ifstream *inFile = new ifstream( inclFileName );
333 if ( ! inFile->is_open() ) {
334 scan_error() << "include: could not open " <<
335 inclFileName << " for reading" << endl;
# Sub-scanner targeted at the named section, one level deeper.
338 Scanner scanner( inclFileName, *inFile, output, parser,
339 inclSectionName, includeDepth+1, false );
344 /* Remove the last element (len-1) */
345 includeStack.remove( -1 );
# Include accepts `word`, `word literal`, or `literal` argument forms.
350 TK_Word @store_word ( TK_Literal @store_lit )? |
351 TK_Literal @store_lit
355 ( KW_Include include_names ';' ) @handle_include
356 <>err incl_err <>eof incl_err;
# `import "file";` — scan a host-language file for token definitions
# (importMachines=true in the sub-scanner), then flush them to the parser.
361 char *importFileName = prepareFileName( lit, lit_len );
363 /* Open the input file for reading. */
364 ifstream *inFile = new ifstream( importFileName );
365 if ( ! inFile->is_open() ) {
366 scan_error() << "import: could not open " <<
367 importFileName << " for reading" << endl;
370 Scanner scanner( importFileName, *inFile, output, parser,
371 0, includeDepth+1, true );
# Trailing null token terminates the import token stream before flushing.
373 scanner.importToken( 0, 0, 0 );
374 scanner.flushImport();
380 ( KW_Import TK_Literal @store_lit ';' ) @handle_import
381 <>err import_err <>eof import_err;
# `write <args>;` — emitted as XML only at the outermost, unfiltered level.
385 if ( active() && machineSpec == 0 && machineName == 0 ) {
387 " def_name=\"" << parser->sectionName << "\""
388 " line=\"" << line << "\""
389 " col=\"" << column << "\""
396 if ( active() && machineSpec == 0 && machineName == 0 )
397 output << "<arg>" << tokdata << "</arg>";
402 if ( active() && machineSpec == 0 && machineName == 0 )
403 output << "</write>\n";
407 ( KW_Write @write_command
408 ( TK_Word @write_arg )+ ';' @write_close )
409 <>err write_err <>eof write_err;
413 /* Send the token off to the parser. */
415 directToParser( parser, fileName, line, column, type, tokdata, toklen );
418 # Catch everything else.
420 ^( KW_Machine | KW_Include | KW_Import | KW_Write ) @handle_token;
/* Convenience overload: send a single-character token by pointing at the
 * local char (valid only for the duration of the call). */
431 void Scanner::token( int type, char c )
433 token( type, &c, &c + 1 );
/* Convenience overload for data-less tokens; body elided in this listing —
 * presumably forwards null start/end pointers, TODO confirm. */
436 void Scanner::token( int type )
/* Main token entry point: copies [start,end) into an owned NUL-terminated
 * buffer (the null-start branch is elided here) and hands it to
 * processToken for section parsing. */
441 void Scanner::token( int type, char *start, char *end )
447 tokdata = new char[toklen+1];
448 memcpy( tokdata, start, toklen );
452 processToken( type, tokdata, toklen );
/* Feed one token through the section_parse machine (exec statements are
 * elided in this listing) and record it as lastToken for lookahead-style
 * decisions in the character scanner. */
455 void Scanner::processToken( int type, char *tokdata, int toklen )
461 machine section_parse;
467 /* Record the last token for use in controlling the scan of subsequent
/* Called on entering a %%{ ragel section: reset the one-shot "no parser"
 * error latch, close the surrounding XML <host> element (only at the
 * outermost, unfiltered level), and remember where the section starts. */
472 void Scanner::startSection( )
474 parserExistsError = false;
476 if ( includeDepth == 0 ) {
477 if ( machineSpec == 0 && machineName == 0 )
478 output << "</host>\n";
481 sectionLoc.fileName = fileName;
482 sectionLoc.line = line;
/* Called on leaving a ragel section: run the section parser's eof actions
 * (exec statements elided here), send TK_EndSection to the parser, and
 * reopen the XML <host> element at the outermost, unfiltered level. */
486 void Scanner::endSection( )
488 /* Execute the eof actions for the section parser. */
490 machine section_parse;
494 /* Close off the section with the parser. */
497 loc.fileName = fileName;
501 parser->token( loc, TK_EndSection, 0, 0 );
504 if ( includeDepth == 0 ) {
505 if ( machineSpec == 0 && machineName == 0 ) {
506 /* The end section may include a newline on the end, so
507 * we use the last line, which will count the newline. */
508 output << "<host line=\"" << line << "\">";
# Shared lexical definitions used by the scanner machines below.
# NOTE(review): some definitions (NL, number, cpp_comment, c_comment name)
# are elided from this listing.
516 # This is sent by the driver code.
526 # Identifiers, numbers, comments, and other common things.
527 ident = ( alpha | '_' ) ( alpha |digit |'_' )*;
529 hex_number = '0x' [0-9a-fA-F]+;
# C block comment body; :>> ends the match at the first '*/'.
532 '/*' ( any | NL )* :>> '*/';
537 c_cpp_comment = c_comment | cpp_comment;
539 # These literal forms are common to C-like host code and ragel.
# Quoted literals allow backslash escapes and embedded newlines.
540 s_literal = "'" ([^'\\] | NL | '\\' (any | NL))* "'";
541 d_literal = '"' ([^"\\] | NL | '\\' (any | NL))* '"';
543 whitespace = [ \t] | NL;
544 pound_comment = '#' [^\n]* NL;
546 # An inline block of code. This is specified as a scanner, but is sent to
547 # the parser as one long block. The inline_block pointer is used to handle
548 # the preservation of the data.
# NOTE(review): lines are elided in this listing; several keyword rules
# appear only as their `whitespaceOn = false;` action bodies.
550 # Inline expression keywords.
551 "fpc" => { token( KW_PChar ); };
552 "fc" => { token( KW_Char ); };
553 "fcurs" => { token( KW_CurState ); };
554 "ftargs" => { token( KW_TargState ); };
556 whitespaceOn = false;
560 # Inline statement keywords.
562 whitespaceOn = false;
565 "fexec" => { token( KW_Exec, 0, 0 ); };
567 whitespaceOn = false;
571 whitespaceOn = false;
575 whitespaceOn = false;
579 whitespaceOn = false;
583 whitespaceOn = false;
# Host-language tokens inside the inline block.
587 ident => { token( TK_Word, tokstart, tokend ); };
589 number => { token( TK_UInt, tokstart, tokend ); };
590 hex_number => { token( TK_Hex, tokstart, tokend ); };
592 ( s_literal | d_literal )
593 => { token( IL_Literal, tokstart, tokend ); };
597 token( IL_WhiteSpace, tokstart, tokend );
599 c_cpp_comment => { token( IL_Comment, tokstart, tokend ); };
601 "::" => { token( TK_NameSep, tokstart, tokend ); };
603 # Some symbols need to go to the parser with their cardinal value as
604 # the token type (as opposed to being sent as anonymous symbols)
605 # because they are part of the sequences which we interpret. The * ) ;
606 # symbols cause whitespace parsing to come back on. This gets turned
607 # off by some keywords.
611 token( *tokstart, tokstart, tokend );
# A ';' terminates a semi-terminated inline block (fret elided here).
612 if ( inlineBlockType == SemiTerminated )
618 token( *tokstart, tokstart, tokend );
621 [,(] => { token( *tokstart, tokstart, tokend ); };
# '{' presumably increments curly_count — elided; '}' below decrements.
624 token( IL_Symbol, tokstart, tokend );
629 if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
630 /* Inline code block ends. */
635 /* Either a semi terminated inline block or only the closing
636 * brace of some inner scope, not the block's closing brace. */
637 token( IL_Symbol, tokstart, tokend );
642 scan_error() << "unterminated code block" << endl;
645 # Send every other character as a symbol.
646 any => { token( IL_Symbol, tokstart, tokend ); };
# Scanner for the inside of a [...] OR-literal: C escape sequences,
# the range dash, and the closing bracket (returns via fret).
650 # Escape sequences in OR expressions.
651 '\\0' => { token( RE_Char, '\0' ); };
652 '\\a' => { token( RE_Char, '\a' ); };
653 '\\b' => { token( RE_Char, '\b' ); };
654 '\\t' => { token( RE_Char, '\t' ); };
655 '\\n' => { token( RE_Char, '\n' ); };
656 '\\v' => { token( RE_Char, '\v' ); };
657 '\\f' => { token( RE_Char, '\f' ); };
658 '\\r' => { token( RE_Char, '\r' ); };
# Escaped newline is a line continuation: track position, emit nothing.
659 '\\\n' => { updateCol(); };
# Any other escaped char stands for itself (skip the backslash).
660 '\\' any => { token( RE_Char, tokstart+1, tokend ); };
662 # Range dash in an OR expression.
663 '-' => { token( RE_Dash, 0, 0 ); };
665 # Terminate an OR expression.
666 ']' => { token( RE_SqClose ); fret; };
# EOF inside the literal is an error (rule head elided in this listing).
669 scan_error() << "unterminated OR literal" << endl;
672 # Characters in an OR expression.
673 [^\]] => { token( RE_Char, tokstart, tokend ); };
# Scanner for the inside of a /.../ regular-expression literal.
678 # Escape sequences in regular expressions.
679 '\\0' => { token( RE_Char, '\0' ); };
680 '\\a' => { token( RE_Char, '\a' ); };
681 '\\b' => { token( RE_Char, '\b' ); };
682 '\\t' => { token( RE_Char, '\t' ); };
683 '\\n' => { token( RE_Char, '\n' ); };
684 '\\v' => { token( RE_Char, '\v' ); };
685 '\\f' => { token( RE_Char, '\f' ); };
686 '\\r' => { token( RE_Char, '\r' ); };
# Escaped newline is a line continuation: track position, emit nothing.
687 '\\\n' => { updateCol(); };
688 '\\' any => { token( RE_Char, tokstart+1, tokend ); };
690 # Terminate an OR expression.
# Closing '/' (optionally followed by flags — surrounding lines elided).
692 token( RE_Slash, tokstart, tokend );
696 # Special characters.
697 '.' => { token( RE_Dot ); };
698 '*' => { token( RE_Star ); };
# Bracket classes recurse into the or_literal scanner.
700 '[' => { token( RE_SqOpen ); fcall or_literal; };
701 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
# EOF inside the literal is an error (rule head elided in this listing).
704 scan_error() << "unterminated regular expression" << endl;
707 # Characters in an OR expression.
708 [^\/] => { token( RE_Char, tokstart, tokend ); };
# Dedicated token space for the arguments of `write ...;` so that ragel
# keywords (e.g. 'data', 'init') are scanned as plain words here.
711 # We need a separate token space here to avoid the ragel keywords.
712 write_statement := |*
713 ident => { token( TK_Word, tokstart, tokend ); } ;
714 [ \t\n]+ => { updateCol(); };
# ';' ends the write statement and resumes the main section scanner.
715 ';' => { token( ';' ); fgoto parser_def; };
718 scan_error() << "unterminated write statement" << endl;
# Main scanner for the inside of a ragel section: keywords, literals,
# operators, and the transitions into the sub-scanners above.
# NOTE(review): lines are elided in this listing; several keyword rules
# appear only as their action bodies.
722 # Parser definitions.
724 'machine' => { token( KW_Machine ); };
725 'include' => { token( KW_Include ); };
726 'import' => { token( KW_Import ); };
# 'write' presumably precedes this — switches to the dedicated token space.
729 fgoto write_statement;
731 'action' => { token( KW_Action ); };
732 'alphtype' => { token( KW_AlphType ); };
734 # FIXME: Enable this post 5.17.
735 # 'range' => { token( KW_Range ); };
# These keywords introduce a semicolon-terminated inline code block.
739 inlineBlockType = SemiTerminated;
744 inlineBlockType = SemiTerminated;
748 token( KW_Variable );
749 inlineBlockType = SemiTerminated;
752 'when' => { token( KW_When ); };
753 'eof' => { token( KW_Eof ); };
754 'err' => { token( KW_Err ); };
755 'lerr' => { token( KW_Lerr ); };
756 'to' => { token( KW_To ); };
757 'from' => { token( KW_From ); };
758 'export' => { token( KW_Export ); };
761 ident => { token( TK_Word, tokstart, tokend ); } ;
764 number => { token( TK_UInt, tokstart, tokend ); };
765 hex_number => { token( TK_Hex, tokstart, tokend ); };
767 # Literals, with optionals.
# Trailing [i] marks a case-insensitive literal.
768 ( s_literal | d_literal ) [i]?
769 => { token( TK_Literal, tokstart, tokend ); };
771 '[' => { token( RE_SqOpen ); fcall or_literal; };
772 '[^' => { token( RE_SqOpenNeg ); fcall or_literal; };
774 '/' => { token( RE_Slash ); fgoto re_literal; };
777 pound_comment => { updateCol(); };
779 ':=' => { token( TK_ColonEquals ); };
# To-state action embedding operators.
782 ">~" => { token( TK_StartToState ); };
783 "$~" => { token( TK_AllToState ); };
784 "%~" => { token( TK_FinalToState ); };
785 "<~" => { token( TK_NotStartToState ); };
786 "@~" => { token( TK_NotFinalToState ); };
787 "<>~" => { token( TK_MiddleToState ); };
# From-state action embedding operators.
790 ">*" => { token( TK_StartFromState ); };
791 "$*" => { token( TK_AllFromState ); };
792 "%*" => { token( TK_FinalFromState ); };
793 "<*" => { token( TK_NotStartFromState ); };
794 "@*" => { token( TK_NotFinalFromState ); };
795 "<>*" => { token( TK_MiddleFromState ); };
# EOF action embedding operators.
798 ">/" => { token( TK_StartEOF ); };
799 "$/" => { token( TK_AllEOF ); };
800 "%/" => { token( TK_FinalEOF ); };
801 "</" => { token( TK_NotStartEOF ); };
802 "@/" => { token( TK_NotFinalEOF ); };
803 "<>/" => { token( TK_MiddleEOF ); };
805 # Global Error actions.
806 ">!" => { token( TK_StartGblError ); };
807 "$!" => { token( TK_AllGblError ); };
808 "%!" => { token( TK_FinalGblError ); };
809 "<!" => { token( TK_NotStartGblError ); };
810 "@!" => { token( TK_NotFinalGblError ); };
811 "<>!" => { token( TK_MiddleGblError ); };
813 # Local error actions.
814 ">^" => { token( TK_StartLocalError ); };
815 "$^" => { token( TK_AllLocalError ); };
816 "%^" => { token( TK_FinalLocalError ); };
817 "<^" => { token( TK_NotStartLocalError ); };
818 "@^" => { token( TK_NotFinalLocalError ); };
819 "<>^" => { token( TK_MiddleLocalError ); };
822 "<>" => { token( TK_Middle ); };
# Condition embedding operators.
825 '>?' => { token( TK_StartCond ); };
826 '$?' => { token( TK_AllCond ); };
827 '%?' => { token( TK_LeavingCond ); };
829 '..' => { token( TK_DotDot ); };
830 '**' => { token( TK_StarStar ); };
831 '--' => { token( TK_DashDash ); };
832 '->' => { token( TK_Arrow ); };
833 '=>' => { token( TK_DoubleArrow ); };
# Priority-guarded concatenation operators.
835 ":>" => { token( TK_ColonGt ); };
836 ":>>" => { token( TK_ColonGtGt ); };
837 "<:" => { token( TK_LtColon ); };
839 # Opening of longest match.
840 "|*" => { token( TK_BarStar ); };
842 # Separator for name references.
843 "::" => { token( TK_NameSep, tokstart, tokend ); };
851 [ \t\r]+ => { updateCol(); };
853 # If we are in a single line machine then newline may end the spec.
856 if ( singleLineSpec ) {
# '{' after export/entry keeps scanning here; otherwise it opens an
# inline code block (surrounding lines elided).
863 if ( lastToken == KW_Export || lastToken == KW_Entry )
868 inlineBlockType = CurlyDelimited;
874 scan_error() << "unterminated ragel section" << endl;
877 any => { token( *tokstart ); } ;
# Scanner for host code outside ragel sections. Tokens are passed through
# to the output (and buffered for import when importMachines is set);
# section-open rules set singleLineSpec and are partially elided here.
880 # Outside code scanner. These tokens get passed through.
882 'define' => { pass( IMP_Define, 0, 0 ); };
883 ident => { pass( IMP_Word, tokstart, tokend ); };
884 number => { pass( IMP_UInt, tokstart, tokend ); };
885 c_cpp_comment => { pass(); };
886 ( s_literal | d_literal ) => { pass( IMP_Literal, tokstart, tokend ); };
# Presumably '%%{' (multi-line) vs '%%' (single-line) section openers —
# the rule heads are elided; confirm against the full source.
890 singleLineSpec = false;
896 singleLineSpec = true;
900 whitespace+ => { pass(); };
902 any => { pass( *tokstart, 0, 0 ); };
/* Drive the character scanner: read the input stream into a growable
 * buffer, execute the rlscan machine over each chunk, and shift any
 * partially-matched token to the buffer front between reads.
 * NOTE(review): loop structure and several statements are elided in
 * this listing. */
908 void Scanner::do_scan()
911 char *buf = new char[bufsize];
/* A NUL byte is appended at eof so the machine can match end-of-input. */
912 const char last_char = 0;
913 int cs, act, have = 0;
917 bool singleLineSpec = false;
918 InlineBlockType inlineBlockType = CurlyDelimited;
920 /* Init the section parser and the character scanner. */
/* `have` bytes at the front of buf are preserved from the previous pass. */
925 char *p = buf + have;
926 int space = bufsize - have;
929 /* We filled up the buffer trying to scan a token. Grow it. */
930 bufsize = bufsize * 2;
931 char *newbuf = new char[bufsize];
933 /* Recompute p and space. */
935 space = bufsize - have;
937 /* Patch up pointers possibly in use. */
939 tokstart = newbuf + ( tokstart - buf );
940 tokend = newbuf + ( tokend - buf );
942 /* Copy the new buffer in. */
943 memcpy( newbuf, buf, have );
948 input.read( p, space );
949 int len = input.gcount();
951 /* If we see eof then append the EOF char. */
953 p[0] = last_char, len = 1;
960 /* Check if we failed. */
961 if ( cs == rlscan_error ) {
962 /* Machine failed before finding a token. I'm not yet sure if this
964 scan_error() << "scanner error" << endl;
968 /* Decide if we need to preserve anything. */
969 char *preserve = tokstart;
971 /* Now set up the prefix. */
975 /* There is data that needs to be shifted over. */
976 have = pe - preserve;
977 memmove( buf, preserve, have );
978 unsigned int shiftback = preserve - buf;
980 tokstart -= shiftback;
990 void scan( char *fileName, istream &input, ostream &output )