# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
17 """Light weight EcmaScript state tracker that reads tokens and tracks state."""
# NOTE(review): this file is a truncated, line-numbered dump of
# closure_linter's state tracker.  The leading integers on each line are
# original line numbers fused in by the extraction; jumps in that numbering
# mark source lines missing from this excerpt.
19 __author__ = ('robbyw@google.com (Robert Walker)',
20 'ajp@google.com (Andy Perelson)')
import re

from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import tokenutil
29 Type = javascripttokens.JavaScriptTokenType
32 class DocFlag(object):
33 """Generic doc flag object.
# NOTE(review): numbering gaps (33 -> 36, 38 -> 40, ...) indicate lines are
# missing from this excerpt, e.g. the "Attributes:" header of this docstring.
36 flag_type: param, return, define, type, etc.
37 flag_token: The flag token.
38 type_start_token: The first token specifying the flag type,
40 type_end_token: The last token specifying the flag type,
43 name_token: The token specifying the flag name.
45 description_start_token: The first token in the description.
46 description_end_token: The end token in the description.
47 description: The description.
50 # Please keep these lists alphabetized.
# Class-level tag vocabularies consulted by __init__ below.  Only a few
# members of each frozenset survive in this excerpt.
52 # The list of standard jsdoc tags is from
53 STANDARD_DOC = frozenset([
57 'consistentIdGenerator',
75 'ngInject', # This annotation is specific to AngularJS.
99 ANNOTATION = frozenset(['preserveTry', 'suppress'])
101 LEGAL_DOC = STANDARD_DOC | ANNOTATION
103 # Includes all Closure Compiler @suppress types.
104 # Not all of these annotations are interpreted by Closure Linter.
107 # - accessControls is supported by the compiler at the expression
108 # and method level to suppress warnings about private/protected
109 # access (method level applies to all references in the method).
110 # The linter mimics the compiler behavior.
111 SUPPRESS_TYPES = frozenset([
113 'ambiguousFunctionDecl',
115 'checkStructDictInheritance',
128 'internetExplorerChecks',
135 'strictModuleDepCheck',
144 'unusedPrivateMembers',
149 HAS_DESCRIPTION = frozenset([
150 'define', 'deprecated', 'desc', 'fileoverview', 'license', 'param',
151 'preserve', 'return', 'supported'])
153 HAS_TYPE = frozenset([
154 'define', 'enum', 'extends', 'implements', 'param', 'return', 'type',
155 'suppress', 'const', 'package', 'private', 'protected', 'public'])
157 CAN_OMIT_TYPE = frozenset(['enum', 'const', 'package', 'private',
158 'protected', 'public'])
160 TYPE_ONLY = frozenset(['enum', 'extends', 'implements', 'suppress', 'type',
161 'const', 'package', 'private', 'protected', 'public'])
163 HAS_NAME = frozenset(['param'])
# Regexes for blank doc-comment lines; require 're' to be imported at
# module scope (the import line is missing from this excerpt).
165 EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$')
166 EMPTY_STRING = re.compile(r'^\s*$')
168 def __init__(self, flag_token):
169 """Creates the DocFlag object and attaches it to the given start token.
172 flag_token: The starting token of the flag.
174 self.flag_token = flag_token
175 self.flag_type = flag_token.string.strip().lstrip('@')
177 # Extract type, if applicable.
179 self.type_start_token = None
180 self.type_end_token = None
181 if self.flag_type in self.HAS_TYPE:
182 brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE],
183 Type.FLAG_ENDING_TYPES)
# NOTE(review): original line 184 is missing here — presumably an
# "if brace:" guard before the brace-contents extraction; confirm against
# the untruncated source.
185 end_token, contents = _GetMatchingEndBraceAndContents(brace)
187 self.type_start_token = brace
188 self.type_end_token = end_token
189 elif (self.flag_type in self.TYPE_ONLY and
190 flag_token.next.type not in Type.FLAG_ENDING_TYPES and
191 flag_token.line_number == flag_token.next.line_number):
192 # b/10407058. If the flag is expected to be followed by a type then
193 # search for type in same line only. If no token after flag in same
194 # line then conclude that no type is specified.
195 self.type_start_token = flag_token.next
196 self.type_end_token, self.type = _GetEndTokenAndContents(
197 self.type_start_token)
198 if self.type is not None:
199 self.type = self.type.strip()
201 # Extract name, if applicable.
202 self.name_token = None
204 if self.flag_type in self.HAS_NAME:
205 # Handle bad case, name could be immediately after flag token.
206 self.name_token = _GetNextPartialIdentifierToken(flag_token)
208 # Handle good case, if found token is after type start, look for
209 # a identifier (substring to cover cases like [cnt] b/4197272) after
210 # type end, since types contain identifiers.
211 if (self.type and self.name_token and
212 tokenutil.Compare(self.name_token, self.type_start_token) > 0):
213 self.name_token = _GetNextPartialIdentifierToken(self.type_end_token)
# NOTE(review): the branch assigning self.name (guard around line 214-215)
# is missing from this excerpt.
216 self.name = self.name_token.string
218 # Extract description, if applicable.
219 self.description_start_token = None
220 self.description_end_token = None
221 self.description = None
222 if self.flag_type in self.HAS_DESCRIPTION:
223 search_start_token = flag_token
224 if self.name_token and self.type_end_token:
# Description starts after whichever of type/name ends later.
225 if tokenutil.Compare(self.type_end_token, self.name_token) > 0:
226 search_start_token = self.type_end_token
228 search_start_token = self.name_token
229 elif self.name_token:
230 search_start_token = self.name_token
232 search_start_token = self.type_end_token
234 interesting_token = tokenutil.Search(search_start_token,
235 Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES)
236 if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES:
237 self.description_start_token = interesting_token
238 self.description_end_token, self.description = (
239 _GetEndTokenAndContents(interesting_token))
242 class DocComment(object):
243 """JavaScript doc comment object.
246 ordered_params: Ordered list of parameters documented.
247 start_token: The token that starts the doc comment.
248 end_token: The token that ends the doc comment.
249 suppressions: Map of suppression type to the token that added it.
251 def __init__(self, start_token):
252 """Create the doc comment object.
255 start_token: The first token in the doc comment.
258 self.start_token = start_token
259 self.end_token = None
260 self.suppressions = {}
261 self.invalidated = False
# NOTE(review): no visible initialization of self.__flags — presumably
# "self.__flags = []" sits on a line missing from this excerpt; methods
# below iterate and append to it.
# NOTE(review): original line 263 (presumably a @property decorator for
# ordered_params) is missing; DocComment.ordered_params is used as an
# attribute, not a method call, in CompareParameters below.
264 def ordered_params(self):
265 """Gives the list of parameter names as a list of strings."""
267 for flag in self.__flags:
268 if flag.flag_type == 'param' and flag.name:
269 params.append(flag.name)
272 def Invalidate(self):
273 """Indicate that the JSDoc is well-formed but we had problems parsing it.
275 This is a short-circuiting mechanism so that we don't emit false
276 positives about well-formed doc comments just because we don't support
279 self.invalidated = True
281 def IsInvalidated(self):
282 """Test whether Invalidate() has been called."""
283 return self.invalidated
285 def AddSuppression(self, token):
286 """Add a new error suppression flag.
289 token: The suppression flag token.
291 #TODO(user): Error if no braces
292 brace = tokenutil.SearchUntil(token, [Type.DOC_START_BRACE],
# Multiple suppressions may be pipe-separated inside one brace pair.
295 end_token, contents = _GetMatchingEndBraceAndContents(brace)
296 for suppression in contents.split('|'):
297 self.suppressions[suppression] = token
299 def SuppressionOnly(self):
300 """Returns whether this comment contains only suppression flags."""
304 for flag in self.__flags:
305 if flag.flag_type != 'suppress':
310 def AddFlag(self, flag):
311 """Add a new document flag.
314 flag: DocFlag object.
316 self.__flags.append(flag)
318 def InheritsDocumentation(self):
319 """Test if the jsdoc implies documentation inheritance.
322 True if documentation may be pulled off the superclass.
324 return self.HasFlag('inheritDoc') or self.HasFlag('override')
326 def HasFlag(self, flag_type):
327 """Test if the given flag has been set.
330 flag_type: The type of the flag to check.
333 True if the flag is set.
335 for flag in self.__flags:
336 if flag.flag_type == flag_type:
340 def GetFlag(self, flag_type):
341 """Gets the last flag of the given type.
344 flag_type: The type of the flag to get.
347 The last instance of the given flag type in this doc comment.
349 for flag in reversed(self.__flags):
350 if flag.flag_type == flag_type:
353 def GetDocFlags(self):
354 """Return the doc flags for this comment."""
355 return list(self.__flags)
# Generator over the free-text description tokens of the comment, skipping
# structural tokens (start/end markers and prefixes) and stopping at the
# first doc flag or non-comment token.
357 def _YieldDescriptionTokens(self):
358 for token in self.start_token:
360 if (token is self.end_token or
361 token.type is javascripttokens.JavaScriptTokenType.DOC_FLAG or
362 token.type not in javascripttokens.JavaScriptTokenType.COMMENT_TYPES):
365 if token.type not in [
366 javascripttokens.JavaScriptTokenType.START_DOC_COMMENT,
367 javascripttokens.JavaScriptTokenType.END_DOC_COMMENT,
368 javascripttokens.JavaScriptTokenType.DOC_PREFIX]:
# NOTE(review): line 371 (presumably @property) is missing before this def.
372 def description(self):
373 return tokenutil.TokensToString(
374 self._YieldDescriptionTokens())
376 def GetTargetIdentifier(self):
377 """Returns the identifier (as a string) that this is a comment for.
379 Note that this uses method uses GetIdentifierForToken to get the full
380 identifier, even if broken up by whitespace, newlines, or comments,
381 and thus could be longer than GetTargetToken().string.
384 The identifier for the token this comment is for.
386 token = self.GetTargetToken()
388 return tokenutil.GetIdentifierForToken(token)
390 def GetTargetToken(self):
391 """Get this comment's target token.
394 The token that is the target of this comment, or None if there isn't one.
397 # File overviews describe the file, not a token.
398 if self.HasFlag('fileoverview'):
# NOTE(review): the contents of skip_types and target_types (lines 402-410)
# are missing from this excerpt.
401 skip_types = frozenset([
406 target_types = frozenset([
411 token = self.end_token.next
413 if token.type in target_types:
416 # Handles the case of a comment on "var foo = ...'
417 if token.IsKeyword('var'):
418 next_code_token = tokenutil.CustomSearch(
420 lambda t: t.type not in Type.NON_CODE_TYPES)
422 if (next_code_token and
423 next_code_token.IsType(Type.SIMPLE_LVALUE)):
424 return next_code_token
428 # Handles the case of a comment on "function foo () {}"
429 if token.type is Type.FUNCTION_DECLARATION:
430 next_code_token = tokenutil.CustomSearch(
432 lambda t: t.type not in Type.NON_CODE_TYPES)
434 if next_code_token.IsType(Type.FUNCTION_NAME):
435 return next_code_token
439 # Skip types will end the search.
440 if token.type not in skip_types:
445 def CompareParameters(self, params):
446 """Computes the edit distance and list from the function params to the docs.
448 Uses the Levenshtein edit distance algorithm, with code modified from
449 http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python
452 params: The parameter list for the function declaration.
455 The edit distance, the edit list.
457 source_len, target_len = len(self.ordered_params), len(params)
# NOTE(review): the initialization of edit_lists/distance (around lines
# 458-459) is missing from this excerpt.
460 for i in range(target_len+1):
461 edit_lists[0].append(['I'] * i)
462 distance[0].append(i)
464 for j in range(1, source_len+1):
465 edit_lists.append([['D'] * j])
468 for i in range(source_len):
469 for j in range(target_len):
471 if self.ordered_params[i] == params[j]:
474 deletion = distance[i][j+1] + 1
475 insertion = distance[i+1][j] + 1
476 substitution = distance[i][j] + cost
480 if deletion <= insertion and deletion <= substitution:
483 edit_list = list(edit_lists[i][j+1])
484 edit_list.append('D')
486 elif insertion <= substitution:
489 edit_list = list(edit_lists[i+1][j])
490 edit_list.append('I')
491 edit_lists[i+1].append(edit_list)
494 # Substitution is best.
496 edit_list = list(edit_lists[i][j])
498 edit_list.append('S')
500 edit_list.append('=')
502 edit_lists[i+1].append(edit_list)
503 distance[i+1].append(best)
505 return distance[source_len][target_len], edit_lists[source_len][target_len]
# NOTE(review): the "def __repr__(self):" header (around line 507) is
# missing before this docstring.
508 """Returns a string representation of this object.
511 A string representation of this object.
513 return '<DocComment: %s, %s>' % (
514 str(self.ordered_params), str(self.__flags))
518 # Helper methods used by DocFlag and DocComment to parse out flag information.
522 def _GetMatchingEndBraceAndContents(start_brace):
523 """Returns the matching end brace and contents between the two braces.
525 If any FLAG_ENDING_TYPE token is encountered before a matching end brace, then
526 that token is used as the matching ending token. Contents will have all
527 comment prefixes stripped out of them, and all comment prefixes in between the
528 start and end tokens will be split out into separate DOC_PREFIX tokens.
531 start_brace: The DOC_START_BRACE token immediately before desired contents.
534 The matching ending token (DOC_END_BRACE or FLAG_ENDING_TYPE) and a string
535 of the contents between the matching tokens, minus any comment prefixes.
# NOTE(review): initialization of open_count, close_count and contents
# (lines 537-540) is missing from this excerpt.
541 # We don't consider the start brace part of the type string.
542 token = start_brace.next
543 while open_count != close_count:
544 if token.type == Type.DOC_START_BRACE:
546 elif token.type == Type.DOC_END_BRACE:
# Comment prefixes are structural, not part of the type contents.
549 if token.type != Type.DOC_PREFIX:
550 contents.append(token.string)
# Bail out early on any flag-ending token (end of comment, next flag, ...).
552 if token.type in Type.FLAG_ENDING_TYPES:
556 #Don't include the end token (end brace, end doc comment, etc.) in type.
557 token = token.previous
558 contents = contents[:-1]
560 return token, ''.join(contents)
563 def _GetNextPartialIdentifierToken(start_token):
564 """Returns the first token having identifier as substring after a token.
566 Searches each token after the start to see if it contains an identifier.
567 If found, token is returned. If no identifier is found returns None.
568 Search is abandoned when a FLAG_ENDING_TYPE token is found.
571 start_token: The token to start searching after.
574 The token found containing identifier, None otherwise.
576 token = start_token.next
578 while token and token.type not in Type.FLAG_ENDING_TYPES:
# Reuses the tokenizer's IDENTIFIER regex to test for a partial match.
579 match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.search(
581 if match is not None and token.type == Type.COMMENT:
# NOTE(review): the success return, loop advance and trailing
# "return None" (lines 582-587) are missing from this excerpt.
589 def _GetEndTokenAndContents(start_token):
590 """Returns last content token and all contents before FLAG_ENDING_TYPE token.
592 Comment prefixes are split into DOC_PREFIX tokens and stripped from the
596 start_token: The token immediately before the first content token.
599 The last content token and a string of all contents including start and
600 end tokens, with comment prefixes stripped.
602 iterator = start_token
603 last_line = iterator.line_number
# NOTE(review): initialization of last_token, contents and doc_depth
# (lines 604-606) is missing from this excerpt.
607 while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
608 if (iterator.IsFirstInLine() and
609 DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
610 # If we have a blank comment line, consider that an implicit
611 # ending of the description. This handles a case like:
613 # * @return {boolean} True
615 # * Note: This is a sentence.
617 # The note is not part of the @return description, but there was
618 # no definitive ending token. Rather there was a line containing
619 # only a doc comment prefix or whitespace.
623 # don't prematurely match against a @flag if inside a doc flag
624 # need to think about what is the correct behavior for unterminated
# Track inline-flag brace nesting so an embedded {@flag} doesn't end
# the description early.
626 if (iterator.type == Type.DOC_START_BRACE and
627 iterator.next.type == Type.DOC_INLINE_FLAG):
629 elif (iterator.type == Type.DOC_END_BRACE and
633 if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
634 contents += iterator.string
635 last_token = iterator
637 iterator = iterator.next
638 if iterator.line_number != last_line:
640 last_line = iterator.line_number
642 end_token = last_token
643 if DocFlag.EMPTY_STRING.match(contents):
# NOTE(review): the branch returning (None, None) for empty contents
# (lines 644-645) is missing from this excerpt.
646 # Strip trailing newline.
647 contents = contents[:-1]
649 return end_token, contents
class Function(object):
  """Data about a JavaScript function.

  Attributes:
    block_depth: Block depth the function began at.
    doc: The DocComment associated with the function.
    has_return: If the function has a return value.
    has_throw: If the function throws an exception.
    has_this: If the function references the 'this' object.
    is_assigned: If the function is part of an assignment.
    is_constructor: If the function is a constructor.
    is_interface: If the function is documented as an interface.
    name: The name of the function, whether given in the function keyword or
        as the lvalue the function is assigned to.
    start_token: First token of the function (the function keyword token).
    end_token: Last token of the function (the closing '}' token).
    parameters: List of parameter names.
  """

  def __init__(self, block_depth, is_assigned, doc, name):
    self.block_depth = block_depth
    self.is_assigned = is_assigned
    # A function is a constructor/interface only if its doc comment says so;
    # when doc is None these short-circuit to None (falsy), which callers
    # treat the same as False.
    self.is_constructor = doc and doc.HasFlag('constructor')
    self.is_interface = doc and doc.HasFlag('interface')
    self.has_return = False
    self.has_throw = False
    self.has_this = False
    # Restored: the original truncated listing dropped these two assignments,
    # although both attributes are documented above and read elsewhere
    # (e.g. StateTracker.InInterfaceMethod uses .name).
    self.name = name
    self.doc = doc
    # Filled in by the state tracker as the token stream is processed.
    self.start_token = None
    self.end_token = None
    self.parameters = None
684 class StateTracker(object):
685 """EcmaScript state tracker.
687 Tracks block depth, function names, etc. within an EcmaScript token stream.
# NOTE(review): gaps in the fused line numbering mark lines missing from
# this excerpt (class constants such as OBJECT_LITERAL, several method
# headers, and assorted statements).
693 def __init__(self, doc_flag=DocFlag):
694 """Initializes a JavaScript token stream state tracker.
697 doc_flag: An optional custom DocFlag used for validating
700 self._doc_flag = doc_flag
# NOTE(review): the "def Reset(self):" header (lines 702-703) is missing
# before this docstring.
704 """Resets the state tracker to prepare for processing a new page."""
705 self._block_depth = 0
706 self._is_block_close = False
707 self._paren_depth = 0
708 self._function_stack = []
709 self._functions_by_name = {}
710 self._last_comment = None
711 self._doc_comment = None
712 self._cumulative_params = None
713 self._block_types = []
714 self._last_non_space_token = None
715 self._last_line = None
716 self._first_token = None
717 self._documented_identifiers = set()
718 self._variables_in_scope = []
720 def InFunction(self):
721 """Returns true if the current token is within a function.
724 True if the current token is within a function.
726 return bool(self._function_stack)
728 def InConstructor(self):
729 """Returns true if the current token is within a constructor.
732 True if the current token is within a constructor.
734 return self.InFunction() and self._function_stack[-1].is_constructor
736 def InInterfaceMethod(self):
737 """Returns true if the current token is within an interface method.
740 True if the current token is within an interface method.
742 if self.InFunction():
743 if self._function_stack[-1].is_interface:
# A Foo.prototype.bar method is an interface method when Foo itself was
# declared @interface.
746 name = self._function_stack[-1].name
747 prototype_index = name.find('.prototype.')
748 if prototype_index != -1:
749 class_function_name = name[0:prototype_index]
750 if (class_function_name in self._functions_by_name and
751 self._functions_by_name[class_function_name].is_interface):
756 def InTopLevelFunction(self):
757 """Returns true if the current token is within a top level function.
760 True if the current token is within a top level function.
762 return len(self._function_stack) == 1 and self.InTopLevel()
764 def InAssignedFunction(self):
765 """Returns true if the current token is within a function variable.
768 True if if the current token is within a function variable
770 return self.InFunction() and self._function_stack[-1].is_assigned
772 def IsFunctionOpen(self):
773 """Returns true if the current token is a function block open.
776 True if the current token is a function block open.
778 return (self._function_stack and
779 self._function_stack[-1].block_depth == self._block_depth - 1)
781 def IsFunctionClose(self):
782 """Returns true if the current token is a function block close.
785 True if the current token is a function block close.
787 return (self._function_stack and
788 self._function_stack[-1].block_depth == self._block_depth)
# NOTE(review): the "def InBlock(self):" header (around line 790) is
# missing before this docstring.
791 """Returns true if the current token is within a block.
794 True if the current token is within a block.
796 return bool(self._block_depth)
798 def IsBlockClose(self):
799 """Returns true if the current token is a block close.
802 True if the current token is a block close.
804 return self._is_block_close
806 def InObjectLiteral(self):
807 """Returns true if the current token is within an object literal.
810 True if the current token is within an object literal.
812 return self._block_depth and self._block_types[-1] == self.OBJECT_LITERAL
814 def InObjectLiteralDescendant(self):
815 """Returns true if the current token has an object literal ancestor.
818 True if the current token has an object literal ancestor.
820 return self.OBJECT_LITERAL in self._block_types
822 def InParentheses(self):
823 """Returns true if the current token is within parentheses.
826 True if the current token is within parentheses.
828 return bool(self._paren_depth)
830 def ParenthesesDepth(self):
831 """Returns the number of parens surrounding the token.
834 The number of parenthesis surrounding the token.
836 return self._paren_depth
838 def BlockDepth(self):
839 """Returns the number of blocks in which the token is nested.
842 The number of blocks in which the token is nested.
844 return self._block_depth
846 def FunctionDepth(self):
847 """Returns the number of functions in which the token is nested.
850 The number of functions in which the token is nested.
852 return len(self._function_stack)
854 def InTopLevel(self):
855 """Whether we are at the top level in the class.
857 This function call is language specific. In some languages like
858 JavaScript, a function is top level if it is not inside any parenthesis.
859 In languages such as ActionScript, a function is top level if it is directly
862 raise TypeError('Abstract method InTopLevel not implemented')
864 def GetBlockType(self, token):
865 """Determine the block type given a START_BLOCK token.
867 Code blocks come after parameters, keywords like else, and closing parens.
870 token: The current token. Can be assumed to be type START_BLOCK.
872 Code block type for current token.
874 raise TypeError('Abstract method GetBlockType not implemented')
# NOTE(review): the "def GetParams(self):" header (around line 876) is
# missing before this docstring.
877 """Returns the accumulated input params as an array.
879 In some EcmasSript languages, input params are specified like
880 (param:Type, param2:Type2, ...)
881 in other they are specified just as
883 We handle both formats for specifying parameters here and leave
884 it to the compilers for each language to detect compile errors.
885 This allows more code to be reused between lint checkers for various
886 EcmaScript languages.
889 The accumulated input params as an array.
892 if self._cumulative_params:
893 params = re.compile(r'\s+').sub('', self._cumulative_params).split(',')
894 # Strip out the type from parameters of the form name:Type.
895 params = map(lambda param: param.split(':')[0], params)
899 def GetLastComment(self):
900 """Return the last plain comment that could be used as documentation.
903 The last plain comment that could be used as documentation.
905 return self._last_comment
907 def GetDocComment(self):
908 """Return the most recent applicable documentation comment.
911 The last applicable documentation comment.
913 return self._doc_comment
915 def HasDocComment(self, identifier):
916 """Returns whether the identifier has been documented yet.
919 identifier: The identifier.
922 Whether the identifier has been documented yet.
924 return identifier in self._documented_identifiers
926 def InDocComment(self):
927 """Returns whether the current token is in a doc comment.
930 Whether the current token is in a doc comment.
932 return self._doc_comment and self._doc_comment.end_token is None
934 def GetDocFlag(self):
935 """Returns the current documentation flags.
938 The current documentation flags.
940 return self._doc_flag
# Returns whether token t lies inside the {type} portion of a doc flag.
942 def IsTypeToken(self, t):
943 if self.InDocComment() and t.type not in (Type.START_DOC_COMMENT,
944 Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX):
945 f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT],
947 if (f and f.attached_object.type_start_token is not None and
948 f.attached_object.type_end_token is not None):
949 return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and
950 tokenutil.Compare(t, f.attached_object.type_end_token) < 0)
953 def GetFunction(self):
954 """Return the function the current code block is a part of.
957 The current Function object.
959 if self._function_stack:
960 return self._function_stack[-1]
962 def GetBlockDepth(self):
963 """Return the block depth.
966 The current block depth.
968 return self._block_depth
970 def GetLastNonSpaceToken(self):
971 """Return the last non whitespace token."""
972 return self._last_non_space_token
974 def GetLastLine(self):
975 """Return the last line."""
976 return self._last_line
978 def GetFirstToken(self):
979 """Return the very first token in the file."""
980 return self._first_token
982 def IsVariableInScope(self, token_string):
983 """Checks if string is variable in current scope.
985 For given string it checks whether the string is a defined variable
986 (including function param) in current state.
988 E.g. if variables defined (variables in current scope) is docs
989 then docs, docs.length etc will be considered as variable in current
990 scope. This will help in avoding extra goog.require for variables.
993 token_string: String to check if its is a variable in current scope.
996 true if given string is a variable in current scope.
998 for variable in self._variables_in_scope:
999 if (token_string == variable
1000 or token_string.startswith(variable + '.')):
1005 def HandleToken(self, token, last_non_space_token):
1006 """Handles the given token and updates state.
1009 token: The token to handle.
1010 last_non_space_token:
1012 self._is_block_close = False
1014 if not self._first_token:
1015 self._first_token = token
1017 # Track block depth.
# NOTE(review): the binding of the local name 'type' (presumably
# "type = token.type", around line 1018) is missing from this excerpt.
1019 if type == Type.START_BLOCK:
1020 self._block_depth += 1
1022 # Subclasses need to handle block start very differently because
1023 # whether a block is a CODE or OBJECT_LITERAL block varies significantly
1025 self._block_types.append(self.GetBlockType(token))
1027 # When entering a function body, record its parameters.
1028 if self.InFunction():
1029 function = self._function_stack[-1]
1030 if self._block_depth == function.block_depth + 1:
1031 function.parameters = self.GetParams()
1033 # Track block depth.
1034 elif type == Type.END_BLOCK:
1035 self._is_block_close = not self.InObjectLiteral()
1036 self._block_depth -= 1
1037 self._block_types.pop()
1039 # Track parentheses depth.
1040 elif type == Type.START_PAREN:
1041 self._paren_depth += 1
1043 # Track parentheses depth.
1044 elif type == Type.END_PAREN:
1045 self._paren_depth -= 1
1047 elif type == Type.COMMENT:
1048 self._last_comment = token.string
1050 elif type == Type.START_DOC_COMMENT:
1051 self._last_comment = None
1052 self._doc_comment = DocComment(token)
1054 elif type == Type.END_DOC_COMMENT:
1055 self._doc_comment.end_token = token
1057 elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
# self._doc_flag is the (possibly custom) DocFlag class passed to __init__.
1058 flag = self._doc_flag(token)
1059 token.attached_object = flag
1060 self._doc_comment.AddFlag(flag)
1062 if flag.flag_type == 'suppress':
1063 self._doc_comment.AddSuppression(token)
1065 elif type == Type.FUNCTION_DECLARATION:
1066 last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
1069 # Only functions outside of parens are eligible for documentation.
1070 if not self._paren_depth:
1071 doc = self._doc_comment
1074 is_assigned = last_code and (last_code.IsOperator('=') or
1075 last_code.IsOperator('||') or last_code.IsOperator('&&') or
1076 (last_code.IsOperator(':') and not self.InObjectLiteral()))
1078 # TODO(robbyw): This breaks for x[2] = ...
1079 # Must use loop to find full function name in the case of line-wrapped
1080 # declarations (bug 1220601) like:
1082 # bar = function() ...
1083 identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True)
1084 while identifier and identifier.type in (
1085 Type.IDENTIFIER, Type.SIMPLE_LVALUE):
1086 name = identifier.string + name
1087 # Traverse behind us, skipping whitespace and comments.
1089 identifier = identifier.previous
1090 if not identifier or not identifier.type in Type.NON_CODE_TYPES:
# Named function declarations: accumulate the FUNCTION_NAME tokens.
1094 next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
1095 while next_token and next_token.IsType(Type.FUNCTION_NAME):
1096 name += next_token.string
1097 next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2)
1099 function = Function(self._block_depth, is_assigned, doc, name)
1100 function.start_token = token
1102 self._function_stack.append(function)
1103 self._functions_by_name[name] = function
1105 # Add a delimiter in stack for scope variables to define start of
1106 # function. This helps in popping variables of this function when
1107 # function declaration ends.
1108 self._variables_in_scope.append('')
1110 elif type == Type.START_PARAMETERS:
1111 self._cumulative_params = ''
1113 elif type == Type.PARAMETERS:
1114 self._cumulative_params += token.string
1115 self._variables_in_scope.extend(self.GetParams())
1117 elif type == Type.KEYWORD and token.string == 'return':
1118 next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
1119 if not next_token.IsType(Type.SEMICOLON):
1120 function = self.GetFunction()
1122 function.has_return = True
1124 elif type == Type.KEYWORD and token.string == 'throw':
1125 function = self.GetFunction()
1127 function.has_throw = True
1129 elif type == Type.KEYWORD and token.string == 'var':
1130 function = self.GetFunction()
1131 next_token = tokenutil.Search(token, [Type.IDENTIFIER,
1132 Type.SIMPLE_LVALUE])
# An lvalue token stores the variable name in its values dict.
1135 if next_token.type == Type.SIMPLE_LVALUE:
1136 self._variables_in_scope.append(next_token.values['identifier'])
1138 self._variables_in_scope.append(next_token.string)
1140 elif type == Type.SIMPLE_LVALUE:
1141 identifier = token.values['identifier']
1142 jsdoc = self.GetDocComment()
1144 self._documented_identifiers.add(identifier)
1146 self._HandleIdentifier(identifier, True)
1148 elif type == Type.IDENTIFIER:
1149 self._HandleIdentifier(token.string, False)
1151 # Detect documented non-assignments.
1152 next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
1153 if next_token and next_token.IsType(Type.SEMICOLON):
1154 if (self._last_non_space_token and
1155 self._last_non_space_token.IsType(Type.END_DOC_COMMENT)):
1156 self._documented_identifiers.add(token.string)
1158 def _HandleIdentifier(self, identifier, is_assignment):
1159 """Process the given identifier.
1161 Currently checks if it references 'this' and annotates the function
1165 identifier: The identifer to process.
1166 is_assignment: Whether the identifer is being written to.
1168 if identifier == 'this' or identifier.startswith('this.'):
1169 function = self.GetFunction()
1171 function.has_this = True
1173 def HandleAfterToken(self, token):
1174 """Handle updating state after a token has been checked.
1176 This function should be used for destructive state changes such as
1177 deleting a tracked object.
1180 token: The token to handle.
# NOTE(review): the binding of 'type' (presumably "type = token.type",
# around line 1182) is missing from this excerpt.
1183 if type == Type.SEMICOLON or type == Type.END_PAREN or (
1184 type == Type.END_BRACKET and
1185 self._last_non_space_token.type not in (
1186 Type.SINGLE_QUOTE_STRING_END, Type.DOUBLE_QUOTE_STRING_END)):
1187 # We end on any numeric array index, but keep going for string based
1188 # array indices so that we pick up manually exported identifiers.
1189 self._doc_comment = None
1190 self._last_comment = None
1192 elif type == Type.END_BLOCK:
1193 self._doc_comment = None
1194 self._last_comment = None
1196 if self.InFunction() and self.IsFunctionClose():
1197 # TODO(robbyw): Detect the function's name for better errors.
1198 function = self._function_stack.pop()
1199 function.end_token = token
1201 # Pop all variables till delimiter ('') those were defined in the
1202 # function being closed so make them out of scope.
1203 while self._variables_in_scope and self._variables_in_scope[-1]:
1204 self._variables_in_scope.pop()
# Also pop the '' delimiter itself.
1207 if self._variables_in_scope:
1208 self._variables_in_scope.pop()
1210 elif type == Type.END_PARAMETERS and self._doc_comment:
1211 self._doc_comment = None
1212 self._last_comment = None
1214 if not token.IsAnyType(Type.WHITESPACE, Type.BLANK_LINE):
1215 self._last_non_space_token = token
1217 self._last_line = token.line