# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Light weight EcmaScript state tracker that reads tokens and tracks state."""

__author__ = ('robbyw@google.com (Robert Walker)',
              'ajp@google.com (Andy Perelson)')

# re is required by DocFlag's compiled patterns and StateTracker.GetParams;
# it was missing from the visible import block.
import re

from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import tokenutil

# Shorthand for the JavaScript token type enumeration used throughout.
Type = javascripttokens.JavaScriptTokenType
class DocFlag(object):
  """Generic doc flag object.

  Attributes:
    flag_type: param, return, define, type, etc.
    flag_token: The flag token.
    type_start_token: The first token specifying the flag type,
        including braces.
    type_end_token: The last token specifying the flag type,
        including braces.
    type: The type spec string, or None if the flag carries no type.
    name_token: The token specifying the flag name.
    name: The flag name string, or None if the flag carries no name.
    description_start_token: The first token in the description.
    description_end_token: The end token in the description.
    description: The description.
  """

  # Please keep these lists alphabetized.

  # The list of standard jsdoc tags is from the upstream Closure Linter.
  # NOTE(review): the extraction dropped the list body; entries below are
  # reconstructed from the upstream source -- verify against history.
  STANDARD_DOC = frozenset([
      'author',
      'bug',
      'const',
      'constructor',
      'define',
      'deprecated',
      'enum',
      'export',
      'extends',
      'externs',
      'fileoverview',
      'implements',
      'implicitCast',
      'interface',
      'license',
      'noalias',
      'nocompile',
      'nosideeffects',
      'override',
      'owner',
      'param',
      'preserve',
      'private',
      'return',
      'see',
      'supported',
      'template',
      'this',
      'type',
      'typedef',
      ])

  ANNOTATION = frozenset(['preserveTry', 'suppress'])

  LEGAL_DOC = STANDARD_DOC | ANNOTATION

  # Includes all Closure Compiler @suppress types.
  # Not all of these annotations are interpreted by Closure Linter.
  #
  # Specific cases:
  # - accessControls is supported by the compiler at the expression
  #   and method level to suppress warnings about private/protected
  #   access (method level applies to all references in the method).
  #   The linter mimics the compiler behavior.
  # NOTE(review): list body reconstructed from upstream -- verify.
  SUPPRESS_TYPES = frozenset([
      'accessControls',
      'ambiguousFunctionDecl',
      'checkRegExp',
      'checkTypes',
      'checkVars',
      'constantProperty',
      'deprecated',
      'duplicate',
      'es5Strict',
      'externsValidation',
      'extraProvide',
      'extraRequire',
      'fileoverviewTags',
      'globalThis',
      'internetExplorerChecks',
      'invalidCasts',
      'missingProperties',
      'missingProvide',
      'missingRequire',
      'nonStandardJsDocs',
      'strictModuleDepCheck',
      'tweakValidation',
      'typeInvalidation',
      'undefinedVars',
      'underscore',
      'unknownDefines',
      'uselessCode',
      'visibility',
      'with'])

  HAS_DESCRIPTION = frozenset([
      'define', 'deprecated', 'desc', 'fileoverview', 'license', 'param',
      'preserve', 'return', 'supported'])

  HAS_TYPE = frozenset([
      'define', 'enum', 'extends', 'implements', 'param', 'return', 'type',
      'suppress'])

  TYPE_ONLY = frozenset(['enum', 'extends', 'implements', 'suppress', 'type'])

  HAS_NAME = frozenset(['param'])

  # Matches a comment line that is blank except for an optional '*' prefix.
  EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$')
  EMPTY_STRING = re.compile(r'^\s*$')

  def __init__(self, flag_token):
    """Creates the DocFlag object and attaches it to the given start token.

    Args:
      flag_token: The starting token of the flag.
    """
    self.flag_token = flag_token
    self.flag_type = flag_token.string.strip().lstrip('@')

    # Extract type, if applicable.
    self.type = None
    self.type_start_token = None
    self.type_end_token = None
    if self.flag_type in self.HAS_TYPE:
      # Type is usually enclosed in braces: @param {string} foo.
      brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE],
                                    Type.FLAG_ENDING_TYPES)
      if brace:
        end_token, contents = _GetMatchingEndBraceAndContents(brace)
        self.type = contents
        self.type_start_token = brace
        self.type_end_token = end_token
      elif (self.flag_type in self.TYPE_ONLY and
            flag_token.next.type not in Type.FLAG_ENDING_TYPES):
        # Braceless type: everything up to the flag end is the type.
        self.type_start_token = flag_token.next
        self.type_end_token, self.type = _GetEndTokenAndContents(
            self.type_start_token)
        if self.type is not None:
          self.type = self.type.strip()

    # Extract name, if applicable.
    self.name_token = None
    self.name = None
    if self.flag_type in self.HAS_NAME:
      # Handle bad case, name could be immediately after flag token.
      self.name_token = _GetNextIdentifierToken(flag_token)

      # Handle good case, if found token is after type start, look for
      # identifier after type end, since types contain identifiers.
      if (self.type and self.name_token and
          tokenutil.Compare(self.name_token, self.type_start_token) > 0):
        self.name_token = _GetNextIdentifierToken(self.type_end_token)

      if self.name_token:
        self.name = self.name_token.string

    # Extract description, if applicable.
    self.description_start_token = None
    self.description_end_token = None
    self.description = None
    if self.flag_type in self.HAS_DESCRIPTION:
      # Start searching after whichever of name/type appears last.
      search_start_token = flag_token
      if self.name_token and self.type_end_token:
        if tokenutil.Compare(self.type_end_token, self.name_token) > 0:
          search_start_token = self.type_end_token
        else:
          search_start_token = self.name_token
      elif self.name_token:
        search_start_token = self.name_token
      elif self.type_end_token:
        search_start_token = self.type_end_token

      interesting_token = tokenutil.Search(search_start_token,
          Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES)
      if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES:
        self.description_start_token = interesting_token
        self.description_end_token, self.description = (
            _GetEndTokenAndContents(interesting_token))
class DocComment(object):
  """JavaScript doc comment object.

  Attributes:
    ordered_params: Ordered list of parameters documented.
    start_token: The token that starts the doc comment.
    end_token: The token that ends the doc comment.
    suppressions: Map of suppression type to the token that added it.
  """

  def __init__(self, start_token):
    """Create the doc comment object.

    Args:
      start_token: The first token in the doc comment.
    """
    self.__params = {}
    self.ordered_params = []
    self.__flags = {}
    self.start_token = start_token
    self.end_token = None
    self.suppressions = {}
    self.invalidated = False

  def Invalidate(self):
    """Indicate that the JSDoc is well-formed but we had problems parsing it.

    This is a short-circuiting mechanism so that we don't emit false
    positives about well-formed doc comments just because we don't support
    parsing some of their structure.
    """
    self.invalidated = True

  def IsInvalidated(self):
    """Test whether Invalidate() has been called."""
    return self.invalidated

  def AddParam(self, name, param_type):
    """Add a new documented parameter.

    Args:
      name: The name of the parameter to document.
      param_type: The parameter's declared JavaScript type.
    """
    self.ordered_params.append(name)
    self.__params[name] = param_type

  def AddSuppression(self, token):
    """Add a new error suppression flag.

    Args:
      token: The suppression flag token.
    """
    #TODO(user): Error if no braces
    brace = tokenutil.SearchUntil(token, [Type.DOC_START_BRACE],
                                  [Type.DOC_FLAG])
    if brace:
      end_token, contents = _GetMatchingEndBraceAndContents(brace)
      # Multiple suppressions may be pipe-separated: @suppress {a|b}.
      for suppression in contents.split('|'):
        self.suppressions[suppression] = token

  def SuppressionOnly(self):
    """Returns whether this comment contains only suppression flags."""
    for flag_type in self.__flags.keys():
      if flag_type != 'suppress':
        return False
    return True

  def AddFlag(self, flag):
    """Add a new document flag.

    Args:
      flag: DocFlag object.
    """
    self.__flags[flag.flag_type] = flag

  def InheritsDocumentation(self):
    """Test if the jsdoc implies documentation inheritance.

    Returns:
      True if documentation may be pulled off the superclass.
    """
    return self.HasFlag('inheritDoc') or self.HasFlag('override')

  def HasFlag(self, flag_type):
    """Test if the given flag has been set.

    Args:
      flag_type: The type of the flag to check.

    Returns:
      True if the flag is set.
    """
    return flag_type in self.__flags

  def GetFlag(self, flag_type):
    """Gets the last flag of the given type.

    Args:
      flag_type: The type of the flag to get.

    Returns:
      The last instance of the given flag type in this doc comment.
    """
    return self.__flags[flag_type]

  def CompareParameters(self, params):
    """Computes the edit distance and list from the function params to the docs.

    Uses the Levenshtein edit distance algorithm, with code modified from
    http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python

    Args:
      params: The parameter list for the function declaration.

    Returns:
      The edit distance, the edit list ('=' match, 'S' substitute,
      'I' insert, 'D' delete).
    """
    source_len, target_len = len(self.ordered_params), len(params)

    # First row/column represent transforming from/to the empty list.
    edit_lists = [[]]
    distance = [[]]
    for i in range(target_len + 1):
      edit_lists[0].append(['I'] * i)
      distance[0].append(i)

    for j in range(1, source_len + 1):
      edit_lists.append([['D'] * j])
      distance.append([j])

    for i in range(source_len):
      for j in range(target_len):
        cost = 1
        if self.ordered_params[i] == params[j]:
          cost = 0

        deletion = distance[i][j + 1] + 1
        insertion = distance[i + 1][j] + 1
        substitution = distance[i][j] + cost

        if deletion <= insertion and deletion <= substitution:
          # Deletion is best.
          best = deletion
          edit_list = list(edit_lists[i][j + 1])
          edit_list.append('D')

        elif insertion <= substitution:
          # Insertion is best.
          best = insertion
          edit_list = list(edit_lists[i + 1][j])
          edit_list.append('I')

        else:
          # Substitution is best.
          best = substitution
          edit_list = list(edit_lists[i][j])
          if cost:
            edit_list.append('S')
          else:
            edit_list.append('=')

        edit_lists[i + 1].append(edit_list)
        distance[i + 1].append(best)

    return distance[source_len][target_len], edit_lists[source_len][target_len]

  def __repr__(self):
    """Returns a string representation of this object.

    Returns:
      A string representation of this object.
    """
    return '<DocComment: %s, %s>' % (str(self.__params), str(self.__flags))
# Helper methods used by DocFlag and DocComment to parse out flag information.


def _GetMatchingEndBraceAndContents(start_brace):
  """Returns the matching end brace and contents between the two braces.

  If any FLAG_ENDING_TYPE token is encountered before a matching end brace,
  then that token is used as the matching ending token. Contents will have all
  comment prefixes stripped out of them, and all comment prefixes in between
  the start and end tokens will be split out into separate DOC_PREFIX tokens.

  Args:
    start_brace: The DOC_START_BRACE token immediately before desired contents.

  Returns:
    The matching ending token (DOC_END_BRACE or FLAG_ENDING_TYPE) and a string
    of the contents between the matching tokens, minus any comment prefixes.
  """
  open_count = 1
  close_count = 0
  contents = []

  # We don't consider the start brace part of the type string.
  token = start_brace.next
  while open_count != close_count:
    if token.type == Type.DOC_START_BRACE:
      open_count += 1
    elif token.type == Type.DOC_END_BRACE:
      close_count += 1

    # DOC_PREFIX tokens are comment decoration, not type contents.
    if token.type != Type.DOC_PREFIX:
      contents.append(token.string)

    # A flag-ending token terminates the search even without a matching brace.
    if token.type in Type.FLAG_ENDING_TYPES:
      break
    token = token.next

  # Don't include the end token (end brace, end doc comment, etc.) in type.
  token = token.previous
  contents = contents[:-1]

  return token, ''.join(contents)
def _GetNextIdentifierToken(start_token):
  """Searches for and returns the first token that is wholly an identifier.

  Searches each token after the start to see if it consists entirely of an
  identifier. If no identifier is found, returns None and changes no tokens.
  Search is abandoned when a FLAG_ENDING_TYPE token is found.

  NOTE(review): an earlier docstring claimed this splits the found token into
  whitespace/identifier/rest pieces; the implementation visible here only
  returns whole-identifier COMMENT tokens -- confirm against callers.

  Args:
    start_token: The token to start searching after.

  Returns:
    The identifier token if found, None otherwise.
  """
  token = start_token.next

  while token and not token.type in Type.FLAG_ENDING_TYPES:
    match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.match(
        token.string)
    # Only accept when the entire token string is the identifier.
    if (match is not None and token.type == Type.COMMENT and
        len(token.string) == len(match.group(0))):
      return token

    token = token.next

  return None
def _GetEndTokenAndContents(start_token):
  """Returns last content token and all contents before FLAG_ENDING_TYPE token.

  Comment prefixes are split into DOC_PREFIX tokens and stripped from the
  returned contents.

  Args:
    start_token: The token immediately before the first content token.

  Returns:
    The last content token and a string of all contents including start and
    end tokens, with comment prefixes stripped.
  """
  iterator = start_token
  last_line = iterator.line_number
  last_token = None
  contents = ''
  doc_depth = 0
  while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
    if (iterator.IsFirstInLine() and
        DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
      # If we have a blank comment line, consider that an implicit
      # ending of the description. This handles a case like:
      #
      # * @return {boolean} True
      # *
      # * Note: This is a sentence.
      #
      # The note is not part of the @return description, but there was
      # no definitive ending token. Rather there was a line containing
      # only a doc comment prefix or whitespace.
      break

    # Don't prematurely match against a @flag if inside a doc flag.
    # Need to think about what is the correct behavior for unterminated
    # inline doc flags.
    if (iterator.type == Type.DOC_START_BRACE and
        iterator.next.type == Type.DOC_INLINE_FLAG):
      doc_depth += 1
    elif (iterator.type == Type.DOC_END_BRACE and
        doc_depth > 0):
      doc_depth -= 1

    if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
      contents += iterator.string
      last_token = iterator

    iterator = iterator.next
    if iterator.line_number != last_line:
      # Preserve line structure inside the description.
      contents += '\n'
      last_line = iterator.line_number

  end_token = last_token
  if DocFlag.EMPTY_STRING.match(contents):
    contents = None
  else:
    # Strip trailing newline.
    contents = contents[:-1]

  return end_token, contents
class Function(object):
  """Data about a JavaScript function.

  Attributes:
    block_depth: Block depth the function began at.
    doc: The DocComment associated with the function.
    has_return: If the function has a return value.
    has_throw: If the function ever throws.
    has_this: If the function references the 'this' object.
    is_assigned: If the function is part of an assignment.
    is_constructor: If the function is a constructor.
    is_interface: If the function is an interface declaration.
    name: The name of the function, whether given in the function keyword or
        as the lvalue the function is assigned to.
  """

  def __init__(self, block_depth, is_assigned, doc, name):
    self.block_depth = block_depth
    self.is_assigned = is_assigned
    # Constructor/interface status is derived from the doc comment flags;
    # with no doc these are falsy (None).
    self.is_constructor = doc and doc.HasFlag('constructor')
    self.is_interface = doc and doc.HasFlag('interface')
    self.has_return = False
    self.has_throw = False
    self.has_this = False
    self.name = name
    self.doc = doc
class StateTracker(object):
  """EcmaScript state tracker.

  Tracks block depth, function names, etc. within an EcmaScript token stream.
  """

  # Block type markers stored in self._block_types.
  OBJECT_LITERAL = 'o'
  CODE = 'c'

  def __init__(self, doc_flag=DocFlag):
    """Initializes a JavaScript token stream state tracker.

    Args:
      doc_flag: An optional custom DocFlag used for validating
          documentation flags.
    """
    self._doc_flag = doc_flag
    self.Reset()

  def Reset(self):
    """Resets the state tracker to prepare for processing a new page."""
    self._block_depth = 0
    self._is_block_close = False
    self._paren_depth = 0
    self._functions = []
    self._functions_by_name = {}
    self._last_comment = None
    self._doc_comment = None
    self._cumulative_params = None
    self._block_types = []
    self._last_non_space_token = None
    self._last_line = None
    self._first_token = None
    self._documented_identifiers = set()

  def InFunction(self):
    """Returns true if the current token is within a function."""
    return bool(self._functions)

  def InConstructor(self):
    """Returns true if the current token is within a constructor."""
    return self.InFunction() and self._functions[-1].is_constructor

  def InInterfaceMethod(self):
    """Returns true if the current token is within an interface method."""
    if self.InFunction():
      if self._functions[-1].is_interface:
        return True
      else:
        # A prototype method of a class declared @interface is also an
        # interface method.
        name = self._functions[-1].name
        prototype_index = name.find('.prototype.')
        if prototype_index != -1:
          class_function_name = name[0:prototype_index]
          if (class_function_name in self._functions_by_name and
              self._functions_by_name[class_function_name].is_interface):
            return True

    return False

  def InTopLevelFunction(self):
    """Returns true if the current token is within a top level function."""
    return len(self._functions) == 1 and self.InTopLevel()

  def InAssignedFunction(self):
    """Returns true if the current token is within an assigned function."""
    return self.InFunction() and self._functions[-1].is_assigned

  def IsFunctionOpen(self):
    """Returns true if the current token is a function block open."""
    return (self._functions and
            self._functions[-1].block_depth == self._block_depth - 1)

  def IsFunctionClose(self):
    """Returns true if the current token is a function block close."""
    return (self._functions and
            self._functions[-1].block_depth == self._block_depth)

  def InBlock(self):
    """Returns true if the current token is within a block."""
    return bool(self._block_depth)

  def IsBlockClose(self):
    """Returns true if the current token is a block close."""
    return self._is_block_close

  def InObjectLiteral(self):
    """Returns true if the current token is within an object literal."""
    return self._block_depth and self._block_types[-1] == self.OBJECT_LITERAL

  def InObjectLiteralDescendant(self):
    """Returns true if the current token has an object literal ancestor."""
    return self.OBJECT_LITERAL in self._block_types

  def InParentheses(self):
    """Returns true if the current token is within parentheses."""
    return bool(self._paren_depth)

  def InTopLevel(self):
    """Whether we are at the top level in the class.

    This function call is language specific. In some languages like
    JavaScript, a function is top level if it is not inside any parenthesis.
    In languages such as ActionScript, a function is top level if it is
    directly within a class.
    """
    raise TypeError('Abstract method InTopLevel not implemented')

  def GetBlockType(self, token):
    """Determine the block type given a START_BLOCK token.

    Code blocks come after parameters, keywords like else, and closing parens.

    Args:
      token: The current token. Can be assumed to be type START_BLOCK.

    Returns:
      Code block type for current token.
    """
    raise TypeError('Abstract method GetBlockType not implemented')

  def GetParams(self):
    """Returns the accumulated input params as an array.

    In some EcmaScript languages, input params are specified like
    (param:Type, param2:Type2, ...) while in others they are specified just
    as (param, param2). We handle both formats for specifying parameters
    here and leave it to the compilers for each language to detect compile
    errors. This allows more code to be reused between lint checkers for
    various EcmaScript languages.

    Returns:
      The accumulated input params as an array.
    """
    params = []
    if self._cumulative_params:
      params = re.compile(r'\s+').sub('', self._cumulative_params).split(',')
      # Strip out the type from parameters of the form name:Type.
      params = [param.split(':')[0] for param in params]

    return params

  def GetLastComment(self):
    """Return the last plain comment that could be used as documentation."""
    return self._last_comment

  def GetDocComment(self):
    """Return the most recent applicable documentation comment."""
    return self._doc_comment

  def HasDocComment(self, identifier):
    """Returns whether the identifier has been documented yet.

    Args:
      identifier: The identifier.

    Returns:
      Whether the identifier has been documented yet.
    """
    return identifier in self._documented_identifiers

  def InDocComment(self):
    """Returns whether the current token is in a doc comment."""
    return self._doc_comment and self._doc_comment.end_token is None

  def GetDocFlag(self):
    """Returns the current documentation flag class."""
    return self._doc_flag

  def IsTypeToken(self, t):
    """Returns whether the given token is part of a doc flag's type spec."""
    if self.InDocComment() and t.type not in (Type.START_DOC_COMMENT,
        Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX):
      f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT],
                                None, True)
      if f and f.attached_object.type_start_token is not None:
        return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and
                tokenutil.Compare(t, f.attached_object.type_end_token) < 0)
    return False

  def GetFunction(self):
    """Return the function the current code block is a part of.

    Returns:
      The current Function object, or None when outside any function.
    """
    if self.InFunction():
      return self._functions[-1]

  def GetBlockDepth(self):
    """Return the current block depth."""
    return self._block_depth

  def GetLastNonSpaceToken(self):
    """Return the last non whitespace token."""
    return self._last_non_space_token

  def GetLastLine(self):
    """Return the last line."""
    return self._last_line

  def GetFirstToken(self):
    """Return the very first token in the file."""
    return self._first_token

  def HandleToken(self, token, last_non_space_token):
    """Handles the given token and updates state.

    Args:
      token: The token to handle.
      last_non_space_token: The last non whitespace token seen before this one.
    """
    self._is_block_close = False

    if not self._first_token:
      self._first_token = token

    # Track block depth.
    type = token.type
    if type == Type.START_BLOCK:
      self._block_depth += 1

      # Subclasses need to handle block start very differently because
      # whether a block is a CODE or OBJECT_LITERAL block varies significantly
      # by language.
      self._block_types.append(self.GetBlockType(token))

    # Track block depth.
    elif type == Type.END_BLOCK:
      self._is_block_close = not self.InObjectLiteral()
      self._block_depth -= 1
      self._block_types.pop()

    # Track parentheses depth.
    elif type == Type.START_PAREN:
      self._paren_depth += 1

    # Track parentheses depth.
    elif type == Type.END_PAREN:
      self._paren_depth -= 1

    elif type == Type.COMMENT:
      self._last_comment = token.string

    elif type == Type.START_DOC_COMMENT:
      self._last_comment = None
      self._doc_comment = DocComment(token)

    elif type == Type.END_DOC_COMMENT:
      self._doc_comment.end_token = token

    elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
      flag = self._doc_flag(token)
      token.attached_object = flag
      self._doc_comment.AddFlag(flag)

      if flag.flag_type == 'param' and flag.name:
        self._doc_comment.AddParam(flag.name, flag.type)
      elif flag.flag_type == 'suppress':
        self._doc_comment.AddSuppression(token)

    elif type == Type.FUNCTION_DECLARATION:
      last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
                                         True)
      doc = None
      # Only functions outside of parens are eligible for documentation.
      if not self._paren_depth:
        doc = self._doc_comment

      name = ''
      is_assigned = last_code and (last_code.IsOperator('=') or
          last_code.IsOperator('||') or last_code.IsOperator('&&') or
          (last_code.IsOperator(':') and not self.InObjectLiteral()))
      if is_assigned:
        # TODO(robbyw): This breaks for x[2] = ...
        # Must use loop to find full function name in the case of line-wrapped
        # declarations (bug 1220601) like:
        # my.function.foo.
        #     bar = function() ...
        identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True)
        while identifier and identifier.type in (
            Type.IDENTIFIER, Type.SIMPLE_LVALUE):
          name = identifier.string + name
          # Traverse behind us, skipping whitespace and comments.
          while True:
            identifier = identifier.previous
            if not identifier or not identifier.type in Type.NON_CODE_TYPES:
              break

      else:
        # Named function declaration: accumulate the FUNCTION_NAME tokens.
        next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
        while next_token and next_token.IsType(Type.FUNCTION_NAME):
          name += next_token.string
          next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2)

      function = Function(self._block_depth, is_assigned, doc, name)
      self._functions.append(function)
      self._functions_by_name[name] = function

    elif type == Type.START_PARAMETERS:
      self._cumulative_params = ''

    elif type == Type.PARAMETERS:
      self._cumulative_params += token.string

    elif type == Type.KEYWORD and token.string == 'return':
      next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
      if not next_token.IsType(Type.SEMICOLON):
        function = self.GetFunction()
        if function:
          function.has_return = True

    elif type == Type.KEYWORD and token.string == 'throw':
      function = self.GetFunction()
      if function:
        function.has_throw = True

    elif type == Type.SIMPLE_LVALUE:
      identifier = token.values['identifier']
      jsdoc = self.GetDocComment()
      if jsdoc:
        self._documented_identifiers.add(identifier)

      self._HandleIdentifier(identifier, True)

    elif type == Type.IDENTIFIER:
      self._HandleIdentifier(token.string, False)

      # Detect documented non-assignments.
      next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
      if next_token.IsType(Type.SEMICOLON):
        if (self._last_non_space_token and
            self._last_non_space_token.IsType(Type.END_DOC_COMMENT)):
          self._documented_identifiers.add(token.string)

  def _HandleIdentifier(self, identifier, is_assignment):
    """Process the given identifier.

    Currently checks if it references 'this' and annotates the enclosing
    function accordingly.

    Args:
      identifier: The identifier to process.
      is_assignment: Whether the identifier is being written to.
    """
    if identifier == 'this' or identifier.startswith('this.'):
      function = self.GetFunction()
      if function:
        function.has_this = True

  def HandleAfterToken(self, token):
    """Handle updating state after a token has been checked.

    This function should be used for destructive state changes such as
    deleting a tracked object.

    Args:
      token: The token to handle.
    """
    type = token.type
    if type == Type.SEMICOLON or type == Type.END_PAREN or (
        type == Type.END_BRACKET and
        self._last_non_space_token.type not in (
            Type.SINGLE_QUOTE_STRING_END, Type.DOUBLE_QUOTE_STRING_END)):
      # We end on any numeric array index, but keep going for string based
      # array indices so that we pick up manually exported identifiers.
      self._doc_comment = None
      self._last_comment = None

    elif type == Type.END_BLOCK:
      self._doc_comment = None
      self._last_comment = None

      if self.InFunction() and self.IsFunctionClose():
        # TODO(robbyw): Detect the function's name for better errors.
        self._functions.pop()

    elif type == Type.END_PARAMETERS and self._doc_comment:
      self._doc_comment = None
      self._last_comment = None

    if not token.IsAnyType(Type.WHITESPACE, Type.BLANK_LINE):
      self._last_non_space_token = token

    self._last_line = token.line