Metadata-Version: 1.0
Name: closure_linter
-Version: 2.2.6
+Version: 2.3.5
Summary: Closure Linter
Home-page: http://code.google.com/p/closure-linter
Author: The Closure Linter Authors
+++ /dev/null
-Metadata-Version: 1.0
-Name: closure-linter
-Version: 2.2.6
-Summary: Closure Linter
-Home-page: http://code.google.com/p/closure-linter
-Author: The Closure Linter Authors
-Author-email: opensource@google.com
-License: Apache
-Description: UNKNOWN
-Platform: UNKNOWN
+++ /dev/null
-README
-setup.py
-closure_linter/__init__.py
-closure_linter/checker.py
-closure_linter/checkerbase.py
-closure_linter/ecmalintrules.py
-closure_linter/ecmametadatapass.py
-closure_linter/error_fixer.py
-closure_linter/errorrules.py
-closure_linter/errors.py
-closure_linter/fixjsstyle.py
-closure_linter/fixjsstyle_test.py
-closure_linter/full_test.py
-closure_linter/gjslint.py
-closure_linter/indentation.py
-closure_linter/javascriptlintrules.py
-closure_linter/javascriptstatetracker.py
-closure_linter/javascriptstatetracker_test.py
-closure_linter/javascripttokenizer.py
-closure_linter/javascripttokens.py
-closure_linter/statetracker.py
-closure_linter/tokenutil.py
-closure_linter.egg-info/PKG-INFO
-closure_linter.egg-info/SOURCES.txt
-closure_linter.egg-info/dependency_links.txt
-closure_linter.egg-info/entry_points.txt
-closure_linter.egg-info/requires.txt
-closure_linter.egg-info/top_level.txt
-closure_linter/common/__init__.py
-closure_linter/common/error.py
-closure_linter/common/erroraccumulator.py
-closure_linter/common/errorhandler.py
-closure_linter/common/errorprinter.py
-closure_linter/common/filetestcase.py
-closure_linter/common/htmlutil.py
-closure_linter/common/lintrunner.py
-closure_linter/common/matcher.py
-closure_linter/common/position.py
-closure_linter/common/simplefileflags.py
-closure_linter/common/tokenizer.py
-closure_linter/common/tokens.py
\ No newline at end of file
+++ /dev/null
-[console_scripts]
-fixjsstyle = closure_linter.fixjsstyle:main
-gjslint = closure_linter.gjslint:main
-
+++ /dev/null
-python-gflags
\ No newline at end of file
+++ /dev/null
-closure_linter
#!/usr/bin/env python
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Package indicator for gjslint."""
import gflags as flags
from closure_linter import checkerbase
+from closure_linter import closurizednamespacesinfo
from closure_linter import ecmametadatapass
-from closure_linter import errors
from closure_linter import javascriptlintrules
from closure_linter import javascriptstatetracker
-from closure_linter.common import errorprinter
from closure_linter.common import lintrunner
flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
'report errors for missing documentation, some missing '
'descriptions, or methods whose @return tags don\'t have a '
'matching return statement.')
+flags.DEFINE_list('closurized_namespaces', '',
+                  'Namespace prefixes, used for testing of '
+ 'goog.provide/require')
+flags.DEFINE_list('ignored_extra_namespaces', '',
+                  'Fully qualified namespaces that should not be reported '
+ 'as extra by the linter.')
class JavaScriptStyleChecker(checkerbase.CheckerBase):
"""Initialize a JavaScriptStyleChecker object.
Args:
- error_handler: Error handler to pass all errors to
+ error_handler: Error handler to pass all errors to.
"""
+ self._namespaces_info = None
+ if flags.FLAGS.closurized_namespaces:
+ self._namespaces_info = (
+ closurizednamespacesinfo.ClosurizedNamespacesInfo(
+ flags.FLAGS.closurized_namespaces,
+ flags.FLAGS.ignored_extra_namespaces))
+
checkerbase.CheckerBase.__init__(
self,
error_handler=error_handler,
- lint_rules=javascriptlintrules.JavaScriptLintRules(),
- state_tracker=javascriptstatetracker.JavaScriptStateTracker(
- closurized_namespaces=flags.FLAGS.closurized_namespaces),
+ lint_rules=javascriptlintrules.JavaScriptLintRules(
+ self._namespaces_info),
+ state_tracker=javascriptstatetracker.JavaScriptStateTracker(),
metadata_pass=ecmametadatapass.EcmaMetaDataPass(),
limited_doc_files=flags.FLAGS.limited_doc_files)
+ def _CheckTokens(self, token, parse_error, debug_tokens):
+ """Checks a token stream for lint warnings/errors.
+
+ Adds a separate pass for computing dependency information based on
+ goog.require and goog.provide statements prior to the main linting pass.
+
+ Args:
+ token: The first token in the token stream.
+ parse_error: A ParseError if any errors occurred.
+ debug_tokens: Whether every token should be printed as it is encountered
+ during the pass.
+
+ Returns:
+ A boolean indicating whether the full token stream could be checked or if
+ checking failed prematurely.
+ """
+ # To maximize the amount of errors that get reported before a parse error
+ # is displayed, don't run the dependency pass if a parse error exists.
+ if self._namespaces_info and not parse_error:
+ self._namespaces_info.Reset()
+ result = (self._ExecutePass(token, self._DependencyPass) and
+ self._ExecutePass(token, self._LintPass,
+ debug_tokens=debug_tokens))
+ else:
+ result = self._ExecutePass(token, self._LintPass, parse_error,
+ debug_tokens)
+
+ if not result:
+ return False
+
+ self._lint_rules.Finalize(self._state_tracker, self._tokenizer.mode)
+
+ self._error_handler.FinishFile()
+ return True
+
+ def _DependencyPass(self, token):
+    """Processes an individual token for dependency information.
+
+ Used to encapsulate the logic needed to process an individual token so that
+ it can be passed to _ExecutePass.
+
+ Args:
+ token: The token to process.
+ """
+ self._namespaces_info.ProcessToken(token, self._state_tracker)
+
class GJsLintRunner(lintrunner.LintRunner):
"""Wrapper class to run GJsLint."""
- def Run(self, filenames, error_handler=None):
+ def Run(self, filenames, error_handler):
"""Run GJsLint on the given filenames.
Args:
filenames: The filenames to check
- error_handler: An optional ErrorHandler object, an ErrorPrinter is used if
- none is specified.
-
- Returns:
- error_count, file_count: The number of errors and the number of files that
- contain errors.
+ error_handler: An ErrorHandler object.
"""
- if not error_handler:
- error_handler = errorprinter.ErrorPrinter(errors.NEW_ERRORS)
-
checker = JavaScriptStyleChecker(error_handler)
# Check the list of files.
for filename in filenames:
checker.Check(filename)
-
- return error_handler
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
+import StringIO
import traceback
import gflags as flags
flags.DEFINE_boolean('error_trace', False,
'Whether to show error exceptions.')
+
class LintRulesBase(object):
"""Base class for all classes defining the lint rules for a language."""
if errorrules.ShouldReportError(code):
self.__checker.HandleError(code, message, token, position, fix_data)
+ def _SetLimitedDocChecks(self, limited_doc_checks):
+ """Sets whether doc checking is relaxed for this file.
+
+ Args:
+ limited_doc_checks: Whether doc checking is relaxed for this file.
+ """
+ self._limited_doc_checks = limited_doc_checks
+
def CheckToken(self, token, parser_state):
"""Checks a token, given the current parser_state, for warnings and errors.
documentation comments.
metadata_pass: Object that builds metadata about the token stream.
"""
- self.__error_handler = error_handler
- self.__lint_rules = lint_rules
- self.__state_tracker = state_tracker
- self.__metadata_pass = metadata_pass
- self.__limited_doc_files = limited_doc_files
- self.__tokenizer = javascripttokenizer.JavaScriptTokenizer()
- self.__has_errors = False
+ self._error_handler = error_handler
+ self._lint_rules = lint_rules
+ self._state_tracker = state_tracker
+ self._metadata_pass = metadata_pass
+ self._limited_doc_files = limited_doc_files
+
+ # TODO(user): Factor out. A checker does not need to know about the
+ # tokenizer, only the token stream.
+ self._tokenizer = javascripttokenizer.JavaScriptTokenizer()
+
+ self._has_errors = False
def HandleError(self, code, message, token, position=None,
fix_data=None):
position: The position of the error, defaults to None.
fix_data: Metadata used for fixing the error.
"""
- self.__has_errors = True
- self.__error_handler.HandleError(
+ self._has_errors = True
+ self._error_handler.HandleError(
error.Error(code, message, token, position, fix_data))
def HasErrors(self):
Returns:
True if the style checker has found any errors.
"""
- return self.__has_errors
+ return self._has_errors
- def Check(self, filename):
+ def Check(self, filename, source=None):
"""Checks the file, printing warnings and errors as they are found.
Args:
filename: The name of the file to check.
+ source: Optional. The contents of the file. Can be either a string or
+ file-like object. If omitted, contents will be read from disk from
+ the given filename.
"""
- try:
- f = open(filename)
- except IOError:
- self.__error_handler.HandleFile(filename, None)
- self.HandleError(errors.FILE_NOT_FOUND, 'File not found', None)
- self.__error_handler.FinishFile()
- return
+
+ if source is None:
+ try:
+ f = open(filename)
+ except IOError:
+ self._error_handler.HandleFile(filename, None)
+ self.HandleError(errors.FILE_NOT_FOUND, 'File not found', None)
+ self._error_handler.FinishFile()
+ return
+ else:
+ if type(source) in [str, unicode]:
+ f = StringIO.StringIO(source)
+ else:
+ f = source
try:
if filename.endswith('.html') or filename.endswith('.htm'):
failed prematurely.
"""
limited_doc_checks = False
- if self.__limited_doc_files:
- for limited_doc_filename in self.__limited_doc_files:
+ if self._limited_doc_files:
+ for limited_doc_filename in self._limited_doc_files:
if filename.endswith(limited_doc_filename):
limited_doc_checks = True
break
- state_tracker = self.__state_tracker
- lint_rules = self.__lint_rules
- state_tracker.Reset()
+ lint_rules = self._lint_rules
lint_rules.Initialize(self, limited_doc_checks, is_html)
- token = self.__tokenizer.TokenizeFile(lines_iter)
+ token = self._tokenizer.TokenizeFile(lines_iter)
parse_error = None
- if self.__metadata_pass:
+ if self._metadata_pass:
try:
- self.__metadata_pass.Reset()
- self.__metadata_pass.Process(token)
+ self._metadata_pass.Reset()
+ self._metadata_pass.Process(token)
except ecmametadatapass.ParseError, caught_parse_error:
if FLAGS.error_trace:
traceback.print_exc()
traceback.print_exc()
return False
- self.__error_handler.HandleFile(filename, token)
+ self._error_handler.HandleFile(filename, token)
+
+ return self._CheckTokens(token, parse_error=parse_error,
+ debug_tokens=FLAGS.debug_tokens)
+
+ def _CheckTokens(self, token, parse_error, debug_tokens):
+ """Checks a token stream for lint warnings/errors.
+
+ Args:
+ token: The first token in the token stream to check.
+ parse_error: A ParseError if any errors occurred.
+ debug_tokens: Whether every token should be printed as it is encountered
+ during the pass.
+
+ Returns:
+ A boolean indicating whether the full token stream could be checked or if
+ checking failed prematurely.
+ """
+ result = self._ExecutePass(token, self._LintPass, parse_error, debug_tokens)
+
+ if not result:
+ return False
+
+ self._lint_rules.Finalize(self._state_tracker, self._tokenizer.mode)
+ self._error_handler.FinishFile()
+ return True
+ def _LintPass(self, token):
+ """Checks an individual token for lint warnings/errors.
+
+ Used to encapsulate the logic needed to check an individual token so that it
+ can be passed to _ExecutePass.
+
+ Args:
+ token: The token to check.
+ """
+ self._lint_rules.CheckToken(token, self._state_tracker)
+
+ def _ExecutePass(self, token, pass_function, parse_error=None,
+ debug_tokens=False):
+ """Calls the given function for every token in the given token stream.
+
+ As each token is passed to the given function, state is kept up to date and,
+ depending on the error_trace flag, errors are either caught and reported, or
+ allowed to bubble up so developers can see the full stack trace. If a parse
+ error is specified, the pass will proceed as normal until the token causing
+ the parse error is reached.
+
+ Args:
+ token: The first token in the token stream.
+ pass_function: The function to call for each token in the token stream.
+ parse_error: A ParseError if any errors occurred.
+ debug_tokens: Whether every token should be printed as it is encountered
+ during the pass.
+
+ Returns:
+ A boolean indicating whether the full token stream could be checked or if
+ checking failed prematurely.
+
+ Raises:
+ Exception: If any error occurred while calling the given function.
+ """
+ self._state_tracker.Reset()
while token:
- if FLAGS.debug_tokens:
+ if debug_tokens:
print token
if parse_error and parse_error.token == token:
- # Report any parse errors from above once we find the token.
message = ('Error parsing file at token "%s". Unable to '
'check the rest of file.' % token.string)
self.HandleError(errors.FILE_DOES_NOT_PARSE, message, token)
- self.__error_handler.FinishFile()
- return False
+ self._error_handler.FinishFile()
+ return
- if FLAGS.error_trace:
- state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
- else:
- try:
- state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
- except:
+ try:
+ self._state_tracker.HandleToken(
+ token, self._state_tracker.GetLastNonSpaceToken())
+ pass_function(token)
+ self._state_tracker.HandleAfterToken(token)
+ except:
+ if FLAGS.error_trace:
+ raise
+ else:
self.HandleError(errors.FILE_DOES_NOT_PARSE,
('Error parsing file at token "%s". Unable to '
- 'check the rest of file.' % token.string),
+ 'check the rest of file.' % token.string),
token)
- self.__error_handler.FinishFile()
- return False
-
- # Check the token for style guide violations.
- lint_rules.CheckToken(token, state_tracker)
-
- state_tracker.HandleAfterToken(token)
-
- # Move to the next token.
+ self._error_handler.FinishFile()
+ return False
token = token.next
-
- lint_rules.Finalize(state_tracker, self.__tokenizer.mode)
- self.__error_handler.FinishFile()
return True
--- /dev/null
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Logic for computing dependency information for closurized JavaScript files.
+
+Closurized JavaScript files express dependencies using goog.require and
+goog.provide statements. In order for the linter to detect when a statement is
+missing or unnecessary, all identifiers in the JavaScript file must first be
+processed to determine if they constitute the creation or usage of a dependency.
+"""
+
+
+
+from closure_linter import javascripttokens
+from closure_linter import tokenutil
+
+# pylint: disable-msg=C6409
+TokenType = javascripttokens.JavaScriptTokenType
+
+DEFAULT_EXTRA_NAMESPACES = [
+ 'goog.testing.asserts',
+ 'goog.testing.jsunit',
+]
+
+class ClosurizedNamespacesInfo(object):
+ """Dependency information for closurized JavaScript files.
+
+ Processes token streams for dependency creation or usage and provides logic
+ for determining if a given require or provide statement is unnecessary or if
+ there are missing require or provide statements.
+ """
+
+ def __init__(self, closurized_namespaces, ignored_extra_namespaces):
+    """Initializes an instance of the ClosurizedNamespacesInfo class.
+
+ Args:
+ closurized_namespaces: A list of namespace prefixes that should be
+ processed for dependency information. Non-matching namespaces are
+ ignored.
+ ignored_extra_namespaces: A list of namespaces that should not be reported
+ as extra regardless of whether they are actually used.
+ """
+ self._closurized_namespaces = closurized_namespaces
+ self._ignored_extra_namespaces = (ignored_extra_namespaces +
+ DEFAULT_EXTRA_NAMESPACES)
+ self.Reset()
+
+ def Reset(self):
+ """Resets the internal state to prepare for processing a new file."""
+
+ # A list of goog.provide tokens in the order they appeared in the file.
+ self._provide_tokens = []
+
+ # A list of goog.require tokens in the order they appeared in the file.
+ self._require_tokens = []
+
+ # Namespaces that are already goog.provided.
+ self._provided_namespaces = []
+
+ # Namespaces that are already goog.required.
+ self._required_namespaces = []
+
+ # Note that created_namespaces and used_namespaces contain both namespaces
+ # and identifiers because there are many existing cases where a method or
+ # constant is provided directly instead of its namespace. Ideally, these
+ # two lists would only have to contain namespaces.
+
+ # A list of tuples where the first element is the namespace of an identifier
+ # created in the file and the second is the identifier itself.
+ self._created_namespaces = []
+
+ # A list of tuples where the first element is the namespace of an identifier
+ # used in the file and the second is the identifier itself.
+ self._used_namespaces = []
+
+ # A list of seemingly-unnecessary namespaces that are goog.required() and
+ # annotated with @suppress {extraRequire}.
+ self._suppressed_requires = []
+
+ # A list of goog.provide tokens which are duplicates.
+ self._duplicate_provide_tokens = []
+
+ # A list of goog.require tokens which are duplicates.
+ self._duplicate_require_tokens = []
+
+ # Whether this file is in a goog.scope. Someday, we may add support
+ # for checking scopified namespaces, but for now let's just fail
+ # in a more reasonable way.
+ self._scopified_file = False
+
+ # TODO(user): Handle the case where there are 2 different requires
+ # that can satisfy the same dependency, but only one is necessary.
+
+ def GetProvidedNamespaces(self):
+ """Returns the namespaces which are already provided by this file.
+
+ Returns:
+ A list of strings where each string is a 'namespace' corresponding to an
+ existing goog.provide statement in the file being checked.
+ """
+ return list(self._provided_namespaces)
+
+ def GetRequiredNamespaces(self):
+ """Returns the namespaces which are already required by this file.
+
+ Returns:
+ A list of strings where each string is a 'namespace' corresponding to an
+ existing goog.require statement in the file being checked.
+ """
+ return list(self._required_namespaces)
+
+ def IsExtraProvide(self, token):
+ """Returns whether the given goog.provide token is unnecessary.
+
+ Args:
+ token: A goog.provide token.
+
+ Returns:
+ True if the given token corresponds to an unnecessary goog.provide
+ statement, otherwise False.
+ """
+ if self._scopified_file:
+ return False
+
+ namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
+
+ base_namespace = namespace.split('.', 1)[0]
+ if base_namespace not in self._closurized_namespaces:
+ return False
+
+ if token in self._duplicate_provide_tokens:
+ return True
+
+ # TODO(user): There's probably a faster way to compute this.
+ for created_namespace, created_identifier in self._created_namespaces:
+ if namespace == created_namespace or namespace == created_identifier:
+ return False
+
+ return True
+
+ def IsExtraRequire(self, token):
+ """Returns whether the given goog.require token is unnecessary.
+
+ Args:
+ token: A goog.require token.
+
+ Returns:
+ True if the given token corresponds to an unnecessary goog.require
+ statement, otherwise False.
+ """
+ if self._scopified_file:
+ return False
+
+ namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
+
+ base_namespace = namespace.split('.', 1)[0]
+ if base_namespace not in self._closurized_namespaces:
+ return False
+
+ if namespace in self._ignored_extra_namespaces:
+ return False
+
+ if token in self._duplicate_require_tokens:
+ return True
+
+ if namespace in self._suppressed_requires:
+ return False
+
+ # If the namespace contains a component that is initial caps, then that
+ # must be the last component of the namespace.
+ parts = namespace.split('.')
+ if len(parts) > 1 and parts[-2][0].isupper():
+ return True
+
+ # TODO(user): There's probably a faster way to compute this.
+ for used_namespace, used_identifier in self._used_namespaces:
+ if namespace == used_namespace or namespace == used_identifier:
+ return False
+
+ return True
+
+ def GetMissingProvides(self):
+ """Returns the set of missing provided namespaces for the current file.
+
+ Returns:
+ Returns a set of strings where each string is a namespace that should be
+ provided by this file, but is not.
+ """
+ if self._scopified_file:
+ return set()
+
+ missing_provides = set()
+ for namespace, identifier in self._created_namespaces:
+ if (not self._IsPrivateIdentifier(identifier) and
+ namespace not in self._provided_namespaces and
+ identifier not in self._provided_namespaces and
+ namespace not in self._required_namespaces):
+ missing_provides.add(namespace)
+
+ return missing_provides
+
+ def GetMissingRequires(self):
+ """Returns the set of missing required namespaces for the current file.
+
+ For each non-private identifier used in the file, find either a
+ goog.require, goog.provide or a created identifier that satisfies it.
+ goog.require statements can satisfy the identifier by requiring either the
+ namespace of the identifier or the identifier itself. goog.provide
+ statements can satisfy the identifier by providing the namespace of the
+ identifier. A created identifier can only satisfy the used identifier if
+ it matches it exactly (necessary since things can be defined on a
+ namespace in more than one file). Note that provided namespaces should be
+ a subset of created namespaces, but we check both because in some cases we
+ can't always detect the creation of the namespace.
+
+ Returns:
+ Returns a set of strings where each string is a namespace that should be
+ required by this file, but is not.
+ """
+ if self._scopified_file:
+ return set()
+
+ external_dependencies = set(self._required_namespaces)
+
+ # Assume goog namespace is always available.
+ external_dependencies.add('goog')
+
+ created_identifiers = set()
+ for namespace, identifier in self._created_namespaces:
+ created_identifiers.add(identifier)
+
+ missing_requires = set()
+ for namespace, identifier in self._used_namespaces:
+ if (not self._IsPrivateIdentifier(identifier) and
+ namespace not in external_dependencies and
+ namespace not in self._provided_namespaces and
+ identifier not in external_dependencies and
+ identifier not in created_identifiers):
+ missing_requires.add(namespace)
+
+ return missing_requires
+
+ def _IsPrivateIdentifier(self, identifier):
+    """Returns whether the given identifier is private."""
+ pieces = identifier.split('.')
+ for piece in pieces:
+ if piece.endswith('_'):
+ return True
+ return False
+
+ def IsFirstProvide(self, token):
+ """Returns whether token is the first provide token."""
+ return self._provide_tokens and token == self._provide_tokens[0]
+
+ def IsFirstRequire(self, token):
+ """Returns whether token is the first require token."""
+ return self._require_tokens and token == self._require_tokens[0]
+
+ def IsLastProvide(self, token):
+ """Returns whether token is the last provide token."""
+ return self._provide_tokens and token == self._provide_tokens[-1]
+
+ def IsLastRequire(self, token):
+ """Returns whether token is the last require token."""
+ return self._require_tokens and token == self._require_tokens[-1]
+
+ def ProcessToken(self, token, state_tracker):
+ """Processes the given token for dependency information.
+
+ Args:
+ token: The token to process.
+ state_tracker: The JavaScript state tracker.
+ """
+
+ # Note that this method is in the critical path for the linter and has been
+ # optimized for performance in the following ways:
+ # - Tokens are checked by type first to minimize the number of function
+ # calls necessary to determine if action needs to be taken for the token.
+ # - The most common tokens types are checked for first.
+ # - The number of function calls has been minimized (thus the length of this
+  #   function).
+
+ if token.type == TokenType.IDENTIFIER:
+ # TODO(user): Consider saving the whole identifier in metadata.
+ whole_identifier_string = self._GetWholeIdentifierString(token)
+ if whole_identifier_string is None:
+ # We only want to process the identifier one time. If the whole string
+ # identifier is None, that means this token was part of a multi-token
+ # identifier, but it was not the first token of the identifier.
+ return
+
+ # In the odd case that a goog.require is encountered inside a function,
+ # just ignore it (e.g. dynamic loading in test runners).
+ if token.string == 'goog.require' and not state_tracker.InFunction():
+ self._require_tokens.append(token)
+ namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
+ if namespace in self._required_namespaces:
+ self._duplicate_require_tokens.append(token)
+ else:
+ self._required_namespaces.append(namespace)
+
+ # If there is a suppression for the require, add a usage for it so it
+ # gets treated as a regular goog.require (i.e. still gets sorted).
+ jsdoc = state_tracker.GetDocComment()
+ if jsdoc and ('extraRequire' in jsdoc.suppressions):
+ self._suppressed_requires.append(namespace)
+ self._AddUsedNamespace(state_tracker, namespace)
+
+ elif token.string == 'goog.provide':
+ self._provide_tokens.append(token)
+ namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
+ if namespace in self._provided_namespaces:
+ self._duplicate_provide_tokens.append(token)
+ else:
+ self._provided_namespaces.append(namespace)
+
+ # If there is a suppression for the provide, add a creation for it so it
+ # gets treated as a regular goog.provide (i.e. still gets sorted).
+ jsdoc = state_tracker.GetDocComment()
+ if jsdoc and ('extraProvide' in jsdoc.suppressions):
+ self._AddCreatedNamespace(state_tracker, namespace)
+
+ elif token.string == 'goog.scope':
+ self._scopified_file = True
+
+ else:
+ jsdoc = state_tracker.GetDocComment()
+ if jsdoc and jsdoc.HasFlag('typedef'):
+ self._AddCreatedNamespace(state_tracker, whole_identifier_string,
+ self.GetClosurizedNamespace(
+ whole_identifier_string))
+ else:
+ self._AddUsedNamespace(state_tracker, whole_identifier_string)
+
+ elif token.type == TokenType.SIMPLE_LVALUE:
+ identifier = token.values['identifier']
+ namespace = self.GetClosurizedNamespace(identifier)
+ if state_tracker.InFunction():
+ self._AddUsedNamespace(state_tracker, identifier)
+ elif namespace and namespace != 'goog':
+ self._AddCreatedNamespace(state_tracker, identifier, namespace)
+
+ elif token.type == TokenType.DOC_FLAG:
+ flag_type = token.attached_object.flag_type
+ is_interface = state_tracker.GetDocComment().HasFlag('interface')
+ if flag_type == 'implements' or (flag_type == 'extends' and is_interface):
+ # Interfaces should be goog.require'd.
+ doc_start = tokenutil.Search(token, TokenType.DOC_START_BRACE)
+ interface = tokenutil.Search(doc_start, TokenType.COMMENT)
+ self._AddUsedNamespace(state_tracker, interface.string)
+
+
+ def _GetWholeIdentifierString(self, token):
+ """Returns the whole identifier string for the given token.
+
+ Checks the tokens after the current one to see if the token is one in a
+ sequence of tokens which are actually just one identifier (i.e. a line was
+ wrapped in the middle of an identifier).
+
+ Args:
+ token: The token to check.
+
+ Returns:
+ The whole identifier string or None if this token is not the first token
+ in a multi-token identifier.
+ """
+ result = ''
+
+ # Search backward to determine if this token is the first token of the
+ # identifier. If it is not the first token, return None to signal that this
+ # token should be ignored.
+ prev_token = token.previous
+ while prev_token:
+ if (prev_token.IsType(TokenType.IDENTIFIER) or
+ prev_token.IsType(TokenType.NORMAL) and prev_token.string == '.'):
+ return None
+ elif (not prev_token.IsType(TokenType.WHITESPACE) and
+ not prev_token.IsAnyType(TokenType.COMMENT_TYPES)):
+ break
+ prev_token = prev_token.previous
+
+ # Search forward to find other parts of this identifier separated by white
+ # space.
+ next_token = token
+ while next_token:
+ if (next_token.IsType(TokenType.IDENTIFIER) or
+ next_token.IsType(TokenType.NORMAL) and next_token.string == '.'):
+ result += next_token.string
+ elif (not next_token.IsType(TokenType.WHITESPACE) and
+ not next_token.IsAnyType(TokenType.COMMENT_TYPES)):
+ break
+ next_token = next_token.next
+
+ return result
+
+ def _AddCreatedNamespace(self, state_tracker, identifier, namespace=None):
+ """Adds the namespace of an identifier to the list of created namespaces.
+
+ If the identifier is annotated with a 'missingProvide' suppression, it is
+ not added.
+
+ Args:
+ state_tracker: The JavaScriptStateTracker instance.
+ identifier: The identifier to add.
+ namespace: The namespace of the identifier or None if the identifier is
+ also the namespace.
+ """
+ if not namespace:
+ namespace = identifier
+
+ jsdoc = state_tracker.GetDocComment()
+ if jsdoc and 'missingProvide' in jsdoc.suppressions:
+ return
+
+ self._created_namespaces.append([namespace, identifier])
+
+ def _AddUsedNamespace(self, state_tracker, identifier):
+ """Adds the namespace of an identifier to the list of used namespaces.
+
+ If the identifier is annotated with a 'missingRequire' suppression, it is
+ not added.
+
+ Args:
+ state_tracker: The JavaScriptStateTracker instance.
+ identifier: An identifier which has been used.
+ """
+ jsdoc = state_tracker.GetDocComment()
+ if jsdoc and 'missingRequire' in jsdoc.suppressions:
+ return
+
+ namespace = self.GetClosurizedNamespace(identifier)
+ if namespace:
+ self._used_namespaces.append([namespace, identifier])
+
+  def GetClosurizedNamespace(self, identifier):
+    """Given an identifier, returns the namespace that identifier is from.
+
+    Args:
+      identifier: The identifier to extract a namespace from.
+
+    Returns:
+      The namespace the given identifier resides in, or None if one could not
+      be found.
+    """
+    if identifier.startswith('goog.global'):
+      # Ignore goog.global, since it is, by definition, global.
+      return None
+
+    # Only identifiers under one of the configured closurized namespaces can
+    # produce a result; anything else falls through to the final return None.
+    parts = identifier.split('.')
+    for namespace in self._closurized_namespaces:
+      if not identifier.startswith(namespace + '.'):
+        continue
+
+      last_part = parts[-1]
+      if not last_part:
+        # TODO(robbyw): Handle this: it's a multi-line identifier.
+        return None
+
+      # The namespace for a class is the shortest prefix ending in a class
+      # name, which starts with a capital letter but is not a capitalized word.
+      #
+      # We ultimately do not want to allow requiring or providing of inner
+      # classes/enums. Instead, a file should provide only the top-level class
+      # and users should require only that.
+      namespace = []
+      for part in parts:
+        # 'prototype' or an ALL_CAPS part (a constant or enum value) ends the
+        # namespace just before that part.
+        if part == 'prototype' or part.isupper():
+          return '.'.join(namespace)
+        namespace.append(part)
+        # A Capitalized (but not all-caps) part is a class name; the namespace
+        # ends with, and includes, that part.
+        if part[0].isupper():
+          return '.'.join(namespace)
+
+      # At this point, we know there's no class or enum, so the namespace is
+      # just the identifier with the last part removed. With the exception of
+      # apply, inherits, and call, which should also be stripped.
+      if parts[-1] in ('apply', 'inherits', 'call'):
+        parts.pop()
+      parts.pop()
+
+      # If the last part ends with an underscore, it is a private variable,
+      # method, or enum. The namespace is whatever is before it.
+      if parts and parts[-1].endswith('_'):
+        parts.pop()
+
+      return '.'.join(parts)
+
+    return None
--- /dev/null
+#!/usr/bin/env python
+#
+# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for ClosurizedNamespacesInfo."""
+
+
+
+import unittest as googletest
+from closure_linter import closurizednamespacesinfo
+from closure_linter import javascriptstatetracker
+from closure_linter import javascripttokenizer
+from closure_linter import javascripttokens
+from closure_linter import tokenutil
+
+# pylint: disable-msg=C6409
+TokenType = javascripttokens.JavaScriptTokenType
+
+
+class ClosurizedNamespacesInfoTest(googletest.TestCase):
+ """Tests for ClosurizedNamespacesInfo."""
+
+ _test_cases = {
+ 'goog.global.anything': None,
+ 'package.CONSTANT': 'package',
+ 'package.methodName': 'package',
+ 'package.subpackage.methodName': 'package.subpackage',
+ 'package.subpackage.methodName.apply': 'package.subpackage',
+ 'package.ClassName.something': 'package.ClassName',
+ 'package.ClassName.Enum.VALUE.methodName': 'package.ClassName',
+ 'package.ClassName.CONSTANT': 'package.ClassName',
+ 'package.namespace.CONSTANT.methodName': 'package.namespace',
+ 'package.ClassName.inherits': 'package.ClassName',
+ 'package.ClassName.apply': 'package.ClassName',
+ 'package.ClassName.methodName.apply': 'package.ClassName',
+ 'package.ClassName.methodName.call': 'package.ClassName',
+ 'package.ClassName.prototype.methodName': 'package.ClassName',
+ 'package.ClassName.privateMethod_': 'package.ClassName',
+ 'package.className.privateProperty_': 'package.className',
+ 'package.className.privateProperty_.methodName': 'package.className',
+ 'package.ClassName.PrivateEnum_': 'package.ClassName',
+ 'package.ClassName.prototype.methodName.apply': 'package.ClassName',
+ 'package.ClassName.property.subProperty': 'package.ClassName',
+ 'package.className.prototype.something.somethingElse': 'package.className'
+ }
+
+ _tokenizer = javascripttokenizer.JavaScriptTokenizer()
+
+ def testGetClosurizedNamespace(self):
+ """Tests that the correct namespace is returned for various identifiers."""
+ namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
+ closurized_namespaces=['package'], ignored_extra_namespaces=[])
+ for identifier, expected_namespace in self._test_cases.items():
+ actual_namespace = namespaces_info.GetClosurizedNamespace(identifier)
+ self.assertEqual(
+ expected_namespace,
+ actual_namespace,
+ 'expected namespace "' + str(expected_namespace) +
+ '" for identifier "' + str(identifier) + '" but was "' +
+ str(actual_namespace) + '"')
+
+ def testIgnoredExtraNamespaces(self):
+ """Tests that ignored_extra_namespaces are ignored."""
+ token = self._GetRequireTokens('package.Something')
+ namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
+ closurized_namespaces=['package'],
+ ignored_extra_namespaces=['package.Something'])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Should be valid since it is in ignored namespaces.')
+
+ namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
+ ['package'], [])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'Should be invalid since it is not in ignored namespaces.')
+
+ def testIsExtraProvide_created(self):
+ """Tests that provides for created namespaces are not extra."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo = function() {};'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertFalse(namespaces_info.IsExtraProvide(token),
+ 'Should not be extra since it is created.')
+
+ def testIsExtraProvide_createdIdentifier(self):
+ """Tests that provides for created identifiers are not extra."""
+ input_lines = [
+ 'goog.provide(\'package.Foo.methodName\');',
+ 'package.Foo.methodName = function() {};'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertFalse(namespaces_info.IsExtraProvide(token),
+ 'Should not be extra since it is created.')
+
+ def testIsExtraProvide_notCreated(self):
+ """Tests that provides for non-created namespaces are extra."""
+ input_lines = ['goog.provide(\'package.Foo\');']
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertTrue(namespaces_info.IsExtraProvide(token),
+ 'Should be extra since it is not created.')
+
+ def testIsExtraProvide_duplicate(self):
+ """Tests that providing a namespace twice makes the second one extra."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo = function() {};'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ # Advance to the second goog.provide token.
+ token = tokenutil.Search(token.next, TokenType.IDENTIFIER)
+
+ self.assertTrue(namespaces_info.IsExtraProvide(token),
+ 'Should be extra since it is already provided.')
+
+ def testIsExtraProvide_notClosurized(self):
+ """Tests that provides of non-closurized namespaces are not extra."""
+ input_lines = ['goog.provide(\'notclosurized.Foo\');']
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertFalse(namespaces_info.IsExtraProvide(token),
+ 'Should not be extra since it is not closurized.')
+
+ def testIsExtraRequire_used(self):
+ """Tests that requires for used namespaces are not extra."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'var x = package.Foo.methodName();'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Should not be extra since it is used.')
+
+ def testIsExtraRequire_usedIdentifier(self):
+ """Tests that requires for used methods on classes are extra."""
+ input_lines = [
+ 'goog.require(\'package.Foo.methodName\');',
+ 'var x = package.Foo.methodName();'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'Should require the package, not the method specifically.')
+
+ def testIsExtraRequire_notUsed(self):
+ """Tests that requires for unused namespaces are extra."""
+ input_lines = ['goog.require(\'package.Foo\');']
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'Should be extra since it is not used.')
+
+ def testIsExtraRequire_notClosurized(self):
+ """Tests that requires of non-closurized namespaces are not extra."""
+ input_lines = ['goog.require(\'notclosurized.Foo\');']
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Should not be extra since it is not closurized.')
+
+ def testIsExtraRequire_objectOnClass(self):
+ """Tests that requiring an object on a class is extra."""
+ input_lines = [
+ 'goog.require(\'package.Foo.Enum\');',
+ 'var x = package.Foo.Enum.VALUE1;',
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'The whole class, not the object, should be required.');
+
+ def testIsExtraRequire_constantOnClass(self):
+ """Tests that requiring a constant on a class is extra."""
+ input_lines = [
+ 'goog.require(\'package.Foo.CONSTANT\');',
+ 'var x = package.Foo.CONSTANT',
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'The class, not the constant, should be required.');
+
+ def testIsExtraRequire_constantNotOnClass(self):
+ """Tests that requiring a constant not on a class is OK."""
+ input_lines = [
+ 'goog.require(\'package.subpackage.CONSTANT\');',
+ 'var x = package.subpackage.CONSTANT',
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Constants can be required except on classes.');
+
+ def testIsExtraRequire_methodNotOnClass(self):
+ """Tests that requiring a method not on a class is OK."""
+ input_lines = [
+ 'goog.require(\'package.subpackage.method\');',
+ 'var x = package.subpackage.method()',
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Methods can be required except on classes.');
+
+ def testIsExtraRequire_defaults(self):
+ """Tests that there are no warnings about extra requires for test utils"""
+ input_lines = ['goog.require(\'goog.testing.jsunit\');']
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['goog'], [])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Should not be extra since it is for testing.')
+
+ def testGetMissingProvides_provided(self):
+ """Tests that provided functions don't cause a missing provide."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo = function() {};'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingProvides_providedIdentifier(self):
+ """Tests that provided identifiers don't cause a missing provide."""
+ input_lines = [
+ 'goog.provide(\'package.Foo.methodName\');',
+ 'package.Foo.methodName = function() {};'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingProvides_providedParentIdentifier(self):
+ """Tests that provided identifiers on a class don't cause a missing provide
+ on objects attached to that class."""
+ input_lines = [
+ 'goog.provide(\'package.foo.ClassName\');',
+ 'package.foo.ClassName.methodName = function() {};',
+ 'package.foo.ClassName.ObjectName = 1;',
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingProvides_unprovided(self):
+ """Tests that unprovided functions cause a missing provide."""
+ input_lines = ['package.Foo = function() {};']
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(1, len(namespaces_info.GetMissingProvides()))
+ self.assertTrue('package.Foo' in namespaces_info.GetMissingProvides())
+
+ def testGetMissingProvides_privatefunction(self):
+ """Tests that unprovided private functions don't cause a missing provide."""
+ input_lines = ['package.Foo_ = function() {};']
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingProvides_required(self):
+ """Tests that required namespaces don't cause a missing provide."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'package.Foo.methodName = function() {};'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingRequires_required(self):
+ """Tests that required namespaces don't cause a missing require."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'package.Foo();'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingRequires_requiredIdentifier(self):
+ """Tests that required namespaces satisfy identifiers on that namespace."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'package.Foo.methodName();'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingRequires_requiredParentClass(self):
+ """Tests that requiring a parent class of an object is sufficient to prevent
+ a missing require on that object."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'package.Foo.methodName();',
+ 'package.Foo.methodName(package.Foo.ObjectName);'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
+
+ def testGetMissingRequires_unrequired(self):
+ """Tests that unrequired namespaces cause a missing require."""
+ input_lines = ['package.Foo();']
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(1, len(namespaces_info.GetMissingRequires()))
+ self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires())
+
+ def testGetMissingRequires_provided(self):
+ """Tests that provided namespaces satisfy identifiers on that namespace."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo.methodName();'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
+
+ def testGetMissingRequires_created(self):
+ """Tests that created namespaces do not satisfy usage of an identifier."""
+ input_lines = [
+ 'package.Foo = function();',
+ 'package.Foo.methodName();'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(1, len(namespaces_info.GetMissingRequires()))
+ self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires())
+
+ def testGetMissingRequires_createdIdentifier(self):
+ """Tests that created identifiers satisfy usage of the identifier."""
+ input_lines = [
+ 'package.Foo.methodName = function();',
+ 'package.Foo.methodName();'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
+
+ def testGetMissingRequires_objectOnClass(self):
+ """Tests that we should require a class, not the object on the class."""
+ input_lines = [
+ 'goog.require(\'package.Foo.Enum\');',
+ 'var x = package.Foo.Enum.VALUE1;',
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertEquals(1, len(namespaces_info.GetMissingRequires()),
+ 'The whole class, not the object, should be required.');
+
+ def testIsFirstProvide(self):
+ """Tests operation of the isFirstProvide method."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo.methodName();'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
+
+ self.assertTrue(namespaces_info.IsFirstProvide(token))
+
+ def testGetWholeIdentifierString(self):
+ """Tests that created identifiers satisfy usage of the identifier."""
+ input_lines = [
+ 'package.Foo.',
+ ' veryLong.',
+ ' identifier;'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo([], [])
+
+ self.assertEquals('package.Foo.veryLong.identifier',
+ namespaces_info._GetWholeIdentifierString(token))
+ self.assertEquals(None,
+ namespaces_info._GetWholeIdentifierString(token.next))
+
+ def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
+ ignored_extra_namespaces):
+ """Returns a namespaces info initialized with the given token stream."""
+ namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
+ closurized_namespaces=closurized_namespaces,
+ ignored_extra_namespaces=ignored_extra_namespaces)
+ state_tracker = javascriptstatetracker.JavaScriptStateTracker()
+
+ while token:
+ namespaces_info.ProcessToken(token, state_tracker)
+ token = token.next
+
+ return namespaces_info
+
+ def _GetProvideTokens(self, namespace):
+ """Returns a list of tokens for a goog.require of the given namespace."""
+ line_text = 'goog.require(\'' + namespace + '\');\n'
+ return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])
+
+ def _GetRequireTokens(self, namespace):
+ """Returns a list of tokens for a goog.require of the given namespace."""
+ line_text = 'goog.require(\'' + namespace + '\');\n'
+ return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])
+
+if __name__ == '__main__':
+ googletest.main()
#!/usr/bin/env python
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Package indicator for gjslint.common."""
Args:
error: The error object
"""
- self._errors.append((error.token.line_number, error.code))
+ self._errors.append(error)
def GetErrors(self):
"""Returns the accumulated errors.
--- /dev/null
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions to format errors."""
+
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)',
+ 'nnaze@google.com (Nathan Naze)')
+
+
+def GetUnixErrorOutput(filename, error, new_error=False):
+  """Get an output line for an error in UNIX format.
+
+  Args:
+    filename: The name of the file the error occurred in.
+    error: The error object; read for its token, code and message attributes.
+    new_error: Whether to prefix the error code with 'New Error '.
+
+  Returns:
+    A string of the form 'filename:line:(code) message'; the line field is
+    empty when the error has no token.
+  """
+
+  line = ''
+
+  if error.token:
+    line = '%d' % error.token.line_number
+
+  error_code = '%04d' % error.code
+  if new_error:
+    error_code = 'New Error ' + error_code
+  return '%s:%s:(%s) %s' % (filename, line, error_code, error.message)
+
+
+def GetErrorOutput(error, new_error=False):
+  """Get an output line for an error in regular format.
+
+  Args:
+    error: The error object; read for its token, code and message attributes.
+    new_error: Whether to prefix the message with 'New Error '.
+
+  Returns:
+    A string of the form 'Line <n>, E:<code>: <message>'; the line prefix is
+    omitted when the error has no token.
+  """
+
+  line = ''
+  if error.token:
+    line = 'Line %d, ' % error.token.line_number
+
+  code = 'E:%04d' % error.code
+
+  error_message = error.message
+  if new_error:
+    error_message = 'New Error ' + error_message
+
+  # Bug fix: return the (possibly prefixed) error_message. Previously the
+  # 'New Error ' prefix was computed but error.message was returned, so the
+  # new_error flag had no effect on the output.
+  return '%s%s: %s' % (line, code, error_message)
+++ /dev/null
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Linter error handler class that prints errors to stdout."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-from closure_linter.common import error
-from closure_linter.common import errorhandler
-
-Error = error.Error
-
-
-# The error message is of the format:
-# Line <number>, E:<code>: message
-DEFAULT_FORMAT = 1
-
-# The error message is of the format:
-# filename:[line number]:message
-UNIX_FORMAT = 2
-
-
-class ErrorPrinter(errorhandler.ErrorHandler):
- """ErrorHandler that prints errors to stdout."""
-
- def __init__(self, new_errors=None):
- """Initializes this error printer.
-
- Args:
- new_errors: A sequence of error codes representing recently introduced
- errors, defaults to None.
- """
- # Number of errors
- self._error_count = 0
-
- # Number of new errors
- self._new_error_count = 0
-
- # Number of files checked
- self._total_file_count = 0
-
- # Number of files with errors
- self._error_file_count = 0
-
- # Dict of file name to number of errors
- self._file_table = {}
-
- # List of errors for each file
- self._file_errors = None
-
- # Current file
- self._filename = None
-
- self._format = DEFAULT_FORMAT
-
- if new_errors:
- self._new_errors = frozenset(new_errors)
- else:
- self._new_errors = frozenset(set())
-
- def SetFormat(self, format):
- """Sets the print format of errors.
-
- Args:
- format: One of {DEFAULT_FORMAT, UNIX_FORMAT}.
- """
- self._format = format
-
- def HandleFile(self, filename, first_token):
- """Notifies this ErrorPrinter that subsequent errors are in filename.
-
- Sets the current file name, and sets a flag stating the header for this file
- has not been printed yet.
-
- Should be called by a linter before a file is style checked.
-
- Args:
- filename: The name of the file about to be checked.
- first_token: The first token in the file, or None if there was an error
- opening the file
- """
- if self._filename and self._file_table[self._filename]:
- print
-
- self._filename = filename
- self._file_table[filename] = 0
- self._total_file_count += 1
- self._file_errors = []
-
- def HandleError(self, error):
- """Prints a formatted error message about the specified error.
-
- The error message is of the format:
- Error #<code>, line #<number>: message
-
- Args:
- error: The error object
- """
- self._file_errors.append(error)
- self._file_table[self._filename] += 1
- self._error_count += 1
-
- if self._new_errors and error.code in self._new_errors:
- self._new_error_count += 1
-
- def _PrintError(self, error):
- """Prints a formatted error message about the specified error.
-
- Args:
- error: The error object
- """
- new_error = self._new_errors and error.code in self._new_errors
- if self._format == DEFAULT_FORMAT:
- line = ''
- if error.token:
- line = 'Line %d, ' % error.token.line_number
-
- code = 'E:%04d' % error.code
- if new_error:
- print '%s%s: (New error) %s' % (line, code, error.message)
- else:
- print '%s%s: %s' % (line, code, error.message)
- else:
- # UNIX format
- filename = self._filename
- line = ''
- if error.token:
- line = '%d' % error.token.line_number
-
- error_code = '%04d' % error.code
- if new_error:
- error_code = 'New Error ' + error_code
- print '%s:%s:(%s) %s' % (filename, line, error_code, error.message)
-
- def FinishFile(self):
- """Finishes handling the current file."""
- if self._file_errors:
- self._error_file_count += 1
-
- if self._format != UNIX_FORMAT:
- print '----- FILE : %s -----' % (self._filename)
-
- self._file_errors.sort(Error.Compare)
-
- for error in self._file_errors:
- self._PrintError(error)
-
- def HasErrors(self):
- """Whether this error printer encountered any errors.
-
- Returns:
- True if the error printer encountered any errors.
- """
- return self._error_count
-
- def HasNewErrors(self):
- """Whether this error printer encountered any new errors.
-
- Returns:
- True if the error printer encountered any new errors.
- """
- return self._new_error_count
-
- def HasOldErrors(self):
- """Whether this error printer encountered any old errors.
-
- Returns:
- True if the error printer encountered any old errors.
- """
- return self._error_count - self._new_error_count
-
- def PrintSummary(self):
- """Print a summary of the number of errors and files."""
- if self.HasErrors() or self.HasNewErrors():
- print ('Found %d errors, including %d new errors, in %d files '
- '(%d files OK).' % (
- self._error_count,
- self._new_error_count,
- self._error_file_count,
- self._total_file_count - self._error_file_count))
- else:
- print '%d files checked, no errors found.' % self._total_file_count
-
- def PrintFileSummary(self):
- """Print a detailed summary of the number of errors in each file."""
- keys = self._file_table.keys()
- keys.sort()
- for filename in keys:
- print '%s: %d' % (filename, self._file_table[filename])
self._runner.Run([filename], errors)
errors = errors.GetErrors()
- errors.sort()
- return errors
+
+ # Convert to expected tuple format.
+ error_msgs = [(error.token.line_number, error.code) for error in errors]
+ error_msgs.sort()
+ return error_msgs
return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string,
self.values, self.line_number,
self.metadata)
+
+ def __iter__(self):
+ """Returns a token iterator."""
+ node = self
+ while node:
+ yield node
+ node = node.next
+
+ def __reversed__(self):
+ """Returns a reverse-direction token iterator."""
+ node = self
+ while node:
+ yield node
+ node = node.previous
--- /dev/null
+#!/usr/bin/env python
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+
+
+
+
+import unittest as googletest
+from closure_linter.common import tokens
+
+
+def _CreateDummyToken():
+  """Returns a Token built with placeholder values for use in tests."""
+  return tokens.Token('foo', None, 1, 1)
+
+
+def _CreateDummyTokens(count):
+ dummy_tokens = []
+ for _ in xrange(count):
+ dummy_tokens.append(_CreateDummyToken())
+ return dummy_tokens
+
+
+def _SetTokensAsNeighbors(neighbor_tokens):
+ for i in xrange(len(neighbor_tokens)):
+ prev_index = i - 1
+ next_index = i + 1
+
+ if prev_index >= 0:
+ neighbor_tokens[i].previous = neighbor_tokens[prev_index]
+
+ if next_index < len(neighbor_tokens):
+ neighbor_tokens[i].next = neighbor_tokens[next_index]
+
+
+class TokensTest(googletest.TestCase):
+  """Tests for the Token class in closure_linter.common.tokens."""
+
+  def testIsFirstInLine(self):
+    """Tests IsFirstInLine for lone, same-line, and different-line tokens."""
+
+    # First token in file (has no previous).
+    self.assertTrue(_CreateDummyToken().IsFirstInLine())
+
+    a, b = _CreateDummyTokens(2)
+    _SetTokensAsNeighbors([a, b])
+
+    # Tokens on same line
+    a.line_number = 30
+    b.line_number = 30
+
+    self.assertFalse(b.IsFirstInLine())
+
+    # Tokens on different lines
+    b.line_number = 31
+    self.assertTrue(b.IsFirstInLine())
+
+  def testIsLastInLine(self):
+    """Tests IsLastInLine for lone, same-line, and different-line tokens."""
+    # Last token in file (has no next).
+    self.assertTrue(_CreateDummyToken().IsLastInLine())
+
+    a, b = _CreateDummyTokens(2)
+    _SetTokensAsNeighbors([a, b])
+
+    # Tokens on same line
+    a.line_number = 30
+    b.line_number = 30
+    self.assertFalse(a.IsLastInLine())
+
+    b.line_number = 31
+    self.assertTrue(a.IsLastInLine())
+
+  def testIsType(self):
+    """Tests that IsType matches only the token's exact type."""
+    a = tokens.Token('foo', 'fakeType1', 1, 1)
+    self.assertTrue(a.IsType('fakeType1'))
+    self.assertFalse(a.IsType('fakeType2'))
+
+  def testIsAnyType(self):
+    """Tests that IsAnyType matches when any listed type equals the token's."""
+    a = tokens.Token('foo', 'fakeType1', 1, 1)
+    self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2']))
+    self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4']))
+
+  def testRepr(self):
+    """Tests the Token string representation."""
+    a = tokens.Token('foo', 'fakeType1', 1, 1)
+    self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(a))
+
+  def testIter(self):
+    """Tests forward iteration over a linked token stream."""
+    dummy_tokens = _CreateDummyTokens(5)
+    _SetTokensAsNeighbors(dummy_tokens)
+    a, b, c, d, e = dummy_tokens
+
+    i = iter(a)
+    self.assertListEqual([a, b, c, d, e], list(i))
+
+  def testReverseIter(self):
+    """Tests reverse iteration over a linked token stream."""
+    dummy_tokens = _CreateDummyTokens(5)
+    _SetTokensAsNeighbors(dummy_tokens)
+    a, b, c, d, e = dummy_tokens
+
+    ri = reversed(e)
+    self.assertListEqual([e, d, c, b, a], list(ri))
+
+
+if __name__ == '__main__':
+ googletest.main()
from closure_linter import checkerbase
from closure_linter import ecmametadatapass
+from closure_linter import error_check
from closure_linter import errors
from closure_linter import indentation
from closure_linter import javascripttokens
import gflags as flags
FLAGS = flags.FLAGS
-flags.DEFINE_boolean('strict', False,
- 'Whether to validate against the stricter Closure style.')
flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
# TODO(robbyw): Check for extra parens on return statements
Error = error.Error
Modes = javascripttokenizer.JavaScriptModes
Position = position.Position
+Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class EcmaScriptLintRules(checkerbase.LintRulesBase):
self._HandleError(errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE,
'JsDoc types cannot contain both "?" and "|": "%s"' % p, token)
- if FLAGS.strict and (flag.type_start_token.type != Type.DOC_START_BRACE or
- flag.type_end_token.type != Type.DOC_END_BRACE):
+ if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
+ flag.type_start_token.type != Type.DOC_START_BRACE or
+ flag.type_end_token.type != Type.DOC_END_BRACE):
self._HandleError(errors.MISSING_BRACES_AROUND_TYPE,
'Type must always be surrounded by curly braces.', token)
type = token.type
# Process the line change.
- if not self._is_html and FLAGS.strict:
+ if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
# TODO(robbyw): Support checking indentation in HTML files.
indentation_errors = self._indentation.CheckToken(token, state)
for indentation_error in indentation_errors:
token.previous, Position.All(token.previous.string))
elif type == Type.START_BRACKET:
- if (not first_in_line and token.previous.type == Type.WHITESPACE and
- last_non_space_token and
- last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
- self._HandleError(errors.EXTRA_SPACE, 'Extra space before "["',
- token.previous, Position.All(token.previous.string))
- # If the [ token is the first token in a line we shouldn't complain
- # about a missing space before [. This is because some Ecma script
- # languages allow syntax like:
- # [Annotation]
- # class MyClass {...}
- # So we don't want to blindly warn about missing spaces before [.
- # In the the future, when rules for computing exactly how many spaces
- # lines should be indented are added, then we can return errors for
- # [ tokens that are improperly indented.
- # For example:
- # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
- # [a,b,c];
- # should trigger a proper indentation warning message as [ is not indented
- # by four spaces.
- elif (not first_in_line and token.previous and
- not token.previous.type in (
- [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
- Type.EXPRESSION_ENDER_TYPES)):
- self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
- token, Position.AtBeginning())
-
+ self._HandleStartBracket(token, last_non_space_token)
elif type in (Type.END_PAREN, Type.END_BRACKET):
# Ensure there is no space before closing parentheses, except when
# it's in a for statement with an omitted section, or when it's at the
elif type == Type.WHITESPACE:
if self.ILLEGAL_TAB.search(token.string):
if token.IsFirstInLine():
- self._HandleError(errors.ILLEGAL_TAB,
- 'Illegal tab in whitespace before "%s"' % token.next.string,
- token, Position.All(token.string))
+ if token.next:
+ self._HandleError(errors.ILLEGAL_TAB,
+ 'Illegal tab in whitespace before "%s"' % token.next.string,
+ token, Position.All(token.string))
+ else:
+ self._HandleError(errors.ILLEGAL_TAB,
+ 'Illegal tab in whitespace',
+ token, Position.All(token.string))
else:
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace after "%s"' % token.previous.string,
self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX,
'Invalid suppress syntax: should be @suppress {errortype}. '
'Spaces matter.', token)
- elif flag.type not in state.GetDocFlag().SUPPRESS_TYPES:
- self._HandleError(errors.INVALID_SUPPRESS_TYPE,
- 'Invalid suppression type: %s' % flag.type,
- token)
+ else:
+ for suppress_type in flag.type.split('|'):
+ if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
+ self._HandleError(errors.INVALID_SUPPRESS_TYPE,
+ 'Invalid suppression type: %s' % suppress_type,
+ token)
- elif FLAGS.strict and flag.flag_type == 'author':
+ elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
+ flag.flag_type == 'author'):
# TODO(user): In non strict mode check the author tag for as much as
# it exists, though the full form checked below isn't required.
string = token.next.string
while desc_str.endswith('>'):
start_tag_index = desc_str.rfind('<')
if start_tag_index < 0:
- break
+ break
desc_str = desc_str[:start_tag_index].rstrip()
end_position = Position(len(desc_str), 0)
self._HandleError(errors.INVALID_JSDOC_TAG,
'Invalid JsDoc tag: %s' % token.values['name'], token)
- if (FLAGS.strict and token.values['name'] == 'inheritDoc' and
+ if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
+ token.values['name'] == 'inheritDoc' and
type == Type.DOC_INLINE_FLAG):
self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
'Unnecessary braces around @inheritDoc',
identifier.startswith('this.')):
# We are at the top level and the function/member is documented.
if identifier.endswith('_') and not identifier.endswith('__'):
- if jsdoc.HasFlag('override'):
+ # Can have a private class which inherits documentation from a
+ # public superclass.
+ #
+        # @inheritDoc is deprecated in favor of using @override, and they
+        # are treated the same way here.
+ if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
+ and not ('accessControls' in jsdoc.suppressions)):
self._HandleError(errors.INVALID_OVERRIDE_PRIVATE,
'%s should not override a private member.' % identifier,
jsdoc.GetFlag('override').flag_token)
- # Can have a private class which inherits documentation from a
- # public superclass.
- if jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor'):
+ if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
+ and not ('accessControls' in jsdoc.suppressions)):
self._HandleError(errors.INVALID_INHERIT_DOC_PRIVATE,
'%s should not inherit from a private member.' % identifier,
jsdoc.GetFlag('inheritDoc').flag_token)
if (not jsdoc.HasFlag('private') and
- not ('underscore' in jsdoc.suppressions)):
+ not ('underscore' in jsdoc.suppressions) and not
+ ((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
+ ('accessControls' in jsdoc.suppressions))):
self._HandleError(errors.MISSING_PRIVATE,
'Member "%s" must have @private JsDoc.' %
identifier, token)
self._HandleError(errors.UNNECESSARY_SUPPRESS,
'@suppress {underscore} is not necessary with @private',
jsdoc.suppressions['underscore'])
- elif jsdoc.HasFlag('private'):
+ elif (jsdoc.HasFlag('private') and
+ not self.InExplicitlyTypedLanguage()):
+ # It is convention to hide public fields in some ECMA
+ # implementations from documentation using the @private tag.
self._HandleError(errors.EXTRA_PRIVATE,
'Member "%s" must not have @private JsDoc' %
identifier, token)
- if ((jsdoc.HasFlag('desc') or jsdoc.HasFlag('hidden'))
+ # These flags are only legal on localizable message definitions;
+ # such variables always begin with the prefix MSG_.
+ for f in ('desc', 'hidden', 'meaning'):
+ if (jsdoc.HasFlag(f)
and not identifier.startswith('MSG_')
and identifier.find('.MSG_') == -1):
- # TODO(user): Update error message to show the actual invalid
- # tag, either @desc or @hidden.
- self._HandleError(errors.INVALID_USE_OF_DESC_TAG,
- 'Member "%s" should not have @desc JsDoc' % identifier,
- token)
+ self._HandleError(errors.INVALID_USE_OF_DESC_TAG,
+ 'Member "%s" should not have @%s JsDoc' % (identifier, f),
+ token)
# Check for illegaly assigning live objects as prototype property values.
index = identifier.find('.prototype.')
# Languages that don't allow variables to by typed such as
# JavaScript care but languages such as ActionScript or Java
# that allow variables to be typed don't care.
- self.HandleMissingParameterDoc(token, params_iter.next())
+ if not self._limited_doc_checks:
+ self.HandleMissingParameterDoc(token, params_iter.next())
elif op == 'D':
# Deletion
docs_iter.next(), token)
elif op == 'S':
# Substitution
- self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION,
- 'Parameter mismatch: got "%s", expected "%s"' %
- (params_iter.next(), docs_iter.next()), token)
+ if not self._limited_doc_checks:
+ self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION,
+ 'Parameter mismatch: got "%s", expected "%s"' %
+ (params_iter.next(), docs_iter.next()), token)
else:
# Equality - just advance the iterators
self._HandleError(errors.MISSING_SEMICOLON,
'Missing semicolon at end of line', token)
+ def _HandleStartBracket(self, token, last_non_space_token):
+ """Handles a token that is an open bracket.
+
+ Args:
+ token: The token to handle.
+ last_non_space_token: The last token that was not a space.
+ """
+ if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
+ last_non_space_token and
+ last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
+ self._HandleError(errors.EXTRA_SPACE, 'Extra space before "["',
+ token.previous, Position.All(token.previous.string))
+ # If the [ token is the first token in a line we shouldn't complain
+ # about a missing space before [. This is because some Ecma script
+ # languages allow syntax like:
+ # [Annotation]
+ # class MyClass {...}
+ # So we don't want to blindly warn about missing spaces before [.
+  # In the future, when rules for computing exactly how many spaces
+ # lines should be indented are added, then we can return errors for
+ # [ tokens that are improperly indented.
+ # For example:
+ # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
+ # [a,b,c];
+ # should trigger a proper indentation warning message as [ is not indented
+ # by four spaces.
+ elif (not token.IsFirstInLine() and token.previous and
+ not token.previous.type in (
+ [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
+ Type.EXPRESSION_ENDER_TYPES)):
+ self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
+ token, Position.AtBeginning())
+
def Finalize(self, state, tokenizer_mode):
last_non_space_token = state.GetLastNonSpaceToken()
# Check last line for ending with newline.
def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit."""
return []
+
+ def InExplicitlyTypedLanguage(self):
+ """Returns whether this ecma implementation is explicitly typed."""
+ return False
--- /dev/null
+#!/usr/bin/env python
+#
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Specific JSLint errors checker."""
+
+
+
+import gflags as flags
+
+FLAGS = flags.FLAGS
+
+
+class Rule(object):
+ """Different rules to check."""
+
+  # Documentation for specific rules goes in the flag definitions.
+ BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level'
+ INDENTATION = 'indentation'
+ WELL_FORMED_AUTHOR = 'well_formed_author'
+ NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc'
+ BRACES_AROUND_TYPE = 'braces_around_type'
+ OPTIONAL_TYPE_MARKER = 'optional_type_marker'
+ UNUSED_PRIVATE_MEMBERS = 'unused_private_members'
+
+ # Rule to raise all known errors.
+ ALL = 'all'
+
+ # All rules that are to be checked when using the strict flag. E.g. the rules
+ # that are specific to the stricter Closure style.
+ CLOSURE_RULES = frozenset([BLANK_LINES_AT_TOP_LEVEL,
+ INDENTATION,
+ WELL_FORMED_AUTHOR,
+ NO_BRACES_AROUND_INHERIT_DOC,
+ BRACES_AROUND_TYPE,
+ OPTIONAL_TYPE_MARKER])
+
+
+flags.DEFINE_boolean('strict', False,
+ 'Whether to validate against the stricter Closure style. '
+ 'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.')
+flags.DEFINE_multistring('jslint_error', [],
+ 'List of specific lint errors to check. Here is a list'
+ ' of accepted values:\n'
+ ' - ' + Rule.ALL + ': enables all following errors.\n'
+                         ' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates '
+ 'number of blank lines between blocks at top level.\n'
+ ' - ' + Rule.INDENTATION + ': checks correct '
+ 'indentation of code.\n'
+ ' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the '
+ '@author JsDoc tags.\n'
+ ' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': '
+ 'forbids braces around @inheritdoc JsDoc tags.\n'
+ ' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces '
+ 'around types in JsDoc tags.\n'
+ ' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct '
+ 'use of optional marker = in param types.\n'
+ ' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for '
+ 'unused private variables.\n')
+
+
+def ShouldCheck(rule):
+ """Returns whether the optional rule should be checked.
+
+  Computes different flags (strict, jslint_error) to find out if
+ this specific rule should be checked.
+
+ Args:
+ rule: Name of the rule (see Rule).
+
+ Returns:
+ True if the rule should be checked according to the flags, otherwise False.
+ """
+ if rule in FLAGS.jslint_error or Rule.ALL in FLAGS.jslint_error:
+ return True
+ # Checks strict rules.
+ return FLAGS.strict and rule in Rule.CLOSURE_RULES
from closure_linter import errors
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokens
+from closure_linter import requireprovidesorter
from closure_linter import tokenutil
from closure_linter.common import errorhandler
END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$')
+# Regex to represent a common mistake of inverting author name and email as
+# @author User Name (user@company)
+INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)'
+ '(?P<name>[^(]+)'
+ '(?P<whitespace_after_name>\s+)'
+ '\('
+ '(?P<email>[^\s]+@[^)\s]+)'
+ '\)'
+ '(?P<trailing_characters>.*)')
+
FLAGS = flags.FLAGS
flags.DEFINE_boolean('disable_indentation_fixing', False,
'Whether to disable automatic fixing of indentation.')
+
class ErrorFixer(errorhandler.ErrorHandler):
"""Object that fixes simple style errors."""
- def __init__(self, external_file = None):
+ def __init__(self, external_file=None):
"""Initialize the error fixer.
Args:
external_file: If included, all output will be directed to this file
instead of overwriting the files the errors are found in.
"""
+ errorhandler.ErrorHandler.__init__(self)
+
self._file_name = None
self._file_token = None
self._external_file = external_file
token.attached_object = javascriptstatetracker.JsDocFlag(token)
self._AddFix(token)
+ elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
+ iterator = token.attached_object.type_end_token
+ if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
+ iterator = iterator.previous
+
+ ending_space = len(iterator.string) - len(iterator.string.rstrip())
+ iterator.string = '%s=%s' % (iterator.string.rstrip(),
+ ' ' * ending_space)
+
+ # Create a new flag object with updated type info.
+ token.attached_object = javascriptstatetracker.JsDocFlag(token)
+ self._AddFix(token)
+
elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
errors.MISSING_SEMICOLON):
semicolon_token = Token(';', Type.SEMICOLON, token.line,
elif code == errors.MISSING_LINE:
if error.position.IsAtBeginning():
- tokenutil.InsertLineAfter(token.previous)
+ tokenutil.InsertBlankLineAfter(token.previous)
else:
- tokenutil.InsertLineAfter(token)
+ tokenutil.InsertBlankLineAfter(token)
self._AddFix(token)
elif code == errors.EXTRA_LINE:
should_delete = False
if num_lines < 0:
- num_lines = num_lines * -1
+ num_lines *= -1
should_delete = True
for i in xrange(1, num_lines + 1):
# TODO(user): DeleteToken should update line numbers.
tokenutil.DeleteToken(token.previous)
else:
- tokenutil.InsertLineAfter(token.previous)
+ tokenutil.InsertBlankLineAfter(token.previous)
self._AddFix(token)
elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
if end_quote:
- single_quote_start = Token("'", Type.SINGLE_QUOTE_STRING_START,
- token.line, token.line_number)
- single_quote_end = Token("'", Type.SINGLE_QUOTE_STRING_START,
- end_quote.line, token.line_number)
+ single_quote_start = Token(
+ "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
+ single_quote_end = Token(
+ "'", Type.SINGLE_QUOTE_STRING_START, end_quote.line,
+ token.line_number)
tokenutil.InsertTokenAfter(single_quote_start, token)
tokenutil.InsertTokenAfter(single_quote_end, end_quote)
start_token = token.attached_object.type_start_token
if start_token.type != Type.DOC_START_BRACE:
- leading_space = (len(start_token.string) -
- len(start_token.string.lstrip()))
+ leading_space = (
+ len(start_token.string) - len(start_token.string.lstrip()))
if leading_space:
start_token = tokenutil.SplitToken(start_token, leading_space)
# Fix case where start and end token were the same.
if token.attached_object.type_end_token == start_token.previous:
token.attached_object.type_end_token = start_token
- new_token = Token("{", Type.DOC_START_BRACE, start_token.line,
+ new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
start_token.line_number)
tokenutil.InsertTokenAfter(new_token, start_token.previous)
token.attached_object.type_start_token = new_token
# FLAG_ENDING_TYPE token, if there wasn't a starting brace then
# the end token is the last token of the actual type.
last_type = end_token
- if not len(fixed_tokens):
+ if not fixed_tokens:
last_type = end_token.previous
while last_type.string.isspace():
tokenutil.SplitToken(last_type,
len(last_type.string) - trailing_space)
- new_token = Token("}", Type.DOC_END_BRACE, last_type.line,
+ new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
last_type.line_number)
tokenutil.InsertTokenAfter(new_token, last_type)
token.attached_object.type_end_token = new_token
self._AddFix(fixed_tokens)
- elif code in (errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
- errors.GOOG_PROVIDES_NOT_ALPHABETIZED):
- tokens = error.fix_data
- strings = map(lambda x: x.string, tokens)
- sorted_strings = sorted(strings)
+ elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
+ require_start_token = error.fix_data
+ sorter = requireprovidesorter.RequireProvideSorter()
+ sorter.FixRequires(require_start_token)
- index = 0
- changed_tokens = []
- for token in tokens:
- if token.string != sorted_strings[index]:
- token.string = sorted_strings[index]
- changed_tokens.append(token)
- index += 1
+ self._AddFix(require_start_token)
- self._AddFix(changed_tokens)
+ elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
+ provide_start_token = error.fix_data
+ sorter = requireprovidesorter.RequireProvideSorter()
+ sorter.FixProvides(provide_start_token)
+
+ self._AddFix(provide_start_token)
elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
if token.previous.string == '{' and token.next.string == '}':
tokenutil.DeleteToken(token.next)
self._AddFix([token])
+ elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
+ match = INVERTED_AUTHOR_SPEC.match(token.string)
+ if match:
+ token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
+ match.group('email'),
+ match.group('whitespace_after_name'),
+ match.group('name'),
+ match.group('trailing_characters'))
+ self._AddFix(token)
+
elif (code == errors.WRONG_INDENTATION and
- not FLAGS.disable_indentation_fixing):
+ not FLAGS.disable_indentation_fixing):
token = tokenutil.GetFirstTokenInSameLine(token)
actual = error.position.start
expected = error.position.length
- if token.type in (Type.WHITESPACE, Type.PARAMETERS):
+ if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
token.string = token.string.lstrip() + (' ' * expected)
self._AddFix([token])
else:
tokenutil.InsertTokenAfter(new_token, token.previous)
self._AddFix([token])
- elif code == errors.EXTRA_GOOG_REQUIRE:
- fixed_tokens = []
- while token:
- if token.type == Type.IDENTIFIER:
- if token.string not in ['goog.require', 'goog.provide']:
- # Stop iterating over tokens once we're out of the requires and
- # provides.
- break
- if token.string == 'goog.require':
- # Text of form: goog.require('required'), skipping past open paren
- # and open quote to the string text.
- required = token.next.next.next.string
- if required in error.fix_data:
- fixed_tokens.append(token)
- # Want to delete: goog.require + open paren + open single-quote +
- # text + close single-quote + close paren + semi-colon = 7.
- tokenutil.DeleteTokens(token, 7)
- token = token.next
+ elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
+ errors.MISSING_END_OF_SCOPE_COMMENT]:
+ # Only fix cases where }); is found with no trailing content on the line
+ # other than a comment. Value of 'token' is set to } for this error.
+ if (token.type == Type.END_BLOCK and
+ token.next.type == Type.END_PAREN and
+ token.next.next.type == Type.SEMICOLON):
+ current_token = token.next.next.next
+ removed_tokens = []
+ while current_token and current_token.line_number == token.line_number:
+ if current_token.IsAnyType(Type.WHITESPACE,
+ Type.START_SINGLE_LINE_COMMENT,
+ Type.COMMENT):
+ removed_tokens.append(current_token)
+ current_token = current_token.next
+ else:
+ return
+
+ if removed_tokens:
+ tokenutil.DeleteTokens(removed_tokens[0], len(removed_tokens))
+
+ whitespace_token = Token(' ', Type.WHITESPACE, token.line,
+ token.line_number)
+ start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
+ token.line, token.line_number)
+ comment_token = Token(' goog.scope', Type.COMMENT, token.line,
+ token.line_number)
+ insertion_tokens = [whitespace_token, start_comment_token,
+ comment_token]
- self._AddFix(fixed_tokens)
+ tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
+ self._AddFix(removed_tokens + insertion_tokens)
+
+ elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
+ tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
+ tokenutil.DeleteTokens(tokens_in_line[0], len(tokens_in_line))
+ self._AddFix(tokens_in_line)
+
+ elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
+ is_provide = code == errors.MISSING_GOOG_PROVIDE
+ is_require = code == errors.MISSING_GOOG_REQUIRE
+
+ missing_namespaces = error.fix_data[0]
+ need_blank_line = error.fix_data[1]
+
+ if need_blank_line is None:
+ # TODO(user): This happens when there are no existing
+ # goog.provide or goog.require statements to position new statements
+ # relative to. Consider handling this case with a heuristic.
+ return
+
+ insert_location = token.previous
+
+ # If inserting a missing require with no existing requires, insert a
+ # blank line first.
+ if need_blank_line and is_require:
+ tokenutil.InsertBlankLineAfter(insert_location)
+ insert_location = insert_location.next
+
+ for missing_namespace in missing_namespaces:
+ new_tokens = self._GetNewRequireOrProvideTokens(
+ is_provide, missing_namespace, insert_location.line_number + 1)
+ tokenutil.InsertLineAfter(insert_location, new_tokens)
+ insert_location = new_tokens[-1]
+ self._AddFix(new_tokens)
+
+ # If inserting a missing provide with no existing provides, insert a
+ # blank line after.
+ if need_blank_line and is_provide:
+ tokenutil.InsertBlankLineAfter(insert_location)
+
+ def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number):
+ """Returns a list of tokens to create a goog.require/provide statement.
+
+ Args:
+ is_provide: True if getting tokens for a provide, False for require.
+ namespace: The required or provided namespaces to get tokens for.
+ line_number: The line number the new require or provide statement will be
+ on.
+
+ Returns:
+ Tokens to create a new goog.require or goog.provide statement.
+ """
+ string = 'goog.require'
+ if is_provide:
+ string = 'goog.provide'
+ line_text = string + '(\'' + namespace + '\');\n'
+ return [
+ Token(string, Type.IDENTIFIER, line_text, line_number),
+ Token('(', Type.START_PAREN, line_text, line_number),
+ Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number),
+ Token(namespace, Type.STRING_TEXT, line_text, line_number),
+ Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number),
+ Token(')', Type.END_PAREN, line_text, line_number),
+ Token(';', Type.SEMICOLON, line_text, line_number)
+ ]
def FinishFile(self):
"""Called when the current file has finished style checking.
if self._file_fix_count:
f = self._external_file
if not f:
- print "Fixed %d errors in %s" % (self._file_fix_count, self._file_name)
+ print 'Fixed %d errors in %s' % (self._file_fix_count, self._file_name)
f = open(self._file_name, 'w')
token = self._file_token
if token.IsLastInLine():
f.write('\n')
if char_count > 80 and token.line_number in self._file_changed_lines:
- print "WARNING: Line %d of %s is now longer than 80 characters." % (
+ print 'WARNING: Line %d of %s is now longer than 80 characters.' % (
token.line_number, self._file_name)
char_count = 0
- self._file_changed_lines
token = token.next
--- /dev/null
+#!/usr/bin/env python
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""A simple, pickle-serializable class to represent a lint error."""
+
+
+
+import gflags as flags
+
+from closure_linter import errors
+from closure_linter.common import erroroutput
+
+FLAGS = flags.FLAGS
+
+
+class ErrorRecord(object):
+ """Record-keeping struct that can be serialized back from a process.
+
+ Attributes:
+ path: Path to the file.
+ error_string: Error string for the user.
+ new_error: Whether this is a "new error" (see errors.NEW_ERRORS).
+ """
+
+ def __init__(self, path, error_string, new_error):
+ self.path = path
+ self.error_string = error_string
+ self.new_error = new_error
+
+
+def MakeErrorRecord(path, error):
+ """Make an error record with correctly formatted error string.
+
+ Errors are not able to be serialized (pickled) over processes because of
+ their pointers to the complex token/context graph. We use an intermediary
+ serializable class to pass back just the relevant information.
+
+ Args:
+ path: Path of file the error was found in.
+ error: An error.Error instance.
+
+ Returns:
+    ErrorRecord instance.
+ """
+ new_error = error.code in errors.NEW_ERRORS
+
+ if FLAGS.unix_mode:
+ error_string = erroroutput.GetUnixErrorOutput(path, error, new_error)
+ else:
+ error_string = erroroutput.GetErrorOutput(error, new_error)
+
+ return ErrorRecord(path, error_string, new_error)
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
+
def ByName(name):
"""Get the error code for the given error name.
COMMA_AT_END_OF_LITERAL = 121
MULTI_LINE_STRING = 130
UNNECESSARY_DOUBLE_QUOTED_STRING = 131
+UNUSED_PRIVATE_MEMBER = 132
# Requires, provides
GOOG_REQUIRES_NOT_ALPHABETIZED = 140
MISSING_GOOG_REQUIRE = 142
MISSING_GOOG_PROVIDE = 143
EXTRA_GOOG_REQUIRE = 144
+EXTRA_GOOG_PROVIDE = 145
# JsDoc
INVALID_JSDOC_TAG = 200
INVALID_AUTHOR_TAG_DESCRIPTION = 227
JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
+JSDOC_MISSING_OPTIONAL_TYPE = 232
+JSDOC_MISSING_OPTIONAL_PREFIX = 233
JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER = 240
# TODO(robbyw): Split this in to more specific syntax problems.
INCORRECT_SUPPRESS_SYNTAX = 250
INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
+# Comments
+MISSING_END_OF_SCOPE_COMMENT = 500
+MALFORMED_END_OF_SCOPE_COMMENT = 501
+
# ActionScript specific errors:
# TODO(user): move these errors to their own file and move all JavaScript
# specific errors to their own file as well.
# Errors added after 2.0.2:
WRONG_INDENTATION,
MISSING_SEMICOLON,
- # Errors added after 2.2.5:
- WRONG_BLANK_LINE_COUNT,
- EXTRA_GOOG_REQUIRE,
+ # Errors added after 2.3.4:
+ MISSING_END_OF_SCOPE_COMMENT,
+ MALFORMED_END_OF_SCOPE_COMMENT,
+ UNUSED_PRIVATE_MEMBER,
+ # Errors added after 2.3.5:
])
#!/usr/bin/env python
+# python2.6 for command-line runs using p4lib. pylint: disable-msg=C6301
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
from closure_linter import error_fixer
from closure_linter.common import simplefileflags as fileflags
+FLAGS = flags.FLAGS
+flags.DEFINE_list('additional_extensions', None, 'List of additional file '
+ 'extensions (not js) that should be treated as '
+ 'JavaScript files.')
+
def main(argv = None):
"""Main function.
if argv is None:
argv = flags.FLAGS(sys.argv)
- files = fileflags.GetFileList(argv, 'JavaScript', ['.js'])
+ suffixes = ['.js']
+ if FLAGS.additional_extensions:
+ suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
+
+ files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
style_checker = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer())
flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
+
class FixJsStyleTest(googletest.TestCase):
"""Test case to for gjslint auto-fixing."""
def testFixJsStyle(self):
- input_filename = None
- try:
- input_filename = '%s/fixjsstyle.in.js' % (_RESOURCE_PREFIX)
+ test_cases = [['fixjsstyle.in.js', 'fixjsstyle.out.js'],
+ ['indentation.js', 'fixjsstyle.indentation.out.js']]
+ for [running_input_file, running_output_file] in test_cases:
+ input_filename = None
+ golden_filename = None
+ current_filename = None
+ try:
+ input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file)
+ current_filename = input_filename
+
+ golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file)
+ current_filename = golden_filename
+ except IOError, ex:
+ raise IOError('Could not find testdata resource for %s: %s' %
+ (current_filename, ex))
+
+ if running_input_file == 'fixjsstyle.in.js':
+ with open(input_filename) as f:
+ for line in f:
+ # Go to last line.
+ pass
+ self.assertTrue(line == line.rstrip(), '%s file should not end '
+ 'with a new line.' % (input_filename))
+
+ # Autofix the file, sending output to a fake file.
+ actual = StringIO.StringIO()
+ style_checker = checker.JavaScriptStyleChecker(
+ error_fixer.ErrorFixer(actual))
+ style_checker.Check(input_filename)
+
+ # Now compare the files.
+ actual.seek(0)
+ expected = open(golden_filename, 'r')
+
+ self.assertEqual(actual.readlines(), expected.readlines())
+
+ def testMissingExtraAndUnsortedRequires(self):
+ """Tests handling of missing extra and unsorted goog.require statements."""
+ original = [
+ "goog.require('dummy.aa');",
+ "goog.require('dummy.Cc');",
+ "goog.require('dummy.Dd');",
+ "",
+ "var x = new dummy.Bb();",
+ "dummy.Cc.someMethod();",
+ "dummy.aa.someMethod();",
+ ]
+
+ expected = [
+ "goog.require('dummy.Bb');",
+ "goog.require('dummy.Cc');",
+ "goog.require('dummy.aa');",
+ "",
+ "var x = new dummy.Bb();",
+ "dummy.Cc.someMethod();",
+ "dummy.aa.someMethod();",
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testMissingExtraAndUnsortedProvides(self):
+ """Tests handling of missing extra and unsorted goog.provide statements."""
+ original = [
+ "goog.provide('dummy.aa');",
+ "goog.provide('dummy.Cc');",
+ "goog.provide('dummy.Dd');",
+ "",
+ "dummy.Cc = function() {};",
+ "dummy.Bb = function() {};",
+ "dummy.aa.someMethod = function();",
+ ]
+
+ expected = [
+ "goog.provide('dummy.Bb');",
+ "goog.provide('dummy.Cc');",
+ "goog.provide('dummy.aa');",
+ "",
+ "dummy.Cc = function() {};",
+ "dummy.Bb = function() {};",
+ "dummy.aa.someMethod = function();",
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testNoRequires(self):
+ """Tests positioning of missing requires without existing requires."""
+ original = [
+ "goog.provide('dummy.Something');",
+ "",
+ "dummy.Something = function() {};",
+ "",
+ "var x = new dummy.Bb();",
+ ]
+
+ expected = [
+ "goog.provide('dummy.Something');",
+ "",
+ "goog.require('dummy.Bb');",
+ "",
+ "dummy.Something = function() {};",
+ "",
+ "var x = new dummy.Bb();",
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testNoProvides(self):
+ """Tests positioning of missing provides without existing provides."""
+ original = [
+ "goog.require('dummy.Bb');",
+ "",
+ "dummy.Something = function() {};",
+ "",
+ "var x = new dummy.Bb();",
+ ]
+
+ expected = [
+ "goog.provide('dummy.Something');",
+ "",
+ "goog.require('dummy.Bb');",
+ "",
+ "dummy.Something = function() {};",
+ "",
+ "var x = new dummy.Bb();",
+ ]
- golden_filename = '%s/fixjsstyle.out.js' % (_RESOURCE_PREFIX)
- except IOError, ex:
- raise IOError('Could not find testdata resource for %s: %s' %
- (self._filename, ex))
+ self._AssertFixes(original, expected)
+
+ def testGoogScopeIndentation(self):
+ """Tests Handling a typical end-of-scope indentation fix."""
+ original = [
+ 'goog.scope(function() {',
+ ' // TODO(brain): Take over the world.',
+ '}); // goog.scope',
+ ]
+
+ expected = [
+ 'goog.scope(function() {',
+ '// TODO(brain): Take over the world.',
+ '}); // goog.scope',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testMissingEndOfScopeComment(self):
+ """Tests Handling a missing comment at end of goog.scope."""
+ original = [
+ 'goog.scope(function() {',
+ '});',
+ ]
+
+ expected = [
+ 'goog.scope(function() {',
+ '}); // goog.scope',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testMissingEndOfScopeCommentWithOtherComment(self):
+ """Tests handling an irrelevant comment at end of goog.scope."""
+ original = [
+ 'goog.scope(function() {',
+ "}); // I don't belong here!",
+ ]
+
+ expected = [
+ 'goog.scope(function() {',
+ '}); // goog.scope',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testMalformedEndOfScopeComment(self):
+ """Tests Handling a malformed comment at end of goog.scope."""
+ original = [
+ 'goog.scope(function() {',
+ '}); // goog.scope FTW',
+ ]
+
+ expected = [
+ 'goog.scope(function() {',
+ '}); // goog.scope',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def _AssertFixes(self, original, expected):
+ """Asserts that the error fixer corrects original to expected."""
+ original = self._GetHeader() + original
+ expected = self._GetHeader() + expected
- # Autofix the file, sending output to a fake file.
actual = StringIO.StringIO()
style_checker = checker.JavaScriptStyleChecker(
error_fixer.ErrorFixer(actual))
- style_checker.Check(input_filename)
-
- # Now compare the files.
+ style_checker.CheckLines('testing.js', original, False)
actual.seek(0)
- expected = open(golden_filename, 'r')
- self.assertEqual(actual.readlines(), expected.readlines())
+ expected = [x + '\n' for x in expected]
+
+ self.assertListEqual(actual.readlines(), expected)
+
+ def _GetHeader(self):
+ """Returns a fake header for a JavaScript file."""
+ return [
+ "// Copyright 2011 Google Inc. All Rights Reserved.",
+ "",
+ "/**",
+ " * @fileoverview Fake file overview.",
+ " * @author fake@google.com (Fake Person)",
+ " */",
+ ""
+ ]
if __name__ == '__main__':
from closure_linter import checker
from closure_linter import errors
+from closure_linter import error_check
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = True
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
-flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js')
+flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
+ 'limited_doc_checks.js')
+flags.FLAGS.jslint_error = error_check.Rule.ALL
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
+# TODO(user): Figure out how to list the directory.
_TEST_FILES = [
'all_js_wrapped.js',
'blank_lines.js',
'ends_with_block.js',
'externs.js',
+ 'externs_jsdoc.js',
+ 'goog_scope.js',
'html_parse_error.html',
'indentation.js',
'interface.js',
'jsdoc.js',
+ 'limited_doc_checks.js',
'minimal.js',
'other.js',
+ 'provide_blank.js',
+ 'provide_extra.js',
+ 'provide_missing.js',
'require_all_caps.js',
+ 'require_blank.js',
'require_extra.js',
'require_function.js',
'require_function_missing.js',
'require_function_through_both.js',
'require_function_through_namespace.js',
'require_interface.js',
+ 'require_interface_base.js',
'require_lower_case.js',
+ 'require_missing.js',
'require_numeric.js',
+ 'require_provide_blank.js',
'require_provide_ok.js',
'require_provide_missing.js',
'simple.html',
'spaces.js',
'tokenizer.js',
'unparseable.js',
+ 'unused_private_members.js',
'utf8.html'
]
#!/usr/bin/env python
+# python2.6 for command-line runs using p4lib. pylint: disable-msg=C6301
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
+import functools
+import itertools
import sys
import time
+import gflags as flags
+
from closure_linter import checker
-from closure_linter import errors
-from closure_linter.common import errorprinter
+from closure_linter import errorrecord
+from closure_linter.common import erroraccumulator
from closure_linter.common import simplefileflags as fileflags
-import gflags as flags
+# Attempt import of multiprocessing (should be available in Python 2.6 and up).
+try:
+ # pylint: disable-msg=C6204
+ import multiprocessing
+except ImportError:
+ multiprocessing = None
FLAGS = flags.FLAGS
flags.DEFINE_boolean('unix_mode', False,
'Whether to check javascript in html files.')
flags.DEFINE_boolean('summary', False,
'Whether to show an error count summary.')
+flags.DEFINE_list('additional_extensions', None, 'List of additional file '
+ 'extensions (not js) that should be treated as '
+ 'JavaScript files.')
+flags.DEFINE_boolean('multiprocess', False,
+ 'Whether to parallalize linting using the '
+                     'Whether to parallelize linting using the '
+
GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
'--check_html', '--summary']
-def FormatTime(t):
+def _MultiprocessCheckPaths(paths):
+  """Run _CheckPath over multiple processes.
+
+ Tokenization, passes, and checks are expensive operations. Running in a
+ single process, they can only run on one CPU/core. Instead,
+ shard out linting over all CPUs with multiprocessing to parallelize.
+
+ Args:
+ paths: paths to check.
+
+ Yields:
+ errorrecord.ErrorRecords for any found errors.
+ """
+
+ pool = multiprocessing.Pool()
+
+ for results in pool.imap(_CheckPath, paths):
+ for record in results:
+ yield record
+
+ pool.close()
+ pool.join()
+
+
+def _CheckPaths(paths):
+ """Run _CheckPath on all paths in one thread.
+
+ Args:
+ paths: paths to check.
+
+ Yields:
+ errorrecord.ErrorRecords for any found errors.
+ """
+
+ for path in paths:
+ results = _CheckPath(path)
+ for record in results:
+ yield record
+
+
+def _CheckPath(path):
+ """Check a path and return any errors.
+
+ Args:
+    path: The path to check.
+
+ Returns:
+ A list of errorrecord.ErrorRecords for any found errors.
+ """
+
+ error_accumulator = erroraccumulator.ErrorAccumulator()
+ style_checker = checker.JavaScriptStyleChecker(error_accumulator)
+ style_checker.Check(path)
+
+ # Return any errors as error records.
+ make_error_record = functools.partial(errorrecord.MakeErrorRecord, path)
+ return map(make_error_record, error_accumulator.GetErrors())
+
+
+def _GetFilePaths(argv):
+ suffixes = ['.js']
+ if FLAGS.additional_extensions:
+ suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
+ if FLAGS.check_html:
+ suffixes += ['.html', '.htm']
+ return fileflags.GetFileList(argv, 'JavaScript', suffixes)
+
+
+# Error printing functions
+
+
+def _PrintFileSummary(paths, records):
+ """Print a detailed summary of the number of errors in each file."""
+
+ paths = list(paths)
+ paths.sort()
+
+ for path in paths:
+ path_errors = [e for e in records if e.path == path]
+ print '%s: %d' % (path, len(path_errors))
+
+
+def _PrintFileSeparator(path):
+ print '----- FILE : %s -----' % path
+
+
+def _PrintSummary(paths, error_records):
+ """Print a summary of the number of errors and files."""
+
+ error_count = len(error_records)
+ all_paths = set(paths)
+ all_paths_count = len(all_paths)
+
+ if error_count is 0:
+ print '%d files checked, no errors found.' % all_paths_count
+
+ new_error_count = len([e for e in error_records if e.new_error])
+
+ error_paths = set([e.path for e in error_records])
+ error_paths_count = len(error_paths)
+ no_error_paths_count = all_paths_count - error_paths_count
+
+ if error_count or new_error_count:
+ print ('Found %d errors, including %d new errors, in %d files '
+ '(%d files OK).' % (
+ error_count,
+ new_error_count,
+ error_paths_count,
+ no_error_paths_count))
+
+
+def _PrintErrorRecords(error_records):
+ """Print error records strings in the expected format."""
+
+ current_path = None
+ for record in error_records:
+
+ if current_path != record.path:
+ current_path = record.path
+ if not FLAGS.unix_mode:
+ _PrintFileSeparator(current_path)
+
+ print record.error_string
+
+
+def _FormatTime(t):
"""Formats a duration as a human-readable string.
Args:
"""
if argv is None:
argv = flags.FLAGS(sys.argv)
-
+
if FLAGS.time:
- start_time = time.time()
+ start_time = time.time()
suffixes = ['.js']
+ if FLAGS.additional_extensions:
+ suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
if FLAGS.check_html:
suffixes += ['.html', '.htm']
- files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
+ paths = fileflags.GetFileList(argv, 'JavaScript', suffixes)
- error_handler = None
- if FLAGS.unix_mode:
- error_handler = errorprinter.ErrorPrinter(errors.NEW_ERRORS)
- error_handler.SetFormat(errorprinter.UNIX_FORMAT)
+ if FLAGS.multiprocess:
+ records_iter = _MultiprocessCheckPaths(paths)
+ else:
+ records_iter = _CheckPaths(paths)
- runner = checker.GJsLintRunner()
- result = runner.Run(files, error_handler)
- result.PrintSummary()
+ records_iter, records_iter_copy = itertools.tee(records_iter, 2)
+ _PrintErrorRecords(records_iter_copy)
+
+ error_records = list(records_iter)
+ _PrintSummary(paths, error_records)
exit_code = 0
- if result.HasOldErrors():
+
+ # If there are any errors
+ if error_records:
exit_code += 1
- if result.HasNewErrors():
+
+ # If there are any new errors
+ if [r for r in error_records if r.new_error]:
exit_code += 2
if exit_code:
if FLAGS.summary:
- result.PrintFileSummary()
+ _PrintFileSummary(paths, error_records)
if FLAGS.beep:
# Make a beep noise.
fixjsstyle. Please double check any changes it makes and report any bugs. The
script can be run by executing:
-fixjsstyle %s
-""" % ' '.join(fix_args)
+fixjsstyle %s """ % ' '.join(fix_args)
if FLAGS.time:
- print 'Done in %s.' % FormatTime(time.time() - start_time)
+ print 'Done in %s.' % _FormatTime(time.time() - start_time)
sys.exit(exit_code)
self._PopTo(Type.START_BRACKET)
elif token_type == Type.END_BLOCK:
- self._PopTo(Type.START_BLOCK)
+ start_token = self._PopTo(Type.START_BLOCK)
+ # Check for required goog.scope comment.
+ if start_token:
+ goog_scope = self._GoogScopeOrNone(start_token.token)
+ if goog_scope is not None:
+ if not token.line.endswith('; // goog.scope\n'):
+ if (token.line.find('//') > -1 and
+ token.line.find('goog.scope') >
+ token.line.find('//')):
+ indentation_errors.append([
+ errors.MALFORMED_END_OF_SCOPE_COMMENT,
+ ('Malformed end of goog.scope comment. Please use the '
+ 'exact following syntax to close the scope:\n'
+ '}); // goog.scope'),
+ token,
+ Position(token.start_index, token.length)])
+ else:
+ indentation_errors.append([
+ errors.MISSING_END_OF_SCOPE_COMMENT,
+ ('Missing comment for end of goog.scope which opened at line '
+ '%d. End the scope with:\n'
+ '}); // goog.scope' %
+ (start_token.line_number)),
+ token,
+ Position(token.start_index, token.length)])
elif token_type == Type.KEYWORD and token.string in ('case', 'default'):
self._Add(self._PopTo(Type.START_BLOCK))
elif token_type == Type.START_BLOCK or token.metadata.is_implied_block:
self._Add(TokenInfo(token=token, is_block=True))
- elif token_type in (Type.START_PAREN, Type.START_PARAMETERS):
+ elif token_type in (Type.START_PAREN, Type.START_PARAMETERS):
self._Add(TokenInfo(token=token, is_block=False))
elif token_type == Type.KEYWORD and token.string == 'return':
if token.type not in Type.NON_CODE_TYPES:
return False
+ def _GoogScopeOrNone(self, token):
+ """Determines if the given START_BLOCK is part of a goog.scope statement.
+
+ Args:
+ token: A token of type START_BLOCK.
+
+ Returns:
+ The goog.scope function call token, or None if such call doesn't exist.
+ """
+ # Search for a goog.scope statement, which will be 5 tokens before the
+ # block. Illustration of the tokens found prior to the start block:
+ # goog.scope(function() {
+ # 5 4 3 21 ^
+
+ maybe_goog_scope = token
+ for unused_i in xrange(5):
+ maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and
+ maybe_goog_scope.previous else None)
+ if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope':
+ return maybe_goog_scope
+
def _Add(self, token_info):
"""Adds the given token info to the stack.
return
if token_info.is_block or token_info.token.type == Type.START_PAREN:
+ token_info.overridden_by = self._GoogScopeOrNone(token_info.token)
index = 1
while index <= len(self._stack):
stack_info = self._stack[-index]
#!/usr/bin/env python
#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
-import gflags as flags
+import re
+from sets import Set
from closure_linter import ecmalintrules
+from closure_linter import error_check
from closure_linter import errors
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
+from closure_linter import requireprovidesorter
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
-FLAGS = flags.FLAGS
-flags.DEFINE_list('closurized_namespaces', '',
- 'Namespace prefixes, used for testing of'
- 'goog.provide/require')
-flags.DEFINE_list('ignored_extra_namespaces', '',
- 'Fully qualified namespaces that should be not be reported '
- 'as extra by the linter.')
-
# Shorthand
Error = error.Error
Position = position.Position
+Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
"""JavaScript lint rules that catch JavaScript specific style errors."""
+ def __init__(self, namespaces_info):
+ """Initializes a JavaScriptLintRules instance."""
+ ecmalintrules.EcmaScriptLintRules.__init__(self)
+ self._namespaces_info = namespaces_info
+ self._declared_private_member_tokens = {}
+ self._declared_private_members = Set()
+ self._used_private_members = Set()
+
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a param tag."""
self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
Args:
token: The token being checked
+
+ Returns:
+ True if the token contains a record type, False otherwise.
"""
# If we see more than one left-brace in the string of an annotation token,
# then there's a record type in there.
- return (token and token.type == Type.DOC_FLAG and
+ return (
+ token and token.type == Type.DOC_FLAG and
token.attached_object.type is not None and
token.attached_object.type.find('{') != token.string.rfind('{'))
-
def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors.
super(JavaScriptLintRules, self).CheckToken(token, state)
# Store some convenience variables
- first_in_line = token.IsFirstInLine()
- last_in_line = token.IsLastInLine()
- type = token.type
-
- if type == Type.DOC_FLAG:
+ namespaces_info = self._namespaces_info
+
+ if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
+ # Find all assignments to private members.
+ if token.type == Type.SIMPLE_LVALUE:
+ identifier = token.string
+ if identifier.endswith('_') and not identifier.endswith('__'):
+ doc_comment = state.GetDocComment()
+ suppressed = (doc_comment and doc_comment.HasFlag('suppress') and
+ doc_comment.GetFlag('suppress').type == 'underscore')
+ if not suppressed:
+ # Look for static members defined on a provided namespace.
+ namespace = namespaces_info.GetClosurizedNamespace(identifier)
+ provided_namespaces = namespaces_info.GetProvidedNamespaces()
+
+ # Skip cases of this.something_.somethingElse_.
+ regex = re.compile('^this\.[a-zA-Z_]+$')
+ if namespace in provided_namespaces or regex.match(identifier):
+ variable = identifier.split('.')[-1]
+ self._declared_private_member_tokens[variable] = token
+ self._declared_private_members.add(variable)
+ elif not identifier.endswith('__'):
+ # Consider setting public members of private members to be a usage.
+ for piece in identifier.split('.'):
+ if piece.endswith('_'):
+ self._used_private_members.add(piece)
+
+ # Find all usages of private members.
+ if token.type == Type.IDENTIFIER:
+ for piece in token.string.split('.'):
+ if piece.endswith('_'):
+ self._used_private_members.add(piece)
+
+ if token.type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'param' and flag.name_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.name_token)
+ if (error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER) and
+ flag.type is not None and flag.name is not None):
+ # Check for optional marker in type.
+ if (flag.type.endswith('=') and
+ not flag.name.startswith('opt_')):
+ self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX,
+ 'Optional parameter name %s must be prefixed '
+ 'with opt_.' % flag.name,
+ token)
+ elif (not flag.type.endswith('=') and
+ flag.name.startswith('opt_')):
+ self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE,
+ 'Optional parameter %s type must end with =.' %
+ flag.name,
+ token)
+
if flag.flag_type in state.GetDocFlag().HAS_TYPE:
# Check for both missing type token and empty type braces '{}'
# Missing suppress types are reported separately and we allow enums
# without types.
if (flag.flag_type not in ('suppress', 'enum') and
- (flag.type == None or flag.type == '' or flag.type.isspace())):
+ (not flag.type or flag.type.isspace())):
self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
'Missing type in %s tag' % token.string, token)
'Type should be immediately after %s tag' % token.string,
token)
- elif type == Type.DOUBLE_QUOTE_STRING_START:
- next = token.next
- while next.type == Type.STRING_TEXT:
+ elif token.type == Type.DOUBLE_QUOTE_STRING_START:
+ next_token = token.next
+ while next_token.type == Type.STRING_TEXT:
if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search(
- next.string):
+ next_token.string):
break
- next = next.next
+ next_token = next_token.next
else:
self._HandleError(
errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
token,
Position.All(token.string))
- elif type == Type.END_DOC_COMMENT:
- if (FLAGS.strict and not self._is_html and state.InTopLevel() and
- not state.InBlock()):
+ elif token.type == Type.END_DOC_COMMENT:
+ doc_comment = state.GetDocComment()
+
+ # When @externs appears in a @fileoverview comment, it should trigger
+ # the same limited doc checks as a special filename like externs.js.
+ if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'):
+ self._SetLimitedDocChecks(True)
+
+ if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and
+ not self._is_html and state.InTopLevel() and not state.InBlock()):
# Check if we're in a fileoverview or constructor JsDoc.
- doc_comment = state.GetDocComment()
- is_constructor = (doc_comment.HasFlag('constructor') or
+ is_constructor = (
+ doc_comment.HasFlag('constructor') or
doc_comment.HasFlag('interface'))
is_file_overview = doc_comment.HasFlag('fileoverview')
# precede some code, skip it.
# NOTE: The tokenutil methods are not used here because of their
# behavior at the top of a file.
- next = token.next
- if (not next or
- (not is_file_overview and next.type in Type.NON_CODE_TYPES)):
+ next_token = token.next
+ if (not next_token or
+ (not is_file_overview and next_token.type in Type.NON_CODE_TYPES)):
+ return
+
+ # Don't require extra blank lines around suppression of extra
+ # goog.require errors.
+ if (doc_comment.SuppressionOnly() and
+ next_token.type == Type.IDENTIFIER and
+ next_token.string in ['goog.provide', 'goog.require']):
return
# Find the start of this block (include comments above the block, unless
error_message = 'Should have a blank line before a file overview.'
expected_blank_lines = 1
elif is_constructor and blank_lines != 3:
- error_message = ('Should have 3 blank lines before a constructor/'
- 'interface.')
+ error_message = (
+ 'Should have 3 blank lines before a constructor/interface.')
expected_blank_lines = 3
elif not is_file_overview and not is_constructor and blank_lines != 2:
error_message = 'Should have 2 blank lines between top-level blocks.'
expected_blank_lines = 2
if error_message:
- self._HandleError(errors.WRONG_BLANK_LINE_COUNT, error_message,
+ self._HandleError(
+ errors.WRONG_BLANK_LINE_COUNT, error_message,
block_start, Position.AtBeginning(),
expected_blank_lines - blank_lines)
- elif type == Type.END_BLOCK:
+ elif token.type == Type.END_BLOCK:
if state.InFunction() and state.IsFunctionClose():
is_immediately_called = (token.next and
token.next.type == Type.START_PAREN)
errors.MISSING_RETURN_DOCUMENTATION,
'Missing @return JsDoc in function with non-trivial return',
function.doc.end_token, Position.AtBeginning())
- elif (not function.has_return and function.doc and
+ elif (not function.has_return and
+ not function.has_throw and
+ function.doc and
function.doc.HasFlag('return') and
not state.InInterfaceMethod()):
return_flag = function.doc.GetFlag('return')
'constructor with @constructor)',
function.doc.end_token, Position.AtBeginning())
- elif type == Type.IDENTIFIER:
+ elif token.type == Type.IDENTIFIER:
if token.string == 'goog.inherits' and not state.InFunction():
if state.GetLastNonSpaceToken().line_number == token.line_number:
self._HandleError(
# TODO(robbyw): Test the last function was a constructor.
# TODO(robbyw): Test correct @extends and @implements documentation.
- elif type == Type.OPERATOR:
+ elif (token.string == 'goog.provide' and
+ not state.InFunction() and
+ namespaces_info is not None):
+ namespace = tokenutil.Search(token, Type.STRING_TEXT).string
+
+ # Report extra goog.provide statement.
+ if namespaces_info.IsExtraProvide(token):
+ self._HandleError(
+ errors.EXTRA_GOOG_PROVIDE,
+ 'Unnecessary goog.provide: ' + namespace,
+ token, position=Position.AtBeginning())
+
+ if namespaces_info.IsLastProvide(token):
+ # Report missing provide statements after the last existing provide.
+ missing_provides = namespaces_info.GetMissingProvides()
+ if missing_provides:
+ self._ReportMissingProvides(
+ missing_provides,
+ tokenutil.GetLastTokenInSameLine(token).next,
+ False)
+
+ # If there are no require statements, missing requires should be
+ # reported after the last provide.
+ if not namespaces_info.GetRequiredNamespaces():
+ missing_requires = namespaces_info.GetMissingRequires()
+ if missing_requires:
+ self._ReportMissingRequires(
+ missing_requires,
+ tokenutil.GetLastTokenInSameLine(token).next,
+ True)
+
+ elif (token.string == 'goog.require' and
+ not state.InFunction() and
+ namespaces_info is not None):
+ namespace = tokenutil.Search(token, Type.STRING_TEXT).string
+
+ # If there are no provide statements, missing provides should be
+ # reported before the first require.
+ if (namespaces_info.IsFirstRequire(token) and
+ not namespaces_info.GetProvidedNamespaces()):
+ missing_provides = namespaces_info.GetMissingProvides()
+ if missing_provides:
+ self._ReportMissingProvides(
+ missing_provides,
+ tokenutil.GetFirstTokenInSameLine(token),
+ True)
+
+ # Report extra goog.require statement.
+ if namespaces_info.IsExtraRequire(token):
+ self._HandleError(
+ errors.EXTRA_GOOG_REQUIRE,
+ 'Unnecessary goog.require: ' + namespace,
+ token, position=Position.AtBeginning())
+
+ # Report missing goog.require statements.
+ if namespaces_info.IsLastRequire(token):
+ missing_requires = namespaces_info.GetMissingRequires()
+ if missing_requires:
+ self._ReportMissingRequires(
+ missing_requires,
+ tokenutil.GetLastTokenInSameLine(token).next,
+ False)
+
+ elif token.type == Type.OPERATOR:
+ last_in_line = token.IsLastInLine()
# If the token is unary and appears to be used in a unary context
# it's ok. Otherwise, if it's at the end of the line or immediately
# before a comment, it's ok.
'Missing space after "%s"' % token.string,
token,
Position.AtEnd(token.string))
- elif type == Type.WHITESPACE:
+ elif token.type == Type.WHITESPACE:
+ first_in_line = token.IsFirstInLine()
+ last_in_line = token.IsLastInLine()
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if not last_in_line and not first_in_line and not token.next.IsComment():
token,
Position.All(token.string))
+ def _ReportMissingProvides(self, missing_provides, token, need_blank_line):
+ """Reports missing provide statements to the error handler.
+
+ Args:
+ missing_provides: A list of strings where each string is a namespace that
+ should be provided, but is not.
+ token: The token where the error was detected (also where the new provides
+      will be inserted).
+ need_blank_line: Whether a blank line needs to be inserted after the new
+ provides are inserted. May be True, False, or None, where None
+ indicates that the insert location is unknown.
+ """
+ self._HandleError(
+ errors.MISSING_GOOG_PROVIDE,
+ 'Missing the following goog.provide statements:\n' +
+ '\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x,
+ sorted(missing_provides))),
+ token, position=Position.AtBeginning(),
+ fix_data=(missing_provides, need_blank_line))
+
+ def _ReportMissingRequires(self, missing_requires, token, need_blank_line):
+ """Reports missing require statements to the error handler.
+
+ Args:
+ missing_requires: A list of strings where each string is a namespace that
+ should be required, but is not.
+ token: The token where the error was detected (also where the new requires
+      will be inserted).
+ need_blank_line: Whether a blank line needs to be inserted before the new
+ requires are inserted. May be True, False, or None, where None
+ indicates that the insert location is unknown.
+ """
+ self._HandleError(
+ errors.MISSING_GOOG_REQUIRE,
+ 'Missing the following goog.require statements:\n' +
+ '\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
+ sorted(missing_requires))),
+ token, position=Position.AtBeginning(),
+ fix_data=(missing_requires, need_blank_line))
+
def Finalize(self, state, tokenizer_mode):
"""Perform all checks that need to occur after all lines are processed."""
# Call the base class's Finalize function.
super(JavaScriptLintRules, self).Finalize(state, tokenizer_mode)
- # Check for sorted requires statements.
- goog_require_tokens = state.GetGoogRequireTokens()
- requires = [require_token.string for require_token in goog_require_tokens]
- sorted_requires = sorted(requires)
- index = 0
- bad = False
- for item in requires:
- if item != sorted_requires[index]:
- bad = True
- break
- index += 1
-
- if bad:
- self._HandleError(
- errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
- 'goog.require classes must be alphabetized. The correct code is:\n' +
- '\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
- sorted_requires)),
- goog_require_tokens[index],
- position=Position.AtBeginning(),
- fix_data=goog_require_tokens)
-
- # Check for sorted provides statements.
- goog_provide_tokens = state.GetGoogProvideTokens()
- provides = [provide_token.string for provide_token in goog_provide_tokens]
- sorted_provides = sorted(provides)
- index = 0
- bad = False
- for item in provides:
- if item != sorted_provides[index]:
- bad = True
- break
- index += 1
-
- if bad:
+ if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
+ # Report an error for any declared private member that was never used.
+ unused_private_members = (self._declared_private_members -
+ self._used_private_members)
+
+ for variable in unused_private_members:
+ token = self._declared_private_member_tokens[variable]
+ self._HandleError(errors.UNUSED_PRIVATE_MEMBER,
+ 'Unused private member: %s.' % token.string,
+ token)
+
+ # Clear state to prepare for the next file.
+ self._declared_private_member_tokens = {}
+ self._declared_private_members = Set()
+ self._used_private_members = Set()
+
+ namespaces_info = self._namespaces_info
+ if namespaces_info is not None:
+ # If there are no provide or require statements, missing provides and
+ # requires should be reported on line 1.
+ if (not namespaces_info.GetProvidedNamespaces() and
+ not namespaces_info.GetRequiredNamespaces()):
+ missing_provides = namespaces_info.GetMissingProvides()
+ if missing_provides:
+ self._ReportMissingProvides(
+ missing_provides, state.GetFirstToken(), None)
+
+ missing_requires = namespaces_info.GetMissingRequires()
+ if missing_requires:
+ self._ReportMissingRequires(
+ missing_requires, state.GetFirstToken(), None)
+
+ self._CheckSortedRequiresProvides(state.GetFirstToken())
+
+ def _CheckSortedRequiresProvides(self, token):
+ """Checks that all goog.require and goog.provide statements are sorted.
+
+ Note that this method needs to be run after missing statements are added to
+ preserve alphabetical order.
+
+ Args:
+ token: The first token in the token stream.
+ """
+ sorter = requireprovidesorter.RequireProvideSorter()
+ provides_result = sorter.CheckProvides(token)
+ if provides_result:
self._HandleError(
errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
'goog.provide classes must be alphabetized. The correct code is:\n' +
- '\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x,
- sorted_provides)),
- goog_provide_tokens[index],
+ '\n'.join(
+ map(lambda x: 'goog.provide(\'%s\');' % x, provides_result[1])),
+ provides_result[0],
position=Position.AtBeginning(),
- fix_data=goog_provide_tokens)
-
- if FLAGS.closurized_namespaces:
- # Check that we provide everything we need.
- provided_namespaces = state.GetProvidedNamespaces()
- missing_provides = provided_namespaces - set(provides)
- if missing_provides:
- self._HandleError(
- errors.MISSING_GOOG_PROVIDE,
- 'Missing the following goog.provide statements:\n' +
- '\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x,
- sorted(missing_provides))),
- state.GetFirstToken(), position=Position.AtBeginning(),
- fix_data=missing_provides)
-
- # Compose a set of all available namespaces. Explicitly omit goog
- # because if you can call goog.require, you already have goog.
- available_namespaces = (set(requires) | set(provides) | set(['goog']) |
- provided_namespaces)
-
- # Check that we require everything we need.
- missing_requires = set()
- for namespace_variants in state.GetUsedNamespaces():
- # Namespace variants is a list of potential things to require. If we
- # find we're missing one, we are lazy and choose to require the first
- # in the sequence - which should be the namespace.
- if not set(namespace_variants) & available_namespaces:
- missing_requires.add(namespace_variants[0])
-
- if missing_requires:
- self._HandleError(
- errors.MISSING_GOOG_REQUIRE,
- 'Missing the following goog.require statements:\n' +
- '\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
- sorted(missing_requires))),
- state.GetFirstToken(), position=Position.AtBeginning(),
- fix_data=missing_requires)
-
- # Check that we don't require things we don't actually use.
- namespace_variants = state.GetUsedNamespaces()
- used_namespaces = set()
- for a, b in namespace_variants:
- used_namespaces.add(a)
- used_namespaces.add(b)
-
- extra_requires = set()
- for i in requires:
- baseNamespace = i.split('.')[0]
- if (i not in used_namespaces and
- baseNamespace in FLAGS.closurized_namespaces and
- i not in FLAGS.ignored_extra_namespaces):
- extra_requires.add(i)
-
- if extra_requires:
- self._HandleError(
- errors.EXTRA_GOOG_REQUIRE,
- 'The following goog.require statements appear unnecessary:\n' +
- '\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
- sorted(extra_requires))),
- state.GetFirstToken(), position=Position.AtBeginning(),
- fix_data=extra_requires)
+ fix_data=provides_result[0])
+ requires_result = sorter.CheckRequires(token)
+ if requires_result:
+ self._HandleError(
+ errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
+ 'goog.require classes must be alphabetized. The correct code is:\n' +
+ '\n'.join(
+ map(lambda x: 'goog.require(\'%s\');' % x, requires_result[1])),
+ requires_result[0],
+ position=Position.AtBeginning(),
+ fix_data=requires_result[0])
+
+ def GetLongLineExceptions(self):
+ """Gets a list of regexps for lines which can be longer than the limit."""
+ return [
+ re.compile('goog\.require\(.+\);?\s*$'),
+ re.compile('goog\.provide\(.+\);?\s*$')
+ ]
# TODO(robbyw): determine which of these, if any, should be illegal.
EXTENDED_DOC = frozenset([
'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link',
- 'protected', 'notypecheck', 'throws'])
+ 'meaning', 'protected', 'notypecheck', 'throws'])
LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC
functionality needed for JavaScript.
"""
- def __init__(self, closurized_namespaces=''):
- """Initializes a JavaScript token stream state tracker.
-
- Args:
- closurized_namespaces: An optional list of namespace prefixes used for
- testing of goog.provide/require.
- """
+ def __init__(self):
+ """Initializes a JavaScript token stream state tracker."""
statetracker.StateTracker.__init__(self, JsDocFlag)
- self.__closurized_namespaces = closurized_namespaces
-
- def Reset(self):
- """Resets the state tracker to prepare for processing a new page."""
- super(JavaScriptStateTracker, self).Reset()
-
- self.__goog_require_tokens = []
- self.__goog_provide_tokens = []
- self.__provided_namespaces = set()
- self.__used_namespaces = []
def InTopLevel(self):
"""Compute whether we are at the top level in the class.
"""
return not self.InParentheses()
- def GetGoogRequireTokens(self):
- """Returns list of require tokens."""
- return self.__goog_require_tokens
-
- def GetGoogProvideTokens(self):
- """Returns list of provide tokens."""
- return self.__goog_provide_tokens
-
- def GetProvidedNamespaces(self):
- """Returns list of provided namespaces."""
- return self.__provided_namespaces
-
- def GetUsedNamespaces(self):
- """Returns list of used namespaces, is a list of sequences."""
- return self.__used_namespaces
-
def GetBlockType(self, token):
"""Determine the block type given a START_BLOCK token.
"""
super(JavaScriptStateTracker, self).HandleToken(token,
last_non_space_token)
-
- if token.IsType(Type.IDENTIFIER):
- if token.string == 'goog.require':
- class_token = tokenutil.Search(token, Type.STRING_TEXT)
- self.__goog_require_tokens.append(class_token)
-
- elif token.string == 'goog.provide':
- class_token = tokenutil.Search(token, Type.STRING_TEXT)
- self.__goog_provide_tokens.append(class_token)
-
- elif self.__closurized_namespaces:
- self.__AddUsedNamespace(token.string)
- if token.IsType(Type.SIMPLE_LVALUE) and not self.InFunction():
- identifier = token.values['identifier']
-
- if self.__closurized_namespaces:
- namespace = self.GetClosurizedNamespace(identifier)
- if namespace and identifier == namespace:
- self.__provided_namespaces.add(namespace)
- if (self.__closurized_namespaces and
- token.IsType(Type.DOC_FLAG) and
- token.attached_object.flag_type == 'implements'):
- # Interfaces should be goog.require'd.
- doc_start = tokenutil.Search(token, Type.DOC_START_BRACE)
- interface = tokenutil.Search(doc_start, Type.COMMENT)
- self.__AddUsedNamespace(interface.string)
-
- def __AddUsedNamespace(self, identifier):
- """Adds the namespace of an identifier to the list of used namespaces.
-
- Args:
- identifier: An identifier which has been used.
- """
- namespace = self.GetClosurizedNamespace(identifier)
-
- if namespace:
- # We add token.string as a 'namespace' as it is something that could
- # potentially be provided to satisfy this dependency.
- self.__used_namespaces.append([namespace, identifier])
-
- def GetClosurizedNamespace(self, identifier):
- """Given an identifier, returns the namespace that identifier is from.
-
- Args:
- identifier: The identifier to extract a namespace from.
-
- Returns:
- The namespace the given identifier resides in, or None if one could not
- be found.
- """
- parts = identifier.split('.')
- for part in parts:
- if part.endswith('_'):
- # Ignore private variables / inner classes.
- return None
-
- if identifier.startswith('goog.global'):
- # Ignore goog.global, since it is, by definition, global.
- return None
-
- for namespace in self.__closurized_namespaces:
- if identifier.startswith(namespace + '.'):
- last_part = parts[-1]
- if not last_part:
- # TODO(robbyw): Handle this: it's a multi-line identifier.
- return None
-
- if last_part in ('apply', 'inherits', 'call'):
- # Calling one of Function's methods usually indicates use of a
- # superclass.
- parts.pop()
- last_part = parts[-1]
-
- for i in xrange(1, len(parts)):
- part = parts[i]
- if part.isupper():
- # If an identifier is of the form foo.bar.BAZ.x or foo.bar.BAZ,
- # the namespace is foo.bar.
- return '.'.join(parts[:i])
- if part == 'prototype':
- # If an identifier is of the form foo.bar.prototype.x, the
- # namespace is foo.bar.
- return '.'.join(parts[:i])
-
- if last_part.isupper() or not last_part[0].isupper():
- # Strip off the last part of an enum or constant reference.
- parts.pop()
-
- return '.'.join(parts)
-
- return None
+++ /dev/null
-#!/usr/bin/env python
-#
-# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests for JavaScriptStateTracker."""
-
-
-
-import unittest as googletest
-from closure_linter import javascriptstatetracker
-
-class JavaScriptStateTrackerTest(googletest.TestCase):
-
- __test_cases = {
- 'package.CONSTANT' : 'package',
- 'package.methodName' : 'package',
- 'package.subpackage.methodName' : 'package.subpackage',
- 'package.ClassName.something' : 'package.ClassName',
- 'package.ClassName.Enum.VALUE.methodName' : 'package.ClassName.Enum',
- 'package.ClassName.CONSTANT' : 'package.ClassName',
- 'package.ClassName.inherits' : 'package.ClassName',
- 'package.ClassName.apply' : 'package.ClassName',
- 'package.ClassName.methodName.apply' : 'package.ClassName',
- 'package.ClassName.methodName.call' : 'package.ClassName',
- 'package.ClassName.prototype.methodName' : 'package.ClassName',
- 'package.ClassName.privateMethod_' : None,
- 'package.ClassName.prototype.methodName.apply' : 'package.ClassName'
- }
-
- def testGetClosurizedNamespace(self):
- stateTracker = javascriptstatetracker.JavaScriptStateTracker(['package'])
- for identifier, expected_namespace in self.__test_cases.items():
- actual_namespace = stateTracker.GetClosurizedNamespace(identifier)
- self.assertEqual(expected_namespace, actual_namespace,
- 'expected namespace "' + str(expected_namespace) +
- '" for identifier "' + str(identifier) + '" but was "' +
- str(actual_namespace) + '"')
-
-if __name__ == '__main__':
- googletest.main()
-
"""
# Useful patterns for JavaScript parsing.
- IDENTIFIER_CHAR = r'A-Za-z0-9_$.';
+ IDENTIFIER_CHAR = r'A-Za-z0-9_$.'
# Number patterns based on:
# http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html
Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG),
Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG,
JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE),
- Matcher(DOC_FLAG, Type.DOC_FLAG),
+
+ # Encountering a doc flag should leave lex spaces mode.
+ Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE),
# Tokenize braces so we can find types.
Matcher(START_BLOCK, Type.DOC_START_BRACE),
# returned. Hence the order is important because the matchers that come first
# overrule the matchers that come later.
JAVASCRIPT_MATCHERS = {
- # Matchers for basic text mode.
- JavaScriptModes.TEXT_MODE: [
- # Check a big group - strings, starting comments, and regexes - all
- # of which could be intertwined. 'string with /regex/',
- # /regex with 'string'/, /* comment with /regex/ and string */ (and so on)
- Matcher(START_DOC_COMMENT, Type.START_DOC_COMMENT,
- JavaScriptModes.DOC_COMMENT_MODE),
- Matcher(START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
- JavaScriptModes.BLOCK_COMMENT_MODE),
- Matcher(END_OF_LINE_SINGLE_LINE_COMMENT,
- Type.START_SINGLE_LINE_COMMENT),
- Matcher(START_SINGLE_LINE_COMMENT, Type.START_SINGLE_LINE_COMMENT,
- JavaScriptModes.LINE_COMMENT_MODE),
- Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
- JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
- Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
- JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
- Matcher(REGEX, Type.REGEX),
-
- # Next we check for start blocks appearing outside any of the items above.
- Matcher(START_BLOCK, Type.START_BLOCK),
- Matcher(END_BLOCK, Type.END_BLOCK),
-
- # Then we search for function declarations.
- Matcher(FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
- JavaScriptModes.FUNCTION_MODE),
-
- # Next, we convert non-function related parens to tokens.
- Matcher(OPENING_PAREN, Type.START_PAREN),
- Matcher(CLOSING_PAREN, Type.END_PAREN),
-
- # Next, we convert brackets to tokens.
- Matcher(OPENING_BRACKET, Type.START_BRACKET),
- Matcher(CLOSING_BRACKET, Type.END_BRACKET),
-
- # Find numbers. This has to happen before operators because scientific
- # notation numbers can have + and - in them.
- Matcher(NUMBER, Type.NUMBER),
-
- # Find operators and simple assignments
- Matcher(SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
- Matcher(OPERATOR, Type.OPERATOR),
-
- # Find key words and whitespace
- Matcher(KEYWORD, Type.KEYWORD),
- Matcher(WHITESPACE, Type.WHITESPACE),
-
- # Find identifiers
- Matcher(IDENTIFIER, Type.IDENTIFIER),
-
- # Finally, we convert semicolons to tokens.
- Matcher(SEMICOLON, Type.SEMICOLON)],
-
-
- # Matchers for single quote strings.
- JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
- Matcher(SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
- Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
- JavaScriptModes.TEXT_MODE)],
-
-
- # Matchers for double quote strings.
- JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
- Matcher(DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
- Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
- JavaScriptModes.TEXT_MODE)],
-
-
- # Matchers for block comments.
- JavaScriptModes.BLOCK_COMMENT_MODE: [
- # First we check for exiting a block comment.
- Matcher(END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
- JavaScriptModes.TEXT_MODE),
-
- # Match non-comment-ending text..
- Matcher(BLOCK_COMMENT_TEXT, Type.COMMENT)],
-
-
- # Matchers for doc comments.
- JavaScriptModes.DOC_COMMENT_MODE: COMMON_DOC_MATCHERS + [
- Matcher(DOC_COMMENT_TEXT, Type.COMMENT)],
-
- JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: COMMON_DOC_MATCHERS + [
- Matcher(WHITESPACE, Type.COMMENT),
- Matcher(DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],
-
- # Matchers for single line comments.
- JavaScriptModes.LINE_COMMENT_MODE: [
- # We greedy match until the end of the line in line comment mode.
- Matcher(ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
-
-
- # Matchers for code after the function keyword.
- JavaScriptModes.FUNCTION_MODE: [
- # Must match open paren before anything else and move into parameter mode,
- # otherwise everything inside the parameter list is parsed incorrectly.
- Matcher(OPENING_PAREN, Type.START_PARAMETERS,
- JavaScriptModes.PARAMETER_MODE),
- Matcher(WHITESPACE, Type.WHITESPACE),
- Matcher(IDENTIFIER, Type.FUNCTION_NAME)],
-
-
- # Matchers for function parameters
- JavaScriptModes.PARAMETER_MODE: [
- # When in function parameter mode, a closing paren is treated specially.
- # Everything else is treated as lines of parameters.
- Matcher(CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
- JavaScriptModes.TEXT_MODE),
- Matcher(PARAMETERS, Type.PARAMETERS, JavaScriptModes.PARAMETER_MODE)]}
-
+ # Matchers for basic text mode.
+ JavaScriptModes.TEXT_MODE: [
+ # Check a big group - strings, starting comments, and regexes - all
+ # of which could be intertwined. 'string with /regex/',
+ # /regex with 'string'/, /* comment with /regex/ and string */ (and so
+ # on)
+ Matcher(START_DOC_COMMENT, Type.START_DOC_COMMENT,
+ JavaScriptModes.DOC_COMMENT_MODE),
+ Matcher(START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
+ JavaScriptModes.BLOCK_COMMENT_MODE),
+ Matcher(END_OF_LINE_SINGLE_LINE_COMMENT,
+ Type.START_SINGLE_LINE_COMMENT),
+ Matcher(START_SINGLE_LINE_COMMENT, Type.START_SINGLE_LINE_COMMENT,
+ JavaScriptModes.LINE_COMMENT_MODE),
+ Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
+ JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
+ Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
+ JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
+ Matcher(REGEX, Type.REGEX),
+
+ # Next we check for start blocks appearing outside any of the items
+ # above.
+ Matcher(START_BLOCK, Type.START_BLOCK),
+ Matcher(END_BLOCK, Type.END_BLOCK),
+
+ # Then we search for function declarations.
+ Matcher(FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
+ JavaScriptModes.FUNCTION_MODE),
+
+ # Next, we convert non-function related parens to tokens.
+ Matcher(OPENING_PAREN, Type.START_PAREN),
+ Matcher(CLOSING_PAREN, Type.END_PAREN),
+
+ # Next, we convert brackets to tokens.
+ Matcher(OPENING_BRACKET, Type.START_BRACKET),
+ Matcher(CLOSING_BRACKET, Type.END_BRACKET),
+
+ # Find numbers. This has to happen before operators because scientific
+ # notation numbers can have + and - in them.
+ Matcher(NUMBER, Type.NUMBER),
+
+ # Find operators and simple assignments
+ Matcher(SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
+ Matcher(OPERATOR, Type.OPERATOR),
+
+ # Find key words and whitespace.
+ Matcher(KEYWORD, Type.KEYWORD),
+ Matcher(WHITESPACE, Type.WHITESPACE),
+
+ # Find identifiers.
+ Matcher(IDENTIFIER, Type.IDENTIFIER),
+
+ # Finally, we convert semicolons to tokens.
+ Matcher(SEMICOLON, Type.SEMICOLON)],
+
+ # Matchers for single quote strings.
+ JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
+ Matcher(SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
+ Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
+ JavaScriptModes.TEXT_MODE)],
+
+ # Matchers for double quote strings.
+ JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
+ Matcher(DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
+ Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
+ JavaScriptModes.TEXT_MODE)],
+
+ # Matchers for block comments.
+ JavaScriptModes.BLOCK_COMMENT_MODE: [
+ # First we check for exiting a block comment.
+ Matcher(END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
+ JavaScriptModes.TEXT_MODE),
+
+ # Match non-comment-ending text..
+ Matcher(BLOCK_COMMENT_TEXT, Type.COMMENT)],
+
+ # Matchers for doc comments.
+ JavaScriptModes.DOC_COMMENT_MODE: COMMON_DOC_MATCHERS + [
+ Matcher(DOC_COMMENT_TEXT, Type.COMMENT)],
+
+ JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: COMMON_DOC_MATCHERS + [
+ Matcher(WHITESPACE, Type.COMMENT),
+ Matcher(DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],
+
+ # Matchers for single line comments.
+ JavaScriptModes.LINE_COMMENT_MODE: [
+ # We greedy match until the end of the line in line comment mode.
+ Matcher(ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
+
+ # Matchers for code after the function keyword.
+ JavaScriptModes.FUNCTION_MODE: [
+ # Must match open paren before anything else and move into parameter
+ # mode, otherwise everything inside the parameter list is parsed
+ # incorrectly.
+ Matcher(OPENING_PAREN, Type.START_PARAMETERS,
+ JavaScriptModes.PARAMETER_MODE),
+ Matcher(WHITESPACE, Type.WHITESPACE),
+ Matcher(IDENTIFIER, Type.FUNCTION_NAME)],
+
+ # Matchers for function parameters
+ JavaScriptModes.PARAMETER_MODE: [
+ # When in function parameter mode, a closing paren is treated specially.
+ # Everything else is treated as lines of parameters.
+ Matcher(CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
+ JavaScriptModes.TEXT_MODE),
+ Matcher(PARAMETERS, Type.PARAMETERS, JavaScriptModes.PARAMETER_MODE)]}
# When text is not matched, it is given this default type based on mode.
# If unspecified in this map, the default default is Type.NORMAL.
--- /dev/null
+#!/usr/bin/env python
+#
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for gjslint --nostrict.
+
+Tests errors that can be thrown by gjslint when not in strict mode.
+"""
+
+
+
+import os
+import sys
+import unittest
+
+import gflags as flags
+import unittest as googletest
+
+from closure_linter import checker
+from closure_linter import errors
+from closure_linter.common import filetestcase
+
+_RESOURCE_PREFIX = 'closure_linter/testdata'
+
+flags.FLAGS.strict = False
+flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
+flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
+flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
+ 'limited_doc_checks.js')
+
+
+# List of files under testdata to test.
+# We need to list files explicitly since pyglib can't list directories.
+_TEST_FILES = [
+ 'not_strict.js'
+ ]
+
+
+class GJsLintTestSuite(unittest.TestSuite):
+ """Test suite to run a GJsLintTest for each of several files.
+
+ If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
+ testdata to test. Otherwise, _TEST_FILES is used.
+ """
+
+ def __init__(self, tests=()):
+ unittest.TestSuite.__init__(self, tests)
+
+ argv = sys.argv and sys.argv[1:] or []
+ if argv:
+ test_files = argv
+ else:
+ test_files = _TEST_FILES
+ for test_file in test_files:
+ resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
+ self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
+ checker.GJsLintRunner(),
+ errors.ByName))
+
+if __name__ == '__main__':
+ # Don't let main parse args; it happens in the TestSuite.
+ googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
--- /dev/null
+#!/usr/bin/env python
+#
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains logic for sorting goog.provide and goog.require statements.
+
+Closurized JavaScript files use goog.provide and goog.require statements at the
+top of the file to manage dependencies. These statements should be sorted
+alphabetically, however, it is common for them to be accompanied by inline
+comments or suppression annotations. In order to sort these statements without
+disrupting their comments and annotations, the association between statements
+and comments/annotations must be maintained while sorting.
+
+ RequireProvideSorter: Handles checking/fixing of provide/require statements.
+"""
+
+
+
+from closure_linter import javascripttokens
+from closure_linter import tokenutil
+
+# Shorthand
+Type = javascripttokens.JavaScriptTokenType
+
+
+class RequireProvideSorter(object):
+ """Checks for and fixes alphabetization of provide and require statements.
+
+ When alphabetizing, comments on the same line or comments directly above a
+ goog.provide or goog.require statement are associated with that statement and
+ stay with the statement as it gets sorted.
+ """
+
+ def CheckProvides(self, token):
+ """Checks alphabetization of goog.provide statements.
+
+ Iterates over tokens in given token stream, identifies goog.provide tokens,
+ and checks that they occur in alphabetical order by the object being
+ provided.
+
+ Args:
+ token: A token in the token stream before any goog.provide tokens.
+
+ Returns:
+ A tuple containing the first provide token in the token stream and a list
+ of provided objects sorted alphabetically. For example:
+
+ (JavaScriptToken, ['object.a', 'object.b', ...])
+
+ None is returned if all goog.provide statements are already sorted.
+ """
+ provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
+ provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
+ sorted_provide_strings = sorted(provide_strings)
+ if provide_strings != sorted_provide_strings:
+ return [provide_tokens[0], sorted_provide_strings]
+ return None
+
+ def CheckRequires(self, token):
+ """Checks alphabetization of goog.require statements.
+
+ Iterates over tokens in given token stream, identifies goog.require tokens,
+ and checks that they occur in alphabetical order by the dependency being
+ required.
+
+ Args:
+ token: A token in the token stream before any goog.require tokens.
+
+ Returns:
+ A tuple containing the first require token in the token stream and a list
+ of required dependencies sorted alphabetically. For example:
+
+ (JavaScriptToken, ['object.a', 'object.b', ...])
+
+ None is returned if all goog.require statements are already sorted.
+ """
+ require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
+ require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
+ sorted_require_strings = sorted(require_strings)
+ if require_strings != sorted_require_strings:
+ return (require_tokens[0], sorted_require_strings)
+ return None
+
+ def FixProvides(self, token):
+ """Sorts goog.provide statements in the given token stream alphabetically.
+
+ Args:
+ token: The first token in the token stream.
+ """
+ self._FixProvidesOrRequires(
+ self._GetRequireOrProvideTokens(token, 'goog.provide'))
+
+ def FixRequires(self, token):
+ """Sorts goog.require statements in the given token stream alphabetically.
+
+ Args:
+ token: The first token in the token stream.
+ """
+ self._FixProvidesOrRequires(
+ self._GetRequireOrProvideTokens(token, 'goog.require'))
+
+ def _FixProvidesOrRequires(self, tokens):
+ """Sorts goog.provide or goog.require statements.
+
+ Args:
+ tokens: A list of goog.provide or goog.require tokens in the order they
+ appear in the token stream. i.e. the first token in this list must
+ be the first goog.provide or goog.require token.
+ """
+ strings = self._GetRequireOrProvideTokenStrings(tokens)
+ sorted_strings = sorted(strings)
+
+ # Make a separate pass to remove any blank lines between goog.require/
+ # goog.provide tokens.
+ first_token = tokens[0]
+ last_token = tokens[-1]
+ i = last_token
+ while i != first_token:
+ if i.type is Type.BLANK_LINE:
+ tokenutil.DeleteToken(i)
+ i = i.previous
+
+ # A map from required/provided object name to tokens that make up the line
+ # it was on, including any comments immediately before it or after it on the
+ # same line.
+ tokens_map = self._GetTokensMap(tokens)
+
+ # Iterate over the map removing all tokens.
+ for name in tokens_map:
+ tokens_to_delete = tokens_map[name]
+ for i in tokens_to_delete:
+ tokenutil.DeleteToken(i)
+
+ # Re-add all tokens in the map in alphabetical order.
+ insert_after = tokens[0].previous
+ for string in sorted_strings:
+ for i in tokens_map[string]:
+ tokenutil.InsertTokenAfter(i, insert_after)
+ insert_after = i
+
+ def _GetRequireOrProvideTokens(self, token, token_string):
+ """Gets all goog.provide or goog.require tokens in the given token stream.
+
+ Args:
+ token: The first token in the token stream.
+ token_string: One of 'goog.provide' or 'goog.require' to indicate which
+ tokens to find.
+
+ Returns:
+ A list of goog.provide or goog.require tokens in the order they appear in
+ the token stream.
+ """
+ tokens = []
+ while token:
+ if token.type == Type.IDENTIFIER:
+ if token.string == token_string:
+ tokens.append(token)
+ elif token.string not in ['goog.require', 'goog.provide']:
+ # The goog.provide and goog.require identifiers are at the top of the
+ # file. So if any other identifier is encountered, return.
+ break
+ token = token.next
+
+ return tokens
+
+ def _GetRequireOrProvideTokenStrings(self, tokens):
+ """Gets a list of strings corresponding to the given list of tokens.
+
+ The string will be the next string in the token stream after each token in
+ tokens. This is used to find the object being provided/required by a given
+ goog.provide or goog.require token.
+
+ Args:
+ tokens: A list of goog.provide or goog.require tokens.
+
+ Returns:
+ A list of object names that are being provided or required by the given
+ list of tokens. For example:
+
+ ['object.a', 'object.c', 'object.b']
+ """
+ token_strings = []
+ for token in tokens:
+ name = tokenutil.Search(token, Type.STRING_TEXT).string
+ token_strings.append(name)
+ return token_strings
+
+ def _GetTokensMap(self, tokens):
+ """Gets a map from object name to tokens associated with that object.
+
+ Starting from the goog.provide/goog.require token, searches backwards in the
+ token stream for any lines that start with a comment. These lines are
+ associated with the goog.provide/goog.require token. Also associates any
+ tokens on the same line as the goog.provide/goog.require token with that
+ token.
+
+ Args:
+ tokens: A list of goog.provide or goog.require tokens.
+
+ Returns:
+ A dictionary that maps object names to the tokens associated with the
+ goog.provide or goog.require of that object name. For example:
+
+ {
+ 'object.a': [JavaScriptToken, JavaScriptToken, ...],
+ 'object.b': [...]
+ }
+
+ The list of tokens includes any comment lines above the goog.provide or
+ goog.require statement and everything after the statement on the same
+ line. For example, all of the following would be associated with
+ 'object.a':
+
+ /** @suppress {extraRequire} */
+ goog.require('object.a'); // Some comment.
+ """
+ tokens_map = {}
+ for token in tokens:
+ object_name = tokenutil.Search(token, Type.STRING_TEXT).string
+ # If the previous line starts with a comment, presume that the comment
+ # relates to the goog.require or goog.provide and keep them together when
+ # sorting.
+ first_token = token
+ previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
+ while previous_first_token.IsAnyType(Type.COMMENT_TYPES):
+ first_token = previous_first_token
+ previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
+ first_token)
+
+ # Find the last token on the line.
+ last_token = tokenutil.GetLastTokenInSameLine(token)
+
+ all_tokens = self._GetTokenList(first_token, last_token)
+ tokens_map[object_name] = all_tokens
+ return tokens_map
+
+ def _GetTokenList(self, first_token, last_token):
+ """Gets a list of all tokens from first_token to last_token, inclusive.
+
+ Args:
+ first_token: The first token to get.
+ last_token: The last token to get.
+
+ Returns:
+ A list of all tokens between first_token and last_token, including both
+ first_token and last_token.
+
+ Raises:
+ Exception: If the token stream ends before last_token is reached.
+ """
+ token_list = []
+ token = first_token
+ while token != last_token:
+ if not token:
+ raise Exception('ran out of tokens')
+ token_list.append(token)
+ token = token.next
+ token_list.append(last_token)
+
+ return token_list
--- /dev/null
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for RequireProvideSorter."""
+
+
+
+import unittest as googletest
+from closure_linter import ecmametadatapass
+from closure_linter import javascripttokenizer
+from closure_linter import javascripttokens
+from closure_linter import requireprovidesorter
+
+# pylint: disable-msg=C6409
+TokenType = javascripttokens.JavaScriptTokenType
+
+
+class RequireProvideSorterTest(googletest.TestCase):
+ """Tests for RequireProvideSorter."""
+
+ _tokenizer = javascripttokenizer.JavaScriptTokenizer()
+ _metadata_pass = ecmametadatapass.EcmaMetaDataPass()
+
+ def testFixRequires_removeBlankLines(self):
+ """Tests that blank lines are omitted in sorted goog.require statements."""
+ input_lines = [
+ 'goog.provide(\'package.subpackage.Whatever\');',
+ '',
+ 'goog.require(\'package.subpackage.ClassB\');',
+ '',
+ 'goog.require(\'package.subpackage.ClassA\');'
+ ]
+ expected_lines = [
+ 'goog.provide(\'package.subpackage.Whatever\');',
+ '',
+ 'goog.require(\'package.subpackage.ClassA\');',
+ 'goog.require(\'package.subpackage.ClassB\');'
+ ]
+ token = self._tokenizer.TokenizeFile(input_lines)
+ self._metadata_pass.Reset()
+ self._metadata_pass.Process(token)
+
+ sorter = requireprovidesorter.RequireProvideSorter()
+ sorter.FixRequires(token)
+
+ self.assertEquals(expected_lines, self._GetLines(token))
+
+ def _GetLines(self, token):
+ """Returns an array of lines based on the specified token stream."""
+ lines = []
+ line = ''
+ while token:
+ line += token.string
+ if token.IsLastInLine():
+ lines.append(line)
+ line = ''
+ token = token.next
+ return lines
+
+if __name__ == '__main__':
+ googletest.main()
'implements',
'implicitCast',
'interface',
+ 'lends',
'license',
'noalias',
'nocompile',
# Includes all Closure Compiler @suppress types.
# Not all of these annotations are interpreted by Closure Linter.
+ #
+ # Specific cases:
+ # - accessControls is supported by the compiler at the expression
+ # and method level to suppress warnings about private/protected
+ # access (method level applies to all references in the method).
+ # The linter mimics the compiler behavior.
SUPPRESS_TYPES = frozenset([
'accessControls',
+ 'ambiguousFunctionDecl',
'checkRegExp',
'checkTypes',
'checkVars',
+ 'const',
+ 'constantProperty',
'deprecated',
'duplicate',
+ 'es5Strict',
+ 'externsValidation',
+ 'extraProvide',
+ 'extraRequire',
'fileoverviewTags',
+ 'globalThis',
+ 'internetExplorerChecks',
'invalidCasts',
'missingProperties',
+ 'missingProvide',
+ 'missingRequire',
'nonStandardJsDocs',
'strictModuleDepCheck',
+ 'tweakValidation',
+ 'typeInvalidation',
+ 'undefinedNames',
'undefinedVars',
'underscore',
'unknownDefines',
[Type.DOC_FLAG])
if brace:
end_token, contents = _GetMatchingEndBraceAndContents(brace)
- self.suppressions[contents] = token
+ for suppression in contents.split('|'):
+ self.suppressions[suppression] = token
+
+ def SuppressionOnly(self):
+ """Returns whether this comment contains only suppression flags."""
+ for flag_type in self.__flags.keys():
+ if flag_type != 'suppress':
+ return False
+ return True
def AddFlag(self, flag):
"""Add a new document flag.
Returns:
True if documentation may be pulled off the superclass.
"""
- return (self.HasFlag('inheritDoc') or
- (self.HasFlag('override') and
- not self.HasFlag('return') and
- not self.HasFlag('param')))
+ return self.HasFlag('inheritDoc') or self.HasFlag('override')
def HasFlag(self, flag_type):
"""Test if the given flag has been set.
last_line = iterator.line_number
last_token = None
contents = ''
- while not iterator.type in Type.FLAG_ENDING_TYPES:
+ doc_depth = 0
+ while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
if (iterator.IsFirstInLine() and
DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
# If we have a blank comment line, consider that an implicit
# only a doc comment prefix or whitespace.
break
+ # b/2983692
+ # don't prematurely match against a @flag if inside a doc flag
+ # need to think about what is the correct behavior for unterminated
+ # inline doc flags
+ if (iterator.type == Type.DOC_START_BRACE and
+ iterator.next.type == Type.DOC_INLINE_FLAG):
+ doc_depth += 1
+ elif (iterator.type == Type.DOC_END_BRACE and
+ doc_depth > 0):
+ doc_depth -= 1
+
if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
contents += iterator.string
last_token = iterator
self.is_constructor = doc and doc.HasFlag('constructor')
self.is_interface = doc and doc.HasFlag('interface')
self.has_return = False
+ self.has_throw = False
self.has_this = False
self.name = name
self.doc = doc
if function:
function.has_return = True
+ elif type == Type.KEYWORD and token.string == 'throw':
+ function = self.GetFunction()
+ if function:
+ function.has_throw = True
+
elif type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
jsdoc = self.GetDocComment()
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
-from closure_linter.common import tokens
-from closure_linter import javascripttokens
-
import copy
+from closure_linter import javascripttokens
+from closure_linter.common import tokens
+
# Shorthand
JavaScriptToken = javascripttokens.JavaScriptToken
Type = tokens.TokenType
+
def GetFirstTokenInSameLine(token):
"""Returns the first token in the same line as token.
return token
+def GetFirstTokenInPreviousLine(token):
+  """Returns the first token in the line preceding the given token's line.
+
+ Args:
+ token: Any token in the line.
+
+ Returns:
+    The first token in the preceding line, or None if token is on the
+    first line.
+ """
+ first_in_line = GetFirstTokenInSameLine(token)
+ if first_in_line.previous:
+ return GetFirstTokenInSameLine(first_in_line.previous)
+
+ return None
+
+
+def GetLastTokenInSameLine(token):
+ """Returns the last token in the same line as token.
+
+ Args:
+ token: Any token in the line.
+
+ Returns:
+ The last token in the same line as token.
+ """
+ while not token.IsLastInLine():
+ token = token.next
+ return token
+
+
+def GetAllTokensInSameLine(token):
+ """Returns all tokens in the same line as the given token.
+
+ Args:
+ token: Any token in the line.
+
+ Returns:
+ All tokens on the same line as the given token.
+ """
+ first_token = GetFirstTokenInSameLine(token)
+ last_token = GetLastTokenInSameLine(token)
+
+ tokens_in_line = []
+ while first_token != last_token:
+ tokens_in_line.append(first_token)
+ first_token = first_token.next
+ tokens_in_line.append(last_token)
+
+ return tokens_in_line
+
+
def CustomSearch(start_token, func, end_func=None, distance=None,
reverse=False):
"""Returns the first token where func is True within distance of this token.
else:
while token and (distance is None or distance > 0):
- next = token.next
- if next:
- if func(next):
- return next
- if end_func and end_func(next):
+ next_token = token.next
+ if next_token:
+ if func(next_token):
+ return next_token
+ if end_func and end_func(next_token):
return None
- token = next
+ token = next_token
if distance is not None:
distance -= 1
reverse: When true, search the tokens before this one instead of the tokens
after it
-
Returns:
The first token of any type in token_types within distance of this token, or
None if no such token is found.
following_token.metadata.last_code = token.metadata.last_code
following_token = following_token.next
-def DeleteTokens(token, tokenCount):
+
+def DeleteTokens(token, token_count):
"""Deletes the given number of tokens starting with the given token.
Args:
token: The token to start deleting at.
- tokenCount: The total number of tokens to delete.
+ token_count: The total number of tokens to delete.
"""
- for i in xrange(1, tokenCount):
+ for i in xrange(1, token_count):
DeleteToken(token.next)
DeleteToken(token)
+
def InsertTokenAfter(new_token, token):
- """Insert new_token after token
+ """Insert new_token after token.
Args:
new_token: A token to be added to the stream
iterator = iterator.next
+def InsertTokensAfter(new_tokens, token):
+ """Insert multiple tokens after token.
+
+ Args:
+ new_tokens: An array of tokens to be added to the stream
+ token: A token already in the stream
+ """
+ # TODO(user): It would be nicer to have InsertTokenAfter defer to here
+ # instead of vice-versa.
+ current_token = token
+ for new_token in new_tokens:
+ InsertTokenAfter(new_token, current_token)
+ current_token = new_token
+
+
def InsertSpaceTokenAfter(token):
"""Inserts a space token after the given token.
token: The token to insert a space token after
Returns:
- A single space token"""
+ A single space token
+ """
space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
token.line_number)
InsertTokenAfter(space_token, token)
-def InsertLineAfter(token):
+def InsertBlankLineAfter(token):
"""Inserts a blank line after the given token.
Args:
token: The token to insert a blank line after
Returns:
- A single space token"""
+ A single space token
+ """
blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
token.line_number + 1)
- InsertTokenAfter(blank_token, token)
- # Update all subsequent ine numbers.
- blank_token = blank_token.next
- while blank_token:
- blank_token.line_number += 1
- blank_token = blank_token.next
+ InsertLineAfter(token, [blank_token])
+
+
+def InsertLineAfter(token, new_tokens):
+ """Inserts a new line consisting of new_tokens after the given token.
+
+ Args:
+ token: The token to insert after.
+ new_tokens: The tokens that will make up the new line.
+ """
+ insert_location = token
+ for new_token in new_tokens:
+ InsertTokenAfter(new_token, insert_location)
+ insert_location = new_token
+
+ # Update all subsequent line numbers.
+ next_token = new_tokens[-1].next
+ while next_token:
+ next_token.line_number += 1
+ next_token = next_token.next
def SplitToken(token, position):
def Compare(token1, token2):
"""Compares two tokens and determines their relative order.
+ Args:
+ token1: The first token to compare.
+ token2: The second token to compare.
+
Returns:
A negative integer, zero, or a positive integer as the first token is
before, equal, or after the second in the token stream.
+++ /dev/null
-#!/usr/bin/env python
-
-# Copyright (c) 2007, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# ---
-# Author: Chad Lester
-# Design and style contributions by:
-# Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann,
-# Eric Veach, Laurence Gonsalves, Matthew Springer
-# Code reorganized a bit by Craig Silverstein
-
-"""This module is used to define and parse command line flags.
-
-This module defines a *distributed* flag-definition policy: rather than
-an application having to define all flags in or near main(), each python
-module defines flags that are useful to it. When one python module
-imports another, it gains access to the other's flags. (This is
-implemented by having all modules share a common, global registry object
-containing all the flag information.)
-
-Flags are defined through the use of one of the DEFINE_xxx functions.
-The specific function used determines how the flag is parsed, checked,
-and optionally type-converted, when it's seen on the command line.
-
-
-IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with a
-'FlagValues' object (typically the global FlagValues FLAGS, defined
-here). The 'FlagValues' object can scan the command line arguments and
-pass flag arguments to the corresponding 'Flag' objects for
-value-checking and type conversion. The converted flag values are
-available as attributes of the 'FlagValues' object.
-
-Code can access the flag through a FlagValues object, for instance
-gflags.FLAGS.myflag. Typically, the __main__ module passes the
-command line arguments to gflags.FLAGS for parsing.
-
-At bottom, this module calls getopt(), so getopt functionality is
-supported, including short- and long-style flags, and the use of -- to
-terminate flags.
-
-Methods defined by the flag module will throw 'FlagsError' exceptions.
-The exception argument will be a human-readable string.
-
-
-FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags
-take a name, default value, help-string, and optional 'short' name
-(one-letter name). Some flags have other arguments, which are described
-with the flag.
-
-DEFINE_string: takes any input, and interprets it as a string.
-
-DEFINE_bool or
-DEFINE_boolean: typically does not take an argument: say --myflag to
- set FLAGS.myflag to true, or --nomyflag to set
- FLAGS.myflag to false. Alternately, you can say
- --myflag=true or --myflag=t or --myflag=1 or
- --myflag=false or --myflag=f or --myflag=0
-
-DEFINE_float: takes an input and interprets it as a floating point
- number. Takes optional args lower_bound and upper_bound;
- if the number specified on the command line is out of
- range, it will raise a FlagError.
-
-DEFINE_integer: takes an input and interprets it as an integer. Takes
- optional args lower_bound and upper_bound as for floats.
-
-DEFINE_enum: takes a list of strings which represents legal values. If
- the command-line value is not in this list, raise a flag
- error. Otherwise, assign to FLAGS.flag as a string.
-
-DEFINE_list: Takes a comma-separated list of strings on the commandline.
- Stores them in a python list object.
-
-DEFINE_spaceseplist: Takes a space-separated list of strings on the
- commandline. Stores them in a python list object.
- Example: --myspacesepflag "foo bar baz"
-
-DEFINE_multistring: The same as DEFINE_string, except the flag can be
- specified more than once on the commandline. The
- result is a python list object (list of strings),
- even if the flag is only on the command line once.
-
-DEFINE_multi_int: The same as DEFINE_integer, except the flag can be
- specified more than once on the commandline. The
- result is a python list object (list of ints), even if
- the flag is only on the command line once.
-
-
-SPECIAL FLAGS: There are a few flags that have special meaning:
- --help prints a list of all the flags in a human-readable fashion
- --helpshort prints a list of all key flags (see below).
- --helpxml prints a list of all flags, in XML format. DO NOT parse
- the output of --help and --helpshort. Instead, parse
- the output of --helpxml. For more info, see
- "OUTPUT FOR --helpxml" below.
- --flagfile=foo read flags from file foo.
- --undefok=f1,f2 ignore unrecognized option errors for f1,f2.
- For boolean flags, you should use --undefok=boolflag, and
- --boolflag and --noboolflag will be accepted. Do not use
- --undefok=noboolflag.
- -- as in getopt(), terminates flag-processing
-
-
-NOTE ON --flagfile:
-
-Flags may be loaded from text files in addition to being specified on
-the commandline.
-
-Any flags you don't feel like typing, throw them in a file, one flag per
-line, for instance:
- --myflag=myvalue
- --nomyboolean_flag
-You then specify your file with the special flag '--flagfile=somefile'.
-You CAN recursively nest flagfile= tokens OR use multiple files on the
-command line. Lines beginning with a single hash '#' or a double slash
-'//' are comments in your flagfile.
-
-Any flagfile=<file> will be interpreted as having a relative path from
-the current working directory rather than from the place the file was
-included from:
- myPythonScript.py --flagfile=config/somefile.cfg
-
-If somefile.cfg includes further --flagfile= directives, these will be
-referenced relative to the original CWD, not from the directory the
-including flagfile was found in!
-
-The caveat applies to people who are including a series of nested files
-in a different dir than they are executing out of. Relative path names
-are always from CWD, not from the directory of the parent include
-flagfile. We do now support '~' expanded directory names.
-
-Absolute path names ALWAYS work!
-
-
-EXAMPLE USAGE:
-
- import gflags
- FLAGS = gflags.FLAGS
-
- # Flag names are globally defined! So in general, we need to be
- # careful to pick names that are unlikely to be used by other libraries.
- # If there is a conflict, we'll get an error at import time.
- gflags.DEFINE_string('name', 'Mr. President', 'your name')
- gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0)
- gflags.DEFINE_boolean('debug', False, 'produces debugging output')
- gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender')
-
- def main(argv):
- try:
- argv = FLAGS(argv) # parse flags
- except gflags.FlagsError, e:
- print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
- sys.exit(1)
- if FLAGS.debug: print 'non-flag arguments:', argv
- print 'Happy Birthday', FLAGS.name
- if FLAGS.age is not None:
- print 'You are a %s, who is %d years old' % (FLAGS.gender, FLAGS.age)
-
- if __name__ == '__main__':
- main(sys.argv)
-
-
-KEY FLAGS:
-
-As we already explained, each module gains access to all flags defined
-by all the other modules it transitively imports. In the case of
-non-trivial scripts, this means a lot of flags ... For documentation
-purposes, it is good to identify the flags that are key (i.e., really
-important) to a module. Clearly, the concept of "key flag" is a
-subjective one. When trying to determine whether a flag is key to a
-module or not, assume that you are trying to explain your module to a
-potential user: which flags would you really like to mention first?
-
-We'll describe shortly how to declare which flags are key to a module.
-For the moment, assume we know the set of key flags for each module.
-Then, if you use the app.py module, you can use the --helpshort flag to
-print only the help for the flags that are key to the main module, in a
-human-readable format.
-
-NOTE: If you need to parse the flag help, do NOT use the output of
---help / --helpshort. That output is meant for human consumption, and
-may be changed in the future. Instead, use --helpxml; flags that are
-key for the main module are marked there with a <key>yes</key> element.
-
-The set of key flags for a module M is composed of:
-
-1. Flags defined by module M by calling a DEFINE_* function.
-
-2. Flags that module M explictly declares as key by using the function
-
- DECLARE_key_flag(<flag_name>)
-
-3. Key flags of other modules that M specifies by using the function
-
- ADOPT_module_key_flags(<other_module>)
-
- This is a "bulk" declaration of key flags: each flag that is key for
- <other_module> becomes key for the current module too.
-
-Notice that if you do not use the functions described at points 2 and 3
-above, then --helpshort prints information only about the flags defined
-by the main module of our script. In many cases, this behavior is good
-enough. But if you move part of the main module code (together with the
-related flags) into a different module, then it is nice to use
-DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort
-lists all relevant flags (otherwise, your code refactoring may confuse
-your users).
-
-Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own
-pluses and minuses: DECLARE_key_flag is more targeted and may lead a
-more focused --helpshort documentation. ADOPT_module_key_flags is good
-for cases when an entire module is considered key to the current script.
-Also, it does not require updates to client scripts when a new flag is
-added to the module.
-
-
-EXAMPLE USAGE 2 (WITH KEY FLAGS):
-
-Consider an application that contains the following three files (two
-auxiliary modules and a main module):
-
-File libfoo.py:
-
- import gflags
-
- gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start')
- gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.')
-
- ... some code ...
-
-File libbar.py:
-
- import gflags
-
- gflags.DEFINE_string('bar_gfs_path', '/gfs/path',
- 'Path to the GFS files for libbar.')
- gflags.DEFINE_string('email_for_bar_errors', 'bar-team@google.com',
- 'Email address for bug reports about module libbar.')
- gflags.DEFINE_boolean('bar_risky_hack', False,
- 'Turn on an experimental and buggy optimization.')
-
- ... some code ...
-
-File myscript.py:
-
- import gflags
- import libfoo
- import libbar
-
- gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.')
-
- # Declare that all flags that are key for libfoo are
- # key for this module too.
- gflags.ADOPT_module_key_flags(libfoo)
-
- # Declare that the flag --bar_gfs_path (defined in libbar) is key
- # for this module.
- gflags.DECLARE_key_flag('bar_gfs_path')
-
- ... some code ...
-
-When myscript is invoked with the flag --helpshort, the resulted help
-message lists information about all the key flags for myscript:
---num_iterations, --num_replicas, --rpc2, and --bar_gfs_path (in
-addition to the special flags --help and --helpshort).
-
-Of course, myscript uses all the flags declared by it (in this case,
-just --num_replicas) or by any of the modules it transitively imports
-(e.g., the modules libfoo, libbar). E.g., it can access the value of
-FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key
-flag for myscript.
-
-
-OUTPUT FOR --helpxml:
-
-The --helpxml flag generates output with the following structure:
-
-<?xml version="1.0"?>
-<AllFlags>
- <program>PROGRAM_BASENAME</program>
- <usage>MAIN_MODULE_DOCSTRING</usage>
- (<flag>
- [<key>yes</key>]
- <file>DECLARING_MODULE</file>
- <name>FLAG_NAME</name>
- <meaning>FLAG_HELP_MESSAGE</meaning>
- <default>DEFAULT_FLAG_VALUE</default>
- <current>CURRENT_FLAG_VALUE</current>
- <type>FLAG_TYPE</type>
- [OPTIONAL_ELEMENTS]
- </flag>)*
-</AllFlags>
-
-Notes:
-
-1. The output is intentionally similar to the output generated by the
-C++ command-line flag library. The few differences are due to the
-Python flags that do not have a C++ equivalent (at least not yet),
-e.g., DEFINE_list.
-
-2. New XML elements may be added in the future.
-
-3. DEFAULT_FLAG_VALUE is in serialized form, i.e., the string you can
-pass for this flag on the command-line. E.g., for a flag defined
-using DEFINE_list, this field may be foo,bar, not ['foo', 'bar'].
-
-4. CURRENT_FLAG_VALUE is produced using str(). This means that the
-string 'false' will be represented in the same way as the boolean
-False. Using repr() would have removed this ambiguity and simplified
-parsing, but would have broken the compatibility with the C++
-command-line flags.
-
-5. OPTIONAL_ELEMENTS describe elements relevant for certain kinds of
-flags: lower_bound, upper_bound (for flags that specify bounds),
-enum_value (for enum flags), list_separator (for flags that consist of
-a list of values, separated by a special token).
-
-6. We do not provide any example here: please use --helpxml instead.
-"""
-
-import cgi
-import getopt
-import os
-import re
-import string
-import sys
-
-# Are we running at least python 2.2?
-try:
- if tuple(sys.version_info[:3]) < (2,2,0):
- raise NotImplementedError("requires python 2.2.0 or later")
-except AttributeError: # a very old python, that lacks sys.version_info
- raise NotImplementedError("requires python 2.2.0 or later")
-
-# If we're not running at least python 2.2.1, define True, False, and bool.
-# Thanks, Guido, for the code.
-try:
- True, False, bool
-except NameError:
- False = 0
- True = 1
- def bool(x):
- if x:
- return True
- else:
- return False
-
-# Are we running under pychecker?
-_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules
-
-
-def _GetCallingModule():
- """Returns the name of the module that's calling into this module.
-
- We generally use this function to get the name of the module calling a
- DEFINE_foo... function.
- """
- # Walk down the stack to find the first globals dict that's not ours.
- for depth in range(1, sys.getrecursionlimit()):
- if not sys._getframe(depth).f_globals is globals():
- globals_for_frame = sys._getframe(depth).f_globals
- module_name = _GetModuleObjectAndName(globals_for_frame)[1]
- if module_name is not None:
- return module_name
- raise AssertionError("No module was found")
-
-
-def _GetThisModuleObjectAndName():
- """Returns: (module object, module name) for this module."""
- return _GetModuleObjectAndName(globals())
-
-
-# module exceptions:
-class FlagsError(Exception):
- """The base class for all flags errors."""
- pass
-
-
-class DuplicateFlag(FlagsError):
- """Raised if there is a flag naming conflict."""
- pass
-
-
-class DuplicateFlagCannotPropagateNoneToSwig(DuplicateFlag):
- """Special case of DuplicateFlag -- SWIG flag value can't be set to None.
-
- This can be raised when a duplicate flag is created. Even if allow_override is
- True, we still abort if the new value is None, because it's currently
- impossible to pass None default value back to SWIG. See FlagValues.SetDefault
- for details.
- """
- pass
-
-
-# A DuplicateFlagError conveys more information than a
-# DuplicateFlag. Since there are external modules that create
-# DuplicateFlags, the interface to DuplicateFlag shouldn't change.
-class DuplicateFlagError(DuplicateFlag):
-
- def __init__(self, flagname, flag_values):
- self.flagname = flagname
- message = "The flag '%s' is defined twice." % self.flagname
- flags_by_module = flag_values.FlagsByModuleDict()
- for module in flags_by_module:
- for flag in flags_by_module[module]:
- if flag.name == flagname or flag.short_name == flagname:
- message = message + " First from " + module + ","
- break
- message = message + " Second from " + _GetCallingModule()
- DuplicateFlag.__init__(self, message)
-
-
-class IllegalFlagValue(FlagsError):
- """The flag command line argument is illegal."""
- pass
-
-
-class UnrecognizedFlag(FlagsError):
- """Raised if a flag is unrecognized."""
- pass
-
-
-# An UnrecognizedFlagError conveys more information than an
-# UnrecognizedFlag. Since there are external modules that create
-# DuplicateFlags, the interface to DuplicateFlag shouldn't change.
-class UnrecognizedFlagError(UnrecognizedFlag):
- def __init__(self, flagname):
- self.flagname = flagname
- UnrecognizedFlag.__init__(
- self, "Unknown command line flag '%s'" % flagname)
-
-
-# Global variable used by expvar
-_exported_flags = {}
-_help_width = 80 # width of help output
-
-
-def GetHelpWidth():
- """Returns: an integer, the width of help lines that is used in TextWrap."""
- return _help_width
-
-
-def CutCommonSpacePrefix(text):
- """Removes a common space prefix from the lines of a multiline text.
-
- If the first line does not start with a space, it is left as it is and
- only in the remaining lines a common space prefix is being searched
- for. That means the first line will stay untouched. This is especially
- useful to turn doc strings into help texts. This is because some
- people prefer to have the doc comment start already after the
- apostrophy and then align the following lines while others have the
- apostrophies on a seperately line.
-
- The function also drops trailing empty lines and ignores empty lines
- following the initial content line while calculating the initial
- common whitespace.
-
- Args:
- text: text to work on
-
- Returns:
- the resulting text
- """
- text_lines = text.splitlines()
- # Drop trailing empty lines
- while text_lines and not text_lines[-1]:
- text_lines = text_lines[:-1]
- if text_lines:
- # We got some content, is the first line starting with a space?
- if text_lines[0] and text_lines[0][0].isspace():
- text_first_line = []
- else:
- text_first_line = [text_lines.pop(0)]
- # Calculate length of common leading whitesppace (only over content lines)
- common_prefix = os.path.commonprefix([line for line in text_lines if line])
- space_prefix_len = len(common_prefix) - len(common_prefix.lstrip())
- # If we have a common space prefix, drop it from all lines
- if space_prefix_len:
- for index in xrange(len(text_lines)):
- if text_lines[index]:
- text_lines[index] = text_lines[index][space_prefix_len:]
- return '\n'.join(text_first_line + text_lines)
- return ''
-
-
-def TextWrap(text, length=None, indent='', firstline_indent=None, tabs=' '):
- """Wraps a given text to a maximum line length and returns it.
-
- We turn lines that only contain whitespaces into empty lines. We keep
- new lines and tabs (e.g., we do not treat tabs as spaces).
-
- Args:
- text: text to wrap
- length: maximum length of a line, includes indentation
- if this is None then use GetHelpWidth()
- indent: indent for all but first line
- firstline_indent: indent for first line; if None, fall back to indent
- tabs: replacement for tabs
-
- Returns:
- wrapped text
-
- Raises:
- FlagsError: if indent not shorter than length
- FlagsError: if firstline_indent not shorter than length
- """
- # Get defaults where callee used None
- if length is None:
- length = GetHelpWidth()
- if indent is None:
- indent = ''
- if len(indent) >= length:
- raise FlagsError('Indent must be shorter than length')
- # In line we will be holding the current line which is to be started
- # with indent (or firstline_indent if available) and then appended
- # with words.
- if firstline_indent is None:
- firstline_indent = ''
- line = indent
- else:
- line = firstline_indent
- if len(firstline_indent) >= length:
- raise FlagsError('First iline indent must be shorter than length')
-
- # If the callee does not care about tabs we simply convert them to
- # spaces If callee wanted tabs to be single space then we do that
- # already here.
- if not tabs or tabs == ' ':
- text = text.replace('\t', ' ')
- else:
- tabs_are_whitespace = not tabs.strip()
-
- line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE)
-
- # Split the text into lines and the lines with the regex above. The
- # resulting lines are collected in result[]. For each split we get the
- # spaces, the tabs and the next non white space (e.g. next word).
- result = []
- for text_line in text.splitlines():
- # Store result length so we can find out whether processing the next
- # line gave any new content
- old_result_len = len(result)
- # Process next line with line_regex. For optimization we do an rstrip().
- # - process tabs (changes either line or word, see below)
- # - process word (first try to squeeze on line, then wrap or force wrap)
- # Spaces found on the line are ignored, they get added while wrapping as
- # needed.
- for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()):
- # If tabs weren't converted to spaces, handle them now
- if current_tabs:
- # If the last thing we added was a space anyway then drop
- # it. But let's not get rid of the indentation.
- if (((result and line != indent) or
- (not result and line != firstline_indent)) and line[-1] == ' '):
- line = line[:-1]
- # Add the tabs, if that means adding whitespace, just add it at
- # the line, the rstrip() code while shorten the line down if
- # necessary
- if tabs_are_whitespace:
- line += tabs * len(current_tabs)
- else:
- # if not all tab replacement is whitespace we prepend it to the word
- word = tabs * len(current_tabs) + word
- # Handle the case where word cannot be squeezed onto current last line
- if len(line) + len(word) > length and len(indent) + len(word) <= length:
- result.append(line.rstrip())
- line = indent + word
- word = ''
- # No space left on line or can we append a space?
- if len(line) + 1 >= length:
- result.append(line.rstrip())
- line = indent
- else:
- line += ' '
- # Add word and shorten it up to allowed line length. Restart next
- # line with indent and repeat, or add a space if we're done (word
- # finished) This deals with words that caanot fit on one line
- # (e.g. indent + word longer than allowed line length).
- while len(line) + len(word) >= length:
- line += word
- result.append(line[:length])
- word = line[length:]
- line = indent
- # Default case, simply append the word and a space
- if word:
- line += word + ' '
- # End of input line. If we have content we finish the line. If the
- # current line is just the indent but we had content in during this
- # original line then we need to add an emoty line.
- if (result and line != indent) or (not result and line != firstline_indent):
- result.append(line.rstrip())
- elif len(result) == old_result_len:
- result.append('')
- line = indent
-
- return '\n'.join(result)
-
-
-def DocToHelp(doc):
- """Takes a __doc__ string and reformats it as help."""
-
- # Get rid of starting and ending white space. Using lstrip() or even
- # strip() could drop more than maximum of first line and right space
- # of last line.
- doc = doc.strip()
-
- # Get rid of all empty lines
- whitespace_only_line = re.compile('^[ \t]+$', re.M)
- doc = whitespace_only_line.sub('', doc)
-
- # Cut out common space at line beginnings
- doc = CutCommonSpacePrefix(doc)
-
- # Just like this module's comment, comments tend to be aligned somehow.
- # In other words they all start with the same amount of white space
- # 1) keep double new lines
- # 2) keep ws after new lines if not empty line
- # 3) all other new lines shall be changed to a space
- # Solution: Match new lines between non white space and replace with space.
- doc = re.sub('(?<=\S)\n(?=\S)', ' ', doc, re.M)
-
- return doc
-
-
-def _GetModuleObjectAndName(globals_dict):
- """Returns the module that defines a global environment, and its name.
-
- Args:
- globals_dict: A dictionary that should correspond to an environment
- providing the values of the globals.
-
- Returns:
- A pair consisting of (1) module object and (2) module name (a
- string). Returns (None, None) if the module could not be
- identified.
- """
- # The use of .items() (instead of .iteritems()) is NOT a mistake: if
- # a parallel thread imports a module while we iterate over
- # .iteritems() (not nice, but possible), we get a RuntimeError ...
- # Hence, we use the slightly slower but safer .items().
- for name, module in sys.modules.items():
- if getattr(module, '__dict__', None) is globals_dict:
- if name == '__main__':
- # Pick a more informative name for the main module.
- name = sys.argv[0]
- return (module, name)
- return (None, None)
-
-
-def _GetMainModule():
- """Returns the name of the module from which execution started."""
- for depth in range(1, sys.getrecursionlimit()):
- try:
- globals_of_main = sys._getframe(depth).f_globals
- except ValueError:
- return _GetModuleObjectAndName(globals_of_main)[1]
- raise AssertionError("No module was found")
-
-
-class FlagValues:
- """Registry of 'Flag' objects.
-
- A 'FlagValues' can then scan command line arguments, passing flag
- arguments through to the 'Flag' objects that it owns. It also
- provides easy access to the flag values. Typically only one
- 'FlagValues' object is needed by an application: gflags.FLAGS
-
- This class is heavily overloaded:
-
- 'Flag' objects are registered via __setitem__:
- FLAGS['longname'] = x # register a new flag
-
- The .value attribute of the registered 'Flag' objects can be accessed
- as attributes of this 'FlagValues' object, through __getattr__. Both
- the long and short name of the original 'Flag' objects can be used to
- access its value:
- FLAGS.longname # parsed flag value
- FLAGS.x # parsed flag value (short name)
-
- Command line arguments are scanned and passed to the registered 'Flag'
- objects through the __call__ method. Unparsed arguments, including
- argv[0] (e.g. the program name) are returned.
- argv = FLAGS(sys.argv) # scan command line arguments
-
- The original registered Flag objects can be retrieved through the use
- of the dictionary-like operator, __getitem__:
- x = FLAGS['longname'] # access the registered Flag object
-
- The str() operator of a 'FlagValues' object provides help for all of
- the registered 'Flag' objects.
- """
-
- def __init__(self):
- # Since everything in this class is so heavily overloaded, the only
- # way of defining and using fields is to access __dict__ directly.
-
- # Dictionary: flag name (string) -> Flag object.
- self.__dict__['__flags'] = {}
- # Dictionary: module name (string) -> list of Flag objects that are defined
- # by that module.
- self.__dict__['__flags_by_module'] = {}
- # Dictionary: module name (string) -> list of Flag objects that are
- # key for that module.
- self.__dict__['__key_flags_by_module'] = {}
-
- # Set if we should use new style gnu_getopt rather than getopt when parsing
- # the args. Only possible with Python 2.3+
- self.UseGnuGetOpt(False)
-
- def UseGnuGetOpt(self, use_gnu_getopt=True):
- self.__dict__['__use_gnu_getopt'] = use_gnu_getopt
-
- def IsGnuGetOpt(self):
- return self.__dict__['__use_gnu_getopt']
-
- def FlagDict(self):
- return self.__dict__['__flags']
-
- def FlagsByModuleDict(self):
- """Returns the dictionary of module_name -> list of defined flags.
-
- Returns:
- A dictionary. Its keys are module names (strings). Its values
- are lists of Flag objects.
- """
- return self.__dict__['__flags_by_module']
-
- def KeyFlagsByModuleDict(self):
- """Returns the dictionary of module_name -> list of key flags.
-
- Returns:
- A dictionary. Its keys are module names (strings). Its values
- are lists of Flag objects.
- """
- return self.__dict__['__key_flags_by_module']
-
- def _RegisterFlagByModule(self, module_name, flag):
- """Records the module that defines a specific flag.
-
- We keep track of which flag is defined by which module so that we
- can later sort the flags by module.
-
- Args:
- module_name: A string, the name of a Python module.
- flag: A Flag object, a flag that is key to the module.
- """
- flags_by_module = self.FlagsByModuleDict()
- flags_by_module.setdefault(module_name, []).append(flag)
-
- def _RegisterKeyFlagForModule(self, module_name, flag):
- """Specifies that a flag is a key flag for a module.
-
- Args:
- module_name: A string, the name of a Python module.
- flag: A Flag object, a flag that is key to the module.
- """
- key_flags_by_module = self.KeyFlagsByModuleDict()
- # The list of key flags for the module named module_name.
- key_flags = key_flags_by_module.setdefault(module_name, [])
- # Add flag, but avoid duplicates.
- if flag not in key_flags:
- key_flags.append(flag)
-
- def _GetFlagsDefinedByModule(self, module):
- """Returns the list of flags defined by a module.
-
- Args:
- module: A module object or a module name (a string).
-
- Returns:
- A new list of Flag objects. Caller may update this list as he
- wishes: none of those changes will affect the internals of this
- FlagValue object.
- """
- if not isinstance(module, str):
- module = module.__name__
-
- return list(self.FlagsByModuleDict().get(module, []))
-
- def _GetKeyFlagsForModule(self, module):
- """Returns the list of key flags for a module.
-
- Args:
- module: A module object or a module name (a string)
-
- Returns:
- A new list of Flag objects. Caller may update this list as he
- wishes: none of those changes will affect the internals of this
- FlagValue object.
- """
- if not isinstance(module, str):
- module = module.__name__
-
- # Any flag is a key flag for the module that defined it. NOTE:
- # key_flags is a fresh list: we can update it without affecting the
- # internals of this FlagValues object.
- key_flags = self._GetFlagsDefinedByModule(module)
-
- # Take into account flags explicitly declared as key for a module.
- for flag in self.KeyFlagsByModuleDict().get(module, []):
- if flag not in key_flags:
- key_flags.append(flag)
- return key_flags
-
- def AppendFlagValues(self, flag_values):
- """Appends flags registered in another FlagValues instance.
-
- Args:
- flag_values: registry to copy from
- """
- for flag_name, flag in flag_values.FlagDict().iteritems():
- # Each flags with shortname appears here twice (once under its
- # normal name, and again with its short name). To prevent
- # problems (DuplicateFlagError) with double flag registration, we
- # perform a check to make sure that the entry we're looking at is
- # for its normal name.
- if flag_name == flag.name:
- self[flag_name] = flag
-
- def RemoveFlagValues(self, flag_values):
- """Remove flags that were previously appended from another FlagValues.
-
- Args:
- flag_values: registry containing flags to remove.
- """
- for flag_name in flag_values.FlagDict():
- self.__delattr__(flag_name)
-
- def __setitem__(self, name, flag):
- """Registers a new flag variable."""
- fl = self.FlagDict()
- if not isinstance(flag, Flag):
- raise IllegalFlagValue(flag)
- if not isinstance(name, type("")):
- raise FlagsError("Flag name must be a string")
- if len(name) == 0:
- raise FlagsError("Flag name cannot be empty")
- # If running under pychecker, duplicate keys are likely to be
- # defined. Disable check for duplicate keys when pycheck'ing.
- if (fl.has_key(name) and not flag.allow_override and
- not fl[name].allow_override and not _RUNNING_PYCHECKER):
- raise DuplicateFlagError(name, self)
- short_name = flag.short_name
- if short_name is not None:
- if (fl.has_key(short_name) and not flag.allow_override and
- not fl[short_name].allow_override and not _RUNNING_PYCHECKER):
- raise DuplicateFlagError(short_name, self)
- fl[short_name] = flag
- fl[name] = flag
- global _exported_flags
- _exported_flags[name] = flag
-
- def __getitem__(self, name):
- """Retrieves the Flag object for the flag --name."""
- return self.FlagDict()[name]
-
- def __getattr__(self, name):
- """Retrieves the 'value' attribute of the flag --name."""
- fl = self.FlagDict()
- if not fl.has_key(name):
- raise AttributeError(name)
- return fl[name].value
-
- def __setattr__(self, name, value):
- """Sets the 'value' attribute of the flag --name."""
- fl = self.FlagDict()
- fl[name].value = value
- return value
-
- def _FlagIsRegistered(self, flag_obj):
- """Checks whether a Flag object is registered under some name.
-
- Note: this is non trivial: in addition to its normal name, a flag
- may have a short name too. In self.FlagDict(), both the normal and
- the short name are mapped to the same flag object. E.g., calling
- only "del FLAGS.short_name" is not unregistering the corresponding
- Flag object (it is still registered under the longer name).
-
- Args:
- flag_obj: A Flag object.
-
- Returns:
- A boolean: True iff flag_obj is registered under some name.
- """
- flag_dict = self.FlagDict()
- # Check whether flag_obj is registered under its long name.
- name = flag_obj.name
- if flag_dict.get(name, None) == flag_obj:
- return True
- # Check whether flag_obj is registered under its short name.
- short_name = flag_obj.short_name
- if (short_name is not None and
- flag_dict.get(short_name, None) == flag_obj):
- return True
- # The flag cannot be registered under any other name, so we do not
- # need to do a full search through the values of self.FlagDict().
- return False
-
- def __delattr__(self, flag_name):
- """Deletes a previously-defined flag from a flag object.
-
- This method makes sure we can delete a flag by using
-
- del flag_values_object.<flag_name>
-
- E.g.,
-
- flags.DEFINE_integer('foo', 1, 'Integer flag.')
- del flags.FLAGS.foo
-
- Args:
- flag_name: A string, the name of the flag to be deleted.
-
- Raises:
- AttributeError: When there is no registered flag named flag_name.
- """
- fl = self.FlagDict()
- if flag_name not in fl:
- raise AttributeError(flag_name)
-
- flag_obj = fl[flag_name]
- del fl[flag_name]
-
- if not self._FlagIsRegistered(flag_obj):
- # If the Flag object indicated by flag_name is no longer
- # registered (please see the docstring of _FlagIsRegistered), then
- # we delete the occurences of the flag object in all our internal
- # dictionaries.
- self.__RemoveFlagFromDictByModule(self.FlagsByModuleDict(), flag_obj)
- self.__RemoveFlagFromDictByModule(self.KeyFlagsByModuleDict(), flag_obj)
-
- def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj):
- """Removes a flag object from a module -> list of flags dictionary.
-
- Args:
- flags_by_module_dict: A dictionary that maps module names to lists of
- flags.
- flag_obj: A flag object.
- """
- for unused_module, flags_in_module in flags_by_module_dict.iteritems():
- # while (as opposed to if) takes care of multiple occurences of a
- # flag in the list for the same module.
- while flag_obj in flags_in_module:
- flags_in_module.remove(flag_obj)
-
- def SetDefault(self, name, value):
- """Changes the default value of the named flag object."""
- fl = self.FlagDict()
- if not fl.has_key(name):
- raise AttributeError(name)
- fl[name].SetDefault(value)
-
- def __contains__(self, name):
- """Returns True if name is a value (flag) in the dict."""
- return name in self.FlagDict()
-
- has_key = __contains__ # a synonym for __contains__()
-
- def __iter__(self):
- return self.FlagDict().iterkeys()
-
- def __call__(self, argv):
- """Parses flags from argv; stores parsed flags into this FlagValues object.
-
- All unparsed arguments are returned. Flags are parsed using the GNU
- Program Argument Syntax Conventions, using getopt:
-
- http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt
-
- Args:
- argv: argument list. Can be of any type that may be converted to a list.
-
- Returns:
- The list of arguments not parsed as options, including argv[0]
-
- Raises:
- FlagsError: on any parsing error
- """
- # Support any sequence type that can be converted to a list
- argv = list(argv)
-
- shortopts = ""
- longopts = []
-
- fl = self.FlagDict()
-
- # This pre parses the argv list for --flagfile=<> options.
- argv = argv[:1] + self.ReadFlagsFromFiles(argv[1:], force_gnu=False)
-
- # Correct the argv to support the google style of passing boolean
- # parameters. Boolean parameters may be passed by using --mybool,
- # --nomybool, --mybool=(true|false|1|0). getopt does not support
- # having options that may or may not have a parameter. We replace
- # instances of the short form --mybool and --nomybool with their
- # full forms: --mybool=(true|false).
- original_argv = list(argv) # list() makes a copy
- shortest_matches = None
- for name, flag in fl.items():
- if not flag.boolean:
- continue
- if shortest_matches is None:
- # Determine the smallest allowable prefix for all flag names
- shortest_matches = self.ShortestUniquePrefixes(fl)
- no_name = 'no' + name
- prefix = shortest_matches[name]
- no_prefix = shortest_matches[no_name]
-
- # Replace all occurences of this boolean with extended forms
- for arg_idx in range(1, len(argv)):
- arg = argv[arg_idx]
- if arg.find('=') >= 0: continue
- if arg.startswith('--'+prefix) and ('--'+name).startswith(arg):
- argv[arg_idx] = ('--%s=true' % name)
- elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg):
- argv[arg_idx] = ('--%s=false' % name)
-
- # Loop over all of the flags, building up the lists of short options
- # and long options that will be passed to getopt. Short options are
- # specified as a string of letters, each letter followed by a colon
- # if it takes an argument. Long options are stored in an array of
- # strings. Each string ends with an '=' if it takes an argument.
- for name, flag in fl.items():
- longopts.append(name + "=")
- if len(name) == 1: # one-letter option: allow short flag type also
- shortopts += name
- if not flag.boolean:
- shortopts += ":"
-
- longopts.append('undefok=')
- undefok_flags = []
-
- # In case --undefok is specified, loop to pick up unrecognized
- # options one by one.
- unrecognized_opts = []
- args = argv[1:]
- while True:
- try:
- if self.__dict__['__use_gnu_getopt']:
- optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts)
- else:
- optlist, unparsed_args = getopt.getopt(args, shortopts, longopts)
- break
- except getopt.GetoptError, e:
- if not e.opt or e.opt in fl:
- # Not an unrecognized option, reraise the exception as a FlagsError
- raise FlagsError(e)
- # Handle an unrecognized option.
- unrecognized_opts.append(e.opt)
- # Remove offender from args and try again
- for arg_index in range(len(args)):
- if ((args[arg_index] == '--' + e.opt) or
- (args[arg_index] == '-' + e.opt) or
- args[arg_index].startswith('--' + e.opt + '=')):
- args = args[0:arg_index] + args[arg_index+1:]
- break
- else:
- # We should have found the option, so we don't expect to get
- # here. We could assert, but raising the original exception
- # might work better.
- raise FlagsError(e)
-
- for name, arg in optlist:
- if name == '--undefok':
- flag_names = arg.split(',')
- undefok_flags.extend(flag_names)
- # For boolean flags, if --undefok=boolflag is specified, then we should
- # also accept --noboolflag, in addition to --boolflag.
- # Since we don't know the type of the undefok'd flag, this will affect
- # non-boolean flags as well.
- # NOTE: You shouldn't use --undefok=noboolflag, because then we will
- # accept --nonoboolflag here. We are choosing not to do the conversion
- # from noboolflag -> boolflag because of the ambiguity that flag names
- # can start with 'no'.
- undefok_flags.extend('no' + name for name in flag_names)
- continue
- if name.startswith('--'):
- # long option
- name = name[2:]
- short_option = 0
- else:
- # short option
- name = name[1:]
- short_option = 1
- if fl.has_key(name):
- flag = fl[name]
- if flag.boolean and short_option: arg = 1
- flag.Parse(arg)
-
- # If there were unrecognized options, raise an exception unless
- # the options were named via --undefok.
- for opt in unrecognized_opts:
- if opt not in undefok_flags:
- raise UnrecognizedFlagError(opt)
-
- if unparsed_args:
- if self.__dict__['__use_gnu_getopt']:
- # if using gnu_getopt just return the program name + remainder of argv.
- return argv[:1] + unparsed_args
- else:
- # unparsed_args becomes the first non-flag detected by getopt to
- # the end of argv. Because argv may have been modified above,
- # return original_argv for this region.
- return argv[:1] + original_argv[-len(unparsed_args):]
- else:
- return argv[:1]
-
- def Reset(self):
- """Resets the values to the point before FLAGS(argv) was called."""
- for f in self.FlagDict().values():
- f.Unparse()
-
- def RegisteredFlags(self):
- """Returns: a list of the names and short names of all registered flags."""
- return self.FlagDict().keys()
-
- def FlagValuesDict(self):
- """Returns: a dictionary that maps flag names to flag values."""
- flag_values = {}
-
- for flag_name in self.RegisteredFlags():
- flag = self.FlagDict()[flag_name]
- flag_values[flag_name] = flag.value
-
- return flag_values
-
- def __str__(self):
- """Generates a help string for all known flags."""
- return self.GetHelp()
-
- def GetHelp(self, prefix=''):
- """Generates a help string for all known flags."""
- helplist = []
-
- flags_by_module = self.FlagsByModuleDict()
- if flags_by_module:
-
- modules = flags_by_module.keys()
- modules.sort()
-
- # Print the help for the main module first, if possible.
- main_module = _GetMainModule()
- if main_module in modules:
- modules.remove(main_module)
- modules = [main_module] + modules
-
- for module in modules:
- self.__RenderOurModuleFlags(module, helplist)
-
- self.__RenderModuleFlags('gflags',
- _SPECIAL_FLAGS.FlagDict().values(),
- helplist)
-
- else:
- # Just print one long list of flags.
- self.__RenderFlagList(
- self.FlagDict().values() + _SPECIAL_FLAGS.FlagDict().values(),
- helplist, prefix)
-
- return '\n'.join(helplist)
-
- def __RenderModuleFlags(self, module, flags, output_lines, prefix=""):
- """Generates a help string for a given module."""
- if not isinstance(module, str):
- module = module.__name__
- output_lines.append('\n%s%s:' % (prefix, module))
- self.__RenderFlagList(flags, output_lines, prefix + " ")
-
- def __RenderOurModuleFlags(self, module, output_lines, prefix=""):
- """Generates a help string for a given module."""
- flags = self._GetFlagsDefinedByModule(module)
- if flags:
- self.__RenderModuleFlags(module, flags, output_lines, prefix)
-
- def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""):
- """Generates a help string for the key flags of a given module.
-
- Args:
- module: A module object or a module name (a string).
- output_lines: A list of strings. The generated help message
- lines will be appended to this list.
- prefix: A string that is prepended to each generated help line.
- """
- key_flags = self._GetKeyFlagsForModule(module)
- if key_flags:
- self.__RenderModuleFlags(module, key_flags, output_lines, prefix)
-
- def ModuleHelp(self, module):
- """Describe the key flags of a module.
-
- Args:
- module: A module object or a module name (a string).
-
- Returns:
- string describing the key flags of a module.
- """
- helplist = []
- self.__RenderOurModuleKeyFlags(module, helplist)
- return '\n'.join(helplist)
-
- def MainModuleHelp(self):
- """Describe the key flags of the main module.
-
- Returns:
- string describing the key flags of a module.
- """
- return self.ModuleHelp(_GetMainModule())
-
- def __RenderFlagList(self, flaglist, output_lines, prefix=" "):
- fl = self.FlagDict()
- special_fl = _SPECIAL_FLAGS.FlagDict()
- flaglist = [(flag.name, flag) for flag in flaglist]
- flaglist.sort()
- flagset = {}
- for (name, flag) in flaglist:
- # It's possible this flag got deleted or overridden since being
- # registered in the per-module flaglist. Check now against the
- # canonical source of current flag information, the FlagDict.
- if fl.get(name, None) != flag and special_fl.get(name, None) != flag:
- # a different flag is using this name now
- continue
- # only print help once
- if flagset.has_key(flag): continue
- flagset[flag] = 1
- flaghelp = ""
- if flag.short_name: flaghelp += "-%s," % flag.short_name
- if flag.boolean:
- flaghelp += "--[no]%s" % flag.name + ":"
- else:
- flaghelp += "--%s" % flag.name + ":"
- flaghelp += " "
- if flag.help:
- flaghelp += flag.help
- flaghelp = TextWrap(flaghelp, indent=prefix+" ",
- firstline_indent=prefix)
- if flag.default_as_str:
- flaghelp += "\n"
- flaghelp += TextWrap("(default: %s)" % flag.default_as_str,
- indent=prefix+" ")
- if flag.parser.syntactic_help:
- flaghelp += "\n"
- flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help,
- indent=prefix+" ")
- output_lines.append(flaghelp)
-
- def get(self, name, default):
- """Returns the value of a flag (if not None) or a default value.
-
- Args:
- name: A string, the name of a flag.
- default: Default value to use if the flag value is None.
- """
-
- value = self.__getattr__(name)
- if value is not None: # Can't do if not value, b/c value might be '0' or ""
- return value
- else:
- return default
-
- def ShortestUniquePrefixes(self, fl):
- """Returns: dictionary; maps flag names to their shortest unique prefix."""
- # Sort the list of flag names
- sorted_flags = []
- for name, flag in fl.items():
- sorted_flags.append(name)
- if flag.boolean:
- sorted_flags.append('no%s' % name)
- sorted_flags.sort()
-
- # For each name in the sorted list, determine the shortest unique
- # prefix by comparing itself to the next name and to the previous
- # name (the latter check uses cached info from the previous loop).
- shortest_matches = {}
- prev_idx = 0
- for flag_idx in range(len(sorted_flags)):
- curr = sorted_flags[flag_idx]
- if flag_idx == (len(sorted_flags) - 1):
- next = None
- else:
- next = sorted_flags[flag_idx+1]
- next_len = len(next)
- for curr_idx in range(len(curr)):
- if (next is None
- or curr_idx >= next_len
- or curr[curr_idx] != next[curr_idx]):
- # curr longer than next or no more chars in common
- shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1]
- prev_idx = curr_idx
- break
- else:
- # curr shorter than (or equal to) next
- shortest_matches[curr] = curr
- prev_idx = curr_idx + 1 # next will need at least one more char
- return shortest_matches
-
- def __IsFlagFileDirective(self, flag_string):
- """Checks whether flag_string contain a --flagfile=<foo> directive."""
- if isinstance(flag_string, type("")):
- if flag_string.startswith('--flagfile='):
- return 1
- elif flag_string == '--flagfile':
- return 1
- elif flag_string.startswith('-flagfile='):
- return 1
- elif flag_string == '-flagfile':
- return 1
- else:
- return 0
- return 0
-
- def ExtractFilename(self, flagfile_str):
- """Returns filename from a flagfile_str of form -[-]flagfile=filename.
-
- The cases of --flagfile foo and -flagfile foo shouldn't be hitting
- this function, as they are dealt with in the level above this
- function.
- """
- if flagfile_str.startswith('--flagfile='):
- return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
- elif flagfile_str.startswith('-flagfile='):
- return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
- else:
- raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str)
-
- def __GetFlagFileLines(self, filename, parsed_file_list):
- """Returns the useful (!=comments, etc) lines from a file with flags.
-
- Args:
- filename: A string, the name of the flag file.
- parsed_file_list: A list of the names of the files we have
- already read. MUTATED BY THIS FUNCTION.
-
- Returns:
- List of strings. See the note below.
-
- NOTE(springer): This function checks for a nested --flagfile=<foo>
- tag and handles the lower file recursively. It returns a list of
- all the lines that _could_ contain command flags. This is
- EVERYTHING except whitespace lines and comments (lines starting
- with '#' or '//').
- """
- line_list = [] # All line from flagfile.
- flag_line_list = [] # Subset of lines w/o comments, blanks, flagfile= tags.
- try:
- file_obj = open(filename, 'r')
- except IOError, e_msg:
- print e_msg
- print 'ERROR:: Unable to open flagfile: %s' % (filename)
- return flag_line_list
-
- line_list = file_obj.readlines()
- file_obj.close()
- parsed_file_list.append(filename)
-
- # This is where we check each line in the file we just read.
- for line in line_list:
- if line.isspace():
- pass
- # Checks for comment (a line that starts with '#').
- elif line.startswith('#') or line.startswith('//'):
- pass
- # Checks for a nested "--flagfile=<bar>" flag in the current file.
- # If we find one, recursively parse down into that file.
- elif self.__IsFlagFileDirective(line):
- sub_filename = self.ExtractFilename(line)
- # We do a little safety check for reparsing a file we've already done.
- if not sub_filename in parsed_file_list:
- included_flags = self.__GetFlagFileLines(sub_filename,
- parsed_file_list)
- flag_line_list.extend(included_flags)
- else: # Case of hitting a circularly included file.
- print >>sys.stderr, ('Warning: Hit circular flagfile dependency: %s'
- % sub_filename)
- else:
- # Any line that's not a comment or a nested flagfile should get
- # copied into 2nd position. This leaves earlier arguements
- # further back in the list, thus giving them higher priority.
- flag_line_list.append(line.strip())
- return flag_line_list
-
- def ReadFlagsFromFiles(self, argv, force_gnu=True):
- """Processes command line args, but also allow args to be read from file.
- Args:
- argv: A list of strings, usually sys.argv[1:], which may contain one or
- more flagfile directives of the form --flagfile="./filename".
- Note that the name of the program (sys.argv[0]) should be omitted.
- force_gnu: If False, --flagfile parsing obeys normal flag semantics.
- If True, --flagfile parsing instead follows gnu_getopt semantics.
- *** WARNING *** force_gnu=False may become the future default!
-
- Returns:
-
- A new list which has the original list combined with what we read
- from any flagfile(s).
-
- References: Global gflags.FLAG class instance.
-
- This function should be called before the normal FLAGS(argv) call.
- This function scans the input list for a flag that looks like:
- --flagfile=<somefile>. Then it opens <somefile>, reads all valid key
- and value pairs and inserts them into the input list between the
- first item of the list and any subsequent items in the list.
-
- Note that your application's flags are still defined the usual way
- using gflags DEFINE_flag() type functions.
-
- Notes (assuming we're getting a commandline of some sort as our input):
- --> Flags from the command line argv _should_ always take precedence!
- --> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
- It will be processed after the parent flag file is done.
- --> For duplicate flags, first one we hit should "win".
- --> In a flagfile, a line beginning with # or // is a comment.
- --> Entirely blank lines _should_ be ignored.
- """
- parsed_file_list = []
- rest_of_args = argv
- new_argv = []
- while rest_of_args:
- current_arg = rest_of_args[0]
- rest_of_args = rest_of_args[1:]
- if self.__IsFlagFileDirective(current_arg):
- # This handles the case of -(-)flagfile foo. In this case the
- # next arg really is part of this one.
- if current_arg == '--flagfile' or current_arg == '-flagfile':
- if not rest_of_args:
- raise IllegalFlagValue('--flagfile with no argument')
- flag_filename = os.path.expanduser(rest_of_args[0])
- rest_of_args = rest_of_args[1:]
- else:
- # This handles the case of (-)-flagfile=foo.
- flag_filename = self.ExtractFilename(current_arg)
- new_argv[0:0] = self.__GetFlagFileLines(flag_filename, parsed_file_list)
- else:
- new_argv.append(current_arg)
- # Stop parsing after '--', like getopt and gnu_getopt.
- if current_arg == '--':
- break
- # Stop parsing after a non-flag, like getopt.
- if not current_arg.startswith('-'):
- if not force_gnu and not self.__dict__['__use_gnu_getopt']:
- break
-
- if rest_of_args:
- new_argv.extend(rest_of_args)
-
- return new_argv
-
- def FlagsIntoString(self):
- """Returns a string with the flags assignments from this FlagValues object.
-
- This function ignores flags whose value is None. Each flag
- assignment is separated by a newline.
-
- NOTE: MUST mirror the behavior of the C++ function
- CommandlineFlagsIntoString from google3/base/commandlineflags.cc.
- """
- s = ''
- for flag in self.FlagDict().values():
- if flag.value is not None:
- s += flag.Serialize() + '\n'
- return s
-
- def AppendFlagsIntoFile(self, filename):
- """Appends all flags assignments from this FlagInfo object to a file.
-
- Output will be in the format of a flagfile.
-
- NOTE: MUST mirror the behavior of the C++ version of
- AppendFlagsIntoFile from google3/base/commandlineflags.cc.
- """
- out_file = open(filename, 'a')
- out_file.write(self.FlagsIntoString())
- out_file.close()
-
- def WriteHelpInXMLFormat(self, outfile=None):
- """Outputs flag documentation in XML format.
-
- NOTE: We use element names that are consistent with those used by
- the C++ command-line flag library, from
- google3/base/commandlineflags_reporting.cc. We also use a few new
- elements (e.g., <key>), but we do not interfere / overlap with
- existing XML elements used by the C++ library. Please maintain this
- consistency.
-
- Args:
- outfile: File object we write to. Default None means sys.stdout.
- """
- outfile = outfile or sys.stdout
-
- outfile.write('<?xml version=\"1.0\"?>\n')
- outfile.write('<AllFlags>\n')
- indent = ' '
- _WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]),
- indent)
-
- usage_doc = sys.modules['__main__'].__doc__
- if not usage_doc:
- usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
- else:
- usage_doc = usage_doc.replace('%s', sys.argv[0])
- _WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent)
-
- # Get list of key flags for the main module.
- key_flags = self._GetKeyFlagsForModule(_GetMainModule())
-
- # Sort flags by declaring module name and next by flag name.
- flags_by_module = self.FlagsByModuleDict()
- all_module_names = list(flags_by_module.keys())
- all_module_names.sort()
- for module_name in all_module_names:
- flag_list = [(f.name, f) for f in flags_by_module[module_name]]
- flag_list.sort()
- for unused_flag_name, flag in flag_list:
- is_key = flag in key_flags
- flag.WriteInfoInXMLFormat(outfile, module_name,
- is_key=is_key, indent=indent)
-
- outfile.write('</AllFlags>\n')
- outfile.flush()
-# end of FlagValues definition
-
-
-# The global FlagValues instance
-FLAGS = FlagValues()
-
-
-def _MakeXMLSafe(s):
- """Escapes <, >, and & from s, and removes XML 1.0-illegal chars."""
- s = cgi.escape(s) # Escape <, >, and &
- # Remove characters that cannot appear in an XML 1.0 document
- # (http://www.w3.org/TR/REC-xml/#charsets).
- #
- # NOTE: if there are problems with current solution, one may move to
- # XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;).
- s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s)
- return s
-
-
-def _WriteSimpleXMLElement(outfile, name, value, indent):
- """Writes a simple XML element.
-
- Args:
- outfile: File object we write the XML element to.
- name: A string, the name of XML element.
- value: A Python object, whose string representation will be used
- as the value of the XML element.
- indent: A string, prepended to each line of generated output.
- """
- value_str = str(value)
- if isinstance(value, bool):
- # Display boolean values as the C++ flag library does: no caps.
- value_str = value_str.lower()
- outfile.write('%s<%s>%s</%s>\n' %
- (indent, name, _MakeXMLSafe(value_str), name))
-
-
-class Flag:
- """Information about a command-line flag.
-
- 'Flag' objects define the following fields:
- .name - the name for this flag
- .default - the default value for this flag
- .default_as_str - default value as repr'd string, e.g., "'true'" (or None)
- .value - the most recent parsed value of this flag; set by Parse()
- .help - a help string or None if no help is available
- .short_name - the single letter alias for this flag (or None)
- .boolean - if 'true', this flag does not accept arguments
- .present - true if this flag was parsed from command line flags.
- .parser - an ArgumentParser object
- .serializer - an ArgumentSerializer object
- .allow_override - the flag may be redefined without raising an error
-
- The only public method of a 'Flag' object is Parse(), but it is
- typically only called by a 'FlagValues' object. The Parse() method is
- a thin wrapper around the 'ArgumentParser' Parse() method. The parsed
- value is saved in .value, and the .present attribute is updated. If
- this flag was already present, a FlagsError is raised.
-
- Parse() is also called during __init__ to parse the default value and
- initialize the .value attribute. This enables other python modules to
- safely use flags even if the __main__ module neglects to parse the
- command line arguments. The .present attribute is cleared after
- __init__ parsing. If the default value is set to None, then the
- __init__ parsing step is skipped and the .value attribute is
- initialized to None.
-
- Note: The default value is also presented to the user in the help
- string, so it is important that it be a legal value for this flag.
- """
-
- def __init__(self, parser, serializer, name, default, help_string,
- short_name=None, boolean=0, allow_override=0):
- self.name = name
-
- if not help_string:
- help_string = '(no help available)'
-
- self.help = help_string
- self.short_name = short_name
- self.boolean = boolean
- self.present = 0
- self.parser = parser
- self.serializer = serializer
- self.allow_override = allow_override
- self.value = None
-
- self.SetDefault(default)
-
- def __GetParsedValueAsString(self, value):
- if value is None:
- return None
- if self.serializer:
- return repr(self.serializer.Serialize(value))
- if self.boolean:
- if value:
- return repr('true')
- else:
- return repr('false')
- return repr(str(value))
-
- def Parse(self, argument):
- try:
- self.value = self.parser.Parse(argument)
- except ValueError, e: # recast ValueError as IllegalFlagValue
- raise IllegalFlagValue("flag --%s=%s: %s" % (self.name, argument, e))
- self.present += 1
-
- def Unparse(self):
- if self.default is None:
- self.value = None
- else:
- self.Parse(self.default)
- self.present = 0
-
- def Serialize(self):
- if self.value is None:
- return ''
- if self.boolean:
- if self.value:
- return "--%s" % self.name
- else:
- return "--no%s" % self.name
- else:
- if not self.serializer:
- raise FlagsError("Serializer not present for flag %s" % self.name)
- return "--%s=%s" % (self.name, self.serializer.Serialize(self.value))
-
- def SetDefault(self, value):
- """Changes the default value (and current value too) for this Flag."""
- # We can't allow a None override because it may end up not being
- # passed to C++ code when we're overriding C++ flags. So we
- # cowardly bail out until someone fixes the semantics of trying to
- # pass None to a C++ flag. See swig_flags.Init() for details on
- # this behavior.
- if value is None and self.allow_override:
- raise DuplicateFlagCannotPropagateNoneToSwig(self.name)
-
- self.default = value
- self.Unparse()
- self.default_as_str = self.__GetParsedValueAsString(self.value)
-
- def Type(self):
- """Returns: a string that describes the type of this Flag."""
- # NOTE: we use strings, and not the types.*Type constants because
- # our flags can have more exotic types, e.g., 'comma separated list
- # of strings', 'whitespace separated list of strings', etc.
- return self.parser.Type()
-
- def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''):
- """Writes common info about this flag, in XML format.
-
- This is information that is relevant to all flags (e.g., name,
- meaning, etc.). If you defined a flag that has some other pieces of
- info, then please override _WriteCustomInfoInXMLFormat.
-
- Please do NOT override this method.
-
- Args:
- outfile: File object we write to.
- module_name: A string, the name of the module that defines this flag.
- is_key: A boolean, True iff this flag is key for main module.
- indent: A string that is prepended to each generated line.
- """
- outfile.write(indent + '<flag>\n')
- inner_indent = indent + ' '
- if is_key:
- _WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent)
- _WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent)
- # Print flag features that are relevant for all flags.
- _WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent)
- if self.short_name:
- _WriteSimpleXMLElement(outfile, 'short_name', self.short_name,
- inner_indent)
- if self.help:
- _WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent)
- # The default flag value can either be represented as a string like on the
- # command line, or as a Python object. We serialize this value in the
- # latter case in order to remain consistent.
- if self.serializer and not isinstance(self.default, str):
- default_serialized = self.serializer.Serialize(self.default)
- else:
- default_serialized = self.default
- _WriteSimpleXMLElement(outfile, 'default', default_serialized, inner_indent)
- _WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent)
- _WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent)
- # Print extra flag features this flag may have.
- self._WriteCustomInfoInXMLFormat(outfile, inner_indent)
- outfile.write(indent + '</flag>\n')
-
- def _WriteCustomInfoInXMLFormat(self, outfile, indent):
- """Writes extra info about this flag, in XML format.
-
- "Extra" means "not already printed by WriteInfoInXMLFormat above."
-
- Args:
- outfile: File object we write to.
- indent: A string that is prepended to each generated line.
- """
- # Usually, the parser knows the extra details about the flag, so
- # we just forward the call to it.
- self.parser.WriteCustomInfoInXMLFormat(outfile, indent)
-# End of Flag definition
-
-
-class ArgumentParser:
- """Base class used to parse and convert arguments.
-
- The Parse() method checks to make sure that the string argument is a
- legal value and convert it to a native type. If the value cannot be
- converted, it should throw a 'ValueError' exception with a human
- readable explanation of why the value is illegal.
-
- Subclasses should also define a syntactic_help string which may be
- presented to the user to describe the form of the legal values.
- """
- syntactic_help = ""
-
- def Parse(self, argument):
- """Default implementation: always returns its argument unmodified."""
- return argument
-
- def Type(self):
- return 'string'
-
- def WriteCustomInfoInXMLFormat(self, outfile, indent):
- pass
-
-
-class ArgumentSerializer:
- """Base class for generating string representations of a flag value."""
-
- def Serialize(self, value):
- return str(value)
-
-
-class ListSerializer(ArgumentSerializer):
-
- def __init__(self, list_sep):
- self.list_sep = list_sep
-
- def Serialize(self, value):
- return self.list_sep.join([str(x) for x in value])
-
-
-# The DEFINE functions are explained in mode details in the module doc string.
-
-
-def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None,
- **args):
- """Registers a generic Flag object.
-
- NOTE: in the docstrings of all DEFINE* functions, "registers" is short
- for "creates a new flag and registers it".
-
- Auxiliary function: clients should use the specialized DEFINE_<type>
- function instead.
-
- Args:
- parser: ArgumentParser that is used to parse the flag arguments.
- name: A string, the flag name.
- default: The default value of the flag.
- help: A help string.
- flag_values: FlagValues object the flag will be registered with.
- serializer: ArgumentSerializer that serializes the flag value.
- args: Dictionary with extra keyword args that are passes to the
- Flag __init__.
- """
- DEFINE_flag(Flag(parser, serializer, name, default, help, **args),
- flag_values)
-
-
-def DEFINE_flag(flag, flag_values=FLAGS):
- """Registers a 'Flag' object with a 'FlagValues' object.
-
- By default, the global FLAGS 'FlagValue' object is used.
-
- Typical users will use one of the more specialized DEFINE_xxx
- functions, such as DEFINE_string or DEFINE_integer. But developers
- who need to create Flag objects themselves should use this function
- to register their flags.
- """
- # copying the reference to flag_values prevents pychecker warnings
- fv = flag_values
- fv[flag.name] = flag
- # Tell flag_values who's defining the flag.
- if isinstance(flag_values, FlagValues):
- # Regarding the above isinstance test: some users pass funny
- # values of flag_values (e.g., {}) in order to avoid the flag
- # registration (in the past, there used to be a flag_values ==
- # FLAGS test here) and redefine flags with the same name (e.g.,
- # debug). To avoid breaking their code, we perform the
- # registration only if flag_values is a real FlagValues object.
- flag_values._RegisterFlagByModule(_GetCallingModule(), flag)
-
-
-def _InternalDeclareKeyFlags(flag_names,
- flag_values=FLAGS, key_flag_values=None):
- """Declares a flag as key for the calling module.
-
- Internal function. User code should call DECLARE_key_flag or
- ADOPT_module_key_flags instead.
-
- Args:
- flag_names: A list of strings that are names of already-registered
- Flag objects.
- flag_values: A FlagValues object that the flags listed in
- flag_names have registered with (the value of the flag_values
- argument from the DEFINE_* calls that defined those flags).
- This should almost never need to be overridden.
- key_flag_values: A FlagValues object that (among possibly many
- other things) keeps track of the key flags for each module.
- Default None means "same as flag_values". This should almost
- never need to be overridden.
-
- Raises:
- UnrecognizedFlagError: when we refer to a flag that was not
- defined yet.
- """
- key_flag_values = key_flag_values or flag_values
-
- module = _GetCallingModule()
-
- for flag_name in flag_names:
- if flag_name not in flag_values:
- raise UnrecognizedFlagError(flag_name)
- flag = flag_values.FlagDict()[flag_name]
- key_flag_values._RegisterKeyFlagForModule(module, flag)
-
-
-def DECLARE_key_flag(flag_name, flag_values=FLAGS):
- """Declares one flag as key to the current module.
-
- Key flags are flags that are deemed really important for a module.
- They are important when listing help messages; e.g., if the
- --helpshort command-line flag is used, then only the key flags of the
- main module are listed (instead of all flags, as in the case of
- --help).
-
- Sample usage:
-
- flags.DECLARED_key_flag('flag_1')
-
- Args:
- flag_name: A string, the name of an already declared flag.
- (Redeclaring flags as key, including flags implicitly key
- because they were declared in this module, is a no-op.)
- flag_values: A FlagValues object. This should almost never
- need to be overridden.
- """
- if flag_name in _SPECIAL_FLAGS:
- # Take care of the special flags, e.g., --flagfile, --undefok.
- # These flags are defined in _SPECIAL_FLAGS, and are treated
- # specially during flag parsing, taking precedence over the
- # user-defined flags.
- _InternalDeclareKeyFlags([flag_name],
- flag_values=_SPECIAL_FLAGS,
- key_flag_values=flag_values)
- return
- _InternalDeclareKeyFlags([flag_name], flag_values=flag_values)
-
-
-def ADOPT_module_key_flags(module, flag_values=FLAGS):
- """Declares that all flags key to a module are key to the current module.
-
- Args:
- module: A module object.
- flag_values: A FlagValues object. This should almost never need
- to be overridden.
-
- Raises:
- FlagsError: When given an argument that is a module name (a
- string), instead of a module object.
- """
- # NOTE(salcianu): an even better test would be if not
- # isinstance(module, types.ModuleType) but I didn't want to import
- # types for such a tiny use.
- if isinstance(module, str):
- raise FlagsError('Received module name %s; expected a module object.'
- % module)
- _InternalDeclareKeyFlags(
- [f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)],
- flag_values=flag_values)
- # If module is this flag module, take _SPECIAL_FLAGS into account.
- if module == _GetThisModuleObjectAndName()[0]:
- _InternalDeclareKeyFlags(
- # As we associate flags with _GetCallingModule(), the special
- # flags defined in this module are incorrectly registered with
- # a different module. So, we can't use _GetKeyFlagsForModule.
- # Instead, we take all flags from _SPECIAL_FLAGS (a private
- # FlagValues, where no other module should register flags).
- [f.name for f in _SPECIAL_FLAGS.FlagDict().values()],
- flag_values=_SPECIAL_FLAGS,
- key_flag_values=flag_values)
-
-
-#
-# STRING FLAGS
-#
-
-
-def DEFINE_string(name, default, help, flag_values=FLAGS, **args):
- """Registers a flag whose value can be any string."""
- parser = ArgumentParser()
- serializer = ArgumentSerializer()
- DEFINE(parser, name, default, help, flag_values, serializer, **args)
-
-
-#
-# BOOLEAN FLAGS
-#
-# and the special HELP flags.
-
-class BooleanParser(ArgumentParser):
- """Parser of boolean values."""
-
- def Convert(self, argument):
- """Converts the argument to a boolean; raise ValueError on errors."""
- if type(argument) == str:
- if argument.lower() in ['true', 't', '1']:
- return True
- elif argument.lower() in ['false', 'f', '0']:
- return False
-
- bool_argument = bool(argument)
- if argument == bool_argument:
- # The argument is a valid boolean (True, False, 0, or 1), and not just
- # something that always converts to bool (list, string, int, etc.).
- return bool_argument
-
- raise ValueError('Non-boolean argument to boolean flag', argument)
-
- def Parse(self, argument):
- val = self.Convert(argument)
- return val
-
- def Type(self):
- return 'bool'
-
-
-class BooleanFlag(Flag):
- """Basic boolean flag.
-
- Boolean flags do not take any arguments, and their value is either
- True (1) or False (0). The false value is specified on the command
- line by prepending the word 'no' to either the long or the short flag
- name.
-
- For example, if a Boolean flag was created whose long name was
- 'update' and whose short name was 'x', then this flag could be
- explicitly unset through either --noupdate or --nox.
- """
-
- def __init__(self, name, default, help, short_name=None, **args):
- p = BooleanParser()
- Flag.__init__(self, p, None, name, default, help, short_name, 1, **args)
- if not self.help: self.help = "a boolean value"
-
-
-def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args):
- """Registers a boolean flag.
-
- Such a boolean flag does not take an argument. If a user wants to
- specify a false value explicitly, the long option beginning with 'no'
- must be used: i.e. --noflag
-
- This flag will have a value of None, True or False. None is possible
- if default=None and the user does not specify the flag on the command
- line.
- """
- DEFINE_flag(BooleanFlag(name, default, help, **args), flag_values)
-
-# Match C++ API to unconfuse C++ people.
-DEFINE_bool = DEFINE_boolean
-
-class HelpFlag(BooleanFlag):
- """
- HelpFlag is a special boolean flag that prints usage information and
- raises a SystemExit exception if it is ever found in the command
- line arguments. Note this is called with allow_override=1, so other
- apps can define their own --help flag, replacing this one, if they want.
- """
- def __init__(self):
- BooleanFlag.__init__(self, "help", 0, "show this help",
- short_name="?", allow_override=1)
- def Parse(self, arg):
- if arg:
- doc = sys.modules["__main__"].__doc__
- flags = str(FLAGS)
- print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
- if flags:
- print "flags:"
- print flags
- sys.exit(1)
-
-
-class HelpXMLFlag(BooleanFlag):
- """Similar to HelpFlag, but generates output in XML format."""
-
- def __init__(self):
- BooleanFlag.__init__(self, 'helpxml', False,
- 'like --help, but generates XML output',
- allow_override=1)
-
- def Parse(self, arg):
- if arg:
- FLAGS.WriteHelpInXMLFormat(sys.stdout)
- sys.exit(1)
-
-
-class HelpshortFlag(BooleanFlag):
- """
- HelpshortFlag is a special boolean flag that prints usage
- information for the "main" module, and rasies a SystemExit exception
- if it is ever found in the command line arguments. Note this is
- called with allow_override=1, so other apps can define their own
- --helpshort flag, replacing this one, if they want.
- """
- def __init__(self):
- BooleanFlag.__init__(self, "helpshort", 0,
- "show usage only for this module", allow_override=1)
- def Parse(self, arg):
- if arg:
- doc = sys.modules["__main__"].__doc__
- flags = FLAGS.MainModuleHelp()
- print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
- if flags:
- print "flags:"
- print flags
- sys.exit(1)
-
-#
-# Numeric parser - base class for Integer and Float parsers
-#
-
-
-class NumericParser(ArgumentParser):
- """Parser of numeric values.
-
- Parsed value may be bounded to a given upper and lower bound.
- """
-
- def Parse(self, argument):
- val = self.Convert(argument)
- if ((self.lower_bound is not None and val < self.lower_bound) or
- (self.upper_bound is not None and val > self.upper_bound)):
- raise ValueError("%s is not %s" % (val, self.syntactic_help))
- return val
-
- def WriteCustomInfoInXMLFormat(self, outfile, indent):
- if self.lower_bound is not None:
- _WriteSimpleXMLElement(outfile, 'lower_bound', self.lower_bound, indent)
- if self.upper_bound is not None:
- _WriteSimpleXMLElement(outfile, 'upper_bound', self.upper_bound, indent)
-
- def Convert(self, argument):
- """Default implementation: always returns its argument unmodified."""
- return argument
-
-# End of Numeric Parser
-
-#
-# FLOAT FLAGS
-#
-
-class FloatParser(NumericParser):
- """Parser of floating point values.
-
- Parsed value may be bounded to a given upper and lower bound.
- """
- number_article = "a"
- number_name = "number"
- syntactic_help = " ".join((number_article, number_name))
-
- def __init__(self, lower_bound=None, upper_bound=None):
- self.lower_bound = lower_bound
- self.upper_bound = upper_bound
- sh = self.syntactic_help
- if lower_bound is not None and upper_bound is not None:
- sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
- elif lower_bound == 0:
- sh = "a non-negative %s" % self.number_name
- elif upper_bound == 0:
- sh = "a non-positive %s" % self.number_name
- elif upper_bound is not None:
- sh = "%s <= %s" % (self.number_name, upper_bound)
- elif lower_bound is not None:
- sh = "%s >= %s" % (self.number_name, lower_bound)
- self.syntactic_help = sh
-
- def Convert(self, argument):
- """Converts argument to a float; raises ValueError on errors."""
- return float(argument)
-
- def Type(self):
- return 'float'
-# End of FloatParser
-
-
-def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None,
- flag_values=FLAGS, **args):
- """Registers a flag whose value must be a float.
-
- If lower_bound or upper_bound are set, then this flag must be
- within the given range.
- """
- parser = FloatParser(lower_bound, upper_bound)
- serializer = ArgumentSerializer()
- DEFINE(parser, name, default, help, flag_values, serializer, **args)
-
-
-#
-# INTEGER FLAGS
-#
-
-
-class IntegerParser(NumericParser):
- """Parser of an integer value.
-
- Parsed value may be bounded to a given upper and lower bound.
- """
- number_article = "an"
- number_name = "integer"
- syntactic_help = " ".join((number_article, number_name))
-
- def __init__(self, lower_bound=None, upper_bound=None):
- self.lower_bound = lower_bound
- self.upper_bound = upper_bound
- sh = self.syntactic_help
- if lower_bound is not None and upper_bound is not None:
- sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
- elif lower_bound == 1:
- sh = "a positive %s" % self.number_name
- elif upper_bound == -1:
- sh = "a negative %s" % self.number_name
- elif lower_bound == 0:
- sh = "a non-negative %s" % self.number_name
- elif upper_bound == 0:
- sh = "a non-positive %s" % self.number_name
- elif upper_bound is not None:
- sh = "%s <= %s" % (self.number_name, upper_bound)
- elif lower_bound is not None:
- sh = "%s >= %s" % (self.number_name, lower_bound)
- self.syntactic_help = sh
-
- def Convert(self, argument):
- __pychecker__ = 'no-returnvalues'
- if type(argument) == str:
- base = 10
- if len(argument) > 2 and argument[0] == "0" and argument[1] == "x":
- base = 16
- try:
- return int(argument, base)
- # ValueError is thrown when argument is a string, and overflows an int.
- except ValueError:
- return long(argument, base)
- else:
- try:
- return int(argument)
- # OverflowError is thrown when argument is numeric, and overflows an int.
- except OverflowError:
- return long(argument)
-
- def Type(self):
- return 'int'
-
-
-def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None,
- flag_values=FLAGS, **args):
- """Registers a flag whose value must be an integer.
-
- If lower_bound, or upper_bound are set, then this flag must be
- within the given range.
- """
- parser = IntegerParser(lower_bound, upper_bound)
- serializer = ArgumentSerializer()
- DEFINE(parser, name, default, help, flag_values, serializer, **args)
-
-
-#
-# ENUM FLAGS
-#
-
-
-class EnumParser(ArgumentParser):
- """Parser of a string enum value (a string value from a given set).
-
- If enum_values (see below) is not specified, any string is allowed.
- """
-
- def __init__(self, enum_values=None):
- self.enum_values = enum_values
-
- def Parse(self, argument):
- if self.enum_values and argument not in self.enum_values:
- raise ValueError("value should be one of <%s>" %
- "|".join(self.enum_values))
- return argument
-
- def Type(self):
- return 'string enum'
-
-
-class EnumFlag(Flag):
- """Basic enum flag; its value can be any string from list of enum_values."""
-
- def __init__(self, name, default, help, enum_values=None,
- short_name=None, **args):
- enum_values = enum_values or []
- p = EnumParser(enum_values)
- g = ArgumentSerializer()
- Flag.__init__(self, p, g, name, default, help, short_name, **args)
- if not self.help: self.help = "an enum string"
- self.help = "<%s>: %s" % ("|".join(enum_values), self.help)
-
- def _WriteCustomInfoInXMLFormat(self, outfile, indent):
- for enum_value in self.parser.enum_values:
- _WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent)
-
-
-def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS,
- **args):
- """Registers a flag whose value can be any string from enum_values."""
- DEFINE_flag(EnumFlag(name, default, help, enum_values, ** args),
- flag_values)
-
-
-#
-# LIST FLAGS
-#
-
-
-class BaseListParser(ArgumentParser):
- """Base class for a parser of lists of strings.
-
- To extend, inherit from this class; from the subclass __init__, call
-
- BaseListParser.__init__(self, token, name)
-
- where token is a character used to tokenize, and name is a description
- of the separator.
- """
-
- def __init__(self, token=None, name=None):
- assert name
- self._token = token
- self._name = name
- self.syntactic_help = "a %s separated list" % self._name
-
- def Parse(self, argument):
- if isinstance(argument, list):
- return argument
- elif argument == '':
- return []
- else:
- return [s.strip() for s in argument.split(self._token)]
-
- def Type(self):
- return '%s separated list of strings' % self._name
-
-
-class ListParser(BaseListParser):
- """Parser for a comma-separated list of strings."""
-
- def __init__(self):
- BaseListParser.__init__(self, ',', 'comma')
-
- def WriteCustomInfoInXMLFormat(self, outfile, indent):
- BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
- _WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent)
-
-
-class WhitespaceSeparatedListParser(BaseListParser):
- """Parser for a whitespace-separated list of strings."""
-
- def __init__(self):
- BaseListParser.__init__(self, None, 'whitespace')
-
- def WriteCustomInfoInXMLFormat(self, outfile, indent):
- BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
- separators = list(string.whitespace)
- separators.sort()
- for ws_char in string.whitespace:
- _WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char), indent)
-
-
-def DEFINE_list(name, default, help, flag_values=FLAGS, **args):
- """Registers a flag whose value is a comma-separated list of strings."""
- parser = ListParser()
- serializer = ListSerializer(',')
- DEFINE(parser, name, default, help, flag_values, serializer, **args)
-
-
-def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args):
- """Registers a flag whose value is a whitespace-separated list of strings.
-
- Any whitespace can be used as a separator.
- """
- parser = WhitespaceSeparatedListParser()
- serializer = ListSerializer(' ')
- DEFINE(parser, name, default, help, flag_values, serializer, **args)
-
-
-#
-# MULTI FLAGS
-#
-
-
-class MultiFlag(Flag):
- """A flag that can appear multiple time on the command-line.
-
- The value of such a flag is a list that contains the individual values
- from all the appearances of that flag on the command-line.
-
- See the __doc__ for Flag for most behavior of this class. Only
- differences in behavior are described here:
-
- * The default value may be either a single value or a list of values.
- A single value is interpreted as the [value] singleton list.
-
- * The value of the flag is always a list, even if the option was
- only supplied once, and even if the default value is a single
- value
- """
-
- def __init__(self, *args, **kwargs):
- Flag.__init__(self, *args, **kwargs)
- self.help += ';\n repeat this option to specify a list of values'
-
- def Parse(self, arguments):
- """Parses one or more arguments with the installed parser.
-
- Args:
- arguments: a single argument or a list of arguments (typically a
- list of default values); a single argument is converted
- internally into a list containing one item.
- """
- if not isinstance(arguments, list):
- # Default value may be a list of values. Most other arguments
- # will not be, so convert them into a single-item list to make
- # processing simpler below.
- arguments = [arguments]
-
- if self.present:
- # keep a backup reference to list of previously supplied option values
- values = self.value
- else:
- # "erase" the defaults with an empty list
- values = []
-
- for item in arguments:
- # have Flag superclass parse argument, overwriting self.value reference
- Flag.Parse(self, item) # also increments self.present
- values.append(self.value)
-
- # put list of option values back in the 'value' attribute
- self.value = values
-
- def Serialize(self):
- if not self.serializer:
- raise FlagsError("Serializer not present for flag %s" % self.name)
- if self.value is None:
- return ''
-
- s = ''
-
- multi_value = self.value
-
- for self.value in multi_value:
- if s: s += ' '
- s += Flag.Serialize(self)
-
- self.value = multi_value
-
- return s
-
- def Type(self):
- return 'multi ' + self.parser.Type()
-
-
-def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS,
- **args):
- """Registers a generic MultiFlag that parses its args with a given parser.
-
- Auxiliary function. Normal users should NOT use it directly.
-
- Developers who need to create their own 'Parser' classes for options
- which can appear multiple times can call this module function to
- register their flags.
- """
- DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args),
- flag_values)
-
-
-def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args):
- """Registers a flag whose value can be a list of any strings.
-
- Use the flag on the command line multiple times to place multiple
- string values into the list. The 'default' may be a single string
- (which will be converted into a single-element list) or a list of
- strings.
- """
- parser = ArgumentParser()
- serializer = ArgumentSerializer()
- DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
-
-
-def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None,
- flag_values=FLAGS, **args):
- """Registers a flag whose value can be a list of arbitrary integers.
-
- Use the flag on the command line multiple times to place multiple
- integer values into the list. The 'default' may be a single integer
- (which will be converted into a single-element list) or a list of
- integers.
- """
- parser = IntegerParser(lower_bound, upper_bound)
- serializer = ArgumentSerializer()
- DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
-
-
-# Now register the flags that we want to exist in all applications.
-# These are all defined with allow_override=1, so user-apps can use
-# these flagnames for their own purposes, if they want.
-DEFINE_flag(HelpFlag())
-DEFINE_flag(HelpshortFlag())
-DEFINE_flag(HelpXMLFlag())
-
-# Define special flags here so that help may be generated for them.
-# NOTE: Please do NOT use _SPECIAL_FLAGS from outside this module.
-_SPECIAL_FLAGS = FlagValues()
-
-
-DEFINE_string(
- 'flagfile', "",
- "Insert flag definitions from the given file into the command line.",
- _SPECIAL_FLAGS)
-
-DEFINE_string(
- 'undefok', "",
- "comma-separated list of flag names that it is okay to specify "
- "on the command line even if the program does not define a flag "
- "with that name. IMPORTANT: flags in this list that have "
- "arguments MUST use the --flag=value format.", _SPECIAL_FLAGS)
+++ /dev/null
-[egg_info]
-tag_build =
-tag_date = 0
-tag_svn_revision = 0
-
from distutils.core import setup
setup(name='closure_linter',
- version='2.2.6',
+ version='2.3.5',
description='Closure Linter',
license='Apache',
author='The Closure Linter Authors',