1 #!/usr/bin/env python
\r
3 # Copyright (c) 2009 Google Inc. All rights reserved.
\r
5 # Redistribution and use in source and binary forms, with or without
\r
6 # modification, are permitted provided that the following conditions are
\r
9 # * Redistributions of source code must retain the above copyright
\r
10 # notice, this list of conditions and the following disclaimer.
\r
11 # * Redistributions in binary form must reproduce the above
\r
12 # copyright notice, this list of conditions and the following disclaimer
\r
13 # in the documentation and/or other materials provided with the
\r
15 # * Neither the name of Google Inc. nor the names of its
\r
16 # contributors may be used to endorse or promote products derived from
\r
17 # this software without specific prior written permission.
\r
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
\r
20 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
\r
21 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
\r
22 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
\r
23 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
\r
24 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
\r
25 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
\r
26 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
\r
27 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
\r
28 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
\r
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\r
33 """Does google-lint on c++ files.
\r
35 The goal of this script is to identify places in the code that *may*
\r
36 be in non-compliance with google style. It does not attempt to fix
\r
37 up these problems -- the point is to educate. It does also not
\r
38 attempt to find all problems, or to ensure that everything it does
\r
39 find is legitimately a problem.
\r
41 In particular, we can get very confused by /* and // inside strings!
\r
42 We do a small hack, which is to ignore //'s with "'s after them on the
\r
43 same line, but it is far from perfect (in either direction).
\r
49 import math # for log
\r
59 Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
\r
60 [--counting=total|toplevel|detailed] [--root=subdir]
\r
61 [--linelength=digits]
\r
64 The style guidelines this tries to follow are those in
\r
65 http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
\r
67 Every problem is given a confidence score from 1-5, with 5 meaning we are
\r
68 certain of the problem, and 1 meaning it could be a legitimate construct.
\r
69 This will miss some errors, and is not a substitute for a code review.
\r
71 To suppress false-positive errors of a certain category, add a
\r
72 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
\r
73 suppresses errors of all categories on that line.
\r
75 The files passed in will be linted; at least one file must be provided.
\r
76 Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
\r
77 extensions with the --extensions flag.
\r
82 By default, the output is formatted to ease emacs parsing. Visual Studio
\r
83 compatible output (vs7) may also be used. Other formats are unsupported.
\r
86 Specify a number 0-5 to restrict errors to certain verbosity levels.
\r
89 Specify a comma-separated list of category-filters to apply: only
\r
90 error messages whose category names pass the filters will be printed.
\r
91 (Category names are printed with the message and look like
\r
92 "[whitespace/indent]".) Filters are evaluated left to right.
\r
93 "-FOO" and "FOO" means "do not print categories that start with FOO".
\r
94 "+FOO" means "do print categories that start with FOO".
\r
96 Examples: --filter=-whitespace,+whitespace/braces
\r
97 --filter=whitespace,runtime/printf,+runtime/printf_format
\r
98 --filter=-,+build/include_what_you_use
\r
100 To see a list of all the categories used in cpplint, pass no arg:
\r
103 counting=total|toplevel|detailed
\r
104 The total number of errors found is always printed. If
\r
105 'toplevel' is provided, then the count of errors in each of
\r
106 the top-level categories like 'build' and 'whitespace' will
\r
107 also be printed. If 'detailed' is provided, then a count
\r
108 is provided for each category like 'build/class'.
\r
111 The root directory used for deriving header guard CPP variable.
\r
112 By default, the header guard CPP variable is calculated as the relative
\r
113 path to the directory that contains .git, .hg, or .svn. When this flag
\r
114 is specified, the relative path is calculated from the specified
\r
115 directory. If the specified directory does not exist, this flag is
\r
119 Assuming that src/.git exists, the header guard CPP variables for
\r
120 src/chrome/browser/ui/browser.h are:
\r
122 No flag => CHROME_BROWSER_UI_BROWSER_H_
\r
123 --root=chrome => BROWSER_UI_BROWSER_H_
\r
124 --root=chrome/browser => UI_BROWSER_H_
\r
127 This is the allowed line length for the project. The default value is
\r
133 extensions=extension,extension,...
\r
134 The allowed file extensions that cpplint will check
\r
137 --extensions=hpp,cpp
\r
139 cpplint.py supports per-directory configurations specified in CPPLINT.cfg
\r
140 files. CPPLINT.cfg file can contain a number of key=value pairs.
\r
141 Currently the following options are supported:
\r
144 filter=+filter1,-filter2,...
\r
145 exclude_files=regex
\r
148 "set noparent" option prevents cpplint from traversing directory tree
\r
149 upwards looking for more .cfg files in parent directories. This option
\r
150 is usually placed in the top-level project directory.
\r
152 The "filter" option is similar in function to --filter flag. It specifies
\r
153 message filters in addition to the |_DEFAULT_FILTERS| and those specified
\r
154 through --filter command-line flag.
\r
156 "exclude_files" allows to specify a regular expression to be matched against
\r
157 a file name. If the expression matches, the file is skipped and not run
\r
160 "linelength" allows to specify the allowed line length for the project.
\r
162 CPPLINT.cfg has an effect on files in the same directory and all
\r
163 sub-directories, unless overridden by a nested configuration file.
\r
166 filter=-build/include_order,+build/include_alpha
\r
167 exclude_files=.*\.cc
\r
169 The above example disables build/include_order warning and enables
\r
170 build/include_alpha as well as excludes all .cc from being
\r
171 processed by linter, in the current directory (where the .cfg
\r
172 file is located) and all sub-directories.
\r
175 # We categorize each error message we print. Here are the categories.
\r
176 # We want an explicit list so we can list them all in cpplint --filter=.
\r
177 # If you add a new error message with a new category, add it to the list
\r
178 # here! cpplint_unittest.py should tell you if you forget to do this.
\r
179 _ERROR_CATEGORIES = [
\r
182 'build/deprecated',
\r
183 'build/endif_comment',
\r
184 'build/explicit_make_pair',
\r
185 'build/forward_decl',
\r
186 'build/header_guard',
\r
188 'build/include_alpha',
\r
189 'build/include_order',
\r
190 'build/include_what_you_use',
\r
191 'build/namespaces',
\r
192 'build/printf_format',
\r
193 'build/storage_class',
\r
195 'readability/alt_tokens',
\r
196 'readability/braces',
\r
197 'readability/casting',
\r
198 'readability/check',
\r
199 'readability/constructors',
\r
200 'readability/fn_size',
\r
201 'readability/function',
\r
202 'readability/inheritance',
\r
203 'readability/multiline_comment',
\r
204 'readability/multiline_string',
\r
205 'readability/namespace',
\r
206 'readability/nolint',
\r
208 'readability/strings',
\r
209 'readability/todo',
\r
210 'readability/utf8',
\r
213 'runtime/explicit',
\r
216 'runtime/invalid_increment',
\r
217 'runtime/member_string_references',
\r
219 'runtime/indentation_namespace',
\r
220 'runtime/operator',
\r
222 'runtime/printf_format',
\r
223 'runtime/references',
\r
225 'runtime/threadsafe_fn',
\r
227 'whitespace/blank_line',
\r
228 'whitespace/braces',
\r
229 'whitespace/comma',
\r
230 'whitespace/comments',
\r
231 'whitespace/empty_conditional_body',
\r
232 'whitespace/empty_loop_body',
\r
233 'whitespace/end_of_line',
\r
234 'whitespace/ending_newline',
\r
235 'whitespace/forcolon',
\r
236 'whitespace/indent',
\r
237 'whitespace/line_length',
\r
238 'whitespace/newline',
\r
239 'whitespace/operators',
\r
240 'whitespace/parens',
\r
241 'whitespace/semicolon',
\r
246 # These error categories are no longer enforced by cpplint, but for backwards-
\r
247 # compatibility they may still appear in NOLINT comments.
\r
248 _LEGACY_ERROR_CATEGORIES = [
\r
249 'readability/streams',
\r
252 # The default state of the category filter. This is overridden by the --filter=
\r
253 # flag. By default all errors are on, so only add here categories that should be
\r
254 # off by default (i.e., categories that must be enabled by the --filter= flags).
\r
255 # All entries here should start with a '-' or '+', as in the --filter= flag.
\r
256 _DEFAULT_FILTERS = ['-build/include_alpha']
\r
258 # We used to check for high-bit characters, but after much discussion we
\r
259 # decided those were OK, as long as they were in UTF-8 and didn't represent
\r
260 # hard-coded international strings, which belong in a separate i18n file.
\r
263 _CPP_HEADERS = frozenset([
\r
316 # 17.6.1.2 C++ library headers
\r
324 'condition_variable',
\r
331 'initializer_list',
\r
369 # 17.6.1.2 C++ headers for C library facilities
\r
399 # These headers are excluded from [build/include] and [build/include_order]
\r
401 # - Anything not following google file name conventions (containing an
\r
402 # uppercase character, such as Python.h or nsStringAPI.h, for example).
\r
404 _THIRD_PARTY_HEADERS_PATTERN = re.compile(
\r
405 r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
\r
408 # Assertion macros. These are defined in base/logging.h and
\r
409 # testing/base/gunit.h. Note that the _M versions need to come first
\r
410 # for substring matching to work.
\r
413 'EXPECT_TRUE_M', 'EXPECT_TRUE',
\r
414 'ASSERT_TRUE_M', 'ASSERT_TRUE',
\r
415 'EXPECT_FALSE_M', 'EXPECT_FALSE',
\r
416 'ASSERT_FALSE_M', 'ASSERT_FALSE',
\r
419 # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
\r
420 _CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
\r
422 for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
\r
423 ('>=', 'GE'), ('>', 'GT'),
\r
424 ('<=', 'LE'), ('<', 'LT')]:
\r
425 _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
\r
426 _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
\r
427 _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
\r
428 _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
\r
429 _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
\r
430 _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
\r
432 for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
\r
433 ('>=', 'LT'), ('>', 'LE'),
\r
434 ('<=', 'GT'), ('<', 'GE')]:
\r
435 _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
\r
436 _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
\r
437 _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
\r
438 _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
\r
440 # Alternative tokens and their replacements. For full list, see section 2.5
\r
441 # Alternative tokens [lex.digraph] in the C++ standard.
\r
443 # Digraphs (such as '%:') are not included here since it's a mess to
\r
444 # match those on a word boundary.
\r
445 _ALT_TOKEN_REPLACEMENT = {
\r
459 # Compile regular expression that matches all the above keywords. The "[ =()]"
\r
460 # bit is meant to avoid matching these keywords outside of boolean expressions.
\r
462 # False positives include C-style multi-line comments and multi-line strings
\r
463 # but those have always been troublesome for cpplint.
\r
464 _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
\r
465 r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
\r
468 # These constants define types of headers for use with
\r
469 # _IncludeState.CheckNextIncludeOrder().
\r
471 _CPP_SYS_HEADER = 2
\r
472 _LIKELY_MY_HEADER = 3
\r
473 _POSSIBLE_MY_HEADER = 4
\r
476 # These constants define the current inline assembly state
\r
477 _NO_ASM = 0 # Outside of inline assembly block
\r
478 _INSIDE_ASM = 1 # Inside inline assembly block
\r
479 _END_ASM = 2 # Last line of inline assembly block
\r
480 _BLOCK_ASM = 3 # The whole block is an inline assembly block
\r
482 # Match start of assembly blocks
\r
483 _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
\r
484 r'(?:\s+(volatile|__volatile__))?'
\r
488 _regexp_compile_cache = {}
\r
490 # {str, set(int)}: a map from error categories to sets of linenumbers
\r
491 # on which those errors are expected and should be suppressed.
\r
492 _error_suppressions = {}
\r
494 # The root directory used for deriving header guard CPP variable.
\r
495 # This is set by --root flag.
\r
498 # The allowed line length of files.
\r
499 # This is set by --linelength flag.
\r
502 # The allowed extensions for file names
\r
503 # This is set by --extensions flag.
\r
504 _valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
\r
506 def ParseNolintSuppressions(filename, raw_line, linenum, error):
\r
507 """Updates the global list of error-suppressions.
\r
509 Parses any NOLINT comments on the current line, updating the global
\r
510 error_suppressions store. Reports an error if the NOLINT comment
\r
514 filename: str, the name of the input file.
\r
515 raw_line: str, the line of input text, with comments.
\r
516 linenum: int, the number of the current line.
\r
517 error: function, an error handler.
\r
519 matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
\r
521 if matched.group(1):
\r
522 suppressed_line = linenum + 1
\r
524 suppressed_line = linenum
\r
525 category = matched.group(2)
\r
526 if category in (None, '(*)'): # => "suppress all"
\r
527 _error_suppressions.setdefault(None, set()).add(suppressed_line)
\r
529 if category.startswith('(') and category.endswith(')'):
\r
530 category = category[1:-1]
\r
531 if category in _ERROR_CATEGORIES:
\r
532 _error_suppressions.setdefault(category, set()).add(suppressed_line)
\r
533 elif category not in _LEGACY_ERROR_CATEGORIES:
\r
534 error(filename, linenum, 'readability/nolint', 5,
\r
535 'Unknown NOLINT error category: %s' % category)
\r
def ResetNolintSuppressions():
  """Resets the set of NOLINT suppressions to empty."""
  _error_suppressions.clear()
\r
def IsErrorSuppressedByNolint(category, linenum):
  """Returns true if the specified error category is suppressed on this line.

  Consults the global error_suppressions map populated by
  ParseNolintSuppressions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment.
  """
  # A line is suppressed either for this specific category or because a
  # category-less NOLINT (stored under the None key) covers every category.
  return (linenum in _error_suppressions.get(category, set()) or
          linenum in _error_suppressions.get(None, set()))
\r
def Match(pattern, s):
  """Matches the string with the pattern, caching the compiled regexp."""
  # The regexp compilation caching is inlined in both Match and Search for
  # performance reasons; factoring it out into a separate function turns out
  # to be noticeably expensive.
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].match(s)
\r
def ReplaceAll(pattern, rep, s):
  """Replaces instances of pattern in a string with a replacement.

  The compiled regex is kept in a cache shared by Match and Search.

  Args:
    pattern: regex pattern
    rep: replacement text
    s: search string

  Returns:
    string with replacements made (or original string if no replacements)
  """
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].sub(rep, s)
\r
def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp."""
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].search(s)
\r
594 class _IncludeState(object):
\r
595 """Tracks line numbers for includes, and the order in which includes appear.
\r
597 include_list contains list of lists of (header, line number) pairs.
\r
598 It's a lists of lists rather than just one flat list to make it
\r
599 easier to update across preprocessor boundaries.
\r
601 Call CheckNextIncludeOrder() once for each header in the file, passing
\r
602 in the type constants defined above. Calls in an illegal order will
\r
603 raise an _IncludeError with an appropriate error message.
\r
606 # self._section will move monotonically through this set. If it ever
\r
607 # needs to move backwards, CheckNextIncludeOrder will raise an error.
\r
608 _INITIAL_SECTION = 0
\r
612 _OTHER_H_SECTION = 4
\r
615 _C_SYS_HEADER: 'C system header',
\r
616 _CPP_SYS_HEADER: 'C++ system header',
\r
617 _LIKELY_MY_HEADER: 'header this file implements',
\r
618 _POSSIBLE_MY_HEADER: 'header this file may implement',
\r
619 _OTHER_HEADER: 'other header',
\r
622 _INITIAL_SECTION: "... nothing. (This can't be an error.)",
\r
623 _MY_H_SECTION: 'a header this file implements',
\r
624 _C_SECTION: 'C system header',
\r
625 _CPP_SECTION: 'C++ system header',
\r
626 _OTHER_H_SECTION: 'other header',
\r
629 def __init__(self):
\r
630 self.include_list = [[]]
\r
631 self.ResetSection('')
\r
633 def FindHeader(self, header):
\r
634 """Check if a header has already been included.
\r
637 header: header to check.
\r
639 Line number of previous occurrence, or -1 if the header has not
\r
642 for section_list in self.include_list:
\r
643 for f in section_list:
\r
648 def ResetSection(self, directive):
\r
649 """Reset section checking for preprocessor directive.
\r
652 directive: preprocessor directive (e.g. "if", "else").
\r
654 # The name of the current section.
\r
655 self._section = self._INITIAL_SECTION
\r
656 # The path of last found header.
\r
657 self._last_header = ''
\r
659 # Update list of includes. Note that we never pop from the
\r
661 if directive in ('if', 'ifdef', 'ifndef'):
\r
662 self.include_list.append([])
\r
663 elif directive in ('else', 'elif'):
\r
664 self.include_list[-1] = []
\r
666 def SetLastHeader(self, header_path):
\r
667 self._last_header = header_path
\r
669 def CanonicalizeAlphabeticalOrder(self, header_path):
\r
670 """Returns a path canonicalized for alphabetical comparison.
\r
672 - replaces "-" with "_" so they both cmp the same.
\r
673 - removes '-inl' since we don't require them to be after the main header.
\r
674 - lowercase everything, just in case.
\r
677 header_path: Path to be canonicalized.
\r
680 Canonicalized path.
\r
682 return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
\r
684 def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
\r
685 """Check if a header is in alphabetical order with the previous header.
\r
688 clean_lines: A CleansedLines instance containing the file.
\r
689 linenum: The number of the line to check.
\r
690 header_path: Canonicalized header to be checked.
\r
693 Returns true if the header is in alphabetical order.
\r
695 # If previous section is different from current section, _last_header will
\r
696 # be reset to empty string, so it's always less than current header.
\r
698 # If previous line was a blank line, assume that the headers are
\r
699 # intentionally sorted the way they are.
\r
700 if (self._last_header > header_path and
\r
701 Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
\r
705 def CheckNextIncludeOrder(self, header_type):
\r
706 """Returns a non-empty error message if the next header is out of order.
\r
708 This function also updates the internal state to be ready to check
\r
712 header_type: One of the _XXX_HEADER constants defined above.
\r
715 The empty string if the header is in the right order, or an
\r
716 error message describing what's wrong.
\r
719 error_message = ('Found %s after %s' %
\r
720 (self._TYPE_NAMES[header_type],
\r
721 self._SECTION_NAMES[self._section]))
\r
723 last_section = self._section
\r
725 if header_type == _C_SYS_HEADER:
\r
726 if self._section <= self._C_SECTION:
\r
727 self._section = self._C_SECTION
\r
729 self._last_header = ''
\r
730 return error_message
\r
731 elif header_type == _CPP_SYS_HEADER:
\r
732 if self._section <= self._CPP_SECTION:
\r
733 self._section = self._CPP_SECTION
\r
735 self._last_header = ''
\r
736 return error_message
\r
737 elif header_type == _LIKELY_MY_HEADER:
\r
738 if self._section <= self._MY_H_SECTION:
\r
739 self._section = self._MY_H_SECTION
\r
741 self._section = self._OTHER_H_SECTION
\r
742 elif header_type == _POSSIBLE_MY_HEADER:
\r
743 if self._section <= self._MY_H_SECTION:
\r
744 self._section = self._MY_H_SECTION
\r
746 # This will always be the fallback because we're not sure
\r
747 # enough that the header is associated with this file.
\r
748 self._section = self._OTHER_H_SECTION
\r
750 assert header_type == _OTHER_HEADER
\r
751 self._section = self._OTHER_H_SECTION
\r
753 if last_section != self._section:
\r
754 self._last_header = ''
\r
759 class _CppLintState(object):
\r
760 """Maintains module-wide state.."""
\r
762 def __init__(self):
\r
763 self.verbose_level = 1 # global setting.
\r
764 self.error_count = 0 # global count of reported errors
\r
765 # filters to apply when emitting error messages
\r
766 self.filters = _DEFAULT_FILTERS[:]
\r
767 # backup of filter list. Used to restore the state after each file.
\r
768 self._filters_backup = self.filters[:]
\r
769 self.counting = 'total' # In what way are we counting errors?
\r
770 self.errors_by_category = {} # string to int dict storing error counts
\r
773 # "emacs" - format that emacs can parse (default)
\r
774 # "vs7" - format that Microsoft Visual Studio 7 can parse
\r
775 self.output_format = 'emacs'
\r
777 def SetOutputFormat(self, output_format):
\r
778 """Sets the output format for errors."""
\r
779 self.output_format = output_format
\r
781 def SetVerboseLevel(self, level):
\r
782 """Sets the module's verbosity, and returns the previous setting."""
\r
783 last_verbose_level = self.verbose_level
\r
784 self.verbose_level = level
\r
785 return last_verbose_level
\r
787 def SetCountingStyle(self, counting_style):
\r
788 """Sets the module's counting options."""
\r
789 self.counting = counting_style
\r
791 def SetFilters(self, filters):
\r
792 """Sets the error-message filters.
\r
794 These filters are applied when deciding whether to emit a given
\r
798 filters: A string of comma-separated filters (eg "+whitespace/indent").
\r
799 Each filter should start with + or -; else we die.
\r
802 ValueError: The comma-separated filters did not all start with '+' or '-'.
\r
803 E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
\r
805 # Default filters always have less priority than the flag ones.
\r
806 self.filters = _DEFAULT_FILTERS[:]
\r
807 self.AddFilters(filters)
\r
809 def AddFilters(self, filters):
\r
810 """ Adds more filters to the existing list of error-message filters. """
\r
811 for filt in filters.split(','):
\r
812 clean_filt = filt.strip()
\r
814 self.filters.append(clean_filt)
\r
815 for filt in self.filters:
\r
816 if not (filt.startswith('+') or filt.startswith('-')):
\r
817 raise ValueError('Every filter in --filters must start with + or -'
\r
818 ' (%s does not)' % filt)
\r
820 def BackupFilters(self):
\r
821 """ Saves the current filter list to backup storage."""
\r
822 self._filters_backup = self.filters[:]
\r
824 def RestoreFilters(self):
\r
825 """ Restores filters previously backed up."""
\r
826 self.filters = self._filters_backup[:]
\r
828 def ResetErrorCounts(self):
\r
829 """Sets the module's error statistic back to zero."""
\r
830 self.error_count = 0
\r
831 self.errors_by_category = {}
\r
833 def IncrementErrorCount(self, category):
\r
834 """Bumps the module's error statistic."""
\r
835 self.error_count += 1
\r
836 if self.counting in ('toplevel', 'detailed'):
\r
837 if self.counting != 'detailed':
\r
838 category = category.split('/')[0]
\r
839 if category not in self.errors_by_category:
\r
840 self.errors_by_category[category] = 0
\r
841 self.errors_by_category[category] += 1
\r
843 def PrintErrorCounts(self):
\r
844 """Print a summary of errors by category, and the total."""
\r
845 for category, count in self.errors_by_category.iteritems():
\r
846 sys.stderr.write('Category \'%s\' errors found: %d\n' %
\r
848 sys.stderr.write('Total errors found: %d\n' % self.error_count)
\r
850 _cpplint_state = _CppLintState()
\r
def _OutputFormat():
  """Gets the module's output format."""
  return _cpplint_state.output_format
\r
def _SetOutputFormat(output_format):
  """Sets the module's output format."""
  _cpplint_state.SetOutputFormat(output_format)
\r
def _VerboseLevel():
  """Returns the module's verbosity setting."""
  return _cpplint_state.verbose_level
\r
def _SetVerboseLevel(level):
  """Sets the module's verbosity, and returns the previous setting."""
  return _cpplint_state.SetVerboseLevel(level)
\r
def _SetCountingStyle(level):
  """Sets the module's counting options."""
  _cpplint_state.SetCountingStyle(level)
\r
def _Filters():
  """Returns the module's list of output filters, as a list."""
  return _cpplint_state.filters
\r
def _SetFilters(filters):
  """Sets the module's error-message filters.

  These filters are applied when deciding whether to emit a given
  error message.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.SetFilters(filters)
\r
def _AddFilters(filters):
  """Adds more filter overrides.

  Unlike _SetFilters, this function does not reset the current list of filters
  already in place; it merely appends to it.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.AddFilters(filters)
\r
def _BackupFilters():
  """ Saves the current filter list to backup storage."""
  _cpplint_state.BackupFilters()
\r
def _RestoreFilters():
  """ Restores filters previously backed up."""
  _cpplint_state.RestoreFilters()
\r
915 class _FunctionState(object):
\r
916 """Tracks current function name and the number of lines in its body."""
\r
918 _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
\r
919 _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
\r
921 def __init__(self):
\r
922 self.in_a_function = False
\r
923 self.lines_in_function = 0
\r
924 self.current_function = ''
\r
926 def Begin(self, function_name):
\r
927 """Start analyzing function body.
\r
930 function_name: The name of the function being tracked.
\r
932 self.in_a_function = True
\r
933 self.lines_in_function = 0
\r
934 self.current_function = function_name
\r
937 """Count line in current function body."""
\r
938 if self.in_a_function:
\r
939 self.lines_in_function += 1
\r
941 def Check(self, error, filename, linenum):
\r
942 """Report if too many lines in function body.
\r
945 error: The function to call with any errors found.
\r
946 filename: The name of the current file.
\r
947 linenum: The number of the line to check.
\r
949 if Match(r'T(EST|est)', self.current_function):
\r
950 base_trigger = self._TEST_TRIGGER
\r
952 base_trigger = self._NORMAL_TRIGGER
\r
953 trigger = base_trigger * 2**_VerboseLevel()
\r
955 if self.lines_in_function > trigger:
\r
956 error_level = int(math.log(self.lines_in_function / base_trigger, 2))
\r
957 # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
\r
958 if error_level > 5:
\r
960 error(filename, linenum, 'readability/fn_size', error_level,
\r
961 'Small and focused functions are preferred:'
\r
962 ' %s has %d non-comment lines'
\r
963 ' (error triggered by exceeding %d lines).' % (
\r
964 self.current_function, self.lines_in_function, trigger))
\r
967 """Stop analyzing function body."""
\r
968 self.in_a_function = False
\r
971 class _IncludeError(Exception):
\r
972 """Indicates a problem with the include order in a file."""
\r
976 class FileInfo(object):
\r
977 """Provides utility functions for filenames.
\r
979 FileInfo provides easy access to the components of a file's path
\r
980 relative to the project root.
\r
983 def __init__(self, filename):
\r
984 self._filename = filename
\r
986 def FullName(self):
\r
987 """Make Windows paths like Unix."""
\r
988 return os.path.abspath(self._filename).replace('\\', '/')
\r
990 def RepositoryName(self):
\r
991 """FullName after removing the local path to the repository.
\r
993 If we have a real absolute path name here we can try to do something smart:
\r
994 detecting the root of the checkout and truncating /path/to/checkout from
\r
995 the name so that we get header guards that don't include things like
\r
996 "C:\Documents and Settings\..." or "/home/username/..." in them and thus
\r
997 people on different computers who have checked the source out to different
\r
998 locations won't see bogus errors.
\r
1000 fullname = self.FullName()
\r
1002 if os.path.exists(fullname):
\r
1003 project_dir = os.path.dirname(fullname)
\r
1005 if os.path.exists(os.path.join(project_dir, ".svn")):
\r
1006 # If there's a .svn file in the current directory, we recursively look
\r
1007 # up the directory tree for the top of the SVN checkout
\r
1008 root_dir = project_dir
\r
1009 one_up_dir = os.path.dirname(root_dir)
\r
1010 while os.path.exists(os.path.join(one_up_dir, ".svn")):
\r
1011 root_dir = os.path.dirname(root_dir)
\r
1012 one_up_dir = os.path.dirname(one_up_dir)
\r
1014 prefix = os.path.commonprefix([root_dir, project_dir])
\r
1015 return fullname[len(prefix) + 1:]
\r
1017 # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
\r
1018 # searching up from the current path.
\r
1019 root_dir = os.path.dirname(fullname)
\r
1020 while (root_dir != os.path.dirname(root_dir) and
\r
1021 not os.path.exists(os.path.join(root_dir, ".git")) and
\r
1022 not os.path.exists(os.path.join(root_dir, ".hg")) and
\r
1023 not os.path.exists(os.path.join(root_dir, ".svn"))):
\r
1024 root_dir = os.path.dirname(root_dir)
\r
1026 if (os.path.exists(os.path.join(root_dir, ".git")) or
\r
1027 os.path.exists(os.path.join(root_dir, ".hg")) or
\r
1028 os.path.exists(os.path.join(root_dir, ".svn"))):
\r
1029 prefix = os.path.commonprefix([root_dir, project_dir])
\r
1030 return fullname[len(prefix) + 1:]
\r
1032 # Don't know what to do; header guard warnings may be wrong...
\r
1036 """Splits the file into the directory, basename, and extension.
\r
1038 For 'chrome/browser/browser.cc', Split() would
\r
1039 return ('chrome/browser', 'browser', '.cc')
\r
1042 A tuple of (directory, basename, extension).
\r
1045 googlename = self.RepositoryName()
\r
1046 project, rest = os.path.split(googlename)
\r
1047 return (project,) + os.path.splitext(rest)
\r
1049 def BaseName(self):
\r
1050 """File base name - text after the final slash, before the final period."""
\r
1051 return self.Split()[1]
\r
1053 def Extension(self):
\r
1054 """File extension - text following the final period."""
\r
1055 return self.Split()[2]
\r
1057 def NoExtension(self):
\r
1058 """File has no source file extension."""
\r
1059 return '/'.join(self.Split()[0:2])
\r
1061 def IsSource(self):
\r
1062 """File has a source file extension."""
\r
1063 return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
\r
def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed.

  Args:
    category: Error category string, e.g. 'whitespace/indent'.
    confidence: Integer 1-5 confidence score for the error.
    linenum: Line number of the error, used for NOLINT suppression lookup.

  Returns:
    True if the error should be printed, False if suppressed/filtered.
  """

  # There are three ways we might decide not to print an error message:
  # a "NOLINT(category)" comment appears in the source,
  # the verbosity level isn't high enough, or the filters filter it out.
  if IsErrorSuppressedByNolint(category, linenum):
    return False

  if confidence < _cpplint_state.verbose_level:
    return False

  is_filtered = False
  for one_filter in _Filters():
    if one_filter.startswith('-'):
      if category.startswith(one_filter[1:]):
        is_filtered = True
    elif one_filter.startswith('+'):
      if category.startswith(one_filter[1:]):
        is_filtered = False
    else:
      assert False  # should have been checked for in SetFilter.
  if is_filtered:
    return False

  return True
\r
def Error(filename, linenum, category, confidence, message):
  """Logs the fact we've found a lint error.

  We log where the error was found, and also our confidence in the error,
  that is, how certain we are this is a legitimate style regression, and
  not a misidentification or a use that's sometimes justified.

  False positives can be suppressed by the use of
  "cpplint(category)" comments on the offending line.  These are
  parsed into _error_suppressions.

  Args:
    filename: The name of the file containing the error.
    linenum: The number of the line containing the error.
    category: A string used to describe the "category" this bug
      falls under: "whitespace", say, or "runtime". Categories
      may have a hierarchy separated by slashes: "whitespace/indent".
    confidence: A number from 1-5 representing a confidence score for
      the error, with 5 meaning that we are certain of the problem,
      and 1 meaning that it could be a legitimate construct.
    message: The error message.
  """
  if _ShouldPrintError(category, confidence, linenum):
    _cpplint_state.IncrementErrorCount(category)
    if _cpplint_state.output_format == 'vs7':
      sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence))
    elif _cpplint_state.output_format == 'eclipse':
      sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence))
    else:
      # Default 'emacs' style output.
      sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence))
\r
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right side.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
    _RE_PATTERN_C_COMMENTS + r'\s+|' +
    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
    _RE_PATTERN_C_COMMENTS + r')')
\r
def IsCppString(line):
  """Does line terminate so, that the next symbol is in string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from the 0..n.

  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """
  # Neutralize escaped backslashes first so that \\" is not mistaken
  # for an escaped quote \".
  sanitized = line.replace(r'\\', 'XX')
  unescaped_quotes = (sanitized.count('"')
                      - sanitized.count(r'\"')
                      - sanitized.count("'\"'"))
  return unescaped_quotes % 2 == 1
\r
def CleanseRawStrings(raw_lines):
  """Removes C++11 raw strings from lines.

  Before:
    static const char kData[] = R"(
        multi-line string
        )";

  After:
    static const char kData[] = ""
        (replaced by blank line)
        "";

  Args:
    raw_lines: list of raw lines.

  Returns:
    list of lines with C++11 raw strings replaced by empty strings.
  """

  delimiter = None            # closing delimiter of the raw string in progress
  lines_without_raw_strings = []
  for line in raw_lines:
    if delimiter:
      # Inside a raw string, look for the end
      end = line.find(delimiter)
      if end >= 0:
        # Found the end of the string, match leading space for this
        # line and resume copying the original lines, and also insert
        # a "" on the last line.
        leading_space = Match(r'^(\s*)\S', line)
        line = leading_space.group(1) + '""' + line[end + len(delimiter):]
        delimiter = None
      else:
        # Haven't found the end yet, append a blank line.
        line = '""'

    # Look for beginning of a raw string, and replace them with
    # empty strings. This is done in a loop to handle multiple raw
    # strings on the same line.
    while delimiter is None:
      # Look for beginning of a raw string.
      # See 2.14.15 [lex.string] for syntax.
      matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
      if matched:
        delimiter = ')' + matched.group(2) + '"'

        end = matched.group(3).find(delimiter)
        if end >= 0:
          # Raw string ended on same line
          line = (matched.group(1) + '""' +
                  matched.group(3)[end + len(delimiter):])
          delimiter = None
        else:
          # Start of a multi-line raw string
          line = matched.group(1) + '""'
      else:
        break

    lines_without_raw_strings.append(line)

  # TODO(unknown): if delimiter is not None here, we might want to
  # emit a warning for unterminated string.
  return lines_without_raw_strings
\r
def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment.

  Args:
    lines: list of source lines.
    lineix: index to start scanning from.

  Returns:
    Index of the first line at or after lineix that opens a multi-line
    comment, or len(lines) if none is found.
  """
  while lineix < len(lines):
    if lines[lineix].strip().startswith('/*'):
      # Only return this marker if the comment goes beyond this line
      if lines[lineix].strip().find('*/', 2) < 0:
        return lineix
    lineix += 1
  return len(lines)
\r
def FindNextMultiLineCommentEnd(lines, lineix):
  """We are inside a comment, find the end marker.

  Args:
    lines: list of source lines.
    lineix: index to start scanning from.

  Returns:
    Index of the line ending the comment, or len(lines) if not found.
  """
  while lineix < len(lines):
    if lines[lineix].strip().endswith('*/'):
      return lineix
    lineix += 1
  return len(lines)
\r
def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Clears a range of lines for multi-line comments.

  Mutates 'lines' in place, replacing lines[begin:end] with '/**/'.
  """
  # Having // dummy comments makes the lines non-empty, so we will not get
  # unnecessary blank line warnings later in the code.
  for i in range(begin, end):
    lines[i] = '/**/'
\r
def RemoveMultiLineComments(filename, lines, error):
  """Removes multiline (c-style) comments from lines.

  Args:
    filename: The name of the current file.
    lines: list of source lines, mutated in place.
    error: The function to call with any errors found.
  """
  lineix = 0
  while lineix < len(lines):
    lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
    if lineix_begin >= len(lines):
      return
    lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
    if lineix_end >= len(lines):
      error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
            'Could not find end of multi-line comment')
      return
    RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
    lineix = lineix_end + 1
\r
def CleanseComments(line):
  """Removes //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.

  Returns:
    The line with single-line comments removed.
  """
  slash_pos = line.find('//')
  # A '//' inside a string literal is not a comment; IsCppString tells us
  # whether the text before the '//' leaves us inside a string.
  has_line_comment = slash_pos != -1 and not IsCppString(line[:slash_pos])
  if has_line_comment:
    line = line[:slash_pos].rstrip()
  # get rid of /* ... */
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
\r
class CleansedLines(object):
  """Holds 4 copies of all lines with different preprocessing applied to them.

  1) elided member contains lines without strings and comments.
  2) lines member contains lines without comments.
  3) raw_lines member contains all the lines without processing.
  4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
     strings removed.
  All these members are of <type 'list'>, and of the same length.
  """

  def __init__(self, lines):
    self.elided = []
    self.lines = []
    self.raw_lines = lines
    self.num_lines = len(lines)
    self.lines_without_raw_strings = CleanseRawStrings(lines)
    for linenum in range(len(self.lines_without_raw_strings)):
      self.lines.append(CleanseComments(
          self.lines_without_raw_strings[linenum]))
      elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
      self.elided.append(CleanseComments(elided))

  def NumLines(self):
    """Returns the number of lines represented."""
    return self.num_lines

  @staticmethod
  def _CollapseStrings(elided):
    """Collapses strings and chars on a line to simple "" or '' blocks.

    We nix strings first so we're not fooled by text like '"http://"'

    Args:
      elided: The line being processed.

    Returns:
      The line with collapsed strings.
    """
    # #include lines may legitimately contain quote-like characters.
    if _RE_PATTERN_INCLUDE.match(elided):
      return elided

    # Remove escaped characters first to make quote/single quote collapsing
    # basic. Things that look like escaped characters shouldn't occur
    # outside of strings and chars.
    elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)

    # Replace quoted strings and digit separators. Both single quotes
    # and double quotes are processed in the same loop, otherwise
    # nested quotes wouldn't work.
    collapsed = ''
    while True:
      # Find the first quote character
      match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
      if not match:
        collapsed += elided
        break
      head, quote, tail = match.groups()

      if quote == '"':
        # Collapse double quoted strings
        second_quote = tail.find('"')
        if second_quote >= 0:
          collapsed += head + '""'
          elided = tail[second_quote + 1:]
        else:
          # Unmatched double quote, don't bother processing the rest
          # of the line since this is probably a multiline string.
          collapsed += elided
          break
      else:
        # Found single quote, check nearby text to eliminate digit separators.
        #
        # There is no special handling for floating point here, because
        # the integer/fractional/exponent parts would all be parsed
        # correctly as long as there are digits on both sides of the
        # separator. So we are fine as long as we don't see something
        # like "0.'3" (gcc 4.9.0 will not allow this literal).
        if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
          match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
          collapsed += head + match_literal.group(1).replace("'", '')
          elided = match_literal.group(2)
        else:
          second_quote = tail.find('\'')
          if second_quote >= 0:
            collapsed += head + "''"
            elided = tail[second_quote + 1:]
          else:
            # Unmatched single quote
            collapsed += elided
            break

    return collapsed
\r
def FindEndOfExpressionInLine(line, startpos, stack):
  """Find the position just after the end of current parenthesized expression.

  Args:
    line: a CleansedLines line.
    startpos: start searching at this position.
    stack: nesting stack at startpos.

  Returns:
    On finding matching end: (index just after matching end, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at end of this line)
  """
  for i in xrange(startpos, len(line)):
    char = line[i]
    if char in '([{':
      # Found start of parenthesized expression, push to expression stack
      stack.append(char)
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        if stack and stack[-1] == '<':
          stack.pop()
          if not stack:
            return (-1, None)
      elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
        # operator<, don't add to stack
        continue
      else:
        # Tentative start of template argument list
        stack.append('<')
    elif char in ')]}':
      # Found end of parenthesized expression.
      #
      # If we are currently expecting a matching '>', the pending '<'
      # must have been an operator. Remove them from expression stack.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((stack[-1] == '(' and char == ')') or
          (stack[-1] == '[' and char == ']') or
          (stack[-1] == '{' and char == '}')):
        stack.pop()
        if not stack:
          return (i + 1, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == '>':
      # Found potential end of template argument list.

      # Ignore "->" and operator functions
      if (i > 0 and
          (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
        continue

      # Pop the stack if there is a matching '<'. Otherwise, ignore
      # this '>' since it must be an operator.
      if stack:
        if stack[-1] == '<':
          stack.pop()
          if not stack:
            return (i + 1, None)
    elif char == ';':
      # Found something that look like end of statements. If we are currently
      # expecting a '>', the matching '<' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)

  # Did not find end of expression or unbalanced parentheses on this line
  return (-1, stack)
\r
def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [ or <, finds the position that closes it.

  If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
  linenum/pos that correspond to the closing of the expression.

  TODO(unknown): cpplint spends a fair bit of time matching parentheses.
  Ideally we would want to index all opening and closing parentheses once
  and have CloseExpression be just a simple lookup, but due to preprocessor
  tricks, this is not so easy.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *past* the closing brace, or
    (line, len(lines), -1) if we never find a close. Note we ignore
    strings and comments when matching; and the line we return is the
    'cleansed' line at linenum.
  """

  line = clean_lines.elided[linenum]
  # '<<' and '<=' are operators, never the start of a template list.
  if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
    return (line, clean_lines.NumLines(), -1)

  # Check first line
  (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
  if end_pos > -1:
    return (line, linenum, end_pos)

  # Continue scanning forward
  while stack and linenum < clean_lines.NumLines() - 1:
    linenum += 1
    line = clean_lines.elided[linenum]
    (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
    if end_pos > -1:
      return (line, linenum, end_pos)

  # Did not find end of expression before end of file, give up
  return (line, clean_lines.NumLines(), -1)
\r
def FindStartOfExpressionInLine(line, endpos, stack):
  """Find position at the matching start of current expression.

  This is almost the reverse of FindEndOfExpressionInLine, but note
  that the input position and returned position differs by 1.

  Args:
    line: a CleansedLines line.
    endpos: start searching at this position.
    stack: nesting stack at endpos.

  Returns:
    On finding matching start: (index at matching start, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at beginning of this line)
  """
  i = endpos
  while i >= 0:
    char = line[i]
    if char in ')]}':
      # Found end of expression, push to expression stack
      stack.append(char)
    elif char == '>':
      # Found potential end of template argument list.
      #
      # Ignore it if it's a "->" or ">=" or "operator>"
      if (i > 0 and
          (line[i - 1] == '-' or
           Match(r'\s>=\s', line[i - 1:]) or
           Search(r'\boperator\s*$', line[0:i]))):
        # Skip the preceding character as well.
        i -= 1
      else:
        stack.append('>')
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        i -= 1
      else:
        # If there is a matching '>', we can pop the expression stack.
        # Otherwise, ignore this '<' since it must be an operator.
        if stack and stack[-1] == '>':
          stack.pop()
          if not stack:
            return (i, None)
    elif char in '([{':
      # Found start of expression.
      #
      # If there are any unmatched '>' on the stack, they must be
      # operators. Remove those.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((char == '(' and stack[-1] == ')') or
          (char == '[' and stack[-1] == ']') or
          (char == '{' and stack[-1] == '}')):
        stack.pop()
        if not stack:
          return (i, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == ';':
      # Found something that look like end of statements. If we are currently
      # expecting a '<', the matching '>' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)

    i -= 1

  return (-1, stack)
\r
def ReverseCloseExpression(clean_lines, linenum, pos):
  """If input points to ) or } or ] or >, finds the position that opens it.

  If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
  linenum/pos that correspond to the opening of the expression.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *at* the opening brace, or
    (line, 0, -1) if we never find the matching opening brace. Note
    we ignore strings and comments when matching; and the line we
    return is the 'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  if line[pos] not in ')}]>':
    return (line, 0, -1)

  # Check last line
  (start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
  if start_pos > -1:
    return (line, linenum, start_pos)

  # Continue scanning backward
  while stack and linenum > 0:
    linenum -= 1
    line = clean_lines.elided[linenum]
    (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
    if start_pos > -1:
      return (line, linenum, start_pos)

  # Did not find start of expression before beginning of file, give up
  return (line, 0, -1)
\r
def CheckForCopyright(filename, lines, error):
  """Logs an error if no Copyright message appears at the top of the file."""

  # We'll say it should occur by line 10. Don't forget there's a
  # dummy line at the front.
  scan_limit = min(len(lines), 11)
  copyright_found = any(re.search(r'Copyright', lines[ix], re.I)
                        for ix in xrange(1, scan_limit))
  if not copyright_found:
    error(filename, 0, 'legal/copyright', 5,
          'No copyright message found. '
          'You should have a line: "Copyright [year] <Copyright Owner>"')
\r
def GetIndentLevel(line):
  """Return the number of leading spaces in line.

  Args:
    line: A string to check.

  Returns:
    An integer count of leading spaces, possibly zero.
  """
  indent = Match(r'^( *)\S', line)
  if indent:
    return len(indent.group(1))
  else:
    # Blank or all-whitespace line: no indentation to report.
    return 0
\r
def GetHeaderGuardCPPVariable(filename):
  """Returns the CPP variable that should be used as a header guard.

  Args:
    filename: The name of a C++ header file.

  Returns:
    The CPP variable that should be used as a header guard in the
    named file.
  """

  # Restores original filename in case that cpplint is invoked from Emacs's
  # flymake.
  filename = re.sub(r'_flymake\.h$', '.h', filename)
  filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
  # Replace 'c++' with 'cpp'.
  filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')

  fileinfo = FileInfo(filename)
  file_path_from_root = fileinfo.RepositoryName()
  # Strip the user-supplied --root prefix, if any, so guards stay stable.
  if _root:
    file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
  return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
\r
def CheckForHeaderGuard(filename, clean_lines, error):
  """Checks that the file contains a header guard.

  Logs an error if no #ifndef header guard is present. For other
  headers, checks that the full pathname is used.

  Args:
    filename: The name of the C++ header file.
    clean_lines: A CleansedLines instance containing the file.
    error: The function to call with any errors found.
  """

  # Don't check for header guards if there are error suppression
  # comments somewhere in this file.
  #
  # Because this is silencing a warning for a nonexistent line, we
  # only support the very specific NOLINT(build/header_guard) syntax,
  # and not the general NOLINT or NOLINT(*) syntax.
  raw_lines = clean_lines.lines_without_raw_strings
  for i in raw_lines:
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
      return

  cppvar = GetHeaderGuardCPPVariable(filename)

  ifndef = ''
  ifndef_linenum = 0
  define = ''
  endif = ''
  endif_linenum = 0
  for linenum, line in enumerate(raw_lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum

  if not ifndef or not define or ifndef != define:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5

    ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)

  # Check for "//" comments on endif line.
  ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
                          error)
  match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
  if match:
    if match.group(1) == '_':
      # Issue low severity warning for deprecated double trailing underscore
      error(filename, endif_linenum, 'build/header_guard', 0,
            '#endif line should be "#endif // %s"' % cppvar)
    return

  # Didn't find the corresponding "//" comment. If this file does not
  # contain any "//" comments at all, it could be that the compiler
  # only wants "/**/" comments, look for those instead.
  no_single_line_comments = True
  for i in xrange(1, len(raw_lines) - 1):
    line = raw_lines[i]
    if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
      no_single_line_comments = False
      break

  if no_single_line_comments:
    match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
    if match:
      if match.group(1) == '_':
        # Low severity warning for double trailing underscore
        error(filename, endif_linenum, 'build/header_guard', 0,
              '#endif line should be "#endif /* %s */"' % cppvar)
      return

  # Didn't find anything
  error(filename, endif_linenum, 'build/header_guard', 5,
        '#endif line should be "#endif // %s"' % cppvar)
\r
def CheckHeaderFileIncluded(filename, include_state, error):
  """Logs an error if a .cc file does not include its header."""

  # Do not check test files
  if filename.endswith('_test.cc') or filename.endswith('_unittest.cc'):
    return

  fileinfo = FileInfo(filename)
  # Replace the trailing 'cc' with 'h' to get the expected header path.
  headerfile = filename[0:len(filename) - 2] + 'h'
  if not os.path.exists(headerfile):
    return
  headername = FileInfo(headerfile).RepositoryName()
  first_include = 0
  for section_list in include_state.include_list:
    for f in section_list:
      if headername in f[0] or f[0] in headername:
        return
      # Remember the first include line so the error points somewhere useful.
      if not first_include:
        first_include = f[1]

  error(filename, first_include, 'build/include', 5,
        '%s should include its header file %s' % (fileinfo.RepositoryName(),
                                                  headername))
\r
def CheckForBadCharacters(filename, lines, error):
  """Logs an error for each line containing bad characters.

  Two kinds of bad characters:

  1. Unicode replacement characters: These indicate that either the file
  contained invalid UTF-8 (likely) or Unicode replacement characters (which
  it shouldn't). Note that it's possible for this to throw off line
  numbering if the invalid UTF-8 occurred adjacent to a newline.

  2. NUL bytes. These are problematic for some tools.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  for linenum, line in enumerate(lines):
    if u'\ufffd' in line:
      error(filename, linenum, 'readability/utf8', 5,
            'Line contains invalid UTF-8 (or Unicode replacement character).')
    if '\0' in line:
      error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
\r
def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """

  # The array lines() was created by adding two newlines to the
  # original file (go figure), then splitting on \n.
  # To verify that the file ends in \n, we just have to make sure the
  # last-but-two element of lines() exists and is empty.
  ends_with_newline = len(lines) >= 3 and not lines[-2]
  if not ends_with_newline:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')
\r
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs an error if we see /* ... */ or "..." that extend past one line.

  /* ... */ comments are legit inside macros, for one line.
  Otherwise, we prefer // comments, so it's ok to warn about the
  other. Likewise, it's ok for strings to extend across multiple
  lines, as long as a line continuation character (backslash)
  terminates each line. Although not currently prohibited by the C++
  style guide, it's ugly and unnecessary. We don't do well with either
  in this lint program, so we warn about both.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Remove all \\ (escaped backslashes) from the line. They are OK, and the
  # second (escaped) slash may trigger later \" detection erroneously.
  text = clean_lines.elided[linenum].replace('\\\\', '')

  open_comments = text.count('/*')
  close_comments = text.count('*/')
  if open_comments > close_comments:
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings. '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')

  unescaped_quotes = text.count('"') - text.count('\\"')
  if unescaped_quotes % 2:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found. This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings. '
          'Use C++11 raw strings or concatenation instead.')
\r
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
#  _rand();  // false positive due to substring match.
#  ->rand();  // some member function rand().
#  ACMRandom rand(seed);  // some variable named rand.
#  ISAACRandom rand();  // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name. This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
    ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
    ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
    ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
    ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
    ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
    ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
    ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
    ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
    ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
    ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
    ('strtok(', 'strtok_r(',
     _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
    ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
    )
\r
def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code has been originally written without consideration of
  multi-threading. Also, engineers are relying on their old experience;
  they have learned posix before threading extensions were added. These
  tests guide the engineers to use thread-safe functions (when using
  posix directly).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  elided_line = clean_lines.elided[linenum]
  for unsafe_func, safe_func, validation_pattern in _THREADING_LIST:
    # Additional pattern matching check to confirm that this is the
    # function we are looking for
    if Search(validation_pattern, elided_line):
      error(filename, linenum, 'runtime/threadsafe_fn', 2,
            'Consider using ' + safe_func +
            '...) instead of ' + unsafe_func +
            '...) for improved thread safety.')
\r
def CheckVlogArguments(filename, clean_lines, linenum, error):
  """Checks that VLOG() is only used for defining a logging level.

  For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
  VLOG(FATAL) are not.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  elided_line = clean_lines.elided[linenum]
  bad_vlog = Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', elided_line)
  if bad_vlog:
    error(filename, linenum, 'runtime/vlog', 5,
          'VLOG() should be used with numeric verbosity level. '
          'Use LOG() if you want symbolic severity levels.')
\r
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')
\r
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example following function:
  void increment_counter(int* count) {
    *count++;
  }
  is invalid, because it effectively does count++, moving pointer, and should
  be replaced with ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  current_line = clean_lines.elided[linenum]
  bad_increment = _RE_PATTERN_INVALID_INCREMENT.match(current_line)
  if bad_increment:
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')
\r
def IsMacroDefinition(clean_lines, linenum):
  """Returns True if the line defines (or continues) a preprocessor macro."""
  if Search(r'^#define', clean_lines[linenum]):
    return True

  # A trailing backslash on the previous line means this line continues
  # a macro definition.
  if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
    return True

  return False
\r
def IsForwardClassDeclaration(clean_lines, linenum):
  """Returns True if the line is a forward class declaration like `class X;`."""
  return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
\r
class _BlockInfo(object):
  """Stores information about a generic block of code."""

  def __init__(self, seen_open_brace):
    # True once the block's opening '{' has been consumed.
    self.seen_open_brace = seen_open_brace
    # Running count of unbalanced '(' inside this block.
    self.open_parentheses = 0
    # Inline assembly state for this block (_NO_ASM and friends).
    self.inline_asm = _NO_ASM
    # Whether namespace-indentation checks apply inside this block.
    self.check_namespace_indentation = False

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Run checks that applies to text up to the opening brace.

    This is mostly for checking the text after the class identifier
    and the "{", usually where the base class is specified. For other
    blocks, there isn't much to check, so we always pass.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Run checks that applies to text after the closing brace.

    This is mostly used for checking end of namespace comments.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass

  def IsBlockInfo(self):
    """Returns true if this block is a _BlockInfo.

    This is convenient for verifying that an object is an instance of
    a _BlockInfo, but not an instance of any of the derived classes.

    Returns:
      True for this class, False for derived classes.
    """
    return self.__class__ == _BlockInfo
\r
class _ExternCInfo(_BlockInfo):
  """Stores information about an 'extern "C"' block."""

  def __init__(self):
    # An extern "C" block has its brace on the same line, so the head
    # is considered complete immediately.
    _BlockInfo.__init__(self, True)
\r
class _ClassInfo(_BlockInfo):
  """Stores information about a class."""

  def __init__(self, name, class_or_struct, clean_lines, linenum):
    _BlockInfo.__init__(self, False)
    self.name = name
    self.starting_linenum = linenum
    self.is_derived = False
    self.check_namespace_indentation = True
    if class_or_struct == 'struct':
      self.access = 'public'
      self.is_struct = True
    else:
      self.access = 'private'
      self.is_struct = False

    # Remember initial indentation level for this class.  Using raw_lines here
    # instead of elided to account for leading comments.
    self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])

    # Try to find the end of the class.  This will be confused by things like:
    #   class A {
    #   } *x = { ...
    #
    # But it's still good enough for CheckSectionSpacing.
    self.last_line = 0
    depth = 0
    for i in range(linenum, clean_lines.NumLines()):
      line = clean_lines.elided[i]
      depth += line.count('{') - line.count('}')
      if not depth:
        self.last_line = i
        break

  def CheckBegin(self, filename, clean_lines, linenum, error):
    # Look for a bare ':' (not '::') which indicates a base-class list.
    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
      self.is_derived = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    # If there is a DISALLOW macro, it should appear near the end of
    # the class.
    seen_last_thing_in_class = False
    # Fixed: use range instead of xrange for consistency with the rest of
    # the file and Python 3 compatibility (xrange was removed in Python 3).
    for i in range(linenum - 1, self.starting_linenum, -1):
      match = Search(
          r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
          self.name + r'\)',
          clean_lines.elided[i])
      if match:
        if seen_last_thing_in_class:
          error(filename, i, 'readability/constructors', 3,
                match.group(1) + ' should be the last thing in the class')
        break

      if not Match(r'^\s*$', clean_lines.elided[i]):
        seen_last_thing_in_class = True

    # Check that closing brace is aligned with beginning of the class.
    # Only do this if the closing brace is indented by only whitespaces.
    # This means we will not check single-line class definitions.
    indent = Match(r'^( *)\}', clean_lines.elided[linenum])
    if indent and len(indent.group(1)) != self.class_indent:
      if self.is_struct:
        parent = 'struct ' + self.name
      else:
        parent = 'class ' + self.name
      error(filename, linenum, 'whitespace/indent', 3,
            'Closing brace should be aligned with beginning of %s' % parent)
\r
class _NamespaceInfo(_BlockInfo):
  """Stores information about a namespace."""

  def __init__(self, name, linenum):
    _BlockInfo.__init__(self, False)
    # Anonymous namespaces are stored as the empty string.
    self.name = name or ''
    self.starting_linenum = linenum
    self.check_namespace_indentation = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check end of namespace comments."""
    line = clean_lines.raw_lines[linenum]

    # Check how many lines is enclosed in this namespace. Don't issue
    # warning for missing namespace comments if there aren't enough
    # lines. However, do apply checks if there is already an end of
    # namespace comment and it's incorrect.
    #
    # TODO(unknown): We always want to check end of namespace comments
    # if a namespace is large, but sometimes we also want to apply the
    # check if a short namespace contained nontrivial things (something
    # other than forward declarations). There is currently no logic on
    # deciding what these nontrivial things are, so this check is
    # triggered by namespace size only, which works most of the time.
    if (linenum - self.starting_linenum < 10
        and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
      return

    # Look for matching comment at end of namespace.
    #
    # Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminate namespaces inside
    # preprocessor macros can be cpplint clean.
    #
    # We also accept stuff like "// end of namespace <name>." with the
    # period at the end.
    #
    # Besides these, we don't accept anything else, otherwise we might
    # get false negatives when existing comment is a substring of the
    # expected namespace.
    if self.name:
      # Named namespace
      if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
                    r'[\*/\.\\\s]*$'),
                   line):
        error(filename, linenum, 'readability/namespace', 5,
              'Namespace should be terminated with "// namespace %s"' %
              self.name)
    else:
      # Anonymous namespace
      if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
        # If "// namespace anonymous" or "// anonymous namespace (more text)",
        # mention "// anonymous namespace" as an acceptable form
        if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"'
                ' or "// anonymous namespace"')
        else:
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"')
\r
2187 class _PreprocessorInfo(object):
\r
2188 """Stores checkpoints of nesting stacks when #if/#else is seen."""
\r
2190 def __init__(self, stack_before_if):
\r
2191 # The entire nesting stack before #if
\r
2192 self.stack_before_if = stack_before_if
\r
2194 # The entire nesting stack up to #else
\r
2195 self.stack_before_else = []
\r
2197 # Whether we have already seen #else or #elif
\r
2198 self.seen_else = False
\r
class NestingState(object):
  """Holds states related to parsing braces."""

  def __init__(self):
    # Stack for tracking all braces. An object is pushed whenever we
    # see a "{", and popped when we see a "}". Only 3 types of
    # objects are possible:
    # - _ClassInfo: a class or struct.
    # - _NamespaceInfo: a namespace.
    # - _BlockInfo: some other type of block.
    self.stack = []

    # Top of the previous stack before each Update().
    #
    # Because the nesting_stack is updated at the end of each line, we
    # had to do some convoluted checks to find out what is the current
    # scope at the beginning of the line. This check is simplified by
    # saving the previous top of nesting stack.
    #
    # We could save the full stack, but we only need the top. Copying
    # the full nesting stack would slow down cpplint by ~10%.
    self.previous_stack_top = []

    # Stack of _PreprocessorInfo objects.
    self.pp_stack = []

  def SeenOpenBrace(self):
    """Check if we have seen the opening brace for the innermost block.

    Returns:
      True if we have seen the opening brace, False if the innermost
      block is still expecting an opening brace.
    """
    return (not self.stack) or self.stack[-1].seen_open_brace

  def InNamespaceBody(self):
    """Check if we are currently one level inside a namespace body.

    Returns:
      True if top of the stack is a namespace block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _NamespaceInfo)

  def InExternC(self):
    """Check if we are currently one level inside an 'extern "C"' block.

    Returns:
      True if top of the stack is an extern block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ExternCInfo)

  def InClassDeclaration(self):
    """Check if we are currently one level inside a class or struct declaration.

    Returns:
      True if top of the stack is a class/struct, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ClassInfo)

  def InAsmBlock(self):
    """Check if we are currently one level inside an inline ASM block.

    Returns:
      True if the top of the stack is a block containing inline ASM.
    """
    return self.stack and self.stack[-1].inline_asm != _NO_ASM

  def InTemplateArgumentList(self, clean_lines, linenum, pos):
    """Check if current position is inside template argument list.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      pos: position just after the suspected template argument.
    Returns:
      True if (linenum, pos) is inside template arguments.
    """
    while linenum < clean_lines.NumLines():
      # Find the earliest character that might indicate a template argument
      line = clean_lines.elided[linenum]
      match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
      if not match:
        linenum += 1
        pos = 0
        continue
      token = match.group(1)
      pos += len(match.group(0))

      # These things do not look like template argument list:
      #   class Suspect {
      #   class Suspect x; }
      if token in ('{', '}', ';'): return False

      # These things look like template argument list:
      #   template <class Suspect>
      #   template <class Suspect = default_value>
      #   template <class Suspect[]>
      #   template <class Suspect...>
      if token in ('>', '=', '[', ']', '.'): return True

      # Check if token is an unmatched '<'.
      # If not, move on to the next character.
      if token != '<':
        pos += 1
        if pos >= len(line):
          linenum += 1
          pos = 0
        continue

      # We can't be sure if we just find a single '<', and need to
      # find the matching '>'.
      (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
      if end_pos < 0:
        # Not sure if template argument list or syntax error in file
        return False
      linenum = end_line
      pos = end_pos
    return False

  def UpdatePreprocessor(self, line):
    """Update preprocessor stack.

    We need to handle preprocessors due to classes like this:
      #ifdef SWIG
      struct ResultDetailsPageElementExtensionPoint {
      #else
      struct ResultDetailsPageElementExtensionPoint : public Extension {
      #endif

    We make the following assumptions (good enough for most files):
    - Preprocessor condition evaluates to true from #if up to first
      #else/#elif/#endif.
    - Preprocessor condition evaluates to false from #else/#elif up
      to #endif. We still perform lint checks on these lines, but
      these do not affect nesting stack.

    Args:
      line: current line to check.
    """
    if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
      # Beginning of #if block, save the nesting stack here. The saved
      # stack will allow us to restore the parsing state in the #else case.
      self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
    elif Match(r'^\s*#\s*(else|elif)\b', line):
      # Beginning of #else block
      if self.pp_stack:
        if not self.pp_stack[-1].seen_else:
          # This is the first #else or #elif block. Remember the
          # whole nesting stack up to this point. This is what we
          # keep after the #endif.
          self.pp_stack[-1].seen_else = True
          self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)

        # Restore the stack to how it was before the #if
        self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
      else:
        # TODO(unknown): unexpected #else, issue warning?
        pass
    elif Match(r'^\s*#\s*endif\b', line):
      # End of #if or #else blocks.
      if self.pp_stack:
        # If we saw an #else, we will need to restore the nesting
        # stack to its former state before the #else, otherwise we
        # will just continue from where we left off.
        if self.pp_stack[-1].seen_else:
          # Here we can just use a shallow copy since we are the last
          # reference to it.
          self.stack = self.pp_stack[-1].stack_before_else
        # Drop the corresponding #if
        self.pp_stack.pop()
      else:
        # TODO(unknown): unexpected #endif, issue warning?
        pass

  # TODO(unknown): Update() is too long, but we will refactor later.
  def Update(self, filename, clean_lines, linenum, error):
    """Update nesting state with current line.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # Remember top of the previous nesting stack.
    #
    # The stack is always pushed/popped and not modified in place, so
    # we can just do a shallow copy instead of copy.deepcopy. Using
    # deepcopy would slow down cpplint by ~28%.
    if self.stack:
      self.previous_stack_top = self.stack[-1]
    else:
      self.previous_stack_top = None

    # Update pp_stack first since it is independent of the brace stack.
    self.UpdatePreprocessor(line)

    # Count parentheses. This is to avoid adding struct arguments to
    # the nesting stack.
    if self.stack:
      inner_block = self.stack[-1]
      depth_change = line.count('(') - line.count(')')
      inner_block.open_parentheses += depth_change

      # Also check if we are starting or ending an inline assembly block.
      if inner_block.inline_asm in (_NO_ASM, _END_ASM):
        if (depth_change != 0 and
            inner_block.open_parentheses == 1 and
            _MATCH_ASM.match(line)):
          # Enter assembly block
          inner_block.inline_asm = _INSIDE_ASM
        else:
          # Not entering assembly block. If previous line was _END_ASM,
          # we will now shift to _NO_ASM state.
          inner_block.inline_asm = _NO_ASM
      elif (inner_block.inline_asm == _INSIDE_ASM and
            inner_block.open_parentheses == 0):
        # Exit assembly block
        inner_block.inline_asm = _END_ASM

    # Consume namespace declaration at the beginning of the line. Do
    # this in a loop so that we catch same line declarations like this:
    #   namespace proto2 { namespace bridge { class MessageSet; } }
    while True:
      # Match start of namespace. The "\b\s*" below catches namespace
      # declarations even if it weren't followed by a whitespace, this
      # is so that we don't confuse our namespace checker. The
      # missing spaces will be flagged by CheckSpacing.
      namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
      if not namespace_decl_match:
        break

      new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
      self.stack.append(new_namespace)

      line = namespace_decl_match.group(2)
      if line.find('{') != -1:
        new_namespace.seen_open_brace = True
        line = line[line.find('{') + 1:]

    # Look for a class declaration in whatever is left of the line
    # after parsing namespaces. The regexp accounts for decorated classes
    # such as in:
    #   class LOCKABLE API Object {
    #   };
    class_decl_match = Match(
        r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
        r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
        r'(.*)$', line)
    if (class_decl_match and
        (not self.stack or self.stack[-1].open_parentheses == 0)):
      # We do not want to accept classes that are actually template arguments:
      #   template <class Ignore1,
      #             class Ignore2 = Default<Args>,
      #             template <Args> class Ignore3>
      #   void Function() {};
      #
      # To avoid template argument cases, we scan forward and look for
      # an unmatched '>'. If we see one, assume we are inside a
      # template argument list.
      end_declaration = len(class_decl_match.group(1))
      if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
        self.stack.append(_ClassInfo(
            class_decl_match.group(3), class_decl_match.group(2),
            clean_lines, linenum))
      line = class_decl_match.group(4)

    # If we have not yet seen the opening brace for the innermost block,
    # run checks here.
    if not self.SeenOpenBrace():
      self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)

    # Update access control if we are inside a class/struct
    if self.stack and isinstance(self.stack[-1], _ClassInfo):
      classinfo = self.stack[-1]
      access_match = Match(
          r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
          r':(?:[^:]|$)', line)
      if access_match:
        classinfo.access = access_match.group(2)

        # Check that access keywords are indented +1 space. Skip this
        # check if the keywords are not preceded by whitespaces.
        indent = access_match.group(1)
        if (len(indent) != classinfo.class_indent + 1 and
            Match(r'^\s*$', indent)):
          if classinfo.is_struct:
            parent = 'struct ' + classinfo.name
          else:
            parent = 'class ' + classinfo.name
          slots = ''
          if access_match.group(3):
            slots = access_match.group(3)
          error(filename, linenum, 'whitespace/indent', 3,
                '%s%s: should be indented +1 space inside %s' % (
                    access_match.group(2), slots, parent))

    # Consume braces or semicolons from what's left of the line
    while True:
      # Match first brace, semicolon, or closed parenthesis.
      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
      if not matched:
        break

      token = matched.group(1)
      if token == '{':
        # If namespace or class hasn't seen a opening brace yet, mark
        # namespace/class head as complete. Push a new block onto the
        # stack otherwise.
        if not self.SeenOpenBrace():
          self.stack[-1].seen_open_brace = True
        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
          self.stack.append(_ExternCInfo())
        else:
          self.stack.append(_BlockInfo(True))
          if _MATCH_ASM.match(line):
            self.stack[-1].inline_asm = _BLOCK_ASM

      elif token == ';' or token == ')':
        # If we haven't seen an opening brace yet, but we already saw
        # a semicolon, this is probably a forward declaration. Pop
        # the stack for these.
        #
        # Similarly, if we haven't seen an opening brace yet, but we
        # already saw a closing parenthesis, then these are probably
        # function arguments with extra "class" or "struct" keywords.
        # Also pop these stack for these.
        if not self.SeenOpenBrace():
          self.stack.pop()
      else:  # token == '}'
        # Perform end of block checks and pop the stack.
        if self.stack:
          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
          self.stack.pop()
      line = matched.group(2)

  def InnermostClass(self):
    """Get class info on the top of the stack.

    Returns:
      A _ClassInfo object if we are inside a class, or None otherwise.
    """
    for i in range(len(self.stack), 0, -1):
      classinfo = self.stack[i - 1]
      if isinstance(classinfo, _ClassInfo):
        return classinfo
    return None

  def CheckCompletedBlocks(self, filename, error):
    """Checks that all classes and namespaces have been completely parsed.

    Call this when all lines in a file have been processed.
    Args:
      filename: The name of the current file.
      error: The function to call with any errors found.
    """
    # Note: This test can result in false positives if #ifdef constructs
    # get in the way of brace matching. See the testBuildClass test in
    # cpplint_unittest.py for an example of this.
    for obj in self.stack:
      if isinstance(obj, _ClassInfo):
        error(filename, obj.starting_linenum, 'build/class', 5,
              'Failed to find complete declaration of class %s' %
              obj.name)
      elif isinstance(obj, _NamespaceInfo):
        error(filename, obj.starting_linenum, 'build/namespaces', 5,
              'Failed to find complete declaration of namespace %s' %
              obj.name)
\r
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  nesting_state, error):
  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

  Complain about several constructs which gcc-2 accepts, but which are
  not standard C++. Warning about these in lint is one way to ease the
  transition to new compilers.
  - put storage class first (e.g. "static const" instead of "const static").
  - "%lld" instead of %qd" in printf-type functions.
  - "%1$d" is non-standard in printf-type functions.
  - "\%" is an undefined character escape sequence.
  - text after #endif is not allowed.
  - invalid inner-style forward declaration.
  - >? and <? operators, and their >?= and <?= cousins.

  Additionally, check for constructor/destructor style violations and reference
  members, as it is very convenient to do so while checking for
  gcc-2 compliance.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
  """

  # Remove comments from the line, but leave in strings for now.
  line = clean_lines.lines[linenum]

  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
    error(filename, linenum, 'runtime/printf_format', 3,
          '%q in format strings is deprecated. Use %ll instead.')

  if Search(r'printf\s*\(.*".*%\d+\$', line):
    error(filename, linenum, 'runtime/printf_format', 2,
          '%N$ formats are unconventional. Try rewriting to avoid them.')

  # Remove escaped backslashes before looking for undefined escapes.
  line = line.replace('\\\\', '')

  if Search(r'("|\').*\\(%|\[|\(|{)', line):
    error(filename, linenum, 'build/printf_format', 3,
          '%, [, (, and { are undefined character escapes. Unescape them.')

  # For the rest, work with both comments and strings removed.
  line = clean_lines.elided[linenum]

  if Search(r'\b(const|volatile|void|char|short|int|long'
            r'|float|double|signed|unsigned'
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
            r'\s+(register|static|extern|typedef)\b',
            line):
    error(filename, linenum, 'build/storage_class', 5,
          'Storage class (static, extern, typedef, etc) should be first.')

  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
    error(filename, linenum, 'build/endif_comment', 5,
          'Uncommented text after #endif is non-standard. Use a comment.')

  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
    error(filename, linenum, 'build/forward_decl', 5,
          'Inner-style forward declarations are invalid. Remove this line.')

  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
            line):
    error(filename, linenum, 'build/deprecated', 3,
          '>? and <? (max and min) operators are non-standard and deprecated.')

  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
    # TODO(unknown): Could it be expanded safely to arbitrary references,
    # without triggering too many false positives? The first
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
    # the restriction.
    # Here's the original regexp, for the reference:
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
    error(filename, linenum, 'runtime/member_string_references', 2,
          'const string& members are dangerous. It is much better to use '
          'alternatives, such as pointers or simple constants.')

  # Everything else in this function operates on class declarations.
  # Return early if the top of the nesting stack is not a class, or if
  # the class head is not completed yet.
  classinfo = nesting_state.InnermostClass()
  if not classinfo or not classinfo.seen_open_brace:
    return

  # The class may have been declared with namespace or classname qualifiers.
  # The constructor and destructor will not have those qualifiers.
  base_classname = classinfo.name.split('::')[-1]

  # Look for single-argument constructors that aren't marked explicit.
  # Technically a valid construct, but against style. Also look for
  # non-single-argument constructors which are also technically valid, but
  # strongly suggest something is wrong.
  explicit_constructor_match = Match(
      r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
      r'\(((?:[^()]|\([^()]*\))*)\)'
      % re.escape(base_classname),
      line)

  if explicit_constructor_match:
    is_marked_explicit = explicit_constructor_match.group(1)

    if not explicit_constructor_match.group(2):
      constructor_args = []
    else:
      constructor_args = explicit_constructor_match.group(2).split(',')

    # collapse arguments so that commas in template parameter lists and function
    # argument parameter lists don't split arguments in two
    i = 0
    while i < len(constructor_args):
      constructor_arg = constructor_args[i]
      while (constructor_arg.count('<') > constructor_arg.count('>') or
             constructor_arg.count('(') > constructor_arg.count(')')):
        constructor_arg += ',' + constructor_args[i + 1]
        del constructor_args[i + 1]
      constructor_args[i] = constructor_arg
      i += 1

    defaulted_args = [arg for arg in constructor_args if '=' in arg]
    noarg_constructor = (not constructor_args or  # empty arg list
                         # 'void' arg specifier
                         (len(constructor_args) == 1 and
                          constructor_args[0].strip() == 'void'))
    onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
                           not noarg_constructor) or
                          # all but at most one arg defaulted
                          (len(constructor_args) >= 1 and
                           not noarg_constructor and
                           len(defaulted_args) >= len(constructor_args) - 1))
    initializer_list_constructor = bool(
        onearg_constructor and
        Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
    copy_constructor = bool(
        onearg_constructor and
        Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
              % re.escape(base_classname), constructor_args[0].strip()))

    if (not is_marked_explicit and
        onearg_constructor and
        not initializer_list_constructor and
        not copy_constructor):
      if defaulted_args:
        error(filename, linenum, 'runtime/explicit', 5,
              'Constructors callable with one argument '
              'should be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 5,
              'Single-parameter constructors should be marked explicit.')
    elif is_marked_explicit and not onearg_constructor:
      if noarg_constructor:
        error(filename, linenum, 'runtime/explicit', 5,
              'Zero-parameter constructors should not be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 0,
              'Constructors that require multiple arguments '
              'should not be marked explicit.')
\r
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
  """Checks for the correctness of various spacing around function calls.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Since function calls often occur inside if/for/while/switch
  # expressions - which have their own, more liberal conventions - we
  # first see if we should be looking inside such an expression for a
  # function call, to which we can apply more strict standards.
  fncall = line    # if there's no control flow construct, look at whole line
  for pattern in (r'\bif\s*\((.*)\)\s*{',
                  r'\bfor\s*\((.*)\)\s*{',
                  r'\bwhile\s*\((.*)\)\s*[{;]',
                  r'\bswitch\s*\((.*)\)\s*{'):
    match = Search(pattern, line)
    if match:
      fncall = match.group(1)    # look inside the parens for function calls
      break

  # Except in if/for/while/switch, there should never be space
  # immediately inside parens (eg "f( 3, 4 )"). We make an exception
  # for nested parens ( (a+b) + c ). Likewise, there should never be
  # a space before a ( when it's a function argument. I assume it's a
  # function argument when the char before the whitespace is legal in
  # a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions coz they're too tricky:
  # we use a very simple way to recognize these:
  # " (something)(maybe-something)" or
  # " (something)(maybe-something," or
  # " (something)[something]"
  # Note that we assume the contents of [] to be short enough that
  # they'll never need to wrap.
  if (  # Ignore control structures.
      not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
                 fncall) and
      # Ignore pointers/references to functions.
      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
      # Ignore pointers/references to arrays.
      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
      error(filename, linenum, 'whitespace/parens', 4,
            '[SPC_M_SEP]Extra space after ( in function call')
    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            '[SPC_M_SEP]Extra space after (')
    if (Search(r'\w\s+\(', fncall) and
        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
        not Search(r'\bcase\s+\(', fncall)):
      # TODO(unknown): Space after an operator function seem to be a common
      # error, silence those for now by restricting them to highest verbosity.
      if Search(r'\boperator_*\b', line):
        error(filename, linenum, 'whitespace/parens', 0,
              '[SPC_M_SEP]Extra space before ( in function call')
      else:
        error(filename, linenum, 'whitespace/parens', 4,
              '[SPC_M_SEP]Extra space before ( in function call')
    # If the ) is followed only by a newline or a { + newline, assume it's
    # part of a control statement (if/while/etc), and don't complain
    if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
      # If the closing parenthesis is preceded by only whitespaces,
      # try to give a more descriptive error message.
      if Search(r'^\s+\)', fncall):
        error(filename, linenum, 'whitespace/parens', 2,
              '[SPC_M_SEP]Closing ) should be moved to the previous line')
      else:
        error(filename, linenum, 'whitespace/parens', 2,
              '[SPC_M_SEP]Extra space before )')
\r
def IsBlankLine(line):
  """Returns true if the given line is blank.

  We consider a line to be blank if the line is empty or consists of
  only white spaces.

  Args:
    line: A line of a string.

  Returns:
    True, if the given line is blank.
  """
  return not line or line.isspace()
\r
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                                 error):
  """Checks that items are not indented inside a namespace body.

  Args:
    filename: The name of the current file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    clean_lines: A CleansedLines instance containing the file.
    line: The number of the line to check.
    error: The function to call with any errors found.
  """
  # True only when the innermost scope is a namespace we just entered:
  # the previous stack top is a _NamespaceInfo and it is the scope one
  # level up from the current top.
  is_namespace_indent_item = (
      len(nesting_state.stack) > 1 and
      nesting_state.stack[-1].check_namespace_indentation and
      isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
      nesting_state.previous_stack_top == nesting_state.stack[-2])

  if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                     clean_lines.elided, line):
    CheckItemIndentationInNamespace(filename, clean_lines.elided, line,
                                    error)
\r
def CheckForFunctionLengths(filename, clean_lines, linenum,
                            function_state, error):
  """Reports for long function bodies.

  For an overview why this is done, see:
  http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions

  Uses a simplistic algorithm assuming other style guidelines
  (especially spacing) are followed.
  Only checks unindented functions, so class members are unchecked.
  Trivial bodies are unchecked, so constructors with huge initializer lists
  may be missed.
  Blank/comment lines are not counted so as to avoid encouraging the removal
  of vertical space and comments just to get through a lint check.
  NOLINT *on the last line of a function* disables this check.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    function_state: Current function name and lines in body so far.
    error: The function to call with any errors found.
  """
  lines = clean_lines.lines
  line = lines[linenum]
  joined_line = ''

  starting_func = False
  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
  match_result = Match(regexp, line)
  if match_result:
    # If the name is all caps and underscores, figure it's a macro and
    # ignore it, unless it's TEST or TEST_F.
    function_name = match_result.group(1).split()[-1]
    if function_name == 'TEST' or function_name == 'TEST_F' or (
        not Match(r'[A-Z_]+$', function_name)):
      starting_func = True

  if starting_func:
    body_found = False
    # Scan forward until we find the start of the body (an opening brace)
    # or evidence this is just a declaration (';' or '}').
    for start_linenum in xrange(linenum, clean_lines.NumLines()):
      start_line = lines[start_linenum]
      joined_line += ' ' + start_line.lstrip()
      if Search(r'(;|})', start_line):  # Declarations and trivial functions
        body_found = True
        break                           # ... ignore
      elif Search(r'{', start_line):
        body_found = True
        function = Search(r'((\w|:)*)\(', line).group(1)
        if Match(r'TEST', function):    # Handle TEST... macros
          parameter_regexp = Search(r'(\(.*\))', joined_line)
          if parameter_regexp:          # Ignore bad syntax
            function += parameter_regexp.group(1)
        else:
          function += '()'
        function_state.Begin(function)
        break
    if not body_found:
      # No body for the function (or evidence of a non-function) was found.
      error(filename, linenum, 'readability/fn_size', 5,
            'Lint failed to find start of function body.')
  elif Match(r'^\}\s*$', line):  # function end
    function_state.Check(error, filename, linenum)
    function_state.End()
  elif not Match(r'^\s*$', line):
    function_state.Count()  # Count non-blank/non-comment lines.
\r
# TODO comment pattern: group 1 = whitespace between '//' and 'TODO',
# group 2 = optional '(username)', group 3 = whitespace (or end of line)
# following the optional colon.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
\r
def CheckComment(line, filename, linenum, next_line_start, error):
  """Checks for common mistakes in comments.

  Args:
    line: The line in question.
    filename: The name of the current file.
    linenum: The number of the line to check.
    next_line_start: The first non-whitespace column of the next line.
    error: The function to call with any errors found.
  """
  commentpos = line.find('//')
  if commentpos != -1:
    # Check if the // may be in quotes.  If so, ignore it
    # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
    if (line.count('"', 0, commentpos) -
        line.count('\\"', 0, commentpos)) % 2 == 0:   # not in quotes
      # Allow one space for new scopes, two spaces otherwise:
      if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
          ((commentpos >= 1 and
            line[commentpos-1] not in string.whitespace) or
           (commentpos >= 2 and
            line[commentpos-2] not in string.whitespace))):
        error(filename, linenum, 'whitespace/comments', 2,
              'At least two spaces is best between code and comments')

      # Checks for common mistakes in TODO comments.
      comment = line[commentpos:]
      match = _RE_PATTERN_TODO.match(comment)
      if match:
        # One whitespace is correct; zero whitespace is handled elsewhere.
        leading_whitespace = match.group(1)
        if len(leading_whitespace) > 1:
          error(filename, linenum, 'whitespace/todo', 2,
                'Too many spaces before TODO')

        username = match.group(2)
        if not username:
          error(filename, linenum, 'readability/todo', 2,
                'Missing username in TODO; it should look like '
                '"// TODO(my_username): Stuff."')

        middle_whitespace = match.group(3)
        # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
        if middle_whitespace != ' ' and middle_whitespace != '':
          error(filename, linenum, 'whitespace/todo', 2,
                'TODO(my_username) should be followed by a space')

      # If the comment contains an alphanumeric character, there
      # should be a space somewhere between it and the // unless
      # it's a /// or //! Doxygen comment.
      if (Match(r'//[^ ]*\w', comment) and
          not Match(r'(///|//\!)(\s+|$)', comment)):
        error(filename, linenum, 'whitespace/comments', 4,
              'Should have a space between // and comment')
\r
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
  """Checks for improper use of DISALLOW* macros.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]  # get rid of comments and strings

  matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
                   r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
  if not matched:
    return
  if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
    if nesting_state.stack[-1].access != 'private':
      error(filename, linenum, 'readability/constructors', 3,
            '%s must be in the private: section' % matched.group(1))

  else:
    # Found DISALLOW* macro outside a class declaration, or perhaps it
    # was used inside a function when it should have been part of the
    # class declaration.  We could issue a warning here, but it
    # probably resulted in a compiler error already.
    pass
\r
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for the correctness of various spacing issues in the code.

  Things we check for: spaces around operators, spaces after
  if/for/while/switch, no spaces around parens in function calls, two
  spaces between code and comment, don't start a block with a blank
  line, don't end a function with a blank line, don't add a blank line
  after public/protected/private, don't have too many blank lines in a row.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """

  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw = clean_lines.lines_without_raw_strings
  line = raw[linenum]

  # Before nixing comments, check if the line is blank for no good
  # reason.  This includes the first line after a block is opened, and
  # blank lines at the end of a function (ie, right before a line like '}').
  #
  # Skip all the blank line checks if we are immediately inside a
  # namespace body.  In other words, don't issue blank line warnings
  # for this line:
  #   namespace {
  #
  # A warning about missing end of namespace comments will be issued instead.
  #
  # Also skip blank line checks for 'extern "C"' blocks, which are formatted
  # like namespaces.
  if (IsBlankLine(line) and
      not nesting_state.InNamespaceBody() and
      not nesting_state.InExternC()):
    elided = clean_lines.elided
    prev_line = elided[linenum - 1]
    prevbrace = prev_line.rfind('{')
    # TODO(unknown): Don't complain if line before blank line, and line after,
    #                both start with alnums and are indented the same amount.
    #                This ignores whitespace at the start of a namespace block
    #                because those are not usually indented.
    if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
      # OK, we have a blank line at the start of a code block.  Before we
      # complain, we check if it is an exception to the rule: The previous
      # non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in a 80 column line when placed on
      # the same line as the function name).  We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into a 80 column line.
      exception = False
      if Match(r' {6}\w', prev_line):  # Initializer list?
        # We are looking for the opening column of initializer list, which
        # should be indented 4 spaces to cause 6 space indentation afterwards.
        search_position = linenum-2
        while (search_position >= 0
               and Match(r' {6}\w', elided[search_position])):
          search_position -= 1
        exception = (search_position >= 0
                     and elided[search_position][:5] == '    :')
      else:
        # Search for the function arguments or an initializer list.  We use a
        # simple heuristic here: If the line is indented 4 spaces; and we have a
        # closing paren, without the opening paren, followed by an opening brace
        # or colon (for initializer lists) we assume that it is the last line of
        # a function header.  If we have a colon indented 4 spaces, it is an
        # initializer list.
        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
                           prev_line)
                     or Match(r' {4}:', prev_line))

      if not exception:
        error(filename, linenum, 'whitespace/blank_line', 2,
              '[LNE_R_TWS]Redundant blank line at the start of a code block '
              'should be deleted.')
    # Ignore blank lines at the end of a block in a long if-else
    # chain, like this:
    #   if (condition1) {
    #     // Something followed by a blank line
    #
    #   } else if (condition2) {
    #     // Something else
    #   }
    if linenum + 1 < clean_lines.NumLines():
      next_line = raw[linenum + 1]
      if (next_line
          and Match(r'\s*}', next_line)
          and next_line.find('} else ') == -1):
        error(filename, linenum, 'whitespace/blank_line', 3,
              '[LNE_R_TWS]Redundant blank line at the end of a code block '
              'should be deleted.')

    matched = Match(r'\s*(public|protected|private):', prev_line)
    if matched:
      # NOTE(review): this warning is disabled in this fork; upstream cpplint
      # reported a blank line after an access specifier here.
      # error(filename, linenum, 'whitespace/blank_line', 3,
      #       'Do not leave a blank line after "%s:"' % matched.group(1))
      pass

  # Next, check comments
  next_line_start = 0
  if linenum + 1 < clean_lines.NumLines():
    next_line = raw[linenum + 1]
    next_line_start = len(next_line) - len(next_line.lstrip())
  CheckComment(line, filename, linenum, next_line_start, error)

  # get rid of comments and strings
  line = clean_lines.elided[linenum]

  # You shouldn't have spaces before your brackets, except maybe after
  # 'delete []' or 'return []() {};'
  if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
    error(filename, linenum, 'whitespace/braces', 5,
          '[SPC_M_SEP]Extra space before [')

  # In range-based for, we wanted spaces before and after the colon, but
  # not around "::" tokens that might appear.
  if (Search(r'for *\(.*[^:]:[^: ]', line) or
      Search(r'for *\(.*[^: ]:[^:]', line)):
    error(filename, linenum, 'whitespace/forcolon', 2,
          'Missing space around colon in range-based for loop')
\r
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around operators.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Don't try to do spacing checks for operator methods.  Do this by
  # replacing the troublesome characters with something else,
  # preserving column position for all other characters.
  #
  # The replacement is done repeatedly to avoid false positives from
  # operators that call operators.
  while True:
    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
    if match:
      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
    else:
      break

  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
  # Otherwise not.  Note we only check for non-spaces on *both* sides;
  # sometimes people put non-spaces on one side when aligning ='s among
  # many lines (not that this is behavior that I approve of...)
  if ((Search(r'[\w.]=', line) or
       Search(r'=[\w.]', line))
      and not Search(r'\b(if|while|for) ', line)
      # Operators taken from [lex.operators] in C++11 standard.
      and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
      and not Search(r'operator=', line)):
    error(filename, linenum, 'whitespace/operators', 4,
          '[SPC_M_OPR]Missing spaces around =')

  # It's ok not to have spaces around binary operators like + - * /, but if
  # there's too little whitespace, we get concerned.  It's hard to tell,
  # though, so we punt on this one for now.  TODO.

  # You should always have whitespace around binary operators.
  #
  # Check <= and >= first to avoid false positives with < and >, then
  # check non-include lines for spacing around < and >.
  #
  # If the operator is followed by a comma, assume it's be used in a
  # macro context and don't do any checks.  This avoids false
  # positives.
  #
  # Note that && is not included here.  Those are checked separately
  # in CheckRValueReference
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          '[SPC_M_OPR]Missing spaces around %s' % match.group(1))
  elif not Match(r'#.*include', line):
    # Look for < that is not surrounded by spaces.  This is only
    # triggered if both sides are missing spaces, even though
    # technically should should flag if at least one side is missing a
    # space.  This is done to avoid some false positives with shifts.
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
    if match:
      (_, _, end_pos) = CloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if end_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              '[SPC_M_OPR]Missing spaces around <')

    # Look for > that is not surrounded by spaces.  Similar to the
    # above, we only trigger if both sides are missing spaces to avoid
    # false positives with shifts.
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
    if match:
      (_, _, start_pos) = ReverseCloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if start_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              '[SPC_M_OPR]Missing spaces around >')

  # We allow no-spaces around << when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams)
  #
  # We also allow operators following an opening parenthesis, since
  # those tend to be macros that deal with operators.
  match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line)
  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
      not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3,
          '[SPC_M_OPR]Missing spaces around <<')

  # We allow no-spaces around >> for almost anything.  This is because
  # C++11 allows ">>" to close nested templates, which accounts for
  # most cases when ">>" is not followed by a space.
  #
  # We still warn on ">>" followed by alpha character, because that is
  # likely due to ">>" being used for right shifts, e.g.:
  #   value >> alpha
  #
  # When ">>" is used to close templates, the alphanumeric letter that
  # follows would be part of an identifier, and there should still be
  # a space separating the template type and the identifier.
  #   type<type<type>> alpha
  match = Search(r'>>[a-zA-Z_]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          '[SPC_M_OPR]Missing spaces around >>')

  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          '[SPC_M_OPR]Extra space for operator %s' % match.group(1))
\r
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around parentheses.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # No spaces after an if, while, switch, or for
  match = Search(r' (if\(|for\(|while\(|switch\()', line)
  if match:
    error(filename, linenum, 'whitespace/parens', 5,
          '[SPC_M_SEP]Missing space before ( in %s' % match.group(1))

  # For if/for/while/switch, the left and right parens should be
  # consistent about how many spaces are inside the parens, and
  # there should either be zero or one spaces inside the parens.
  # We don't want: "if ( foo)" or "if ( foo )".
  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
  match = Search(r'\b(if|for|while|switch)\s*'
                 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                 line)
  if match:
    # NOTE(review): the mismatching-spaces check is disabled in this fork:
    #if len(match.group(2)) != len(match.group(4)):
    #  if not (match.group(3) == ';' and
    #          len(match.group(2)) == 1 + len(match.group(4)) or
    #          not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
    #    error(filename, linenum, 'whitespace/parens', 5,
    #          'Mismatching spaces inside () in %s' % match.group(1))
    if len(match.group(2)) not in [0, 1]:
      error(filename, linenum, 'whitespace/parens', 5,
            '[SPC_M_SEP]Should have zero or one spaces inside ( and ) in %s' %
            match.group(1))
\r
def CheckCommaSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near commas and semicolons.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  raw = clean_lines.lines_without_raw_strings
  line = clean_lines.elided[linenum]

  # You should always have a space after a comma (either as fn arg or operator)
  #
  # This does not apply when the non-space character following the
  # comma is another comma, since the only time when that happens is
  # for empty macro arguments.
  #
  # We run this check in two passes: first pass on elided lines to
  # verify that lines contain missing whitespaces, second pass on raw
  # lines to confirm that those missing whitespaces are not due to
  # elided comments.
  if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
      Search(r',[^,\s]', raw[linenum])):
    error(filename, linenum, 'whitespace/comma', 3,
          '[SPC_M_SEP]Missing space after ,')

  # You should always have a space after a semicolon
  # except for few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more
  # space after the semicolon.
  # NOTE(review): this check is disabled in this fork:
  # if Search(r';[^\s};\\)/]', line):
  #   error(filename, linenum, 'whitespace/semicolon', 3,
  #         'Missing space after ;')
\r
def CheckBracesSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near commas.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces.  And since you should never have braces at the beginning of a line,
  # this is an easy test.
  match = Match(r'^(.*[^ ({>]){', line)
  if match:
    # Try a bit harder to check for brace initialization.  This
    # happens in one of the following forms:
    #   Constructor() : initializer_list_{} { ... }
    #   Constructor{}.MemberFunction()
    #   Type variable{};
    #   FunctionCall(type{}, ...);
    #   LastArgument(..., type{});
    #   LOG(INFO) << type{} << " ...";
    #   map_of_type[{...}] = ...;
    #   ternary = expr ? new type{} : nullptr;
    #   OuterTemplate<InnerTemplateConstructor<Type>{}>
    #
    # We check for the character following the closing brace, and
    # silence the warning if it's one of those listed above, i.e.
    # "{.;,)<>]:".
    #
    # To account for nested initializer list, we allow any number of
    # closing braces up to "{;,)<".  We can't simply silence the
    # warning on first sight of closing brace, because that would
    # cause false negatives for things that are not initializer lists.
    #   Silence this:         But not this:
    #     Outer{                if (...) {
    #       Inner{...}            if (...){  // Missing space before {
    #     };                      }
    #                           }
    #
    # There is a false negative with this approach if people inserted
    # spurious semicolons, e.g. "if (cond){};", but we will catch the
    # spurious semicolon with a separate check.
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    trailing_text = ''
    if endpos > -1:
      trailing_text = endline[endpos:]
    for offset in xrange(endlinenum + 1,
                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
      trailing_text += clean_lines.elided[offset]
    # due to method opening brace false positive, below error has been
    # suppressed in this fork:
    #if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
    #  error(filename, linenum, 'whitespace/braces', 5,
    #        '[SPC_M_SEP]Missing space before {')

  # Make sure '} else {' has spaces.
  if Search(r'}else', line):
    error(filename, linenum, 'whitespace/braces', 5,
          '[SPC_M_KWD]Missing space before else')

  # You shouldn't have a space before a semicolon at the end of the line.
  # There's a special case for "for" since the style guide allows space before
  # the semicolon there.
  if Search(r':\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Semicolon defining empty statement. Use {} instead.')
  elif Search(r'^\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Line contains only semicolon. If this should be an empty statement, '
          'use {} instead.')
  elif (Search(r'\s+;\s*$', line) and
        not Search(r'\bfor\b', line)):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Extra space before last semicolon. If this should be an empty '
          'statement, use {} instead.')
\r
def IsDecltype(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is decltype().

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is decltype() expression, False otherwise.
  """
  (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
  if start_col < 0:
    return False
  if Search(r'\bdecltype\s*$', text[0:start_col]):
    return True
  return False
\r
def IsTemplateParameterList(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is the end of template<>.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is end of a template parameter list, False otherwise.
  """
  (_, startline, startpos) = ReverseCloseExpression(
      clean_lines, linenum, column)
  if (startpos > -1 and
      Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])):
    return True
  return False
\r
def IsRValueType(typenames, clean_lines, nesting_state, linenum, column):
  """Check if the token ending on (linenum, column) is a type.

  Assumes that text to the right of the column is "&&" or a function
  name.

  Args:
    typenames: set of type names from template-argument-list.
    clean_lines: A CleansedLines instance containing the file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is a type, False if we are not sure.
  """
  prefix = clean_lines.elided[linenum][0:column]

  # Get one word to the left.  If we failed to do so, this is most
  # likely not a type, since it's unlikely that the type name and "&&"
  # would be split across multiple lines.
  match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
  if not match:
    return False

  # Check text following the token.  If it's "&&>" or "&&," or "&&...", it's
  # most likely a rvalue reference used inside a template.
  suffix = clean_lines.elided[linenum][column:]
  if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
    return True

  # Check for known types and end of templates:
  #   int&& variable
  #   vector<int>&& variable
  #
  # Because this function is called recursively, we also need to
  # recognize pointer and reference types:
  #   int* Function()
  #   int& Function()
  if (match.group(2) in typenames or
      match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
                         'short', 'int', 'long', 'signed', 'unsigned',
                         'float', 'double', 'void', 'auto', '>', '*', '&']):
    return True

  # If we see a close parenthesis, look for decltype on the other side.
  # decltype would unambiguously identify a type, anything else is
  # probably a parenthesized expression and not a type.
  if match.group(2) == ')':
    return IsDecltype(
        clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)

  # Check for casts and cv-qualifiers.
  #   match.group(1)  remainder
  #   --------------  ---------
  #   const_cast<     type&&
  #   static_cast<    type&&
  #   const           type&&
  if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
            r'reinterpret_cast\s*<|\w+\s)\s*$',
            match.group(1)):
    return True

  # Look for a preceding symbol that might help differentiate the context.
  # These are the cases that would be ambiguous:
  #   match.group(1)  remainder
  #   --------------  ---------
  #   Call         (   expression &&
  #   Declaration  (   type&&
  #   sizeof       (   type&&
  #   if           (   expression &&
  #   while        (   expression &&
  #   for          (   type&&
  #   for(         ;   expression &&
  #   statement    ;   type&&
  #   block        {   type&&
  #   constructor  {   expression &&
  start = linenum
  line = match.group(1)
  match_symbol = None
  while start >= 0:
    # We want to skip over identifiers and commas to get to a symbol.
    # Commas are skipped so that we can find the opening parenthesis
    # for function parameter lists.
    match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
    if match_symbol:
      break
    start -= 1
    line = clean_lines.elided[start]

  if not match_symbol:
    # Probably the first statement in the file is an rvalue reference
    return True

  if match_symbol.group(2) == '}':
    # Found closing brace, probably an indicate of this:
    #   block{} type&&
    return True

  if match_symbol.group(2) == ';':
    # Found semicolon, probably one of these:
    #   for(; expression &&
    #   statement; type&&
    #
    # Look for the previous 'for(' in the previous lines.
    before_text = match_symbol.group(1)
    for i in xrange(start - 1, max(start - 6, 0), -1):
      before_text = clean_lines.elided[i] + before_text
    if Search(r'for\s*\([^{};]*$', before_text):
      # This is the condition inside a for-loop
      return False

    # Did not find a for-init-statement before this semicolon, so this
    # is probably a new statement and not a condition.
    return True

  if match_symbol.group(2) == '{':
    # Found opening brace, probably one of these:
    #   block{ type&& = ... ; }
    #   constructor{ expression && expression }
    #
    # Look for a closing brace or a semicolon.  If we see a semicolon
    # first, this is probably a rvalue reference.
    line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
    end = start
    depth = 1
    while True:
      for ch in line:
        if ch == ';':
          return True
        elif ch == '{':
          depth += 1
        elif ch == '}':
          depth -= 1
          if depth == 0:
            return False
      end += 1
      if end >= clean_lines.NumLines():
        break
      line = clean_lines.elided[end]
    # Incomplete program?
    return False

  if match_symbol.group(2) == '(':
    # Opening parenthesis.  Need to check what's to the left of the
    # parenthesis.  Look back one extra line for additional context.
    before_text = match_symbol.group(1)
    if linenum > 1:
      before_text = clean_lines.elided[linenum - 1] + before_text
    # NOTE(review): the next line re-assigns before_text, discarding the
    # extra-line context built above — looks like dead code; preserved
    # as-is from the original. TODO confirm against upstream cpplint.
    before_text = match_symbol.group(1)

    # Patterns that are likely to be types:
    #   [](type&&
    #   for (type&&
    #   sizeof(type&&
    #   operator=(type&&
    #
    if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
      return True

    # Patterns that are likely to be expressions:
    #   if (expression &&
    #   while (expression &&
    #   : initializer(expression &&
    #   , initializer(expression &&
    #   ( FunctionCall(expression &&
    #   + FunctionCall(expression &&
    #   + (expression &&
    #
    # The last '+' represents operators such as '+' and '-'.
    if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
      return False

    # Something else.  Check that tokens to the left look like
    #   return_type function_name
    match_func = Match(r'^(.*\S.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
                       match_symbol.group(1))
    if match_func:
      # Check for constructors, which don't have return types.
      if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
        return True
      implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
      if (implicit_constructor and
          implicit_constructor.group(1) == implicit_constructor.group(2)):
        return True
      return IsRValueType(typenames, clean_lines, nesting_state, linenum,
                          len(match_func.group(1)))

    # Nothing before the function name.  If this is inside a block scope,
    # this is probably a function call.
    return not (nesting_state.previous_stack_top and
                nesting_state.previous_stack_top.IsBlockInfo())

  if match_symbol.group(2) == '>':
    # Possibly a closing bracket, check that what's on the other side
    # looks like the start of a template.
    return IsTemplateParameterList(
        clean_lines, start, len(match_symbol.group(1)))

  # Some other symbol, usually something like "a=b&&c".  This is most
  # likely not a type.
  return False
\r
3638 def IsDeletedOrDefault(clean_lines, linenum):
\r
3639 """Check if current constructor or operator is deleted or default.
\r
3642 clean_lines: A CleansedLines instance containing the file.
\r
3643 linenum: The number of the line to check.
\r
3645 True if this is a deleted or default constructor.
\r
3647 open_paren = clean_lines.elided[linenum].find('(')
\r
3648 if open_paren < 0:
\r
3650 (close_line, _, close_paren) = CloseExpression(
\r
3651 clean_lines, linenum, open_paren)
\r
3652 if close_paren < 0:
\r
3654 return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])
\r
3657 def IsRValueAllowed(clean_lines, linenum, typenames):
\r
3658 """Check if RValue reference is allowed on a particular line.
\r
3661 clean_lines: A CleansedLines instance containing the file.
\r
3662 linenum: The number of the line to check.
\r
3663 typenames: set of type names from template-argument-list.
\r
3665 True if line is within the region where RValue references are allowed.
\r
3667 # Allow region marked by PUSH/POP macros
\r
3668 for i in xrange(linenum, 0, -1):
\r
3669 line = clean_lines.elided[i]
\r
3670 if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
\r
3671 if not line.endswith('PUSH'):
\r
3673 for j in xrange(linenum, clean_lines.NumLines(), 1):
\r
3674 line = clean_lines.elided[j]
\r
3675 if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
\r
3676 return line.endswith('POP')
\r
3679 line = clean_lines.elided[linenum]
\r
3680 if Search(r'\boperator\s*=\s*\(', line):
\r
3681 return IsDeletedOrDefault(clean_lines, linenum)
\r
3683 # Allow constructors
\r
3684 match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line)
\r
3685 if match and match.group(1) == match.group(2):
\r
3686 return IsDeletedOrDefault(clean_lines, linenum)
\r
3687 if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line):
\r
3688 return IsDeletedOrDefault(clean_lines, linenum)
\r
3690 if Match(r'\s*[\w<>]+\s*\(', line):
\r
3691 previous_line = 'ReturnType'
\r
3693 previous_line = clean_lines.elided[linenum - 1]
\r
3694 if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
\r
3695 return IsDeletedOrDefault(clean_lines, linenum)
\r
3697 # Reject types not mentioned in template-argument-list
\r
3699 match = Match(r'^.*?(\w+)\s*&&(.*)$', line)
\r
3702 if match.group(1) not in typenames:
\r
3704 line = match.group(2)
\r
3706 # All RValue types that were in template-argument-list should have
\r
3707 # been removed by now. Those were allowed, assuming that they will
\r
3710 # If there are no remaining RValue types left (i.e. types that were
\r
3711 # not found in template-argument-list), flag those as not allowed.
\r
3712 return line.find('&&') < 0
\r
3715 def GetTemplateArgs(clean_lines, linenum):
\r
3716 """Find list of template arguments associated with this function declaration.
\r
3719 clean_lines: A CleansedLines instance containing the file.
\r
3720 linenum: Line number containing the start of the function declaration,
\r
3721 usually one line after the end of the template-argument-list.
\r
3723 Set of type names, or empty set if this does not appear to have
\r
3724 any template parameters.
\r
3726 # Find start of function
\r
3727 func_line = linenum
\r
3728 while func_line > 0:
\r
3729 line = clean_lines.elided[func_line]
\r
3730 if Match(r'^\s*$', line):
\r
3732 if line.find('(') >= 0:
\r
3735 if func_line == 0:
\r
3738 # Collapse template-argument-list into a single string
\r
3739 argument_list = ''
\r
3740 match = Match(r'^(\s*template\s*)<', clean_lines.elided[func_line])
\r
3742 # template-argument-list on the same line as function name
\r
3743 start_col = len(match.group(1))
\r
3744 _, end_line, end_col = CloseExpression(clean_lines, func_line, start_col)
\r
3745 if end_col > -1 and end_line == func_line:
\r
3746 start_col += 1 # Skip the opening bracket
\r
3747 argument_list = clean_lines.elided[func_line][start_col:end_col]
\r
3749 elif func_line > 1:
\r
3750 # template-argument-list one line before function name
\r
3751 match = Match(r'^(.*)>\s*$', clean_lines.elided[func_line - 1])
\r
3753 end_col = len(match.group(1))
\r
3754 _, start_line, start_col = ReverseCloseExpression(
\r
3755 clean_lines, func_line - 1, end_col)
\r
3756 if start_col > -1:
\r
3757 start_col += 1 # Skip the opening bracket
\r
3758 while start_line < func_line - 1:
\r
3759 argument_list += clean_lines.elided[start_line][start_col:]
\r
3762 argument_list += clean_lines.elided[func_line - 1][start_col:end_col]
\r
3764 if not argument_list:
\r
3767 # Extract type names
\r
3770 match = Match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$',
\r
3774 typenames.add(match.group(1))
\r
3775 argument_list = match.group(2)
\r
3779 def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
\r
3780 """Check for rvalue references.
\r
3783 filename: The name of the current file.
\r
3784 clean_lines: A CleansedLines instance containing the file.
\r
3785 linenum: The number of the line to check.
\r
3786 nesting_state: A NestingState instance which maintains information about
\r
3787 the current stack of nested blocks being parsed.
\r
3788 error: The function to call with any errors found.
\r
3790 # Find lines missing spaces around &&.
\r
3791 # TODO(unknown): currently we don't check for rvalue references
\r
3792 # with spaces surrounding the && to avoid false positives with
\r
3793 # boolean expressions.
\r
3794 line = clean_lines.elided[linenum]
\r
3795 match = Match(r'^(.*\S)&&', line)
\r
3797 match = Match(r'(.*)&&\S', line)
\r
3798 if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
\r
3801 # Either poorly formed && or an rvalue reference, check the context
\r
3802 # to get a more accurate error message. Mostly we want to determine
\r
3803 # if what's to the left of "&&" is a type or not.
\r
3804 typenames = GetTemplateArgs(clean_lines, linenum)
\r
3805 and_pos = len(match.group(1))
\r
3806 if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos):
\r
3807 if not IsRValueAllowed(clean_lines, linenum, typenames):
\r
3808 error(filename, linenum, 'build/c++11', 3,
\r
3809 'RValue references are an unapproved C++ feature.')
\r
3811 # error(filename, linenum, 'whitespace/operators', 3,
\r
3812 # 'Missing spaces around &&')
\r
3815 def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
\r
3816 """Checks for additional blank line issues related to sections.
\r
3818 Currently the only thing checked here is blank line before protected/private.
\r
3821 filename: The name of the current file.
\r
3822 clean_lines: A CleansedLines instance containing the file.
\r
3823 class_info: A _ClassInfo objects.
\r
3824 linenum: The number of the line to check.
\r
3825 error: The function to call with any errors found.
\r
3827 # Skip checks if the class is small, where small means 25 lines or less.
\r
3828 # 25 lines seems like a good cutoff since that's the usual height of
\r
3829 # terminals, and any class that can't fit in one screen can't really
\r
3830 # be considered "small".
\r
3832 # Also skip checks if we are on the first line. This accounts for
\r
3833 # classes that look like
\r
3834 # class Foo { public: ... };
\r
3836 # If we didn't find the end of the class, last_line would be zero,
\r
3837 # and the check will be skipped by the first condition.
\r
3838 if (class_info.last_line - class_info.starting_linenum <= 24 or
\r
3839 linenum <= class_info.starting_linenum):
\r
3842 matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
\r
3844 # Issue warning if the line before public/protected/private was
\r
3845 # not a blank line, but don't do this if the previous line contains
\r
3846 # "class" or "struct". This can happen two ways:
\r
3847 # - We are at the beginning of the class.
\r
3848 # - We are forward-declaring an inner class that is semantically
\r
3849 # private, but needed to be public for implementation reasons.
\r
3850 # Also ignores cases where the previous line ends with a backslash as can be
\r
3851 # common when defining classes in C macros.
\r
3852 prev_line = clean_lines.lines[linenum - 1]
\r
3853 if (not IsBlankLine(prev_line) and
\r
3854 not Search(r'\b(class|struct)\b', prev_line) and
\r
3855 not Search(r'\\$', prev_line)):
\r
3856 # Try a bit harder to find the beginning of the class. This is to
\r
3857 # account for multi-line base-specifier lists, e.g.:
\r
3860 end_class_head = class_info.starting_linenum
\r
3861 for i in range(class_info.starting_linenum, linenum):
\r
3862 if Search(r'\{\s*$', clean_lines.lines[i]):
\r
3863 end_class_head = i
\r
3865 #if end_class_head < linenum - 1:
\r
3866 # error(filename, linenum, 'whitespace/blank_line', 3,
\r
3867 # '"%s:" should be preceded by a blank line' % matched.group(1))
\r
3870 def GetPreviousNonBlankLine(clean_lines, linenum):
\r
3871 """Return the most recent non-blank line and its line number.
\r
3874 clean_lines: A CleansedLines instance containing the file contents.
\r
3875 linenum: The number of the line to check.
\r
3878 A tuple with two elements. The first element is the contents of the last
\r
3879 non-blank line before the current line, or the empty string if this is the
\r
3880 first non-blank line. The second is the line number of that line, or -1
\r
3881 if this is the first non-blank line.
\r
3884 prevlinenum = linenum - 1
\r
3885 while prevlinenum >= 0:
\r
3886 prevline = clean_lines.elided[prevlinenum]
\r
3887 if not IsBlankLine(prevline): # if not a blank line...
\r
3888 return (prevline, prevlinenum)
\r
3893 def CheckBraces(filename, clean_lines, linenum, error):
\r
3894 """Looks for misplaced braces (e.g. at the end of line).
\r
3897 filename: The name of the current file.
\r
3898 clean_lines: A CleansedLines instance containing the file.
\r
3899 linenum: The number of the line to check.
\r
3900 error: The function to call with any errors found.
\r
3903 line = clean_lines.elided[linenum] # get rid of comments and strings
\r
3905 if Match(r'\s*{\s*$', line):
\r
3906 # We allow an open brace to start a line in the case where someone is using
\r
3907 # braces in a block to explicitly create a new scope, which is commonly used
\r
3908 # to control the lifetime of stack-allocated variables. Braces are also
\r
3909 # used for brace initializers inside function calls. We don't detect this
\r
3910 # perfectly: we just don't complain if the last non-whitespace character on
\r
3911 # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
\r
3912 # previous line starts a preprocessor block.
\r
3913 prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
\r
3914 #if (not Search(r'[,;:}{(]\s*$', prevline) and
\r
3915 # not Match(r'\s*#', prevline)):
\r
3916 # error(filename, linenum, 'whitespace/braces', 4,
\r
3917 # '{ should almost always be at the end of the previous line')
\r
3919 # An else clause should be on the same line as the preceding closing brace.
\r
3920 if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
\r
3921 prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
\r
3922 if Match(r'\s*}\s*$', prevline):
\r
3923 error(filename, linenum, 'whitespace/newline', 4,
\r
3924 'An else should appear on the same line as the preceding }')
\r
3926 # If braces come on one side of an else, they should be on both.
\r
3927 # However, we have to worry about "else if" that spans multiple lines!
\r
3928 if Search(r'else if\s*\(', line): # could be multi-line if
\r
3929 brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
\r
3930 # find the ( after the if
\r
3931 pos = line.find('else if')
\r
3932 pos = line.find('(', pos)
\r
3934 (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
\r
3935 brace_on_right = endline[endpos:].find('{') != -1
\r
3936 if brace_on_left != brace_on_right: # must be brace after if
\r
3937 error(filename, linenum, 'readability/braces', 5,
\r
3938 '[BRC_M_SMT]If an \'else if\' has a brace on one side, it should have it on both')
\r
3939 elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
\r
3940 error(filename, linenum, 'readability/braces', 5,
\r
3941 '[BRC_M_SMT]If an \'else\' has a brace on one side, it should have it on both')
\r
3943 # Likewise, an else should never have the else clause on the same line
\r
3944 if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
\r
3945 error(filename, linenum, 'whitespace/newline', 4,
\r
3946 'Else clause should never be on same line as else (use 2 lines)')
\r
3948 # In the same way, a do/while should never be on one line
\r
3949 if Match(r'\s*do [^\s{]', line):
\r
3950 error(filename, linenum, 'whitespace/newline', 4,
\r
3951 'do/while clauses should not be on a single line')
\r
3953 # Check single-line if/else bodies. The style guide says 'curly braces are not
\r
3954 # required for single-line statements'. We additionally allow multi-line,
\r
3955 # single statements, but we reject anything with more than one semicolon in
\r
3956 # it. This means that the first semicolon after the if should be at the end of
\r
3957 # its line, and the line after that should have an indent level equal to or
\r
3958 # lower than the if. We also check for ambiguous if/else nesting without
\r
3960 if_else_match = Search(r'\b(if\s*\(|else\b)', line)
\r
3961 if if_else_match and not Match(r'\s*#', line):
\r
3962 if_indent = GetIndentLevel(line)
\r
3963 endline, endlinenum, endpos = line, linenum, if_else_match.end()
\r
3964 if_match = Search(r'\bif\s*\(', line)
\r
3966 # This could be a multiline if condition, so find the end first.
\r
3967 pos = if_match.end() - 1
\r
3968 (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
\r
3969 # Check for an opening brace, either directly after the if or on the next
\r
3970 # line. If found, this isn't a single-statement conditional.
\r
3971 if (not Match(r'\s*{', endline[endpos:])
\r
3972 and not (Match(r'\s*$', endline[endpos:])
\r
3973 and endlinenum < (len(clean_lines.elided) - 1)
\r
3974 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
\r
3975 while (endlinenum < len(clean_lines.elided)
\r
3976 and ';' not in clean_lines.elided[endlinenum][endpos:]):
\r
3979 if endlinenum < len(clean_lines.elided):
\r
3980 endline = clean_lines.elided[endlinenum]
\r
3981 # We allow a mix of whitespace and closing braces (e.g. for one-liner
\r
3982 # methods) and a single \ after the semicolon (for macros)
\r
3983 endpos = endline.find(';')
\r
3984 #if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
\r
3985 # Semicolon isn't the last character, there's something trailing.
\r
3986 # Output a warning if the semicolon is not contained inside
\r
3987 # a lambda expression.
\r
3988 #if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
\r
3990 #error(filename, linenum, 'readability/braces', 4,
\r
3991 # 'If/else bodies with multiple statements require braces')
\r
3992 #elif endlinenum < len(clean_lines.elided) - 1:
\r
3993 # Make sure the next line is dedented
\r
3994 #next_line = clean_lines.elided[endlinenum + 1]
\r
3995 #next_indent = GetIndentLevel(next_line)
\r
3996 # With ambiguous nested if statements, this will error out on the
\r
3997 # if that *doesn't* match the else, regardless of whether it's the
\r
3998 # inner one or outer one.
\r
3999 #if (if_match and Match(r'\s*else\b', next_line)
\r
4000 # and next_indent != if_indent):
\r
4001 # error(filename, linenum, 'readability/braces', 4,
\r
4002 # 'Else clause should be indented at the same level as if. '
\r
4003 # 'Ambiguous nested if/else chains require braces.')
\r
4004 #elif next_indent > if_indent:
\r
4005 # error(filename, linenum, 'readability/braces', 4,
\r
4006 # 'If/else bodies with multiple statements require braces')
\r
4009 def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
\r
4010 """Looks for redundant trailing semicolon.
\r
4013 filename: The name of the current file.
\r
4014 clean_lines: A CleansedLines instance containing the file.
\r
4015 linenum: The number of the line to check.
\r
4016 error: The function to call with any errors found.
\r
4019 line = clean_lines.elided[linenum]
\r
4021 # Block bodies should not be followed by a semicolon. Due to C++11
\r
4022 # brace initialization, there are more places where semicolons are
\r
4023 # required than not, so we use a whitelist approach to check these
\r
4024 # rather than a blacklist. These are the places where "};" should
\r
4025 # be replaced by just "}":
\r
4026 # 1. Some flavor of block following closing parenthesis:
\r
4029 # switch (...) {};
\r
4030 # Function(...) {};
\r
4032 # if (...) else if (...) {};
\r
4035 # if (...) else {};
\r
4037 # 3. const member function:
\r
4038 # Function(...) const {};
\r
4040 # 4. Block following some statement:
\r
4044 # 5. Block at the beginning of a function:
\r
4049 # Note that naively checking for the preceding "{" will also match
\r
4050 # braces inside multi-dimensional arrays, but this is fine since
\r
4051 # that expression will not contain semicolons.
\r
4053 # 6. Block following another block:
\r
4057 # 7. End of namespaces:
\r
4060 # These semicolons seems far more common than other kinds of
\r
4061 # redundant semicolons, possibly due to people converting classes
\r
4062 # to namespaces. For now we do not warn for this case.
\r
4064 # Try matching case 1 first.
\r
4065 match = Match(r'^(.*\)\s*)\{', line)
\r
4067 # Matched closing parenthesis (case 1). Check the token before the
\r
4068 # matching opening parenthesis, and don't warn if it looks like a
\r
4069 # macro. This avoids these false positives:
\r
4070 # - macro that defines a base class
\r
4071 # - multi-line macro that defines a base class
\r
4072 # - macro that defines the whole class-head
\r
4074 # But we still issue warnings for macros that we know are safe to
\r
4075 # warn, specifically:
\r
4076 # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
\r
4079 # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
\r
4081 # We implement a whitelist of safe macros instead of a blacklist of
\r
4082 # unsafe macros, even though the latter appears less frequently in
\r
4083 # google code and would have been easier to implement. This is because
\r
4084 # the downside for getting the whitelist wrong means some extra
\r
4085 # semicolons, while the downside for getting the blacklist wrong
\r
4086 # would result in compile errors.
\r
4088 # In addition to macros, we also don't want to warn on
\r
4089 # - Compound literals
\r
4091 # - alignas specifier with anonymous structs:
\r
4092 closing_brace_pos = match.group(1).rfind(')')
\r
4093 opening_parenthesis = ReverseCloseExpression(
\r
4094 clean_lines, linenum, closing_brace_pos)
\r
4095 if opening_parenthesis[2] > -1:
\r
4096 line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
\r
4097 macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
\r
4098 func = Match(r'^(.*\])\s*$', line_prefix)
\r
4100 macro.group(1) not in (
\r
4101 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
\r
4102 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
\r
4103 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
\r
4104 (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
\r
4105 Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
\r
4106 Search(r'\s+=\s*$', line_prefix)):
\r
4109 opening_parenthesis[1] > 1 and
\r
4110 Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
\r
4111 # Multi-line lambda-expression
\r
4115 # Try matching cases 2-3.
\r
4116 match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
\r
4118 # Try matching cases 4-6. These are always matched on separate lines.
\r
4120 # Note that we can't simply concatenate the previous line to the
\r
4121 # current line and do a single match, otherwise we may output
\r
4122 # duplicate warnings for the blank line case:
\r
4126 prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
\r
4127 if prevline and Search(r'[;{}]\s*$', prevline):
\r
4128 match = Match(r'^(\s*)\{', line)
\r
4130 # Check matching closing brace
\r
4132 # (endline, endlinenum, endpos) = CloseExpression(
\r
4133 # clean_lines, linenum, len(match.group(1)))
\r
4134 # if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
\r
4135 # Current {} pair is eligible for semicolon check, and we have found
\r
4136 # the redundant semicolon, output warning here.
\r
4138 # Note: because we are scanning forward for opening braces, and
\r
4139 # outputting warnings for the matching closing brace, if there are
\r
4140 # nested blocks with trailing semicolons, we will get the error
\r
4141 # messages in reversed order.
\r
4142 # error(filename, endlinenum, 'readability/braces', 4,
\r
4143 # "You don't need a ; after a }")
\r
4146 def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
\r
4147 """Look for empty loop/conditional body with only a single semicolon.
\r
4150 filename: The name of the current file.
\r
4151 clean_lines: A CleansedLines instance containing the file.
\r
4152 linenum: The number of the line to check.
\r
4153 error: The function to call with any errors found.
\r
4156 # Search for loop keywords at the beginning of the line. Because only
\r
4157 # whitespaces are allowed before the keywords, this will also ignore most
\r
4158 # do-while-loops, since those lines should start with closing brace.
\r
4160 # We also check "if" blocks here, since an empty conditional block
\r
4161 # is likely an error.
\r
4162 line = clean_lines.elided[linenum]
\r
4163 matched = Match(r'\s*(for|while|if)\s*\(', line)
\r
4165 # Find the end of the conditional expression
\r
4166 (end_line, end_linenum, end_pos) = CloseExpression(
\r
4167 clean_lines, linenum, line.find('('))
\r
4169 # Output warning if what follows the condition expression is a semicolon.
\r
4170 # No warning for all other cases, including whitespace or newline, since we
\r
4171 # have a separate check for semicolons preceded by whitespace.
\r
4172 if end_pos >= 0 and Match(r';', end_line[end_pos:]):
\r
4173 if matched.group(1) == 'if':
\r
4174 error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
\r
4175 'Empty conditional bodies should use {}')
\r
4177 error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
\r
4178 'Empty loop bodies should use {} or continue')
\r
4181 def FindCheckMacro(line):
\r
4182 """Find a replaceable CHECK-like macro.
\r
4185 line: line to search on.
\r
4187 (macro name, start position), or (None, -1) if no replaceable
\r
4190 for macro in _CHECK_MACROS:
\r
4191 i = line.find(macro)
\r
4193 # Find opening parenthesis. Do a regular expression match here
\r
4194 # to make sure that we are matching the expected CHECK macro, as
\r
4195 # opposed to some other macro that happens to contain the CHECK
\r
4197 matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
\r
4200 return (macro, len(matched.group(1)))
\r
4204 def CheckCheck(filename, clean_lines, linenum, error):
\r
4205 """Checks the use of CHECK and EXPECT macros.
\r
4208 filename: The name of the current file.
\r
4209 clean_lines: A CleansedLines instance containing the file.
\r
4210 linenum: The number of the line to check.
\r
4211 error: The function to call with any errors found.
\r
4214 # Decide the set of replacement macros that should be suggested
\r
4215 lines = clean_lines.elided
\r
4216 (check_macro, start_pos) = FindCheckMacro(lines[linenum])
\r
4217 if not check_macro:
\r
4220 # Find end of the boolean expression by matching parentheses
\r
4221 (last_line, end_line, end_pos) = CloseExpression(
\r
4222 clean_lines, linenum, start_pos)
\r
4226 # If the check macro is followed by something other than a
\r
4227 # semicolon, assume users will log their own custom error messages
\r
4228 # and don't suggest any replacements.
\r
4229 if not Match(r'\s*;', last_line[end_pos:]):
\r
4232 if linenum == end_line:
\r
4233 expression = lines[linenum][start_pos + 1:end_pos - 1]
\r
4235 expression = lines[linenum][start_pos + 1:]
\r
4236 for i in xrange(linenum + 1, end_line):
\r
4237 expression += lines[i]
\r
4238 expression += last_line[0:end_pos - 1]
\r
4240 # Parse expression so that we can take parentheses into account.
\r
4241 # This avoids false positives for inputs like "CHECK((a < 4) == b)",
\r
4242 # which is not replaceable by CHECK_LE.
\r
4247 matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
\r
4248 r'==|!=|>=|>|<=|<|\()(.*)$', expression)
\r
4250 token = matched.group(1)
\r
4252 # Parenthesized operand
\r
4253 expression = matched.group(2)
\r
4254 (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
\r
4256 return # Unmatched parenthesis
\r
4257 lhs += '(' + expression[0:end]
\r
4258 expression = expression[end:]
\r
4259 elif token in ('&&', '||'):
\r
4260 # Logical and/or operators. This means the expression
\r
4261 # contains more than one term, for example:
\r
4262 # CHECK(42 < a && a < b);
\r
4264 # These are not replaceable with CHECK_LE, so bail out early.
\r
4266 elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
\r
4267 # Non-relational operator
\r
4269 expression = matched.group(2)
\r
4271 # Relational operator
\r
4273 rhs = matched.group(2)
\r
4276 # Unparenthesized operand. Instead of appending to lhs one character
\r
4277 # at a time, we do another regular expression match to consume several
\r
4278 # characters at once if possible. Trivial benchmark shows that this
\r
4279 # is more efficient when the operands are longer than a single
\r
4280 # character, which is generally the case.
\r
4281 matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
\r
4283 matched = Match(r'^(\s*\S)(.*)$', expression)
\r
4286 lhs += matched.group(1)
\r
4287 expression = matched.group(2)
\r
4289 # Only apply checks if we got all parts of the boolean expression
\r
4290 if not (lhs and operator and rhs):
\r
4293 # Check that rhs do not contain logical operators. We already know
\r
4294 # that lhs is fine since the loop above parses out && and ||.
\r
4295 if rhs.find('&&') > -1 or rhs.find('||') > -1:
\r
4298 # At least one of the operands must be a constant literal. This is
\r
4299 # to avoid suggesting replacements for unprintable things like
\r
4300 # CHECK(variable != iterator)
\r
4302 # The following pattern matches decimal, hex integers, strings, and
\r
4303 # characters (in that order).
\r
4306 match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
\r
4307 if Match(match_constant, lhs) or Match(match_constant, rhs):
\r
4308 # Note: since we know both lhs and rhs, we can provide a more
\r
4309 # descriptive error message like:
\r
4310 # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
\r
4312 # Consider using CHECK_EQ instead of CHECK(a == b)
\r
4314 # We are still keeping the less descriptive message because if lhs
\r
4315 # or rhs gets long, the error message might become unreadable.
\r
4316 error(filename, linenum, 'readability/check', 2,
\r
4317 'Consider using %s instead of %s(a %s b)' % (
\r
4318 _CHECK_REPLACEMENT[check_macro][operator],
\r
4319 check_macro, operator))
\r
4322 def CheckAltTokens(filename, clean_lines, linenum, error):
\r
4323 """Check alternative keywords being used in boolean expressions.
\r
4326 filename: The name of the current file.
\r
4327 clean_lines: A CleansedLines instance containing the file.
\r
4328 linenum: The number of the line to check.
\r
4329 error: The function to call with any errors found.
\r
4331 line = clean_lines.elided[linenum]
\r
4333 # Avoid preprocessor lines
\r
4334 if Match(r'^\s*#', line):
\r
4337 # Last ditch effort to avoid multi-line comments. This will not help
\r
4338 # if the comment started before the current line or ended after the
\r
4339 # current line, but it catches most of the false positives. At least,
\r
4340 # it provides a way to workaround this warning for people who use
\r
4341 # multi-line comments in preprocessor macros.
\r
4343 # TODO(unknown): remove this once cpplint has better support for
\r
4344 # multi-line comments.
\r
4345 if line.find('/*') >= 0 or line.find('*/') >= 0:
\r
4348 for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
\r
4349 error(filename, linenum, 'readability/alt_tokens', 2,
\r
4350 'Use operator %s instead of %s' % (
\r
4351 _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
\r
4354 def GetLineWidth(line):
\r
4355 """Determines the width of the line in column positions.
\r
4358 line: A string, which may be a Unicode string.
\r
4361 The width of the line in column positions, accounting for Unicode
\r
4362 combining characters and wide characters.
\r
4364 if isinstance(line, unicode):
\r
4366 for uc in unicodedata.normalize('NFC', line):
\r
4367 if unicodedata.east_asian_width(uc) in ('W', 'F'):
\r
4369 elif not unicodedata.combining(uc):
\r
4376 def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
\r
4378 """Checks rules from the 'C++ style rules' section of cppguide.html.
\r
4380 Most of these rules are hard to test (naming, comment style), but we
\r
4381 do what we can. In particular we check for 2-space indents, line lengths,
\r
4382 tab usage, spaces inside code, etc.
\r
4385 filename: The name of the current file.
\r
4386 clean_lines: A CleansedLines instance containing the file.
\r
4387 linenum: The number of the line to check.
\r
4388 file_extension: The extension (without the dot) of the filename.
\r
4389 nesting_state: A NestingState instance which maintains information about
\r
4390 the current stack of nested blocks being parsed.
\r
4391 error: The function to call with any errors found.
\r
4394 # Don't use "elided" lines here, otherwise we can't check commented lines.
\r
4395 # Don't want to use "raw" either, because we don't want to check inside C++11
\r
4397 raw_lines = clean_lines.lines_without_raw_strings
\r
4398 line = raw_lines[linenum]
\r
4400 if line.find('\t') != -1:
\r
4401 error(filename, linenum, 'whitespace/tab', 1,
\r
4402 'Tab found; better to use spaces')
\r
4404 # One or three blank spaces at the beginning of the line is weird; it's
\r
4405 # hard to reconcile that with 2-space indents.
\r
4406 # NOTE: here are the conditions rob pike used for his tests. Mine aren't
\r
4407 # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
\r
4408 # if(RLENGTH > 20) complain = 0;
\r
4409 # if(match($0, " +(error|private|public|protected):")) complain = 0;
\r
4410 # if(match(prev, "&& *$")) complain = 0;
\r
4411 # if(match(prev, "\\|\\| *$")) complain = 0;
\r
4412 # if(match(prev, "[\",=><] *$")) complain = 0;
\r
4413 # if(match($0, " <<")) complain = 0;
\r
4414 # if(match(prev, " +for \\(")) complain = 0;
\r
4415 # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
\r
4416 scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
\r
4417 classinfo = nesting_state.InnermostClass()
\r
4418 initial_spaces = 0
\r
4419 cleansed_line = clean_lines.elided[linenum]
\r
4420 while initial_spaces < len(line) and line[initial_spaces] == ' ':
\r
4421 initial_spaces += 1
\r
4422 if line and line[-1].isspace():
\r
4423 error(filename, linenum, 'whitespace/end_of_line', 4,
\r
4424 '[LNE_R_TWS]Line ends in whitespace. Consider deleting these extra spaces.')
\r
4425 # There are certain situations we allow one space, notably for
\r
4426 # section labels, and also lines containing multi-line raw strings.
\r
4427 elif ((initial_spaces == 1 or initial_spaces == 3) and
\r
4428 not Match(scope_or_label_pattern, cleansed_line) and
\r
4429 not (clean_lines.raw_lines[linenum] != line and
\r
4430 Match(r'^\s*""', line))):
\r
4431 error(filename, linenum, 'whitespace/indent', 3,
\r
4432 'Weird number of spaces at line-start. '
\r
4433 'Are you using a 2-space indent?')
\r
4435 # Check if the line is a header guard.
\r
4436 is_header_guard = False
\r
4437 if file_extension == 'h':
\r
4438 cppvar = GetHeaderGuardCPPVariable(filename)
\r
4439 if (line.startswith('#ifndef %s' % cppvar) or
\r
4440 line.startswith('#define %s' % cppvar) or
\r
4441 line.startswith('#endif // %s' % cppvar)):
\r
4442 is_header_guard = True
\r
4443 # #include lines and header guards can be long, since there's no clean way to
\r
4446 # URLs can be long too. It's possible to split these, but it makes them
\r
4447 # harder to cut&paste.
\r
4449 # The "$Id:...$" comment may also get very long without it being the
\r
4450 # developers fault.
\r
4452 if (not line.startswith('#include') and not is_header_guard and
\r
4453 not Match(r'^\s*//.*http(s?)://\S*$', line) and
\r
4454 not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
\r
4455 line_width = GetLineWidth(line)
\r
4456 extended_length = int((_line_length * 1.25))
\r
4457 if line_width > extended_length:
\r
4458 error(filename, linenum, 'whitespace/line_length', 4,
\r
4459 'Lines should very rarely be longer than %i characters' %
\r
4461 elif line_width > _line_length:
\r
4462 error(filename, linenum, 'whitespace/line_length', 2,
\r
4463 'Lines should be <= %i characters long' % _line_length)
\r
4465 if (cleansed_line.count(';') > 1 and
\r
4466 # for loops are allowed two ;'s (and may run over two lines).
\r
4467 cleansed_line.find('for') == -1 and
\r
4468 (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
\r
4469 GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
\r
4470 # It's ok to have many commands in a switch case that fits in 1 line
\r
4471 not ((cleansed_line.find('case ') != -1 or
\r
4472 cleansed_line.find('default:') != -1) and
\r
4473 cleansed_line.find('break;') != -1)):
\r
4474 error(filename, linenum, 'whitespace/newline', 0,
\r
4475 'More than one command on the same line')
\r
4477 # Some more style checks #test danakim
\r
4478 CheckBraces(filename, clean_lines, linenum, error)
\r
4479 #CheckTrailingSemicolon(filename, clean_lines, linenum, error)
\r
4480 #CheckEmptyBlockBody(filename, clean_lines, linenum, error)
\r
4481 #CheckAccess(filename, clean_lines, linenum, nesting_state, error)
\r
4482 CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
\r
4483 CheckOperatorSpacing(filename, clean_lines, linenum, error)
\r
4484 CheckParenthesisSpacing(filename, clean_lines, linenum, error)
\r
4485 CheckCommaSpacing(filename, clean_lines, linenum, error)
\r
4486 CheckBracesSpacing(filename, clean_lines, linenum, error)
\r
4487 CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
\r
4488 #CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
\r
4489 #CheckCheck(filename, clean_lines, linenum, error)
\r
4490 #CheckAltTokens(filename, clean_lines, linenum, error)
\r
4491 classinfo = nesting_state.InnermostClass()
\r
4493 CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
\r
# Matches a whole #include line, capturing the delimiter ('<' or '"') in
# group 1 and the included path in group 2.
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
\r
4505 def _DropCommonSuffixes(filename):
\r
4506 """Drops common suffixes like _test.cc or -inl.h from filename.
\r
4509 >>> _DropCommonSuffixes('foo/foo-inl.h')
\r
4511 >>> _DropCommonSuffixes('foo/bar/foo.cc')
\r
4513 >>> _DropCommonSuffixes('foo/foo_internal.h')
\r
4515 >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
\r
4516 'foo/foo_unusualinternal'
\r
4519 filename: The input filename.
\r
4522 The filename with the common suffix removed.
\r
4524 for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
\r
4525 'inl.h', 'impl.h', 'internal.h'):
\r
4526 if (filename.endswith(suffix) and len(filename) > len(suffix) and
\r
4527 filename[-len(suffix) - 1] in ('-', '_')):
\r
4528 return filename[:-len(suffix) - 1]
\r
4529 return os.path.splitext(filename)[0]
\r
4532 def _IsTestFilename(filename):
\r
4533 """Determines if the given filename has a suffix that identifies it as a test.
\r
4536 filename: The input filename.
\r
4539 True if 'filename' looks like a test, False otherwise.
\r
4541 if (filename.endswith('_test.cc') or
\r
4542 filename.endswith('_unittest.cc') or
\r
4543 filename.endswith('_regtest.cc')):
\r
def _ClassifyInclude(fileinfo, include, is_system):
  """Figures out what kind of header 'include' is.

  Args:
    fileinfo: The current file cpplint is running over. A FileInfo instance.
    include: The path to a #included file.
    is_system: True if the #include used <> rather than "".

  Returns:
    One of the _XXX_HEADER constants.

  For example:
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
    _C_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
    _CPP_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
    _LIKELY_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
    ...                  'bar/foo_other_ext.h', False)
    _POSSIBLE_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
    _OTHER_HEADER
  """
  # This is a list of all standard c++ header files, except
  # those already checked for above.
  is_cpp_h = include in _CPP_HEADERS

  # System headers (<...>) split into C++ vs C based on the known C++ list.
  # NOTE(review): this branch was missing from the damaged copy; restored.
  if is_system:
    if is_cpp_h:
      return _CPP_SYS_HEADER
    else:
      return _C_SYS_HEADER

  # If the target file and the include we're checking share a
  # basename when we drop common extensions, and the include
  # lives in . , then it's likely to be owned by the target file.
  target_dir, target_base = (
      os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
  if target_base == include_base and (
      include_dir == target_dir or
      include_dir == os.path.normpath(target_dir + '/../public')):
    return _LIKELY_MY_HEADER

  # If the target and include share some initial basename
  # component, it's possible the target is implementing the
  # include, so it's allowed to be first, but we'll never
  # complain if it's not there.
  target_first_component = _RE_FIRST_COMPONENT.match(target_base)
  include_first_component = _RE_FIRST_COMPONENT.match(include_base)
  if (target_first_component and include_first_component and
      target_first_component.group(0) ==
      include_first_component.group(0)):
    return _POSSIBLE_MY_HEADER

  return _OTHER_HEADER
\r
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
  """Check rules that are applicable to #include lines.

  Strings on #include lines are NOT removed from elided line, to make
  certain tasks easier. However, to prevent false positives, checks
  applicable to #include lines in CheckLanguage must be put here.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  fileinfo = FileInfo(filename)
  line = clean_lines.lines[linenum]

  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
  # Only do this check if the included header follows google naming
  # conventions. If not, assume that it's a 3rd party API that
  # requires special include conventions.
  #
  # We also make an exception for Lua headers, which follow google
  # naming convention but not the include convention.
  match = Match(r'#include\s*"([^/]+\.h)"', line)
  if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
    error(filename, linenum, 'build/include', 4,
          'Include the directory when naming .h files')

  # we shouldn't include a file more than once. actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:  # NOTE(review): guard restored; it was lost in the damaged copy.
    include = match.group(2)
    is_system = (match.group(1) == '<')
    duplicate_line = include_state.FindHeader(include)
    if duplicate_line >= 0:
      error(filename, linenum, 'build/include', 4,
            '"%s" already included at %s:%s' %
            (include, filename, duplicate_line))
    elif (include.endswith('.cc') and
          os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
      error(filename, linenum, 'build/include', 4,
            'Do not include .cc files from other packages')
    elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
      include_state.include_list[-1].append((include, linenum))

      # We want to ensure that headers appear in the right order:
      # 1) for foo.cc, foo.h  (preferred location)
      # 2) c system files
      # 3) cpp system files
      # 4) for foo.cc, foo.h  (deprecated location)
      # 5) other google headers
      #
      # We classify each include statement as one of those 5 types
      # using a number of techniques. The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, is_system))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)
\r
4683 def _GetTextInside(text, start_pattern):
\r
4684 r"""Retrieves all the text between matching open and close parentheses.
\r
4686 Given a string of lines and a regular expression string, retrieve all the text
\r
4687 following the expression and between opening punctuation symbols like
\r
4688 (, [, or {, and the matching close-punctuation symbol. This properly nested
\r
4689 occurrences of the punctuations, so for the text like
\r
4690 printf(a(), b(c()));
\r
4691 a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
\r
4692 start_pattern must match string having an open punctuation symbol at the end.
\r
4695 text: The lines to extract text. Its comments and strings must be elided.
\r
4696 It can be single line and can span multiple lines.
\r
4697 start_pattern: The regexp string indicating where to start extracting
\r
4700 The extracted text.
\r
4701 None if either the opening string or ending punctuation could not be found.
\r
4703 # TODO(unknown): Audit cpplint.py to see what places could be profitably
\r
4704 # rewritten to use _GetTextInside (and use inferior regexp matching today).
\r
4706 # Give opening punctuations to get the matching close-punctuations.
\r
4707 matching_punctuation = {'(': ')', '{': '}', '[': ']'}
\r
4708 closing_punctuation = set(matching_punctuation.itervalues())
\r
4710 # Find the position to start extracting text.
\r
4711 match = re.search(start_pattern, text, re.M)
\r
4712 if not match: # start_pattern not found in text.
\r
4714 start_position = match.end(0)
\r
4716 assert start_position > 0, (
\r
4717 'start_pattern must ends with an opening punctuation.')
\r
4718 assert text[start_position - 1] in matching_punctuation, (
\r
4719 'start_pattern must ends with an opening punctuation.')
\r
4720 # Stack of closing punctuations we expect to have in text after position.
\r
4721 punctuation_stack = [matching_punctuation[text[start_position - 1]]]
\r
4722 position = start_position
\r
4723 while punctuation_stack and position < len(text):
\r
4724 if text[position] == punctuation_stack[-1]:
\r
4725 punctuation_stack.pop()
\r
4726 elif text[position] in closing_punctuation:
\r
4727 # A closing punctuation without matching opening punctuations.
\r
4729 elif text[position] in matching_punctuation:
\r
4730 punctuation_stack.append(matching_punctuation[text[position]])
\r
4732 if punctuation_stack:
\r
4733 # Opening punctuations left without matching close-punctuations.
\r
4735 # punctuations match.
\r
4736 return text[start_position:position - 1]
\r
4739 # Patterns for matching call-by-reference parameters.
\r
4741 # Supports nested templates up to 2 levels deep using this messy pattern:
\r
4742 # < (?: < (?: < [^<>]*
\r
4748 _RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
\r
4749 _RE_PATTERN_TYPE = (
\r
4750 r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
\r
4752 r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
\r
4754 # A call-by-reference parameter ends with '& identifier'.
\r
4755 _RE_PATTERN_REF_PARAM = re.compile(
\r
4756 r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
\r
4757 r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
\r
4758 # A call-by-const-reference parameter either ends with 'const& identifier'
\r
4759 # or looks like 'const type& identifier' when 'type' is atomic.
\r
4760 _RE_PATTERN_CONST_REF_PARAM = (
\r
4761 r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
\r
4762 r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
\r
def CheckLanguage(filename, clean_lines, linenum, file_extension,
                  include_state, nesting_state, error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.

  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists of entirely a comment, no need to
  # check it.
  line = clean_lines.elided[linenum]
  if not line:
    return

  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return

  # Reset include state across preprocessor directives. This is meant
  # to silence warnings for conditional includes.
  match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
  if match:
    include_state.ResetSection(match.group(1))

  # Make Windows paths like Unix.
  fullname = os.path.abspath(filename).replace('\\', '/')

  # Perform other checks now that we are sure that this is not an include line
  CheckCasts(filename, clean_lines, linenum, error)
  CheckGlobalStatic(filename, clean_lines, linenum, error)
  CheckPrintf(filename, clean_lines, linenum, error)

  if file_extension == 'h':
    # TODO(unknown): check that 1-arg constructors are explicit.
    #                How to tell it's a constructor?
    #                (handled in CheckForNonStandardConstructs for now)
    # TODO(unknown): check that classes declare or disable copy/assign
    #                (level 1 error)
    pass

  # Check if people are using the verboten C basic types. The only exception
  # we regularly allow is "unsigned short port" for port.
  if Search(r'\bshort port\b', line):
    if not Search(r'\bunsigned short port\b', line):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))

  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  #   class X {};
  #   int operator&(const X& x) { return 42; }  // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  #   class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous. Do not use it.')

  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          '[BRC_M_SMT]Did you mean "else if"? If not, start a new line for "if".')

  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple line to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match and match.group(1) != '__VA_ARGS__':
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))

  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))

  if Search(r'\busing namespace\b', line):
    error(filename, linenum, 'build/namespaces', 5,
          'Do not use namespace using-directives. '
          'Use using-declarations instead.')

  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
    # NOTE(review): the is_const/skip_next bookkeeping below was lost in the
    # damaged copy; restored so the loop's 'continue's have a purpose.
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue

      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue

      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays. Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")

  # Check for use of unnamed namespaces in header files. Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (file_extension == 'h'
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces', 4,
          'Do not use unnamed namespaces in header files. See '
          'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')
\r
def CheckGlobalStatic(filename, clean_lines, linenum, error):
  """Check for unsafe global or static objects.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Match two lines at a time to support multiline declarations
  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
    line += clean_lines.elided[linenum + 1].strip()

  # Check for people declaring static/global STL strings at the top level.
  # This is dangerous because the C++ language does not guarantee that
  # globals with constructors are initialized before the first access.
  # NOTE(review): this Match() call and the 'if (match and' guard below were
  # lost in the damaged copy; restored.
  match = Match(
      r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
      line)

  # Remove false positives:
  # - String pointers (as opposed to values).
  #    string *pointer
  #    const string *pointer
  #    string const *pointer
  #    string *const pointer
  #
  # - Functions and template specializations.
  #    string Function<Type>(...
  #    string Class<Type>::Method(...
  #
  # - Operators. These are matched separately because operator names
  #   cross non-word boundaries, and trying to match both operators
  #   and functions at the same time would decrease accuracy of
  #   matching identifiers.
  #    string Class::operator*()
  if (match and
      not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
      not Search(r'\boperator\W', line) and
      not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
    error(filename, linenum, 'runtime/string', 4,
          'For a static/global string constant, use a C style string instead: '
          '"%schar %s[]".' %
          (match.group(1), match.group(2)))

  if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')
\r
def CheckPrintf(filename, clean_lines, linenum, error):
  """Check for printf related issues.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # When snprintf is used, the second argument shouldn't be a literal.
  match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
  if match and match.group(2) != '0':
    # If 2nd arg is zero, snprintf is used to calculate size.
    error(filename, linenum, 'runtime/printf', 3,
          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
          'to snprintf.' % (match.group(1), match.group(2)))

  # Check if some verboten C functions are being used.
  if Search(r'\bsprintf\s*\(', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')
  match = Search(r'\b(strcpy|strcat)\s*\(', line)
  if match:  # NOTE(review): guard restored; it was lost in the damaged copy.
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s' % match.group(1))
\r
def IsDerivedFunction(clean_lines, linenum):
  """Check if current line contains an inherited function.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if current line contains a function with "override"
    virt-specifier.
  """
  # Scan back a few lines for start of current function
  for i in xrange(linenum, max(-1, linenum - 10), -1):
    match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
    if match:  # NOTE(review): guard restored; lost in the damaged copy.
      # Look for "override" after the matching closing parenthesis
      line, _, closing_paren = CloseExpression(
          clean_lines, i, len(match.group(1)))
      return (closing_paren >= 0 and
              Search(r'\boverride\b', line[closing_paren:]))
  # No function start found within the scan window.
  return False
\r
def IsOutOfLineMethodDefinition(clean_lines, linenum):
  """Check if current line contains an out-of-line method definition.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if current line contains an out-of-line method definition.
  """
  # Scan back a few lines for start of current function
  for i in xrange(linenum, max(-1, linenum - 10), -1):
    if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
      return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
  # NOTE(review): final return restored; it was lost in the damaged copy.
  return False
\r
def IsInitializerList(clean_lines, linenum):
  """Check if current line is inside constructor initializer list.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if current line appears to be inside constructor initializer
    list, False otherwise.
  """
  for i in xrange(linenum, 1, -1):
    line = clean_lines.elided[i]
    if i == linenum:
      # Strip a trailing function body opened on the current line so the
      # colon scan below sees only the declaration part.
      remove_function_body = Match(r'^(.*)\{\s*$', line)
      if remove_function_body:
        line = remove_function_body.group(1)

    if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tend to indicate the start of a constructor
      # initializer list. It could also be a ternary operator, which
      # also tend to appear in constructor initializer lists as
      # opposed to parameter lists.
      return True
    if Search(r'\}\s*,\s*$', line):
      # A closing brace followed by a comma is probably the end of a
      # brace-initialized member in constructor initializer list.
      return True
    if Search(r'[{};]\s*$', line):
      # Found one of the following:
      # - A closing brace or semicolon, probably the end of the previous
      #   statement.
      # - An opening brace, probably the start of current class or namespace.
      #
      # Current line is probably not inside an initializer list since
      # we saw one of those things without seeing the starting colon.
      return False

  # Got to the beginning of the file without seeing the start of
  # constructor initializer list.
  return False
\r
def CheckForNonConstReference(filename, clean_lines, linenum,
                              nesting_state, error):
  """Check for non-const references.

  Separate from CheckLanguage since it scans backwards from current
  line, instead of scanning forward.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Do nothing if there is no '&' on current line.
  line = clean_lines.elided[linenum]
  if '&' not in line:
    return

  # If a function is inherited, current function doesn't have much of
  # a choice, so any non-const references should not be blamed on
  # derived function.
  if IsDerivedFunction(clean_lines, linenum):
    return

  # Don't warn on out-of-line method definitions, as we would warn on the
  # in-line declaration, if it isn't marked with 'override'.
  if IsOutOfLineMethodDefinition(clean_lines, linenum):
    return

  # Long type names may be broken across multiple lines, usually in one
  # of these forms:
  #   LongType
  #       ::LongTypeContinued &identifier
  #   LongType::
  #       LongTypeContinued &identifier
  #   LongType<
  #       ...>::LongTypeContinued &identifier
  #
  # If we detected a type split across two lines, join the previous
  # line to current line so that we can match const references
  # accordingly.
  #
  # Note that this only scans back one line, since scanning back
  # arbitrary number of lines would be expensive. If you have a type
  # that spans more than 2 lines, please use a typedef.
  if linenum > 1:
    previous = None
    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
      # previous_line\n + ::current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
                        clean_lines.elided[linenum - 1])
    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
      # previous_line::\n + current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
                        clean_lines.elided[linenum - 1])
    if previous:
      line = previous.group(1) + line.lstrip()
    else:
      # Check for templated parameter that is split across multiple lines
      endpos = line.rfind('>')
      if endpos > -1:
        (_, startline, startpos) = ReverseCloseExpression(
            clean_lines, linenum, endpos)
        if startpos > -1 and startline < linenum:
          # Found the matching < on an earlier line, collect all
          # pieces up to current line.
          line = ''
          for i in xrange(startline, linenum + 1):
            line += clean_lines.elided[i].strip()

  # Check for non-const references in function parameters. A single '&' may
  # found in the following places:
  #   inside expression: binary & for bitwise AND
  #   inside expression: unary & for taking the address of something
  #   inside declarators: reference parameter
  # We will exclude the first two cases by checking that we are not inside a
  # function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
  if (nesting_state.previous_stack_top and
      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
    # Not at toplevel, not within a class, and not within a namespace
    return

  # Avoid initializer lists. We only need to scan back from the
  # current line for something that starts with ':'.
  #
  # We don't need to check the current line, since the '&' would
  # appear inside the second set of parentheses on the current line as
  # opposed to the first set.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 10), -1):
      previous_line = clean_lines.elided[i]
      if not Search(r'[),]\s*$', previous_line):
        break
      if Match(r'^\s*:\s+\S', previous_line):
        return

  # Avoid preprocessors
  if Search(r'\\\s*$', line):
    return

  # Avoid constructor initializer lists
  if IsInitializerList(clean_lines, linenum):
    return

  # We allow non-const references in a few standard places, like functions
  # called "swap()" or iostream operators like "<<" or ">>". Do not check
  # those function parameters.
  #
  # We also accept & in static_assert, which looks like a function but
  # it's actually a declaration expression.
  whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                           r'operator\s*[<>][<>]|'
                           r'static_assert|COMPILE_ASSERT'
                           r')\s*\(')
  if Search(whitelisted_functions, line):
    return
  elif not Search(r'\S+\([^)]*$', line):
    # Don't see a whitelisted function on this line. Actually we
    # didn't see any function name on this line, so this is likely a
    # multi-line parameter list. Try a bit harder to catch this case.
    for i in xrange(2):
      if (linenum > i and
          Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
        return

  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
    if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer: ' +
            ReplaceAll(' *<', '<', parameter))
\r
def CheckCasts(filename, clean_lines, linenum, error):
  """Various cast related checks.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Check to see if they're using an conversion function cast.
  # I just try to capture the most common basic types, though there are more.
  # Parameterless conversion functions, such as bool(), are allowed as they are
  # probably a member operator declaration or default constructor.
  match = Search(
      r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
      r'(\([^)].*)', line)
  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
  if match and not expecting_function:
    matched_type = match.group(2)

    # matched_new_or_template is used to silence two false positives:
    # - New operators
    # - Template arguments with function types
    #
    # For template arguments, we match on types immediately following
    # an opening bracket without any spaces.  This is a fast way to
    # silence the common case where the function type is the first
    # template argument.  False negative with less-than comparison is
    # avoided because those operators are usually followed by a space.
    #
    #   function<double(double)>   // bracket + no space = false positive
    #   value < double(42)         // bracket + space = true positive
    matched_new_or_template = match.group(1)

    # Avoid arrays by looking for brackets that come after the closing
    # parenthesis.
    if Match(r'\([^()]+\)\s*\[', match.group(3)):
      return

    # Other things to ignore:
    # - Function pointers
    # - Casts to pointer types
    # - Placement new
    # - Alias declarations
    matched_funcptr = match.group(3)
    if (matched_new_or_template is None and
        not (matched_funcptr and
             (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
                    matched_funcptr) or
              matched_funcptr.startswith('(*)'))) and
        not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
        not Search(r'new\(\S+\)\s*' + matched_type, line)):
      error(filename, linenum, 'readability/casting', 4,
            'Using deprecated casting style. '
            'Use static_cast<%s>(...) instead' %
            matched_type)

  if not expecting_function:
    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
                    r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)

  # This doesn't catch all cases.  Consider (const char * const)"hello".
  #
  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
  # compile).
  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
                     r'\((char\s?\*+\s?)\)\s*"', error):
    pass
  else:
    # Check pointer casts for other than string constants
    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
                    r'\((\w+\s?\*+\s?)\)', error)

  # In addition, we look for people taking the address of a cast.  This
  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
  # point where you think.
  #
  # Some non-identifier character is required before the '&' for the
  # expression to be recognized as a cast.  These are casts:
  #   expression = &static_cast<int*>(temporary());
  #   function(&(int*)(temporary()));
  #
  # This is not a cast:
  #   reference_type&(int* function_param);
  match = Search(
      r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
      r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
  if match:
    # Try a better error message when the & is bound to something
    # dereferenced by the casted pointer, as opposed to the casted
    # pointer itself.
    parenthesis_error = False
    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
    if match:
      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
        _, y2, x2 = CloseExpression(clean_lines, y1, x1)
        if x2 >= 0:
          extended_line = clean_lines.elided[y2][x2:]
          if y2 < clean_lines.NumLines() - 1:
            extended_line += clean_lines.elided[y2 + 1]
          if Match(r'\s*(?:->|\[)', extended_line):
            parenthesis_error = True

    if parenthesis_error:
      error(filename, linenum, 'readability/casting', 4,
            ('Are you taking an address of something dereferenced '
             'from a cast?  Wrapping the dereferenced expression in '
             'parentheses will make the binding more obvious'))
    else:
      error(filename, linenum, 'runtime/casting', 4,
            ('Are you taking an address of a cast?  '
             'This is dangerous: could be a temp var.  '
             'Take the address before doing the cast, rather than after'))
\r
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
  """Checks for a C-style cast by looking for the pattern.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    cast_type: The string for the C++ cast to recommend.  This is either
      reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  line = clean_lines.elided[linenum]
  match = Search(pattern, line)
  if not match:
    return False

  # Exclude lines with keywords that tend to look like casts
  context = line[0:match.start(1) - 1]
  if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
    return False

  # Try expanding current context to see if we one level of
  # parentheses inside a macro.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 5), -1):
      context = clean_lines.elided[i] + context
  if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
    return False

  # operator++(int) and operator--(int)
  if context.endswith(' operator++') or context.endswith(' operator--'):
    return False

  # A single unnamed argument for a function tends to look like old
  # style cast.  If we see those, don't issue warnings for deprecated
  # casts, instead issue warnings for unnamed arguments where
  # appropriate.
  #
  # These are things that we want warnings for, since the style guide
  # explicitly require all parameters to be named:
  #   Function(int);
  #   Function(int) {
  #   ConstMember(int) const;
  #   ConstMember(int) const {
  #   ExceptionMember(int) throw (...);
  #   ExceptionMember(int) throw (...) {
  #   PureVirtual(int) = 0;
  #   [](int) -> bool {
  #
  # These are functions of some sort, where the compiler would be fine
  # if they had named parameters, but people often omit those
  # identifiers to reduce clutter:
  #   (FunctionPointer)(int);
  #   (FunctionPointer)(int) = value;
  #   Function((function_pointer_arg)(int))
  #   Function((function_pointer_arg)(int), int param)
  #   <TemplateArgument(int)>;
  #   <(FunctionPointerTemplateArgument)(int)>;
  remainder = line[match.end(0):]
  if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
           remainder):
    # Looks like an unnamed parameter.

    # Don't warn on any kind of template arguments.
    if Match(r'^\s*>', remainder):
      return False

    # Don't warn on assignments to function pointers, but keep warnings for
    # unnamed parameters to pure virtual functions.  Note that this pattern
    # will also pass on assignments of "0" to function pointers, but the
    # preferred values for those would be "nullptr" or "NULL".
    matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
    if matched_zero and matched_zero.group(1) != '0':
      return False

    # Don't warn on function pointer declarations.  For this we need
    # to check what came before the "(type)" string.
    if Match(r'.*\)\s*$', line[0:match.start(0)]):
      return False

    # Don't warn if the parameter is named with block comments, e.g.:
    #  Function(int /*unused_param*/);
    raw_line = clean_lines.raw_lines[linenum]
    if '/*' in raw_line:
      return False

    # Passed all filters, issue warning here.
    error(filename, linenum, 'readability/function', 3,
          'All parameters should be named in a function')
    return True

  # At this point, all that should be left is actual casts.
  error(filename, linenum, 'readability/casting', 4,
        'Using C-style cast. Use %s<%s>(...) instead' %
        (cast_type, match.group(1)))

  return True
\r
def ExpectingFunctionArgs(clean_lines, linenum):
  """Checks whether where function type arguments are expected.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if the line at 'linenum' is inside something that expects arguments
    of function types.
  """
  line = clean_lines.elided[linenum]
  # Either the MOCK_METHOD macro opens on this line, or it opened on one of
  # the two previous lines (possibly with the mocked-class argument already
  # given), or the previous line opened a std::function/std::mfunction
  # template argument list.
  return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
          (linenum >= 2 and
           (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
                  clean_lines.elided[linenum - 1]) or
            Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
                  clean_lines.elided[linenum - 2]) or
            Search(r'\bstd::m?function\s*\<\s*$',
                   clean_lines.elided[linenum - 1]))))
\r
5467 _HEADERS_CONTAINING_TEMPLATES = (
\r
5468 ('<deque>', ('deque',)),
\r
5469 ('<functional>', ('unary_function', 'binary_function',
\r
5470 'plus', 'minus', 'multiplies', 'divides', 'modulus',
\r
5472 'equal_to', 'not_equal_to', 'greater', 'less',
\r
5473 'greater_equal', 'less_equal',
\r
5474 'logical_and', 'logical_or', 'logical_not',
\r
5475 'unary_negate', 'not1', 'binary_negate', 'not2',
\r
5476 'bind1st', 'bind2nd',
\r
5477 'pointer_to_unary_function',
\r
5478 'pointer_to_binary_function',
\r
5480 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
\r
5482 'const_mem_fun_t', 'const_mem_fun1_t',
\r
5483 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
\r
5486 ('<limits>', ('numeric_limits',)),
\r
5487 ('<list>', ('list',)),
\r
5488 ('<map>', ('map', 'multimap',)),
\r
5489 ('<memory>', ('allocator',)),
\r
5490 ('<queue>', ('queue', 'priority_queue',)),
\r
5491 ('<set>', ('set', 'multiset',)),
\r
5492 ('<stack>', ('stack',)),
\r
5493 ('<string>', ('char_traits', 'basic_string',)),
\r
5494 ('<tuple>', ('tuple',)),
\r
5495 ('<utility>', ('pair',)),
\r
5496 ('<vector>', ('vector',)),
\r
5499 # Note: std::hash is their hash, ::hash is our hash
\r
5500 ('<hash_map>', ('hash_map', 'hash_multimap',)),
\r
5501 ('<hash_set>', ('hash_set', 'hash_multiset',)),
\r
5502 ('<slist>', ('slist',)),
\r
5505 _RE_PATTERN_STRING = re.compile(r'\bstring\b')
\r
5507 _re_pattern_algorithm_header = []
\r
5508 for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
\r
5510 # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
\r
5512 _re_pattern_algorithm_header.append(
\r
5513 (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
\r
5517 _re_pattern_templates = []
\r
5518 for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
\r
5519 for _template in _templates:
\r
5520 _re_pattern_templates.append(
\r
5521 (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
\r
def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  The concept of a 'module' here is a as follows:
  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
  same 'module' if they are in the same directory.
  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
  to belong to the same module here.

  If the filename_cc contains a longer path than the filename_h, for example,
  '/absolute/path/to/base/sysinfo.cc', and this file would include
  'base/sysinfo.h', this function also produces the prefix needed to open the
  header.  This is used by the caller of this function to more robustly open
  the header file.  We don't have access to the real include paths in this
  context, so we need this guesswork here.

  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
  according to this implementation.  Because of this, this function gives
  some false positives.  This should be sufficiently rare in practice.

  Args:
    filename_cc: is the path for the .cc file
    filename_h: is the path for the header path

  Returns:
    Tuple with a bool and a string:
    bool: True if filename_cc and filename_h belong to the same module.
    string: the additional prefix needed to open the header file.
  """
  if not filename_cc.endswith('.cc'):
    return (False, '')
  filename_cc = filename_cc[:-len('.cc')]
  if filename_cc.endswith('_unittest'):
    filename_cc = filename_cc[:-len('_unittest')]
  elif filename_cc.endswith('_test'):
    filename_cc = filename_cc[:-len('_test')]
  filename_cc = filename_cc.replace('/public/', '/')
  filename_cc = filename_cc.replace('/internal/', '/')

  if not filename_h.endswith('.h'):
    return (False, '')
  filename_h = filename_h[:-len('.h')]
  if filename_h.endswith('-inl'):
    filename_h = filename_h[:-len('-inl')]
  filename_h = filename_h.replace('/public/', '/')
  filename_h = filename_h.replace('/internal/', '/')

  files_belong_to_same_module = filename_cc.endswith(filename_h)
  common_path = ''
  if files_belong_to_same_module:
    common_path = filename_cc[:-len(filename_h)]
  return files_belong_to_same_module, common_path
\r
def UpdateIncludeState(filename, include_dict, io=codecs):
  """Fill up the include_dict with new includes found from the file.

  Args:
    filename: the name of the header to read.
    include_dict: a dictionary in which the headers are inserted.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if a header was successfully added. False otherwise.
  """
  headerfile = None
  try:
    headerfile = io.open(filename, 'r', 'utf8', 'replace')
  except IOError:
    return False
  linenum = 0
  for line in headerfile:
    linenum += 1
    clean_line = CleanseComments(line)
    match = _RE_PATTERN_INCLUDE.search(clean_line)
    if match:
      include = match.group(2)
      # Keep the first line the header was seen on.
      include_dict.setdefault(include, linenum)
  return True
\r
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports for missing stl includes.

  This function will output warnings to make sure you are including the headers
  necessary for the stl containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
        injection.
  """
  required = {}  # A map of header name to linenumber and the template entity.
                 # Example of required: { '<functional>': (1219, 'less<>') }

  for linenum in xrange(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    if not line or line[0] == '#':
      continue

    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')

    for pattern, template, header in _re_pattern_algorithm_header:
      if pattern.search(line):
        required[header] = (linenum, template)

    # The following function is just a speed up, no semantics are changed.
    if not '<' in line:  # Reduces the cpu time usage by skipping lines.
      continue

    for pattern, template, header in _re_pattern_templates:
      if pattern.search(line):
        required[header] = (linenum, template)

  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's flatten the include_state include_list and copy it into a dictionary.
  include_dict = dict([item for sublist in include_state.include_list
                       for item in sublist])

  # Did we find the header for this file (if any) and successfully load it?
  header_found = False

  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()

  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can
  # be found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)

  # include_dict is modified during iteration, so we iterate over a copy of
  # the keys.  (list() is required: on Python 3 .keys() is a live view.)
  header_keys = list(include_dict.keys())
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_dict, io):
      header_found = True

  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if filename.endswith('.cc') and not header_found:
    return

  # All the lines have been processed, report the errors found.
  for required_header_unstripped in required:
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_dict:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)
\r
5702 _RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
\r
5705 def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
\r
5706 """Check that make_pair's template arguments are deduced.
\r
5708 G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
\r
5709 specified explicitly, and such use isn't intended in any case.
\r
5712 filename: The name of the current file.
\r
5713 clean_lines: A CleansedLines instance containing the file.
\r
5714 linenum: The number of the line to check.
\r
5715 error: The function to call with any errors found.
\r
5717 line = clean_lines.elided[linenum]
\r
5718 match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
\r
5720 error(filename, linenum, 'build/explicit_make_pair',
\r
5721 4, # 4 = high confidence
\r
5722 'For C++11-compatibility, omit template arguments from make_pair'
\r
5723 ' OR use pair directly OR if appropriate, construct a pair directly')
\r
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
  """Check that default lambda captures are not used.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # A lambda introducer specifies a default capture if it starts with "["
  # or if it starts with "[&" _not_ followed by an identifier.
  match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
  if match:
    # Found a potential error, check what comes after the lambda-introducer.
    # If it's not open parenthesis (for lambda-declarator) or open brace
    # (for compound-statement), it's not a lambda.
    line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1)))
    if pos >= 0 and Match(r'^\s*[{(]', line[pos:]):
      error(filename, linenum, 'build/c++11',
            4,  # 4 = high confidence
            'Default lambda captures are an unapproved C++ feature.')
\r
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "virtual" function-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Look for "virtual" on current line.
  line = clean_lines.elided[linenum]
  virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
  if not virtual: return

  # Ignore "virtual" keywords that are near access-specifiers.  These
  # are only used in class base-specifier and do not apply to member
  # functions.
  if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
      Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
    return

  # Ignore the "virtual" keyword from virtual base classes.  Usually
  # there is a column on the same line in these cases (virtual base
  # classes are rare in google3 because multiple inheritance is rare).
  if Match(r'^.*[^:]:[^:].*$', line): return

  # Look for the next opening parenthesis.  This is the start of the
  # parameter list (possibly on the next line shortly after virtual).
  # TODO(unknown): doesn't work if there are virtual functions with
  # decltype() or other things that use parentheses, but csearch suggests
  # that this is rare.
  end_col = -1
  end_line = -1
  start_col = len(virtual.group(2))
  for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
    line = clean_lines.elided[start_line][start_col:]
    parameter_list = Match(r'^([^(]*)\(', line)
    if parameter_list:
      # Match parentheses to find the end of the parameter list
      (_, end_line, end_col) = CloseExpression(
          clean_lines, start_line, start_col + len(parameter_list.group(1)))
      break
    start_col = 0

  if end_col < 0:
    return  # Couldn't find end of parameter list, give up

  # Look for "override" or "final" after the parameter list
  # (possibly on the next few lines).
  for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
    line = clean_lines.elided[i][end_col:]
    match = Search(r'\b(override|final)\b', line)
    if match:
      error(filename, linenum, 'readability/inheritance', 4,
            ('"virtual" is redundant since function is '
             'already declared as "%s"' % match.group(1)))

    # Set end_col to check whole lines after we are done with the
    # first line.
    end_col = 0
    if Search(r'[^\w]\s*$', line):
      break
\r
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "override" or "final" virt-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Look for closing parenthesis nearby.  We need one to confirm where
  # the declarator ends and where the virt-specifier starts to avoid
  # false positives.
  line = clean_lines.elided[linenum]
  declarator_end = line.rfind(')')
  if declarator_end >= 0:
    fragment = line[declarator_end:]
  else:
    # Declarator ended on a previous line; the whole current line is the
    # candidate virt-specifier fragment.
    if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
      fragment = line
    else:
      return

  # Check that at most one of "override" or "final" is present, not both
  if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
    error(filename, linenum, 'readability/inheritance', 4,
          ('"override" is redundant since function is '
           'already declared as "final"'))
\r
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
  """Checks that the new block is directly in a namespace.

  Args:
    nesting_state: The _NestingState object that contains info about our state.
    is_forward_declaration: If the class is a forward declared class.
  Returns:
    Whether or not the new block is directly in a namespace.
  """
  if is_forward_declaration:
    # A forward declaration only needs the innermost scope to be a namespace.
    if len(nesting_state.stack) >= 1 and (
        isinstance(nesting_state.stack[-1], _NamespaceInfo)):
      return True
    else:
      return False

  # For a real block, the block itself is on top of the stack, so look one
  # level down for the enclosing namespace.
  return (len(nesting_state.stack) > 1 and
          nesting_state.stack[-1].check_namespace_indentation and
          isinstance(nesting_state.stack[-2], _NamespaceInfo))
\r
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                    raw_lines_no_comments, linenum):
  """This method determines if we should apply our namespace indentation check.

  Args:
    nesting_state: The current nesting state.
    is_namespace_indent_item: If we just put a new class on the stack, True.
      If the top of the stack is not a class, or we did not recently
      add the class, False.
    raw_lines_no_comments: The lines without the comments.
    linenum: The current line number we are processing.

  Returns:
    True if we should apply our namespace indentation check. Currently, it
    only works for classes and namespaces inside of a namespace.
  """

  is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
                                                     linenum)

  if not (is_namespace_indent_item or is_forward_declaration):
    return False

  # If we are in a macro, we do not want to check the namespace indentation.
  if IsMacroDefinition(raw_lines_no_comments, linenum):
    return False

  return IsBlockInNameSpace(nesting_state, is_forward_declaration)
\r
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
                                    error):
  line = raw_lines_no_comments[linenum]
  if Match(r'^\s+', line):
    error(filename, linenum, 'runtime/indentation_namespace', 4,
          'Do not indent within a namespace')
\r
def ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions=None):
  """Processes a single line in the file.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of the file,
                 with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are inserted.
    function_state: A _FunctionState instance which counts function lines, etc.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  # None sentinel instead of a mutable [] default (callers passing a list or
  # nothing behave identically).
  if extra_check_functions is None:
    extra_check_functions = []
  raw_lines = clean_lines.raw_lines
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  nesting_state.Update(filename, clean_lines, line, error)
  CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                               error)
  if nesting_state.InAsmBlock(): return
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                nesting_state, error)
  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
  CheckForNonStandardConstructs(filename, clean_lines, line,
                                nesting_state, error)
  CheckVlogArguments(filename, clean_lines, line, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
  CheckRedundantVirtual(filename, clean_lines, line, error)
  CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
  for check_fn in extra_check_functions:
    check_fn(filename, clean_lines, line, error)
\r
def FlagCxx11Features(filename, clean_lines, linenum, error):
  """Flag those c++11 features that we only allow in certain places.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Flag unapproved C++11 headers.
  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
  if include and include.group(1) in ('cfenv',
                                      'condition_variable',
                                      'fenv.h',
                                      'future',
                                      'mutex',
                                      'thread',
                                      'chrono',
                                      'ratio',
                                      'regex',
                                      'system_error',
                                     ):
    error(filename, linenum, 'build/c++11', 5,
          ('<%s> is an unapproved C++11 header.') % include.group(1))

  # The only place where we need to worry about C++11 keywords and library
  # features in preprocessor directives is in macro definitions.
  if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return

  # These are classes and free functions.  The classes are always
  # mentioned as std::*, but we only catch the free functions if
  # they're not found by ADL.  They're alphabetical by header.
  for top_name in (
      # type_traits
      'alignment_of',
      'aligned_union',
      ):
    if Search(r'\bstd::%s\b' % top_name, line):
      error(filename, linenum, 'build/c++11', 5,
            ('std::%s is an unapproved C++11 class or function. Send c-style '
             'an example of where it would make your code more readable, and '
             'they may let you use it.') % top_name)
\r
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=None):
  """Performs lint checks and reports any errors to the given error function.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  # None sentinel instead of a mutable [] default (callers passing a list or
  # nothing behave identically).
  if extra_check_functions is None:
    extra_check_functions = []
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])

  include_state = _IncludeState()
  function_state = _FunctionState()
  nesting_state = NestingState()

  ResetNolintSuppressions()

  CheckForCopyright(filename, lines, error)

  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)

  if file_extension == 'h':
    CheckForHeaderGuard(filename, clean_lines, error)

  for line in xrange(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions)
    FlagCxx11Features(filename, clean_lines, line, error)
  nesting_state.CheckCompletedBlocks(filename, error)

  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)

  # Check that the .cc file has included its header if it exists.
  if file_extension == 'cc':
    CheckHeaderFileIncluded(filename, include_state, error)

  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForBadCharacters(filename, lines, error)

  CheckForNewlineAtEOF(filename, lines, error)
\r
def ProcessConfigOverrides(filename):
  """Loads the configuration files and processes the config overrides.

  Walks up from |filename| toward the filesystem root, reading every
  CPPLINT.cfg found on the way (nearest file has the highest priority),
  until a config says 'set noparent' or the root is reached.

  Args:
    filename: The name of the file being processed by the linter.

  Returns:
    False if the current |filename| should not be processed further.
  """
  abs_filename = os.path.abspath(filename)
  cfg_filters = []
  keep_looking = True
  while keep_looking:
    abs_path, base_name = os.path.split(abs_filename)
    if not base_name:
      break  # Reached the root directory.

    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
    abs_filename = abs_path
    if not os.path.isfile(cfg_file):
      continue

    try:
      with open(cfg_file) as file_handle:
        for line in file_handle:
          line, _, _ = line.partition('#')  # Remove comments.
          if not line.strip():
            continue

          name, _, val = line.partition('=')
          name = name.strip()
          val = val.strip()
          if name == 'set noparent':
            keep_looking = False
          elif name == 'filter':
            cfg_filters.append(val)
          elif name == 'exclude_files':
            # When matching exclude_files pattern, use the base_name of
            # the current file name or the directory name we are processing.
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
            # file's "exclude_files" filter is meant to be checked against "bar"
            # and not "baz" nor "bar/baz.cc".
            if base_name:
              pattern = re.compile(val)
              if pattern.match(base_name):
                sys.stderr.write('Ignoring "%s": file excluded by "%s". '
                                 'File path component "%s" matches '
                                 'pattern "%s"\n' %
                                 (filename, cfg_file, base_name, val))
                return False
          elif name == 'linelength':
            global _line_length
            try:
              _line_length = int(val)
            except ValueError:
              sys.stderr.write('Line length must be numeric.')
          else:
            sys.stderr.write(
                'Invalid configuration option (%s) in file %s\n' %
                (name, cfg_file))

    except IOError:
      sys.stderr.write(
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
      keep_looking = False

  # Apply all the accumulated filters in reverse order (top-level directory
  # config options having the least priority).
  # NOTE: renamed from 'filter' to avoid shadowing the builtin of that name.
  for cfg_filter in reversed(cfg_filters):
    _AddFilters(cfg_filter)

  return True
\r
def ProcessFile(filename, vlevel, extra_check_functions=None):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.

    vlevel: The level of errors to report.  Every error of confidence
    >= verbose_level will be reported.  0 is a good default.

    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  # Use None instead of a mutable [] default (shared across calls otherwise).
  if extra_check_functions is None:
    extra_check_functions = []

  _SetVerboseLevel(vlevel)

  # A config file may exclude this file entirely.
  if not ProcessConfigOverrides(filename):
    return

  lf_lines = []
  crlf_lines = []
  try:
    # Support the UNIX convention of using "-" for stdin.  Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')

    # Remove trailing '\r'.
    # The -1 accounts for the extra trailing blank line we get from split()
    for linenum in range(len(lines) - 1):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        crlf_lines.append(linenum + 1)
      else:
        lf_lines.append(linenum + 1)

  except IOError:
    sys.stderr.write(
        "Skipping input '%s': Can't open for reading\n" % filename)
    return

  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]

  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if filename != '-' and file_extension not in _valid_extensions:
    sys.stderr.write('Ignoring %s; not a valid file name '
                     '(%s)\n' % (filename, ', '.join(_valid_extensions)))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)

    # If end-of-line sequences are a mix of LF and CR-LF, issue
    # warnings on the lines with CR.
    #
    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
    # since critique can handle these just fine, and the style guide
    # doesn't dictate a particular end of line sequence.
    #
    # We can't depend on os.linesep to determine what the desired
    # end-of-line sequence should be, since that will return the
    # server-side end-of-line sequence.
    if lf_lines and crlf_lines:
      # Warn on every line with CR.  An alternative approach might be to
      # check whether the file is mostly CRLF or just LF, and warn on the
      # minority, we bias toward LF here since most tools prefer LF.
      for linenum in crlf_lines:
        Error(filename, linenum, 'whitespace/newline', 1,
              'Unexpected \\r (^M) found; better to use only \\n')

  sys.stderr.write('Done processing %s\n' % filename)
\r
def PrintUsage(message):
  """Prints a brief usage string and exits, optionally with an error message.

  Args:
    message: The optional error message.
  """
  sys.stderr.write(_USAGE)
  # Exit non-zero (with the message) on error, zero on a plain --help.
  if message:
    sys.exit('\nFATAL ERROR: ' + message)
  else:
    sys.exit(1)
\r
def PrintCategories():
  """Prints a list of all the error-categories used by error messages.

  These are the categories used to filter messages via --filter.
  """
  sys.stderr.write(''.join('  %s\n' % cat for cat in _ERROR_CATEGORIES))
  # This is an informational listing (triggered by an empty --filter), so
  # exit successfully after printing it.
  sys.exit(0)
\r
def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects.

  Args:
    args: The command line arguments:

  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'linelength=',
                                                 'extensions='])
  except getopt.GetoptError:
    PrintUsage('Invalid arguments.')

  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  # Default filters for Tizen Platform
  filters = '-build/class,-build/c++11,' + \
            '-build/deprecated,-build/endif_comment,-build/explicit_make_pair,-build/forward_decl,' + \
            '-build/header_guard,-build/include,-build/include_alpha,-build/include_order,' + \
            '-build/include_what_you_use,-build/namespaces,-build/printf_format,-build/storage_class,' + \
            '-legal/copyright,-readability/alt_tokens,-readability/casting,-readability/check,' + \
            '-readability/constructors,-readability/fn_size,-readability/function,-readability/inheritance,-readability/multiline_comment,' + \
            '-readability/multiline_string,-readability/namespace,-readability/nolint,-readability/nul,-readability/strings,' + \
            '-readability/todo,-readability/utf8,-runtime/arrays,-runtime/casting,-runtime/explicit,' + \
            '-runtime/int,-runtime/init,-runtime/invalid_increment,-runtime/member_string_references,-runtime/memset,' + \
            '-runtime/indentation_namespace,-runtime/operator,-runtime/printf,-runtime/printf_format,-runtime/references,' + \
            '-runtime/string,-runtime/threadsafe_fn,-runtime/vlog,-whitespace/comments,' + \
            '-whitespace/empty_conditional_body,-whitespace/empty_loop_body,-whitespace/ending_newline,-whitespace/forcolon,' + \
            '-whitespace/indent,-whitespace/line_length,-whitespace/newline,-whitespace/semicolon,-whitespace/tab,' + \
            '-whitespace/todo'
  counting_style = ''

  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse'):
        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
      output_format = val
    elif opt == '--verbose':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      if not filters:
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be digits.')
    elif opt == '--extensions':
      global _valid_extensions
      try:
        _valid_extensions = set(val.split(','))
      except ValueError:
        PrintUsage('Extensions must be comma seperated list.')

  if not filenames:
    PrintUsage('No files were specified.')

  _SetOutputFormat(output_format)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  return filenames
\r
def main():
  """Script entry point: lint every file named on the command line."""
  filenames = ParseArguments(sys.argv[1:])

  # Change stderr to write with replacement characters so we don't die
  # if we try to print something containing non-ASCII characters.
  sys.stderr = codecs.StreamReaderWriter(sys.stderr,
                                         codecs.getreader('utf8'),
                                         codecs.getwriter('utf8'),
                                         'replace')

  _cpplint_state.ResetErrorCounts()
  for filename in filenames:
    ProcessFile(filename, _cpplint_state.verbose_level)
  _cpplint_state.PrintErrorCounts()

  # Exit status 1 when any lint error was reported, 0 otherwise.
  sys.exit(_cpplint_state.error_count > 0)


if __name__ == '__main__':
  main()
\r