3 # Copyright 2007 The Closure Linter Authors. All Rights Reserved.
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
9 # http://www.apache.org/licenses/LICENSE-2.0
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
17 """Main class responsible for automatically fixing simple style violations."""
19 __author__ = 'robbyw@google.com (Robert Walker)'
23 import gflags as flags
24 from closure_linter import errors
25 from closure_linter import javascriptstatetracker
26 from closure_linter import javascripttokens
27 from closure_linter import requireprovidesorter
28 from closure_linter import tokenutil
29 from closure_linter.common import errorhandler
# Short aliases for the token class and token-type enum used throughout.
32 Token = javascripttokens.JavaScriptToken
33 Type = javascripttokens.JavaScriptTokenType
# NOTE(review): this listing shows no `import re` line, but `re.compile` is
# used below — presumably elided from the listing; verify against the file.
# Matches the tail of a JsDoc flag type, e.g. the optional closing brace and
# trailing whitespace.
35 END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$')
37 # Regex to represent common mistake inverting author name and email as
38 # @author User Name (user@company)
# NOTE(review): the listing skips internal line 40 (the `name` group) —
# the pattern as shown is incomplete; confirm against the original file.
39 INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)'
41 '(?P<whitespace_after_name>\s+)'
43 '(?P<email>[^\s]+@[^)\s]+)'
45 '(?P<trailing_characters>.*)')
# Command-line flag read later via FLAGS.disable_indentation_fixing.
# NOTE(review): `FLAGS = flags.FLAGS` is not visible in this listing —
# presumably elided; verify it exists near these definitions.
48 flags.DEFINE_boolean('disable_indentation_fixing', False,
49 'Whether to disable automatic fixing of indentation.')
# Error handler that rewrites tokens in place to fix simple style violations;
# the corrected file is written out in FinishFile().
52 class ErrorFixer(errorhandler.ErrorHandler):
53 """Object that fixes simple style errors."""
55 def __init__(self, external_file=None):
56 """Initialize the error fixer.
59 external_file: If included, all output will be directed to this file
60 instead of overwriting the files the errors are found in.
62 errorhandler.ErrorHandler.__init__(self)
64 self._file_name = None
65 self._file_token = None
66 self._external_file = external_file
68 def HandleFile(self, filename, first_token):
69 """Notifies this ErrorPrinter that subsequent errors are in filename.
72 filename: The name of the file about to be checked.
73 first_token: The first token in the file.
75 self._file_name = filename
76 self._file_token = first_token
77 self._file_fix_count = 0
78 self._file_changed_lines = set()
80 def _AddFix(self, tokens):
81 """Adds the fix to the internal count.
84 tokens: The token or sequence of tokens changed to fix an error.
86 self._file_fix_count += 1
87 if hasattr(tokens, 'line_number'):
88 self._file_changed_lines.add(tokens.line_number)
91 self._file_changed_lines.add(token.line_number)
93 def HandleError(self, error):
94 """Attempts to fix the error.
97 error: The error object
# NOTE(review): the listing elides the docstring terminator and the local
# bindings (presumably `code = error.code` and `token = error.token`) that
# the branches below read — confirm against the original file.
# Dispatch on the error code; each branch mutates the token stream in place
# and records the change via self._AddFix (some _AddFix calls are elided
# from this listing).
# Rewrite `null|T` / `T|null` JsDoc types to the `?T` shorthand.
102 if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
103 iterator = token.attached_object.type_start_token
104 if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
105 iterator = iterator.next
107 leading_space = len(iterator.string) - len(iterator.string.lstrip())
108 iterator.string = '%s?%s' % (' ' * leading_space,
109 iterator.string.lstrip())
111 # Cover the no outer brace case where the end token is part of the type.
112 while iterator and iterator != token.attached_object.type_end_token.next:
113 iterator.string = iterator.string.replace(
114 'null|', '').replace('|null', '')
115 iterator = iterator.next
117 # Create a new flag object with updated type info.
118 token.attached_object = javascriptstatetracker.JsDocFlag(token)
# Append '=' to the type of an optional parameter (e.g. {string=}).
121 elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
122 iterator = token.attached_object.type_end_token
123 if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
124 iterator = iterator.previous
126 ending_space = len(iterator.string) - len(iterator.string.rstrip())
127 iterator.string = '%s=%s' % (iterator.string.rstrip(),
130 # Create a new flag object with updated type info.
131 token.attached_object = javascriptstatetracker.JsDocFlag(token)
# Insert a missing semicolon after the flagged token and clear the
# implied-semicolon markers so the token stream stays consistent.
134 elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
135 errors.MISSING_SEMICOLON):
136 semicolon_token = Token(';', Type.SEMICOLON, token.line,
138 tokenutil.InsertTokenAfter(semicolon_token, token)
139 token.metadata.is_implied_semicolon = False
140 semicolon_token.metadata.is_implied_semicolon = False
# Redundant punctuation: simply drop the offending token.
143 elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
144 errors.REDUNDANT_SEMICOLON,
145 errors.COMMA_AT_END_OF_LITERAL):
146 tokenutil.DeleteToken(token)
# Only '@returns' -> '@return' is auto-fixable among invalid tags.
149 elif code == errors.INVALID_JSDOC_TAG:
150 if token.string == '@returns':
151 token.string = '@return'
154 elif code == errors.FILE_MISSING_NEWLINE:
155 # This error is fixed implicitly by the way we restore the file
# Insert or substitute a single space at the reported position.
158 elif code == errors.MISSING_SPACE:
160 if error.position.IsAtBeginning():
161 tokenutil.InsertSpaceTokenAfter(token.previous)
162 elif error.position.IsAtEnd(token.string):
163 tokenutil.InsertSpaceTokenAfter(token)
165 token.string = error.position.Set(token.string, ' ')
168 elif code == errors.EXTRA_SPACE:
170 token.string = error.position.Set(token.string, '')
173 elif code == errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER:
174 token.string = error.position.Set(token.string, '.')
177 elif code == errors.MISSING_LINE:
178 if error.position.IsAtBeginning():
179 tokenutil.InsertBlankLineAfter(token.previous)
181 tokenutil.InsertBlankLineAfter(token)
184 elif code == errors.EXTRA_LINE:
185 tokenutil.DeleteToken(token)
# Delete or insert blank lines until the expected count is reached.
# error.fix_data carries the number of lines to add/remove.
188 elif code == errors.WRONG_BLANK_LINE_COUNT:
189 if not token.previous:
190 # TODO(user): Add an insertBefore method to tokenutil.
193 num_lines = error.fix_data
194 should_delete = False
# NOTE(review): lines deciding should_delete (presumably when num_lines
# is negative) are elided from this listing.
200 for i in xrange(1, num_lines + 1):
202 # TODO(user): DeleteToken should update line numbers.
203 tokenutil.DeleteToken(token.previous)
205 tokenutil.InsertBlankLineAfter(token.previous)
# Replace a double-quoted string's delimiters with single quotes.
208 elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
209 end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
211 single_quote_start = Token(
212 "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
213 single_quote_end = Token(
214 "'", Type.SINGLE_QUOTE_STRING_START, end_quote.line,
217 tokenutil.InsertTokenAfter(single_quote_start, token)
218 tokenutil.InsertTokenAfter(single_quote_end, end_quote)
219 tokenutil.DeleteToken(token)
220 tokenutil.DeleteToken(end_quote)
221 self._AddFix([token, end_quote])
# Wrap an un-braced JsDoc type in {}: split off surrounding whitespace,
# then insert DOC_START_BRACE / DOC_END_BRACE tokens as needed.
223 elif code == errors.MISSING_BRACES_AROUND_TYPE:
225 start_token = token.attached_object.type_start_token
227 if start_token.type != Type.DOC_START_BRACE:
229 len(start_token.string) - len(start_token.string.lstrip()))
231 start_token = tokenutil.SplitToken(start_token, leading_space)
232 # Fix case where start and end token were the same.
233 if token.attached_object.type_end_token == start_token.previous:
234 token.attached_object.type_end_token = start_token
236 new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
237 start_token.line_number)
238 tokenutil.InsertTokenAfter(new_token, start_token.previous)
239 token.attached_object.type_start_token = new_token
240 fixed_tokens.append(new_token)
242 end_token = token.attached_object.type_end_token
243 if end_token.type != Type.DOC_END_BRACE:
244 # If the start token was a brace, the end token will be a
245 # FLAG_ENDING_TYPE token, if there wasn't a starting brace then
246 # the end token is the last token of the actual type.
247 last_type = end_token
249 last_type = end_token.previous
251 while last_type.string.isspace():
252 last_type = last_type.previous
254 # If there was no starting brace then a lone end brace wouldn't have
255 # been type end token. Now that we've added any missing start brace,
256 # see if the last effective type token was an end brace.
257 if last_type.type != Type.DOC_END_BRACE:
258 trailing_space = (len(last_type.string) -
259 len(last_type.string.rstrip()))
261 tokenutil.SplitToken(last_type,
262 len(last_type.string) - trailing_space)
264 new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
265 last_type.line_number)
266 tokenutil.InsertTokenAfter(new_token, last_type)
267 token.attached_object.type_end_token = new_token
268 fixed_tokens.append(new_token)
270 self._AddFix(fixed_tokens)
# Delegate require/provide reordering to the shared sorter helper.
272 elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
273 require_start_token = error.fix_data
274 sorter = requireprovidesorter.RequireProvideSorter()
275 sorter.FixRequires(require_start_token)
277 self._AddFix(require_start_token)
279 elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
280 provide_start_token = error.fix_data
281 sorter = requireprovidesorter.RequireProvideSorter()
282 sorter.FixProvides(provide_start_token)
284 self._AddFix(provide_start_token)
286 elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
287 if token.previous.string == '{' and token.next.string == '}':
288 tokenutil.DeleteToken(token.previous)
289 tokenutil.DeleteToken(token.next)
290 self._AddFix([token])
# Swap inverted "@author Name (email)" ordering into the expected form.
292 elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
293 match = INVERTED_AUTHOR_SPEC.match(token.string)
295 token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
296 match.group('email'),
297 match.group('whitespace_after_name'),
299 match.group('trailing_characters'))
# Rewrite the leading whitespace of the line to the expected width.
302 elif (code == errors.WRONG_INDENTATION and
303 not FLAGS.disable_indentation_fixing):
304 token = tokenutil.GetFirstTokenInSameLine(token)
305 actual = error.position.start
306 expected = error.position.length
308 if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
309 token.string = token.string.lstrip() + (' ' * expected)
310 self._AddFix([token])
312 # We need to add indentation.
313 new_token = Token(' ' * expected, Type.WHITESPACE,
314 token.line, token.line_number)
315 # Note that we'll never need to add indentation at the first line,
316 # since it will always not be indented. Therefore it's safe to assume
317 # token.previous exists.
318 tokenutil.InsertTokenAfter(new_token, token.previous)
319 self._AddFix([token])
321 elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
322 errors.MISSING_END_OF_SCOPE_COMMENT]:
323 # Only fix cases where }); is found with no trailing content on the line
324 # other than a comment. Value of 'token' is set to } for this error.
325 if (token.type == Type.END_BLOCK and
326 token.next.type == Type.END_PAREN and
327 token.next.next.type == Type.SEMICOLON):
328 current_token = token.next.next.next
# NOTE(review): initialization of removed_tokens (presumably
# `removed_tokens = []`) is elided from this listing.
330 while current_token and current_token.line_number == token.line_number:
331 if current_token.IsAnyType(Type.WHITESPACE,
332 Type.START_SINGLE_LINE_COMMENT,
334 removed_tokens.append(current_token)
335 current_token = current_token.next
340 tokenutil.DeleteTokens(removed_tokens[0], len(removed_tokens))
# Build and insert the canonical "  // goog.scope" trailing comment.
342 whitespace_token = Token(' ', Type.WHITESPACE, token.line,
344 start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
345 token.line, token.line_number)
346 comment_token = Token(' goog.scope', Type.COMMENT, token.line,
348 insertion_tokens = [whitespace_token, start_comment_token,
351 tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
352 self._AddFix(removed_tokens + insertion_tokens)
# Remove the whole goog.provide/goog.require line that is unnecessary.
354 elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
355 tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
356 tokenutil.DeleteTokens(tokens_in_line[0], len(tokens_in_line))
357 self._AddFix(tokens_in_line)
# Insert missing goog.provide/goog.require statements; fix_data is a
# (missing_namespaces, need_blank_line) pair.
359 elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
360 is_provide = code == errors.MISSING_GOOG_PROVIDE
361 is_require = code == errors.MISSING_GOOG_REQUIRE
363 missing_namespaces = error.fix_data[0]
364 need_blank_line = error.fix_data[1]
366 if need_blank_line is None:
367 # TODO(user): This happens when there are no existing
368 # goog.provide or goog.require statements to position new statements
369 # relative to. Consider handling this case with a heuristic.
372 insert_location = token.previous
374 # If inserting a missing require with no existing requires, insert a
376 if need_blank_line and is_require:
377 tokenutil.InsertBlankLineAfter(insert_location)
378 insert_location = insert_location.next
380 for missing_namespace in missing_namespaces:
381 new_tokens = self._GetNewRequireOrProvideTokens(
382 is_provide, missing_namespace, insert_location.line_number + 1)
383 tokenutil.InsertLineAfter(insert_location, new_tokens)
384 insert_location = new_tokens[-1]
385 self._AddFix(new_tokens)
387 # If inserting a missing provide with no existing provides, insert a
389 if need_blank_line and is_provide:
390 tokenutil.InsertBlankLineAfter(insert_location)
392 def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number):
393 """Returns a list of tokens to create a goog.require/provide statement.
396 is_provide: True if getting tokens for a provide, False for require.
397 namespace: The required or provided namespaces to get tokens for.
398 line_number: The line number the new require or provide statement will be
402 Tokens to create a new goog.require or goog.provide statement.
404 string = 'goog.require'
406 string = 'goog.provide'
407 line_text = string + '(\'' + namespace + '\');\n'
409 Token(string, Type.IDENTIFIER, line_text, line_number),
410 Token('(', Type.START_PAREN, line_text, line_number),
411 Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number),
412 Token(namespace, Type.STRING_TEXT, line_text, line_number),
413 Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number),
414 Token(')', Type.END_PAREN, line_text, line_number),
415 Token(';', Type.SEMICOLON, line_text, line_number)
# NOTE(review): this method is truncated in the listing — the tail
# (presumably f.close() when we opened the file ourselves) is not visible.
418 def FinishFile(self):
419 """Called when the current file has finished style checking.
421 Used to go back and fix any errors in the file.
# Only rewrite the file when at least one fix was recorded.
423 if self._file_fix_count:
424 f = self._external_file
# If no external file was supplied, overwrite the original in place
# (the `if not f:` guard appears elided from this listing).
426 print 'Fixed %d errors in %s' % (self._file_fix_count, self._file_name)
427 f = open(self._file_name, 'w')
# Walk the (now mutated) token list and serialize it back out.
429 token = self._file_token
432 f.write(token.string)
433 char_count += len(token.string)
# At end of line: warn when a line we changed now exceeds 80 columns
# (the newline write and char_count reset appear elided).
435 if token.IsLastInLine():
437 if char_count > 80 and token.line_number in self._file_changed_lines:
438 print 'WARNING: Line %d of %s is now longer than 80 characters.' % (
439 token.line_number, self._file_name)
445 if not self._external_file:
446 # Close the file if we created it