From 946eabd18f623b438e17164b14c98066f7054168 Mon Sep 17 00:00:00 2001
From: Yosuke Furukawa
Date: Fri, 16 Jan 2015 02:53:46 +0900
Subject: [PATCH] tools: update closure linter to 2.3.17

PR-URL: https://github.com/iojs/io.js/pull/449
Reviewed-By: Ben Noordhuis
---
 tools/closure_linter/AUTHORS | 6 +
 tools/closure_linter/LICENSE | 176 ++
 tools/closure_linter/PKG-INFO | 10 -
 .../build/lib/closure_linter/__init__.py | 16 +
 .../build/lib/closure_linter/aliaspass.py | 248 ++
 .../build/lib/closure_linter/aliaspass_test.py | 191 ++
 .../build/lib/closure_linter/checker.py | 108 +
 .../build/lib/closure_linter/checkerbase.py | 192 ++
 .../lib/closure_linter/closurizednamespacesinfo.py | 578 +++++
 .../closurizednamespacesinfo_test.py | 873 +++++++
 .../build/lib/closure_linter/common/__init__.py | 16 +
 .../build/lib/closure_linter/common/error.py | 65 +
 .../lib/closure_linter/common/erroraccumulator.py | 46 +
 .../lib/closure_linter/common/errorhandler.py | 61 +
 .../build/lib/closure_linter/common/erroroutput.py | 52 +
 .../lib/closure_linter/common/filetestcase.py | 115 +
 .../build/lib/closure_linter/common/htmlutil.py | 170 ++
 .../build/lib/closure_linter/common/lintrunner.py | 39 +
 .../build/lib/closure_linter/common/matcher.py | 60 +
 .../build/lib/closure_linter/common/position.py | 126 +
 .../lib/closure_linter/common/simplefileflags.py | 190 ++
 .../build/lib/closure_linter/common/tokenizer.py | 185 ++
 .../build/lib/closure_linter/common/tokens.py | 145 ++
 .../build/lib/closure_linter/common/tokens_test.py | 113 +
 .../build/lib/closure_linter/ecmalintrules.py | 844 +++++++
 .../build/lib/closure_linter/ecmametadatapass.py | 574 +++++
 .../build/lib/closure_linter/error_check.py | 95 +
 .../build/lib/closure_linter/error_fixer.py | 618 +++++
 .../build/lib/closure_linter/error_fixer_test.py | 57 +
 .../build/lib/closure_linter/errorrecord.py | 66 +
 .../build/lib/closure_linter/errorrules.py | 72 +
 .../build/lib/closure_linter/errorrules_test.py | 117 +
 .../build/lib/closure_linter/errors.py | 154 ++
 .../build/lib/closure_linter/fixjsstyle.py | 66 +
 .../build/lib/closure_linter/fixjsstyle_test.py | 615 +++++
 .../build/lib/closure_linter/full_test.py | 121 +
 .../build/lib/closure_linter/gjslint.py | 319 +++
 .../build/lib/closure_linter/indentation.py | 617 +++++
 .../lib/closure_linter/javascriptlintrules.py | 754 ++++++
 .../lib/closure_linter/javascriptstatetracker.py | 150 ++
 .../closure_linter/javascriptstatetracker_test.py | 278 +++
 .../lib/closure_linter/javascripttokenizer.py | 463 ++++
 .../build/lib/closure_linter/javascripttokens.py | 153 ++
 .../build/lib/closure_linter/not_strict_test.py | 74 +
 .../lib/closure_linter/requireprovidesorter.py | 329 +++
 .../closure_linter/requireprovidesorter_test.py | 155 ++
 .../build/lib/closure_linter/runner.py | 198 ++
 .../build/lib/closure_linter/runner_test.py | 101 +
 .../build/lib/closure_linter/scopeutil.py | 206 ++
 .../build/lib/closure_linter/scopeutil_test.py | 222 ++
 .../build/lib/closure_linter/statetracker.py | 1294 ++++++++++
 .../build/lib/closure_linter/statetracker_test.py | 123 +
 .../build/lib/closure_linter/strict_test.py | 67 +
 .../build/lib/closure_linter/testutil.py | 94 +
 .../build/lib/closure_linter/tokenutil.py | 697 ++++++
 .../build/lib/closure_linter/tokenutil_test.py | 297 +++
 .../build/lib/closure_linter/typeannotation.py | 401 ++++
 .../lib/closure_linter/typeannotation_test.py | 232 ++
 .../closure_linter.egg-info/PKG-INFO | 2 +-
 .../closure_linter.egg-info/SOURCES.txt | 26 +-
 tools/closure_linter/closure_linter/__init__.py | 15 +
tools/closure_linter/closure_linter/aliaspass.py | 248 ++ .../closure_linter/aliaspass_test.py | 191 ++ tools/closure_linter/closure_linter/checker.py | 98 +- tools/closure_linter/closure_linter/checkerbase.py | 197 +- .../closure_linter/closurizednamespacesinfo.py | 578 +++++ .../closurizednamespacesinfo_test.py | 873 +++++++ .../closure_linter/common/__init__.py | 15 + .../closure_linter/closure_linter/common/error.py | 2 +- .../closure_linter/common/erroraccumulator.py | 2 +- .../closure_linter/common/erroroutput.py | 52 + .../closure_linter/common/errorprinter.py | 203 -- .../closure_linter/common/filetestcase.py | 32 +- .../closure_linter/common/tokenizer.py | 3 +- .../closure_linter/closure_linter/common/tokens.py | 22 +- .../closure_linter/common/tokens_test.py | 113 + .../closure_linter/closure_linter/ecmalintrules.py | 660 +++--- .../closure_linter/ecmametadatapass.py | 93 +- tools/closure_linter/closure_linter/error_check.py | 95 + tools/closure_linter/closure_linter/error_fixer.py | 434 +++- .../closure_linter/error_fixer_test.py | 57 + tools/closure_linter/closure_linter/errorrecord.py | 66 + tools/closure_linter/closure_linter/errorrules.py | 40 +- .../closure_linter/errorrules_test.py | 117 + tools/closure_linter/closure_linter/errors.py | 33 +- tools/closure_linter/closure_linter/fixjsstyle.py | 29 +- .../closure_linter/fixjsstyle_test.py | 588 ++++- tools/closure_linter/closure_linter/full_test.py | 38 +- tools/closure_linter/closure_linter/gjslint.py | 229 +- tools/closure_linter/closure_linter/indentation.py | 168 +- .../closure_linter/javascriptlintrules.py | 721 ++++-- .../closure_linter/javascriptstatetracker.py | 176 +- .../closure_linter/javascriptstatetracker_test.py | 289 ++- .../closure_linter/javascripttokenizer.py | 380 +-- .../closure_linter/javascripttokens.py | 12 +- .../closure_linter/not_strict_test.py | 74 + .../closure_linter/requireprovidesorter.py | 329 +++ .../closure_linter/requireprovidesorter_test.py | 155 ++ tools/closure_linter/closure_linter/runner.py | 198 ++ tools/closure_linter/closure_linter/runner_test.py | 101 + tools/closure_linter/closure_linter/scopeutil.py | 206 ++ .../closure_linter/scopeutil_test.py | 222 ++ .../closure_linter/closure_linter/statetracker.py | 488 +++- .../closure_linter/statetracker_test.py | 123 + tools/closure_linter/closure_linter/strict_test.py | 67 + .../closure_linter/testdata/all_js_wrapped.js | 5 + .../closure_linter/testdata/blank_lines.js | 104 + .../closure_linter/closure_linter/testdata/bugs.js | 43 + .../closure_linter/testdata/empty_file.js | 0 .../closure_linter/testdata/ends_with_block.js | 19 + .../closure_linter/testdata/externs.js | 34 + .../closure_linter/testdata/externs_jsdoc.js | 37 + .../closure_linter/testdata/file_level_comment.js | 13 + .../testdata/fixjsstyle.html.in.html | 52 + .../testdata/fixjsstyle.html.out.html | 51 + .../closure_linter/testdata/fixjsstyle.in.js | 293 +++ .../testdata/fixjsstyle.indentation.out.js | 465 ++++ .../testdata/fixjsstyle.oplineend.in.js | 21 + .../testdata/fixjsstyle.oplineend.out.js | 21 + .../closure_linter/testdata/fixjsstyle.out.js | 310 +++ .../closure_linter/testdata/goog_scope.js | 63 + .../closure_linter/testdata/html_parse_error.html | 32 + .../closure_linter/testdata/indentation.js | 465 ++++ .../closure_linter/testdata/interface.js | 89 + .../closure_linter/testdata/jsdoc.js | 1455 ++++++++++++ .../closure_linter/testdata/limited_doc_checks.js | 29 + .../closure_linter/testdata/minimal.js | 1 + .../closure_linter/testdata/not_strict.js | 42 + 
.../closure_linter/testdata/other.js | 459 ++++ .../closure_linter/testdata/provide_blank.js | 29 + .../closure_linter/testdata/provide_extra.js | 39 + .../closure_linter/testdata/provide_missing.js | 40 + .../closure_linter/testdata/require_alias.js | 14 + .../closure_linter/testdata/require_all_caps.js | 30 + .../closure_linter/testdata/require_blank.js | 29 + .../closure_linter/testdata/require_extra.js | 35 + .../closure_linter/testdata/require_function.js | 22 + .../testdata/require_function_missing.js | 24 + .../testdata/require_function_through_both.js | 23 + .../testdata/require_function_through_namespace.js | 22 + .../closure_linter/testdata/require_interface.js | 31 + .../testdata/require_interface_alias.js | 34 + .../testdata/require_interface_base.js | 31 + .../closure_linter/testdata/require_lower_case.js | 30 + .../closure_linter/testdata/require_missing.js | 40 + .../closure_linter/testdata/require_numeric.js | 30 + .../testdata/require_provide_blank.js | 31 + .../testdata/require_provide_missing.js | 76 + .../closure_linter/testdata/require_provide_ok.js | 214 ++ .../closure_linter/testdata/semicolon_missing.js | 18 + .../closure_linter/testdata/simple.html | 33 + .../closure_linter/testdata/spaces.js | 354 +++ .../closure_linter/testdata/tokenizer.js | 78 + .../closure_linter/testdata/unparseable.js | 44 + .../testdata/unused_local_variables.js | 88 + .../testdata/unused_private_members.js | 205 ++ .../closure_linter/testdata/utf8.html | 26 + tools/closure_linter/closure_linter/testutil.py | 94 + tools/closure_linter/closure_linter/tokenutil.py | 460 +++- .../closure_linter/tokenutil_test.py | 297 +++ .../closure_linter/typeannotation.py | 401 ++++ .../closure_linter/typeannotation_test.py | 232 ++ .../dist/closure_linter-2.3.17-py2.7.egg | Bin 0 -> 315656 bytes tools/closure_linter/gflags.py | 2489 -------------------- tools/closure_linter/setup.cfg | 5 - tools/closure_linter/setup.py | 2 +- 166 files changed, 29049 insertions(+), 3968 deletions(-) create mode 100644 tools/closure_linter/AUTHORS create mode 100644 tools/closure_linter/LICENSE delete mode 100644 tools/closure_linter/PKG-INFO create mode 100644 tools/closure_linter/build/lib/closure_linter/__init__.py create mode 100644 tools/closure_linter/build/lib/closure_linter/aliaspass.py create mode 100644 tools/closure_linter/build/lib/closure_linter/aliaspass_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/checker.py create mode 100644 tools/closure_linter/build/lib/closure_linter/checkerbase.py create mode 100644 tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py create mode 100644 tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/common/__init__.py create mode 100644 tools/closure_linter/build/lib/closure_linter/common/error.py create mode 100644 tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py create mode 100644 tools/closure_linter/build/lib/closure_linter/common/errorhandler.py create mode 100644 tools/closure_linter/build/lib/closure_linter/common/erroroutput.py create mode 100644 tools/closure_linter/build/lib/closure_linter/common/filetestcase.py create mode 100644 tools/closure_linter/build/lib/closure_linter/common/htmlutil.py create mode 100644 tools/closure_linter/build/lib/closure_linter/common/lintrunner.py create mode 100644 tools/closure_linter/build/lib/closure_linter/common/matcher.py create mode 100644 
tools/closure_linter/build/lib/closure_linter/common/position.py create mode 100644 tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py create mode 100644 tools/closure_linter/build/lib/closure_linter/common/tokenizer.py create mode 100644 tools/closure_linter/build/lib/closure_linter/common/tokens.py create mode 100644 tools/closure_linter/build/lib/closure_linter/common/tokens_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/ecmalintrules.py create mode 100644 tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py create mode 100644 tools/closure_linter/build/lib/closure_linter/error_check.py create mode 100644 tools/closure_linter/build/lib/closure_linter/error_fixer.py create mode 100644 tools/closure_linter/build/lib/closure_linter/error_fixer_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/errorrecord.py create mode 100644 tools/closure_linter/build/lib/closure_linter/errorrules.py create mode 100644 tools/closure_linter/build/lib/closure_linter/errorrules_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/errors.py create mode 100644 tools/closure_linter/build/lib/closure_linter/fixjsstyle.py create mode 100644 tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/full_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/gjslint.py create mode 100644 tools/closure_linter/build/lib/closure_linter/indentation.py create mode 100644 tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py create mode 100644 tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py create mode 100644 tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py create mode 100644 tools/closure_linter/build/lib/closure_linter/javascripttokens.py create mode 100644 tools/closure_linter/build/lib/closure_linter/not_strict_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py create mode 100644 tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/runner.py create mode 100644 tools/closure_linter/build/lib/closure_linter/runner_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/scopeutil.py create mode 100644 tools/closure_linter/build/lib/closure_linter/scopeutil_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/statetracker.py create mode 100644 tools/closure_linter/build/lib/closure_linter/statetracker_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/strict_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/testutil.py create mode 100644 tools/closure_linter/build/lib/closure_linter/tokenutil.py create mode 100644 tools/closure_linter/build/lib/closure_linter/tokenutil_test.py create mode 100644 tools/closure_linter/build/lib/closure_linter/typeannotation.py create mode 100644 tools/closure_linter/build/lib/closure_linter/typeannotation_test.py create mode 100644 tools/closure_linter/closure_linter/aliaspass.py create mode 100755 tools/closure_linter/closure_linter/aliaspass_test.py create mode 100755 tools/closure_linter/closure_linter/closurizednamespacesinfo.py create mode 100755 tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py create mode 100644 
tools/closure_linter/closure_linter/common/erroroutput.py delete mode 100755 tools/closure_linter/closure_linter/common/errorprinter.py create mode 100644 tools/closure_linter/closure_linter/common/tokens_test.py create mode 100755 tools/closure_linter/closure_linter/error_check.py create mode 100644 tools/closure_linter/closure_linter/error_fixer_test.py create mode 100644 tools/closure_linter/closure_linter/errorrecord.py create mode 100644 tools/closure_linter/closure_linter/errorrules_test.py mode change 100755 => 100644 tools/closure_linter/closure_linter/javascriptlintrules.py mode change 100755 => 100644 tools/closure_linter/closure_linter/javascriptstatetracker_test.py create mode 100755 tools/closure_linter/closure_linter/not_strict_test.py create mode 100755 tools/closure_linter/closure_linter/requireprovidesorter.py create mode 100644 tools/closure_linter/closure_linter/requireprovidesorter_test.py create mode 100644 tools/closure_linter/closure_linter/runner.py create mode 100644 tools/closure_linter/closure_linter/runner_test.py create mode 100644 tools/closure_linter/closure_linter/scopeutil.py create mode 100644 tools/closure_linter/closure_linter/scopeutil_test.py mode change 100755 => 100644 tools/closure_linter/closure_linter/statetracker.py create mode 100755 tools/closure_linter/closure_linter/statetracker_test.py create mode 100755 tools/closure_linter/closure_linter/strict_test.py create mode 100644 tools/closure_linter/closure_linter/testdata/all_js_wrapped.js create mode 100644 tools/closure_linter/closure_linter/testdata/blank_lines.js create mode 100644 tools/closure_linter/closure_linter/testdata/bugs.js create mode 100644 tools/closure_linter/closure_linter/testdata/empty_file.js create mode 100644 tools/closure_linter/closure_linter/testdata/ends_with_block.js create mode 100644 tools/closure_linter/closure_linter/testdata/externs.js create mode 100644 tools/closure_linter/closure_linter/testdata/externs_jsdoc.js create mode 100644 tools/closure_linter/closure_linter/testdata/file_level_comment.js create mode 100644 tools/closure_linter/closure_linter/testdata/fixjsstyle.html.in.html create mode 100644 tools/closure_linter/closure_linter/testdata/fixjsstyle.html.out.html create mode 100644 tools/closure_linter/closure_linter/testdata/fixjsstyle.in.js create mode 100644 tools/closure_linter/closure_linter/testdata/fixjsstyle.indentation.out.js create mode 100644 tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.in.js create mode 100644 tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.out.js create mode 100644 tools/closure_linter/closure_linter/testdata/fixjsstyle.out.js create mode 100644 tools/closure_linter/closure_linter/testdata/goog_scope.js create mode 100644 tools/closure_linter/closure_linter/testdata/html_parse_error.html create mode 100644 tools/closure_linter/closure_linter/testdata/indentation.js create mode 100644 tools/closure_linter/closure_linter/testdata/interface.js create mode 100644 tools/closure_linter/closure_linter/testdata/jsdoc.js create mode 100644 tools/closure_linter/closure_linter/testdata/limited_doc_checks.js create mode 100644 tools/closure_linter/closure_linter/testdata/minimal.js create mode 100644 tools/closure_linter/closure_linter/testdata/not_strict.js create mode 100644 tools/closure_linter/closure_linter/testdata/other.js create mode 100644 tools/closure_linter/closure_linter/testdata/provide_blank.js create mode 100644 tools/closure_linter/closure_linter/testdata/provide_extra.js create 
mode 100644 tools/closure_linter/closure_linter/testdata/provide_missing.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_alias.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_all_caps.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_blank.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_extra.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_function.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_function_missing.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_function_through_both.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_function_through_namespace.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_interface.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_interface_alias.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_interface_base.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_lower_case.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_missing.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_numeric.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_provide_blank.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_provide_missing.js create mode 100644 tools/closure_linter/closure_linter/testdata/require_provide_ok.js create mode 100644 tools/closure_linter/closure_linter/testdata/semicolon_missing.js create mode 100644 tools/closure_linter/closure_linter/testdata/simple.html create mode 100644 tools/closure_linter/closure_linter/testdata/spaces.js create mode 100644 tools/closure_linter/closure_linter/testdata/tokenizer.js create mode 100644 tools/closure_linter/closure_linter/testdata/unparseable.js create mode 100644 tools/closure_linter/closure_linter/testdata/unused_local_variables.js create mode 100644 tools/closure_linter/closure_linter/testdata/unused_private_members.js create mode 100644 tools/closure_linter/closure_linter/testdata/utf8.html create mode 100644 tools/closure_linter/closure_linter/testutil.py create mode 100644 tools/closure_linter/closure_linter/tokenutil_test.py create mode 100644 tools/closure_linter/closure_linter/typeannotation.py create mode 100755 tools/closure_linter/closure_linter/typeannotation_test.py create mode 100644 tools/closure_linter/dist/closure_linter-2.3.17-py2.7.egg delete mode 100644 tools/closure_linter/gflags.py delete mode 100644 tools/closure_linter/setup.cfg diff --git a/tools/closure_linter/AUTHORS b/tools/closure_linter/AUTHORS new file mode 100644 index 0000000..2f72bd6 --- /dev/null +++ b/tools/closure_linter/AUTHORS @@ -0,0 +1,6 @@ +# This is a list of contributors to the Closure Linter. + +# Names should be added to this file like so: +# Name or Organization + +Google Inc. diff --git a/tools/closure_linter/LICENSE b/tools/closure_linter/LICENSE new file mode 100644 index 0000000..d9a10c0 --- /dev/null +++ b/tools/closure_linter/LICENSE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/tools/closure_linter/PKG-INFO b/tools/closure_linter/PKG-INFO deleted file mode 100644 index b6e71c8..0000000 --- a/tools/closure_linter/PKG-INFO +++ /dev/null @@ -1,10 +0,0 @@ -Metadata-Version: 1.0 -Name: closure_linter -Version: 2.2.6 -Summary: Closure Linter -Home-page: http://code.google.com/p/closure-linter -Author: The Closure Linter Authors -Author-email: opensource@google.com -License: Apache -Description: UNKNOWN -Platform: UNKNOWN diff --git a/tools/closure_linter/build/lib/closure_linter/__init__.py b/tools/closure_linter/build/lib/closure_linter/__init__.py new file mode 100644 index 0000000..1798c8c --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Package indicator for gjslint.""" diff --git a/tools/closure_linter/build/lib/closure_linter/aliaspass.py b/tools/closure_linter/build/lib/closure_linter/aliaspass.py new file mode 100644 index 0000000..bb37bfa --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/aliaspass.py @@ -0,0 +1,248 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Pass that scans for goog.scope aliases and lint/usage errors.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + +from closure_linter import ecmametadatapass +from closure_linter import errors +from closure_linter import javascripttokens +from closure_linter import scopeutil +from closure_linter import tokenutil +from closure_linter.common import error + + +# TODO(nnaze): Create a Pass interface and move this class, EcmaMetaDataPass, +# and related classes onto it. + + +def _GetAliasForIdentifier(identifier, alias_map): + """Returns the aliased_symbol name for an identifier. + + Example usage: + >>> alias_map = {'MyClass': 'goog.foo.MyClass'} + >>> _GetAliasForIdentifier('MyClass.prototype.action', alias_map) + 'goog.foo.MyClass.prototype.action' + + >>> _GetAliasForIdentifier('MyClass.prototype.action', {}) + None + + Args: + identifier: The identifier. + alias_map: A dictionary mapping a symbol to an alias. + + Returns: + The aliased symbol name or None if not found. + """ + ns = identifier.split('.', 1)[0] + aliased_symbol = alias_map.get(ns) + if aliased_symbol: + return aliased_symbol + identifier[len(ns):] + + +def _SetTypeAlias(js_type, alias_map): + """Updates the alias for identifiers in a type. + + Args: + js_type: A typeannotation.TypeAnnotation instance. + alias_map: A dictionary mapping a symbol to an alias. + """ + aliased_symbol = _GetAliasForIdentifier(js_type.identifier, alias_map) + if aliased_symbol: + js_type.alias = aliased_symbol + for sub_type in js_type.IterTypes(): + _SetTypeAlias(sub_type, alias_map) + + +class AliasPass(object): + """Pass to identify goog.scope() usages. + + Identifies goog.scope() usages and finds lint/usage errors. Notes any + aliases of symbols in Closurized namespaces (that is, reassignments + such as "var MyClass = goog.foo.MyClass;") and annotates identifiers + when they're using an alias (so they may be expanded to the full symbol + later -- that "MyClass.prototype.action" refers to + "goog.foo.MyClass.prototype.action" when expanded.). 
+ """ + + def __init__(self, closurized_namespaces=None, error_handler=None): + """Creates a new pass. + + Args: + closurized_namespaces: A set of Closurized namespaces (e.g. 'goog'). + error_handler: An error handler to report lint errors to. + """ + + self._error_handler = error_handler + + # If we have namespaces, freeze the set. + if closurized_namespaces: + closurized_namespaces = frozenset(closurized_namespaces) + + self._closurized_namespaces = closurized_namespaces + + def Process(self, start_token): + """Runs the pass on a token stream. + + Args: + start_token: The first token in the stream. + """ + + if start_token is None: + return + + # TODO(nnaze): Add more goog.scope usage checks. + self._CheckGoogScopeCalls(start_token) + + # If we have closurized namespaces, identify aliased identifiers. + if self._closurized_namespaces: + context = start_token.metadata.context + root_context = context.GetRoot() + self._ProcessRootContext(root_context) + + def _CheckGoogScopeCalls(self, start_token): + """Check goog.scope calls for lint/usage errors.""" + + def IsScopeToken(token): + return (token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and + token.string == 'goog.scope') + + # Find all the goog.scope tokens in the file + scope_tokens = [t for t in start_token if IsScopeToken(t)] + + for token in scope_tokens: + scope_context = token.metadata.context + + if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and + scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT): + self._MaybeReportError( + error.Error(errors.INVALID_USE_OF_GOOG_SCOPE, + 'goog.scope call not in global scope', token)) + + # There should be only one goog.scope reference. Register errors for + # every instance after the first. + for token in scope_tokens[1:]: + self._MaybeReportError( + error.Error(errors.EXTRA_GOOG_SCOPE_USAGE, + 'More than one goog.scope call in file.', token)) + + def _MaybeReportError(self, err): + """Report an error to the handler (if registered).""" + if self._error_handler: + self._error_handler.HandleError(err) + + @classmethod + def _YieldAllContexts(cls, context): + """Yields all contexts that are contained by the given context.""" + yield context + for child_context in context.children: + for descendent_child in cls._YieldAllContexts(child_context): + yield descendent_child + + @staticmethod + def _IsTokenInParentBlock(token, parent_block): + """Determines whether the given token is contained by the given block. + + Args: + token: A token + parent_block: An EcmaContext. + + Returns: + Whether the token is in a context that is or is a child of the given + parent_block context. + """ + context = token.metadata.context + + while context: + if context is parent_block: + return True + context = context.parent + + return False + + def _ProcessRootContext(self, root_context): + """Processes all goog.scope blocks under the root context.""" + + assert root_context.type is ecmametadatapass.EcmaContext.ROOT + + # Process aliases in statements in the root scope for goog.module-style + # aliases. + global_alias_map = {} + for context in root_context.children: + if context.type == ecmametadatapass.EcmaContext.STATEMENT: + for statement_child in context.children: + if statement_child.type == ecmametadatapass.EcmaContext.VAR: + match = scopeutil.MatchModuleAlias(statement_child) + if match: + # goog.require aliases cannot use further aliases, the symbol is + # the second part of match, directly. 
+ symbol = match[1] + if scopeutil.IsInClosurizedNamespace(symbol, + self._closurized_namespaces): + global_alias_map[match[0]] = symbol + + # Process each block to find aliases. + for context in root_context.children: + self._ProcessBlock(context, global_alias_map) + + def _ProcessBlock(self, context, global_alias_map): + """Scans a goog.scope block to find aliases and mark alias tokens.""" + alias_map = global_alias_map.copy() + + # Iterate over every token in the context. Each token points to one + # context, but multiple tokens may point to the same context. We only want + # to check each context once, so keep track of those we've seen. + seen_contexts = set() + token = context.start_token + while token and self._IsTokenInParentBlock(token, context): + token_context = token.metadata.context if token.metadata else None + + # Check to see if this token is an alias. + if token_context and token_context not in seen_contexts: + seen_contexts.add(token_context) + + # If this is a alias statement in the goog.scope block. + if (token_context.type == ecmametadatapass.EcmaContext.VAR and + scopeutil.IsGoogScopeBlock(token_context.parent.parent)): + match = scopeutil.MatchAlias(token_context) + + # If this is an alias, remember it in the map. + if match: + alias, symbol = match + symbol = _GetAliasForIdentifier(symbol, alias_map) or symbol + if scopeutil.IsInClosurizedNamespace(symbol, + self._closurized_namespaces): + alias_map[alias] = symbol + + # If this token is an identifier that matches an alias, + # mark the token as an alias to the original symbol. + if (token.type is javascripttokens.JavaScriptTokenType.SIMPLE_LVALUE or + token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER): + identifier = tokenutil.GetIdentifierForToken(token) + if identifier: + aliased_symbol = _GetAliasForIdentifier(identifier, alias_map) + if aliased_symbol: + token.metadata.aliased_symbol = aliased_symbol + + elif token.type == javascripttokens.JavaScriptTokenType.DOC_FLAG: + flag = token.attached_object + if flag and flag.HasType() and flag.jstype: + _SetTypeAlias(flag.jstype, alias_map) + + token = token.next # Get next token diff --git a/tools/closure_linter/build/lib/closure_linter/aliaspass_test.py b/tools/closure_linter/build/lib/closure_linter/aliaspass_test.py new file mode 100644 index 0000000..7042e53 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/aliaspass_test.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
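
[Editorial note: the core of aliaspass.py above is _GetAliasForIdentifier, which rewrites an identifier whose first dotted component is a goog.scope alias. The standalone restatement below is an illustration only, not part of the patch; it mirrors the helper's logic and its doctest example so the behaviour can be tried without installing the package.]

# Standalone sketch of the alias expansion performed by
# aliaspass._GetAliasForIdentifier; illustration only, not patch content.


def get_alias_for_identifier(identifier, alias_map):
    """Returns the expanded symbol for an aliased identifier, or None."""
    # Only the first component can be an alias
    # ("MyClass" in "MyClass.prototype.action").
    ns = identifier.split('.', 1)[0]
    aliased_symbol = alias_map.get(ns)
    if aliased_symbol:
        # Re-attach everything after the aliased component.
        return aliased_symbol + identifier[len(ns):]
    return None


if __name__ == '__main__':
    alias_map = {'MyClass': 'goog.foo.MyClass'}
    print(get_alias_for_identifier('MyClass.prototype.action', alias_map))
    # -> goog.foo.MyClass.prototype.action
    print(get_alias_for_identifier('OtherClass.action', alias_map))
    # -> None (first component is not a known alias)
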
+ +"""Unit tests for the aliaspass module.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + +import unittest as googletest + +from closure_linter import aliaspass +from closure_linter import errors +from closure_linter import javascriptstatetracker +from closure_linter import testutil +from closure_linter.common import erroraccumulator + + +def _GetTokenByLineAndString(start_token, string, line_number): + for token in start_token: + if token.line_number == line_number and token.string == string: + return token + + +class AliasPassTest(googletest.TestCase): + + def testInvalidGoogScopeCall(self): + start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCOPE_SCRIPT) + + error_accumulator = erroraccumulator.ErrorAccumulator() + alias_pass = aliaspass.AliasPass( + error_handler=error_accumulator) + alias_pass.Process(start_token) + + alias_errors = error_accumulator.GetErrors() + self.assertEquals(1, len(alias_errors)) + + alias_error = alias_errors[0] + + self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, alias_error.code) + self.assertEquals('goog.scope', alias_error.token.string) + + def testAliasedIdentifiers(self): + start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT) + alias_pass = aliaspass.AliasPass(set(['goog', 'myproject'])) + alias_pass.Process(start_token) + + alias_token = _GetTokenByLineAndString(start_token, 'Event', 4) + self.assertTrue(alias_token.metadata.is_alias_definition) + + my_class_token = _GetTokenByLineAndString(start_token, 'myClass', 9) + self.assertIsNone(my_class_token.metadata.aliased_symbol) + + component_token = _GetTokenByLineAndString(start_token, 'Component', 17) + self.assertEquals('goog.ui.Component', + component_token.metadata.aliased_symbol) + + event_token = _GetTokenByLineAndString(start_token, 'Event.Something', 17) + self.assertEquals('goog.events.Event.Something', + event_token.metadata.aliased_symbol) + + non_closurized_token = _GetTokenByLineAndString( + start_token, 'NonClosurizedClass', 18) + self.assertIsNone(non_closurized_token.metadata.aliased_symbol) + + long_start_token = _GetTokenByLineAndString(start_token, 'Event', 24) + self.assertEquals('goog.events.Event.MultilineIdentifier.someMethod', + long_start_token.metadata.aliased_symbol) + + def testAliasedDoctypes(self): + """Tests that aliases are correctly expanded within type annotations.""" + start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT) + tracker = javascriptstatetracker.JavaScriptStateTracker() + tracker.DocFlagPass(start_token, error_handler=None) + + alias_pass = aliaspass.AliasPass(set(['goog', 'myproject'])) + alias_pass.Process(start_token) + + flag_token = _GetTokenByLineAndString(start_token, '@type', 22) + self.assertEquals( + 'goog.events.Event.>', + repr(flag_token.attached_object.jstype)) + + def testModuleAlias(self): + start_token = testutil.TokenizeSourceAndRunEcmaPass(""" +goog.module('goog.test'); +var Alias = goog.require('goog.Alias'); +Alias.use(); +""") + alias_pass = aliaspass.AliasPass(set(['goog'])) + alias_pass.Process(start_token) + alias_token = _GetTokenByLineAndString(start_token, 'Alias', 3) + self.assertTrue(alias_token.metadata.is_alias_definition) + + def testMultipleGoogScopeCalls(self): + start_token = testutil.TokenizeSourceAndRunEcmaPass( + _TEST_MULTIPLE_SCOPE_SCRIPT) + + error_accumulator = erroraccumulator.ErrorAccumulator() + + alias_pass = aliaspass.AliasPass( + set(['goog', 'myproject']), + 
error_handler=error_accumulator) + alias_pass.Process(start_token) + + alias_errors = error_accumulator.GetErrors() + + self.assertEquals(3, len(alias_errors)) + + error = alias_errors[0] + self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, error.code) + self.assertEquals(7, error.token.line_number) + + error = alias_errors[1] + self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code) + self.assertEquals(7, error.token.line_number) + + error = alias_errors[2] + self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code) + self.assertEquals(11, error.token.line_number) + + +_TEST_ALIAS_SCRIPT = """ +goog.scope(function() { +var events = goog.events; // scope alias +var Event = events. + Event; // nested multiline scope alias + +// This should not be registered as an aliased identifier because +// it appears before the alias. +var myClass = new MyClass(); + +var Component = goog.ui.Component; // scope alias +var MyClass = myproject.foo.MyClass; // scope alias + +// Scope alias of non-Closurized namespace. +var NonClosurizedClass = aaa.bbb.NonClosurizedClass; + +var component = new Component(Event.Something); +var nonClosurized = NonClosurizedClass(); + +/** + * A created namespace with a really long identifier. + * @type {events.Event.} + */ +Event. + MultilineIdentifier. + someMethod = function() {}; +}); +""" + +_TEST_SCOPE_SCRIPT = """ +function foo () { + // This goog.scope call is invalid. + goog.scope(function() { + + }); +} +""" + +_TEST_MULTIPLE_SCOPE_SCRIPT = """ +goog.scope(function() { + // do nothing +}); + +function foo() { + var test = goog.scope; // We should not see goog.scope mentioned. +} + +// This goog.scope invalid. There can be only one. +goog.scope(function() { + +}); +""" + + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/build/lib/closure_linter/checker.py b/tools/closure_linter/build/lib/closure_linter/checker.py new file mode 100644 index 0000000..1c98417 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/checker.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python +# +# Copyright 2007 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Core methods for checking JS files for common style guide violations.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + +import gflags as flags + +from closure_linter import aliaspass +from closure_linter import checkerbase +from closure_linter import closurizednamespacesinfo +from closure_linter import javascriptlintrules + + +flags.DEFINE_list('closurized_namespaces', '', + 'Namespace prefixes, used for testing of' + 'goog.provide/require') +flags.DEFINE_list('ignored_extra_namespaces', '', + 'Fully qualified namespaces that should be not be reported ' + 'as extra by the linter.') + + +class JavaScriptStyleChecker(checkerbase.CheckerBase): + """Checker that applies JavaScriptLintRules.""" + + def __init__(self, state_tracker, error_handler): + """Initialize an JavaScriptStyleChecker object. 
+ + Args: + state_tracker: State tracker. + error_handler: Error handler to pass all errors to. + """ + self._namespaces_info = None + self._alias_pass = None + if flags.FLAGS.closurized_namespaces: + self._namespaces_info = ( + closurizednamespacesinfo.ClosurizedNamespacesInfo( + flags.FLAGS.closurized_namespaces, + flags.FLAGS.ignored_extra_namespaces)) + + self._alias_pass = aliaspass.AliasPass( + flags.FLAGS.closurized_namespaces, error_handler) + + checkerbase.CheckerBase.__init__( + self, + error_handler=error_handler, + lint_rules=javascriptlintrules.JavaScriptLintRules( + self._namespaces_info), + state_tracker=state_tracker) + + def Check(self, start_token, limited_doc_checks=False, is_html=False, + stop_token=None): + """Checks a token stream for lint warnings/errors. + + Adds a separate pass for computing dependency information based on + goog.require and goog.provide statements prior to the main linting pass. + + Args: + start_token: The first token in the token stream. + limited_doc_checks: Whether to perform limited checks. + is_html: Whether this token stream is HTML. + stop_token: If given, checks should stop at this token. + """ + self._lint_rules.Initialize(self, limited_doc_checks, is_html) + + self._state_tracker.DocFlagPass(start_token, self._error_handler) + + if self._alias_pass: + self._alias_pass.Process(start_token) + + # To maximize the amount of errors that get reported before a parse error + # is displayed, don't run the dependency pass if a parse error exists. + if self._namespaces_info: + self._namespaces_info.Reset() + self._ExecutePass(start_token, self._DependencyPass, stop_token) + + self._ExecutePass(start_token, self._LintPass, stop_token) + + # If we have a stop_token, we didn't end up reading the whole file and, + # thus, don't call Finalize to do end-of-file checks. + if not stop_token: + self._lint_rules.Finalize(self._state_tracker) + + def _DependencyPass(self, token): + """Processes an individual token for dependency information. + + Used to encapsulate the logic needed to process an individual token so that + it can be passed to _ExecutePass. + + Args: + token: The token to process. + """ + self._namespaces_info.ProcessToken(token, self._state_tracker) diff --git a/tools/closure_linter/build/lib/closure_linter/checkerbase.py b/tools/closure_linter/build/lib/closure_linter/checkerbase.py new file mode 100644 index 0000000..6679ded --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/checkerbase.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python +# +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
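
[Editorial note: checker.py above wires the new passes together: JavaScriptStyleChecker builds a ClosurizedNamespacesInfo and an AliasPass from the --closurized_namespaces flag and runs a doc-flag pass, alias pass, dependency pass and lint pass over the token stream. The sketch below is an illustration of driving that flow directly, not part of the patch; it assumes python-gflags and the closure_linter package added here are importable, and it reuses only calls that appear in this patch's own tests.]

# Illustrative driver only (not part of the patch). Assumes python-gflags
# and the closure_linter package from this patch are on the Python path.
import gflags as flags

from closure_linter import checker
from closure_linter import javascriptstatetracker
from closure_linter import testutil
from closure_linter.common import erroraccumulator

# Parse defaults for every DEFINE'd flag, plus the namespaces of interest.
flags.FLAGS(['demo', '--closurized_namespaces=goog,myproject'])

SOURCE = """
goog.provide('myproject.Foo');

myproject.Foo = function() {};
"""

# Tokenize and run the ECMAScript metadata pass, as the unit tests do.
start_token = testutil.TokenizeSourceAndRunEcmaPass(SOURCE)

accumulator = erroraccumulator.ErrorAccumulator()
style_checker = checker.JavaScriptStyleChecker(
    javascriptstatetracker.JavaScriptStateTracker(), accumulator)
style_checker.Check(start_token)

for err in accumulator.GetErrors():
    # Error objects carry the numeric code and the offending token.
    print('error %s at line %s' % (
        err.code, err.token.line_number if err.token else '?'))
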
+ +"""Base classes for writing checkers that operate on tokens.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)', + 'jacobr@google.com (Jacob Richman)') + +from closure_linter import errorrules +from closure_linter.common import error + + +class LintRulesBase(object): + """Base class for all classes defining the lint rules for a language.""" + + def __init__(self): + self.__checker = None + + def Initialize(self, checker, limited_doc_checks, is_html): + """Initializes to prepare to check a file. + + Args: + checker: Class to report errors to. + limited_doc_checks: Whether doc checking is relaxed for this file. + is_html: Whether the file is an HTML file with extracted contents. + """ + self.__checker = checker + self._limited_doc_checks = limited_doc_checks + self._is_html = is_html + + def _HandleError(self, code, message, token, position=None, + fix_data=None): + """Call the HandleError function for the checker we are associated with.""" + if errorrules.ShouldReportError(code): + self.__checker.HandleError(code, message, token, position, fix_data) + + def _SetLimitedDocChecks(self, limited_doc_checks): + """Sets whether doc checking is relaxed for this file. + + Args: + limited_doc_checks: Whether doc checking is relaxed for this file. + """ + self._limited_doc_checks = limited_doc_checks + + def CheckToken(self, token, parser_state): + """Checks a token, given the current parser_state, for warnings and errors. + + Args: + token: The current token under consideration. + parser_state: Object that indicates the parser state in the page. + + Raises: + TypeError: If not overridden. + """ + raise TypeError('Abstract method CheckToken not implemented') + + def Finalize(self, parser_state): + """Perform all checks that need to occur after all lines are processed. + + Args: + parser_state: State of the parser after parsing all tokens + + Raises: + TypeError: If not overridden. + """ + raise TypeError('Abstract method Finalize not implemented') + + +class CheckerBase(object): + """This class handles checking a LintRules object against a file.""" + + def __init__(self, error_handler, lint_rules, state_tracker): + """Initialize a checker object. + + Args: + error_handler: Object that handles errors. + lint_rules: LintRules object defining lint errors given a token + and state_tracker object. + state_tracker: Object that tracks the current state in the token stream. + + """ + self._error_handler = error_handler + self._lint_rules = lint_rules + self._state_tracker = state_tracker + + self._has_errors = False + + def HandleError(self, code, message, token, position=None, + fix_data=None): + """Prints out the given error message including a line number. + + Args: + code: The error code. + message: The error to print. + token: The token where the error occurred, or None if it was a file-wide + issue. + position: The position of the error, defaults to None. + fix_data: Metadata used for fixing the error. + """ + self._has_errors = True + self._error_handler.HandleError( + error.Error(code, message, token, position, fix_data)) + + def HasErrors(self): + """Returns true if the style checker has found any errors. + + Returns: + True if the style checker has found any errors. + """ + return self._has_errors + + def Check(self, start_token, limited_doc_checks=False, is_html=False, + stop_token=None): + """Checks a token stream, reporting errors to the error reporter. 
+ + Args: + start_token: First token in token stream. + limited_doc_checks: Whether doc checking is relaxed for this file. + is_html: Whether the file being checked is an HTML file with extracted + contents. + stop_token: If given, check should stop at this token. + """ + + self._lint_rules.Initialize(self, limited_doc_checks, is_html) + self._ExecutePass(start_token, self._LintPass, stop_token=stop_token) + self._lint_rules.Finalize(self._state_tracker) + + def _LintPass(self, token): + """Checks an individual token for lint warnings/errors. + + Used to encapsulate the logic needed to check an individual token so that it + can be passed to _ExecutePass. + + Args: + token: The token to check. + """ + self._lint_rules.CheckToken(token, self._state_tracker) + + def _ExecutePass(self, token, pass_function, stop_token=None): + """Calls the given function for every token in the given token stream. + + As each token is passed to the given function, state is kept up to date and, + depending on the error_trace flag, errors are either caught and reported, or + allowed to bubble up so developers can see the full stack trace. If a parse + error is specified, the pass will proceed as normal until the token causing + the parse error is reached. + + Args: + token: The first token in the token stream. + pass_function: The function to call for each token in the token stream. + stop_token: The last token to check (if given). + + Raises: + Exception: If any error occurred while calling the given function. + """ + + self._state_tracker.Reset() + while token: + # When we are looking at a token and decided to delete the whole line, we + # will delete all of them in the "HandleToken()" below. So the current + # token and subsequent ones may already be deleted here. The way we + # delete a token does not wipe out the previous and next pointers of the + # deleted token. So we need to check the token itself to make sure it is + # not deleted. + if not token.is_deleted: + # End the pass at the stop token + if stop_token and token is stop_token: + return + + self._state_tracker.HandleToken( + token, self._state_tracker.GetLastNonSpaceToken()) + pass_function(token) + self._state_tracker.HandleAfterToken(token) + + token = token.next diff --git a/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py b/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py new file mode 100644 index 0000000..e7cbfd3 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py @@ -0,0 +1,578 @@ +#!/usr/bin/env python +# +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Logic for computing dependency information for closurized JavaScript files. + +Closurized JavaScript files express dependencies using goog.require and +goog.provide statements. 
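
[Editorial note: checkerbase.py above defines the extension contract the rest of the patch builds on: a rule set subclasses LintRulesBase, implements CheckToken and Finalize, and reports through _HandleError while CheckerBase._ExecutePass walks the token stream. The minimal rule set below is a hypothetical sketch, not part of the patch; the error code 9999 and the "no tabs" rule are invented for illustration.]

# Hypothetical rule set built on the LintRulesBase API above; the error
# code 9999 and the tab rule are invented for this sketch.
from closure_linter import checkerbase


class NoTabsLintRules(checkerbase.LintRulesBase):
    """Reports any token whose text contains a literal tab character."""

    def CheckToken(self, token, parser_state):
        # CheckerBase._ExecutePass calls this once per token, with the
        # state tracker kept up to date between calls.
        if '\t' in token.string:
            self._HandleError(9999, 'Tab character in source.', token)

    def Finalize(self, parser_state):
        # This rule needs no end-of-file checks.
        pass

[An instance of such a class would be handed to CheckerBase together with an error handler and a state tracker, which is exactly how JavaScriptStyleChecker passes JavaScriptLintRules to the base class in checker.py.]
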
In order for the linter to detect when a statement is
+missing or unnecessary, all identifiers in the JavaScript file must first be
+processed to determine if they constitute the creation or usage of a dependency.
+"""
+
+
+import re
+
+from closure_linter import javascripttokens
+from closure_linter import tokenutil
+
+# pylint: disable=g-bad-name
+TokenType = javascripttokens.JavaScriptTokenType
+
+DEFAULT_EXTRA_NAMESPACES = [
+    'goog.testing.asserts',
+    'goog.testing.jsunit',
+]
+
+
+class UsedNamespace(object):
+  """A type for information about a used namespace."""
+
+  def __init__(self, namespace, identifier, token, alias_definition):
+    """Initializes the instance.
+
+    Args:
+      namespace: the namespace of an identifier used in the file
+      identifier: the complete identifier
+      token: the token that uses the namespace
+      alias_definition: a boolean stating whether the namespace is only used
+          for an alias definition and should not be required.
+    """
+    self.namespace = namespace
+    self.identifier = identifier
+    self.token = token
+    self.alias_definition = alias_definition
+
+  def GetLine(self):
+    return self.token.line_number
+
+  def __repr__(self):
+    return 'UsedNamespace(%s)' % ', '.join(
+        ['%s=%s' % (k, repr(v)) for k, v in self.__dict__.iteritems()])
+
+
+class ClosurizedNamespacesInfo(object):
+  """Dependency information for closurized JavaScript files.
+
+  Processes token streams for dependency creation or usage and provides logic
+  for determining if a given require or provide statement is unnecessary or if
+  there are missing require or provide statements.
+  """
+
+  def __init__(self, closurized_namespaces, ignored_extra_namespaces):
+    """Initializes an instance of the ClosurizedNamespacesInfo class.
+
+    Args:
+      closurized_namespaces: A list of namespace prefixes that should be
+          processed for dependency information. Non-matching namespaces are
+          ignored.
+      ignored_extra_namespaces: A list of namespaces that should not be reported
+          as extra regardless of whether they are actually used.
+    """
+    self._closurized_namespaces = closurized_namespaces
+    self._ignored_extra_namespaces = (ignored_extra_namespaces +
+                                      DEFAULT_EXTRA_NAMESPACES)
+    self.Reset()
+
+  def Reset(self):
+    """Resets the internal state to prepare for processing a new file."""
+
+    # A list of goog.provide tokens in the order they appeared in the file.
+    self._provide_tokens = []
+
+    # A list of goog.require tokens in the order they appeared in the file.
+    self._require_tokens = []
+
+    # Namespaces that are already goog.provided.
+    self._provided_namespaces = []
+
+    # Namespaces that are already goog.required.
+    self._required_namespaces = []
+
+    # Note that created_namespaces and used_namespaces contain both namespaces
+    # and identifiers because there are many existing cases where a method or
+    # constant is provided directly instead of its namespace. Ideally, these
+    # two lists would only have to contain namespaces.
+
+    # A list of tuples where the first element is the namespace of an identifier
+    # created in the file, the second is the identifier itself and the third is
+    # the line number where it's created.
+    self._created_namespaces = []
+
+    # A list of UsedNamespace instances.
+    self._used_namespaces = []
+
+    # A list of seemingly-unnecessary namespaces that are goog.required() and
+    # annotated with @suppress {extraRequire}.
+    self._suppressed_requires = []
+
+    # A list of goog.provide tokens which are duplicates.
+ self._duplicate_provide_tokens = [] + + # A list of goog.require tokens which are duplicates. + self._duplicate_require_tokens = [] + + # Whether this file is in a goog.scope. Someday, we may add support + # for checking scopified namespaces, but for now let's just fail + # in a more reasonable way. + self._scopified_file = False + + # TODO(user): Handle the case where there are 2 different requires + # that can satisfy the same dependency, but only one is necessary. + + def GetProvidedNamespaces(self): + """Returns the namespaces which are already provided by this file. + + Returns: + A list of strings where each string is a 'namespace' corresponding to an + existing goog.provide statement in the file being checked. + """ + return set(self._provided_namespaces) + + def GetRequiredNamespaces(self): + """Returns the namespaces which are already required by this file. + + Returns: + A list of strings where each string is a 'namespace' corresponding to an + existing goog.require statement in the file being checked. + """ + return set(self._required_namespaces) + + def IsExtraProvide(self, token): + """Returns whether the given goog.provide token is unnecessary. + + Args: + token: A goog.provide token. + + Returns: + True if the given token corresponds to an unnecessary goog.provide + statement, otherwise False. + """ + namespace = tokenutil.GetStringAfterToken(token) + + if self.GetClosurizedNamespace(namespace) is None: + return False + + if token in self._duplicate_provide_tokens: + return True + + # TODO(user): There's probably a faster way to compute this. + for created_namespace, created_identifier, _ in self._created_namespaces: + if namespace == created_namespace or namespace == created_identifier: + return False + + return True + + def IsExtraRequire(self, token): + """Returns whether the given goog.require token is unnecessary. + + Args: + token: A goog.require token. + + Returns: + True if the given token corresponds to an unnecessary goog.require + statement, otherwise False. + """ + namespace = tokenutil.GetStringAfterToken(token) + + if self.GetClosurizedNamespace(namespace) is None: + return False + + if namespace in self._ignored_extra_namespaces: + return False + + if token in self._duplicate_require_tokens: + return True + + if namespace in self._suppressed_requires: + return False + + # If the namespace contains a component that is initial caps, then that + # must be the last component of the namespace. + parts = namespace.split('.') + if len(parts) > 1 and parts[-2][0].isupper(): + return True + + # TODO(user): There's probably a faster way to compute this. + for ns in self._used_namespaces: + if (not ns.alias_definition and ( + namespace == ns.namespace or namespace == ns.identifier)): + return False + + return True + + def GetMissingProvides(self): + """Returns the dict of missing provided namespaces for the current file. + + Returns: + Returns a dictionary of key as string and value as integer where each + string(key) is a namespace that should be provided by this file, but is + not and integer(value) is first line number where it's defined. 
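+
+      For example (illustrative), assuming 'package' is one of the closurized
+      namespaces, a file that assigns package.Foo without a
+      goog.provide('package.Foo') yields {'package.Foo': <line of the
+      assignment>}.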
+ """ + missing_provides = dict() + for namespace, identifier, line_number in self._created_namespaces: + if (not self._IsPrivateIdentifier(identifier) and + namespace not in self._provided_namespaces and + identifier not in self._provided_namespaces and + namespace not in self._required_namespaces and + namespace not in missing_provides): + missing_provides[namespace] = line_number + + return missing_provides + + def GetMissingRequires(self): + """Returns the dict of missing required namespaces for the current file. + + For each non-private identifier used in the file, find either a + goog.require, goog.provide or a created identifier that satisfies it. + goog.require statements can satisfy the identifier by requiring either the + namespace of the identifier or the identifier itself. goog.provide + statements can satisfy the identifier by providing the namespace of the + identifier. A created identifier can only satisfy the used identifier if + it matches it exactly (necessary since things can be defined on a + namespace in more than one file). Note that provided namespaces should be + a subset of created namespaces, but we check both because in some cases we + can't always detect the creation of the namespace. + + Returns: + Returns a dictionary of key as string and value integer where each + string(key) is a namespace that should be required by this file, but is + not and integer(value) is first line number where it's used. + """ + external_dependencies = set(self._required_namespaces) + + # Assume goog namespace is always available. + external_dependencies.add('goog') + # goog.module is treated as a builtin, too (for goog.module.get). + external_dependencies.add('goog.module') + + created_identifiers = set() + for unused_namespace, identifier, unused_line_number in ( + self._created_namespaces): + created_identifiers.add(identifier) + + missing_requires = dict() + illegal_alias_statements = dict() + + def ShouldRequireNamespace(namespace, identifier): + """Checks if a namespace would normally be required.""" + return ( + not self._IsPrivateIdentifier(identifier) and + namespace not in external_dependencies and + namespace not in self._provided_namespaces and + identifier not in external_dependencies and + identifier not in created_identifiers and + namespace not in missing_requires) + + # First check all the used identifiers where we know that their namespace + # needs to be provided (unless they are optional). + for ns in self._used_namespaces: + namespace = ns.namespace + identifier = ns.identifier + if (not ns.alias_definition and + ShouldRequireNamespace(namespace, identifier)): + missing_requires[namespace] = ns.GetLine() + + # Now that all required namespaces are known, we can check if the alias + # definitions (that are likely being used for typeannotations that don't + # need explicit goog.require statements) are already covered. If not + # the user shouldn't use the alias. + for ns in self._used_namespaces: + if (not ns.alias_definition or + not ShouldRequireNamespace(ns.namespace, ns.identifier)): + continue + if self._FindNamespace(ns.identifier, self._provided_namespaces, + created_identifiers, external_dependencies, + missing_requires): + continue + namespace = ns.identifier.rsplit('.', 1)[0] + illegal_alias_statements[namespace] = ns.token + + return missing_requires, illegal_alias_statements + + def _FindNamespace(self, identifier, *namespaces_list): + """Finds the namespace of an identifier given a list of other namespaces. 
+ + Args: + identifier: An identifier whose parent needs to be defined. + e.g. for goog.bar.foo we search something that provides + goog.bar. + *namespaces_list: var args of iterables of namespace identifiers + Returns: + The namespace that the given identifier is part of or None. + """ + identifier = identifier.rsplit('.', 1)[0] + identifier_prefix = identifier + '.' + for namespaces in namespaces_list: + for namespace in namespaces: + if namespace == identifier or namespace.startswith(identifier_prefix): + return namespace + return None + + def _IsPrivateIdentifier(self, identifier): + """Returns whether the given identifier is private.""" + pieces = identifier.split('.') + for piece in pieces: + if piece.endswith('_'): + return True + return False + + def IsFirstProvide(self, token): + """Returns whether token is the first provide token.""" + return self._provide_tokens and token == self._provide_tokens[0] + + def IsFirstRequire(self, token): + """Returns whether token is the first require token.""" + return self._require_tokens and token == self._require_tokens[0] + + def IsLastProvide(self, token): + """Returns whether token is the last provide token.""" + return self._provide_tokens and token == self._provide_tokens[-1] + + def IsLastRequire(self, token): + """Returns whether token is the last require token.""" + return self._require_tokens and token == self._require_tokens[-1] + + def ProcessToken(self, token, state_tracker): + """Processes the given token for dependency information. + + Args: + token: The token to process. + state_tracker: The JavaScript state tracker. + """ + + # Note that this method is in the critical path for the linter and has been + # optimized for performance in the following ways: + # - Tokens are checked by type first to minimize the number of function + # calls necessary to determine if action needs to be taken for the token. + # - The most common tokens types are checked for first. + # - The number of function calls has been minimized (thus the length of this + # function. + + if token.type == TokenType.IDENTIFIER: + # TODO(user): Consider saving the whole identifier in metadata. + whole_identifier_string = tokenutil.GetIdentifierForToken(token) + if whole_identifier_string is None: + # We only want to process the identifier one time. If the whole string + # identifier is None, that means this token was part of a multi-token + # identifier, but it was not the first token of the identifier. + return + + # In the odd case that a goog.require is encountered inside a function, + # just ignore it (e.g. dynamic loading in test runners). + if token.string == 'goog.require' and not state_tracker.InFunction(): + self._require_tokens.append(token) + namespace = tokenutil.GetStringAfterToken(token) + if namespace in self._required_namespaces: + self._duplicate_require_tokens.append(token) + else: + self._required_namespaces.append(namespace) + + # If there is a suppression for the require, add a usage for it so it + # gets treated as a regular goog.require (i.e. still gets sorted). 
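+        # For example (illustrative), a file may contain:
+        #   /** @suppress {extraRequire} */
+        #   goog.require('goog.events.EventType');
+        # The namespace is then recorded as suppressed (and as a usage below),
+        # so IsExtraRequire() will not flag it.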
+ if self._HasSuppression(state_tracker, 'extraRequire'): + self._suppressed_requires.append(namespace) + self._AddUsedNamespace(state_tracker, namespace, token) + + elif token.string == 'goog.provide': + self._provide_tokens.append(token) + namespace = tokenutil.GetStringAfterToken(token) + if namespace in self._provided_namespaces: + self._duplicate_provide_tokens.append(token) + else: + self._provided_namespaces.append(namespace) + + # If there is a suppression for the provide, add a creation for it so it + # gets treated as a regular goog.provide (i.e. still gets sorted). + if self._HasSuppression(state_tracker, 'extraProvide'): + self._AddCreatedNamespace(state_tracker, namespace, token.line_number) + + elif token.string == 'goog.scope': + self._scopified_file = True + + elif token.string == 'goog.setTestOnly': + + # Since the message is optional, we don't want to scan to later lines. + for t in tokenutil.GetAllTokensInSameLine(token): + if t.type == TokenType.STRING_TEXT: + message = t.string + + if re.match(r'^\w+(\.\w+)+$', message): + # This looks like a namespace. If it's a Closurized namespace, + # consider it created. + base_namespace = message.split('.', 1)[0] + if base_namespace in self._closurized_namespaces: + self._AddCreatedNamespace(state_tracker, message, + token.line_number) + + break + else: + jsdoc = state_tracker.GetDocComment() + if token.metadata and token.metadata.aliased_symbol: + whole_identifier_string = token.metadata.aliased_symbol + elif (token.string == 'goog.module.get' and + not self._HasSuppression(state_tracker, 'extraRequire')): + # Cannot use _AddUsedNamespace as this is not an identifier, but + # already the entire namespace that's required. + namespace = tokenutil.GetStringAfterToken(token) + namespace = UsedNamespace(namespace, namespace, token, + alias_definition=False) + self._used_namespaces.append(namespace) + if jsdoc and jsdoc.HasFlag('typedef'): + self._AddCreatedNamespace(state_tracker, whole_identifier_string, + token.line_number, + namespace=self.GetClosurizedNamespace( + whole_identifier_string)) + else: + is_alias_definition = (token.metadata and + token.metadata.is_alias_definition) + self._AddUsedNamespace(state_tracker, whole_identifier_string, + token, is_alias_definition) + + elif token.type == TokenType.SIMPLE_LVALUE: + identifier = token.values['identifier'] + start_token = tokenutil.GetIdentifierStart(token) + if start_token and start_token != token: + # Multi-line identifier being assigned. Get the whole identifier. + identifier = tokenutil.GetIdentifierForToken(start_token) + else: + start_token = token + # If an alias is defined on the start_token, use it instead. 
+ if (start_token and + start_token.metadata and + start_token.metadata.aliased_symbol and + not start_token.metadata.is_alias_definition): + identifier = start_token.metadata.aliased_symbol + + if identifier: + namespace = self.GetClosurizedNamespace(identifier) + if state_tracker.InFunction(): + self._AddUsedNamespace(state_tracker, identifier, token) + elif namespace and namespace != 'goog': + self._AddCreatedNamespace(state_tracker, identifier, + token.line_number, namespace=namespace) + + elif token.type == TokenType.DOC_FLAG: + flag = token.attached_object + flag_type = flag.flag_type + if flag and flag.HasType() and flag.jstype: + is_interface = state_tracker.GetDocComment().HasFlag('interface') + if flag_type == 'implements' or (flag_type == 'extends' + and is_interface): + identifier = flag.jstype.alias or flag.jstype.identifier + self._AddUsedNamespace(state_tracker, identifier, token) + # Since we process doctypes only for implements and extends, the + # type is a simple one and we don't need any iteration for subtypes. + + def _AddCreatedNamespace(self, state_tracker, identifier, line_number, + namespace=None): + """Adds the namespace of an identifier to the list of created namespaces. + + If the identifier is annotated with a 'missingProvide' suppression, it is + not added. + + Args: + state_tracker: The JavaScriptStateTracker instance. + identifier: The identifier to add. + line_number: Line number where namespace is created. + namespace: The namespace of the identifier or None if the identifier is + also the namespace. + """ + if not namespace: + namespace = identifier + + if self._HasSuppression(state_tracker, 'missingProvide'): + return + + self._created_namespaces.append([namespace, identifier, line_number]) + + def _AddUsedNamespace(self, state_tracker, identifier, token, + is_alias_definition=False): + """Adds the namespace of an identifier to the list of used namespaces. + + If the identifier is annotated with a 'missingRequire' suppression, it is + not added. + + Args: + state_tracker: The JavaScriptStateTracker instance. + identifier: An identifier which has been used. + token: The token in which the namespace is used. + is_alias_definition: If the used namespace is part of an alias_definition. + Aliased symbols need their parent namespace to be available, if it is + not yet required through another symbol, an error will be thrown. + """ + if self._HasSuppression(state_tracker, 'missingRequire'): + return + + namespace = self.GetClosurizedNamespace(identifier) + # b/5362203 If its a variable in scope then its not a required namespace. + if namespace and not state_tracker.IsVariableInScope(namespace): + namespace = UsedNamespace(namespace, identifier, token, + is_alias_definition) + self._used_namespaces.append(namespace) + + def _HasSuppression(self, state_tracker, suppression): + jsdoc = state_tracker.GetDocComment() + return jsdoc and suppression in jsdoc.suppressions + + def GetClosurizedNamespace(self, identifier): + """Given an identifier, returns the namespace that identifier is from. + + Args: + identifier: The identifier to extract a namespace from. + + Returns: + The namespace the given identifier resides in, or None if one could not + be found. + """ + if identifier.startswith('goog.global'): + # Ignore goog.global, since it is, by definition, global. 
+ return None + + parts = identifier.split('.') + for namespace in self._closurized_namespaces: + if not identifier.startswith(namespace + '.'): + continue + + # The namespace for a class is the shortest prefix ending in a class + # name, which starts with a capital letter but is not a capitalized word. + # + # We ultimately do not want to allow requiring or providing of inner + # classes/enums. Instead, a file should provide only the top-level class + # and users should require only that. + namespace = [] + for part in parts: + if part == 'prototype' or part.isupper(): + return '.'.join(namespace) + namespace.append(part) + if part[0].isupper(): + return '.'.join(namespace) + + # At this point, we know there's no class or enum, so the namespace is + # just the identifier with the last part removed. With the exception of + # apply, inherits, and call, which should also be stripped. + if parts[-1] in ('apply', 'inherits', 'call'): + parts.pop() + parts.pop() + + # If the last part ends with an underscore, it is a private variable, + # method, or enum. The namespace is whatever is before it. + if parts and parts[-1].endswith('_'): + parts.pop() + + return '.'.join(parts) + + return None diff --git a/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py b/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py new file mode 100644 index 0000000..7aeae21 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py @@ -0,0 +1,873 @@ +#!/usr/bin/env python +# +# Copyright 2010 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for ClosurizedNamespacesInfo.""" + + + +import unittest as googletest +from closure_linter import aliaspass +from closure_linter import closurizednamespacesinfo +from closure_linter import ecmametadatapass +from closure_linter import javascriptstatetracker +from closure_linter import javascripttokens +from closure_linter import testutil +from closure_linter import tokenutil + +# pylint: disable=g-bad-name +TokenType = javascripttokens.JavaScriptTokenType + + +def _ToLineDict(illegal_alias_stmts): + """Replaces tokens with the respective line number.""" + return {k: v.line_number for k, v in illegal_alias_stmts.iteritems()} + + +class ClosurizedNamespacesInfoTest(googletest.TestCase): + """Tests for ClosurizedNamespacesInfo.""" + + _test_cases = { + 'goog.global.anything': None, + 'package.CONSTANT': 'package', + 'package.methodName': 'package', + 'package.subpackage.methodName': 'package.subpackage', + 'package.subpackage.methodName.apply': 'package.subpackage', + 'package.ClassName.something': 'package.ClassName', + 'package.ClassName.Enum.VALUE.methodName': 'package.ClassName', + 'package.ClassName.CONSTANT': 'package.ClassName', + 'package.namespace.CONSTANT.methodName': 'package.namespace', + 'package.ClassName.inherits': 'package.ClassName', + 'package.ClassName.apply': 'package.ClassName', + 'package.ClassName.methodName.apply': 'package.ClassName', + 'package.ClassName.methodName.call': 'package.ClassName', + 'package.ClassName.prototype.methodName': 'package.ClassName', + 'package.ClassName.privateMethod_': 'package.ClassName', + 'package.className.privateProperty_': 'package.className', + 'package.className.privateProperty_.methodName': 'package.className', + 'package.ClassName.PrivateEnum_': 'package.ClassName', + 'package.ClassName.prototype.methodName.apply': 'package.ClassName', + 'package.ClassName.property.subProperty': 'package.ClassName', + 'package.className.prototype.something.somethingElse': 'package.className' + } + + def testGetClosurizedNamespace(self): + """Tests that the correct namespace is returned for various identifiers.""" + namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( + closurized_namespaces=['package'], ignored_extra_namespaces=[]) + for identifier, expected_namespace in self._test_cases.items(): + actual_namespace = namespaces_info.GetClosurizedNamespace(identifier) + self.assertEqual( + expected_namespace, + actual_namespace, + 'expected namespace "' + str(expected_namespace) + + '" for identifier "' + str(identifier) + '" but was "' + + str(actual_namespace) + '"') + + def testIgnoredExtraNamespaces(self): + """Tests that ignored_extra_namespaces are ignored.""" + token = self._GetRequireTokens('package.Something') + namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( + closurized_namespaces=['package'], + ignored_extra_namespaces=['package.Something']) + + self.assertFalse(namespaces_info.IsExtraRequire(token), + 'Should be valid since it is in ignored namespaces.') + + namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( + ['package'], []) + + self.assertTrue(namespaces_info.IsExtraRequire(token), + 'Should be invalid since it is not in ignored namespaces.') + + def testIsExtraProvide_created(self): + """Tests that provides for created namespaces are not extra.""" + input_lines = [ + 'goog.provide(\'package.Foo\');', + 'package.Foo = function() {};' + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + 
self.assertFalse(namespaces_info.IsExtraProvide(token), + 'Should not be extra since it is created.') + + def testIsExtraProvide_createdIdentifier(self): + """Tests that provides for created identifiers are not extra.""" + input_lines = [ + 'goog.provide(\'package.Foo.methodName\');', + 'package.Foo.methodName = function() {};' + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertFalse(namespaces_info.IsExtraProvide(token), + 'Should not be extra since it is created.') + + def testIsExtraProvide_notCreated(self): + """Tests that provides for non-created namespaces are extra.""" + input_lines = ['goog.provide(\'package.Foo\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertTrue(namespaces_info.IsExtraProvide(token), + 'Should be extra since it is not created.') + + def testIsExtraProvide_notCreatedMultipartClosurizedNamespace(self): + """Tests that provides for non-created namespaces are extra.""" + input_lines = ['goog.provide(\'multi.part.namespace.Foo\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['multi.part']) + + self.assertTrue(namespaces_info.IsExtraProvide(token), + 'Should be extra since it is not created.') + + def testIsExtraProvide_duplicate(self): + """Tests that providing a namespace twice makes the second one extra.""" + input_lines = [ + 'goog.provide(\'package.Foo\');', + 'goog.provide(\'package.Foo\');', + 'package.Foo = function() {};' + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + # Advance to the second goog.provide token. + token = tokenutil.Search(token.next, TokenType.IDENTIFIER) + + self.assertTrue(namespaces_info.IsExtraProvide(token), + 'Should be extra since it is already provided.') + + def testIsExtraProvide_notClosurized(self): + """Tests that provides of non-closurized namespaces are not extra.""" + input_lines = ['goog.provide(\'notclosurized.Foo\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertFalse(namespaces_info.IsExtraProvide(token), + 'Should not be extra since it is not closurized.') + + def testIsExtraRequire_used(self): + """Tests that requires for used namespaces are not extra.""" + input_lines = [ + 'goog.require(\'package.Foo\');', + 'var x = package.Foo.methodName();' + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertFalse(namespaces_info.IsExtraRequire(token), + 'Should not be extra since it is used.') + + def testIsExtraRequire_usedIdentifier(self): + """Tests that requires for used methods on classes are extra.""" + input_lines = [ + 'goog.require(\'package.Foo.methodName\');', + 'var x = package.Foo.methodName();' + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertTrue(namespaces_info.IsExtraRequire(token), + 'Should require the package, not the method specifically.') + + def testIsExtraRequire_notUsed(self): + """Tests that requires for unused namespaces are extra.""" + input_lines = ['goog.require(\'package.Foo\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertTrue(namespaces_info.IsExtraRequire(token), + 'Should be extra since it is not used.') + + def 
testIsExtraRequire_notUsedMultiPartClosurizedNamespace(self): + """Tests unused require with multi-part closurized namespaces.""" + + input_lines = ['goog.require(\'multi.part.namespace.Foo\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['multi.part']) + + self.assertTrue(namespaces_info.IsExtraRequire(token), + 'Should be extra since it is not used.') + + def testIsExtraRequire_notClosurized(self): + """Tests that requires of non-closurized namespaces are not extra.""" + input_lines = ['goog.require(\'notclosurized.Foo\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertFalse(namespaces_info.IsExtraRequire(token), + 'Should not be extra since it is not closurized.') + + def testIsExtraRequire_objectOnClass(self): + """Tests that requiring an object on a class is extra.""" + input_lines = [ + 'goog.require(\'package.Foo.Enum\');', + 'var x = package.Foo.Enum.VALUE1;', + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertTrue(namespaces_info.IsExtraRequire(token), + 'The whole class, not the object, should be required.'); + + def testIsExtraRequire_constantOnClass(self): + """Tests that requiring a constant on a class is extra.""" + input_lines = [ + 'goog.require(\'package.Foo.CONSTANT\');', + 'var x = package.Foo.CONSTANT', + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertTrue(namespaces_info.IsExtraRequire(token), + 'The class, not the constant, should be required.'); + + def testIsExtraRequire_constantNotOnClass(self): + """Tests that requiring a constant not on a class is OK.""" + input_lines = [ + 'goog.require(\'package.subpackage.CONSTANT\');', + 'var x = package.subpackage.CONSTANT', + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertFalse(namespaces_info.IsExtraRequire(token), + 'Constants can be required except on classes.'); + + def testIsExtraRequire_methodNotOnClass(self): + """Tests that requiring a method not on a class is OK.""" + input_lines = [ + 'goog.require(\'package.subpackage.method\');', + 'var x = package.subpackage.method()', + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertFalse(namespaces_info.IsExtraRequire(token), + 'Methods can be required except on classes.'); + + def testIsExtraRequire_defaults(self): + """Tests that there are no warnings about extra requires for test utils""" + input_lines = ['goog.require(\'goog.testing.jsunit\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['goog']) + + self.assertFalse(namespaces_info.IsExtraRequire(token), + 'Should not be extra since it is for testing.') + + def testGetMissingProvides_provided(self): + """Tests that provided functions don't cause a missing provide.""" + input_lines = [ + 'goog.provide(\'package.Foo\');', + 'package.Foo = function() {};' + ] + + namespaces_info = self._GetNamespacesInfoForScript( + input_lines, ['package']) + + self.assertEquals(0, len(namespaces_info.GetMissingProvides())) + + def testGetMissingProvides_providedIdentifier(self): + """Tests that provided identifiers don't cause a missing provide.""" + input_lines = [ + 'goog.provide(\'package.Foo.methodName\');', + 'package.Foo.methodName = function() {};' + ] + + namespaces_info = 
self._GetNamespacesInfoForScript(input_lines, ['package']) + self.assertEquals(0, len(namespaces_info.GetMissingProvides())) + + def testGetMissingProvides_providedParentIdentifier(self): + """Tests that provided identifiers on a class don't cause a missing provide + on objects attached to that class.""" + input_lines = [ + 'goog.provide(\'package.foo.ClassName\');', + 'package.foo.ClassName.methodName = function() {};', + 'package.foo.ClassName.ObjectName = 1;', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + self.assertEquals(0, len(namespaces_info.GetMissingProvides())) + + def testGetMissingProvides_unprovided(self): + """Tests that unprovided functions cause a missing provide.""" + input_lines = ['package.Foo = function() {};'] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + + missing_provides = namespaces_info.GetMissingProvides() + self.assertEquals(1, len(missing_provides)) + missing_provide = missing_provides.popitem() + self.assertEquals('package.Foo', missing_provide[0]) + self.assertEquals(1, missing_provide[1]) + + def testGetMissingProvides_privatefunction(self): + """Tests that unprovided private functions don't cause a missing provide.""" + input_lines = ['package.Foo_ = function() {};'] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + self.assertEquals(0, len(namespaces_info.GetMissingProvides())) + + def testGetMissingProvides_required(self): + """Tests that required namespaces don't cause a missing provide.""" + input_lines = [ + 'goog.require(\'package.Foo\');', + 'package.Foo.methodName = function() {};' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + self.assertEquals(0, len(namespaces_info.GetMissingProvides())) + + def testGetMissingRequires_required(self): + """Tests that required namespaces don't cause a missing require.""" + input_lines = [ + 'goog.require(\'package.Foo\');', + 'package.Foo();' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(0, len(missing_requires)) + + def testGetMissingRequires_requiredIdentifier(self): + """Tests that required namespaces satisfy identifiers on that namespace.""" + input_lines = [ + 'goog.require(\'package.Foo\');', + 'package.Foo.methodName();' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(0, len(missing_requires)) + + def testGetMissingRequires_requiredNamespace(self): + """Tests that required namespaces satisfy the namespace.""" + input_lines = [ + 'goog.require(\'package.soy.fooTemplate\');', + 'render(package.soy.fooTemplate);' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(0, len(missing_requires)) + + def testGetMissingRequires_requiredParentClass(self): + """Tests that requiring a parent class of an object is sufficient to prevent + a missing require on that object.""" + input_lines = [ + 'goog.require(\'package.Foo\');', + 'package.Foo.methodName();', + 'package.Foo.methodName(package.Foo.ObjectName);' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(0, len(missing_requires)) + + def 
testGetMissingRequires_unrequired(self): + """Tests that unrequired namespaces cause a missing require.""" + input_lines = ['package.Foo();'] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(1, len(missing_requires)) + missing_req = missing_requires.popitem() + self.assertEquals('package.Foo', missing_req[0]) + self.assertEquals(1, missing_req[1]) + + def testGetMissingRequires_provided(self): + """Tests that provided namespaces satisfy identifiers on that namespace.""" + input_lines = [ + 'goog.provide(\'package.Foo\');', + 'package.Foo.methodName();' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(0, len(missing_requires)) + + def testGetMissingRequires_created(self): + """Tests that created namespaces do not satisfy usage of an identifier.""" + input_lines = [ + 'package.Foo = function();', + 'package.Foo.methodName();', + 'package.Foo.anotherMethodName1();', + 'package.Foo.anotherMethodName2();' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(1, len(missing_requires)) + missing_require = missing_requires.popitem() + self.assertEquals('package.Foo', missing_require[0]) + # Make sure line number of first occurrence is reported + self.assertEquals(2, missing_require[1]) + + def testGetMissingRequires_createdIdentifier(self): + """Tests that created identifiers satisfy usage of the identifier.""" + input_lines = [ + 'package.Foo.methodName = function();', + 'package.Foo.methodName();' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(0, len(missing_requires)) + + def testGetMissingRequires_implements(self): + """Tests that a parametrized type requires the correct identifier.""" + input_lines = [ + '/** @constructor @implements {package.Bar} */', + 'package.Foo = function();', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertItemsEqual({'package.Bar': 1}, missing_requires) + + def testGetMissingRequires_objectOnClass(self): + """Tests that we should require a class, not the object on the class.""" + input_lines = [ + 'goog.require(\'package.Foo.Enum\');', + 'var x = package.Foo.Enum.VALUE1;', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(1, len(missing_requires), + 'The whole class, not the object, should be required.') + + def testGetMissingRequires_variableWithSameName(self): + """Tests that we should not goog.require variables and parameters. + + b/5362203 Variables in scope are not missing namespaces. 
+ """ + input_lines = [ + 'goog.provide(\'Foo\');', + 'Foo.A = function();', + 'Foo.A.prototype.method = function(ab) {', + ' if (ab) {', + ' var docs;', + ' var lvalue = new Obj();', + ' // Variable in scope hence not goog.require here.', + ' docs.foo.abc = 1;', + ' lvalue.next();', + ' }', + ' // Since js is function scope this should also not goog.require.', + ' docs.foo.func();', + ' // Its not a variable in scope hence goog.require.', + ' dummy.xyz.reset();', + ' return this.method2();', + '};', + 'Foo.A.prototype.method1 = function(docs, abcd, xyz) {', + ' // Parameter hence not goog.require.', + ' docs.nodes.length = 2;', + ' lvalue.abc.reset();', + '};' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['Foo', + 'docs', + 'lvalue', + 'dummy']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(2, len(missing_requires)) + self.assertItemsEqual( + {'dummy.xyz': 14, + 'lvalue.abc': 20}, missing_requires) + + def testIsFirstProvide(self): + """Tests operation of the isFirstProvide method.""" + input_lines = [ + 'goog.provide(\'package.Foo\');', + 'package.Foo.methodName();' + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + self.assertTrue(namespaces_info.IsFirstProvide(token)) + + def testGetWholeIdentifierString(self): + """Tests that created identifiers satisfy usage of the identifier.""" + input_lines = [ + 'package.Foo.', + ' veryLong.', + ' identifier;' + ] + + token = testutil.TokenizeSource(input_lines) + + self.assertEquals('package.Foo.veryLong.identifier', + tokenutil.GetIdentifierForToken(token)) + + self.assertEquals(None, + tokenutil.GetIdentifierForToken(token.next)) + + def testScopified(self): + """Tests that a goog.scope call is noticed.""" + input_lines = [ + 'goog.scope(function() {', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + self.assertTrue(namespaces_info._scopified_file) + + def testScope_unusedAlias(self): + """Tests that an unused alias symbol is illegal.""" + input_lines = [ + 'goog.scope(function() {', + 'var Event = goog.events.Event;', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts)) + + def testScope_usedMultilevelAlias(self): + """Tests that an used alias symbol in a deep namespace is ok.""" + input_lines = [ + 'goog.require(\'goog.Events\');', + 'goog.scope(function() {', + 'var Event = goog.Events.DeepNamespace.Event;', + 'Event();', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({}, illegal_alias_stmts) + + def testScope_usedAlias(self): + """Tests that aliased symbols result in correct requires.""" + input_lines = [ + 'goog.scope(function() {', + 'var Event = goog.events.Event;', + 'var dom = goog.dom;', + 'Event(dom.classes.get);', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, illegal_alias_stmts) + self.assertEquals({'goog.dom.classes': 4, 'goog.events.Event': 4}, + missing_requires) + + def testModule_alias(self): + """Tests that 
goog.module style aliases are supported.""" + input_lines = [ + 'goog.module(\'test.module\');', + 'var Unused = goog.require(\'goog.Unused\');', + 'var AliasedClass = goog.require(\'goog.AliasedClass\');', + 'var x = new AliasedClass();', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + namespaceToken = self._GetRequireTokens('goog.AliasedClass') + self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken), + 'AliasedClass should be marked as used') + unusedToken = self._GetRequireTokens('goog.Unused') + self.assertTrue(namespaces_info.IsExtraRequire(unusedToken), + 'Unused should be marked as not used') + + def testModule_aliasInScope(self): + """Tests that goog.module style aliases are supported.""" + input_lines = [ + 'goog.module(\'test.module\');', + 'var AliasedClass = goog.require(\'goog.AliasedClass\');', + 'goog.scope(function() {', + 'var x = new AliasedClass();', + '});', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + namespaceToken = self._GetRequireTokens('goog.AliasedClass') + self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken), + 'AliasedClass should be marked as used') + + def testModule_getAlwaysProvided(self): + """Tests that goog.module.get is recognized as a built-in.""" + input_lines = [ + 'goog.provide(\'test.MyClass\');', + 'goog.require(\'goog.someModule\');', + 'goog.scope(function() {', + 'var someModule = goog.module.get(\'goog.someModule\');', + 'test.MyClass = function() {};', + '});', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + self.assertEquals({}, namespaces_info.GetMissingRequires()[0]) + + def testModule_requireForGet(self): + """Tests that goog.module.get needs a goog.require call.""" + input_lines = [ + 'goog.provide(\'test.MyClass\');', + 'function foo() {', + ' var someModule = goog.module.get(\'goog.someModule\');', + ' someModule.doSth();', + '}', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + self.assertEquals({'goog.someModule': 3}, + namespaces_info.GetMissingRequires()[0]) + + def testScope_usedTypeAlias(self): + """Tests aliased symbols in type annotations.""" + input_lines = [ + 'goog.scope(function() {', + 'var Event = goog.events.Event;', + '/** @type {Event} */;', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts)) + + def testScope_partialAlias_typeOnly(self): + """Tests a partial alias only used in type annotations. + + In this example, some goog.events namespace would need to be required + so that evaluating goog.events.bar doesn't throw an error. + """ + input_lines = [ + 'goog.scope(function() {', + 'var bar = goog.events.bar;', + '/** @type {bar.Foo} */;', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts)) + + def testScope_partialAlias(self): + """Tests a partial alias in conjunction with a type annotation. + + In this example, the partial alias is already defined by another type, + therefore the doc-only type doesn't need to be required. 
+ """ + input_lines = [ + 'goog.scope(function() {', + 'var bar = goog.events.bar;', + '/** @type {bar.Event} */;', + 'bar.EventType();' + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({'goog.events.bar.EventType': 4}, missing_requires) + self.assertEquals({}, illegal_alias_stmts) + + def testScope_partialAliasRequires(self): + """Tests partial aliases with correct requires.""" + input_lines = [ + 'goog.require(\'goog.events.bar.EventType\');', + 'goog.scope(function() {', + 'var bar = goog.events.bar;', + '/** @type {bar.Event} */;', + 'bar.EventType();' + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({}, illegal_alias_stmts) + + def testScope_partialAliasRequiresBoth(self): + """Tests partial aliases with correct requires.""" + input_lines = [ + 'goog.require(\'goog.events.bar.Event\');', + 'goog.require(\'goog.events.bar.EventType\');', + 'goog.scope(function() {', + 'var bar = goog.events.bar;', + '/** @type {bar.Event} */;', + 'bar.EventType();' + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({}, illegal_alias_stmts) + event_token = self._GetRequireTokens('goog.events.bar.Event') + self.assertTrue(namespaces_info.IsExtraRequire(event_token)) + + def testScope_partialAliasNoSubtypeRequires(self): + """Tests that partial aliases don't yield subtype requires (regression).""" + input_lines = [ + 'goog.provide(\'goog.events.Foo\');', + 'goog.scope(function() {', + 'goog.events.Foo = {};', + 'var Foo = goog.events.Foo;' + 'Foo.CssName_ = {};' + 'var CssName_ = Foo.CssName_;' + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + + def testScope_aliasNamespace(self): + """Tests that an unused alias namespace is not required when available. + + In the example goog.events.Bar is not required, because the namespace + goog.events is already defined because goog.events.Foo is required. 
+ """ + input_lines = [ + 'goog.require(\'goog.events.Foo\');', + 'goog.scope(function() {', + 'var Bar = goog.events.Bar;', + '/** @type {Bar} */;', + 'goog.events.Foo;', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({}, illegal_alias_stmts) + + def testScope_aliasNamespaceIllegal(self): + """Tests that an unused alias namespace is not required when available.""" + input_lines = [ + 'goog.scope(function() {', + 'var Bar = goog.events.Bar;', + '/** @type {Bar} */;', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts)) + + def testScope_provides(self): + """Tests that aliased symbols result in correct provides.""" + input_lines = [ + 'goog.scope(function() {', + 'goog.bar = {};', + 'var bar = goog.bar;', + 'bar.Foo = {};', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_provides = namespaces_info.GetMissingProvides() + self.assertEquals({'goog.bar.Foo': 4}, missing_provides) + _, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, illegal_alias_stmts) + + def testSetTestOnlyNamespaces(self): + """Tests that a namespace in setTestOnly makes it a valid provide.""" + namespaces_info = self._GetNamespacesInfoForScript([ + 'goog.setTestOnly(\'goog.foo.barTest\');' + ], ['goog']) + + token = self._GetProvideTokens('goog.foo.barTest') + self.assertFalse(namespaces_info.IsExtraProvide(token)) + + token = self._GetProvideTokens('goog.foo.bazTest') + self.assertTrue(namespaces_info.IsExtraProvide(token)) + + def testSetTestOnlyComment(self): + """Ensure a comment in setTestOnly does not cause a created namespace.""" + namespaces_info = self._GetNamespacesInfoForScript([ + 'goog.setTestOnly(\'this is a comment\');' + ], ['goog']) + + self.assertEquals( + [], namespaces_info._created_namespaces, + 'A comment in setTestOnly should not modify created namespaces.') + + def _GetNamespacesInfoForScript(self, script, closurized_namespaces=None): + _, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + script, closurized_namespaces) + + return namespaces_info + + def _GetStartTokenAndNamespacesInfoForScript( + self, script, closurized_namespaces): + + token = testutil.TokenizeSource(script) + return token, self._GetInitializedNamespacesInfo( + token, closurized_namespaces, []) + + def _GetInitializedNamespacesInfo(self, token, closurized_namespaces, + ignored_extra_namespaces): + """Returns a namespaces info initialized with the given token stream.""" + namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( + closurized_namespaces=closurized_namespaces, + ignored_extra_namespaces=ignored_extra_namespaces) + state_tracker = javascriptstatetracker.JavaScriptStateTracker() + + ecma_pass = ecmametadatapass.EcmaMetaDataPass() + ecma_pass.Process(token) + + state_tracker.DocFlagPass(token, error_handler=None) + + alias_pass = aliaspass.AliasPass(closurized_namespaces) + alias_pass.Process(token) + + while token: + state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken()) + namespaces_info.ProcessToken(token, state_tracker) + state_tracker.HandleAfterToken(token) + token = token.next 
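+    # At this point the metadata, doc-flag and alias passes have run and every
+    # token has been fed to both the state tracker and namespaces_info, so the
+    # returned object can answer GetMissingRequires()/IsExtraRequire() queries.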
+ + return namespaces_info + + def _GetProvideTokens(self, namespace): + """Returns a list of tokens for a goog.require of the given namespace.""" + line_text = 'goog.require(\'' + namespace + '\');\n' + return testutil.TokenizeSource([line_text]) + + def _GetRequireTokens(self, namespace): + """Returns a list of tokens for a goog.require of the given namespace.""" + line_text = 'goog.require(\'' + namespace + '\');\n' + return testutil.TokenizeSource([line_text]) + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/build/lib/closure_linter/common/__init__.py b/tools/closure_linter/build/lib/closure_linter/common/__init__.py new file mode 100644 index 0000000..5793043 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Package indicator for gjslint.common.""" diff --git a/tools/closure_linter/build/lib/closure_linter/common/error.py b/tools/closure_linter/build/lib/closure_linter/common/error.py new file mode 100644 index 0000000..4209c23 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/error.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python +# +# Copyright 2007 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Error object commonly used in linters.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + + +class Error(object): + """Object representing a style error.""" + + def __init__(self, code, message, token=None, position=None, fix_data=None): + """Initialize the error object. + + Args: + code: The numeric error code. + message: The error message string. + token: The tokens.Token where the error occurred. + position: The position of the error within the token. + fix_data: Data to be used in autofixing. Codes with fix_data are: + GOOG_REQUIRES_NOT_ALPHABETIZED - List of string value tokens that are + class names in goog.requires calls. + """ + self.code = code + self.message = message + self.token = token + self.position = position + if token: + self.start_index = token.start_index + else: + self.start_index = 0 + self.fix_data = fix_data + if self.position: + self.start_index += self.position.start + + def Compare(a, b): + """Compare two error objects, by source code order. + + Args: + a: First error object. + b: Second error object. 
+ + Returns: + A Negative/0/Positive number when a is before/the same as/after b. + """ + line_diff = a.token.line_number - b.token.line_number + if line_diff: + return line_diff + + return a.start_index - b.start_index + Compare = staticmethod(Compare) diff --git a/tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py b/tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py new file mode 100644 index 0000000..55844ba --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Linter error handler class that accumulates an array of errors.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + + +from closure_linter.common import errorhandler + + +class ErrorAccumulator(errorhandler.ErrorHandler): + """Error handler object that accumulates errors in a list.""" + + def __init__(self): + self._errors = [] + + def HandleError(self, error): + """Append the error to the list. + + Args: + error: The error object + """ + self._errors.append(error) + + def GetErrors(self): + """Returns the accumulated errors. + + Returns: + A sequence of errors. + """ + return self._errors diff --git a/tools/closure_linter/build/lib/closure_linter/common/errorhandler.py b/tools/closure_linter/build/lib/closure_linter/common/errorhandler.py new file mode 100644 index 0000000..764d54d --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/errorhandler.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python +# +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Interface for a linter error handler. + +Error handlers aggregate a set of errors from multiple files and can optionally +perform some action based on the reported errors, for example, logging the error +or automatically fixing it. +""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + + +class ErrorHandler(object): + """Error handler interface.""" + + def __init__(self): + if self.__class__ == ErrorHandler: + raise NotImplementedError('class ErrorHandler is abstract') + + def HandleFile(self, filename, first_token): + """Notifies this ErrorHandler that subsequent errors are in filename. + + Args: + filename: The file being linted. + first_token: The first token of the file. 
+ """ + + def HandleError(self, error): + """Append the error to the list. + + Args: + error: The error object + """ + + def FinishFile(self): + """Finishes handling the current file. + + Should be called after all errors in a file have been handled. + """ + + def GetErrors(self): + """Returns the accumulated errors. + + Returns: + A sequence of errors. + """ diff --git a/tools/closure_linter/build/lib/closure_linter/common/erroroutput.py b/tools/closure_linter/build/lib/closure_linter/common/erroroutput.py new file mode 100644 index 0000000..149738b --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/erroroutput.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utility functions to format errors.""" + + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)', + 'nnaze@google.com (Nathan Naze)') + + +def GetUnixErrorOutput(filename, error, new_error=False): + """Get a output line for an error in UNIX format.""" + + line = '' + + if error.token: + line = '%d' % error.token.line_number + + error_code = '%04d' % error.code + if new_error: + error_code = 'New Error ' + error_code + return '%s:%s:(%s) %s' % (filename, line, error_code, error.message) + + +def GetErrorOutput(error, new_error=False): + """Get a output line for an error in regular format.""" + + line = '' + if error.token: + line = 'Line %d, ' % error.token.line_number + + code = 'E:%04d' % error.code + + error_message = error.message + if new_error: + error_message = 'New Error ' + error_message + + return '%s%s: %s' % (line, code, error.message) diff --git a/tools/closure_linter/build/lib/closure_linter/common/filetestcase.py b/tools/closure_linter/build/lib/closure_linter/common/filetestcase.py new file mode 100644 index 0000000..7cd83cd --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/filetestcase.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python +# Copyright 2007 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test case that runs a checker on a file, matching errors against annotations. + +Runs the given checker on the given file, accumulating all errors. The list +of errors is then matched against those annotated in the file. Based heavily +on devtools/javascript/gpylint/full_test.py. 
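Taken together, these modules form the error-reporting plumbing: a checker builds Error objects, hands them to an ErrorHandler such as ErrorAccumulator, and a front end renders them with the erroroutput helpers. A minimal sketch of that flow follows, assuming the vendored package is importable; the token, the error code 220 and the file name are illustrative only.

from closure_linter.common import error, erroraccumulator, erroroutput, tokens

token = tokens.Token('foo', tokens.TokenType.NORMAL, 'var foo', 42)
lint_error = error.Error(220, 'Example message', token=token)

accumulator = erroraccumulator.ErrorAccumulator()
accumulator.HandleError(lint_error)

for err in accumulator.GetErrors():
    print(erroroutput.GetUnixErrorOutput('example.js', err))
    # -> example.js:42:(0220) Example message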
+""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + +import re + +import gflags as flags +import unittest as googletest +from closure_linter.common import erroraccumulator + + +class AnnotatedFileTestCase(googletest.TestCase): + """Test case to run a linter against a single file.""" + + # Matches an all caps letters + underscores error identifer + _MESSAGE = {'msg': '[A-Z][A-Z_]+'} + # Matches a //, followed by an optional line number with a +/-, followed by a + # list of message IDs. Used to extract expected messages from testdata files. + # TODO(robbyw): Generalize to use different commenting patterns. + _EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P[+-]?[0-9]+):)?' + r'\s*(?P%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE) + + def __init__(self, filename, lint_callable, converter): + """Create a single file lint test case. + + Args: + filename: Filename to test. + lint_callable: Callable that lints a file. This is usually runner.Run(). + converter: Function taking an error string and returning an error code. + """ + + googletest.TestCase.__init__(self, 'runTest') + self._filename = filename + self._messages = [] + self._lint_callable = lint_callable + self._converter = converter + + def setUp(self): + flags.FLAGS.dot_on_next_line = True + + def tearDown(self): + flags.FLAGS.dot_on_next_line = False + + def shortDescription(self): + """Provides a description for the test.""" + return 'Run linter on %s' % self._filename + + def runTest(self): + """Runs the test.""" + try: + filename = self._filename + stream = open(filename) + except IOError as ex: + raise IOError('Could not find testdata resource for %s: %s' % + (self._filename, ex)) + + expected = self._GetExpectedMessages(stream) + got = self._ProcessFileAndGetMessages(filename) + self.assertEqual(expected, got) + + def _GetExpectedMessages(self, stream): + """Parse a file and get a sorted list of expected messages.""" + messages = [] + for i, line in enumerate(stream): + match = self._EXPECTED_RE.search(line) + if match: + line = match.group('line') + msg_ids = match.group('msgs') + if line is None: + line = i + 1 + elif line.startswith('+') or line.startswith('-'): + line = i + 1 + int(line) + else: + line = int(line) + for msg_id in msg_ids.split(','): + # Ignore a spurious message from the license preamble. + if msg_id != 'WITHOUT': + messages.append((line, self._converter(msg_id.strip()))) + stream.seek(0) + messages.sort() + return messages + + def _ProcessFileAndGetMessages(self, filename): + """Trap gjslint's output parse it to get messages added.""" + error_accumulator = erroraccumulator.ErrorAccumulator() + self._lint_callable(filename, error_accumulator) + + errors = error_accumulator.GetErrors() + + # Convert to expected tuple format. + + error_msgs = [(error.token.line_number, error.code) for error in errors] + error_msgs.sort() + return error_msgs diff --git a/tools/closure_linter/build/lib/closure_linter/common/htmlutil.py b/tools/closure_linter/build/lib/closure_linter/common/htmlutil.py new file mode 100644 index 0000000..26d44c5 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/htmlutil.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python +# +# Copyright 2007 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
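The _EXPECTED_RE pattern is what ties a testdata file to its expected lint findings: a trailing // comment names the error identifiers, optionally preceded by an absolute or relative line number. The standalone sketch below uses the same pattern, with the named groups written out as 'line' and 'msgs' (the names the parser looks up via match.group above); the two sample source lines are hypothetical.

import re

_MESSAGE = {'msg': '[A-Z][A-Z_]+'}
EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P<line>[+-]?[0-9]+):)?'
                         r'\s*(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE)

for source_line in ('var x=1;  // MISSING_SPACE',
                    'var y = 2;  // +1: LINE_TOO_LONG, EXTRA_SPACE'):
    match = EXPECTED_RE.search(source_line)
    print((match.group('line'), match.group('msgs')))
# -> (None, 'MISSING_SPACE')
# -> ('+1', 'LINE_TOO_LONG, EXTRA_SPACE')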
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for dealing with HTML.""" + +__author__ = ('robbyw@google.com (Robert Walker)') + +import cStringIO +import formatter +import htmllib +import HTMLParser +import re + + +class ScriptExtractor(htmllib.HTMLParser): + """Subclass of HTMLParser that extracts script contents from an HTML file. + + Also inserts appropriate blank lines so that line numbers in the extracted + code match the line numbers in the original HTML. + """ + + def __init__(self): + """Initialize a ScriptExtractor.""" + htmllib.HTMLParser.__init__(self, formatter.NullFormatter()) + self._in_script = False + self._text = '' + + def start_script(self, attrs): + """Internal handler for the start of a script tag. + + Args: + attrs: The attributes of the script tag, as a list of tuples. + """ + for attribute in attrs: + if attribute[0].lower() == 'src': + # Skip script tags with a src specified. + return + self._in_script = True + + def end_script(self): + """Internal handler for the end of a script tag.""" + self._in_script = False + + def handle_data(self, data): + """Internal handler for character data. + + Args: + data: The character data from the HTML file. + """ + if self._in_script: + # If the last line contains whitespace only, i.e. is just there to + # properly align a tag, strip the whitespace. + if data.rstrip(' \t') != data.rstrip(' \t\n\r\f'): + data = data.rstrip(' \t') + self._text += data + else: + self._AppendNewlines(data) + + def handle_comment(self, data): + """Internal handler for HTML comments. + + Args: + data: The text of the comment. + """ + self._AppendNewlines(data) + + def _AppendNewlines(self, data): + """Count the number of newlines in the given string and append them. + + This ensures line numbers are correct for reported errors. + + Args: + data: The data to count newlines in. + """ + # We append 'x' to both sides of the string to ensure that splitlines + # gives us an accurate count. + for i in xrange(len(('x' + data + 'x').splitlines()) - 1): + self._text += '\n' + + def GetScriptLines(self): + """Return the extracted script lines. + + Returns: + The extracted script lines as a list of strings. + """ + return self._text.splitlines() + + +def GetScriptLines(f): + """Extract script tag contents from the given HTML file. + + Args: + f: The HTML file. + + Returns: + Lines in the HTML file that are from script tags. + """ + extractor = ScriptExtractor() + + # The HTML parser chokes on text like Array., so we patch + # that bug by replacing the < with < - escaping all text inside script + # tags would be better but it's a bit of a catch 22. + contents = f.read() + contents = re.sub(r'<([^\s\w/])', + lambda x: '<%s' % x.group(1), + contents) + + extractor.feed(contents) + extractor.close() + return extractor.GetScriptLines() + + +def StripTags(str): + """Returns the string with HTML tags stripped. + + Args: + str: An html string. + + Returns: + The html string with all tags stripped. If there was a parse error, returns + the text successfully parsed so far. + """ + # Brute force approach to stripping as much HTML as possible. 
If there is a + # parsing error, don't strip text before parse error position, and continue + # trying from there. + final_text = '' + finished = False + while not finished: + try: + strip = _HtmlStripper() + strip.feed(str) + strip.close() + str = strip.get_output() + final_text += str + finished = True + except HTMLParser.HTMLParseError, e: + final_text += str[:e.offset] + str = str[e.offset + 1:] + + return final_text + + +class _HtmlStripper(HTMLParser.HTMLParser): + """Simple class to strip tags from HTML. + + Does so by doing nothing when encountering tags, and appending character data + to a buffer when that is encountered. + """ + def __init__(self): + self.reset() + self.__output = cStringIO.StringIO() + + def handle_data(self, d): + self.__output.write(d) + + def get_output(self): + return self.__output.getvalue() diff --git a/tools/closure_linter/build/lib/closure_linter/common/lintrunner.py b/tools/closure_linter/build/lib/closure_linter/common/lintrunner.py new file mode 100644 index 0000000..07842c7 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/lintrunner.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python +# +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Interface for a lint running wrapper.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + + +class LintRunner(object): + """Interface for a lint running wrapper.""" + + def __init__(self): + if self.__class__ == LintRunner: + raise NotImplementedError('class LintRunner is abstract') + + def Run(self, filenames, error_handler): + """Run a linter on the given filenames. + + Args: + filenames: The filenames to check + error_handler: An ErrorHandler object + + Returns: + The error handler, which may have been used to collect error info. + """ diff --git a/tools/closure_linter/build/lib/closure_linter/common/matcher.py b/tools/closure_linter/build/lib/closure_linter/common/matcher.py new file mode 100644 index 0000000..9b4402c --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/matcher.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python +# +# Copyright 2007 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
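ScriptExtractor is how the linter checks inline JavaScript in HTML files without losing line numbers: script bodies are kept, and everything else is replaced by blank lines. A minimal sketch of GetScriptLines on an in-memory document follows (Python 2 only, since the module relies on htmllib and cStringIO); the HTML snippet is invented.

import StringIO

from closure_linter.common import htmlutil

html = StringIO.StringIO('<html>\n'
                         '<script>\n'
                         'var x = 1;\n'
                         '</script>\n'
                         '</html>\n')

for number, line in enumerate(htmlutil.GetScriptLines(html), 1):
    print((number, line))
# The script body keeps its original position (line 3); the surrounding
# markup comes back as blank lines.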
+ +"""Regular expression based JavaScript matcher classes.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + +from closure_linter.common import position +from closure_linter.common import tokens + +# Shorthand +Token = tokens.Token +Position = position.Position + + +class Matcher(object): + """A token matcher. + + Specifies a pattern to match, the type of token it represents, what mode the + token changes to, and what mode the token applies to. + + Modes allow more advanced grammars to be incorporated, and are also necessary + to tokenize line by line. We can have different patterns apply to different + modes - i.e. looking for documentation while in comment mode. + + Attributes: + regex: The regular expression representing this matcher. + type: The type of token indicated by a successful match. + result_mode: The mode to move to after a successful match. + """ + + def __init__(self, regex, token_type, result_mode=None, line_start=False): + """Create a new matcher template. + + Args: + regex: The regular expression to match. + token_type: The type of token a successful match indicates. + result_mode: What mode to change to after a successful match. Defaults to + None, which means to not change the current mode. + line_start: Whether this matcher should only match string at the start + of a line. + """ + self.regex = regex + self.type = token_type + self.result_mode = result_mode + self.line_start = line_start diff --git a/tools/closure_linter/build/lib/closure_linter/common/position.py b/tools/closure_linter/build/lib/closure_linter/common/position.py new file mode 100644 index 0000000..cebf17e --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/position.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python +# +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Classes to represent positions within strings.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + + +class Position(object): + """Object representing a segment of a string. + + Attributes: + start: The index in to the string where the segment starts. + length: The length of the string segment. + """ + + def __init__(self, start, length): + """Initialize the position object. + + Args: + start: The start index. + length: The number of characters to include. + """ + self.start = start + self.length = length + + def Get(self, string): + """Returns this range of the given string. + + Args: + string: The string to slice. + + Returns: + The string within the range specified by this object. + """ + return string[self.start:self.start + self.length] + + def Set(self, target, source): + """Sets this range within the target string to the source string. + + Args: + target: The target string. + source: The source string. 
+ + Returns: + The resulting string + """ + return target[:self.start] + source + target[self.start + self.length:] + + def AtEnd(string): + """Create a Position representing the end of the given string. + + Args: + string: The string to represent the end of. + + Returns: + The created Position object. + """ + return Position(len(string), 0) + AtEnd = staticmethod(AtEnd) + + def IsAtEnd(self, string): + """Returns whether this position is at the end of the given string. + + Args: + string: The string to test for the end of. + + Returns: + Whether this position is at the end of the given string. + """ + return self.start == len(string) and self.length == 0 + + def AtBeginning(): + """Create a Position representing the beginning of any string. + + Returns: + The created Position object. + """ + return Position(0, 0) + AtBeginning = staticmethod(AtBeginning) + + def IsAtBeginning(self): + """Returns whether this position is at the beginning of any string. + + Returns: + Whether this position is at the beginning of any string. + """ + return self.start == 0 and self.length == 0 + + def All(string): + """Create a Position representing the entire string. + + Args: + string: The string to represent the entirety of. + + Returns: + The created Position object. + """ + return Position(0, len(string)) + All = staticmethod(All) + + def Index(index): + """Returns a Position object for the specified index. + + Args: + index: The index to select, inclusively. + + Returns: + The created Position object. + """ + return Position(index, 1) + Index = staticmethod(Index) diff --git a/tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py b/tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py new file mode 100644 index 0000000..3402bef --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py @@ -0,0 +1,190 @@ +#!/usr/bin/env python +# +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Determines the list of files to be checked from command line arguments.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + +import glob +import os +import re + +import gflags as flags + + +FLAGS = flags.FLAGS + +flags.DEFINE_multistring( + 'recurse', + None, + 'Recurse in to the subdirectories of the given path', + short_name='r') +flags.DEFINE_list( + 'exclude_directories', + ('_demos'), + 'Exclude the specified directories (only applicable along with -r or ' + '--presubmit)', + short_name='e') +flags.DEFINE_list( + 'exclude_files', + ('deps.js'), + 'Exclude the specified files', + short_name='x') + + +def MatchesSuffixes(filename, suffixes): + """Returns whether the given filename matches one of the given suffixes. + + Args: + filename: Filename to check. + suffixes: Sequence of suffixes to check. + + Returns: + Whether the given filename matches one of the given suffixes. 
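Position objects are how the fixer knows which slice of a token's string an error refers to: Get extracts the slice, Set splices a replacement in, and the AtBeginning/AtEnd/All/Index constructors cover the common cases. A minimal sketch, with an invented source line:

from closure_linter.common import position

source = 'var foo  = 1;'
extra_space = position.Position(7, 1)          # one of the two spaces after 'foo'

print(extra_space.Get(source))                 # -> ' '
print(extra_space.Set(source, ''))             # -> 'var foo = 1;'
print(position.Position.All(source).Get(source) == source)   # -> True
print(position.Position.AtEnd(source).IsAtEnd(source))        # -> True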
+ """ + suffix = filename[filename.rfind('.'):] + return suffix in suffixes + + +def _GetUserSpecifiedFiles(argv, suffixes): + """Returns files to be linted, specified directly on the command line. + + Can handle the '*' wildcard in filenames, but no other wildcards. + + Args: + argv: Sequence of command line arguments. The second and following arguments + are assumed to be files that should be linted. + suffixes: Expected suffixes for the file type being checked. + + Returns: + A sequence of files to be linted. + """ + files = argv[1:] or [] + all_files = [] + lint_files = [] + + # Perform any necessary globs. + for f in files: + if f.find('*') != -1: + for result in glob.glob(f): + all_files.append(result) + else: + all_files.append(f) + + for f in all_files: + if MatchesSuffixes(f, suffixes): + lint_files.append(f) + return lint_files + + +def _GetRecursiveFiles(suffixes): + """Returns files to be checked specified by the --recurse flag. + + Args: + suffixes: Expected suffixes for the file type being checked. + + Returns: + A list of files to be checked. + """ + lint_files = [] + # Perform any request recursion + if FLAGS.recurse: + for start in FLAGS.recurse: + for root, subdirs, files in os.walk(start): + for f in files: + if MatchesSuffixes(f, suffixes): + lint_files.append(os.path.join(root, f)) + return lint_files + + +def GetAllSpecifiedFiles(argv, suffixes): + """Returns all files specified by the user on the commandline. + + Args: + argv: Sequence of command line arguments. The second and following arguments + are assumed to be files that should be linted. + suffixes: Expected suffixes for the file type + + Returns: + A list of all files specified directly or indirectly (via flags) on the + command line by the user. + """ + files = _GetUserSpecifiedFiles(argv, suffixes) + + if FLAGS.recurse: + files += _GetRecursiveFiles(suffixes) + + return FilterFiles(files) + + +def FilterFiles(files): + """Filters the list of files to be linted be removing any excluded files. + + Filters out files excluded using --exclude_files and --exclude_directories. + + Args: + files: Sequence of files that needs filtering. + + Returns: + Filtered list of files to be linted. + """ + num_files = len(files) + + ignore_dirs_regexs = [] + for ignore in FLAGS.exclude_directories: + ignore_dirs_regexs.append(re.compile(r'(^|[\\/])%s[\\/]' % ignore)) + + result_files = [] + for f in files: + add_file = True + for exclude in FLAGS.exclude_files: + if f.endswith('/' + exclude) or f == exclude: + add_file = False + break + for ignore in ignore_dirs_regexs: + if ignore.search(f): + # Break out of ignore loop so we don't add to + # filtered files. + add_file = False + break + if add_file: + # Convert everything to absolute paths so we can easily remove duplicates + # using a set. + result_files.append(os.path.abspath(f)) + + skipped = num_files - len(result_files) + if skipped: + print 'Skipping %d file(s).' % skipped + + return set(result_files) + + +def GetFileList(argv, file_type, suffixes): + """Parse the flags and return the list of files to check. + + Args: + argv: Sequence of command line arguments. + suffixes: Sequence of acceptable suffixes for the file type. + + Returns: + The list of files to check. 
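File selection is suffix-driven: anything named on the command line (or found via --recurse) is kept only if its extension is in the expected set, and --exclude_files/--exclude_directories filter out the rest. A small sketch of the suffix test follows, assuming python-gflags is installed since importing the module defines its flags; the file names are made up.

from closure_linter.common import simplefileflags

print(simplefileflags.MatchesSuffixes('app/main.js', ['.js']))      # -> True
print(simplefileflags.MatchesSuffixes('app/main.js.bak', ['.js']))  # -> False
print(simplefileflags.MatchesSuffixes('README', ['.js']))           # -> False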
+ """ + return sorted(GetAllSpecifiedFiles(argv, suffixes)) + + +def IsEmptyArgumentList(argv): + return not (len(argv[1:]) or FLAGS.recurse) diff --git a/tools/closure_linter/build/lib/closure_linter/common/tokenizer.py b/tools/closure_linter/build/lib/closure_linter/common/tokenizer.py new file mode 100644 index 0000000..9420ea3 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/tokenizer.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python +# +# Copyright 2007 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Regular expression based lexer.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + +from closure_linter.common import tokens + +# Shorthand +Type = tokens.TokenType + + +class Tokenizer(object): + """General purpose tokenizer. + + Attributes: + mode: The latest mode of the tokenizer. This allows patterns to distinguish + if they are mid-comment, mid-parameter list, etc. + matchers: Dictionary of modes to sequences of matchers that define the + patterns to check at any given time. + default_types: Dictionary of modes to types, defining what type to give + non-matched text when in the given mode. Defaults to Type.NORMAL. + """ + + def __init__(self, starting_mode, matchers, default_types): + """Initialize the tokenizer. + + Args: + starting_mode: Mode to start in. + matchers: Dictionary of modes to sequences of matchers that defines the + patterns to check at any given time. + default_types: Dictionary of modes to types, defining what type to give + non-matched text when in the given mode. Defaults to Type.NORMAL. + """ + self.__starting_mode = starting_mode + self.matchers = matchers + self.default_types = default_types + + def TokenizeFile(self, file): + """Tokenizes the given file. + + Args: + file: An iterable that yields one line of the file at a time. + + Returns: + The first token in the file + """ + # The current mode. + self.mode = self.__starting_mode + # The first token in the stream. + self.__first_token = None + # The last token added to the token stream. + self.__last_token = None + # The current line number. + self.__line_number = 0 + + for line in file: + self.__line_number += 1 + self.__TokenizeLine(line) + + return self.__first_token + + def _CreateToken(self, string, token_type, line, line_number, values=None): + """Creates a new Token object (or subclass). + + Args: + string: The string of input the token represents. + token_type: The type of token. + line: The text of the line this token is in. + line_number: The line number of the token. + values: A dict of named values within the token. For instance, a + function declaration may have a value called 'name' which captures the + name of the function. + + Returns: + The newly created Token object. + """ + return tokens.Token(string, token_type, line, line_number, values, + line_number) + + def __TokenizeLine(self, line): + """Tokenizes the given line. + + Args: + line: The contents of the line. 
+ """ + string = line.rstrip('\n\r\f') + line_number = self.__line_number + self.__start_index = 0 + + if not string: + self.__AddToken(self._CreateToken('', Type.BLANK_LINE, line, line_number)) + return + + normal_token = '' + index = 0 + while index < len(string): + for matcher in self.matchers[self.mode]: + if matcher.line_start and index > 0: + continue + + match = matcher.regex.match(string, index) + + if match: + if normal_token: + self.__AddToken( + self.__CreateNormalToken(self.mode, normal_token, line, + line_number)) + normal_token = '' + + # Add the match. + self.__AddToken(self._CreateToken(match.group(), matcher.type, line, + line_number, match.groupdict())) + + # Change the mode to the correct one for after this match. + self.mode = matcher.result_mode or self.mode + + # Shorten the string to be matched. + index = match.end() + + break + + else: + # If the for loop finishes naturally (i.e. no matches) we just add the + # first character to the string of consecutive non match characters. + # These will constitute a NORMAL token. + if string: + normal_token += string[index:index + 1] + index += 1 + + if normal_token: + self.__AddToken( + self.__CreateNormalToken(self.mode, normal_token, line, line_number)) + + def __CreateNormalToken(self, mode, string, line, line_number): + """Creates a normal token. + + Args: + mode: The current mode. + string: The string to tokenize. + line: The line of text. + line_number: The line number within the file. + + Returns: + A Token object, of the default type for the current mode. + """ + type = Type.NORMAL + if mode in self.default_types: + type = self.default_types[mode] + return self._CreateToken(string, type, line, line_number) + + def __AddToken(self, token): + """Add the given token to the token stream. + + Args: + token: The token to add. + """ + # Store the first token, or point the previous token to this one. + if not self.__first_token: + self.__first_token = token + else: + self.__last_token.next = token + + # Establish the doubly linked list + token.previous = self.__last_token + self.__last_token = token + + # Compute the character indices + token.start_index = self.__start_index + self.__start_index += token.length diff --git a/tools/closure_linter/build/lib/closure_linter/common/tokens.py b/tools/closure_linter/build/lib/closure_linter/common/tokens.py new file mode 100644 index 0000000..4703998 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/tokens.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python +# +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Classes to represent tokens and positions within them.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + + +class TokenType(object): + """Token types common to all languages.""" + NORMAL = 'normal' + WHITESPACE = 'whitespace' + BLANK_LINE = 'blank line' + + +class Token(object): + """Token class for intelligent text splitting. 
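The tokenizer is a small mode-driven regex engine: for each line it tries the matchers registered for the current mode, emits a typed token for every hit, collects any unmatched characters into NORMAL tokens, and wires everything into a doubly linked list with running start indices. A minimal sketch with a single invented mode ('code') and one matcher that recognizes runs of digits:

import re

from closure_linter.common import matcher, tokenizer

digit_matcher = matcher.Matcher(re.compile(r'[0-9]+'), 'number')
tok = tokenizer.Tokenizer(starting_mode='code',
                          matchers={'code': [digit_matcher]},
                          default_types={})

first = tok.TokenizeFile(['foo 42 bar\n'])
for t in first:   # Token objects iterate forward over the linked list
    print((t.type, t.string, t.start_index))
# -> ('normal', 'foo ', 0)
# -> ('number', '42', 4)
# -> ('normal', ' bar', 6)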
+ + The token class represents a string of characters and an identifying type. + + Attributes: + type: The type of token. + string: The characters the token comprises. + length: The length of the token. + line: The text of the line the token is found in. + line_number: The number of the line the token is found in. + values: Dictionary of values returned from the tokens regex match. + previous: The token before this one. + next: The token after this one. + start_index: The character index in the line where this token starts. + attached_object: Object containing more information about this token. + metadata: Object containing metadata about this token. Must be added by + a separate metadata pass. + """ + + def __init__(self, string, token_type, line, line_number, values=None, + orig_line_number=None): + """Creates a new Token object. + + Args: + string: The string of input the token contains. + token_type: The type of token. + line: The text of the line this token is in. + line_number: The line number of the token. + values: A dict of named values within the token. For instance, a + function declaration may have a value called 'name' which captures the + name of the function. + orig_line_number: The line number of the original file this token comes + from. This should be only set during the tokenization process. For newly + created error fix tokens after that, it should be None. + """ + self.type = token_type + self.string = string + self.length = len(string) + self.line = line + self.line_number = line_number + self.orig_line_number = orig_line_number + self.values = values + self.is_deleted = False + + # These parts can only be computed when the file is fully tokenized + self.previous = None + self.next = None + self.start_index = None + + # This part is set in statetracker.py + # TODO(robbyw): Wrap this in to metadata + self.attached_object = None + + # This part is set in *metadatapass.py + self.metadata = None + + def IsFirstInLine(self): + """Tests if this token is the first token in its line. + + Returns: + Whether the token is the first token in its line. + """ + return not self.previous or self.previous.line_number != self.line_number + + def IsLastInLine(self): + """Tests if this token is the last token in its line. + + Returns: + Whether the token is the last token in its line. + """ + return not self.next or self.next.line_number != self.line_number + + def IsType(self, token_type): + """Tests if this token is of the given type. + + Args: + token_type: The type to test for. + + Returns: + True if the type of this token matches the type passed in. + """ + return self.type == token_type + + def IsAnyType(self, *token_types): + """Tests if this token is any of the given types. + + Args: + token_types: The types to check. Also accepts a single array. + + Returns: + True if the type of this token is any of the types passed in. 
+ """ + if not isinstance(token_types[0], basestring): + return self.type in token_types[0] + else: + return self.type in token_types + + def __repr__(self): + return '' % (self.type, self.string, + self.values, self.line_number, + self.metadata) + + def __iter__(self): + """Returns a token iterator.""" + node = self + while node: + yield node + node = node.next + + def __reversed__(self): + """Returns a reverse-direction token iterator.""" + node = self + while node: + yield node + node = node.previous diff --git a/tools/closure_linter/build/lib/closure_linter/common/tokens_test.py b/tools/closure_linter/build/lib/closure_linter/common/tokens_test.py new file mode 100644 index 0000000..01ec89d --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/common/tokens_test.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python +# Copyright 2011 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'nnaze@google.com (Nathan Naze)' + +import unittest as googletest +from closure_linter.common import tokens + + +def _CreateDummyToken(): + return tokens.Token('foo', None, 1, 1) + + +def _CreateDummyTokens(count): + dummy_tokens = [] + for _ in xrange(count): + dummy_tokens.append(_CreateDummyToken()) + return dummy_tokens + + +def _SetTokensAsNeighbors(neighbor_tokens): + for i in xrange(len(neighbor_tokens)): + prev_index = i - 1 + next_index = i + 1 + + if prev_index >= 0: + neighbor_tokens[i].previous = neighbor_tokens[prev_index] + + if next_index < len(neighbor_tokens): + neighbor_tokens[i].next = neighbor_tokens[next_index] + + +class TokensTest(googletest.TestCase): + + def testIsFirstInLine(self): + + # First token in file (has no previous). + self.assertTrue(_CreateDummyToken().IsFirstInLine()) + + a, b = _CreateDummyTokens(2) + _SetTokensAsNeighbors([a, b]) + + # Tokens on same line + a.line_number = 30 + b.line_number = 30 + + self.assertFalse(b.IsFirstInLine()) + + # Tokens on different lines + b.line_number = 31 + self.assertTrue(b.IsFirstInLine()) + + def testIsLastInLine(self): + # Last token in file (has no next). 
+ self.assertTrue(_CreateDummyToken().IsLastInLine()) + + a, b = _CreateDummyTokens(2) + _SetTokensAsNeighbors([a, b]) + + # Tokens on same line + a.line_number = 30 + b.line_number = 30 + self.assertFalse(a.IsLastInLine()) + + b.line_number = 31 + self.assertTrue(a.IsLastInLine()) + + def testIsType(self): + a = tokens.Token('foo', 'fakeType1', 1, 1) + self.assertTrue(a.IsType('fakeType1')) + self.assertFalse(a.IsType('fakeType2')) + + def testIsAnyType(self): + a = tokens.Token('foo', 'fakeType1', 1, 1) + self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2'])) + self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4'])) + + def testRepr(self): + a = tokens.Token('foo', 'fakeType1', 1, 1) + self.assertEquals('', str(a)) + + def testIter(self): + dummy_tokens = _CreateDummyTokens(5) + _SetTokensAsNeighbors(dummy_tokens) + a, b, c, d, e = dummy_tokens + + i = iter(a) + self.assertListEqual([a, b, c, d, e], list(i)) + + def testReverseIter(self): + dummy_tokens = _CreateDummyTokens(5) + _SetTokensAsNeighbors(dummy_tokens) + a, b, c, d, e = dummy_tokens + + ri = reversed(e) + self.assertListEqual([e, d, c, b, a], list(ri)) + + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/build/lib/closure_linter/ecmalintrules.py b/tools/closure_linter/build/lib/closure_linter/ecmalintrules.py new file mode 100644 index 0000000..c07dffc --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/ecmalintrules.py @@ -0,0 +1,844 @@ +#!/usr/bin/env python +# +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Core methods for checking EcmaScript files for common style guide violations. +""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)', + 'jacobr@google.com (Jacob Richman)') + +import re + +import gflags as flags + +from closure_linter import checkerbase +from closure_linter import ecmametadatapass +from closure_linter import error_check +from closure_linter import errorrules +from closure_linter import errors +from closure_linter import indentation +from closure_linter import javascripttokenizer +from closure_linter import javascripttokens +from closure_linter import statetracker +from closure_linter import tokenutil +from closure_linter.common import error +from closure_linter.common import position + + +FLAGS = flags.FLAGS +flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow') +# TODO(user): When flipping this to True, remove logic from unit tests +# that overrides this flag. 
+flags.DEFINE_boolean('dot_on_next_line', False, 'Require dots to be' + 'placed on the next line for wrapped expressions') + +# TODO(robbyw): Check for extra parens on return statements +# TODO(robbyw): Check for 0px in strings +# TODO(robbyw): Ensure inline jsDoc is in {} +# TODO(robbyw): Check for valid JS types in parameter docs + +# Shorthand +Context = ecmametadatapass.EcmaContext +Error = error.Error +Modes = javascripttokenizer.JavaScriptModes +Position = position.Position +Rule = error_check.Rule +Type = javascripttokens.JavaScriptTokenType + + +class EcmaScriptLintRules(checkerbase.LintRulesBase): + """EmcaScript lint style checking rules. + + Can be used to find common style errors in JavaScript, ActionScript and other + Ecma like scripting languages. Style checkers for Ecma scripting languages + should inherit from this style checker. + Please do not add any state to EcmaScriptLintRules or to any subclasses. + + All state should be added to the StateTracker subclass used for a particular + language. + """ + + # It will be initialized in constructor so the flags are initialized. + max_line_length = -1 + + # Static constants. + MISSING_PARAMETER_SPACE = re.compile(r',\S') + + EXTRA_SPACE = re.compile(r'(\(\s|\s\))') + + ENDS_WITH_SPACE = re.compile(r'\s$') + + ILLEGAL_TAB = re.compile(r'\t') + + # Regex used to split up complex types to check for invalid use of ? and |. + TYPE_SPLIT = re.compile(r'[,<>()]') + + # Regex for form of author lines after the @author tag. + AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)') + + # Acceptable tokens to remove for line too long testing. + LONG_LINE_IGNORE = frozenset( + ['*', '//', '@see'] + + ['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE]) + + JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([ + '@fileoverview', '@param', '@return', '@returns']) + + def __init__(self): + """Initialize this lint rule object.""" + checkerbase.LintRulesBase.__init__(self) + if EcmaScriptLintRules.max_line_length == -1: + EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength() + + def Initialize(self, checker, limited_doc_checks, is_html): + """Initialize this lint rule object before parsing a new file.""" + checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks, + is_html) + self._indentation = indentation.IndentationRules() + + def HandleMissingParameterDoc(self, token, param_name): + """Handle errors associated with a parameter missing a @param tag.""" + raise TypeError('Abstract method HandleMissingParameterDoc not implemented') + + def _CheckLineLength(self, last_token, state): + """Checks whether the line is too long. + + Args: + last_token: The last token in the line. + state: parser_state object that indicates the current state in the page + """ + # Start from the last token so that we have the flag object attached to + # and DOC_FLAG tokens. + line_number = last_token.line_number + token = last_token + + # Build a representation of the string where spaces indicate potential + # line-break locations. + line = [] + while token and token.line_number == line_number: + if state.IsTypeToken(token): + line.insert(0, 'x' * len(token.string)) + elif token.type in (Type.IDENTIFIER, Type.OPERATOR): + # Dots are acceptable places to wrap (may be tokenized as identifiers). 
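Most of the simple spacing rules boil down to the class-level regular expressions defined above. A quick standalone sketch of what three of them flag, on invented snippets:

import re

MISSING_PARAMETER_SPACE = re.compile(r',\S')   # 'f(a,b)'  : no space after ','
EXTRA_SPACE = re.compile(r'(\(\s|\s\))')       # 'f( a )'  : padding inside parens
ENDS_WITH_SPACE = re.compile(r'\s$')           # 'x = 1; ' : trailing whitespace

print(bool(MISSING_PARAMETER_SPACE.search('f(a,b)')))   # -> True
print(bool(EXTRA_SPACE.search('f( a, b )')))            # -> True
print(bool(ENDS_WITH_SPACE.search('var x = 1; ')))      # -> True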
+ line.insert(0, token.string.replace('.', ' ')) + else: + line.insert(0, token.string) + token = token.previous + + line = ''.join(line) + line = line.rstrip('\n\r\f') + try: + length = len(unicode(line, 'utf-8')) + except (LookupError, UnicodeDecodeError): + # Unknown encoding. The line length may be wrong, as was originally the + # case for utf-8 (see bug 1735846). For now just accept the default + # length, but as we find problems we can either add test for other + # possible encodings or return without an error to protect against + # false positives at the cost of more false negatives. + length = len(line) + + if length > EcmaScriptLintRules.max_line_length: + + # If the line matches one of the exceptions, then it's ok. + for long_line_regexp in self.GetLongLineExceptions(): + if long_line_regexp.match(last_token.line): + return + + # If the line consists of only one "word", or multiple words but all + # except one are ignoreable, then it's ok. + parts = set(line.split()) + + # We allow two "words" (type and name) when the line contains @param + max_parts = 1 + if '@param' in parts: + max_parts = 2 + + # Custom tags like @requires may have url like descriptions, so ignore + # the tag, similar to how we handle @see. + custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags]) + if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags)) + > max_parts): + self._HandleError( + errors.LINE_TOO_LONG, + 'Line too long (%d characters).' % len(line), last_token) + + def _CheckJsDocType(self, token, js_type): + """Checks the given type for style errors. + + Args: + token: The DOC_FLAG token for the flag whose type to check. + js_type: The flag's typeannotation.TypeAnnotation instance. + """ + if not js_type: return + + if js_type.type_group and len(js_type.sub_types) == 2: + identifiers = [t.identifier for t in js_type.sub_types] + if 'null' in identifiers: + # Don't warn if the identifier is a template type (e.g. {TYPE|null}. + if not identifiers[0].isupper() and not identifiers[1].isupper(): + self._HandleError( + errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL, + 'Prefer "?Type" to "Type|null": "%s"' % js_type, token) + + # TODO(user): We should report an error for wrong usage of '?' and '|' + # e.g. {?number|string|null} etc. + + for sub_type in js_type.IterTypes(): + self._CheckJsDocType(token, sub_type) + + def _CheckForMissingSpaceBeforeToken(self, token): + """Checks for a missing space at the beginning of a token. + + Reports a MISSING_SPACE error if the token does not begin with a space or + the previous token doesn't end with a space and the previous token is on the + same line as the token. + + Args: + token: The token being checked + """ + # TODO(user): Check if too many spaces? + if (len(token.string) == len(token.string.lstrip()) and + token.previous and token.line_number == token.previous.line_number and + len(token.previous.string) - len(token.previous.string.rstrip()) == 0): + self._HandleError( + errors.MISSING_SPACE, + 'Missing space before "%s"' % token.string, + token, + position=Position.AtBeginning()) + + def _CheckOperator(self, token): + """Checks an operator for spacing and line style. + + Args: + token: The operator token. 
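The length check decodes the assembled line as UTF-8 before measuring it, so multi-byte characters count once rather than per byte; if decoding fails it falls back to the raw length. A Python 2 sketch of just that measurement (the unicode() call mirrors the one above); the sample line is invented:

line = 'var caf\xc3\xa9 = 1;'      # 'var café = 1;' as UTF-8 bytes (14 bytes)
try:
    length = len(unicode(line, 'utf-8'))
except (LookupError, UnicodeDecodeError):
    length = len(line)
print(length)                       # -> 13 characters, not 14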
+ """ + last_code = token.metadata.last_code + + if not self._ExpectSpaceBeforeOperator(token): + if (token.previous and token.previous.type == Type.WHITESPACE and + last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER) and + last_code.line_number == token.line_number): + self._HandleError( + errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string, + token.previous, position=Position.All(token.previous.string)) + + elif (token.previous and + not token.previous.IsComment() and + not tokenutil.IsDot(token) and + token.previous.type in Type.EXPRESSION_ENDER_TYPES): + self._HandleError(errors.MISSING_SPACE, + 'Missing space before "%s"' % token.string, token, + position=Position.AtBeginning()) + + # Check wrapping of operators. + next_code = tokenutil.GetNextCodeToken(token) + + is_dot = tokenutil.IsDot(token) + wrapped_before = last_code and last_code.line_number != token.line_number + wrapped_after = next_code and next_code.line_number != token.line_number + + if FLAGS.dot_on_next_line and is_dot and wrapped_after: + self._HandleError( + errors.LINE_ENDS_WITH_DOT, + '"." must go on the following line', + token) + if (not is_dot and wrapped_before and + not token.metadata.IsUnaryOperator()): + self._HandleError( + errors.LINE_STARTS_WITH_OPERATOR, + 'Binary operator must go on previous line "%s"' % token.string, + token) + + def _IsLabel(self, token): + # A ':' token is considered part of a label if it occurs in a case + # statement, a plain label, or an object literal, i.e. is not part of a + # ternary. + + return (token.string == ':' and + token.metadata.context.type in (Context.LITERAL_ELEMENT, + Context.CASE_BLOCK, + Context.STATEMENT)) + + def _ExpectSpaceBeforeOperator(self, token): + """Returns whether a space should appear before the given operator token. + + Args: + token: The operator token. + + Returns: + Whether there should be a space before the token. + """ + if token.string == ',' or token.metadata.IsUnaryPostOperator(): + return False + + if tokenutil.IsDot(token): + return False + + # Colons should appear in labels, object literals, the case of a switch + # statement, and ternary operator. Only want a space in the case of the + # ternary operator. + if self._IsLabel(token): + return False + + if token.metadata.IsUnaryOperator() and token.IsFirstInLine(): + return False + + return True + + def CheckToken(self, token, state): + """Checks a token, given the current parser_state, for warnings and errors. + + Args: + token: The current token under consideration + state: parser_state object that indicates the current state in the page + """ + # Store some convenience variables + first_in_line = token.IsFirstInLine() + last_in_line = token.IsLastInLine() + last_non_space_token = state.GetLastNonSpaceToken() + + token_type = token.type + + # Process the line change. + if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION): + # TODO(robbyw): Support checking indentation in HTML files. + indentation_errors = self._indentation.CheckToken(token, state) + for indentation_error in indentation_errors: + self._HandleError(*indentation_error) + + if last_in_line: + self._CheckLineLength(token, state) + + if token_type == Type.PARAMETERS: + # Find missing spaces in parameter lists. 
+ if self.MISSING_PARAMETER_SPACE.search(token.string): + fix_data = ', '.join([s.strip() for s in token.string.split(',')]) + self._HandleError(errors.MISSING_SPACE, 'Missing space after ","', + token, position=None, fix_data=fix_data.strip()) + + # Find extra spaces at the beginning of parameter lists. Make sure + # we aren't at the beginning of a continuing multi-line list. + if not first_in_line: + space_count = len(token.string) - len(token.string.lstrip()) + if space_count: + self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("', + token, position=Position(0, space_count)) + + elif (token_type == Type.START_BLOCK and + token.metadata.context.type == Context.BLOCK): + self._CheckForMissingSpaceBeforeToken(token) + + elif token_type == Type.END_BLOCK: + last_code = token.metadata.last_code + if state.InFunction() and state.IsFunctionClose(): + if state.InTopLevelFunction(): + # A semicolons should not be included at the end of a function + # declaration. + if not state.InAssignedFunction(): + if not last_in_line and token.next.type == Type.SEMICOLON: + self._HandleError( + errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION, + 'Illegal semicolon after function declaration', + token.next, position=Position.All(token.next.string)) + + # A semicolon should be included at the end of a function expression + # that is not immediately called or used by a dot operator. + if (state.InAssignedFunction() and token.next + and token.next.type != Type.SEMICOLON): + next_token = tokenutil.GetNextCodeToken(token) + is_immediately_used = (next_token.type == Type.START_PAREN or + tokenutil.IsDot(next_token)) + if not is_immediately_used: + self._HandleError( + errors.MISSING_SEMICOLON_AFTER_FUNCTION, + 'Missing semicolon after function assigned to a variable', + token, position=Position.AtEnd(token.string)) + + if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK: + self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE, + 'Interface methods cannot contain code', last_code) + + elif (state.IsBlockClose() and + token.next and token.next.type == Type.SEMICOLON): + if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL + and last_code.metadata.context.type != Context.OBJECT_LITERAL): + self._HandleError( + errors.REDUNDANT_SEMICOLON, + 'No semicolon is required to end a code block', + token.next, position=Position.All(token.next.string)) + + elif token_type == Type.SEMICOLON: + if token.previous and token.previous.type == Type.WHITESPACE: + self._HandleError( + errors.EXTRA_SPACE, 'Extra space before ";"', + token.previous, position=Position.All(token.previous.string)) + + if token.next and token.next.line_number == token.line_number: + if token.metadata.context.type != Context.FOR_GROUP_BLOCK: + # TODO(robbyw): Error about no multi-statement lines. + pass + + elif token.next.type not in ( + Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN): + self._HandleError( + errors.MISSING_SPACE, + 'Missing space after ";" in for statement', + token.next, + position=Position.AtBeginning()) + + last_code = token.metadata.last_code + if last_code and last_code.type == Type.SEMICOLON: + # Allow a single double semi colon in for loops for cases like: + # for (;;) { }. + # NOTE(user): This is not a perfect check, and will not throw an error + # for cases like: for (var i = 0;; i < n; i++) {}, but then your code + # probably won't work either. 
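When a parameter list is missing spaces after commas, the reported error carries fix_data so the fixer can rewrite the whole list in one pass. A one-line sketch of that normalization, using the same expression as above on an invented parameter string:

params = 'a,b ,  c'
fix_data = ', '.join([s.strip() for s in params.split(',')])
print(fix_data)   # -> 'a, b, c'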
+ for_token = tokenutil.CustomSearch( + last_code, + lambda token: token.type == Type.KEYWORD and token.string == 'for', + end_func=lambda token: token.type == Type.SEMICOLON, + distance=None, + reverse=True) + + if not for_token: + self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon', + token, position=Position.All(token.string)) + + elif token_type == Type.START_PAREN: + # Ensure that opening parentheses have a space before any keyword + # that is not being invoked like a member function. + if (token.previous and token.previous.type == Type.KEYWORD and + (not token.previous.metadata or + not token.previous.metadata.last_code or + not token.previous.metadata.last_code.string or + token.previous.metadata.last_code.string[-1:] != '.')): + self._HandleError(errors.MISSING_SPACE, 'Missing space before "("', + token, position=Position.AtBeginning()) + elif token.previous and token.previous.type == Type.WHITESPACE: + before_space = token.previous.previous + # Ensure that there is no extra space before a function invocation, + # even if the function being invoked happens to be a keyword. + if (before_space and before_space.line_number == token.line_number and + before_space.type == Type.IDENTIFIER or + (before_space.type == Type.KEYWORD and before_space.metadata and + before_space.metadata.last_code and + before_space.metadata.last_code.string and + before_space.metadata.last_code.string[-1:] == '.')): + self._HandleError( + errors.EXTRA_SPACE, 'Extra space before "("', + token.previous, position=Position.All(token.previous.string)) + + elif token_type == Type.START_BRACKET: + self._HandleStartBracket(token, last_non_space_token) + elif token_type in (Type.END_PAREN, Type.END_BRACKET): + # Ensure there is no space before closing parentheses, except when + # it's in a for statement with an omitted section, or when it's at the + # beginning of a line. + if (token.previous and token.previous.type == Type.WHITESPACE and + not token.previous.IsFirstInLine() and + not (last_non_space_token and last_non_space_token.line_number == + token.line_number and + last_non_space_token.type == Type.SEMICOLON)): + self._HandleError( + errors.EXTRA_SPACE, 'Extra space before "%s"' % + token.string, token.previous, + position=Position.All(token.previous.string)) + + elif token_type == Type.WHITESPACE: + if self.ILLEGAL_TAB.search(token.string): + if token.IsFirstInLine(): + if token.next: + self._HandleError( + errors.ILLEGAL_TAB, + 'Illegal tab in whitespace before "%s"' % token.next.string, + token, position=Position.All(token.string)) + else: + self._HandleError( + errors.ILLEGAL_TAB, + 'Illegal tab in whitespace', + token, position=Position.All(token.string)) + else: + self._HandleError( + errors.ILLEGAL_TAB, + 'Illegal tab in whitespace after "%s"' % token.previous.string, + token, position=Position.All(token.string)) + + # Check whitespace length if it's not the first token of the line and + # if it's not immediately before a comment. + if last_in_line: + # Check for extra whitespace at the end of a line. 
+ self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line', + token, position=Position.All(token.string)) + elif not first_in_line and not token.next.IsComment(): + if token.length > 1: + self._HandleError( + errors.EXTRA_SPACE, 'Extra space after "%s"' % + token.previous.string, token, + position=Position(1, len(token.string) - 1)) + + elif token_type == Type.OPERATOR: + self._CheckOperator(token) + elif token_type == Type.DOC_FLAG: + flag = token.attached_object + + if flag.flag_type == 'bug': + # TODO(robbyw): Check for exactly 1 space on the left. + string = token.next.string.lstrip() + string = string.split(' ', 1)[0] + + if not string.isdigit(): + self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG, + '@bug should be followed by a bug number', token) + + elif flag.flag_type == 'suppress': + if flag.type is None: + # A syntactically invalid suppress tag will get tokenized as a normal + # flag, indicating an error. + self._HandleError( + errors.INCORRECT_SUPPRESS_SYNTAX, + 'Invalid suppress syntax: should be @suppress {errortype}. ' + 'Spaces matter.', token) + else: + for suppress_type in flag.jstype.IterIdentifiers(): + if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES: + self._HandleError( + errors.INVALID_SUPPRESS_TYPE, + 'Invalid suppression type: %s' % suppress_type, token) + + elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and + flag.flag_type == 'author'): + # TODO(user): In non strict mode check the author tag for as much as + # it exists, though the full form checked below isn't required. + string = token.next.string + result = self.AUTHOR_SPEC.match(string) + if not result: + self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION, + 'Author tag line should be of the form: ' + '@author foo@somewhere.com (Your Name)', + token.next) + else: + # Check spacing between email address and name. Do this before + # checking earlier spacing so positions are easier to calculate for + # autofixing. + num_spaces = len(result.group(2)) + if num_spaces < 1: + self._HandleError(errors.MISSING_SPACE, + 'Missing space after email address', + token.next, position=Position(result.start(2), 0)) + elif num_spaces > 1: + self._HandleError( + errors.EXTRA_SPACE, 'Extra space after email address', + token.next, + position=Position(result.start(2) + 1, num_spaces - 1)) + + # Check for extra spaces before email address. Can't be too few, if + # not at least one we wouldn't match @author tag. 
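The @author checks lean on the AUTHOR_SPEC pattern defined near the top of this class: group(1) captures any spaces before the e-mail address and group(2) the spaces between the address and the parenthesized name; the linter flags more than one space before the address and anything other than exactly one space after it. A standalone sketch, reusing the documented example address and otherwise invented author lines:

import re

AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')

for author in (' foo@somewhere.com (Your Name)',   # well formed
               ' foo@somewhere.com(Your Name)',    # no space before the name
               ' Your Name'):                      # no e-mail address at all
    match = AUTHOR_SPEC.match(author)
    if match:
        print((len(match.group(1)), len(match.group(2))))
    else:
        print('no match -> INVALID_AUTHOR_TAG_DESCRIPTION')
# -> (1, 1)
# -> (1, 0)   (zero spaces after the address -> MISSING_SPACE)
# -> no match -> INVALID_AUTHOR_TAG_DESCRIPTION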
+ num_spaces = len(result.group(1)) + if num_spaces > 1: + self._HandleError(errors.EXTRA_SPACE, + 'Extra space before email address', + token.next, position=Position(1, num_spaces - 1)) + + elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and + not self._limited_doc_checks): + if flag.flag_type == 'param': + if flag.name is None: + self._HandleError(errors.MISSING_JSDOC_PARAM_NAME, + 'Missing name in @param tag', token) + + if not flag.description or flag.description is None: + flag_name = token.type + if 'name' in token.values: + flag_name = '@' + token.values['name'] + + if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED: + self._HandleError( + errors.MISSING_JSDOC_TAG_DESCRIPTION, + 'Missing description in %s tag' % flag_name, token) + else: + self._CheckForMissingSpaceBeforeToken(flag.description_start_token) + + if flag.HasType(): + if flag.type_start_token is not None: + self._CheckForMissingSpaceBeforeToken( + token.attached_object.type_start_token) + + if flag.jstype and not flag.jstype.IsEmpty(): + self._CheckJsDocType(token, flag.jstype) + + if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and ( + flag.type_start_token.type != Type.DOC_START_BRACE or + flag.type_end_token.type != Type.DOC_END_BRACE): + self._HandleError( + errors.MISSING_BRACES_AROUND_TYPE, + 'Type must always be surrounded by curly braces.', token) + + if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG): + if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and + token.values['name'] not in FLAGS.custom_jsdoc_tags): + self._HandleError( + errors.INVALID_JSDOC_TAG, + 'Invalid JsDoc tag: %s' % token.values['name'], token) + + if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and + token.values['name'] == 'inheritDoc' and + token_type == Type.DOC_INLINE_FLAG): + self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC, + 'Unnecessary braces around @inheritDoc', + token) + + elif token_type == Type.SIMPLE_LVALUE: + identifier = token.values['identifier'] + + if ((not state.InFunction() or state.InConstructor()) and + state.InTopLevel() and not state.InObjectLiteralDescendant()): + jsdoc = state.GetDocComment() + if not state.HasDocComment(identifier): + # Only test for documentation on identifiers with .s in them to + # avoid checking things like simple variables. We don't require + # documenting assignments to .prototype itself (bug 1880803). + if (not state.InConstructor() and + identifier.find('.') != -1 and not + identifier.endswith('.prototype') and not + self._limited_doc_checks): + comment = state.GetLastComment() + if not (comment and comment.lower().count('jsdoc inherited')): + self._HandleError( + errors.MISSING_MEMBER_DOCUMENTATION, + "No docs found for member '%s'" % identifier, + token) + elif jsdoc and (not state.InConstructor() or + identifier.startswith('this.')): + # We are at the top level and the function/member is documented. + if identifier.endswith('_') and not identifier.endswith('__'): + # Can have a private class which inherits documentation from a + # public superclass. + # + # @inheritDoc is deprecated in favor of using @override, and they + if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor') + and ('accessControls' not in jsdoc.suppressions)): + self._HandleError( + errors.INVALID_OVERRIDE_PRIVATE, + '%s should not override a private member.' 
% identifier, + jsdoc.GetFlag('override').flag_token) + if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor') + and ('accessControls' not in jsdoc.suppressions)): + self._HandleError( + errors.INVALID_INHERIT_DOC_PRIVATE, + '%s should not inherit from a private member.' % identifier, + jsdoc.GetFlag('inheritDoc').flag_token) + if (not jsdoc.HasFlag('private') and + ('underscore' not in jsdoc.suppressions) and not + ((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and + ('accessControls' in jsdoc.suppressions))): + self._HandleError( + errors.MISSING_PRIVATE, + 'Member "%s" must have @private JsDoc.' % + identifier, token) + if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions: + self._HandleError( + errors.UNNECESSARY_SUPPRESS, + '@suppress {underscore} is not necessary with @private', + jsdoc.suppressions['underscore']) + elif (jsdoc.HasFlag('private') and + not self.InExplicitlyTypedLanguage()): + # It is convention to hide public fields in some ECMA + # implementations from documentation using the @private tag. + self._HandleError( + errors.EXTRA_PRIVATE, + 'Member "%s" must not have @private JsDoc' % + identifier, token) + + # These flags are only legal on localizable message definitions; + # such variables always begin with the prefix MSG_. + for f in ('desc', 'hidden', 'meaning'): + if (jsdoc.HasFlag(f) + and not identifier.startswith('MSG_') + and identifier.find('.MSG_') == -1): + self._HandleError( + errors.INVALID_USE_OF_DESC_TAG, + 'Member "%s" should not have @%s JsDoc' % (identifier, f), + token) + + # Check for illegaly assigning live objects as prototype property values. + index = identifier.find('.prototype.') + # Ignore anything with additional .s after the prototype. + if index != -1 and identifier.find('.', index + 11) == -1: + equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES) + next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES) + if next_code and ( + next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or + next_code.IsOperator('new')): + self._HandleError( + errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE, + 'Member %s cannot have a non-primitive value' % identifier, + token) + + elif token_type == Type.END_PARAMETERS: + # Find extra space at the end of parameter lists. We check the token + # prior to the current one when it is a closing paren. + if (token.previous and token.previous.type == Type.PARAMETERS + and self.ENDS_WITH_SPACE.search(token.previous.string)): + self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"', + token.previous) + + jsdoc = state.GetDocComment() + if state.GetFunction().is_interface: + if token.previous and token.previous.type == Type.PARAMETERS: + self._HandleError( + errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS, + 'Interface constructor cannot have parameters', + token.previous) + elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see') + and not jsdoc.InheritsDocumentation() + and not state.InObjectLiteralDescendant() and not + jsdoc.IsInvalidated()): + distance, edit = jsdoc.CompareParameters(state.GetParams()) + if distance: + params_iter = iter(state.GetParams()) + docs_iter = iter(jsdoc.ordered_params) + + for op in edit: + if op == 'I': + # Insertion. + # Parsing doc comments is the same for all languages + # but some languages care about parameters that don't have + # doc comments and some languages don't care. 
+ # Languages that don't allow variables to by typed such as + # JavaScript care but languages such as ActionScript or Java + # that allow variables to be typed don't care. + if not self._limited_doc_checks: + self.HandleMissingParameterDoc(token, params_iter.next()) + + elif op == 'D': + # Deletion + self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION, + 'Found docs for non-existing parameter: "%s"' % + docs_iter.next(), token) + elif op == 'S': + # Substitution + if not self._limited_doc_checks: + self._HandleError( + errors.WRONG_PARAMETER_DOCUMENTATION, + 'Parameter mismatch: got "%s", expected "%s"' % + (params_iter.next(), docs_iter.next()), token) + + else: + # Equality - just advance the iterators + params_iter.next() + docs_iter.next() + + elif token_type == Type.STRING_TEXT: + # If this is the first token after the start of the string, but it's at + # the end of a line, we know we have a multi-line string. + if token.previous.type in ( + Type.SINGLE_QUOTE_STRING_START, + Type.DOUBLE_QUOTE_STRING_START) and last_in_line: + self._HandleError(errors.MULTI_LINE_STRING, + 'Multi-line strings are not allowed', token) + + # This check is orthogonal to the ones above, and repeats some types, so + # it is a plain if and not an elif. + if token.type in Type.COMMENT_TYPES: + if self.ILLEGAL_TAB.search(token.string): + self._HandleError(errors.ILLEGAL_TAB, + 'Illegal tab in comment "%s"' % token.string, token) + + trimmed = token.string.rstrip() + if last_in_line and token.string != trimmed: + # Check for extra whitespace at the end of a line. + self._HandleError( + errors.EXTRA_SPACE, 'Extra space at end of line', token, + position=Position(len(trimmed), len(token.string) - len(trimmed))) + + # This check is also orthogonal since it is based on metadata. + if token.metadata.is_implied_semicolon: + self._HandleError(errors.MISSING_SEMICOLON, + 'Missing semicolon at end of line', token) + + def _HandleStartBracket(self, token, last_non_space_token): + """Handles a token that is an open bracket. + + Args: + token: The token to handle. + last_non_space_token: The last token that was not a space. + """ + if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and + last_non_space_token and + last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES): + self._HandleError( + errors.EXTRA_SPACE, 'Extra space before "["', + token.previous, position=Position.All(token.previous.string)) + # If the [ token is the first token in a line we shouldn't complain + # about a missing space before [. This is because some Ecma script + # languages allow syntax like: + # [Annotation] + # class MyClass {...} + # So we don't want to blindly warn about missing spaces before [. + # In the the future, when rules for computing exactly how many spaces + # lines should be indented are added, then we can return errors for + # [ tokens that are improperly indented. + # For example: + # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName = + # [a,b,c]; + # should trigger a proper indentation warning message as [ is not indented + # by four spaces. + elif (not token.IsFirstInLine() and token.previous and + token.previous.type not in ( + [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] + + Type.EXPRESSION_ENDER_TYPES)): + self._HandleError(errors.MISSING_SPACE, 'Missing space before "["', + token, position=Position.AtBeginning()) + + def Finalize(self, state): + """Perform all checks that need to occur after all lines are processed. 
+ + Args: + state: State of the parser after parsing all tokens + + Raises: + TypeError: If not overridden. + """ + last_non_space_token = state.GetLastNonSpaceToken() + # Check last line for ending with newline. + if state.GetLastLine() and not ( + state.GetLastLine().isspace() or + state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()): + self._HandleError( + errors.FILE_MISSING_NEWLINE, + 'File does not end with new line. (%s)' % state.GetLastLine(), + last_non_space_token) + + try: + self._indentation.Finalize() + except Exception, e: + self._HandleError( + errors.FILE_DOES_NOT_PARSE, + str(e), + last_non_space_token) + + def GetLongLineExceptions(self): + """Gets a list of regexps for lines which can be longer than the limit. + + Returns: + A list of regexps, used as matches (rather than searches). + """ + return [] + + def InExplicitlyTypedLanguage(self): + """Returns whether this ecma implementation is explicitly typed.""" + return False diff --git a/tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py b/tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py new file mode 100644 index 0000000..5062161 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py @@ -0,0 +1,574 @@ +#!/usr/bin/env python +# +# Copyright 2010 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Metadata pass for annotating tokens in EcmaScript files.""" + +__author__ = ('robbyw@google.com (Robert Walker)') + +from closure_linter import javascripttokens +from closure_linter import tokenutil + + +TokenType = javascripttokens.JavaScriptTokenType + + +class ParseError(Exception): + """Exception indicating a parse error at the given token. + + Attributes: + token: The token where the parse error occurred. + """ + + def __init__(self, token, message=None): + """Initialize a parse error at the given token with an optional message. + + Args: + token: The token where the parse error occurred. + message: A message describing the parse error. + """ + Exception.__init__(self, message) + self.token = token + + +class EcmaContext(object): + """Context object for EcmaScript languages. + + Attributes: + type: The context type. + start_token: The token where this context starts. + end_token: The token where this context ends. + parent: The parent context. + """ + + # The root context. + ROOT = 'root' + + # A block of code. + BLOCK = 'block' + + # A pseudo-block of code for a given case or default section. + CASE_BLOCK = 'case_block' + + # Block of statements in a for loop's parentheses. + FOR_GROUP_BLOCK = 'for_block' + + # An implied block of code for 1 line if, while, and for statements + IMPLIED_BLOCK = 'implied_block' + + # An index in to an array or object. + INDEX = 'index' + + # An array literal in []. + ARRAY_LITERAL = 'array_literal' + + # An object literal in {}. + OBJECT_LITERAL = 'object_literal' + + # An individual element in an array or object literal. 
+ LITERAL_ELEMENT = 'literal_element' + + # The portion of a ternary statement between ? and : + TERNARY_TRUE = 'ternary_true' + + # The portion of a ternary statment after : + TERNARY_FALSE = 'ternary_false' + + # The entire switch statment. This will contain a GROUP with the variable + # and a BLOCK with the code. + + # Since that BLOCK is not a normal block, it can not contain statements except + # for case and default. + SWITCH = 'switch' + + # A normal comment. + COMMENT = 'comment' + + # A JsDoc comment. + DOC = 'doc' + + # An individual statement. + STATEMENT = 'statement' + + # Code within parentheses. + GROUP = 'group' + + # Parameter names in a function declaration. + PARAMETERS = 'parameters' + + # A set of variable declarations appearing after the 'var' keyword. + VAR = 'var' + + # Context types that are blocks. + BLOCK_TYPES = frozenset([ + ROOT, BLOCK, CASE_BLOCK, FOR_GROUP_BLOCK, IMPLIED_BLOCK]) + + def __init__(self, context_type, start_token, parent=None): + """Initializes the context object. + + Args: + context_type: The context type. + start_token: The token where this context starts. + parent: The parent context. + + Attributes: + type: The context type. + start_token: The token where this context starts. + end_token: The token where this context ends. + parent: The parent context. + children: The child contexts of this context, in order. + """ + self.type = context_type + self.start_token = start_token + self.end_token = None + + self.parent = None + self.children = [] + + if parent: + parent.AddChild(self) + + def __repr__(self): + """Returns a string representation of the context object.""" + stack = [] + context = self + while context: + stack.append(context.type) + context = context.parent + return 'Context(%s)' % ' > '.join(stack) + + def AddChild(self, child): + """Adds a child to this context and sets child's parent to this context. + + Args: + child: A child EcmaContext. The child's parent will be set to this + context. + """ + + child.parent = self + + self.children.append(child) + self.children.sort(EcmaContext._CompareContexts) + + def GetRoot(self): + """Get the root context that contains this context, if any.""" + context = self + while context: + if context.type is EcmaContext.ROOT: + return context + context = context.parent + + @staticmethod + def _CompareContexts(context1, context2): + """Sorts contexts 1 and 2 by start token document position.""" + return tokenutil.Compare(context1.start_token, context2.start_token) + + +class EcmaMetaData(object): + """Token metadata for EcmaScript languages. + + Attributes: + last_code: The last code token to appear before this one. + context: The context this token appears in. + operator_type: The operator type, will be one of the *_OPERATOR constants + defined below. + aliased_symbol: The full symbol being identified, as a string (e.g. an + 'XhrIo' alias for 'goog.net.XhrIo'). Only applicable to identifier + tokens. This is set in aliaspass.py and is a best guess. + is_alias_definition: True if the symbol is part of an alias definition. + If so, these symbols won't be counted towards goog.requires/provides. 
+ """ + + UNARY_OPERATOR = 'unary' + + UNARY_POST_OPERATOR = 'unary_post' + + BINARY_OPERATOR = 'binary' + + TERNARY_OPERATOR = 'ternary' + + def __init__(self): + """Initializes a token metadata object.""" + self.last_code = None + self.context = None + self.operator_type = None + self.is_implied_semicolon = False + self.is_implied_block = False + self.is_implied_block_close = False + self.aliased_symbol = None + self.is_alias_definition = False + + def __repr__(self): + """Returns a string representation of the context object.""" + parts = ['%r' % self.context] + if self.operator_type: + parts.append('optype: %r' % self.operator_type) + if self.is_implied_semicolon: + parts.append('implied;') + if self.aliased_symbol: + parts.append('alias for: %s' % self.aliased_symbol) + return 'MetaData(%s)' % ', '.join(parts) + + def IsUnaryOperator(self): + return self.operator_type in (EcmaMetaData.UNARY_OPERATOR, + EcmaMetaData.UNARY_POST_OPERATOR) + + def IsUnaryPostOperator(self): + return self.operator_type == EcmaMetaData.UNARY_POST_OPERATOR + + +class EcmaMetaDataPass(object): + """A pass that iterates over all tokens and builds metadata about them.""" + + def __init__(self): + """Initialize the meta data pass object.""" + self.Reset() + + def Reset(self): + """Resets the metadata pass to prepare for the next file.""" + self._token = None + self._context = None + self._AddContext(EcmaContext.ROOT) + self._last_code = None + + def _CreateContext(self, context_type): + """Overridable by subclasses to create the appropriate context type.""" + return EcmaContext(context_type, self._token, self._context) + + def _CreateMetaData(self): + """Overridable by subclasses to create the appropriate metadata type.""" + return EcmaMetaData() + + def _AddContext(self, context_type): + """Adds a context of the given type to the context stack. + + Args: + context_type: The type of context to create + """ + self._context = self._CreateContext(context_type) + + def _PopContext(self): + """Moves up one level in the context stack. + + Returns: + The former context. + + Raises: + ParseError: If the root context is popped. + """ + top_context = self._context + top_context.end_token = self._token + self._context = top_context.parent + if self._context: + return top_context + else: + raise ParseError(self._token) + + def _PopContextType(self, *stop_types): + """Pops the context stack until a context of the given type is popped. + + Args: + *stop_types: The types of context to pop to - stops at the first match. + + Returns: + The context object of the given type that was popped. + """ + last = None + while not last or last.type not in stop_types: + last = self._PopContext() + return last + + def _EndStatement(self): + """Process the end of a statement.""" + self._PopContextType(EcmaContext.STATEMENT) + if self._context.type == EcmaContext.IMPLIED_BLOCK: + self._token.metadata.is_implied_block_close = True + self._PopContext() + + def _ProcessContext(self): + """Process the context at the current token. + + Returns: + The context that should be assigned to the current token, or None if + the current context after this method should be used. + + Raises: + ParseError: When the token appears in an invalid context. + """ + token = self._token + token_type = token.type + + if self._context.type in EcmaContext.BLOCK_TYPES: + # Whenever we're in a block, we add a statement context. We make an + # exception for switch statements since they can only contain case: and + # default: and therefore don't directly contain statements. 
+ # The block we add here may be immediately removed in some cases, but + # that causes no harm. + parent = self._context.parent + if not parent or parent.type != EcmaContext.SWITCH: + self._AddContext(EcmaContext.STATEMENT) + + elif self._context.type == EcmaContext.ARRAY_LITERAL: + self._AddContext(EcmaContext.LITERAL_ELEMENT) + + if token_type == TokenType.START_PAREN: + if self._last_code and self._last_code.IsKeyword('for'): + # for loops contain multiple statements in the group unlike while, + # switch, if, etc. + self._AddContext(EcmaContext.FOR_GROUP_BLOCK) + else: + self._AddContext(EcmaContext.GROUP) + + elif token_type == TokenType.END_PAREN: + result = self._PopContextType(EcmaContext.GROUP, + EcmaContext.FOR_GROUP_BLOCK) + keyword_token = result.start_token.metadata.last_code + # keyword_token will not exist if the open paren is the first line of the + # file, for example if all code is wrapped in an immediately executed + # annonymous function. + if keyword_token and keyword_token.string in ('if', 'for', 'while'): + next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES) + if next_code.type != TokenType.START_BLOCK: + # Check for do-while. + is_do_while = False + pre_keyword_token = keyword_token.metadata.last_code + if (pre_keyword_token and + pre_keyword_token.type == TokenType.END_BLOCK): + start_block_token = pre_keyword_token.metadata.context.start_token + is_do_while = start_block_token.metadata.last_code.string == 'do' + + # If it's not do-while, it's an implied block. + if not is_do_while: + self._AddContext(EcmaContext.IMPLIED_BLOCK) + token.metadata.is_implied_block = True + + return result + + # else (not else if) with no open brace after it should be considered the + # start of an implied block, similar to the case with if, for, and while + # above. + elif (token_type == TokenType.KEYWORD and + token.string == 'else'): + next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES) + if (next_code.type != TokenType.START_BLOCK and + (next_code.type != TokenType.KEYWORD or next_code.string != 'if')): + self._AddContext(EcmaContext.IMPLIED_BLOCK) + token.metadata.is_implied_block = True + + elif token_type == TokenType.START_PARAMETERS: + self._AddContext(EcmaContext.PARAMETERS) + + elif token_type == TokenType.END_PARAMETERS: + return self._PopContextType(EcmaContext.PARAMETERS) + + elif token_type == TokenType.START_BRACKET: + if (self._last_code and + self._last_code.type in TokenType.EXPRESSION_ENDER_TYPES): + self._AddContext(EcmaContext.INDEX) + else: + self._AddContext(EcmaContext.ARRAY_LITERAL) + + elif token_type == TokenType.END_BRACKET: + return self._PopContextType(EcmaContext.INDEX, EcmaContext.ARRAY_LITERAL) + + elif token_type == TokenType.START_BLOCK: + if (self._last_code.type in (TokenType.END_PAREN, + TokenType.END_PARAMETERS) or + self._last_code.IsKeyword('else') or + self._last_code.IsKeyword('do') or + self._last_code.IsKeyword('try') or + self._last_code.IsKeyword('finally') or + (self._last_code.IsOperator(':') and + self._last_code.metadata.context.type == EcmaContext.CASE_BLOCK)): + # else, do, try, and finally all might have no () before {. + # Also, handle the bizzare syntax case 10: {...}. 
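+        # (a '{' directly after the ':' of a case label is a block, which is
+        # why the CASE_BLOCK context of that ':' is checked above).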
+ self._AddContext(EcmaContext.BLOCK) + else: + self._AddContext(EcmaContext.OBJECT_LITERAL) + + elif token_type == TokenType.END_BLOCK: + context = self._PopContextType(EcmaContext.BLOCK, + EcmaContext.OBJECT_LITERAL) + if self._context.type == EcmaContext.SWITCH: + # The end of the block also means the end of the switch statement it + # applies to. + return self._PopContext() + return context + + elif token.IsKeyword('switch'): + self._AddContext(EcmaContext.SWITCH) + + elif (token_type == TokenType.KEYWORD and + token.string in ('case', 'default') and + self._context.type != EcmaContext.OBJECT_LITERAL): + # Pop up to but not including the switch block. + while self._context.parent.type != EcmaContext.SWITCH: + self._PopContext() + if self._context.parent is None: + raise ParseError(token, 'Encountered case/default statement ' + 'without switch statement') + + elif token.IsOperator('?'): + self._AddContext(EcmaContext.TERNARY_TRUE) + + elif token.IsOperator(':'): + if self._context.type == EcmaContext.OBJECT_LITERAL: + self._AddContext(EcmaContext.LITERAL_ELEMENT) + + elif self._context.type == EcmaContext.TERNARY_TRUE: + self._PopContext() + self._AddContext(EcmaContext.TERNARY_FALSE) + + # Handle nested ternary statements like: + # foo = bar ? baz ? 1 : 2 : 3 + # When we encounter the second ":" the context is + # ternary_false > ternary_true > statement > root + elif (self._context.type == EcmaContext.TERNARY_FALSE and + self._context.parent.type == EcmaContext.TERNARY_TRUE): + self._PopContext() # Leave current ternary false context. + self._PopContext() # Leave current parent ternary true + self._AddContext(EcmaContext.TERNARY_FALSE) + + elif self._context.parent.type == EcmaContext.SWITCH: + self._AddContext(EcmaContext.CASE_BLOCK) + + elif token.IsKeyword('var'): + self._AddContext(EcmaContext.VAR) + + elif token.IsOperator(','): + while self._context.type not in (EcmaContext.VAR, + EcmaContext.ARRAY_LITERAL, + EcmaContext.OBJECT_LITERAL, + EcmaContext.STATEMENT, + EcmaContext.PARAMETERS, + EcmaContext.GROUP): + self._PopContext() + + elif token_type == TokenType.SEMICOLON: + self._EndStatement() + + def Process(self, first_token): + """Processes the token stream starting with the given token.""" + self._token = first_token + while self._token: + self._ProcessToken() + + if self._token.IsCode(): + self._last_code = self._token + + self._token = self._token.next + + try: + self._PopContextType(self, EcmaContext.ROOT) + except ParseError: + # Ignore the "popped to root" error. + pass + + def _ProcessToken(self): + """Process the given token.""" + token = self._token + token.metadata = self._CreateMetaData() + context = (self._ProcessContext() or self._context) + token.metadata.context = context + token.metadata.last_code = self._last_code + + # Determine the operator type of the token, if applicable. + if token.type == TokenType.OPERATOR: + token.metadata.operator_type = self._GetOperatorType(token) + + # Determine if there is an implied semicolon after the token. + if token.type != TokenType.SEMICOLON: + next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES) + # A statement like if (x) does not need a semicolon after it + is_implied_block = self._context == EcmaContext.IMPLIED_BLOCK + is_last_code_in_line = token.IsCode() and ( + not next_code or next_code.line_number != token.line_number) + is_continued_operator = (token.type == TokenType.OPERATOR and + not token.metadata.IsUnaryPostOperator()) + is_continued_dot = token.string == '.' 
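+      # A trailing '.' means the expression continues on the next line
+      # (e.g. a chained method call), so no semicolon is implied here.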
+ next_code_is_operator = next_code and next_code.type == TokenType.OPERATOR + is_end_of_block = ( + token.type == TokenType.END_BLOCK and + token.metadata.context.type != EcmaContext.OBJECT_LITERAL) + is_multiline_string = token.type == TokenType.STRING_TEXT + is_continued_var_decl = (token.IsKeyword('var') and + next_code and + (next_code.type in [TokenType.IDENTIFIER, + TokenType.SIMPLE_LVALUE]) and + token.line_number < next_code.line_number) + next_code_is_block = next_code and next_code.type == TokenType.START_BLOCK + if (is_last_code_in_line and + self._StatementCouldEndInContext() and + not is_multiline_string and + not is_end_of_block and + not is_continued_var_decl and + not is_continued_operator and + not is_continued_dot and + not next_code_is_operator and + not is_implied_block and + not next_code_is_block): + token.metadata.is_implied_semicolon = True + self._EndStatement() + + def _StatementCouldEndInContext(self): + """Returns if the current statement (if any) may end in this context.""" + # In the basic statement or variable declaration context, statement can + # always end in this context. + if self._context.type in (EcmaContext.STATEMENT, EcmaContext.VAR): + return True + + # End of a ternary false branch inside a statement can also be the + # end of the statement, for example: + # var x = foo ? foo.bar() : null + # In this case the statement ends after the null, when the context stack + # looks like ternary_false > var > statement > root. + if (self._context.type == EcmaContext.TERNARY_FALSE and + self._context.parent.type in (EcmaContext.STATEMENT, EcmaContext.VAR)): + return True + + # In all other contexts like object and array literals, ternary true, etc. + # the statement can't yet end. + return False + + def _GetOperatorType(self, token): + """Returns the operator type of the given operator token. + + Args: + token: The token to get arity for. + + Returns: + The type of the operator. One of the *_OPERATOR constants defined in + EcmaMetaData. + """ + if token.string == '?': + return EcmaMetaData.TERNARY_OPERATOR + + if token.string in TokenType.UNARY_OPERATORS: + return EcmaMetaData.UNARY_OPERATOR + + last_code = token.metadata.last_code + if not last_code or last_code.type == TokenType.END_BLOCK: + return EcmaMetaData.UNARY_OPERATOR + + if (token.string in TokenType.UNARY_POST_OPERATORS and + last_code.type in TokenType.EXPRESSION_ENDER_TYPES): + return EcmaMetaData.UNARY_POST_OPERATOR + + if (token.string in TokenType.UNARY_OK_OPERATORS and + last_code.type not in TokenType.EXPRESSION_ENDER_TYPES and + last_code.string not in TokenType.UNARY_POST_OPERATORS): + return EcmaMetaData.UNARY_OPERATOR + + return EcmaMetaData.BINARY_OPERATOR diff --git a/tools/closure_linter/build/lib/closure_linter/error_check.py b/tools/closure_linter/build/lib/closure_linter/error_check.py new file mode 100644 index 0000000..8d657fe --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/error_check.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python +# +# Copyright 2011 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Specific JSLint errors checker.""" + + + +import gflags as flags + +FLAGS = flags.FLAGS + + +class Rule(object): + """Different rules to check.""" + + # Documentations for specific rules goes in flag definition. + BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level' + INDENTATION = 'indentation' + WELL_FORMED_AUTHOR = 'well_formed_author' + NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc' + BRACES_AROUND_TYPE = 'braces_around_type' + OPTIONAL_TYPE_MARKER = 'optional_type_marker' + VARIABLE_ARG_MARKER = 'variable_arg_marker' + UNUSED_PRIVATE_MEMBERS = 'unused_private_members' + UNUSED_LOCAL_VARIABLES = 'unused_local_variables' + + # Rule to raise all known errors. + ALL = 'all' + + # All rules that are to be checked when using the strict flag. E.g. the rules + # that are specific to the stricter Closure style. + CLOSURE_RULES = frozenset([BLANK_LINES_AT_TOP_LEVEL, + INDENTATION, + WELL_FORMED_AUTHOR, + NO_BRACES_AROUND_INHERIT_DOC, + BRACES_AROUND_TYPE, + OPTIONAL_TYPE_MARKER, + VARIABLE_ARG_MARKER]) + + +flags.DEFINE_boolean('strict', False, + 'Whether to validate against the stricter Closure style. ' + 'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.') +flags.DEFINE_multistring('jslint_error', [], + 'List of specific lint errors to check. Here is a list' + ' of accepted values:\n' + ' - ' + Rule.ALL + ': enables all following errors.\n' + ' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates' + 'number of blank lines between blocks at top level.\n' + ' - ' + Rule.INDENTATION + ': checks correct ' + 'indentation of code.\n' + ' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the ' + '@author JsDoc tags.\n' + ' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': ' + 'forbids braces around @inheritdoc JsDoc tags.\n' + ' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces ' + 'around types in JsDoc tags.\n' + ' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct ' + 'use of optional marker = in param types.\n' + ' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for ' + 'unused private variables.\n' + ' - ' + Rule.UNUSED_LOCAL_VARIABLES + ': checks for ' + 'unused local variables.\n') + + +def ShouldCheck(rule): + """Returns whether the optional rule should be checked. + + Computes different flags (strict, jslint_error, jslint_noerror) to find out if + this specific rule should be checked. + + Args: + rule: Name of the rule (see Rule). + + Returns: + True if the rule should be checked according to the flags, otherwise False. + """ + if rule in FLAGS.jslint_error or Rule.ALL in FLAGS.jslint_error: + return True + # Checks strict rules. + return FLAGS.strict and rule in Rule.CLOSURE_RULES diff --git a/tools/closure_linter/build/lib/closure_linter/error_fixer.py b/tools/closure_linter/build/lib/closure_linter/error_fixer.py new file mode 100644 index 0000000..88f9c72 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/error_fixer.py @@ -0,0 +1,618 @@ +#!/usr/bin/env python +# +# Copyright 2007 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Main class responsible for automatically fixing simple style violations.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = 'robbyw@google.com (Robert Walker)' + +import re + +import gflags as flags +from closure_linter import errors +from closure_linter import javascriptstatetracker +from closure_linter import javascripttokens +from closure_linter import requireprovidesorter +from closure_linter import tokenutil +from closure_linter.common import errorhandler + +# Shorthand +Token = javascripttokens.JavaScriptToken +Type = javascripttokens.JavaScriptTokenType + +END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$') + +# Regex to represent common mistake inverting author name and email as +# @author User Name (user@company) +INVERTED_AUTHOR_SPEC = re.compile(r'(?P\s*)' + r'(?P[^(]+)' + r'(?P\s+)' + r'\(' + r'(?P[^\s]+@[^)\s]+)' + r'\)' + r'(?P.*)') + +FLAGS = flags.FLAGS +flags.DEFINE_boolean('disable_indentation_fixing', False, + 'Whether to disable automatic fixing of indentation.') +flags.DEFINE_list('fix_error_codes', [], 'A list of specific error codes to ' + 'fix. Defaults to all supported error codes when empty. ' + 'See errors.py for a list of error codes.') + + +class ErrorFixer(errorhandler.ErrorHandler): + """Object that fixes simple style errors.""" + + def __init__(self, external_file=None): + """Initialize the error fixer. + + Args: + external_file: If included, all output will be directed to this file + instead of overwriting the files the errors are found in. + """ + errorhandler.ErrorHandler.__init__(self) + + self._file_name = None + self._file_token = None + self._external_file = external_file + + try: + self._fix_error_codes = set([errors.ByName(error.upper()) for error in + FLAGS.fix_error_codes]) + except KeyError as ke: + raise ValueError('Unknown error code ' + ke.args[0]) + + def HandleFile(self, filename, first_token): + """Notifies this ErrorPrinter that subsequent errors are in filename. + + Args: + filename: The name of the file about to be checked. + first_token: The first token in the file. + """ + self._file_name = filename + self._file_is_html = filename.endswith('.html') or filename.endswith('.htm') + self._file_token = first_token + self._file_fix_count = 0 + self._file_changed_lines = set() + + def _AddFix(self, tokens): + """Adds the fix to the internal count. + + Args: + tokens: The token or sequence of tokens changed to fix an error. + """ + self._file_fix_count += 1 + if hasattr(tokens, 'line_number'): + self._file_changed_lines.add(tokens.line_number) + else: + for token in tokens: + self._file_changed_lines.add(token.line_number) + + def _FixJsDocPipeNull(self, js_type): + """Change number|null or null|number to ?number. + + Args: + js_type: The typeannotation.TypeAnnotation instance to fix. + """ + + # Recurse into all sub_types if the error was at a deeper level. 
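+    # (e.g. in {Array.<number|null>} the offending "|null" lives in a nested
+    # sub_type rather than in js_type itself).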
+ map(self._FixJsDocPipeNull, js_type.IterTypes()) + + if js_type.type_group and len(js_type.sub_types) == 2: + # Find and remove the null sub_type: + sub_type = None + for sub_type in js_type.sub_types: + if sub_type.identifier == 'null': + map(tokenutil.DeleteToken, sub_type.tokens) + self._AddFix(sub_type.tokens) + break + else: + return + + first_token = js_type.FirstToken() + question_mark = Token('?', Type.DOC_TYPE_MODIFIER, first_token.line, + first_token.line_number) + tokenutil.InsertTokenBefore(question_mark, first_token) + js_type.tokens.insert(0, question_mark) + js_type.tokens.remove(sub_type) + js_type.or_null = True + + # Now also remove the separator, which is in the parent's token list, + # either before or after the sub_type, there is exactly one. Scan for it. + for token in js_type.tokens: + if (token and isinstance(token, Token) and + token.type == Type.DOC_TYPE_MODIFIER and token.string == '|'): + tokenutil.DeleteToken(token) + self._AddFix(token) + break + + def HandleError(self, error): + """Attempts to fix the error. + + Args: + error: The error object + """ + code = error.code + token = error.token + + if self._fix_error_codes and code not in self._fix_error_codes: + return + + if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL: + self._FixJsDocPipeNull(token.attached_object.jstype) + + elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE: + iterator = token.attached_object.type_end_token + if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace(): + iterator = iterator.previous + + ending_space = len(iterator.string) - len(iterator.string.rstrip()) + iterator.string = '%s=%s' % (iterator.string.rstrip(), + ' ' * ending_space) + + # Create a new flag object with updated type info. + token.attached_object = javascriptstatetracker.JsDocFlag(token) + self._AddFix(token) + + elif code == errors.JSDOC_MISSING_VAR_ARGS_TYPE: + iterator = token.attached_object.type_start_token + if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace(): + iterator = iterator.next + + starting_space = len(iterator.string) - len(iterator.string.lstrip()) + iterator.string = '%s...%s' % (' ' * starting_space, + iterator.string.lstrip()) + + # Create a new flag object with updated type info. 
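+      # (JsDocFlag parses its type from the tokens at construction time, so
+      # it has to be rebuilt after the token string was edited above).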
+ token.attached_object = javascriptstatetracker.JsDocFlag(token) + self._AddFix(token) + + elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION, + errors.MISSING_SEMICOLON): + semicolon_token = Token(';', Type.SEMICOLON, token.line, + token.line_number) + tokenutil.InsertTokenAfter(semicolon_token, token) + token.metadata.is_implied_semicolon = False + semicolon_token.metadata.is_implied_semicolon = False + self._AddFix(token) + + elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION, + errors.REDUNDANT_SEMICOLON, + errors.COMMA_AT_END_OF_LITERAL): + self._DeleteToken(token) + self._AddFix(token) + + elif code == errors.INVALID_JSDOC_TAG: + if token.string == '@returns': + token.string = '@return' + self._AddFix(token) + + elif code == errors.FILE_MISSING_NEWLINE: + # This error is fixed implicitly by the way we restore the file + self._AddFix(token) + + elif code == errors.MISSING_SPACE: + if error.fix_data: + token.string = error.fix_data + self._AddFix(token) + elif error.position: + if error.position.IsAtBeginning(): + tokenutil.InsertSpaceTokenAfter(token.previous) + elif error.position.IsAtEnd(token.string): + tokenutil.InsertSpaceTokenAfter(token) + else: + token.string = error.position.Set(token.string, ' ') + self._AddFix(token) + + elif code == errors.EXTRA_SPACE: + if error.position: + token.string = error.position.Set(token.string, '') + self._AddFix(token) + + elif code == errors.MISSING_LINE: + if error.position.IsAtBeginning(): + tokenutil.InsertBlankLineAfter(token.previous) + else: + tokenutil.InsertBlankLineAfter(token) + self._AddFix(token) + + elif code == errors.EXTRA_LINE: + self._DeleteToken(token) + self._AddFix(token) + + elif code == errors.WRONG_BLANK_LINE_COUNT: + if not token.previous: + # TODO(user): Add an insertBefore method to tokenutil. + return + + num_lines = error.fix_data + should_delete = False + + if num_lines < 0: + num_lines *= -1 + should_delete = True + + for unused_i in xrange(1, num_lines + 1): + if should_delete: + # TODO(user): DeleteToken should update line numbers. + self._DeleteToken(token.previous) + else: + tokenutil.InsertBlankLineAfter(token.previous) + self._AddFix(token) + + elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING: + end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END) + if end_quote: + single_quote_start = Token( + "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number) + single_quote_end = Token( + "'", Type.SINGLE_QUOTE_STRING_START, end_quote.line, + token.line_number) + + tokenutil.InsertTokenAfter(single_quote_start, token) + tokenutil.InsertTokenAfter(single_quote_end, end_quote) + self._DeleteToken(token) + self._DeleteToken(end_quote) + self._AddFix([token, end_quote]) + + elif code == errors.MISSING_BRACES_AROUND_TYPE: + fixed_tokens = [] + start_token = token.attached_object.type_start_token + + if start_token.type != Type.DOC_START_BRACE: + leading_space = ( + len(start_token.string) - len(start_token.string.lstrip())) + if leading_space: + start_token = tokenutil.SplitToken(start_token, leading_space) + # Fix case where start and end token were the same. 
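+          # (after the split, start_token is the non-whitespace remainder and
+          # the original token, now only whitespace, is start_token.previous).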
+ if token.attached_object.type_end_token == start_token.previous: + token.attached_object.type_end_token = start_token + + new_token = Token('{', Type.DOC_START_BRACE, start_token.line, + start_token.line_number) + tokenutil.InsertTokenAfter(new_token, start_token.previous) + token.attached_object.type_start_token = new_token + fixed_tokens.append(new_token) + + end_token = token.attached_object.type_end_token + if end_token.type != Type.DOC_END_BRACE: + # If the start token was a brace, the end token will be a + # FLAG_ENDING_TYPE token, if there wasn't a starting brace then + # the end token is the last token of the actual type. + last_type = end_token + if not fixed_tokens: + last_type = end_token.previous + + while last_type.string.isspace(): + last_type = last_type.previous + + # If there was no starting brace then a lone end brace wouldn't have + # been type end token. Now that we've added any missing start brace, + # see if the last effective type token was an end brace. + if last_type.type != Type.DOC_END_BRACE: + trailing_space = (len(last_type.string) - + len(last_type.string.rstrip())) + if trailing_space: + tokenutil.SplitToken(last_type, + len(last_type.string) - trailing_space) + + new_token = Token('}', Type.DOC_END_BRACE, last_type.line, + last_type.line_number) + tokenutil.InsertTokenAfter(new_token, last_type) + token.attached_object.type_end_token = new_token + fixed_tokens.append(new_token) + + self._AddFix(fixed_tokens) + + elif code == errors.LINE_STARTS_WITH_OPERATOR: + # Remove whitespace following the operator so the line starts clean. + self._StripSpace(token, before=False) + + # Remove the operator. + tokenutil.DeleteToken(token) + self._AddFix(token) + + insertion_point = tokenutil.GetPreviousCodeToken(token) + + # Insert a space between the previous token and the new operator. + space = Token(' ', Type.WHITESPACE, insertion_point.line, + insertion_point.line_number) + tokenutil.InsertTokenAfter(space, insertion_point) + + # Insert the operator on the end of the previous line. + new_token = Token(token.string, token.type, insertion_point.line, + insertion_point.line_number) + tokenutil.InsertTokenAfter(new_token, space) + self._AddFix(new_token) + + elif code == errors.LINE_ENDS_WITH_DOT: + # Remove whitespace preceding the operator to remove trailing whitespace. + self._StripSpace(token, before=True) + + # Remove the dot. + tokenutil.DeleteToken(token) + self._AddFix(token) + + insertion_point = tokenutil.GetNextCodeToken(token) + + # Insert the dot at the beginning of the next line of code. 
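+      # (e.g. a line ending in "foo." becomes "foo", and the next code line
+      # "bar()" becomes ".bar()").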
+ new_token = Token(token.string, token.type, insertion_point.line, + insertion_point.line_number) + tokenutil.InsertTokenBefore(new_token, insertion_point) + self._AddFix(new_token) + + elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED: + require_start_token = error.fix_data + sorter = requireprovidesorter.RequireProvideSorter() + sorter.FixRequires(require_start_token) + + self._AddFix(require_start_token) + + elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED: + provide_start_token = error.fix_data + sorter = requireprovidesorter.RequireProvideSorter() + sorter.FixProvides(provide_start_token) + + self._AddFix(provide_start_token) + + elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC: + if token.previous.string == '{' and token.next.string == '}': + self._DeleteToken(token.previous) + self._DeleteToken(token.next) + self._AddFix([token]) + + elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION: + match = INVERTED_AUTHOR_SPEC.match(token.string) + if match: + token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'), + match.group('email'), + match.group('whitespace_after_name'), + match.group('name'), + match.group('trailing_characters')) + self._AddFix(token) + + elif (code == errors.WRONG_INDENTATION and + not FLAGS.disable_indentation_fixing): + token = tokenutil.GetFirstTokenInSameLine(token) + actual = error.position.start + expected = error.position.length + + # Cases where first token is param but with leading spaces. + if (len(token.string.lstrip()) == len(token.string) - actual and + token.string.lstrip()): + token.string = token.string.lstrip() + actual = 0 + + if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0: + token.string = token.string.lstrip() + (' ' * expected) + self._AddFix([token]) + else: + # We need to add indentation. + new_token = Token(' ' * expected, Type.WHITESPACE, + token.line, token.line_number) + # Note that we'll never need to add indentation at the first line, + # since it will always not be indented. Therefore it's safe to assume + # token.previous exists. + tokenutil.InsertTokenAfter(new_token, token.previous) + self._AddFix([token]) + + elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT, + errors.MISSING_END_OF_SCOPE_COMMENT]: + # Only fix cases where }); is found with no trailing content on the line + # other than a comment. Value of 'token' is set to } for this error. 
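+      # The fix appends the canonical comment: "});" at the end of a
+      # goog.scope block becomes "}); // goog.scope".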
+ if (token.type == Type.END_BLOCK and + token.next.type == Type.END_PAREN and + token.next.next.type == Type.SEMICOLON): + current_token = token.next.next.next + removed_tokens = [] + while current_token and current_token.line_number == token.line_number: + if current_token.IsAnyType(Type.WHITESPACE, + Type.START_SINGLE_LINE_COMMENT, + Type.COMMENT): + removed_tokens.append(current_token) + current_token = current_token.next + else: + return + + if removed_tokens: + self._DeleteTokens(removed_tokens[0], len(removed_tokens)) + + whitespace_token = Token(' ', Type.WHITESPACE, token.line, + token.line_number) + start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT, + token.line, token.line_number) + comment_token = Token(' goog.scope', Type.COMMENT, token.line, + token.line_number) + insertion_tokens = [whitespace_token, start_comment_token, + comment_token] + + tokenutil.InsertTokensAfter(insertion_tokens, token.next.next) + self._AddFix(removed_tokens + insertion_tokens) + + elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]: + tokens_in_line = tokenutil.GetAllTokensInSameLine(token) + num_delete_tokens = len(tokens_in_line) + # If line being deleted is preceded and succeed with blank lines then + # delete one blank line also. + if (tokens_in_line[0].previous and tokens_in_line[-1].next + and tokens_in_line[0].previous.type == Type.BLANK_LINE + and tokens_in_line[-1].next.type == Type.BLANK_LINE): + num_delete_tokens += 1 + self._DeleteTokens(tokens_in_line[0], num_delete_tokens) + self._AddFix(tokens_in_line) + + elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]: + missing_namespaces = error.fix_data[0] + need_blank_line = error.fix_data[1] or (not token.previous) + + insert_location = Token('', Type.NORMAL, '', token.line_number - 1) + dummy_first_token = insert_location + tokenutil.InsertTokenBefore(insert_location, token) + + # If inserting a blank line check blank line does not exist before + # token to avoid extra blank lines. + if (need_blank_line and insert_location.previous + and insert_location.previous.type != Type.BLANK_LINE): + tokenutil.InsertBlankLineAfter(insert_location) + insert_location = insert_location.next + + for missing_namespace in missing_namespaces: + new_tokens = self._GetNewRequireOrProvideTokens( + code == errors.MISSING_GOOG_PROVIDE, + missing_namespace, insert_location.line_number + 1) + tokenutil.InsertLineAfter(insert_location, new_tokens) + insert_location = new_tokens[-1] + self._AddFix(new_tokens) + + # If inserting a blank line check blank line does not exist after + # token to avoid extra blank lines. + if (need_blank_line and insert_location.next + and insert_location.next.type != Type.BLANK_LINE): + tokenutil.InsertBlankLineAfter(insert_location) + + tokenutil.DeleteToken(dummy_first_token) + + def _StripSpace(self, token, before): + """Strip whitespace tokens either preceding or following the given token. + + Args: + token: The token. + before: If true, strip space before the token, if false, after it. + """ + token = token.previous if before else token.next + while token and token.type == Type.WHITESPACE: + tokenutil.DeleteToken(token) + token = token.previous if before else token.next + + def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number): + """Returns a list of tokens to create a goog.require/provide statement. + + Args: + is_provide: True if getting tokens for a provide, False for require. + namespace: The required or provided namespaces to get tokens for. 
+ line_number: The line number the new require or provide statement will be + on. + + Returns: + Tokens to create a new goog.require or goog.provide statement. + """ + string = 'goog.require' + if is_provide: + string = 'goog.provide' + line_text = string + '(\'' + namespace + '\');\n' + return [ + Token(string, Type.IDENTIFIER, line_text, line_number), + Token('(', Type.START_PAREN, line_text, line_number), + Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number), + Token(namespace, Type.STRING_TEXT, line_text, line_number), + Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number), + Token(')', Type.END_PAREN, line_text, line_number), + Token(';', Type.SEMICOLON, line_text, line_number) + ] + + def _DeleteToken(self, token): + """Deletes the specified token from the linked list of tokens. + + Updates instance variables pointing to tokens such as _file_token if + they reference the deleted token. + + Args: + token: The token to delete. + """ + if token == self._file_token: + self._file_token = token.next + + tokenutil.DeleteToken(token) + + def _DeleteTokens(self, token, token_count): + """Deletes the given number of tokens starting with the given token. + + Updates instance variables pointing to tokens such as _file_token if + they reference the deleted token. + + Args: + token: The first token to delete. + token_count: The total number of tokens to delete. + """ + if token == self._file_token: + for unused_i in xrange(token_count): + self._file_token = self._file_token.next + + tokenutil.DeleteTokens(token, token_count) + + def FinishFile(self): + """Called when the current file has finished style checking. + + Used to go back and fix any errors in the file. It currently supports both + js and html files. For js files it does a simple dump of all tokens, but in + order to support html file, we need to merge the original file with the new + token set back together. This works because the tokenized html file is the + original html file with all non js lines kept but blanked out with one blank + line token per line of html. + """ + if self._file_fix_count: + # Get the original file content for html. + if self._file_is_html: + f = open(self._file_name, 'r') + original_lines = f.readlines() + f.close() + + f = self._external_file + if not f: + error_noun = 'error' if self._file_fix_count == 1 else 'errors' + print 'Fixed %d %s in %s' % ( + self._file_fix_count, error_noun, self._file_name) + f = open(self._file_name, 'w') + + token = self._file_token + # Finding the first not deleted token. + while token.is_deleted: + token = token.next + # If something got inserted before first token (e.g. due to sorting) + # then move to start. Bug 8398202. + while token.previous: + token = token.previous + char_count = 0 + line = '' + while token: + line += token.string + char_count += len(token.string) + + if token.IsLastInLine(): + # We distinguish if a blank line in html was from stripped original + # file or newly added error fix by looking at the "org_line_number" + # field on the token. It is only set in the tokenizer, so for all + # error fixes, the value should be None. + if (line or not self._file_is_html or + token.orig_line_number is None): + f.write(line) + f.write('\n') + else: + f.write(original_lines[token.orig_line_number - 1]) + line = '' + if char_count > 80 and token.line_number in self._file_changed_lines: + print 'WARNING: Line %d of %s is now longer than 80 characters.' 
% ( + token.line_number, self._file_name) + + char_count = 0 + + token = token.next + + if not self._external_file: + # Close the file if we created it + f.close() diff --git a/tools/closure_linter/build/lib/closure_linter/error_fixer_test.py b/tools/closure_linter/build/lib/closure_linter/error_fixer_test.py new file mode 100644 index 0000000..49f449d --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/error_fixer_test.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for the error_fixer module.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + + + +import unittest as googletest +from closure_linter import error_fixer +from closure_linter import testutil + + +class ErrorFixerTest(googletest.TestCase): + """Unit tests for error_fixer.""" + + def setUp(self): + self.error_fixer = error_fixer.ErrorFixer() + + def testDeleteToken(self): + start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT) + second_token = start_token.next + self.error_fixer.HandleFile('test_file', start_token) + + self.error_fixer._DeleteToken(start_token) + + self.assertEqual(second_token, self.error_fixer._file_token) + + def testDeleteTokens(self): + start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT) + fourth_token = start_token.next.next.next + self.error_fixer.HandleFile('test_file', start_token) + + self.error_fixer._DeleteTokens(start_token, 3) + + self.assertEqual(fourth_token, self.error_fixer._file_token) + +_TEST_SCRIPT = """\ +var x = 3; +""" + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/build/lib/closure_linter/errorrecord.py b/tools/closure_linter/build/lib/closure_linter/errorrecord.py new file mode 100644 index 0000000..ce9fb90 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/errorrecord.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""A simple, pickle-serializable class to represent a lint error.""" + +__author__ = 'nnaze@google.com (Nathan Naze)' + +import gflags as flags + +from closure_linter import errors +from closure_linter.common import erroroutput + +FLAGS = flags.FLAGS + + +class ErrorRecord(object): + """Record-keeping struct that can be serialized back from a process. + + Attributes: + path: Path to the file. 
+ error_string: Error string for the user. + new_error: Whether this is a "new error" (see errors.NEW_ERRORS). + """ + + def __init__(self, path, error_string, new_error): + self.path = path + self.error_string = error_string + self.new_error = new_error + + +def MakeErrorRecord(path, error): + """Make an error record with correctly formatted error string. + + Errors are not able to be serialized (pickled) over processes because of + their pointers to the complex token/context graph. We use an intermediary + serializable class to pass back just the relevant information. + + Args: + path: Path of file the error was found in. + error: An error.Error instance. + + Returns: + _ErrorRecord instance. + """ + new_error = error.code in errors.NEW_ERRORS + + if FLAGS.unix_mode: + error_string = erroroutput.GetUnixErrorOutput( + path, error, new_error=new_error) + else: + error_string = erroroutput.GetErrorOutput(error, new_error=new_error) + + return ErrorRecord(path, error_string, new_error) diff --git a/tools/closure_linter/build/lib/closure_linter/errorrules.py b/tools/closure_linter/build/lib/closure_linter/errorrules.py new file mode 100644 index 0000000..b1b72aa --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/errorrules.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# +# Copyright 2010 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Linter error rules class for Closure Linter.""" + +__author__ = 'robbyw@google.com (Robert Walker)' + +import gflags as flags +from closure_linter import errors + + +FLAGS = flags.FLAGS +flags.DEFINE_boolean('jsdoc', True, + 'Whether to report errors for missing JsDoc.') +flags.DEFINE_list('disable', None, + 'Disable specific error. Usage Ex.: gjslint --disable 1,' + '0011 foo.js.') +flags.DEFINE_integer('max_line_length', 80, 'Maximum line length allowed ' + 'without warning.', lower_bound=1) + +disabled_error_nums = None + + +def GetMaxLineLength(): + """Returns allowed maximum length of line. + + Returns: + Length of line allowed without any warning. + """ + return FLAGS.max_line_length + + +def ShouldReportError(error): + """Whether the given error should be reported. + + Returns: + True for all errors except missing documentation errors and disabled + errors. For missing documentation, it returns the value of the + jsdoc flag. 
+ """ + global disabled_error_nums + if disabled_error_nums is None: + disabled_error_nums = [] + if FLAGS.disable: + for error_str in FLAGS.disable: + error_num = 0 + try: + error_num = int(error_str) + except ValueError: + pass + disabled_error_nums.append(error_num) + + return ((FLAGS.jsdoc or error not in ( + errors.MISSING_PARAMETER_DOCUMENTATION, + errors.MISSING_RETURN_DOCUMENTATION, + errors.MISSING_MEMBER_DOCUMENTATION, + errors.MISSING_PRIVATE, + errors.MISSING_JSDOC_TAG_THIS)) and + (not FLAGS.disable or error not in disabled_error_nums)) diff --git a/tools/closure_linter/build/lib/closure_linter/errorrules_test.py b/tools/closure_linter/build/lib/closure_linter/errorrules_test.py new file mode 100644 index 0000000..cb90378 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/errorrules_test.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# Copyright 2013 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Medium tests for the gjslint errorrules. + +Currently its just verifying that warnings can't be disabled. +""" + + + +import gflags as flags +import unittest as googletest + +from closure_linter import errors +from closure_linter import runner +from closure_linter.common import erroraccumulator + +flags.FLAGS.strict = True +flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js') +flags.FLAGS.closurized_namespaces = ('goog', 'dummy') + + +class ErrorRulesTest(googletest.TestCase): + """Test case to for gjslint errorrules.""" + + def testNoMaxLineLengthFlagExists(self): + """Tests that --max_line_length flag does not exists.""" + self.assertTrue('max_line_length' not in flags.FLAGS.FlagDict()) + + def testGetMaxLineLength(self): + """Tests warning are reported for line greater than 80. + """ + + # One line > 100 and one line > 80 and < 100. So should produce two + # line too long error. + original = [ + 'goog.require(\'dummy.aa\');', + '', + 'function a() {', + ' dummy.aa.i = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13' + ' + 14 + 15 + 16 + 17 + 18 + 19 + 20;', + ' dummy.aa.j = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13' + ' + 14 + 15 + 16 + 17 + 18;', + '}', + '' + ] + + # Expect line too long. + expected = [errors.LINE_TOO_LONG, errors.LINE_TOO_LONG] + + self._AssertErrors(original, expected) + + def testNoDisableFlagExists(self): + """Tests that --disable flag does not exists.""" + self.assertTrue('disable' not in flags.FLAGS.FlagDict()) + + def testWarningsNotDisabled(self): + """Tests warnings are reported when nothing is disabled. 
+ """ + original = [ + 'goog.require(\'dummy.aa\');', + 'goog.require(\'dummy.Cc\');', + 'goog.require(\'dummy.Dd\');', + '', + 'function a() {', + ' dummy.aa.i = 1;', + ' dummy.Cc.i = 1;', + ' dummy.Dd.i = 1;', + '}', + ] + + expected = [errors.GOOG_REQUIRES_NOT_ALPHABETIZED, + errors.FILE_MISSING_NEWLINE] + + self._AssertErrors(original, expected) + + def _AssertErrors(self, original, expected_errors, include_header=True): + """Asserts that the error fixer corrects original to expected.""" + if include_header: + original = self._GetHeader() + original + + # Trap gjslint's output parse it to get messages added. + error_accumulator = erroraccumulator.ErrorAccumulator() + runner.Run('testing.js', error_accumulator, source=original) + error_nums = [e.code for e in error_accumulator.GetErrors()] + + error_nums.sort() + expected_errors.sort() + self.assertListEqual(error_nums, expected_errors) + + def _GetHeader(self): + """Returns a fake header for a JavaScript file.""" + return [ + '// Copyright 2011 Google Inc. All Rights Reserved.', + '', + '/**', + ' * @fileoverview Fake file overview.', + ' * @author fake@google.com (Fake Person)', + ' */', + '' + ] + + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/build/lib/closure_linter/errors.py b/tools/closure_linter/build/lib/closure_linter/errors.py new file mode 100644 index 0000000..356ee0c --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/errors.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python +# Copyright 2007 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Error codes for JavaScript style checker.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + + +def ByName(name): + """Get the error code for the given error name. 
+ + Args: + name: The name of the error + + Returns: + The error code + """ + return globals()[name] + + +# "File-fatal" errors - these errors stop further parsing of a single file +FILE_NOT_FOUND = -1 +FILE_DOES_NOT_PARSE = -2 + +# Spacing +EXTRA_SPACE = 1 +MISSING_SPACE = 2 +EXTRA_LINE = 3 +MISSING_LINE = 4 +ILLEGAL_TAB = 5 +WRONG_INDENTATION = 6 +WRONG_BLANK_LINE_COUNT = 7 + +# Semicolons +MISSING_SEMICOLON = 10 +MISSING_SEMICOLON_AFTER_FUNCTION = 11 +ILLEGAL_SEMICOLON_AFTER_FUNCTION = 12 +REDUNDANT_SEMICOLON = 13 + +# Miscellaneous +ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100 +LINE_TOO_LONG = 110 +LINE_STARTS_WITH_OPERATOR = 120 +COMMA_AT_END_OF_LITERAL = 121 +LINE_ENDS_WITH_DOT = 122 +MULTI_LINE_STRING = 130 +UNNECESSARY_DOUBLE_QUOTED_STRING = 131 +UNUSED_PRIVATE_MEMBER = 132 +UNUSED_LOCAL_VARIABLE = 133 + +# Requires, provides +GOOG_REQUIRES_NOT_ALPHABETIZED = 140 +GOOG_PROVIDES_NOT_ALPHABETIZED = 141 +MISSING_GOOG_REQUIRE = 142 +MISSING_GOOG_PROVIDE = 143 +EXTRA_GOOG_REQUIRE = 144 +EXTRA_GOOG_PROVIDE = 145 +ALIAS_STMT_NEEDS_GOOG_REQUIRE = 146 + +# JsDoc +INVALID_JSDOC_TAG = 200 +INVALID_USE_OF_DESC_TAG = 201 +NO_BUG_NUMBER_AFTER_BUG_TAG = 202 +MISSING_PARAMETER_DOCUMENTATION = 210 +EXTRA_PARAMETER_DOCUMENTATION = 211 +WRONG_PARAMETER_DOCUMENTATION = 212 +MISSING_JSDOC_TAG_TYPE = 213 +MISSING_JSDOC_TAG_DESCRIPTION = 214 +MISSING_JSDOC_PARAM_NAME = 215 +OUT_OF_ORDER_JSDOC_TAG_TYPE = 216 +MISSING_RETURN_DOCUMENTATION = 217 +UNNECESSARY_RETURN_DOCUMENTATION = 218 +MISSING_BRACES_AROUND_TYPE = 219 +MISSING_MEMBER_DOCUMENTATION = 220 +MISSING_PRIVATE = 221 +EXTRA_PRIVATE = 222 +INVALID_OVERRIDE_PRIVATE = 223 +INVALID_INHERIT_DOC_PRIVATE = 224 +MISSING_JSDOC_TAG_THIS = 225 +UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226 +INVALID_AUTHOR_TAG_DESCRIPTION = 227 +JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230 +JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231 +JSDOC_MISSING_OPTIONAL_TYPE = 232 +JSDOC_MISSING_OPTIONAL_PREFIX = 233 +JSDOC_MISSING_VAR_ARGS_TYPE = 234 +JSDOC_MISSING_VAR_ARGS_NAME = 235 +JSDOC_DOES_NOT_PARSE = 236 +# TODO(robbyw): Split this in to more specific syntax problems. +INCORRECT_SUPPRESS_SYNTAX = 250 +INVALID_SUPPRESS_TYPE = 251 +UNNECESSARY_SUPPRESS = 252 + +# File ending +FILE_MISSING_NEWLINE = 300 +FILE_IN_BLOCK = 301 + +# Interfaces +INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400 +INTERFACE_METHOD_CANNOT_HAVE_CODE = 401 + +# Comments +MISSING_END_OF_SCOPE_COMMENT = 500 +MALFORMED_END_OF_SCOPE_COMMENT = 501 + +# goog.scope - Namespace aliasing +# TODO(nnaze) Add additional errors here and in aliaspass.py +INVALID_USE_OF_GOOG_SCOPE = 600 +EXTRA_GOOG_SCOPE_USAGE = 601 + +# ActionScript specific errors: +# TODO(user): move these errors to their own file and move all JavaScript +# specific errors to their own file as well. +# All ActionScript specific errors should have error number at least 1000. +FUNCTION_MISSING_RETURN_TYPE = 1132 +PARAMETER_MISSING_TYPE = 1133 +VAR_MISSING_TYPE = 1134 +PARAMETER_MISSING_DEFAULT_VALUE = 1135 +IMPORTS_NOT_ALPHABETIZED = 1140 +IMPORT_CONTAINS_WILDCARD = 1141 +UNUSED_IMPORT = 1142 +INVALID_TRACE_SEVERITY_LEVEL = 1250 +MISSING_TRACE_SEVERITY_LEVEL = 1251 +MISSING_TRACE_MESSAGE = 1252 +REMOVE_TRACE_BEFORE_SUBMIT = 1253 +REMOVE_COMMENT_BEFORE_SUBMIT = 1254 +# End of list of ActionScript specific errors. 
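Editor's note, not part of the patch: the ByName helper defined above simply resolves an error name string to its numeric code through the module globals, so the constants listed here are addressable by name. A minimal usage sketch, assuming the module is importable as closure_linter.errors:

    # Hedged usage sketch for errors.ByName(); values taken from the constants above.
    from closure_linter import errors

    assert errors.ByName('LINE_TOO_LONG') == 110
    assert errors.ByName('MISSING_SEMICOLON') == errors.MISSING_SEMICOLON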
+ +NEW_ERRORS = frozenset([ + # Errors added after 2.0.2: + WRONG_INDENTATION, + MISSING_SEMICOLON, + # Errors added after 2.3.9: + JSDOC_MISSING_VAR_ARGS_TYPE, + JSDOC_MISSING_VAR_ARGS_NAME, + # Errors added after 2.3.15: + ALIAS_STMT_NEEDS_GOOG_REQUIRE, + JSDOC_DOES_NOT_PARSE, + LINE_ENDS_WITH_DOT, + # Errors added after 2.3.17: + ]) diff --git a/tools/closure_linter/build/lib/closure_linter/fixjsstyle.py b/tools/closure_linter/build/lib/closure_linter/fixjsstyle.py new file mode 100644 index 0000000..2d65e03 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/fixjsstyle.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python +# +# Copyright 2007 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Automatically fix simple style guide violations.""" + +__author__ = 'robbyw@google.com (Robert Walker)' + +import StringIO +import sys + +import gflags as flags + +from closure_linter import error_fixer +from closure_linter import runner +from closure_linter.common import simplefileflags as fileflags + +FLAGS = flags.FLAGS +flags.DEFINE_list('additional_extensions', None, 'List of additional file ' + 'extensions (not js) that should be treated as ' + 'JavaScript files.') +flags.DEFINE_boolean('dry_run', False, 'Do not modify the file, only print it.') + + +def main(argv=None): + """Main function. + + Args: + argv: Sequence of command line arguments. + """ + if argv is None: + argv = flags.FLAGS(sys.argv) + + suffixes = ['.js'] + if FLAGS.additional_extensions: + suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions] + + files = fileflags.GetFileList(argv, 'JavaScript', suffixes) + + output_buffer = None + if FLAGS.dry_run: + output_buffer = StringIO.StringIO() + + fixer = error_fixer.ErrorFixer(output_buffer) + + # Check the list of files. + for filename in files: + runner.Run(filename, fixer) + if FLAGS.dry_run: + print output_buffer.getvalue() + + +if __name__ == '__main__': + main() diff --git a/tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py b/tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py new file mode 100644 index 0000000..34de3f8 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py @@ -0,0 +1,615 @@ +#!/usr/bin/env python +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
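Editor's sketch, not part of the patch: the --dry_run branch of fixjsstyle.py above routes the fixer's output into a StringIO buffer instead of rewriting files on disk, roughly as below. The path 'foo.js' is a placeholder, and gflags are assumed to have been parsed already, as fixjsstyle.main() does.

    # Hypothetical sketch mirroring the --dry_run branch of fixjsstyle.py above.
    import StringIO

    from closure_linter import error_fixer
    from closure_linter import runner

    output_buffer = StringIO.StringIO()
    fixer = error_fixer.ErrorFixer(output_buffer)
    runner.Run('foo.js', fixer)  # 'foo.js' is a placeholder path
    print output_buffer.getvalue()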
+ +"""Medium tests for the gpylint auto-fixer.""" + +__author__ = 'robbyw@google.com (Robby Walker)' + +import StringIO + +import gflags as flags +import unittest as googletest +from closure_linter import error_fixer +from closure_linter import runner + + +_RESOURCE_PREFIX = 'closure_linter/testdata' + +flags.FLAGS.strict = True +flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js') +flags.FLAGS.closurized_namespaces = ('goog', 'dummy') + + +class FixJsStyleTest(googletest.TestCase): + """Test case to for gjslint auto-fixing.""" + + def setUp(self): + flags.FLAGS.dot_on_next_line = True + + def tearDown(self): + flags.FLAGS.dot_on_next_line = False + + def testFixJsStyle(self): + test_cases = [ + ['fixjsstyle.in.js', 'fixjsstyle.out.js'], + ['indentation.js', 'fixjsstyle.indentation.out.js'], + ['fixjsstyle.html.in.html', 'fixjsstyle.html.out.html'], + ['fixjsstyle.oplineend.in.js', 'fixjsstyle.oplineend.out.js']] + for [running_input_file, running_output_file] in test_cases: + print 'Checking %s vs %s' % (running_input_file, running_output_file) + input_filename = None + golden_filename = None + current_filename = None + try: + input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file) + current_filename = input_filename + + golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file) + current_filename = golden_filename + except IOError as ex: + raise IOError('Could not find testdata resource for %s: %s' % + (current_filename, ex)) + + if running_input_file == 'fixjsstyle.in.js': + with open(input_filename) as f: + for line in f: + # Go to last line. + pass + self.assertTrue(line == line.rstrip(), '%s file should not end ' + 'with a new line.' % (input_filename)) + + # Autofix the file, sending output to a fake file. + actual = StringIO.StringIO() + runner.Run(input_filename, error_fixer.ErrorFixer(actual)) + + # Now compare the files. + actual.seek(0) + expected = open(golden_filename, 'r') + + # Uncomment to generate new golden files and run + # open('/'.join(golden_filename.split('/')[4:]), 'w').write(actual.read()) + # actual.seek(0) + + self.assertEqual(actual.readlines(), expected.readlines()) + + def testAddProvideFirstLine(self): + """Tests handling of case where goog.provide is added.""" + original = [ + 'dummy.bb.cc = 1;', + ] + + expected = [ + 'goog.provide(\'dummy.bb\');', + '', + 'dummy.bb.cc = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + original = [ + '', + 'dummy.bb.cc = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testAddRequireFirstLine(self): + """Tests handling of case where goog.require is added.""" + original = [ + 'a = dummy.bb.cc;', + ] + + expected = [ + 'goog.require(\'dummy.bb\');', + '', + 'a = dummy.bb.cc;', + ] + + self._AssertFixes(original, expected, include_header=False) + + original = [ + '', + 'a = dummy.bb.cc;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testDeleteProvideAndAddProvideFirstLine(self): + """Tests handling of case where goog.provide is deleted and added. + + Bug 14832597. 
+ """ + original = [ + 'goog.provide(\'dummy.aa\');', + '', + 'dummy.bb.cc = 1;', + ] + + expected = [ + 'goog.provide(\'dummy.bb\');', + '', + 'dummy.bb.cc = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + original = [ + 'goog.provide(\'dummy.aa\');', + 'dummy.bb.cc = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testDeleteProvideAndAddRequireFirstLine(self): + """Tests handling where goog.provide is deleted and goog.require added. + + Bug 14832597. + """ + original = [ + 'goog.provide(\'dummy.aa\');', + '', + 'a = dummy.bb.cc;', + ] + + expected = [ + 'goog.require(\'dummy.bb\');', + '', + 'a = dummy.bb.cc;', + ] + + self._AssertFixes(original, expected, include_header=False) + + original = [ + 'goog.provide(\'dummy.aa\');', + 'a = dummy.bb.cc;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testDeleteRequireAndAddRequireFirstLine(self): + """Tests handling of case where goog.require is deleted and added. + + Bug 14832597. + """ + original = [ + 'goog.require(\'dummy.aa\');', + '', + 'a = dummy.bb.cc;', + ] + + expected = [ + 'goog.require(\'dummy.bb\');', + '', + 'a = dummy.bb.cc;', + ] + + self._AssertFixes(original, expected, include_header=False) + + original = [ + 'goog.require(\'dummy.aa\');', + 'a = dummy.bb.cc;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testDeleteRequireAndAddProvideFirstLine(self): + """Tests handling where goog.require is deleted and goog.provide added. + + Bug 14832597. + """ + original = [ + 'goog.require(\'dummy.aa\');', + '', + 'dummy.bb.cc = 1;', + ] + + expected = [ + 'goog.provide(\'dummy.bb\');', + '', + 'dummy.bb.cc = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + original = [ + 'goog.require(\'dummy.aa\');', + 'dummy.bb.cc = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testMultipleProvideInsert(self): + original = [ + 'goog.provide(\'dummy.bb\');', + 'goog.provide(\'dummy.dd\');', + '', + 'dummy.aa.ff = 1;', + 'dummy.bb.ff = 1;', + 'dummy.cc.ff = 1;', + 'dummy.dd.ff = 1;', + 'dummy.ee.ff = 1;', + ] + + expected = [ + 'goog.provide(\'dummy.aa\');', + 'goog.provide(\'dummy.bb\');', + 'goog.provide(\'dummy.cc\');', + 'goog.provide(\'dummy.dd\');', + 'goog.provide(\'dummy.ee\');', + '', + 'dummy.aa.ff = 1;', + 'dummy.bb.ff = 1;', + 'dummy.cc.ff = 1;', + 'dummy.dd.ff = 1;', + 'dummy.ee.ff = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testMultipleRequireInsert(self): + original = [ + 'goog.require(\'dummy.bb\');', + 'goog.require(\'dummy.dd\');', + '', + 'a = dummy.aa.ff;', + 'b = dummy.bb.ff;', + 'c = dummy.cc.ff;', + 'd = dummy.dd.ff;', + 'e = dummy.ee.ff;', + ] + + expected = [ + 'goog.require(\'dummy.aa\');', + 'goog.require(\'dummy.bb\');', + 'goog.require(\'dummy.cc\');', + 'goog.require(\'dummy.dd\');', + 'goog.require(\'dummy.ee\');', + '', + 'a = dummy.aa.ff;', + 'b = dummy.bb.ff;', + 'c = dummy.cc.ff;', + 'd = dummy.dd.ff;', + 'e = dummy.ee.ff;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testUnsortedRequires(self): + """Tests handling of unsorted goog.require statements without header. + + Bug 8398202. 
+ """ + original = [ + 'goog.require(\'dummy.aa\');', + 'goog.require(\'dummy.Cc\');', + 'goog.require(\'dummy.Dd\');', + '', + 'function a() {', + ' dummy.aa.i = 1;', + ' dummy.Cc.i = 1;', + ' dummy.Dd.i = 1;', + '}', + ] + + expected = [ + 'goog.require(\'dummy.Cc\');', + 'goog.require(\'dummy.Dd\');', + 'goog.require(\'dummy.aa\');', + '', + 'function a() {', + ' dummy.aa.i = 1;', + ' dummy.Cc.i = 1;', + ' dummy.Dd.i = 1;', + '}', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testMissingExtraAndUnsortedRequires(self): + """Tests handling of missing extra and unsorted goog.require statements.""" + original = [ + 'goog.require(\'dummy.aa\');', + 'goog.require(\'dummy.Cc\');', + 'goog.require(\'dummy.Dd\');', + '', + 'var x = new dummy.Bb();', + 'dummy.Cc.someMethod();', + 'dummy.aa.someMethod();', + ] + + expected = [ + 'goog.require(\'dummy.Bb\');', + 'goog.require(\'dummy.Cc\');', + 'goog.require(\'dummy.aa\');', + '', + 'var x = new dummy.Bb();', + 'dummy.Cc.someMethod();', + 'dummy.aa.someMethod();', + ] + + self._AssertFixes(original, expected) + + def testExtraRequireOnFirstLine(self): + """Tests handling of extra goog.require statement on the first line. + + There was a bug when fixjsstyle quits with an exception. It happened if + - the first line of the file is an extra goog.require() statement, + - goog.require() statements are not sorted. + """ + original = [ + 'goog.require(\'dummy.aa\');', + 'goog.require(\'dummy.cc\');', + 'goog.require(\'dummy.bb\');', + '', + 'var x = new dummy.bb();', + 'var y = new dummy.cc();', + ] + + expected = [ + 'goog.require(\'dummy.bb\');', + 'goog.require(\'dummy.cc\');', + '', + 'var x = new dummy.bb();', + 'var y = new dummy.cc();', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testUnsortedProvides(self): + """Tests handling of unsorted goog.provide statements without header. + + Bug 8398202. 
+ """ + original = [ + 'goog.provide(\'dummy.aa\');', + 'goog.provide(\'dummy.Cc\');', + 'goog.provide(\'dummy.Dd\');', + '', + 'dummy.aa = function() {};' + 'dummy.Cc = function() {};' + 'dummy.Dd = function() {};' + ] + + expected = [ + 'goog.provide(\'dummy.Cc\');', + 'goog.provide(\'dummy.Dd\');', + 'goog.provide(\'dummy.aa\');', + '', + 'dummy.aa = function() {};' + 'dummy.Cc = function() {};' + 'dummy.Dd = function() {};' + ] + + self._AssertFixes(original, expected, include_header=False) + + def testMissingExtraAndUnsortedProvides(self): + """Tests handling of missing extra and unsorted goog.provide statements.""" + original = [ + 'goog.provide(\'dummy.aa\');', + 'goog.provide(\'dummy.Cc\');', + 'goog.provide(\'dummy.Dd\');', + '', + 'dummy.Cc = function() {};', + 'dummy.Bb = function() {};', + 'dummy.aa.someMethod = function();', + ] + + expected = [ + 'goog.provide(\'dummy.Bb\');', + 'goog.provide(\'dummy.Cc\');', + 'goog.provide(\'dummy.aa\');', + '', + 'dummy.Cc = function() {};', + 'dummy.Bb = function() {};', + 'dummy.aa.someMethod = function();', + ] + + self._AssertFixes(original, expected) + + def testNoRequires(self): + """Tests positioning of missing requires without existing requires.""" + original = [ + 'goog.provide(\'dummy.Something\');', + '', + 'dummy.Something = function() {};', + '', + 'var x = new dummy.Bb();', + ] + + expected = [ + 'goog.provide(\'dummy.Something\');', + '', + 'goog.require(\'dummy.Bb\');', + '', + 'dummy.Something = function() {};', + '', + 'var x = new dummy.Bb();', + ] + + self._AssertFixes(original, expected) + + def testNoProvides(self): + """Tests positioning of missing provides without existing provides.""" + original = [ + 'goog.require(\'dummy.Bb\');', + '', + 'dummy.Something = function() {};', + '', + 'var x = new dummy.Bb();', + ] + + expected = [ + 'goog.provide(\'dummy.Something\');', + '', + 'goog.require(\'dummy.Bb\');', + '', + 'dummy.Something = function() {};', + '', + 'var x = new dummy.Bb();', + ] + + self._AssertFixes(original, expected) + + def testOutputOkayWhenFirstTokenIsDeleted(self): + """Tests that autofix output is is correct when first token is deleted. 
+ + Regression test for bug 4581567 + """ + original = ['"use strict";'] + expected = ["'use strict';"] + + self._AssertFixes(original, expected, include_header=False) + + def testGoogScopeIndentation(self): + """Tests Handling a typical end-of-scope indentation fix.""" + original = [ + 'goog.scope(function() {', + ' // TODO(brain): Take over the world.', + '}); // goog.scope', + ] + + expected = [ + 'goog.scope(function() {', + '// TODO(brain): Take over the world.', + '}); // goog.scope', + ] + + self._AssertFixes(original, expected) + + def testMissingEndOfScopeComment(self): + """Tests Handling a missing comment at end of goog.scope.""" + original = [ + 'goog.scope(function() {', + '});', + ] + + expected = [ + 'goog.scope(function() {', + '}); // goog.scope', + ] + + self._AssertFixes(original, expected) + + def testMissingEndOfScopeCommentWithOtherComment(self): + """Tests handling an irrelevant comment at end of goog.scope.""" + original = [ + 'goog.scope(function() {', + "}); // I don't belong here!", + ] + + expected = [ + 'goog.scope(function() {', + '}); // goog.scope', + ] + + self._AssertFixes(original, expected) + + def testMalformedEndOfScopeComment(self): + """Tests Handling a malformed comment at end of goog.scope.""" + original = [ + 'goog.scope(function() {', + '}); // goog.scope FTW', + ] + + expected = [ + 'goog.scope(function() {', + '}); // goog.scope', + ] + + self._AssertFixes(original, expected) + + def testEndsWithIdentifier(self): + """Tests Handling case where script ends with identifier. Bug 7643404.""" + original = [ + 'goog.provide(\'xyz\');', + '', + 'abc' + ] + + expected = [ + 'goog.provide(\'xyz\');', + '', + 'abc;' + ] + + self._AssertFixes(original, expected) + + def testFileStartsWithSemicolon(self): + """Tests handling files starting with semicolon. + + b/10062516 + """ + original = [ + ';goog.provide(\'xyz\');', + '', + 'abc;' + ] + + expected = [ + 'goog.provide(\'xyz\');', + '', + 'abc;' + ] + + self._AssertFixes(original, expected, include_header=False) + + def testCodeStartsWithSemicolon(self): + """Tests handling code in starting with semicolon after comments. + + b/10062516 + """ + original = [ + ';goog.provide(\'xyz\');', + '', + 'abc;' + ] + + expected = [ + 'goog.provide(\'xyz\');', + '', + 'abc;' + ] + + self._AssertFixes(original, expected) + + def _AssertFixes(self, original, expected, include_header=True): + """Asserts that the error fixer corrects original to expected.""" + if include_header: + original = self._GetHeader() + original + expected = self._GetHeader() + expected + + actual = StringIO.StringIO() + runner.Run('testing.js', error_fixer.ErrorFixer(actual), original) + actual.seek(0) + + expected = [x + '\n' for x in expected] + + self.assertListEqual(actual.readlines(), expected) + + def _GetHeader(self): + """Returns a fake header for a JavaScript file.""" + return [ + '// Copyright 2011 Google Inc. All Rights Reserved.', + '', + '/**', + ' * @fileoverview Fake file overview.', + ' * @author fake@google.com (Fake Person)', + ' */', + '' + ] + + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/build/lib/closure_linter/full_test.py b/tools/closure_linter/build/lib/closure_linter/full_test.py new file mode 100644 index 0000000..d0a1557 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/full_test.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python +# +# Copyright 2007 The Closure Linter Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Full regression-type (Medium) tests for gjslint. + +Tests every error that can be thrown by gjslint. Based heavily on +devtools/javascript/gpylint/full_test.py +""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + +import os +import sys +import unittest + +import gflags as flags +import unittest as googletest + +from closure_linter import error_check +from closure_linter import errors +from closure_linter import runner +from closure_linter.common import filetestcase + +_RESOURCE_PREFIX = 'closure_linter/testdata' + +flags.FLAGS.strict = True +flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires') +flags.FLAGS.closurized_namespaces = ('goog', 'dummy') +flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js', + 'limited_doc_checks.js') +flags.FLAGS.jslint_error = error_check.Rule.ALL + +# List of files under testdata to test. +# We need to list files explicitly since pyglib can't list directories. +# TODO(user): Figure out how to list the directory. +_TEST_FILES = [ + 'all_js_wrapped.js', + 'blank_lines.js', + 'ends_with_block.js', + 'empty_file.js', + 'externs.js', + 'externs_jsdoc.js', + 'goog_scope.js', + 'html_parse_error.html', + 'indentation.js', + 'interface.js', + 'jsdoc.js', + 'limited_doc_checks.js', + 'minimal.js', + 'other.js', + 'provide_blank.js', + 'provide_extra.js', + 'provide_missing.js', + 'require_alias.js', + 'require_all_caps.js', + 'require_blank.js', + 'require_extra.js', + 'require_function.js', + 'require_function_missing.js', + 'require_function_through_both.js', + 'require_function_through_namespace.js', + 'require_interface.js', + 'require_interface_alias.js', + 'require_interface_base.js', + 'require_lower_case.js', + 'require_missing.js', + 'require_numeric.js', + 'require_provide_blank.js', + 'require_provide_missing.js', + 'require_provide_ok.js', + 'semicolon_missing.js', + 'simple.html', + 'spaces.js', + 'tokenizer.js', + 'unparseable.js', + 'unused_local_variables.js', + 'unused_private_members.js', + 'utf8.html', +] + + +class GJsLintTestSuite(unittest.TestSuite): + """Test suite to run a GJsLintTest for each of several files. + + If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in + testdata to test. Otherwise, _TEST_FILES is used. + """ + + def __init__(self, tests=()): + unittest.TestSuite.__init__(self, tests) + + argv = sys.argv and sys.argv[1:] or [] + if argv: + test_files = argv + else: + test_files = _TEST_FILES + for test_file in test_files: + resource_path = os.path.join(_RESOURCE_PREFIX, test_file) + self.addTest( + filetestcase.AnnotatedFileTestCase( + resource_path, + runner.Run, + errors.ByName)) + +if __name__ == '__main__': + # Don't let main parse args; it happens in the TestSuite. 
+ googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite') diff --git a/tools/closure_linter/build/lib/closure_linter/gjslint.py b/tools/closure_linter/build/lib/closure_linter/gjslint.py new file mode 100644 index 0000000..824e025 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/gjslint.py @@ -0,0 +1,319 @@ +#!/usr/bin/env python +# Copyright 2007 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Checks JavaScript files for common style guide violations. + +gjslint.py is designed to be used as a PRESUBMIT script to check for javascript +style guide violations. As of now, it checks for the following violations: + + * Missing and extra spaces + * Lines longer than 80 characters + * Missing newline at end of file + * Missing semicolon after function declaration + * Valid JsDoc including parameter matching + +Someday it will validate to the best of its ability against the entirety of the +JavaScript style guide. + +This file is a front end that parses arguments and flags. The core of the code +is in tokenizer.py and checker.py. +""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)', + 'nnaze@google.com (Nathan Naze)',) + +import errno +import itertools +import os +import platform +import re +import sys +import time + +import gflags as flags + +from closure_linter import errorrecord +from closure_linter import runner +from closure_linter.common import erroraccumulator +from closure_linter.common import simplefileflags as fileflags + +# Attempt import of multiprocessing (should be available in Python 2.6 and up). +try: + # pylint: disable=g-import-not-at-top + import multiprocessing +except ImportError: + multiprocessing = None + +FLAGS = flags.FLAGS +flags.DEFINE_boolean('unix_mode', False, + 'Whether to emit warnings in standard unix format.') +flags.DEFINE_boolean('beep', True, 'Whether to beep when errors are found.') +flags.DEFINE_boolean('time', False, 'Whether to emit timing statistics.') +flags.DEFINE_boolean('quiet', False, 'Whether to minimize logged messages. ' + 'Most useful for per-file linting, such as that performed ' + 'by the presubmit linter service.') +flags.DEFINE_boolean('check_html', False, + 'Whether to check javascript in html files.') +flags.DEFINE_boolean('summary', False, + 'Whether to show an error count summary.') +flags.DEFINE_list('additional_extensions', None, 'List of additional file ' + 'extensions (not js) that should be treated as ' + 'JavaScript files.') +flags.DEFINE_boolean('multiprocess', + platform.system() is 'Linux' and bool(multiprocessing), + 'Whether to attempt parallelized linting using the ' + 'multiprocessing module. Enabled by default on Linux ' + 'if the multiprocessing module is present (Python 2.6+). ' + 'Otherwise disabled by default. 
' + 'Disabling may make debugging easier.') +flags.ADOPT_module_key_flags(fileflags) +flags.ADOPT_module_key_flags(runner) + + +GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time', + '--check_html', '--summary', '--quiet'] + + + +def _MultiprocessCheckPaths(paths): + """Run _CheckPath over mutltiple processes. + + Tokenization, passes, and checks are expensive operations. Running in a + single process, they can only run on one CPU/core. Instead, + shard out linting over all CPUs with multiprocessing to parallelize. + + Args: + paths: paths to check. + + Yields: + errorrecord.ErrorRecords for any found errors. + """ + + pool = multiprocessing.Pool() + + path_results = pool.imap(_CheckPath, paths) + for results in path_results: + for result in results: + yield result + + # Force destruct before returning, as this can sometimes raise spurious + # "interrupted system call" (EINTR), which we can ignore. + try: + pool.close() + pool.join() + del pool + except OSError as err: + if err.errno is not errno.EINTR: + raise err + + +def _CheckPaths(paths): + """Run _CheckPath on all paths in one thread. + + Args: + paths: paths to check. + + Yields: + errorrecord.ErrorRecords for any found errors. + """ + + for path in paths: + results = _CheckPath(path) + for record in results: + yield record + + +def _CheckPath(path): + """Check a path and return any errors. + + Args: + path: paths to check. + + Returns: + A list of errorrecord.ErrorRecords for any found errors. + """ + + error_handler = erroraccumulator.ErrorAccumulator() + runner.Run(path, error_handler) + + make_error_record = lambda err: errorrecord.MakeErrorRecord(path, err) + return map(make_error_record, error_handler.GetErrors()) + + +def _GetFilePaths(argv): + suffixes = ['.js'] + if FLAGS.additional_extensions: + suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions] + if FLAGS.check_html: + suffixes += ['.html', '.htm'] + return fileflags.GetFileList(argv, 'JavaScript', suffixes) + + +# Error printing functions + + +def _PrintFileSummary(paths, records): + """Print a detailed summary of the number of errors in each file.""" + + paths = list(paths) + paths.sort() + + for path in paths: + path_errors = [e for e in records if e.path == path] + print '%s: %d' % (path, len(path_errors)) + + +def _PrintFileSeparator(path): + print '----- FILE : %s -----' % path + + +def _PrintSummary(paths, error_records): + """Print a summary of the number of errors and files.""" + + error_count = len(error_records) + all_paths = set(paths) + all_paths_count = len(all_paths) + + if error_count is 0: + print '%d files checked, no errors found.' % all_paths_count + + new_error_count = len([e for e in error_records if e.new_error]) + + error_paths = set([e.path for e in error_records]) + error_paths_count = len(error_paths) + no_error_paths_count = all_paths_count - error_paths_count + + if (error_count or new_error_count) and not FLAGS.quiet: + error_noun = 'error' if error_count == 1 else 'errors' + new_error_noun = 'error' if new_error_count == 1 else 'errors' + error_file_noun = 'file' if error_paths_count == 1 else 'files' + ok_file_noun = 'file' if no_error_paths_count == 1 else 'files' + print ('Found %d %s, including %d new %s, in %d %s (%d %s OK).' 
% + (error_count, + error_noun, + new_error_count, + new_error_noun, + error_paths_count, + error_file_noun, + no_error_paths_count, + ok_file_noun)) + + +def _PrintErrorRecords(error_records): + """Print error records strings in the expected format.""" + + current_path = None + for record in error_records: + + if current_path != record.path: + current_path = record.path + if not FLAGS.unix_mode: + _PrintFileSeparator(current_path) + + print record.error_string + + +def _FormatTime(t): + """Formats a duration as a human-readable string. + + Args: + t: A duration in seconds. + + Returns: + A formatted duration string. + """ + if t < 1: + return '%dms' % round(t * 1000) + else: + return '%.2fs' % t + + + + +def main(argv=None): + """Main function. + + Args: + argv: Sequence of command line arguments. + """ + if argv is None: + argv = flags.FLAGS(sys.argv) + + if FLAGS.time: + start_time = time.time() + + suffixes = ['.js'] + if FLAGS.additional_extensions: + suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions] + if FLAGS.check_html: + suffixes += ['.html', '.htm'] + paths = fileflags.GetFileList(argv, 'JavaScript', suffixes) + + if FLAGS.multiprocess: + records_iter = _MultiprocessCheckPaths(paths) + else: + records_iter = _CheckPaths(paths) + + records_iter, records_iter_copy = itertools.tee(records_iter, 2) + _PrintErrorRecords(records_iter_copy) + + error_records = list(records_iter) + _PrintSummary(paths, error_records) + + exit_code = 0 + + # If there are any errors + if error_records: + exit_code += 1 + + # If there are any new errors + if [r for r in error_records if r.new_error]: + exit_code += 2 + + if exit_code: + if FLAGS.summary: + _PrintFileSummary(paths, error_records) + + if FLAGS.beep: + # Make a beep noise. + sys.stdout.write(chr(7)) + + # Write out instructions for using fixjsstyle script to fix some of the + # reported errors. + fix_args = [] + for flag in sys.argv[1:]: + for f in GJSLINT_ONLY_FLAGS: + if flag.startswith(f): + break + else: + fix_args.append(flag) + + if not FLAGS.quiet: + print """ +Some of the errors reported by GJsLint may be auto-fixable using the script +fixjsstyle. Please double check any changes it makes and report any bugs. The +script can be run by executing: + +fixjsstyle %s """ % ' '.join(fix_args) + + if FLAGS.time: + print 'Done in %s.' % _FormatTime(time.time() - start_time) + + sys.exit(exit_code) + + +if __name__ == '__main__': + main() diff --git a/tools/closure_linter/build/lib/closure_linter/indentation.py b/tools/closure_linter/build/lib/closure_linter/indentation.py new file mode 100644 index 0000000..d48ad2b --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/indentation.py @@ -0,0 +1,617 @@ +#!/usr/bin/env python +# Copyright 2010 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
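Editor's sketch, not part of the patch: gjslint.main() above encodes its result as a small bitmask exit code, adding 1 when any error is found and 2 when any of the NEW_ERRORS codes is found, so callers can tell the two situations apart. A hedged restatement of that logic (the helper name is hypothetical):

    # Hedged sketch of the exit-code logic in gjslint.main() above.
    def _SketchExitCode(error_records):  # hypothetical helper name
      exit_code = 0
      if error_records:                              # any errors at all
        exit_code += 1
      if [r for r in error_records if r.new_error]:  # any "new" errors
        exit_code += 2
      return exit_code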
+ +"""Methods for checking EcmaScript files for indentation issues.""" + +__author__ = ('robbyw@google.com (Robert Walker)') + +import gflags as flags + +from closure_linter import ecmametadatapass +from closure_linter import errors +from closure_linter import javascripttokens +from closure_linter import tokenutil +from closure_linter.common import error +from closure_linter.common import position + + +flags.DEFINE_boolean('debug_indentation', False, + 'Whether to print debugging information for indentation.') + + +# Shorthand +Context = ecmametadatapass.EcmaContext +Error = error.Error +Position = position.Position +Type = javascripttokens.JavaScriptTokenType + + +# The general approach: +# +# 1. Build a stack of tokens that can affect indentation. +# For each token, we determine if it is a block or continuation token. +# Some tokens need to be temporarily overwritten in case they are removed +# before the end of the line. +# Much of the work here is determining which tokens to keep on the stack +# at each point. Operators, for example, should be removed once their +# expression or line is gone, while parentheses must stay until the matching +# end parentheses is found. +# +# 2. Given that stack, determine the allowable indentations. +# Due to flexible indentation rules in JavaScript, there may be many +# allowable indentations for each stack. We follows the general +# "no false positives" approach of GJsLint and build the most permissive +# set possible. + + +class TokenInfo(object): + """Stores information about a token. + + Attributes: + token: The token + is_block: Whether the token represents a block indentation. + is_transient: Whether the token should be automatically removed without + finding a matching end token. + overridden_by: TokenInfo for a token that overrides the indentation that + this token would require. + is_permanent_override: Whether the override on this token should persist + even after the overriding token is removed from the stack. For example: + x([ + 1], + 2); + needs this to be set so the last line is not required to be a continuation + indent. + line_number: The effective line number of this token. Will either be the + actual line number or the one before it in the case of a mis-wrapped + operator. + """ + + def __init__(self, token, is_block=False): + """Initializes a TokenInfo object. + + Args: + token: The token + is_block: Whether the token represents a block indentation. + """ + self.token = token + self.overridden_by = None + self.is_permanent_override = False + self.is_block = is_block + self.is_transient = not is_block and token.type not in ( + Type.START_PAREN, Type.START_PARAMETERS) + self.line_number = token.line_number + + def __repr__(self): + result = '\n %s' % self.token + if self.overridden_by: + result = '%s OVERRIDDEN [by "%s"]' % ( + result, self.overridden_by.token.string) + result += ' {is_block: %s, is_transient: %s}' % ( + self.is_block, self.is_transient) + return result + + +class IndentationRules(object): + """EmcaScript indentation rules. + + Can be used to find common indentation errors in JavaScript, ActionScript and + other Ecma like scripting languages. + """ + + def __init__(self): + """Initializes the IndentationRules checker.""" + self._stack = [] + + # Map from line number to number of characters it is off in indentation. 
+ self._start_index_offset = {} + + def Finalize(self): + if self._stack: + old_stack = self._stack + self._stack = [] + raise Exception('INTERNAL ERROR: indentation stack is not empty: %r' % + old_stack) + + def CheckToken(self, token, state): + """Checks a token for indentation errors. + + Args: + token: The current token under consideration + state: Additional information about the current tree state + + Returns: + An error array [error code, error string, error token] if the token is + improperly indented, or None if indentation is correct. + """ + + token_type = token.type + indentation_errors = [] + stack = self._stack + is_first = self._IsFirstNonWhitespaceTokenInLine(token) + + # Add tokens that could decrease indentation before checking. + if token_type == Type.END_PAREN: + self._PopTo(Type.START_PAREN) + + elif token_type == Type.END_PARAMETERS: + self._PopTo(Type.START_PARAMETERS) + + elif token_type == Type.END_BRACKET: + self._PopTo(Type.START_BRACKET) + + elif token_type == Type.END_BLOCK: + start_token = self._PopTo(Type.START_BLOCK) + # Check for required goog.scope comment. + if start_token: + goog_scope = tokenutil.GoogScopeOrNoneFromStartBlock(start_token.token) + if goog_scope is not None: + if not token.line.endswith('; // goog.scope\n'): + if (token.line.find('//') > -1 and + token.line.find('goog.scope') > + token.line.find('//')): + indentation_errors.append([ + errors.MALFORMED_END_OF_SCOPE_COMMENT, + ('Malformed end of goog.scope comment. Please use the ' + 'exact following syntax to close the scope:\n' + '}); // goog.scope'), + token, + Position(token.start_index, token.length)]) + else: + indentation_errors.append([ + errors.MISSING_END_OF_SCOPE_COMMENT, + ('Missing comment for end of goog.scope which opened at line ' + '%d. End the scope with:\n' + '}); // goog.scope' % + (start_token.line_number)), + token, + Position(token.start_index, token.length)]) + + elif token_type == Type.KEYWORD and token.string in ('case', 'default'): + self._Add(self._PopTo(Type.START_BLOCK)) + + elif token_type == Type.SEMICOLON: + self._PopTransient() + + if (is_first and + token_type not in (Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT)): + if flags.FLAGS.debug_indentation: + print 'Line #%d: stack %r' % (token.line_number, stack) + + # Ignore lines that start in JsDoc since we don't check them properly yet. + # TODO(robbyw): Support checking JsDoc indentation. + # Ignore lines that start as multi-line strings since indentation is N/A. + # Ignore lines that start with operators since we report that already. + # Ignore lines with tabs since we report that already. + expected = self._GetAllowableIndentations() + actual = self._GetActualIndentation(token) + + # Special case comments describing else, case, and default. Allow them + # to outdent to the parent block. + if token_type in Type.COMMENT_TYPES: + next_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES) + if next_code and next_code.type == Type.END_BLOCK: + next_code = tokenutil.SearchExcept(next_code, Type.NON_CODE_TYPES) + if next_code and next_code.string in ('else', 'case', 'default'): + # TODO(robbyw): This almost certainly introduces false negatives. 
+ expected |= self._AddToEach(expected, -2) + + if actual >= 0 and actual not in expected: + expected = sorted(expected) + indentation_errors.append([ + errors.WRONG_INDENTATION, + 'Wrong indentation: expected any of {%s} but got %d' % ( + ', '.join('%d' % x for x in expected if x < 80), actual), + token, + Position(actual, expected[0])]) + self._start_index_offset[token.line_number] = expected[0] - actual + + # Add tokens that could increase indentation. + if token_type == Type.START_BRACKET: + self._Add(TokenInfo( + token=token, + is_block=token.metadata.context.type == Context.ARRAY_LITERAL)) + + elif token_type == Type.START_BLOCK or token.metadata.is_implied_block: + self._Add(TokenInfo(token=token, is_block=True)) + + elif token_type in (Type.START_PAREN, Type.START_PARAMETERS): + self._Add(TokenInfo(token=token, is_block=False)) + + elif token_type == Type.KEYWORD and token.string == 'return': + self._Add(TokenInfo(token)) + + elif not token.IsLastInLine() and ( + token.IsAssignment() or token.IsOperator('?')): + self._Add(TokenInfo(token=token)) + + # Handle implied block closes. + if token.metadata.is_implied_block_close: + self._PopToImpliedBlock() + + # Add some tokens only if they appear at the end of the line. + is_last = self._IsLastCodeInLine(token) + if is_last: + next_code_token = tokenutil.GetNextCodeToken(token) + # Increase required indentation if this is an overlong wrapped statement + # ending in an operator. + if token_type == Type.OPERATOR: + if token.string == ':': + if stack and stack[-1].token.string == '?': + # When a ternary : is on a different line than its '?', it doesn't + # add indentation. + if token.line_number == stack[-1].token.line_number: + self._Add(TokenInfo(token)) + elif token.metadata.context.type == Context.CASE_BLOCK: + # Pop transient tokens from say, line continuations, e.g., + # case x. + # y: + # Want to pop the transient 4 space continuation indent. + self._PopTransient() + # Starting the body of the case statement, which is a type of + # block. + self._Add(TokenInfo(token=token, is_block=True)) + elif token.metadata.context.type == Context.LITERAL_ELEMENT: + # When in an object literal, acts as operator indicating line + # continuations. + self._Add(TokenInfo(token)) + else: + # ':' might also be a statement label, no effect on indentation in + # this case. + pass + + elif token.string != ',': + self._Add(TokenInfo(token)) + else: + # The token is a comma. + if token.metadata.context.type == Context.VAR: + self._Add(TokenInfo(token)) + elif token.metadata.context.type != Context.PARAMETERS: + self._PopTransient() + # Increase required indentation if this is the end of a statement that's + # continued with an operator on the next line (e.g. the '.'). + elif (next_code_token and next_code_token.type == Type.OPERATOR and + not next_code_token.metadata.IsUnaryOperator()): + self._Add(TokenInfo(token)) + elif token_type == Type.PARAMETERS and token.string.endswith(','): + # Parameter lists. + self._Add(TokenInfo(token)) + elif token.IsKeyword('var'): + self._Add(TokenInfo(token)) + elif token.metadata.is_implied_semicolon: + self._PopTransient() + elif token.IsAssignment(): + self._Add(TokenInfo(token)) + + return indentation_errors + + def _AddToEach(self, original, amount): + """Returns a new set with the given amount added to each element. + + Args: + original: The original set of numbers + amount: The amount to add to each element + + Returns: + A new set containing each element of the original set added to the amount. 
+ """ + return set([x + amount for x in original]) + + _HARD_STOP_TYPES = (Type.START_PAREN, Type.START_PARAMETERS, + Type.START_BRACKET) + + _HARD_STOP_STRINGS = ('return', '?') + + def _IsHardStop(self, token): + """Determines if the given token can have a hard stop after it. + + Args: + token: token to examine + + Returns: + Whether the token can have a hard stop after it. + + Hard stops are indentations defined by the position of another token as in + indentation lined up with return, (, [, and ?. + """ + return (token.type in self._HARD_STOP_TYPES or + token.string in self._HARD_STOP_STRINGS or + token.IsAssignment()) + + def _GetAllowableIndentations(self): + """Computes the set of allowable indentations. + + Returns: + The set of allowable indentations, given the current stack. + """ + expected = set([0]) + hard_stops = set([]) + + # Whether the tokens are still in the same continuation, meaning additional + # indentation is optional. As an example: + # x = 5 + + # 6 + + # 7; + # The second '+' does not add any required indentation. + in_same_continuation = False + + for token_info in self._stack: + token = token_info.token + + # Handle normal additive indentation tokens. + if not token_info.overridden_by and token.string != 'return': + if token_info.is_block: + expected = self._AddToEach(expected, 2) + hard_stops = self._AddToEach(hard_stops, 2) + in_same_continuation = False + elif in_same_continuation: + expected |= self._AddToEach(expected, 4) + hard_stops |= self._AddToEach(hard_stops, 4) + else: + expected = self._AddToEach(expected, 4) + hard_stops |= self._AddToEach(hard_stops, 4) + in_same_continuation = True + + # Handle hard stops after (, [, return, =, and ? + if self._IsHardStop(token): + override_is_hard_stop = (token_info.overridden_by and + self._IsHardStop( + token_info.overridden_by.token)) + if token.type == Type.START_PAREN and token.previous: + # For someFunction(...) we allow to indent at the beginning of the + # identifier +4 + prev = token.previous + if (prev.type == Type.IDENTIFIER and + prev.line_number == token.line_number): + hard_stops.add(prev.start_index + 4) + if not override_is_hard_stop: + start_index = token.start_index + if token.line_number in self._start_index_offset: + start_index += self._start_index_offset[token.line_number] + if (token.type in (Type.START_PAREN, Type.START_PARAMETERS) and + not token_info.overridden_by): + hard_stops.add(start_index + 1) + + elif token.string == 'return' and not token_info.overridden_by: + hard_stops.add(start_index + 7) + + elif token.type == Type.START_BRACKET: + hard_stops.add(start_index + 1) + + elif token.IsAssignment(): + hard_stops.add(start_index + len(token.string) + 1) + + elif token.IsOperator('?') and not token_info.overridden_by: + hard_stops.add(start_index + 2) + + return (expected | hard_stops) or set([0]) + + def _GetActualIndentation(self, token): + """Gets the actual indentation of the line containing the given token. + + Args: + token: Any token on the line. + + Returns: + The actual indentation of the line containing the given token. Returns + -1 if this line should be ignored due to the presence of tabs. + """ + # Move to the first token in the line + token = tokenutil.GetFirstTokenInSameLine(token) + + # If it is whitespace, it is the indentation. 
+ if token.type == Type.WHITESPACE: + if token.string.find('\t') >= 0: + return -1 + else: + return len(token.string) + elif token.type == Type.PARAMETERS: + return len(token.string) - len(token.string.lstrip()) + else: + return 0 + + def _IsFirstNonWhitespaceTokenInLine(self, token): + """Determines if the given token is the first non-space token on its line. + + Args: + token: The token. + + Returns: + True if the token is the first non-whitespace token on its line. + """ + if token.type in (Type.WHITESPACE, Type.BLANK_LINE): + return False + if token.IsFirstInLine(): + return True + return (token.previous and token.previous.IsFirstInLine() and + token.previous.type == Type.WHITESPACE) + + def _IsLastCodeInLine(self, token): + """Determines if the given token is the last code token on its line. + + Args: + token: The token. + + Returns: + True if the token is the last code token on its line. + """ + if token.type in Type.NON_CODE_TYPES: + return False + start_token = token + while True: + token = token.next + if not token or token.line_number != start_token.line_number: + return True + if token.type not in Type.NON_CODE_TYPES: + return False + + def _AllFunctionPropertyAssignTokens(self, start_token, end_token): + """Checks if tokens are (likely) a valid function property assignment. + + Args: + start_token: Start of the token range. + end_token: End of the token range. + + Returns: + True if all tokens between start_token and end_token are legal tokens + within a function declaration and assignment into a property. + """ + for token in tokenutil.GetTokenRange(start_token, end_token): + fn_decl_tokens = (Type.FUNCTION_DECLARATION, + Type.PARAMETERS, + Type.START_PARAMETERS, + Type.END_PARAMETERS, + Type.END_PAREN) + if (token.type not in fn_decl_tokens and + token.IsCode() and + not tokenutil.IsIdentifierOrDot(token) and + not token.IsAssignment() and + not (token.type == Type.OPERATOR and token.string == ',')): + return False + return True + + def _Add(self, token_info): + """Adds the given token info to the stack. + + Args: + token_info: The token information to add. + """ + if self._stack and self._stack[-1].token == token_info.token: + # Don't add the same token twice. + return + + if token_info.is_block or token_info.token.type == Type.START_PAREN: + scope_token = tokenutil.GoogScopeOrNoneFromStartBlock(token_info.token) + token_info.overridden_by = TokenInfo(scope_token) if scope_token else None + + if (token_info.token.type == Type.START_BLOCK and + token_info.token.metadata.context.type == Context.BLOCK): + # Handle function() {} assignments: their block contents get special + # treatment and are allowed to just indent by two whitespace. + # For example + # long.long.name = function( + # a) { + # In this case the { and the = are on different lines. But the + # override should still apply for all previous stack tokens that are + # part of an assignment of a block. 
+ + has_assignment = any(x for x in self._stack if x.token.IsAssignment()) + if has_assignment: + last_token = token_info.token.previous + for stack_info in reversed(self._stack): + if (last_token and + not self._AllFunctionPropertyAssignTokens(stack_info.token, + last_token)): + break + stack_info.overridden_by = token_info + stack_info.is_permanent_override = True + last_token = stack_info.token + + index = len(self._stack) - 1 + while index >= 0: + stack_info = self._stack[index] + stack_token = stack_info.token + + if stack_info.line_number == token_info.line_number: + # In general, tokens only override each other when they are on + # the same line. + stack_info.overridden_by = token_info + if (token_info.token.type == Type.START_BLOCK and + (stack_token.IsAssignment() or + stack_token.type in (Type.IDENTIFIER, Type.START_PAREN))): + # Multi-line blocks have lasting overrides, as in: + # callFn({ + # a: 10 + # }, + # 30); + # b/11450054. If a string is not closed properly then close_block + # could be null. + close_block = token_info.token.metadata.context.end_token + stack_info.is_permanent_override = close_block and ( + close_block.line_number != token_info.token.line_number) + else: + break + index -= 1 + + self._stack.append(token_info) + + def _Pop(self): + """Pops the top token from the stack. + + Returns: + The popped token info. + """ + token_info = self._stack.pop() + if token_info.token.type not in (Type.START_BLOCK, Type.START_BRACKET): + # Remove any temporary overrides. + self._RemoveOverrides(token_info) + else: + # For braces and brackets, which can be object and array literals, remove + # overrides when the literal is closed on the same line. + token_check = token_info.token + same_type = token_check.type + goal_type = None + if token_info.token.type == Type.START_BRACKET: + goal_type = Type.END_BRACKET + else: + goal_type = Type.END_BLOCK + line_number = token_info.token.line_number + count = 0 + while token_check and token_check.line_number == line_number: + if token_check.type == goal_type: + count -= 1 + if not count: + self._RemoveOverrides(token_info) + break + if token_check.type == same_type: + count += 1 + token_check = token_check.next + return token_info + + def _PopToImpliedBlock(self): + """Pops the stack until an implied block token is found.""" + while not self._Pop().token.metadata.is_implied_block: + pass + + def _PopTo(self, stop_type): + """Pops the stack until a token of the given type is popped. + + Args: + stop_type: The type of token to pop to. + + Returns: + The token info of the given type that was popped. + """ + last = None + while True: + last = self._Pop() + if last.token.type == stop_type: + break + return last + + def _RemoveOverrides(self, token_info): + """Marks any token that was overridden by this token as active again. + + Args: + token_info: The token that is being removed from the stack. + """ + for stack_token in self._stack: + if (stack_token.overridden_by == token_info and + not stack_token.is_permanent_override): + stack_token.overridden_by = None + + def _PopTransient(self): + """Pops all transient tokens - i.e. 
not blocks, literals, or parens."""
+    while self._stack and self._stack[-1].is_transient:
+      self._Pop()
diff --git a/tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py b/tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py
new file mode 100644
index 0000000..9578009
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py
@@ -0,0 +1,754 @@
+#!/usr/bin/env python
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Methods for checking JS files for common style guide violations.
+
+These style guide violations should only apply to JavaScript and not to other
+Ecma scripting languages.
+"""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+              'ajp@google.com (Andy Perelson)',
+              'jacobr@google.com (Jacob Richman)')
+
+import re
+
+from closure_linter import ecmalintrules
+from closure_linter import error_check
+from closure_linter import errors
+from closure_linter import javascripttokenizer
+from closure_linter import javascripttokens
+from closure_linter import requireprovidesorter
+from closure_linter import tokenutil
+from closure_linter.common import error
+from closure_linter.common import position
+
+# Shorthand
+Error = error.Error
+Position = position.Position
+Rule = error_check.Rule
+Type = javascripttokens.JavaScriptTokenType
+
+
+class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
+  """JavaScript lint rules that catch JavaScript specific style errors."""
+
+  def __init__(self, namespaces_info):
+    """Initializes a JavaScriptLintRules instance."""
+    ecmalintrules.EcmaScriptLintRules.__init__(self)
+    self._namespaces_info = namespaces_info
+    self._declared_private_member_tokens = {}
+    self._declared_private_members = set()
+    self._used_private_members = set()
+    # A stack of dictionaries, one for each function scope entered. Each
+    # dictionary is keyed by an identifier that defines a local variable and has
+    # a token as its value.
+    self._unused_local_variables_by_scope = []
+
+  def HandleMissingParameterDoc(self, token, param_name):
+    """Handle errors associated with a parameter missing a param tag."""
+    self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
+                      'Missing docs for parameter: "%s"' % param_name, token)
+
+  # pylint: disable=too-many-statements
+  def CheckToken(self, token, state):
+    """Checks a token, given the current parser_state, for warnings and errors.
+
+    Args:
+      token: The current token under consideration
+      state: parser_state object that indicates the current state in the page
+    """
+
+    # Call the base class's CheckToken function.
+    super(JavaScriptLintRules, self).CheckToken(token, state)
+
+    # Store some convenience variables
+    namespaces_info = self._namespaces_info
+
+    if error_check.ShouldCheck(Rule.UNUSED_LOCAL_VARIABLES):
+      self._CheckUnusedLocalVariables(token, state)
+
+    if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
+      # Find all assignments to private members.
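+      # For example, "this.cache_ = {};" produces a SIMPLE_LVALUE token whose
+      # identifier ends with a single underscore, so "cache_" is recorded below
+      # as a declared private member (unless a doc comment suppresses it).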
+ if token.type == Type.SIMPLE_LVALUE: + identifier = token.string + if identifier.endswith('_') and not identifier.endswith('__'): + doc_comment = state.GetDocComment() + suppressed = doc_comment and ( + 'underscore' in doc_comment.suppressions or + 'unusedPrivateMembers' in doc_comment.suppressions) + if not suppressed: + # Look for static members defined on a provided namespace. + if namespaces_info: + namespace = namespaces_info.GetClosurizedNamespace(identifier) + provided_namespaces = namespaces_info.GetProvidedNamespaces() + else: + namespace = None + provided_namespaces = set() + + # Skip cases of this.something_.somethingElse_. + regex = re.compile(r'^this\.[a-zA-Z_]+$') + if namespace in provided_namespaces or regex.match(identifier): + variable = identifier.split('.')[-1] + self._declared_private_member_tokens[variable] = token + self._declared_private_members.add(variable) + elif not identifier.endswith('__'): + # Consider setting public members of private members to be a usage. + for piece in identifier.split('.'): + if piece.endswith('_'): + self._used_private_members.add(piece) + + # Find all usages of private members. + if token.type == Type.IDENTIFIER: + for piece in token.string.split('.'): + if piece.endswith('_'): + self._used_private_members.add(piece) + + if token.type == Type.DOC_FLAG: + flag = token.attached_object + + if flag.flag_type == 'param' and flag.name_token is not None: + self._CheckForMissingSpaceBeforeToken( + token.attached_object.name_token) + + if flag.type is not None and flag.name is not None: + if error_check.ShouldCheck(Rule.VARIABLE_ARG_MARKER): + # Check for variable arguments marker in type. + if flag.jstype.IsVarArgsType() and flag.name != 'var_args': + self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_NAME, + 'Variable length argument %s must be renamed ' + 'to var_args.' % flag.name, + token) + elif not flag.jstype.IsVarArgsType() and flag.name == 'var_args': + self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_TYPE, + 'Variable length argument %s type must start ' + 'with \'...\'.' % flag.name, + token) + + if error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER): + # Check for optional marker in type. + if (flag.jstype.opt_arg and + not flag.name.startswith('opt_')): + self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX, + 'Optional parameter name %s must be prefixed ' + 'with opt_.' % flag.name, + token) + elif (not flag.jstype.opt_arg and + flag.name.startswith('opt_')): + self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE, + 'Optional parameter %s type must end with =.' % + flag.name, + token) + + if flag.flag_type in state.GetDocFlag().HAS_TYPE: + # Check for both missing type token and empty type braces '{}' + # Missing suppress types are reported separately and we allow enums, + # const, private, public and protected without types. 
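+        # For example, "@param foo The foo." (no braces at all) and
+        # "@param {} foo The foo." (empty braces) both report
+        # MISSING_JSDOC_TAG_TYPE below.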
+        if (flag.flag_type not in state.GetDocFlag().CAN_OMIT_TYPE
+            and (not flag.jstype or flag.jstype.IsEmpty())):
+          self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
+                            'Missing type in %s tag' % token.string, token)
+
+        elif flag.name_token and flag.type_end_token and tokenutil.Compare(
+            flag.type_end_token, flag.name_token) > 0:
+          self._HandleError(
+              errors.OUT_OF_ORDER_JSDOC_TAG_TYPE,
+              'Type should be immediately after %s tag' % token.string,
+              token)
+
+    elif token.type == Type.DOUBLE_QUOTE_STRING_START:
+      next_token = token.next
+      while next_token.type == Type.STRING_TEXT:
+        if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search(
+            next_token.string):
+          break
+        next_token = next_token.next
+      else:
+        self._HandleError(
+            errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
+            'Single-quoted string preferred over double-quoted string.',
+            token,
+            position=Position.All(token.string))
+
+    elif token.type == Type.END_DOC_COMMENT:
+      doc_comment = state.GetDocComment()
+
+      # When @externs appears in a @fileoverview comment, it should trigger
+      # the same limited doc checks as a special filename like externs.js.
+      if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'):
+        self._SetLimitedDocChecks(True)
+
+      if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and
+          not self._is_html and
+          state.InTopLevel() and
+          not state.InNonScopeBlock()):
+
+        # Check if we're in a fileoverview or constructor JsDoc.
+        is_constructor = (
+            doc_comment.HasFlag('constructor') or
+            doc_comment.HasFlag('interface'))
+        # @fileoverview is an optional tag so if the docstring is the first
+        # token in the file treat it as a file level docstring.
+        is_file_level_comment = (
+            doc_comment.HasFlag('fileoverview') or
+            not doc_comment.start_token.previous)
+
+        # If the comment is not a file overview, and it does not immediately
+        # precede some code, skip it.
+        # NOTE: The tokenutil methods are not used here because of their
+        # behavior at the top of a file.
+        next_token = token.next
+        if (not next_token or
+            (not is_file_level_comment and
+             next_token.type in Type.NON_CODE_TYPES)):
+          return
+
+        # Don't require extra blank lines around suppression of extra
+        # goog.require errors.
+        if (doc_comment.SuppressionOnly() and
+            next_token.type == Type.IDENTIFIER and
+            next_token.string in ['goog.provide', 'goog.require']):
+          return
+
+        # Find the start of this block (include comments above the block, unless
+        # this is a file overview).
+        block_start = doc_comment.start_token
+        if not is_file_level_comment:
+          token = block_start.previous
+          while token and token.type in Type.COMMENT_TYPES:
+            block_start = token
+            token = token.previous
+
+        # Count the number of blank lines before this block.
+        blank_lines = 0
+        token = block_start.previous
+        while token and token.type in [Type.WHITESPACE, Type.BLANK_LINE]:
+          if token.type == Type.BLANK_LINE:
+            # A blank line.
+            blank_lines += 1
+          elif token.type == Type.WHITESPACE and not token.line.strip():
+            # A line with only whitespace on it.
+            blank_lines += 1
+          token = token.previous
+
+        # Log errors.
+        error_message = False
+        expected_blank_lines = 0
+
+        # Only need blank line before file overview if it is not the beginning
+        # of the file, e.g. copyright is first.
+        if is_file_level_comment and blank_lines == 0 and block_start.previous:
+          error_message = 'Should have a blank line before a file overview.'
+ expected_blank_lines = 1 + elif is_constructor and blank_lines != 3: + error_message = ( + 'Should have 3 blank lines before a constructor/interface.') + expected_blank_lines = 3 + elif (not is_file_level_comment and not is_constructor and + blank_lines != 2): + error_message = 'Should have 2 blank lines between top-level blocks.' + expected_blank_lines = 2 + + if error_message: + self._HandleError( + errors.WRONG_BLANK_LINE_COUNT, error_message, + block_start, position=Position.AtBeginning(), + fix_data=expected_blank_lines - blank_lines) + + elif token.type == Type.END_BLOCK: + if state.InFunction() and state.IsFunctionClose(): + is_immediately_called = (token.next and + token.next.type == Type.START_PAREN) + + function = state.GetFunction() + if not self._limited_doc_checks: + if (function.has_return and function.doc and + not is_immediately_called and + not function.doc.HasFlag('return') and + not function.doc.InheritsDocumentation() and + not function.doc.HasFlag('constructor')): + # Check for proper documentation of return value. + self._HandleError( + errors.MISSING_RETURN_DOCUMENTATION, + 'Missing @return JsDoc in function with non-trivial return', + function.doc.end_token, position=Position.AtBeginning()) + elif (not function.has_return and + not function.has_throw and + function.doc and + function.doc.HasFlag('return') and + not state.InInterfaceMethod()): + flag = function.doc.GetFlag('return') + valid_no_return_names = ['undefined', 'void', '*'] + invalid_return = flag.jstype is None or not any( + sub_type.identifier in valid_no_return_names + for sub_type in flag.jstype.IterTypeGroup()) + + if invalid_return: + self._HandleError( + errors.UNNECESSARY_RETURN_DOCUMENTATION, + 'Found @return JsDoc on function that returns nothing', + flag.flag_token, position=Position.AtBeginning()) + + # b/4073735. Method in object literal definition of prototype can + # safely reference 'this'. + prototype_object_literal = False + block_start = None + previous_code = None + previous_previous_code = None + + # Search for cases where prototype is defined as object literal. + # previous_previous_code + # | previous_code + # | | block_start + # | | | + # a.b.prototype = { + # c : function() { + # this.d = 1; + # } + # } + + # If in object literal, find first token of block so to find previous + # tokens to check above condition. + if state.InObjectLiteral(): + block_start = state.GetCurrentBlockStart() + + # If an object literal then get previous token (code type). For above + # case it should be '='. + if block_start: + previous_code = tokenutil.SearchExcept(block_start, + Type.NON_CODE_TYPES, + reverse=True) + + # If previous token to block is '=' then get its previous token. + if previous_code and previous_code.IsOperator('='): + previous_previous_code = tokenutil.SearchExcept(previous_code, + Type.NON_CODE_TYPES, + reverse=True) + + # If variable/token before '=' ends with '.prototype' then its above + # case of prototype defined with object literal. + prototype_object_literal = (previous_previous_code and + previous_previous_code.string.endswith( + '.prototype')) + + if (function.has_this and function.doc and + not function.doc.HasFlag('this') and + not function.is_constructor and + not function.is_interface and + '.prototype.' not in function.name and + not prototype_object_literal): + self._HandleError( + errors.MISSING_JSDOC_TAG_THIS, + 'Missing @this JsDoc in function referencing "this". 
(' + 'this usually means you are trying to reference "this" in ' + 'a static function, or you have forgotten to mark a ' + 'constructor with @constructor)', + function.doc.end_token, position=Position.AtBeginning()) + + elif token.type == Type.IDENTIFIER: + if token.string == 'goog.inherits' and not state.InFunction(): + if state.GetLastNonSpaceToken().line_number == token.line_number: + self._HandleError( + errors.MISSING_LINE, + 'Missing newline between constructor and goog.inherits', + token, + position=Position.AtBeginning()) + + extra_space = state.GetLastNonSpaceToken().next + while extra_space != token: + if extra_space.type == Type.BLANK_LINE: + self._HandleError( + errors.EXTRA_LINE, + 'Extra line between constructor and goog.inherits', + extra_space) + extra_space = extra_space.next + + # TODO(robbyw): Test the last function was a constructor. + # TODO(robbyw): Test correct @extends and @implements documentation. + + elif (token.string == 'goog.provide' and + not state.InFunction() and + namespaces_info is not None): + namespace = tokenutil.GetStringAfterToken(token) + + # Report extra goog.provide statement. + if not namespace or namespaces_info.IsExtraProvide(token): + if not namespace: + msg = 'Empty namespace in goog.provide' + else: + msg = 'Unnecessary goog.provide: ' + namespace + + # Hint to user if this is a Test namespace. + if namespace.endswith('Test'): + msg += (' *Test namespaces must be mentioned in the ' + 'goog.setTestOnly() call') + + self._HandleError( + errors.EXTRA_GOOG_PROVIDE, + msg, + token, position=Position.AtBeginning()) + + if namespaces_info.IsLastProvide(token): + # Report missing provide statements after the last existing provide. + missing_provides = namespaces_info.GetMissingProvides() + if missing_provides: + self._ReportMissingProvides( + missing_provides, + tokenutil.GetLastTokenInSameLine(token).next, + False) + + # If there are no require statements, missing requires should be + # reported after the last provide. + if not namespaces_info.GetRequiredNamespaces(): + missing_requires, illegal_alias_statements = ( + namespaces_info.GetMissingRequires()) + if missing_requires: + self._ReportMissingRequires( + missing_requires, + tokenutil.GetLastTokenInSameLine(token).next, + True) + if illegal_alias_statements: + self._ReportIllegalAliasStatement(illegal_alias_statements) + + elif (token.string == 'goog.require' and + not state.InFunction() and + namespaces_info is not None): + namespace = tokenutil.GetStringAfterToken(token) + + # If there are no provide statements, missing provides should be + # reported before the first require. + if (namespaces_info.IsFirstRequire(token) and + not namespaces_info.GetProvidedNamespaces()): + missing_provides = namespaces_info.GetMissingProvides() + if missing_provides: + self._ReportMissingProvides( + missing_provides, + tokenutil.GetFirstTokenInSameLine(token), + True) + + # Report extra goog.require statement. + if not namespace or namespaces_info.IsExtraRequire(token): + if not namespace: + msg = 'Empty namespace in goog.require' + else: + msg = 'Unnecessary goog.require: ' + namespace + + self._HandleError( + errors.EXTRA_GOOG_REQUIRE, + msg, + token, position=Position.AtBeginning()) + + # Report missing goog.require statements. 
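+      # For example, a file that calls goog.dom.getElement() without requiring
+      # goog.dom gets the missing goog.require reported immediately after the
+      # last existing goog.require statement.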
+ if namespaces_info.IsLastRequire(token): + missing_requires, illegal_alias_statements = ( + namespaces_info.GetMissingRequires()) + if missing_requires: + self._ReportMissingRequires( + missing_requires, + tokenutil.GetLastTokenInSameLine(token).next, + False) + if illegal_alias_statements: + self._ReportIllegalAliasStatement(illegal_alias_statements) + + elif token.type == Type.OPERATOR: + last_in_line = token.IsLastInLine() + # If the token is unary and appears to be used in a unary context + # it's ok. Otherwise, if it's at the end of the line or immediately + # before a comment, it's ok. + # Don't report an error before a start bracket - it will be reported + # by that token's space checks. + if (not token.metadata.IsUnaryOperator() and not last_in_line + and not token.next.IsComment() + and not token.next.IsOperator(',') + and not tokenutil.IsDot(token) + and token.next.type not in (Type.WHITESPACE, Type.END_PAREN, + Type.END_BRACKET, Type.SEMICOLON, + Type.START_BRACKET)): + self._HandleError( + errors.MISSING_SPACE, + 'Missing space after "%s"' % token.string, + token, + position=Position.AtEnd(token.string)) + elif token.type == Type.WHITESPACE: + first_in_line = token.IsFirstInLine() + last_in_line = token.IsLastInLine() + # Check whitespace length if it's not the first token of the line and + # if it's not immediately before a comment. + if not last_in_line and not first_in_line and not token.next.IsComment(): + # Ensure there is no space after opening parentheses. + if (token.previous.type in (Type.START_PAREN, Type.START_BRACKET, + Type.FUNCTION_NAME) + or token.next.type == Type.START_PARAMETERS): + self._HandleError( + errors.EXTRA_SPACE, + 'Extra space after "%s"' % token.previous.string, + token, + position=Position.All(token.string)) + elif token.type == Type.SEMICOLON: + previous_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, + reverse=True) + if not previous_token: + self._HandleError( + errors.REDUNDANT_SEMICOLON, + 'Semicolon without any statement', + token, + position=Position.AtEnd(token.string)) + elif (previous_token.type == Type.KEYWORD and + previous_token.string not in ['break', 'continue', 'return']): + self._HandleError( + errors.REDUNDANT_SEMICOLON, + ('Semicolon after \'%s\' without any statement.' + ' Looks like an error.' % previous_token.string), + token, + position=Position.AtEnd(token.string)) + + def _CheckUnusedLocalVariables(self, token, state): + """Checks for unused local variables in function blocks. + + Args: + token: The token to check. + state: The state tracker. + """ + # We don't use state.InFunction because that disregards scope functions. + in_function = state.FunctionDepth() > 0 + if token.type == Type.SIMPLE_LVALUE or token.type == Type.IDENTIFIER: + if in_function: + identifier = token.string + # Check whether the previous token was var. + previous_code_token = tokenutil.CustomSearch( + token, + lambda t: t.type not in Type.NON_CODE_TYPES, + reverse=True) + if previous_code_token and previous_code_token.IsKeyword('var'): + # Add local variable declaration to the top of the unused locals + # stack. + self._unused_local_variables_by_scope[-1][identifier] = token + elif token.type == Type.IDENTIFIER: + # This covers most cases where the variable is used as an identifier. + self._MarkLocalVariableUsed(token.string) + elif token.type == Type.SIMPLE_LVALUE and '.' in identifier: + # This covers cases where a value is assigned to a property of the + # variable. 
+ self._MarkLocalVariableUsed(token.string) + elif token.type == Type.START_BLOCK: + if in_function and state.IsFunctionOpen(): + # Push a new map onto the stack + self._unused_local_variables_by_scope.append({}) + elif token.type == Type.END_BLOCK: + if state.IsFunctionClose(): + # Pop the stack and report any remaining locals as unused. + unused_local_variables = self._unused_local_variables_by_scope.pop() + for unused_token in unused_local_variables.values(): + self._HandleError( + errors.UNUSED_LOCAL_VARIABLE, + 'Unused local variable: %s.' % unused_token.string, + unused_token) + elif token.type == Type.DOC_FLAG: + # Flags that use aliased symbols should be counted. + flag = token.attached_object + js_type = flag and flag.jstype + if flag and flag.flag_type in state.GetDocFlag().HAS_TYPE and js_type: + self._MarkAliasUsed(js_type) + + def _MarkAliasUsed(self, js_type): + """Marks aliases in a type as used. + + Recursively iterates over all subtypes in a jsdoc type annotation and + tracks usage of aliased symbols (which may be local variables). + Marks the local variable as used in the scope nearest to the current + scope that matches the given token. + + Args: + js_type: The jsdoc type, a typeannotation.TypeAnnotation object. + """ + if js_type.alias: + self._MarkLocalVariableUsed(js_type.identifier) + for sub_type in js_type.IterTypes(): + self._MarkAliasUsed(sub_type) + + def _MarkLocalVariableUsed(self, identifier): + """Marks the local variable as used in the relevant scope. + + Marks the local variable in the scope nearest to the current scope that + matches the given identifier as used. + + Args: + identifier: The identifier representing the potential usage of a local + variable. + """ + identifier = identifier.split('.', 1)[0] + # Find the first instance of the identifier in the stack of function scopes + # and mark it used. + for unused_local_variables in reversed( + self._unused_local_variables_by_scope): + if identifier in unused_local_variables: + del unused_local_variables[identifier] + break + + def _ReportMissingProvides(self, missing_provides, token, need_blank_line): + """Reports missing provide statements to the error handler. + + Args: + missing_provides: A dictionary of string(key) and integer(value) where + each string(key) is a namespace that should be provided, but is not + and integer(value) is first line number where it's required. + token: The token where the error was detected (also where the new provides + will be inserted. + need_blank_line: Whether a blank line needs to be inserted after the new + provides are inserted. May be True, False, or None, where None + indicates that the insert location is unknown. + """ + + missing_provides_msg = 'Missing the following goog.provide statements:\n' + missing_provides_msg += '\n'.join(['goog.provide(\'%s\');' % x for x in + sorted(missing_provides)]) + missing_provides_msg += '\n' + + missing_provides_msg += '\nFirst line where provided: \n' + missing_provides_msg += '\n'.join( + [' %s : line %d' % (x, missing_provides[x]) for x in + sorted(missing_provides)]) + missing_provides_msg += '\n' + + self._HandleError( + errors.MISSING_GOOG_PROVIDE, + missing_provides_msg, + token, position=Position.AtBeginning(), + fix_data=(missing_provides.keys(), need_blank_line)) + + def _ReportMissingRequires(self, missing_requires, token, need_blank_line): + """Reports missing require statements to the error handler. 
+ + Args: + missing_requires: A dictionary of string(key) and integer(value) where + each string(key) is a namespace that should be required, but is not + and integer(value) is first line number where it's required. + token: The token where the error was detected (also where the new requires + will be inserted. + need_blank_line: Whether a blank line needs to be inserted before the new + requires are inserted. May be True, False, or None, where None + indicates that the insert location is unknown. + """ + + missing_requires_msg = 'Missing the following goog.require statements:\n' + missing_requires_msg += '\n'.join(['goog.require(\'%s\');' % x for x in + sorted(missing_requires)]) + missing_requires_msg += '\n' + + missing_requires_msg += '\nFirst line where required: \n' + missing_requires_msg += '\n'.join( + [' %s : line %d' % (x, missing_requires[x]) for x in + sorted(missing_requires)]) + missing_requires_msg += '\n' + + self._HandleError( + errors.MISSING_GOOG_REQUIRE, + missing_requires_msg, + token, position=Position.AtBeginning(), + fix_data=(missing_requires.keys(), need_blank_line)) + + def _ReportIllegalAliasStatement(self, illegal_alias_statements): + """Reports alias statements that would need a goog.require.""" + for namespace, token in illegal_alias_statements.iteritems(): + self._HandleError( + errors.ALIAS_STMT_NEEDS_GOOG_REQUIRE, + 'The alias definition would need the namespace \'%s\' which is not ' + 'required through any other symbol.' % namespace, + token, position=Position.AtBeginning()) + + def Finalize(self, state): + """Perform all checks that need to occur after all lines are processed.""" + # Call the base class's Finalize function. + super(JavaScriptLintRules, self).Finalize(state) + + if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS): + # Report an error for any declared private member that was never used. + unused_private_members = (self._declared_private_members - + self._used_private_members) + + for variable in unused_private_members: + token = self._declared_private_member_tokens[variable] + self._HandleError(errors.UNUSED_PRIVATE_MEMBER, + 'Unused private member: %s.' % token.string, + token) + + # Clear state to prepare for the next file. + self._declared_private_member_tokens = {} + self._declared_private_members = set() + self._used_private_members = set() + + namespaces_info = self._namespaces_info + if namespaces_info is not None: + # If there are no provide or require statements, missing provides and + # requires should be reported on line 1. + if (not namespaces_info.GetProvidedNamespaces() and + not namespaces_info.GetRequiredNamespaces()): + missing_provides = namespaces_info.GetMissingProvides() + if missing_provides: + self._ReportMissingProvides( + missing_provides, state.GetFirstToken(), None) + + missing_requires, illegal_alias = namespaces_info.GetMissingRequires() + if missing_requires: + self._ReportMissingRequires( + missing_requires, state.GetFirstToken(), None) + if illegal_alias: + self._ReportIllegalAliasStatement(illegal_alias) + + self._CheckSortedRequiresProvides(state.GetFirstToken()) + + def _CheckSortedRequiresProvides(self, token): + """Checks that all goog.require and goog.provide statements are sorted. + + Note that this method needs to be run after missing statements are added to + preserve alphabetical order. + + Args: + token: The first token in the token stream. 
+ """ + sorter = requireprovidesorter.RequireProvideSorter() + first_provide_token = sorter.CheckProvides(token) + if first_provide_token: + new_order = sorter.GetFixedProvideString(first_provide_token) + self._HandleError( + errors.GOOG_PROVIDES_NOT_ALPHABETIZED, + 'goog.provide classes must be alphabetized. The correct code is:\n' + + new_order, + first_provide_token, + position=Position.AtBeginning(), + fix_data=first_provide_token) + + first_require_token = sorter.CheckRequires(token) + if first_require_token: + new_order = sorter.GetFixedRequireString(first_require_token) + self._HandleError( + errors.GOOG_REQUIRES_NOT_ALPHABETIZED, + 'goog.require classes must be alphabetized. The correct code is:\n' + + new_order, + first_require_token, + position=Position.AtBeginning(), + fix_data=first_require_token) + + def GetLongLineExceptions(self): + """Gets a list of regexps for lines which can be longer than the limit. + + Returns: + A list of regexps, used as matches (rather than searches). + """ + return [ + re.compile(r'(var .+\s*=\s*)?goog\.require\(.+\);?\s*$'), + re.compile(r'goog\.(provide|module|setTestOnly)\(.+\);?\s*$'), + re.compile(r'[\s/*]*@visibility\s*{.*}[\s*/]*$'), + ] diff --git a/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py b/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py new file mode 100644 index 0000000..e0a42f6 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Parser for JavaScript files.""" + + + +from closure_linter import javascripttokens +from closure_linter import statetracker +from closure_linter import tokenutil + +# Shorthand +Type = javascripttokens.JavaScriptTokenType + + +class JsDocFlag(statetracker.DocFlag): + """Javascript doc flag object. + + Attribute: + flag_type: param, return, define, type, etc. + flag_token: The flag token. + type_start_token: The first token specifying the flag JS type, + including braces. + type_end_token: The last token specifying the flag JS type, + including braces. + type: The type spec string. + jstype: The type spec, a TypeAnnotation instance. + name_token: The token specifying the flag name. + name: The flag name + description_start_token: The first token in the description. + description_end_token: The end token in the description. + description: The description. + """ + + # Please keep these lists alphabetized. + + # Some projects use the following extensions to JsDoc. + # TODO(robbyw): determine which of these, if any, should be illegal. + EXTENDED_DOC = frozenset([ + 'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link', + 'meaning', 'provideGoog', 'throws']) + + LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC + + +class JavaScriptStateTracker(statetracker.StateTracker): + """JavaScript state tracker. 
+ + Inherits from the core EcmaScript StateTracker adding extra state tracking + functionality needed for JavaScript. + """ + + def __init__(self): + """Initializes a JavaScript token stream state tracker.""" + statetracker.StateTracker.__init__(self, JsDocFlag) + + def Reset(self): + self._scope_depth = 0 + self._block_stack = [] + super(JavaScriptStateTracker, self).Reset() + + def InTopLevel(self): + """Compute whether we are at the top level in the class. + + This function call is language specific. In some languages like + JavaScript, a function is top level if it is not inside any parenthesis. + In languages such as ActionScript, a function is top level if it is directly + within a class. + + Returns: + Whether we are at the top level in the class. + """ + return self._scope_depth == self.ParenthesesDepth() + + def InFunction(self): + """Returns true if the current token is within a function. + + This js-specific override ignores goog.scope functions. + + Returns: + True if the current token is within a function. + """ + return self._scope_depth != self.FunctionDepth() + + def InNonScopeBlock(self): + """Compute whether we are nested within a non-goog.scope block. + + Returns: + True if the token is not enclosed in a block that does not originate from + a goog.scope statement. False otherwise. + """ + return self._scope_depth != self.BlockDepth() + + def GetBlockType(self, token): + """Determine the block type given a START_BLOCK token. + + Code blocks come after parameters, keywords like else, and closing parens. + + Args: + token: The current token. Can be assumed to be type START_BLOCK + Returns: + Code block type for current token. + """ + last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, reverse=True) + if last_code.type in (Type.END_PARAMETERS, Type.END_PAREN, + Type.KEYWORD) and not last_code.IsKeyword('return'): + return self.CODE + else: + return self.OBJECT_LITERAL + + def GetCurrentBlockStart(self): + """Gets the start token of current block. + + Returns: + Starting token of current block. None if not in block. + """ + if self._block_stack: + return self._block_stack[-1] + else: + return None + + def HandleToken(self, token, last_non_space_token): + """Handles the given token and updates state. + + Args: + token: The token to handle. + last_non_space_token: The last non space token encountered + """ + if token.type == Type.START_BLOCK: + self._block_stack.append(token) + if token.type == Type.IDENTIFIER and token.string == 'goog.scope': + self._scope_depth += 1 + if token.type == Type.END_BLOCK: + start_token = self._block_stack.pop() + if tokenutil.GoogScopeOrNoneFromStartBlock(start_token): + self._scope_depth -= 1 + super(JavaScriptStateTracker, self).HandleToken(token, + last_non_space_token) diff --git a/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py b/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py new file mode 100644 index 0000000..76dabd2 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py @@ -0,0 +1,278 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for the javascriptstatetracker module.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + + +import unittest as googletest + +from closure_linter import javascripttokens +from closure_linter import testutil +from closure_linter import tokenutil + + +_FUNCTION_SCRIPT = """\ +var a = 3; + +function foo(aaa, bbb, ccc) { + var b = 4; +} + + +/** + * JSDoc comment. + */ +var bar = function(ddd, eee, fff) { + +}; + + +/** + * Verify that nested functions get their proper parameters recorded. + */ +var baz = function(ggg, hhh, iii) { + var qux = function(jjj, kkk, lll) { + }; + // make sure that entering a new block does not change baz' parameters. + {}; +}; + +""" + + +class FunctionTest(googletest.TestCase): + + def testFunctionParse(self): + functions, _ = testutil.ParseFunctionsAndComments(_FUNCTION_SCRIPT) + self.assertEquals(4, len(functions)) + + # First function + function = functions[0] + self.assertEquals(['aaa', 'bbb', 'ccc'], function.parameters) + + start_token = function.start_token + end_token = function.end_token + + self.assertEquals( + javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION, + function.start_token.type) + + self.assertEquals('function', start_token.string) + self.assertEquals(3, start_token.line_number) + self.assertEquals(0, start_token.start_index) + + self.assertEquals('}', end_token.string) + self.assertEquals(5, end_token.line_number) + self.assertEquals(0, end_token.start_index) + + self.assertEquals('foo', function.name) + + self.assertIsNone(function.doc) + + # Second function + function = functions[1] + self.assertEquals(['ddd', 'eee', 'fff'], function.parameters) + + start_token = function.start_token + end_token = function.end_token + + self.assertEquals( + javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION, + function.start_token.type) + + self.assertEquals('function', start_token.string) + self.assertEquals(11, start_token.line_number) + self.assertEquals(10, start_token.start_index) + + self.assertEquals('}', end_token.string) + self.assertEquals(13, end_token.line_number) + self.assertEquals(0, end_token.start_index) + + self.assertEquals('bar', function.name) + + self.assertIsNotNone(function.doc) + + # Check function JSDoc + doc = function.doc + doc_tokens = tokenutil.GetTokenRange(doc.start_token, doc.end_token) + + comment_type = javascripttokens.JavaScriptTokenType.COMMENT + comment_tokens = filter(lambda t: t.type is comment_type, doc_tokens) + + self.assertEquals('JSDoc comment.', + tokenutil.TokensToString(comment_tokens).strip()) + + # Third function + function = functions[2] + self.assertEquals(['ggg', 'hhh', 'iii'], function.parameters) + + start_token = function.start_token + end_token = function.end_token + + self.assertEquals( + javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION, + function.start_token.type) + + self.assertEquals('function', start_token.string) + self.assertEquals(19, start_token.line_number) + self.assertEquals(10, start_token.start_index) + + self.assertEquals('}', end_token.string) + 
self.assertEquals(24, end_token.line_number) + self.assertEquals(0, end_token.start_index) + + self.assertEquals('baz', function.name) + self.assertIsNotNone(function.doc) + + # Fourth function (inside third function) + function = functions[3] + self.assertEquals(['jjj', 'kkk', 'lll'], function.parameters) + + start_token = function.start_token + end_token = function.end_token + + self.assertEquals( + javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION, + function.start_token.type) + + self.assertEquals('function', start_token.string) + self.assertEquals(20, start_token.line_number) + self.assertEquals(12, start_token.start_index) + + self.assertEquals('}', end_token.string) + self.assertEquals(21, end_token.line_number) + self.assertEquals(2, end_token.start_index) + + self.assertEquals('qux', function.name) + self.assertIsNone(function.doc) + + + +class CommentTest(googletest.TestCase): + + def testGetDescription(self): + comment = self._ParseComment(""" + /** + * Comment targeting goog.foo. + * + * This is the second line. + * @param {number} foo The count of foo. + */ + target;""") + + self.assertEqual( + 'Comment targeting goog.foo.\n\nThis is the second line.', + comment.description) + + def testCommentGetTarget(self): + self.assertCommentTarget('goog.foo', """ + /** + * Comment targeting goog.foo. + */ + goog.foo = 6; + """) + + self.assertCommentTarget('bar', """ + /** + * Comment targeting bar. + */ + var bar = "Karate!"; + """) + + self.assertCommentTarget('doThing', """ + /** + * Comment targeting doThing. + */ + function doThing() {}; + """) + + self.assertCommentTarget('this.targetProperty', """ + goog.bar.Baz = function() { + /** + * Comment targeting targetProperty. + */ + this.targetProperty = 3; + }; + """) + + self.assertCommentTarget('goog.bar.prop', """ + /** + * Comment targeting goog.bar.prop. + */ + goog.bar.prop; + """) + + self.assertCommentTarget('goog.aaa.bbb', """ + /** + * Comment targeting goog.aaa.bbb. + */ + (goog.aaa.bbb) + """) + + self.assertCommentTarget('theTarget', """ + /** + * Comment targeting symbol preceded by newlines, whitespace, + * and parens -- things we ignore. + */ + (theTarget) + """) + + self.assertCommentTarget(None, """ + /** + * @fileoverview File overview. + */ + (notATarget) + """) + + self.assertCommentTarget(None, """ + /** + * Comment that doesn't find a target. + */ + """) + + self.assertCommentTarget('theTarget.is.split.across.lines', """ + /** + * Comment that addresses a symbol split across lines. + */ + (theTarget.is.split + .across.lines) + """) + + self.assertCommentTarget('theTarget.is.split.across.lines', """ + /** + * Comment that addresses a symbol split across lines. + */ + (theTarget.is.split. + across.lines) + """) + + def _ParseComment(self, script): + """Parse a script that contains one comment and return it.""" + _, comments = testutil.ParseFunctionsAndComments(script) + self.assertEquals(1, len(comments)) + return comments[0] + + def assertCommentTarget(self, target, script): + comment = self._ParseComment(script) + self.assertEquals(target, comment.GetTargetIdentifier()) + + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py b/tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py new file mode 100644 index 0000000..2ee5b81 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py @@ -0,0 +1,463 @@ +#!/usr/bin/env python +# +# Copyright 2007 The Closure Linter Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Regular expression based JavaScript parsing classes.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + +import copy +import re + +from closure_linter import javascripttokens +from closure_linter.common import matcher +from closure_linter.common import tokenizer + +# Shorthand +Type = javascripttokens.JavaScriptTokenType +Matcher = matcher.Matcher + + +class JavaScriptModes(object): + """Enumeration of the different matcher modes used for JavaScript.""" + TEXT_MODE = 'text' + SINGLE_QUOTE_STRING_MODE = 'single_quote_string' + DOUBLE_QUOTE_STRING_MODE = 'double_quote_string' + BLOCK_COMMENT_MODE = 'block_comment' + DOC_COMMENT_MODE = 'doc_comment' + DOC_COMMENT_LEX_SPACES_MODE = 'doc_comment_spaces' + LINE_COMMENT_MODE = 'line_comment' + PARAMETER_MODE = 'parameter' + FUNCTION_MODE = 'function' + + +class JavaScriptTokenizer(tokenizer.Tokenizer): + """JavaScript tokenizer. + + Convert JavaScript code in to an array of tokens. + """ + + # Useful patterns for JavaScript parsing. + IDENTIFIER_CHAR = r'A-Za-z0-9_$' + + # Number patterns based on: + # http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html + MANTISSA = r""" + (\d+(?!\.)) | # Matches '10' + (\d+\.(?!\d)) | # Matches '10.' + (\d*\.\d+) # Matches '.5' or '10.5' + """ + DECIMAL_LITERAL = r'(%s)([eE][-+]?\d+)?' % MANTISSA + HEX_LITERAL = r'0[xX][0-9a-fA-F]+' + NUMBER = re.compile(r""" + ((%s)|(%s)) + """ % (HEX_LITERAL, DECIMAL_LITERAL), re.VERBOSE) + + # Strings come in three parts - first we match the start of the string, then + # the contents, then the end. The contents consist of any character except a + # backslash or end of string, or a backslash followed by any character, or a + # backslash followed by end of line to support correct parsing of multi-line + # strings. + SINGLE_QUOTE = re.compile(r"'") + SINGLE_QUOTE_TEXT = re.compile(r"([^'\\]|\\(.|$))+") + DOUBLE_QUOTE = re.compile(r'"') + DOUBLE_QUOTE_TEXT = re.compile(r'([^"\\]|\\(.|$))+') + + START_SINGLE_LINE_COMMENT = re.compile(r'//') + END_OF_LINE_SINGLE_LINE_COMMENT = re.compile(r'//$') + + START_DOC_COMMENT = re.compile(r'/\*\*') + START_BLOCK_COMMENT = re.compile(r'/\*') + END_BLOCK_COMMENT = re.compile(r'\*/') + BLOCK_COMMENT_TEXT = re.compile(r'([^*]|\*(?!/))+') + + # Comment text is anything that we are not going to parse into another special + # token like (inline) flags or end comments. Complicated regex to match + # most normal characters, and '*', '{', '}', and '@' when we are sure that + # it is safe. Expression [^*{\s]@ must come first, or the other options will + # match everything before @, and we won't match @'s that aren't part of flags + # like in email addresses in the @author tag. 
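+  # For example, in "@author robbyw@google.com", only the leading "@author" is
+  # lexed as a doc flag; the "@" in the e-mail address stays comment text.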
+ DOC_COMMENT_TEXT = re.compile(r'([^*{}\s]@|[^*{}@]|\*(?!/))+') + DOC_COMMENT_NO_SPACES_TEXT = re.compile(r'([^*{}\s]@|[^*{}@\s]|\*(?!/))+') + # Match anything that is allowed in a type definition, except for tokens + # needed to parse it (and the lookahead assertion for "*/"). + DOC_COMMENT_TYPE_TEXT = re.compile(r'([^*|!?=<>(){}:,\s]|\*(?!/))+') + + # Match the prefix ' * ' that starts every line of jsdoc. Want to include + # spaces after the '*', but nothing else that occurs after a '*', and don't + # want to match the '*' in '*/'. + DOC_PREFIX = re.compile(r'\s*\*(\s+|(?!/))') + + START_BLOCK = re.compile('{') + END_BLOCK = re.compile('}') + + REGEX_CHARACTER_CLASS = r""" + \[ # Opening bracket + ([^\]\\]|\\.)* # Anything but a ] or \, + # or a backslash followed by anything + \] # Closing bracket + """ + # We ensure the regex is followed by one of the above tokens to avoid + # incorrectly parsing something like x / y / z as x REGEX(/ y /) z + POST_REGEX_LIST = [ + ';', ',', r'\.', r'\)', r'\]', '$', r'\/\/', r'\/\*', ':', '}'] + + REGEX = re.compile(r""" + / # opening slash + (?!\*) # not the start of a comment + (\\.|[^\[\/\\]|(%s))* # a backslash followed by anything, + # or anything but a / or [ or \, + # or a character class + / # closing slash + [gimsx]* # optional modifiers + (?=\s*(%s)) + """ % (REGEX_CHARACTER_CLASS, '|'.join(POST_REGEX_LIST)), + re.VERBOSE) + + ANYTHING = re.compile(r'.*') + PARAMETERS = re.compile(r'[^\)]+') + CLOSING_PAREN_WITH_SPACE = re.compile(r'\)\s*') + + FUNCTION_DECLARATION = re.compile(r'\bfunction\b') + + OPENING_PAREN = re.compile(r'\(') + CLOSING_PAREN = re.compile(r'\)') + + OPENING_BRACKET = re.compile(r'\[') + CLOSING_BRACKET = re.compile(r'\]') + + # We omit these JS keywords from the list: + # function - covered by FUNCTION_DECLARATION. + # delete, in, instanceof, new, typeof - included as operators. + # this - included in identifiers. + # null, undefined - not included, should go in some "special constant" list. + KEYWORD_LIST = [ + 'break', + 'case', + 'catch', + 'continue', + 'default', + 'do', + 'else', + 'finally', + 'for', + 'if', + 'return', + 'switch', + 'throw', + 'try', + 'var', + 'while', + 'with', + ] + + # List of regular expressions to match as operators. Some notes: for our + # purposes, the comma behaves similarly enough to a normal operator that we + # include it here. r'\bin\b' actually matches 'in' surrounded by boundary + # characters - this may not match some very esoteric uses of the in operator. + # Operators that are subsets of larger operators must come later in this list + # for proper matching, e.g., '>>' must come AFTER '>>>'. + OPERATOR_LIST = [ + ',', + r'\+\+', + '===', + '!==', + '>>>=', + '>>>', + '==', + '>=', + '<=', + '!=', + '<<=', + '>>=', + '<<', + '>>', + '=>', + '>', + '<', + r'\+=', + r'\+', + '--', + r'\^=', + '-=', + '-', + '/=', + '/', + r'\*=', + r'\*', + '%=', + '%', + '&&', + r'\|\|', + '&=', + '&', + r'\|=', + r'\|', + '=', + '!', + ':', + r'\?', + r'\^', + r'\bdelete\b', + r'\bin\b', + r'\binstanceof\b', + r'\bnew\b', + r'\btypeof\b', + r'\bvoid\b', + r'\.', + ] + OPERATOR = re.compile('|'.join(OPERATOR_LIST)) + + WHITESPACE = re.compile(r'\s+') + SEMICOLON = re.compile(r';') + # Technically JavaScript identifiers can't contain '.', but we treat a set of + # nested identifiers as a single identifier, except for trailing dots. 
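+  # For example, "goog.dom.getElement" is emitted as one IDENTIFIER token
+  # rather than as identifiers joined by '.' operator tokens.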
+  NESTED_IDENTIFIER = r'[a-zA-Z_$]([%s]|\.[a-zA-Z_$])*' % IDENTIFIER_CHAR
+  IDENTIFIER = re.compile(NESTED_IDENTIFIER)
+
+  SIMPLE_LVALUE = re.compile(r"""
+      (?P<identifier>%s)      # a valid identifier
+      (?=\s*                  # optional whitespace
+      \=                      # look ahead to equal sign
+      (?!=))                  # not followed by an equal sign
+      """ % NESTED_IDENTIFIER, re.VERBOSE)
+
+  # A doc flag is a @ sign followed by non-space characters that appears at the
+  # beginning of the line, after whitespace, or after a '{'. The look-behind
+  # check is necessary to not match someone@google.com as a flag.
+  DOC_FLAG = re.compile(r'(^|(?<=\s))@(?P<name>[a-zA-Z]+)')
+  # To properly parse parameter names and complex doctypes containing
+  # whitespace, we need to tokenize whitespace into a token after certain
+  # doctags. All statetracker.HAS_TYPE that are not listed here must not contain
+  # any whitespace in their types.
+  DOC_FLAG_LEX_SPACES = re.compile(
+      r'(^|(?<=\s))@(?P<name>%s)\b' %
+      '|'.join([
+          'const',
+          'enum',
+          'extends',
+          'final',
+          'implements',
+          'param',
+          'private',
+          'protected',
+          'public',
+          'return',
+          'type',
+          'typedef'
+      ]))
+
+  DOC_INLINE_FLAG = re.compile(r'(?<={)@(?P<name>[a-zA-Z]+)')
+
+  DOC_TYPE_BLOCK_START = re.compile(r'[<(]')
+  DOC_TYPE_BLOCK_END = re.compile(r'[>)]')
+  DOC_TYPE_MODIFIERS = re.compile(r'[!?|,:=]')
+
+  # Star followed by non-slash, i.e. a star that does not end a comment.
+  # This is used for TYPE_GROUP below.
+  SAFE_STAR = r'(\*(?!/))'
+
+  COMMON_DOC_MATCHERS = [
+      # Find the end of the comment.
+      Matcher(END_BLOCK_COMMENT, Type.END_DOC_COMMENT,
+              JavaScriptModes.TEXT_MODE),
+
+      # Tokenize documented flags like @private.
+      Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG),
+      Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG,
+              JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE),
+
+      # Encountering a doc flag should leave lex spaces mode.
+      Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE),
+
+      # Tokenize braces so we can find types.
+      Matcher(START_BLOCK, Type.DOC_START_BRACE),
+      Matcher(END_BLOCK, Type.DOC_END_BRACE),
+
+      # And some more to parse types.
+      Matcher(DOC_TYPE_BLOCK_START, Type.DOC_TYPE_START_BLOCK),
+      Matcher(DOC_TYPE_BLOCK_END, Type.DOC_TYPE_END_BLOCK),
+
+      Matcher(DOC_TYPE_MODIFIERS, Type.DOC_TYPE_MODIFIER),
+      Matcher(DOC_COMMENT_TYPE_TEXT, Type.COMMENT),
+
+      Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)]
+
+  # When text is not matched, it is given this default type based on mode.
+  # If unspecified in this map, the default default is Type.NORMAL.
+  JAVASCRIPT_DEFAULT_TYPES = {
+      JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT,
+      JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT
+  }
+
+  @classmethod
+  def BuildMatchers(cls):
+    """Builds the token matcher group.
+
+    The token matcher group works as follows: it is a list of Matcher objects.
+    The matchers will be tried in this order, and the first to match will be
+    returned. Hence the order is important because the matchers that come first
+    overrule the matchers that come later.
+
+    Returns:
+      The completed token matcher group.
+    """
+    # Match a keyword string followed by a non-identifier character in order to
+    # not match something like doSomething as do + Something.
+    keyword = re.compile('(%s)((?=[^%s])|$)' % (
+        '|'.join(cls.KEYWORD_LIST), cls.IDENTIFIER_CHAR))
+    return {
+
+        # Matchers for basic text mode.
+        JavaScriptModes.TEXT_MODE: [
+            # Check a big group - strings, starting comments, and regexes - all
+            # of which could be intertwined.
'string with /regex/', + # /regex with 'string'/, /* comment with /regex/ and string */ (and + # so on) + Matcher(cls.START_DOC_COMMENT, Type.START_DOC_COMMENT, + JavaScriptModes.DOC_COMMENT_MODE), + Matcher(cls.START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT, + JavaScriptModes.BLOCK_COMMENT_MODE), + Matcher(cls.END_OF_LINE_SINGLE_LINE_COMMENT, + Type.START_SINGLE_LINE_COMMENT), + Matcher(cls.START_SINGLE_LINE_COMMENT, + Type.START_SINGLE_LINE_COMMENT, + JavaScriptModes.LINE_COMMENT_MODE), + Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START, + JavaScriptModes.SINGLE_QUOTE_STRING_MODE), + Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START, + JavaScriptModes.DOUBLE_QUOTE_STRING_MODE), + Matcher(cls.REGEX, Type.REGEX), + + # Next we check for start blocks appearing outside any of the items + # above. + Matcher(cls.START_BLOCK, Type.START_BLOCK), + Matcher(cls.END_BLOCK, Type.END_BLOCK), + + # Then we search for function declarations. + Matcher(cls.FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION, + JavaScriptModes.FUNCTION_MODE), + + # Next, we convert non-function related parens to tokens. + Matcher(cls.OPENING_PAREN, Type.START_PAREN), + Matcher(cls.CLOSING_PAREN, Type.END_PAREN), + + # Next, we convert brackets to tokens. + Matcher(cls.OPENING_BRACKET, Type.START_BRACKET), + Matcher(cls.CLOSING_BRACKET, Type.END_BRACKET), + + # Find numbers. This has to happen before operators because + # scientific notation numbers can have + and - in them. + Matcher(cls.NUMBER, Type.NUMBER), + + # Find operators and simple assignments + Matcher(cls.SIMPLE_LVALUE, Type.SIMPLE_LVALUE), + Matcher(cls.OPERATOR, Type.OPERATOR), + + # Find key words and whitespace. + Matcher(keyword, Type.KEYWORD), + Matcher(cls.WHITESPACE, Type.WHITESPACE), + + # Find identifiers. + Matcher(cls.IDENTIFIER, Type.IDENTIFIER), + + # Finally, we convert semicolons to tokens. + Matcher(cls.SEMICOLON, Type.SEMICOLON)], + + # Matchers for single quote strings. + JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [ + Matcher(cls.SINGLE_QUOTE_TEXT, Type.STRING_TEXT), + Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END, + JavaScriptModes.TEXT_MODE)], + + # Matchers for double quote strings. + JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [ + Matcher(cls.DOUBLE_QUOTE_TEXT, Type.STRING_TEXT), + Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END, + JavaScriptModes.TEXT_MODE)], + + # Matchers for block comments. + JavaScriptModes.BLOCK_COMMENT_MODE: [ + # First we check for exiting a block comment. + Matcher(cls.END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT, + JavaScriptModes.TEXT_MODE), + + # Match non-comment-ending text.. + Matcher(cls.BLOCK_COMMENT_TEXT, Type.COMMENT)], + + # Matchers for doc comments. + JavaScriptModes.DOC_COMMENT_MODE: cls.COMMON_DOC_MATCHERS + [ + Matcher(cls.DOC_COMMENT_TEXT, Type.COMMENT)], + + JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: cls.COMMON_DOC_MATCHERS + [ + Matcher(cls.WHITESPACE, Type.COMMENT), + Matcher(cls.DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)], + + # Matchers for single line comments. + JavaScriptModes.LINE_COMMENT_MODE: [ + # We greedy match until the end of the line in line comment mode. + Matcher(cls.ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)], + + # Matchers for code after the function keyword. + JavaScriptModes.FUNCTION_MODE: [ + # Must match open paren before anything else and move into parameter + # mode, otherwise everything inside the parameter list is parsed + # incorrectly. 
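+            # For example, in "function foo(a, b) {" the "(" switches the
+            # tokenizer into PARAMETER_MODE, so "a, b" is lexed as a single
+            # PARAMETERS token rather than identifiers and operators.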
+ Matcher(cls.OPENING_PAREN, Type.START_PARAMETERS, + JavaScriptModes.PARAMETER_MODE), + Matcher(cls.WHITESPACE, Type.WHITESPACE), + Matcher(cls.IDENTIFIER, Type.FUNCTION_NAME)], + + # Matchers for function parameters + JavaScriptModes.PARAMETER_MODE: [ + # When in function parameter mode, a closing paren is treated + # specially. Everything else is treated as lines of parameters. + Matcher(cls.CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS, + JavaScriptModes.TEXT_MODE), + Matcher(cls.PARAMETERS, Type.PARAMETERS, + JavaScriptModes.PARAMETER_MODE)]} + + def __init__(self, parse_js_doc=True): + """Create a tokenizer object. + + Args: + parse_js_doc: Whether to do detailed parsing of javascript doc comments, + or simply treat them as normal comments. Defaults to parsing JsDoc. + """ + matchers = self.BuildMatchers() + if not parse_js_doc: + # Make a copy so the original doesn't get modified. + matchers = copy.deepcopy(matchers) + matchers[JavaScriptModes.DOC_COMMENT_MODE] = matchers[ + JavaScriptModes.BLOCK_COMMENT_MODE] + + tokenizer.Tokenizer.__init__(self, JavaScriptModes.TEXT_MODE, matchers, + self.JAVASCRIPT_DEFAULT_TYPES) + + def _CreateToken(self, string, token_type, line, line_number, values=None): + """Creates a new JavaScriptToken object. + + Args: + string: The string of input the token contains. + token_type: The type of token. + line: The text of the line this token is in. + line_number: The line number of the token. + values: A dict of named values within the token. For instance, a + function declaration may have a value called 'name' which captures the + name of the function. + """ + return javascripttokens.JavaScriptToken(string, token_type, line, + line_number, values, line_number) diff --git a/tools/closure_linter/build/lib/closure_linter/javascripttokens.py b/tools/closure_linter/build/lib/closure_linter/javascripttokens.py new file mode 100644 index 0000000..f5815d2 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/javascripttokens.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python +# +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
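Note on usage: the matcher table assembled by BuildMatchers() above is normally driven through JavaScriptTokenizer.TokenizeFile(), which returns the head of a linked token stream; runner.py later in this patch uses exactly that entry point. Below is a minimal sketch of walking the stream, relying only on names that appear elsewhere in this patch (JavaScriptTokenizer, TokenizeFile, and the token attributes line_number, type, string and next); the sample source string itself is arbitrary.

import StringIO

from closure_linter import javascripttokenizer

# Tokenize a small script held in memory, the same way runner.py feeds
# file-like objects to the tokenizer.
source = StringIO.StringIO('var foo = function() {};\n')
tokenizer = javascripttokenizer.JavaScriptTokenizer()
start_token = tokenizer.TokenizeFile(source)

# Tokens form a linked list; follow .next from the first token.
token = start_token
while token:
  print('%d %s %r' % (token.line_number, token.type, token.string))
  token = token.next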
+ +"""Classes to represent JavaScript tokens.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + +from closure_linter.common import tokens + +class JavaScriptTokenType(tokens.TokenType): + """Enumeration of JavaScript token types, and useful sets of token types.""" + NUMBER = 'number' + START_SINGLE_LINE_COMMENT = '//' + START_BLOCK_COMMENT = '/*' + START_DOC_COMMENT = '/**' + END_BLOCK_COMMENT = '*/' + END_DOC_COMMENT = 'doc */' + COMMENT = 'comment' + SINGLE_QUOTE_STRING_START = "'string" + SINGLE_QUOTE_STRING_END = "string'" + DOUBLE_QUOTE_STRING_START = '"string' + DOUBLE_QUOTE_STRING_END = 'string"' + STRING_TEXT = 'string' + START_BLOCK = '{' + END_BLOCK = '}' + START_PAREN = '(' + END_PAREN = ')' + START_BRACKET = '[' + END_BRACKET = ']' + REGEX = '/regex/' + FUNCTION_DECLARATION = 'function(...)' + FUNCTION_NAME = 'function functionName(...)' + START_PARAMETERS = 'startparams(' + PARAMETERS = 'pa,ra,ms' + END_PARAMETERS = ')endparams' + SEMICOLON = ';' + DOC_FLAG = '@flag' + DOC_INLINE_FLAG = '{@flag ...}' + DOC_START_BRACE = 'doc {' + DOC_END_BRACE = 'doc }' + DOC_PREFIX = 'comment prefix: * ' + DOC_TYPE_START_BLOCK = 'Type <' + DOC_TYPE_END_BLOCK = 'Type >' + DOC_TYPE_MODIFIER = 'modifier' + SIMPLE_LVALUE = 'lvalue=' + KEYWORD = 'keyword' + OPERATOR = 'operator' + IDENTIFIER = 'identifier' + + STRING_TYPES = frozenset([ + SINGLE_QUOTE_STRING_START, SINGLE_QUOTE_STRING_END, + DOUBLE_QUOTE_STRING_START, DOUBLE_QUOTE_STRING_END, STRING_TEXT]) + + COMMENT_TYPES = frozenset([ + START_SINGLE_LINE_COMMENT, COMMENT, + START_BLOCK_COMMENT, START_DOC_COMMENT, + END_BLOCK_COMMENT, END_DOC_COMMENT, + DOC_START_BRACE, DOC_END_BRACE, + DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX, + DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER]) + + FLAG_DESCRIPTION_TYPES = frozenset([ + DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE, + DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER]) + + FLAG_ENDING_TYPES = frozenset([DOC_FLAG, END_DOC_COMMENT]) + + NON_CODE_TYPES = COMMENT_TYPES | frozenset([ + tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE]) + + UNARY_OPERATORS = ['!', 'new', 'delete', 'typeof', 'void'] + + UNARY_OK_OPERATORS = ['--', '++', '-', '+'] + UNARY_OPERATORS + + UNARY_POST_OPERATORS = ['--', '++'] + + # An expression ender is any token that can end an object - i.e. we could have + # x.y or [1, 2], or (10 + 9) or {a: 10}. + EXPRESSION_ENDER_TYPES = [tokens.TokenType.NORMAL, IDENTIFIER, NUMBER, + SIMPLE_LVALUE, END_BRACKET, END_PAREN, END_BLOCK, + SINGLE_QUOTE_STRING_END, DOUBLE_QUOTE_STRING_END] + + +class JavaScriptToken(tokens.Token): + """JavaScript token subclass of Token, provides extra instance checks. + + The following token types have data in attached_object: + - All JsDoc flags: a parser.JsDocFlag object. + """ + + def IsKeyword(self, keyword): + """Tests if this token is the given keyword. + + Args: + keyword: The keyword to compare to. + + Returns: + True if this token is a keyword token with the given name. + """ + return self.type == JavaScriptTokenType.KEYWORD and self.string == keyword + + def IsOperator(self, operator): + """Tests if this token is the given operator. + + Args: + operator: The operator to compare to. + + Returns: + True if this token is a operator token with the given name. + """ + return self.type == JavaScriptTokenType.OPERATOR and self.string == operator + + def IsAssignment(self): + """Tests if this token is an assignment operator. 
+ + Returns: + True if this token is an assignment operator. + """ + return (self.type == JavaScriptTokenType.OPERATOR and + self.string.endswith('=') and + self.string not in ('==', '!=', '>=', '<=', '===', '!==')) + + def IsComment(self): + """Tests if this token is any part of a comment. + + Returns: + True if this token is any part of a comment. + """ + return self.type in JavaScriptTokenType.COMMENT_TYPES + + def IsCode(self): + """Tests if this token is code, as opposed to a comment or whitespace.""" + return self.type not in JavaScriptTokenType.NON_CODE_TYPES + + def __repr__(self): + return '' % (self.line_number, + self.type, self.string, + self.values, + self.metadata) diff --git a/tools/closure_linter/build/lib/closure_linter/not_strict_test.py b/tools/closure_linter/build/lib/closure_linter/not_strict_test.py new file mode 100644 index 0000000..c92c13e --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/not_strict_test.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# +# Copyright 2011 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for gjslint --nostrict. + +Tests errors that can be thrown by gjslint when not in strict mode. +""" + + + +import os +import sys +import unittest + +import gflags as flags +import unittest as googletest + +from closure_linter import errors +from closure_linter import runner +from closure_linter.common import filetestcase + +_RESOURCE_PREFIX = 'closure_linter/testdata' + +flags.FLAGS.strict = False +flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires') +flags.FLAGS.closurized_namespaces = ('goog', 'dummy') +flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js', + 'limited_doc_checks.js') + + +# List of files under testdata to test. +# We need to list files explicitly since pyglib can't list directories. +_TEST_FILES = [ + 'not_strict.js' + ] + + +class GJsLintTestSuite(unittest.TestSuite): + """Test suite to run a GJsLintTest for each of several files. + + If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in + testdata to test. Otherwise, _TEST_FILES is used. + """ + + def __init__(self, tests=()): + unittest.TestSuite.__init__(self, tests) + + argv = sys.argv and sys.argv[1:] or [] + if argv: + test_files = argv + else: + test_files = _TEST_FILES + for test_file in test_files: + resource_path = os.path.join(_RESOURCE_PREFIX, test_file) + self.addTest(filetestcase.AnnotatedFileTestCase(resource_path, + runner.Run, + errors.ByName)) + +if __name__ == '__main__': + # Don't let main parse args; it happens in the TestSuite. 
+ googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite') diff --git a/tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py b/tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py new file mode 100644 index 0000000..e7e08a1 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py @@ -0,0 +1,329 @@ +#!/usr/bin/env python +# +# Copyright 2011 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains logic for sorting goog.provide and goog.require statements. + +Closurized JavaScript files use goog.provide and goog.require statements at the +top of the file to manage dependencies. These statements should be sorted +alphabetically, however, it is common for them to be accompanied by inline +comments or suppression annotations. In order to sort these statements without +disrupting their comments and annotations, the association between statements +and comments/annotations must be maintained while sorting. + + RequireProvideSorter: Handles checking/fixing of provide/require statements. +""" + + + +from closure_linter import javascripttokens +from closure_linter import tokenutil + +# Shorthand +Type = javascripttokens.JavaScriptTokenType + + +class RequireProvideSorter(object): + """Checks for and fixes alphabetization of provide and require statements. + + When alphabetizing, comments on the same line or comments directly above a + goog.provide or goog.require statement are associated with that statement and + stay with the statement as it gets sorted. + """ + + def CheckProvides(self, token): + """Checks alphabetization of goog.provide statements. + + Iterates over tokens in given token stream, identifies goog.provide tokens, + and checks that they occur in alphabetical order by the object being + provided. + + Args: + token: A token in the token stream before any goog.provide tokens. + + Returns: + The first provide token in the token stream. + + None is returned if all goog.provide statements are already sorted. + """ + provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide') + provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens) + sorted_provide_strings = sorted(provide_strings) + if provide_strings != sorted_provide_strings: + return provide_tokens[0] + return None + + def CheckRequires(self, token): + """Checks alphabetization of goog.require statements. + + Iterates over tokens in given token stream, identifies goog.require tokens, + and checks that they occur in alphabetical order by the dependency being + required. + + Args: + token: A token in the token stream before any goog.require tokens. + + Returns: + The first require token in the token stream. + + None is returned if all goog.require statements are already sorted. 
+ """ + require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require') + require_strings = self._GetRequireOrProvideTokenStrings(require_tokens) + sorted_require_strings = sorted(require_strings) + if require_strings != sorted_require_strings: + return require_tokens[0] + return None + + def FixProvides(self, token): + """Sorts goog.provide statements in the given token stream alphabetically. + + Args: + token: The first token in the token stream. + """ + self._FixProvidesOrRequires( + self._GetRequireOrProvideTokens(token, 'goog.provide')) + + def FixRequires(self, token): + """Sorts goog.require statements in the given token stream alphabetically. + + Args: + token: The first token in the token stream. + """ + self._FixProvidesOrRequires( + self._GetRequireOrProvideTokens(token, 'goog.require')) + + def _FixProvidesOrRequires(self, tokens): + """Sorts goog.provide or goog.require statements. + + Args: + tokens: A list of goog.provide or goog.require tokens in the order they + appear in the token stream. i.e. the first token in this list must + be the first goog.provide or goog.require token. + """ + strings = self._GetRequireOrProvideTokenStrings(tokens) + sorted_strings = sorted(strings) + + # Make a separate pass to remove any blank lines between goog.require/ + # goog.provide tokens. + first_token = tokens[0] + last_token = tokens[-1] + i = last_token + while i != first_token and i is not None: + if i.type is Type.BLANK_LINE: + tokenutil.DeleteToken(i) + i = i.previous + + # A map from required/provided object name to tokens that make up the line + # it was on, including any comments immediately before it or after it on the + # same line. + tokens_map = self._GetTokensMap(tokens) + + # Iterate over the map removing all tokens. + for name in tokens_map: + tokens_to_delete = tokens_map[name] + for i in tokens_to_delete: + tokenutil.DeleteToken(i) + + # Save token to rest of file. Sorted token will be inserted before this. + rest_of_file = tokens_map[strings[-1]][-1].next + + # Re-add all tokens in the map in alphabetical order. + insert_after = tokens[0].previous + for string in sorted_strings: + for i in tokens_map[string]: + if rest_of_file: + tokenutil.InsertTokenBefore(i, rest_of_file) + else: + tokenutil.InsertTokenAfter(i, insert_after) + insert_after = i + + def _GetRequireOrProvideTokens(self, token, token_string): + """Gets all goog.provide or goog.require tokens in the given token stream. + + Args: + token: The first token in the token stream. + token_string: One of 'goog.provide' or 'goog.require' to indicate which + tokens to find. + + Returns: + A list of goog.provide or goog.require tokens in the order they appear in + the token stream. + """ + tokens = [] + while token: + if token.type == Type.IDENTIFIER: + if token.string == token_string: + tokens.append(token) + elif token.string not in [ + 'goog.provide', 'goog.require', 'goog.setTestOnly']: + # These 3 identifiers are at the top of the file. So if any other + # identifier is encountered, return. + # TODO(user): Once it's decided what ordering goog.require + # should use, add 'goog.module' to the list above and implement the + # decision. + break + token = token.next + + return tokens + + def _GetRequireOrProvideTokenStrings(self, tokens): + """Gets a list of strings corresponding to the given list of tokens. + + The string will be the next string in the token stream after each token in + tokens. This is used to find the object being provided/required by a given + goog.provide or goog.require token. 
+ + Args: + tokens: A list of goog.provide or goog.require tokens. + + Returns: + A list of object names that are being provided or required by the given + list of tokens. For example: + + ['object.a', 'object.c', 'object.b'] + """ + token_strings = [] + for token in tokens: + if not token.is_deleted: + name = tokenutil.GetStringAfterToken(token) + token_strings.append(name) + return token_strings + + def _GetTokensMap(self, tokens): + """Gets a map from object name to tokens associated with that object. + + Starting from the goog.provide/goog.require token, searches backwards in the + token stream for any lines that start with a comment. These lines are + associated with the goog.provide/goog.require token. Also associates any + tokens on the same line as the goog.provide/goog.require token with that + token. + + Args: + tokens: A list of goog.provide or goog.require tokens. + + Returns: + A dictionary that maps object names to the tokens associated with the + goog.provide or goog.require of that object name. For example: + + { + 'object.a': [JavaScriptToken, JavaScriptToken, ...], + 'object.b': [...] + } + + The list of tokens includes any comment lines above the goog.provide or + goog.require statement and everything after the statement on the same + line. For example, all of the following would be associated with + 'object.a': + + /** @suppress {extraRequire} */ + goog.require('object.a'); // Some comment. + """ + tokens_map = {} + for token in tokens: + object_name = tokenutil.GetStringAfterToken(token) + # If the previous line starts with a comment, presume that the comment + # relates to the goog.require or goog.provide and keep them together when + # sorting. + first_token = token + previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token) + while (previous_first_token and + previous_first_token.IsAnyType(Type.COMMENT_TYPES)): + first_token = previous_first_token + previous_first_token = tokenutil.GetFirstTokenInPreviousLine( + first_token) + + # Find the last token on the line. + last_token = tokenutil.GetLastTokenInSameLine(token) + + all_tokens = self._GetTokenList(first_token, last_token) + tokens_map[object_name] = all_tokens + return tokens_map + + def _GetTokenList(self, first_token, last_token): + """Gets a list of all tokens from first_token to last_token, inclusive. + + Args: + first_token: The first token to get. + last_token: The last token to get. + + Returns: + A list of all tokens between first_token and last_token, including both + first_token and last_token. + + Raises: + Exception: If the token stream ends before last_token is reached. + """ + token_list = [] + token = first_token + while token != last_token: + if not token: + raise Exception('ran out of tokens') + token_list.append(token) + token = token.next + token_list.append(last_token) + + return token_list + + def GetFixedRequireString(self, token): + """Get fixed/sorted order of goog.require statements. + + Args: + token: The first token in the token stream. + + Returns: + A string for correct sorted order of goog.require. + """ + return self._GetFixedRequireOrProvideString( + self._GetRequireOrProvideTokens(token, 'goog.require')) + + def GetFixedProvideString(self, token): + """Get fixed/sorted order of goog.provide statements. + + Args: + token: The first token in the token stream. + + Returns: + A string for correct sorted order of goog.provide. 
+ """ + return self._GetFixedRequireOrProvideString( + self._GetRequireOrProvideTokens(token, 'goog.provide')) + + def _GetFixedRequireOrProvideString(self, tokens): + """Sorts goog.provide or goog.require statements. + + Args: + tokens: A list of goog.provide or goog.require tokens in the order they + appear in the token stream. i.e. the first token in this list must + be the first goog.provide or goog.require token. + + Returns: + A string for sorted goog.require or goog.provide statements + """ + + # A map from required/provided object name to tokens that make up the line + # it was on, including any comments immediately before it or after it on the + # same line. + tokens_map = self._GetTokensMap(tokens) + sorted_strings = sorted(tokens_map.keys()) + + new_order = '' + for string in sorted_strings: + for i in tokens_map[string]: + new_order += i.string + if i.IsLastInLine(): + new_order += '\n' + + return new_order diff --git a/tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py b/tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py new file mode 100644 index 0000000..fecb6d0 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py @@ -0,0 +1,155 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for RequireProvideSorter.""" + + + +import unittest as googletest +from closure_linter import javascripttokens +from closure_linter import requireprovidesorter +from closure_linter import testutil + +# pylint: disable=g-bad-name +TokenType = javascripttokens.JavaScriptTokenType + + +class RequireProvideSorterTest(googletest.TestCase): + """Tests for RequireProvideSorter.""" + + def testGetFixedProvideString(self): + """Tests that fixed string constains proper comments also.""" + input_lines = [ + 'goog.provide(\'package.xyz\');', + '/** @suppress {extraprovide} **/', + 'goog.provide(\'package.abcd\');' + ] + + expected_lines = [ + '/** @suppress {extraprovide} **/', + 'goog.provide(\'package.abcd\');', + 'goog.provide(\'package.xyz\');' + ] + + token = testutil.TokenizeSourceAndRunEcmaPass(input_lines) + + sorter = requireprovidesorter.RequireProvideSorter() + fixed_provide_string = sorter.GetFixedProvideString(token) + + self.assertEquals(expected_lines, fixed_provide_string.splitlines()) + + def testGetFixedRequireString(self): + """Tests that fixed string constains proper comments also.""" + input_lines = [ + 'goog.require(\'package.xyz\');', + '/** This is needed for scope. **/', + 'goog.require(\'package.abcd\');' + ] + + expected_lines = [ + '/** This is needed for scope. 
**/', + 'goog.require(\'package.abcd\');', + 'goog.require(\'package.xyz\');' + ] + + token = testutil.TokenizeSourceAndRunEcmaPass(input_lines) + + sorter = requireprovidesorter.RequireProvideSorter() + fixed_require_string = sorter.GetFixedRequireString(token) + + self.assertEquals(expected_lines, fixed_require_string.splitlines()) + + def testFixRequires_removeBlankLines(self): + """Tests that blank lines are omitted in sorted goog.require statements.""" + input_lines = [ + 'goog.provide(\'package.subpackage.Whatever\');', + '', + 'goog.require(\'package.subpackage.ClassB\');', + '', + 'goog.require(\'package.subpackage.ClassA\');' + ] + expected_lines = [ + 'goog.provide(\'package.subpackage.Whatever\');', + '', + 'goog.require(\'package.subpackage.ClassA\');', + 'goog.require(\'package.subpackage.ClassB\');' + ] + token = testutil.TokenizeSourceAndRunEcmaPass(input_lines) + + sorter = requireprovidesorter.RequireProvideSorter() + sorter.FixRequires(token) + + self.assertEquals(expected_lines, self._GetLines(token)) + + def fixRequiresTest_withTestOnly(self, position): + """Regression-tests sorting even with a goog.setTestOnly statement. + + Args: + position: The position in the list where to insert the goog.setTestOnly + statement. Will be used to test all possible combinations for + this test. + """ + input_lines = [ + 'goog.provide(\'package.subpackage.Whatever\');', + '', + 'goog.require(\'package.subpackage.ClassB\');', + 'goog.require(\'package.subpackage.ClassA\');' + ] + expected_lines = [ + 'goog.provide(\'package.subpackage.Whatever\');', + '', + 'goog.require(\'package.subpackage.ClassA\');', + 'goog.require(\'package.subpackage.ClassB\');' + ] + input_lines.insert(position, 'goog.setTestOnly();') + expected_lines.insert(position, 'goog.setTestOnly();') + + token = testutil.TokenizeSourceAndRunEcmaPass(input_lines) + + sorter = requireprovidesorter.RequireProvideSorter() + sorter.FixRequires(token) + + self.assertEquals(expected_lines, self._GetLines(token)) + + def testFixRequires_withTestOnly(self): + """Regression-tests sorting even after a goog.setTestOnly statement.""" + + # goog.setTestOnly at first line. + self.fixRequiresTest_withTestOnly(position=0) + + # goog.setTestOnly after goog.provide. + self.fixRequiresTest_withTestOnly(position=1) + + # goog.setTestOnly before goog.require. + self.fixRequiresTest_withTestOnly(position=2) + + # goog.setTestOnly after goog.require. + self.fixRequiresTest_withTestOnly(position=4) + + def _GetLines(self, token): + """Returns an array of lines based on the specified token stream.""" + lines = [] + line = '' + while token: + line += token.string + if token.IsLastInLine(): + lines.append(line) + line = '' + token = token.next + return lines + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/build/lib/closure_linter/runner.py b/tools/closure_linter/build/lib/closure_linter/runner.py new file mode 100644 index 0000000..04e7fa4 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/runner.py @@ -0,0 +1,198 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Main lint function. Tokenizes file, runs passes, and feeds to checker.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = 'nnaze@google.com (Nathan Naze)' + +import traceback + +import gflags as flags + +from closure_linter import checker +from closure_linter import ecmalintrules +from closure_linter import ecmametadatapass +from closure_linter import error_check +from closure_linter import errors +from closure_linter import javascriptstatetracker +from closure_linter import javascripttokenizer + +from closure_linter.common import error +from closure_linter.common import htmlutil +from closure_linter.common import tokens + +flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'], + 'List of files with relaxed documentation checks. Will not ' + 'report errors for missing documentation, some missing ' + 'descriptions, or methods whose @return tags don\'t have a ' + 'matching return statement.') +flags.DEFINE_boolean('error_trace', False, + 'Whether to show error exceptions.') +flags.ADOPT_module_key_flags(checker) +flags.ADOPT_module_key_flags(ecmalintrules) +flags.ADOPT_module_key_flags(error_check) + + +def _GetLastNonWhiteSpaceToken(start_token): + """Get the last non-whitespace token in a token stream.""" + ret_token = None + + whitespace_tokens = frozenset([ + tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE]) + for t in start_token: + if t.type not in whitespace_tokens: + ret_token = t + + return ret_token + + +def _IsHtml(filename): + return filename.endswith('.html') or filename.endswith('.htm') + + +def _Tokenize(fileobj): + """Tokenize a file. + + Args: + fileobj: file-like object (or iterable lines) with the source. + + Returns: + The first token in the token stream and the ending mode of the tokenizer. + """ + tokenizer = javascripttokenizer.JavaScriptTokenizer() + start_token = tokenizer.TokenizeFile(fileobj) + return start_token, tokenizer.mode + + +def _IsLimitedDocCheck(filename, limited_doc_files): + """Whether this this a limited-doc file. + + Args: + filename: The filename. + limited_doc_files: Iterable of strings. Suffixes of filenames that should + be limited doc check. + + Returns: + Whether the file should be limited check. + """ + for limited_doc_filename in limited_doc_files: + if filename.endswith(limited_doc_filename): + return True + return False + + +def Run(filename, error_handler, source=None): + """Tokenize, run passes, and check the given file. + + Args: + filename: The path of the file to check + error_handler: The error handler to report errors to. + source: A file-like object with the file source. If omitted, the file will + be read from the filename path. 
+ """ + if not source: + try: + source = open(filename) + except IOError: + error_handler.HandleFile(filename, None) + error_handler.HandleError( + error.Error(errors.FILE_NOT_FOUND, 'File not found')) + error_handler.FinishFile() + return + + if _IsHtml(filename): + source_file = htmlutil.GetScriptLines(source) + else: + source_file = source + + token, tokenizer_mode = _Tokenize(source_file) + + error_handler.HandleFile(filename, token) + + # If we did not end in the basic mode, this a failed parse. + if tokenizer_mode is not javascripttokenizer.JavaScriptModes.TEXT_MODE: + error_handler.HandleError( + error.Error(errors.FILE_IN_BLOCK, + 'File ended in mode "%s".' % tokenizer_mode, + _GetLastNonWhiteSpaceToken(token))) + + # Run the ECMA pass + error_token = None + + ecma_pass = ecmametadatapass.EcmaMetaDataPass() + error_token = RunMetaDataPass(token, ecma_pass, error_handler, filename) + + is_limited_doc_check = ( + _IsLimitedDocCheck(filename, flags.FLAGS.limited_doc_files)) + + _RunChecker(token, error_handler, + is_limited_doc_check, + is_html=_IsHtml(filename), + stop_token=error_token) + + error_handler.FinishFile() + + +def RunMetaDataPass(start_token, metadata_pass, error_handler, filename=''): + """Run a metadata pass over a token stream. + + Args: + start_token: The first token in a token stream. + metadata_pass: Metadata pass to run. + error_handler: The error handler to report errors to. + filename: Filename of the source. + + Returns: + The token where the error occurred (if any). + """ + + try: + metadata_pass.Process(start_token) + except ecmametadatapass.ParseError, parse_err: + if flags.FLAGS.error_trace: + traceback.print_exc() + error_token = parse_err.token + error_msg = str(parse_err) + error_handler.HandleError( + error.Error(errors.FILE_DOES_NOT_PARSE, + ('Error parsing file at token "%s". Unable to ' + 'check the rest of file.' + '\nError "%s"' % (error_token, error_msg)), error_token)) + return error_token + except Exception: # pylint: disable=broad-except + traceback.print_exc() + error_handler.HandleError( + error.Error( + errors.FILE_DOES_NOT_PARSE, + 'Internal error in %s' % filename)) + + +def _RunChecker(start_token, error_handler, + limited_doc_checks, is_html, + stop_token=None): + + state_tracker = javascriptstatetracker.JavaScriptStateTracker() + + style_checker = checker.JavaScriptStyleChecker( + state_tracker=state_tracker, + error_handler=error_handler) + + style_checker.Check(start_token, + is_html=is_html, + limited_doc_checks=limited_doc_checks, + stop_token=stop_token) diff --git a/tools/closure_linter/build/lib/closure_linter/runner_test.py b/tools/closure_linter/build/lib/closure_linter/runner_test.py new file mode 100644 index 0000000..da5857d --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/runner_test.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python +# +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for the runner module.""" + +__author__ = ('nnaze@google.com (Nathan Naze)') + +import StringIO + + +import mox + + +import unittest as googletest + +from closure_linter import errors +from closure_linter import runner +from closure_linter.common import error +from closure_linter.common import errorhandler +from closure_linter.common import tokens + + +class LimitedDocTest(googletest.TestCase): + + def testIsLimitedDocCheck(self): + self.assertTrue(runner._IsLimitedDocCheck('foo_test.js', ['_test.js'])) + self.assertFalse(runner._IsLimitedDocCheck('foo_bar.js', ['_test.js'])) + + self.assertTrue(runner._IsLimitedDocCheck( + 'foo_moo.js', ['moo.js', 'quack.js'])) + self.assertFalse(runner._IsLimitedDocCheck( + 'foo_moo.js', ['woof.js', 'quack.js'])) + + +class RunnerTest(googletest.TestCase): + + def setUp(self): + self.mox = mox.Mox() + + def testRunOnMissingFile(self): + mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler) + + def ValidateError(err): + return (isinstance(err, error.Error) and + err.code is errors.FILE_NOT_FOUND and + err.token is None) + + mock_error_handler.HandleFile('does_not_exist.js', None) + mock_error_handler.HandleError(mox.Func(ValidateError)) + mock_error_handler.FinishFile() + + self.mox.ReplayAll() + + runner.Run('does_not_exist.js', mock_error_handler) + + self.mox.VerifyAll() + + def testBadTokenization(self): + mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler) + + def ValidateError(err): + return (isinstance(err, error.Error) and + err.code is errors.FILE_IN_BLOCK and + err.token.string == '}') + + mock_error_handler.HandleFile('foo.js', mox.IsA(tokens.Token)) + mock_error_handler.HandleError(mox.Func(ValidateError)) + mock_error_handler.HandleError(mox.IsA(error.Error)) + mock_error_handler.FinishFile() + + self.mox.ReplayAll() + + source = StringIO.StringIO(_BAD_TOKENIZATION_SCRIPT) + runner.Run('foo.js', mock_error_handler, source) + + self.mox.VerifyAll() + + +_BAD_TOKENIZATION_SCRIPT = """ +function foo () { + var a = 3; + var b = 2; + return b + a; /* Comment not closed +} +""" + + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/build/lib/closure_linter/scopeutil.py b/tools/closure_linter/build/lib/closure_linter/scopeutil.py new file mode 100644 index 0000000..a7ca9b6 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/scopeutil.py @@ -0,0 +1,206 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools to match goog.scope alias statements.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + +import itertools + +from closure_linter import ecmametadatapass +from closure_linter import tokenutil +from closure_linter.javascripttokens import JavaScriptTokenType + + + +def IsGoogScopeBlock(context): + """Whether the given context is a goog.scope block. 
+ + This function only checks that the block is a function block inside + a goog.scope() call. + + TODO(nnaze): Implement goog.scope checks that verify the call is + in the root context and contains only a single function literal. + + Args: + context: An EcmaContext of type block. + + Returns: + Whether the context is a goog.scope block. + """ + + if context.type != ecmametadatapass.EcmaContext.BLOCK: + return False + + if not _IsFunctionLiteralBlock(context): + return False + + # Check that this function is contained by a group + # of form "goog.scope(...)". + parent = context.parent + if parent and parent.type is ecmametadatapass.EcmaContext.GROUP: + + last_code_token = parent.start_token.metadata.last_code + + if (last_code_token and + last_code_token.type is JavaScriptTokenType.IDENTIFIER and + last_code_token.string == 'goog.scope'): + return True + + return False + + +def _IsFunctionLiteralBlock(block_context): + """Check if a context is a function literal block (without parameters). + + Example function literal block: 'function() {}' + + Args: + block_context: An EcmaContext of type block. + + Returns: + Whether this context is a function literal block. + """ + + previous_code_tokens_iter = itertools.ifilter( + lambda token: token not in JavaScriptTokenType.NON_CODE_TYPES, + reversed(block_context.start_token)) + + # Ignore the current token + next(previous_code_tokens_iter, None) + + # Grab the previous three tokens and put them in correct order. + previous_code_tokens = list(itertools.islice(previous_code_tokens_iter, 3)) + previous_code_tokens.reverse() + + # There aren't three previous tokens. + if len(previous_code_tokens) is not 3: + return False + + # Check that the previous three code tokens are "function ()" + previous_code_token_types = [token.type for token in previous_code_tokens] + if (previous_code_token_types == [ + JavaScriptTokenType.FUNCTION_DECLARATION, + JavaScriptTokenType.START_PARAMETERS, + JavaScriptTokenType.END_PARAMETERS]): + return True + + return False + + +def IsInClosurizedNamespace(symbol, closurized_namespaces): + """Match a goog.scope alias. + + Args: + symbol: An identifier like 'goog.events.Event'. + closurized_namespaces: Iterable of valid Closurized namespaces (strings). + + Returns: + True if symbol is an identifier in a Closurized namespace, otherwise False. + """ + for ns in closurized_namespaces: + if symbol.startswith(ns + '.'): + return True + + return False + + +def _GetVarAssignmentTokens(context): + """Returns the tokens from context if it is a var assignment. + + Args: + context: An EcmaContext. + + Returns: + If a var assignment, the tokens contained within it w/o the trailing + semicolon. + """ + if context.type != ecmametadatapass.EcmaContext.VAR: + return + + # Get the tokens in this statement. + if context.start_token and context.end_token: + statement_tokens = tokenutil.GetTokenRange(context.start_token, + context.end_token) + else: + return + + # And now just those tokens that are actually code. + is_non_code_type = lambda t: t.type not in JavaScriptTokenType.NON_CODE_TYPES + code_tokens = filter(is_non_code_type, statement_tokens) + + # Pop off the semicolon if present. 
+ if code_tokens and code_tokens[-1].IsType(JavaScriptTokenType.SEMICOLON): + code_tokens.pop() + + if len(code_tokens) < 4: + return + + if (code_tokens[0].IsKeyword('var') and + code_tokens[1].IsType(JavaScriptTokenType.SIMPLE_LVALUE) and + code_tokens[2].IsOperator('=')): + return code_tokens + + +def MatchAlias(context): + """Match an alias statement (some identifier assigned to a variable). + + Example alias: var MyClass = proj.longNamespace.MyClass. + + Args: + context: An EcmaContext of type EcmaContext.VAR. + + Returns: + If a valid alias, returns a tuple of alias and symbol, otherwise None. + """ + code_tokens = _GetVarAssignmentTokens(context) + if code_tokens is None: + return + + if all(tokenutil.IsIdentifierOrDot(t) for t in code_tokens[3:]): + # var Foo = bar.Foo; + alias, symbol = code_tokens[1], code_tokens[3] + # Mark both tokens as an alias definition to not count them as usages. + alias.metadata.is_alias_definition = True + symbol.metadata.is_alias_definition = True + return alias.string, tokenutil.GetIdentifierForToken(symbol) + + +def MatchModuleAlias(context): + """Match an alias statement in a goog.module style import. + + Example alias: var MyClass = goog.require('proj.longNamespace.MyClass'). + + Args: + context: An EcmaContext. + + Returns: + If a valid alias, returns a tuple of alias and symbol, otherwise None. + """ + code_tokens = _GetVarAssignmentTokens(context) + if code_tokens is None: + return + + if(code_tokens[3].IsType(JavaScriptTokenType.IDENTIFIER) and + code_tokens[3].string == 'goog.require'): + # var Foo = goog.require('bar.Foo'); + alias = code_tokens[1] + symbol = tokenutil.GetStringAfterToken(code_tokens[3]) + if symbol: + alias.metadata.is_alias_definition = True + return alias.string, symbol diff --git a/tools/closure_linter/build/lib/closure_linter/scopeutil_test.py b/tools/closure_linter/build/lib/closure_linter/scopeutil_test.py new file mode 100644 index 0000000..722a953 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/scopeutil_test.py @@ -0,0 +1,222 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for the scopeutil module.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + + +import unittest as googletest + +from closure_linter import ecmametadatapass +from closure_linter import scopeutil +from closure_linter import testutil + + +def _FindContexts(start_token): + """Depth first search of all contexts referenced by a token stream. + + Includes contexts' parents, which might not be directly referenced + by any token in the stream. + + Args: + start_token: First token in the token stream. + + Yields: + All contexts referenced by this token stream. + """ + + seen_contexts = set() + + # For each token, yield the context if we haven't seen it before. 
+ for token in start_token: + + token_context = token.metadata.context + contexts = [token_context] + + # Also grab all the context's ancestors. + parent = token_context.parent + while parent: + contexts.append(parent) + parent = parent.parent + + # Yield each of these contexts if we've not seen them. + for context in contexts: + if context not in seen_contexts: + yield context + + seen_contexts.add(context) + + +def _FindFirstContextOfType(token, context_type): + """Returns the first statement context.""" + for context in _FindContexts(token): + if context.type == context_type: + return context + + +def _ParseAssignment(script): + start_token = testutil.TokenizeSourceAndRunEcmaPass(script) + statement = _FindFirstContextOfType( + start_token, ecmametadatapass.EcmaContext.VAR) + return statement + + +class StatementTest(googletest.TestCase): + + def assertAlias(self, expected_match, script): + statement = _ParseAssignment(script) + match = scopeutil.MatchAlias(statement) + self.assertEquals(expected_match, match) + + def assertModuleAlias(self, expected_match, script): + statement = _ParseAssignment(script) + match = scopeutil.MatchModuleAlias(statement) + self.assertEquals(expected_match, match) + + def testSimpleAliases(self): + self.assertAlias( + ('foo', 'goog.foo'), + 'var foo = goog.foo;') + + self.assertAlias( + ('foo', 'goog.foo'), + 'var foo = goog.foo') # No semicolon + + def testAliasWithComment(self): + self.assertAlias( + ('Component', 'goog.ui.Component'), + 'var Component = /* comment */ goog.ui.Component;') + + def testMultilineAlias(self): + self.assertAlias( + ('Component', 'goog.ui.Component'), + 'var Component = \n goog.ui.\n Component;') + + def testNonSymbolAliasVarStatements(self): + self.assertAlias(None, 'var foo = 3;') + self.assertAlias(None, 'var foo = function() {};') + self.assertAlias(None, 'var foo = bar ? baz : qux;') + + def testModuleAlias(self): + self.assertModuleAlias( + ('foo', 'goog.foo'), + 'var foo = goog.require("goog.foo");') + self.assertModuleAlias( + None, + 'var foo = goog.require(notastring);') + + +class ScopeBlockTest(googletest.TestCase): + + @staticmethod + def _GetBlocks(source): + start_token = testutil.TokenizeSourceAndRunEcmaPass(source) + for context in _FindContexts(start_token): + if context.type is ecmametadatapass.EcmaContext.BLOCK: + yield context + + def assertNoBlocks(self, script): + blocks = list(self._GetBlocks(script)) + self.assertEquals([], blocks) + + def testNotBlocks(self): + # Ensure these are not considered blocks. + self.assertNoBlocks('goog.scope(if{});') + self.assertNoBlocks('goog.scope(for{});') + self.assertNoBlocks('goog.scope(switch{});') + self.assertNoBlocks('goog.scope(function foo{});') + + def testNonScopeBlocks(self): + + blocks = list(self._GetBlocks('goog.scope(try{});')) + self.assertEquals(1, len(blocks)) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + + blocks = list(self._GetBlocks('goog.scope(function(a,b){});')) + self.assertEquals(1, len(blocks)) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + + blocks = list(self._GetBlocks('goog.scope(try{} catch(){});')) + # Two blocks: try and catch. 
+ self.assertEquals(2, len(blocks)) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + + blocks = list(self._GetBlocks('goog.scope(try{} catch(){} finally {});')) + self.assertEquals(3, len(blocks)) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + + +class AliasTest(googletest.TestCase): + + def setUp(self): + self.start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT) + + def testMatchAliasStatement(self): + matches = set() + for context in _FindContexts(self.start_token): + match = scopeutil.MatchAlias(context) + if match: + matches.add(match) + + self.assertEquals( + set([('bar', 'baz'), + ('foo', 'this.foo_'), + ('Component', 'goog.ui.Component'), + ('MyClass', 'myproject.foo.MyClass'), + ('NonClosurizedClass', 'aaa.bbb.NonClosurizedClass')]), + matches) + + def testMatchAliasStatement_withClosurizedNamespaces(self): + + closurized_namepaces = frozenset(['goog', 'myproject']) + + matches = set() + for context in _FindContexts(self.start_token): + match = scopeutil.MatchAlias(context) + if match: + unused_alias, symbol = match + if scopeutil.IsInClosurizedNamespace(symbol, closurized_namepaces): + matches.add(match) + + self.assertEquals( + set([('MyClass', 'myproject.foo.MyClass'), + ('Component', 'goog.ui.Component')]), + matches) + +_TEST_SCRIPT = """ +goog.scope(function() { + var Component = goog.ui.Component; // scope alias + var MyClass = myproject.foo.MyClass; // scope alias + + // Scope alias of non-Closurized namespace. + var NonClosurizedClass = aaa.bbb.NonClosurizedClass; + + var foo = this.foo_; // non-scope object property alias + var bar = baz; // variable alias + + var component = new Component(); +}); + +""" + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/build/lib/closure_linter/statetracker.py b/tools/closure_linter/build/lib/closure_linter/statetracker.py new file mode 100644 index 0000000..52e3639 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/statetracker.py @@ -0,0 +1,1294 @@ +#!/usr/bin/env python +# +# Copyright 2007 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Light weight EcmaScript state tracker that reads tokens and tracks state.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + +import re + +from closure_linter import javascripttokenizer +from closure_linter import javascripttokens +from closure_linter import tokenutil +from closure_linter import typeannotation + +# Shorthand +Type = javascripttokens.JavaScriptTokenType + + +class DocFlag(object): + """Generic doc flag object. + + Attribute: + flag_type: param, return, define, type, etc. + flag_token: The flag token. + type_start_token: The first token specifying the flag type, + including braces. 
+ type_end_token: The last token specifying the flag type, + including braces. + type: The type spec string. + jstype: The type spec, a TypeAnnotation instance. + name_token: The token specifying the flag name. + name: The flag name + description_start_token: The first token in the description. + description_end_token: The end token in the description. + description: The description. + """ + + # Please keep these lists alphabetized. + + # The list of standard jsdoc tags is from + STANDARD_DOC = frozenset([ + 'author', + 'bug', + 'classTemplate', + 'consistentIdGenerator', + 'const', + 'constructor', + 'define', + 'deprecated', + 'dict', + 'enum', + 'export', + 'expose', + 'extends', + 'externs', + 'fileoverview', + 'idGenerator', + 'implements', + 'implicitCast', + 'interface', + 'lends', + 'license', + 'ngInject', # This annotation is specific to AngularJS. + 'noalias', + 'nocompile', + 'nosideeffects', + 'override', + 'owner', + 'package', + 'param', + 'preserve', + 'private', + 'protected', + 'public', + 'return', + 'see', + 'stableIdGenerator', + 'struct', + 'supported', + 'template', + 'this', + 'type', + 'typedef', + 'unrestricted', + ]) + + ANNOTATION = frozenset(['preserveTry', 'suppress']) + + LEGAL_DOC = STANDARD_DOC | ANNOTATION + + # Includes all Closure Compiler @suppress types. + # Not all of these annotations are interpreted by Closure Linter. + # + # Specific cases: + # - accessControls is supported by the compiler at the expression + # and method level to suppress warnings about private/protected + # access (method level applies to all references in the method). + # The linter mimics the compiler behavior. + SUPPRESS_TYPES = frozenset([ + 'accessControls', + 'ambiguousFunctionDecl', + 'checkDebuggerStatement', + 'checkRegExp', + 'checkStructDictInheritance', + 'checkTypes', + 'checkVars', + 'const', + 'constantProperty', + 'deprecated', + 'duplicate', + 'es5Strict', + 'externsValidation', + 'extraProvide', + 'extraRequire', + 'fileoverviewTags', + 'globalThis', + 'internetExplorerChecks', + 'invalidCasts', + 'missingProperties', + 'missingProvide', + 'missingRequire', + 'missingReturn', + 'nonStandardJsDocs', + 'strictModuleDepCheck', + 'suspiciousCode', + 'tweakValidation', + 'typeInvalidation', + 'undefinedNames', + 'undefinedVars', + 'underscore', + 'unknownDefines', + 'unnecessaryCasts', + 'unusedPrivateMembers', + 'uselessCode', + 'visibility', + 'with', + ]) + + HAS_DESCRIPTION = frozenset([ + 'define', + 'deprecated', + 'desc', + 'fileoverview', + 'license', + 'param', + 'preserve', + 'return', + 'supported', + ]) + + # Docflags whose argument should be parsed using the typeannotation parser. + HAS_TYPE = frozenset([ + 'const', + 'define', + 'enum', + 'extends', + 'final', + 'implements', + 'mods', + 'package', + 'param', + 'private', + 'protected', + 'public', + 'return', + 'suppress', + 'type', + 'typedef', + ]) + + # Docflags for which it's ok to omit the type (flag without an argument). + CAN_OMIT_TYPE = frozenset([ + 'const', + 'enum', + 'final', + 'package', + 'private', + 'protected', + 'public', + 'suppress', # We'll raise a separate INCORRECT_SUPPRESS_SYNTAX instead. + ]) + + # Docflags that only take a type as an argument and should not parse a + # following description. 
+ TYPE_ONLY = frozenset([ + 'const', + 'enum', + 'extends', + 'implements', + 'package', + 'suppress', + 'type', + ]) + + HAS_NAME = frozenset(['param']) + + EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$') + EMPTY_STRING = re.compile(r'^\s*$') + + def __init__(self, flag_token, error_handler=None): + """Creates the DocFlag object and attaches it to the given start token. + + Args: + flag_token: The starting token of the flag. + error_handler: An optional error handler for errors occurring while + parsing the doctype. + """ + self.flag_token = flag_token + self.flag_type = flag_token.string.strip().lstrip('@') + + # Extract type, if applicable. + self.type = None + self.jstype = None + self.type_start_token = None + self.type_end_token = None + if self.flag_type in self.HAS_TYPE: + brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE], + Type.FLAG_ENDING_TYPES) + if brace: + end_token, contents = _GetMatchingEndBraceAndContents(brace) + self.type = contents + self.jstype = typeannotation.Parse(brace, end_token, + error_handler) + self.type_start_token = brace + self.type_end_token = end_token + elif (self.flag_type in self.TYPE_ONLY and + flag_token.next.type not in Type.FLAG_ENDING_TYPES and + flag_token.line_number == flag_token.next.line_number): + # b/10407058. If the flag is expected to be followed by a type then + # search for type in same line only. If no token after flag in same + # line then conclude that no type is specified. + self.type_start_token = flag_token.next + self.type_end_token, self.type = _GetEndTokenAndContents( + self.type_start_token) + if self.type is not None: + self.type = self.type.strip() + self.jstype = typeannotation.Parse(flag_token, self.type_end_token, + error_handler) + + # Extract name, if applicable. + self.name_token = None + self.name = None + if self.flag_type in self.HAS_NAME: + # Handle bad case, name could be immediately after flag token. + self.name_token = _GetNextPartialIdentifierToken(flag_token) + + # Handle good case, if found token is after type start, look for + # a identifier (substring to cover cases like [cnt] b/4197272) after + # type end, since types contain identifiers. + if (self.type and self.name_token and + tokenutil.Compare(self.name_token, self.type_start_token) > 0): + self.name_token = _GetNextPartialIdentifierToken(self.type_end_token) + + if self.name_token: + self.name = self.name_token.string + + # Extract description, if applicable. 
+ self.description_start_token = None + self.description_end_token = None + self.description = None + if self.flag_type in self.HAS_DESCRIPTION: + search_start_token = flag_token + if self.name_token and self.type_end_token: + if tokenutil.Compare(self.type_end_token, self.name_token) > 0: + search_start_token = self.type_end_token + else: + search_start_token = self.name_token + elif self.name_token: + search_start_token = self.name_token + elif self.type: + search_start_token = self.type_end_token + + interesting_token = tokenutil.Search(search_start_token, + Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES) + if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES: + self.description_start_token = interesting_token + self.description_end_token, self.description = ( + _GetEndTokenAndContents(interesting_token)) + + def HasType(self): + """Returns whether this flag should have a type annotation.""" + return self.flag_type in self.HAS_TYPE + + def __repr__(self): + return '' % (self.flag_type, repr(self.jstype)) + + +class DocComment(object): + """JavaScript doc comment object. + + Attributes: + ordered_params: Ordered list of parameters documented. + start_token: The token that starts the doc comment. + end_token: The token that ends the doc comment. + suppressions: Map of suppression type to the token that added it. + """ + def __init__(self, start_token): + """Create the doc comment object. + + Args: + start_token: The first token in the doc comment. + """ + self.__flags = [] + self.start_token = start_token + self.end_token = None + self.suppressions = {} + self.invalidated = False + + @property + def ordered_params(self): + """Gives the list of parameter names as a list of strings.""" + params = [] + for flag in self.__flags: + if flag.flag_type == 'param' and flag.name: + params.append(flag.name) + return params + + def Invalidate(self): + """Indicate that the JSDoc is well-formed but we had problems parsing it. + + This is a short-circuiting mechanism so that we don't emit false + positives about well-formed doc comments just because we don't support + hot new syntaxes. + """ + self.invalidated = True + + def IsInvalidated(self): + """Test whether Invalidate() has been called.""" + return self.invalidated + + def AddSuppression(self, token): + """Add a new error suppression flag. + + Args: + token: The suppression flag token. + """ + flag = token and token.attached_object + if flag and flag.jstype: + for suppression in flag.jstype.IterIdentifiers(): + self.suppressions[suppression] = token + + def SuppressionOnly(self): + """Returns whether this comment contains only suppression flags.""" + if not self.__flags: + return False + + for flag in self.__flags: + if flag.flag_type != 'suppress': + return False + + return True + + def AddFlag(self, flag): + """Add a new document flag. + + Args: + flag: DocFlag object. + """ + self.__flags.append(flag) + + def InheritsDocumentation(self): + """Test if the jsdoc implies documentation inheritance. + + Returns: + True if documentation may be pulled off the superclass. + """ + return self.HasFlag('inheritDoc') or self.HasFlag('override') + + def HasFlag(self, flag_type): + """Test if the given flag has been set. + + Args: + flag_type: The type of the flag to check. + + Returns: + True if the flag is set. + """ + for flag in self.__flags: + if flag.flag_type == flag_type: + return True + return False + + def GetFlag(self, flag_type): + """Gets the last flag of the given type. + + Args: + flag_type: The type of the flag to get. 
+ + Returns: + The last instance of the given flag type in this doc comment. + """ + for flag in reversed(self.__flags): + if flag.flag_type == flag_type: + return flag + + def GetDocFlags(self): + """Return the doc flags for this comment.""" + return list(self.__flags) + + def _YieldDescriptionTokens(self): + for token in self.start_token: + + if (token is self.end_token or + token.type is javascripttokens.JavaScriptTokenType.DOC_FLAG or + token.type not in javascripttokens.JavaScriptTokenType.COMMENT_TYPES): + return + + if token.type not in [ + javascripttokens.JavaScriptTokenType.START_DOC_COMMENT, + javascripttokens.JavaScriptTokenType.END_DOC_COMMENT, + javascripttokens.JavaScriptTokenType.DOC_PREFIX]: + yield token + + @property + def description(self): + return tokenutil.TokensToString( + self._YieldDescriptionTokens()) + + def GetTargetIdentifier(self): + """Returns the identifier (as a string) that this is a comment for. + + Note that this uses method uses GetIdentifierForToken to get the full + identifier, even if broken up by whitespace, newlines, or comments, + and thus could be longer than GetTargetToken().string. + + Returns: + The identifier for the token this comment is for. + """ + token = self.GetTargetToken() + if token: + return tokenutil.GetIdentifierForToken(token) + + def GetTargetToken(self): + """Get this comment's target token. + + Returns: + The token that is the target of this comment, or None if there isn't one. + """ + + # File overviews describe the file, not a token. + if self.HasFlag('fileoverview'): + return + + skip_types = frozenset([ + Type.WHITESPACE, + Type.BLANK_LINE, + Type.START_PAREN]) + + target_types = frozenset([ + Type.FUNCTION_NAME, + Type.IDENTIFIER, + Type.SIMPLE_LVALUE]) + + token = self.end_token.next + while token: + if token.type in target_types: + return token + + # Handles the case of a comment on "var foo = ...' + if token.IsKeyword('var'): + next_code_token = tokenutil.CustomSearch( + token, + lambda t: t.type not in Type.NON_CODE_TYPES) + + if (next_code_token and + next_code_token.IsType(Type.SIMPLE_LVALUE)): + return next_code_token + + return + + # Handles the case of a comment on "function foo () {}" + if token.type is Type.FUNCTION_DECLARATION: + next_code_token = tokenutil.CustomSearch( + token, + lambda t: t.type not in Type.NON_CODE_TYPES) + + if next_code_token.IsType(Type.FUNCTION_NAME): + return next_code_token + + return + + # Skip types will end the search. + if token.type not in skip_types: + return + + token = token.next + + def CompareParameters(self, params): + """Computes the edit distance and list from the function params to the docs. + + Uses the Levenshtein edit distance algorithm, with code modified from + http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python + + Args: + params: The parameter list for the function declaration. + + Returns: + The edit distance, the edit list. 
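+
+      For example, comparing documented params ['a', 'b'] against declared
+      params ['a', 'c'] yields a distance of 1 (a single substitution).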
+    """
+    source_len, target_len = len(self.ordered_params), len(params)
+    edit_lists = [[]]
+    distance = [[]]
+    for i in range(target_len+1):
+      edit_lists[0].append(['I'] * i)
+      distance[0].append(i)
+
+    for j in range(1, source_len+1):
+      edit_lists.append([['D'] * j])
+      distance.append([j])
+
+    for i in range(source_len):
+      for j in range(target_len):
+        cost = 1
+        if self.ordered_params[i] == params[j]:
+          cost = 0
+
+        deletion = distance[i][j+1] + 1
+        insertion = distance[i+1][j] + 1
+        substitution = distance[i][j] + cost
+
+        edit_list = None
+        best = None
+        if deletion <= insertion and deletion <= substitution:
+          # Deletion is best.
+          best = deletion
+          edit_list = list(edit_lists[i][j+1])
+          edit_list.append('D')
+
+        elif insertion <= substitution:
+          # Insertion is best.
+          best = insertion
+          edit_list = list(edit_lists[i+1][j])
+          edit_list.append('I')
+          edit_lists[i+1].append(edit_list)
+
+        else:
+          # Substitution is best.
+          best = substitution
+          edit_list = list(edit_lists[i][j])
+          if cost:
+            edit_list.append('S')
+          else:
+            edit_list.append('=')
+
+        edit_lists[i+1].append(edit_list)
+        distance[i+1].append(best)
+
+    return distance[source_len][target_len], edit_lists[source_len][target_len]
+
+  def __repr__(self):
+    """Returns a string representation of this object.
+
+    Returns:
+      A string representation of this object.
+    """
+    return '<DocComment: %s, %s>' % (
+        str(self.ordered_params), str(self.__flags))
+
+
+#
+# Helper methods used by DocFlag and DocComment to parse out flag information.
+#
+
+
+def _GetMatchingEndBraceAndContents(start_brace):
+  """Returns the matching end brace and contents between the two braces.
+
+  If any FLAG_ENDING_TYPE token is encountered before a matching end brace, then
+  that token is used as the matching ending token. Contents will have all
+  comment prefixes stripped out of them, and all comment prefixes in between the
+  start and end tokens will be split out into separate DOC_PREFIX tokens.
+
+  Args:
+    start_brace: The DOC_START_BRACE token immediately before desired contents.
+
+  Returns:
+    The matching ending token (DOC_END_BRACE or FLAG_ENDING_TYPE) and a string
+    of the contents between the matching tokens, minus any comment prefixes.
+  """
+  open_count = 1
+  close_count = 0
+  contents = []
+
+  # We don't consider the start brace part of the type string.
+  token = start_brace.next
+  while open_count != close_count:
+    if token.type == Type.DOC_START_BRACE:
+      open_count += 1
+    elif token.type == Type.DOC_END_BRACE:
+      close_count += 1
+
+    if token.type != Type.DOC_PREFIX:
+      contents.append(token.string)
+
+    if token.type in Type.FLAG_ENDING_TYPES:
+      break
+    token = token.next
+
+  # Don't include the end token (end brace, end doc comment, etc.) in type.
+  token = token.previous
+  contents = contents[:-1]
+
+  return token, ''.join(contents)
+
+
+def _GetNextPartialIdentifierToken(start_token):
+  """Returns the first token having identifier as substring after a token.
+
+  Searches each token after the start to see if it contains an identifier.
+  If found, token is returned. If no identifier is found returns None.
+  Search is abandoned when a FLAG_ENDING_TYPE token is found.
+
+  Args:
+    start_token: The token to start searching after.
+
+  Returns:
+    The token found containing identifier, None otherwise.
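+    Only COMMENT tokens whose string contains an identifier are returned;
+    tokens of other types are skipped.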
+ """ + token = start_token.next + + while token and token.type not in Type.FLAG_ENDING_TYPES: + match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.search( + token.string) + if match is not None and token.type == Type.COMMENT: + return token + + token = token.next + + return None + + +def _GetEndTokenAndContents(start_token): + """Returns last content token and all contents before FLAG_ENDING_TYPE token. + + Comment prefixes are split into DOC_PREFIX tokens and stripped from the + returned contents. + + Args: + start_token: The token immediately before the first content token. + + Returns: + The last content token and a string of all contents including start and + end tokens, with comment prefixes stripped. + """ + iterator = start_token + last_line = iterator.line_number + last_token = None + contents = '' + doc_depth = 0 + while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0: + if (iterator.IsFirstInLine() and + DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)): + # If we have a blank comment line, consider that an implicit + # ending of the description. This handles a case like: + # + # * @return {boolean} True + # * + # * Note: This is a sentence. + # + # The note is not part of the @return description, but there was + # no definitive ending token. Rather there was a line containing + # only a doc comment prefix or whitespace. + break + + # b/2983692 + # don't prematurely match against a @flag if inside a doc flag + # need to think about what is the correct behavior for unterminated + # inline doc flags + if (iterator.type == Type.DOC_START_BRACE and + iterator.next.type == Type.DOC_INLINE_FLAG): + doc_depth += 1 + elif (iterator.type == Type.DOC_END_BRACE and + doc_depth > 0): + doc_depth -= 1 + + if iterator.type in Type.FLAG_DESCRIPTION_TYPES: + contents += iterator.string + last_token = iterator + + iterator = iterator.next + if iterator.line_number != last_line: + contents += '\n' + last_line = iterator.line_number + + end_token = last_token + if DocFlag.EMPTY_STRING.match(contents): + contents = None + else: + # Strip trailing newline. + contents = contents[:-1] + + return end_token, contents + + +class Function(object): + """Data about a JavaScript function. + + Attributes: + block_depth: Block depth the function began at. + doc: The DocComment associated with the function. + has_return: If the function has a return value. + has_this: If the function references the 'this' object. + is_assigned: If the function is part of an assignment. + is_constructor: If the function is a constructor. + name: The name of the function, whether given in the function keyword or + as the lvalue the function is assigned to. + start_token: First token of the function (the function' keyword token). + end_token: Last token of the function (the closing '}' token). + parameters: List of parameter names. + """ + + def __init__(self, block_depth, is_assigned, doc, name): + self.block_depth = block_depth + self.is_assigned = is_assigned + self.is_constructor = doc and doc.HasFlag('constructor') + self.is_interface = doc and doc.HasFlag('interface') + self.has_return = False + self.has_throw = False + self.has_this = False + self.name = name + self.doc = doc + self.start_token = None + self.end_token = None + self.parameters = None + + +class StateTracker(object): + """EcmaScript state tracker. + + Tracks block depth, function names, etc. within an EcmaScript token stream. 
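+
+  Language-specific decisions such as InTopLevel() and GetBlockType() are
+  deferred to subclasses (e.g. JavaScriptStateTracker).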
+ """ + + OBJECT_LITERAL = 'o' + CODE = 'c' + + def __init__(self, doc_flag=DocFlag): + """Initializes a JavaScript token stream state tracker. + + Args: + doc_flag: An optional custom DocFlag used for validating + documentation flags. + """ + self._doc_flag = doc_flag + self.Reset() + + def Reset(self): + """Resets the state tracker to prepare for processing a new page.""" + self._block_depth = 0 + self._is_block_close = False + self._paren_depth = 0 + self._function_stack = [] + self._functions_by_name = {} + self._last_comment = None + self._doc_comment = None + self._cumulative_params = None + self._block_types = [] + self._last_non_space_token = None + self._last_line = None + self._first_token = None + self._documented_identifiers = set() + self._variables_in_scope = [] + + def DocFlagPass(self, start_token, error_handler): + """Parses doc flags. + + This pass needs to be executed before the aliaspass and we don't want to do + a full-blown statetracker dry run for these. + + Args: + start_token: The token at which to start iterating + error_handler: An error handler for error reporting. + """ + if not start_token: + return + doc_flag_types = (Type.DOC_FLAG, Type.DOC_INLINE_FLAG) + for token in start_token: + if token.type in doc_flag_types: + token.attached_object = self._doc_flag(token, error_handler) + + def InFunction(self): + """Returns true if the current token is within a function. + + Returns: + True if the current token is within a function. + """ + return bool(self._function_stack) + + def InConstructor(self): + """Returns true if the current token is within a constructor. + + Returns: + True if the current token is within a constructor. + """ + return self.InFunction() and self._function_stack[-1].is_constructor + + def InInterfaceMethod(self): + """Returns true if the current token is within an interface method. + + Returns: + True if the current token is within an interface method. + """ + if self.InFunction(): + if self._function_stack[-1].is_interface: + return True + else: + name = self._function_stack[-1].name + prototype_index = name.find('.prototype.') + if prototype_index != -1: + class_function_name = name[0:prototype_index] + if (class_function_name in self._functions_by_name and + self._functions_by_name[class_function_name].is_interface): + return True + + return False + + def InTopLevelFunction(self): + """Returns true if the current token is within a top level function. + + Returns: + True if the current token is within a top level function. + """ + return len(self._function_stack) == 1 and self.InTopLevel() + + def InAssignedFunction(self): + """Returns true if the current token is within a function variable. + + Returns: + True if if the current token is within a function variable + """ + return self.InFunction() and self._function_stack[-1].is_assigned + + def IsFunctionOpen(self): + """Returns true if the current token is a function block open. + + Returns: + True if the current token is a function block open. + """ + return (self._function_stack and + self._function_stack[-1].block_depth == self._block_depth - 1) + + def IsFunctionClose(self): + """Returns true if the current token is a function block close. + + Returns: + True if the current token is a function block close. + """ + return (self._function_stack and + self._function_stack[-1].block_depth == self._block_depth) + + def InBlock(self): + """Returns true if the current token is within a block. + + Returns: + True if the current token is within a block. 
+ """ + return bool(self._block_depth) + + def IsBlockClose(self): + """Returns true if the current token is a block close. + + Returns: + True if the current token is a block close. + """ + return self._is_block_close + + def InObjectLiteral(self): + """Returns true if the current token is within an object literal. + + Returns: + True if the current token is within an object literal. + """ + return self._block_depth and self._block_types[-1] == self.OBJECT_LITERAL + + def InObjectLiteralDescendant(self): + """Returns true if the current token has an object literal ancestor. + + Returns: + True if the current token has an object literal ancestor. + """ + return self.OBJECT_LITERAL in self._block_types + + def InParentheses(self): + """Returns true if the current token is within parentheses. + + Returns: + True if the current token is within parentheses. + """ + return bool(self._paren_depth) + + def ParenthesesDepth(self): + """Returns the number of parens surrounding the token. + + Returns: + The number of parenthesis surrounding the token. + """ + return self._paren_depth + + def BlockDepth(self): + """Returns the number of blocks in which the token is nested. + + Returns: + The number of blocks in which the token is nested. + """ + return self._block_depth + + def FunctionDepth(self): + """Returns the number of functions in which the token is nested. + + Returns: + The number of functions in which the token is nested. + """ + return len(self._function_stack) + + def InTopLevel(self): + """Whether we are at the top level in the class. + + This function call is language specific. In some languages like + JavaScript, a function is top level if it is not inside any parenthesis. + In languages such as ActionScript, a function is top level if it is directly + within a class. + """ + raise TypeError('Abstract method InTopLevel not implemented') + + def GetBlockType(self, token): + """Determine the block type given a START_BLOCK token. + + Code blocks come after parameters, keywords like else, and closing parens. + + Args: + token: The current token. Can be assumed to be type START_BLOCK. + Returns: + Code block type for current token. + """ + raise TypeError('Abstract method GetBlockType not implemented') + + def GetParams(self): + """Returns the accumulated input params as an array. + + In some EcmasSript languages, input params are specified like + (param:Type, param2:Type2, ...) + in other they are specified just as + (param, param2) + We handle both formats for specifying parameters here and leave + it to the compilers for each language to detect compile errors. + This allows more code to be reused between lint checkers for various + EcmaScript languages. + + Returns: + The accumulated input params as an array. + """ + params = [] + if self._cumulative_params: + params = re.compile(r'\s+').sub('', self._cumulative_params).split(',') + # Strip out the type from parameters of the form name:Type. + params = map(lambda param: param.split(':')[0], params) + + return params + + def GetLastComment(self): + """Return the last plain comment that could be used as documentation. + + Returns: + The last plain comment that could be used as documentation. + """ + return self._last_comment + + def GetDocComment(self): + """Return the most recent applicable documentation comment. + + Returns: + The last applicable documentation comment. + """ + return self._doc_comment + + def HasDocComment(self, identifier): + """Returns whether the identifier has been documented yet. 
+ + Args: + identifier: The identifier. + + Returns: + Whether the identifier has been documented yet. + """ + return identifier in self._documented_identifiers + + def InDocComment(self): + """Returns whether the current token is in a doc comment. + + Returns: + Whether the current token is in a doc comment. + """ + return self._doc_comment and self._doc_comment.end_token is None + + def GetDocFlag(self): + """Returns the current documentation flags. + + Returns: + The current documentation flags. + """ + return self._doc_flag + + def IsTypeToken(self, t): + if self.InDocComment() and t.type not in (Type.START_DOC_COMMENT, + Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX): + f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT], + None, True) + if (f and f.attached_object.type_start_token is not None and + f.attached_object.type_end_token is not None): + return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and + tokenutil.Compare(t, f.attached_object.type_end_token) < 0) + return False + + def GetFunction(self): + """Return the function the current code block is a part of. + + Returns: + The current Function object. + """ + if self._function_stack: + return self._function_stack[-1] + + def GetBlockDepth(self): + """Return the block depth. + + Returns: + The current block depth. + """ + return self._block_depth + + def GetLastNonSpaceToken(self): + """Return the last non whitespace token.""" + return self._last_non_space_token + + def GetLastLine(self): + """Return the last line.""" + return self._last_line + + def GetFirstToken(self): + """Return the very first token in the file.""" + return self._first_token + + def IsVariableInScope(self, token_string): + """Checks if string is variable in current scope. + + For given string it checks whether the string is a defined variable + (including function param) in current state. + + E.g. if variables defined (variables in current scope) is docs + then docs, docs.length etc will be considered as variable in current + scope. This will help in avoding extra goog.require for variables. + + Args: + token_string: String to check if its is a variable in current scope. + + Returns: + true if given string is a variable in current scope. + """ + for variable in self._variables_in_scope: + if (token_string == variable + or token_string.startswith(variable + '.')): + return True + + return False + + def HandleToken(self, token, last_non_space_token): + """Handles the given token and updates state. + + Args: + token: The token to handle. + last_non_space_token: + """ + self._is_block_close = False + + if not self._first_token: + self._first_token = token + + # Track block depth. + type = token.type + if type == Type.START_BLOCK: + self._block_depth += 1 + + # Subclasses need to handle block start very differently because + # whether a block is a CODE or OBJECT_LITERAL block varies significantly + # by language. + self._block_types.append(self.GetBlockType(token)) + + # When entering a function body, record its parameters. + if self.InFunction(): + function = self._function_stack[-1] + if self._block_depth == function.block_depth + 1: + function.parameters = self.GetParams() + + # Track block depth. + elif type == Type.END_BLOCK: + self._is_block_close = not self.InObjectLiteral() + self._block_depth -= 1 + self._block_types.pop() + + # Track parentheses depth. + elif type == Type.START_PAREN: + self._paren_depth += 1 + + # Track parentheses depth. 
+ elif type == Type.END_PAREN: + self._paren_depth -= 1 + + elif type == Type.COMMENT: + self._last_comment = token.string + + elif type == Type.START_DOC_COMMENT: + self._last_comment = None + self._doc_comment = DocComment(token) + + elif type == Type.END_DOC_COMMENT: + self._doc_comment.end_token = token + + elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG): + # Don't overwrite flags if they were already parsed in a previous pass. + if token.attached_object is None: + flag = self._doc_flag(token) + token.attached_object = flag + else: + flag = token.attached_object + self._doc_comment.AddFlag(flag) + + if flag.flag_type == 'suppress': + self._doc_comment.AddSuppression(token) + + elif type == Type.FUNCTION_DECLARATION: + last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None, + True) + doc = None + # Only top-level functions are eligible for documentation. + if self.InTopLevel(): + doc = self._doc_comment + + name = '' + is_assigned = last_code and (last_code.IsOperator('=') or + last_code.IsOperator('||') or last_code.IsOperator('&&') or + (last_code.IsOperator(':') and not self.InObjectLiteral())) + if is_assigned: + # TODO(robbyw): This breaks for x[2] = ... + # Must use loop to find full function name in the case of line-wrapped + # declarations (bug 1220601) like: + # my.function.foo. + # bar = function() ... + identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True) + while identifier and tokenutil.IsIdentifierOrDot(identifier): + name = identifier.string + name + # Traverse behind us, skipping whitespace and comments. + while True: + identifier = identifier.previous + if not identifier or not identifier.type in Type.NON_CODE_TYPES: + break + + else: + next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES) + while next_token and next_token.IsType(Type.FUNCTION_NAME): + name += next_token.string + next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2) + + function = Function(self._block_depth, is_assigned, doc, name) + function.start_token = token + + self._function_stack.append(function) + self._functions_by_name[name] = function + + # Add a delimiter in stack for scope variables to define start of + # function. This helps in popping variables of this function when + # function declaration ends. 
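+      # The empty string appended below serves as that delimiter.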
+ self._variables_in_scope.append('') + + elif type == Type.START_PARAMETERS: + self._cumulative_params = '' + + elif type == Type.PARAMETERS: + self._cumulative_params += token.string + self._variables_in_scope.extend(self.GetParams()) + + elif type == Type.KEYWORD and token.string == 'return': + next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES) + if not next_token.IsType(Type.SEMICOLON): + function = self.GetFunction() + if function: + function.has_return = True + + elif type == Type.KEYWORD and token.string == 'throw': + function = self.GetFunction() + if function: + function.has_throw = True + + elif type == Type.KEYWORD and token.string == 'var': + function = self.GetFunction() + next_token = tokenutil.Search(token, [Type.IDENTIFIER, + Type.SIMPLE_LVALUE]) + + if next_token: + if next_token.type == Type.SIMPLE_LVALUE: + self._variables_in_scope.append(next_token.values['identifier']) + else: + self._variables_in_scope.append(next_token.string) + + elif type == Type.SIMPLE_LVALUE: + identifier = token.values['identifier'] + jsdoc = self.GetDocComment() + if jsdoc: + self._documented_identifiers.add(identifier) + + self._HandleIdentifier(identifier, True) + + elif type == Type.IDENTIFIER: + self._HandleIdentifier(token.string, False) + + # Detect documented non-assignments. + next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES) + if next_token and next_token.IsType(Type.SEMICOLON): + if (self._last_non_space_token and + self._last_non_space_token.IsType(Type.END_DOC_COMMENT)): + self._documented_identifiers.add(token.string) + + def _HandleIdentifier(self, identifier, is_assignment): + """Process the given identifier. + + Currently checks if it references 'this' and annotates the function + accordingly. + + Args: + identifier: The identifer to process. + is_assignment: Whether the identifer is being written to. + """ + if identifier == 'this' or identifier.startswith('this.'): + function = self.GetFunction() + if function: + function.has_this = True + + def HandleAfterToken(self, token): + """Handle updating state after a token has been checked. + + This function should be used for destructive state changes such as + deleting a tracked object. + + Args: + token: The token to handle. + """ + type = token.type + if type == Type.SEMICOLON or type == Type.END_PAREN or ( + type == Type.END_BRACKET and + self._last_non_space_token.type not in ( + Type.SINGLE_QUOTE_STRING_END, Type.DOUBLE_QUOTE_STRING_END)): + # We end on any numeric array index, but keep going for string based + # array indices so that we pick up manually exported identifiers. + self._doc_comment = None + self._last_comment = None + + elif type == Type.END_BLOCK: + self._doc_comment = None + self._last_comment = None + + if self.InFunction() and self.IsFunctionClose(): + # TODO(robbyw): Detect the function's name for better errors. + function = self._function_stack.pop() + function.end_token = token + + # Pop all variables till delimiter ('') those were defined in the + # function being closed so make them out of scope. 
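+      # Variables are popped until the '' delimiter is reached; the delimiter
+      # itself is removed just below.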
+ while self._variables_in_scope and self._variables_in_scope[-1]: + self._variables_in_scope.pop() + + # Pop delimiter + if self._variables_in_scope: + self._variables_in_scope.pop() + + elif type == Type.END_PARAMETERS and self._doc_comment: + self._doc_comment = None + self._last_comment = None + + if not token.IsAnyType(Type.WHITESPACE, Type.BLANK_LINE): + self._last_non_space_token = token + + self._last_line = token.line diff --git a/tools/closure_linter/build/lib/closure_linter/statetracker_test.py b/tools/closure_linter/build/lib/closure_linter/statetracker_test.py new file mode 100644 index 0000000..494dc64 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/statetracker_test.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for the statetracker module.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + + + +import unittest as googletest + +from closure_linter import javascripttokens +from closure_linter import statetracker +from closure_linter import testutil + + +class _FakeDocFlag(object): + + def __repr__(self): + return '@%s %s' % (self.flag_type, self.name) + + +class IdentifierTest(googletest.TestCase): + + def testJustIdentifier(self): + a = javascripttokens.JavaScriptToken( + 'abc', javascripttokens.JavaScriptTokenType.IDENTIFIER, 'abc', 1) + + st = statetracker.StateTracker() + st.HandleToken(a, None) + + +class DocCommentTest(googletest.TestCase): + + @staticmethod + def _MakeDocFlagFake(flag_type, name=None): + flag = _FakeDocFlag() + flag.flag_type = flag_type + flag.name = name + return flag + + def testDocFlags(self): + comment = statetracker.DocComment(None) + + a = self._MakeDocFlagFake('param', 'foo') + comment.AddFlag(a) + + b = self._MakeDocFlagFake('param', '') + comment.AddFlag(b) + + c = self._MakeDocFlagFake('param', 'bar') + comment.AddFlag(c) + + self.assertEquals( + ['foo', 'bar'], + comment.ordered_params) + + self.assertEquals( + [a, b, c], + comment.GetDocFlags()) + + def testInvalidate(self): + comment = statetracker.DocComment(None) + + self.assertFalse(comment.invalidated) + self.assertFalse(comment.IsInvalidated()) + + comment.Invalidate() + + self.assertTrue(comment.invalidated) + self.assertTrue(comment.IsInvalidated()) + + def testSuppressionOnly(self): + comment = statetracker.DocComment(None) + + self.assertFalse(comment.SuppressionOnly()) + comment.AddFlag(self._MakeDocFlagFake('suppress')) + self.assertTrue(comment.SuppressionOnly()) + comment.AddFlag(self._MakeDocFlagFake('foo')) + self.assertFalse(comment.SuppressionOnly()) + + def testRepr(self): + comment = statetracker.DocComment(None) + comment.AddFlag(self._MakeDocFlagFake('param', 'foo')) + comment.AddFlag(self._MakeDocFlagFake('param', 'bar')) + + self.assertEquals( + '', + repr(comment)) + + def testDocFlagParam(self): + comment = self._ParseComment(""" + /** + * @param 
{string} [name] Name of customer. + */""") + flag = comment.GetFlag('param') + self.assertEquals('string', flag.type) + self.assertEquals('string', flag.jstype.ToString()) + self.assertEquals('[name]', flag.name) + + def _ParseComment(self, script): + """Parse a script that contains one comment and return it.""" + _, comments = testutil.ParseFunctionsAndComments(script) + self.assertEquals(1, len(comments)) + return comments[0] + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/build/lib/closure_linter/strict_test.py b/tools/closure_linter/build/lib/closure_linter/strict_test.py new file mode 100644 index 0000000..2634456 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/strict_test.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# Copyright 2013 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for gjslint --strict. + +Tests errors that can be thrown by gjslint when in strict mode. +""" + + + +import unittest + +import gflags as flags +import unittest as googletest + +from closure_linter import errors +from closure_linter import runner +from closure_linter.common import erroraccumulator + +flags.FLAGS.strict = True + + +class StrictTest(unittest.TestCase): + """Tests scenarios where strict generates warnings.""" + + def testUnclosedString(self): + """Tests warnings are reported when nothing is disabled. + + b/11450054. + """ + original = [ + 'bug = function() {', + ' (\'foo\'\');', + '};', + '', + ] + + expected = [errors.FILE_DOES_NOT_PARSE, errors.MULTI_LINE_STRING, + errors.FILE_IN_BLOCK] + self._AssertErrors(original, expected) + + def _AssertErrors(self, original, expected_errors): + """Asserts that the error fixer corrects original to expected.""" + + # Trap gjslint's output parse it to get messages added. + error_accumulator = erroraccumulator.ErrorAccumulator() + runner.Run('testing.js', error_accumulator, source=original) + error_nums = [e.code for e in error_accumulator.GetErrors()] + + error_nums.sort() + expected_errors.sort() + self.assertListEqual(error_nums, expected_errors) + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/build/lib/closure_linter/testutil.py b/tools/closure_linter/build/lib/closure_linter/testutil.py new file mode 100644 index 0000000..f7084ee --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/testutil.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utility functions for testing gjslint components.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + +import StringIO + +from closure_linter import ecmametadatapass +from closure_linter import javascriptstatetracker +from closure_linter import javascripttokenizer + + +def TokenizeSource(source): + """Convert a source into a string of tokens. + + Args: + source: A source file as a string or file-like object (iterates lines). + + Returns: + The first token of the resulting token stream. + """ + + if isinstance(source, basestring): + source = StringIO.StringIO(source) + + tokenizer = javascripttokenizer.JavaScriptTokenizer() + return tokenizer.TokenizeFile(source) + + +def TokenizeSourceAndRunEcmaPass(source): + """Tokenize a source and run the EcmaMetaDataPass on it. + + Args: + source: A source file as a string or file-like object (iterates lines). + + Returns: + The first token of the resulting token stream. + """ + start_token = TokenizeSource(source) + ecma_pass = ecmametadatapass.EcmaMetaDataPass() + ecma_pass.Process(start_token) + return start_token + + +def ParseFunctionsAndComments(source, error_handler=None): + """Run the tokenizer and tracker and return comments and functions found. + + Args: + source: A source file as a string or file-like object (iterates lines). + error_handler: An error handler. + + Returns: + The functions and comments as a tuple. + """ + start_token = TokenizeSourceAndRunEcmaPass(source) + + tracker = javascriptstatetracker.JavaScriptStateTracker() + if error_handler is not None: + tracker.DocFlagPass(start_token, error_handler) + + functions = [] + comments = [] + for token in start_token: + tracker.HandleToken(token, tracker.GetLastNonSpaceToken()) + + function = tracker.GetFunction() + if function and function not in functions: + functions.append(function) + + comment = tracker.GetDocComment() + if comment and comment not in comments: + comments.append(comment) + + tracker.HandleAfterToken(token) + + return functions, comments diff --git a/tools/closure_linter/build/lib/closure_linter/tokenutil.py b/tools/closure_linter/build/lib/closure_linter/tokenutil.py new file mode 100644 index 0000000..11e3ccc --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/tokenutil.py @@ -0,0 +1,697 @@ +#!/usr/bin/env python +# +# Copyright 2007 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Token utility functions.""" + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)') + +import copy +import StringIO + +from closure_linter.common import tokens +from closure_linter.javascripttokens import JavaScriptToken +from closure_linter.javascripttokens import JavaScriptTokenType + +# Shorthand +Type = tokens.TokenType + + +def GetFirstTokenInSameLine(token): + """Returns the first token in the same line as token. + + Args: + token: Any token in the line. + + Returns: + The first token in the same line as token. + """ + while not token.IsFirstInLine(): + token = token.previous + return token + + +def GetFirstTokenInPreviousLine(token): + """Returns the first token in the previous line as token. + + Args: + token: Any token in the line. + + Returns: + The first token in the previous line as token, or None if token is on the + first line. + """ + first_in_line = GetFirstTokenInSameLine(token) + if first_in_line.previous: + return GetFirstTokenInSameLine(first_in_line.previous) + + return None + + +def GetLastTokenInSameLine(token): + """Returns the last token in the same line as token. + + Args: + token: Any token in the line. + + Returns: + The last token in the same line as token. + """ + while not token.IsLastInLine(): + token = token.next + return token + + +def GetAllTokensInSameLine(token): + """Returns all tokens in the same line as the given token. + + Args: + token: Any token in the line. + + Returns: + All tokens on the same line as the given token. + """ + first_token = GetFirstTokenInSameLine(token) + last_token = GetLastTokenInSameLine(token) + + tokens_in_line = [] + while first_token != last_token: + tokens_in_line.append(first_token) + first_token = first_token.next + tokens_in_line.append(last_token) + + return tokens_in_line + + +def CustomSearch(start_token, func, end_func=None, distance=None, + reverse=False): + """Returns the first token where func is True within distance of this token. + + Args: + start_token: The token to start searching from + func: The function to call to test a token for applicability + end_func: The function to call to test a token to determine whether to abort + the search. + distance: The number of tokens to look through before failing search. Must + be positive. If unspecified, will search until the end of the token + chain + reverse: When true, search the tokens before this one instead of the tokens + after it + + Returns: + The first token matching func within distance of this token, or None if no + such token is found. + """ + token = start_token + if reverse: + while token and (distance is None or distance > 0): + previous = token.previous + if previous: + if func(previous): + return previous + if end_func and end_func(previous): + return None + + token = previous + if distance is not None: + distance -= 1 + + else: + while token and (distance is None or distance > 0): + next_token = token.next + if next_token: + if func(next_token): + return next_token + if end_func and end_func(next_token): + return None + + token = next_token + if distance is not None: + distance -= 1 + + return None + + +def Search(start_token, token_types, distance=None, reverse=False): + """Returns the first token of type in token_types within distance. + + Args: + start_token: The token to start searching from + token_types: The allowable types of the token being searched for + distance: The number of tokens to look through before failing search. Must + be positive. 
If unspecified, will search until the end of the token + chain + reverse: When true, search the tokens before this one instead of the tokens + after it + + Returns: + The first token of any type in token_types within distance of this token, or + None if no such token is found. + """ + return CustomSearch(start_token, lambda token: token.IsAnyType(token_types), + None, distance, reverse) + + +def SearchExcept(start_token, token_types, distance=None, reverse=False): + """Returns the first token not of any type in token_types within distance. + + Args: + start_token: The token to start searching from + token_types: The unallowable types of the token being searched for + distance: The number of tokens to look through before failing search. Must + be positive. If unspecified, will search until the end of the token + chain + reverse: When true, search the tokens before this one instead of the tokens + after it + + Returns: + The first token of any type in token_types within distance of this token, or + None if no such token is found. + """ + return CustomSearch(start_token, + lambda token: not token.IsAnyType(token_types), + None, distance, reverse) + + +def SearchUntil(start_token, token_types, end_types, distance=None, + reverse=False): + """Returns the first token of type in token_types before a token of end_type. + + Args: + start_token: The token to start searching from. + token_types: The allowable types of the token being searched for. + end_types: Types of tokens to abort search if we find. + distance: The number of tokens to look through before failing search. Must + be positive. If unspecified, will search until the end of the token + chain + reverse: When true, search the tokens before this one instead of the tokens + after it + + Returns: + The first token of any type in token_types within distance of this token + before any tokens of type in end_type, or None if no such token is found. + """ + return CustomSearch(start_token, lambda token: token.IsAnyType(token_types), + lambda token: token.IsAnyType(end_types), + distance, reverse) + + +def DeleteToken(token): + """Deletes the given token from the linked list. + + Args: + token: The token to delete + """ + # When deleting a token, we do not update the deleted token itself to make + # sure the previous and next pointers are still pointing to tokens which are + # not deleted. Also it is very hard to keep track of all previously deleted + # tokens to update them when their pointers become invalid. So we add this + # flag that any token linked list iteration logic can skip deleted node safely + # when its current token is deleted. + token.is_deleted = True + if token.previous: + token.previous.next = token.next + + if token.next: + token.next.previous = token.previous + + following_token = token.next + while following_token and following_token.metadata.last_code == token: + following_token.metadata.last_code = token.metadata.last_code + following_token = following_token.next + + +def DeleteTokens(token, token_count): + """Deletes the given number of tokens starting with the given token. + + Args: + token: The token to start deleting at. + token_count: The total number of tokens to delete. + """ + for i in xrange(1, token_count): + DeleteToken(token.next) + DeleteToken(token) + + +def InsertTokenBefore(new_token, token): + """Insert new_token before token. 
+ + Args: + new_token: A token to be added to the stream + token: A token already in the stream + """ + new_token.next = token + new_token.previous = token.previous + + new_token.metadata = copy.copy(token.metadata) + + if new_token.IsCode(): + old_last_code = token.metadata.last_code + following_token = token + while (following_token and + following_token.metadata.last_code == old_last_code): + following_token.metadata.last_code = new_token + following_token = following_token.next + + token.previous = new_token + if new_token.previous: + new_token.previous.next = new_token + + if new_token.start_index is None: + if new_token.line_number == token.line_number: + new_token.start_index = token.start_index + else: + previous_token = new_token.previous + if previous_token: + new_token.start_index = (previous_token.start_index + + len(previous_token.string)) + else: + new_token.start_index = 0 + + iterator = new_token.next + while iterator and iterator.line_number == new_token.line_number: + iterator.start_index += len(new_token.string) + iterator = iterator.next + + +def InsertTokenAfter(new_token, token): + """Insert new_token after token. + + Args: + new_token: A token to be added to the stream + token: A token already in the stream + """ + new_token.previous = token + new_token.next = token.next + + new_token.metadata = copy.copy(token.metadata) + + if token.IsCode(): + new_token.metadata.last_code = token + + if new_token.IsCode(): + following_token = token.next + while following_token and following_token.metadata.last_code == token: + following_token.metadata.last_code = new_token + following_token = following_token.next + + token.next = new_token + if new_token.next: + new_token.next.previous = new_token + + if new_token.start_index is None: + if new_token.line_number == token.line_number: + new_token.start_index = token.start_index + len(token.string) + else: + new_token.start_index = 0 + + iterator = new_token.next + while iterator and iterator.line_number == new_token.line_number: + iterator.start_index += len(new_token.string) + iterator = iterator.next + + +def InsertTokensAfter(new_tokens, token): + """Insert multiple tokens after token. + + Args: + new_tokens: An array of tokens to be added to the stream + token: A token already in the stream + """ + # TODO(user): It would be nicer to have InsertTokenAfter defer to here + # instead of vice-versa. + current_token = token + for new_token in new_tokens: + InsertTokenAfter(new_token, current_token) + current_token = new_token + + +def InsertSpaceTokenAfter(token): + """Inserts a space token after the given token. + + Args: + token: The token to insert a space token after + + Returns: + A single space token + """ + space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line, + token.line_number) + InsertTokenAfter(space_token, token) + + +def InsertBlankLineAfter(token): + """Inserts a blank line after the given token. + + Args: + token: The token to insert a blank line after + + Returns: + A single space token + """ + blank_token = JavaScriptToken('', Type.BLANK_LINE, '', + token.line_number + 1) + InsertLineAfter(token, [blank_token]) + + +def InsertLineAfter(token, new_tokens): + """Inserts a new line consisting of new_tokens after the given token. + + Args: + token: The token to insert after. + new_tokens: The tokens that will make up the new line. + """ + insert_location = token + for new_token in new_tokens: + InsertTokenAfter(new_token, insert_location) + insert_location = new_token + + # Update all subsequent line numbers. 
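+  # Every token after the inserted line moves down by exactly one line.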
+ next_token = new_tokens[-1].next + while next_token: + next_token.line_number += 1 + next_token = next_token.next + + +def SplitToken(token, position): + """Splits the token into two tokens at position. + + Args: + token: The token to split + position: The position to split at. Will be the beginning of second token. + + Returns: + The new second token. + """ + new_string = token.string[position:] + token.string = token.string[:position] + + new_token = JavaScriptToken(new_string, token.type, token.line, + token.line_number) + InsertTokenAfter(new_token, token) + + return new_token + + +def Compare(token1, token2): + """Compares two tokens and determines their relative order. + + Args: + token1: The first token to compare. + token2: The second token to compare. + + Returns: + A negative integer, zero, or a positive integer as the first token is + before, equal, or after the second in the token stream. + """ + if token2.line_number != token1.line_number: + return token1.line_number - token2.line_number + else: + return token1.start_index - token2.start_index + + +def GoogScopeOrNoneFromStartBlock(token): + """Determines if the given START_BLOCK is part of a goog.scope statement. + + Args: + token: A token of type START_BLOCK. + + Returns: + The goog.scope function call token, or None if such call doesn't exist. + """ + if token.type != JavaScriptTokenType.START_BLOCK: + return None + + # Search for a goog.scope statement, which will be 5 tokens before the + # block. Illustration of the tokens found prior to the start block: + # goog.scope(function() { + # 5 4 3 21 ^ + + maybe_goog_scope = token + for unused_i in xrange(5): + maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and + maybe_goog_scope.previous else None) + if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope': + return maybe_goog_scope + + +def GetTokenRange(start_token, end_token): + """Returns a list of tokens between the two given, inclusive. + + Args: + start_token: Start token in the range. + end_token: End token in the range. + + Returns: + A list of tokens, in order, from start_token to end_token (including start + and end). Returns none if the tokens do not describe a valid range. + """ + + token_range = [] + token = start_token + + while token: + token_range.append(token) + + if token == end_token: + return token_range + + token = token.next + + +def TokensToString(token_iterable): + """Convert a number of tokens into a string. + + Newlines will be inserted whenever the line_number of two neighboring + strings differ. + + Args: + token_iterable: The tokens to turn to a string. + + Returns: + A string representation of the given tokens. + """ + + buf = StringIO.StringIO() + token_list = list(token_iterable) + if not token_list: + return '' + + line_number = token_list[0].line_number + + for token in token_list: + + while line_number < token.line_number: + line_number += 1 + buf.write('\n') + + if line_number > token.line_number: + line_number = token.line_number + buf.write('\n') + + buf.write(token.string) + + return buf.getvalue() + + +def GetPreviousCodeToken(token): + """Returns the code token before the specified token. + + Args: + token: A token. + + Returns: + The code token before the specified token or None if no such token + exists. + """ + + return CustomSearch( + token, + lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES, + reverse=True) + + +def GetNextCodeToken(token): + """Returns the next code token after the specified token. + + Args: + token: A token. 
+ + Returns: + The next code token after the specified token or None if no such token + exists. + """ + + return CustomSearch( + token, + lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES, + reverse=False) + + +def GetIdentifierStart(token): + """Returns the first token in an identifier. + + Given a token which is part of an identifier, returns the token at the start + of the identifier. + + Args: + token: A token which is part of an identifier. + + Returns: + The token at the start of the identifier or None if the identifier was not + of the form 'a.b.c' (e.g. "['a']['b'].c"). + """ + + start_token = token + previous_code_token = GetPreviousCodeToken(token) + + while (previous_code_token and ( + previous_code_token.IsType(JavaScriptTokenType.IDENTIFIER) or + IsDot(previous_code_token))): + start_token = previous_code_token + previous_code_token = GetPreviousCodeToken(previous_code_token) + + if IsDot(start_token): + return None + + return start_token + + +def GetIdentifierForToken(token): + """Get the symbol specified by a token. + + Given a token, this function additionally concatenates any parts of an + identifying symbol being identified that are split by whitespace or a + newline. + + The function will return None if the token is not the first token of an + identifier. + + Args: + token: The first token of a symbol. + + Returns: + The whole symbol, as a string. + """ + + # Search backward to determine if this token is the first token of the + # identifier. If it is not the first token, return None to signal that this + # token should be ignored. + prev_token = token.previous + while prev_token: + if (prev_token.IsType(JavaScriptTokenType.IDENTIFIER) or + IsDot(prev_token)): + return None + + if (prev_token.IsType(tokens.TokenType.WHITESPACE) or + prev_token.IsAnyType(JavaScriptTokenType.COMMENT_TYPES)): + prev_token = prev_token.previous + else: + break + + # A "function foo()" declaration. + if token.type is JavaScriptTokenType.FUNCTION_NAME: + return token.string + + # A "var foo" declaration (if the previous token is 'var') + previous_code_token = GetPreviousCodeToken(token) + + if previous_code_token and previous_code_token.IsKeyword('var'): + return token.string + + # Otherwise, this is potentially a namespaced (goog.foo.bar) identifier that + # could span multiple lines or be broken up by whitespace. We need + # to concatenate. + identifier_types = set([ + JavaScriptTokenType.IDENTIFIER, + JavaScriptTokenType.SIMPLE_LVALUE + ]) + + assert token.type in identifier_types + + # Start with the first token + symbol_tokens = [token] + + if token.next: + for t in token.next: + last_symbol_token = symbol_tokens[-1] + + # A dot is part of the previous symbol. + if IsDot(t): + symbol_tokens.append(t) + continue + + # An identifier is part of the previous symbol if the previous one was a + # dot. + if t.type in identifier_types: + if IsDot(last_symbol_token): + symbol_tokens.append(t) + continue + else: + break + + # Skip any whitespace + if t.type in JavaScriptTokenType.NON_CODE_TYPES: + continue + + # This is the end of the identifier. Stop iterating. + break + + if symbol_tokens: + return ''.join([t.string for t in symbol_tokens]) + + +def GetStringAfterToken(token): + """Get string after token. + + Args: + token: Search will be done after this token. + + Returns: + String if found after token else None (empty string will also + return None). 
+ + Search until end of string as in case of empty string Type.STRING_TEXT is not + present/found and don't want to return next string. + E.g. + a = ''; + b = 'test'; + When searching for string after 'a' if search is not limited by end of string + then it will return 'test' which is not desirable as there is a empty string + before that. + + This will return None for cases where string is empty or no string found + as in both cases there is no Type.STRING_TEXT. + """ + string_token = SearchUntil(token, JavaScriptTokenType.STRING_TEXT, + [JavaScriptTokenType.SINGLE_QUOTE_STRING_END, + JavaScriptTokenType.DOUBLE_QUOTE_STRING_END]) + if string_token: + return string_token.string + else: + return None + + +def IsDot(token): + """Whether the token represents a "dot" operator (foo.bar).""" + return token.type is JavaScriptTokenType.OPERATOR and token.string == '.' + + +def IsIdentifierOrDot(token): + """Whether the token is either an identifier or a '.'.""" + return (token.type in [JavaScriptTokenType.IDENTIFIER, + JavaScriptTokenType.SIMPLE_LVALUE] or + IsDot(token)) diff --git a/tools/closure_linter/build/lib/closure_linter/tokenutil_test.py b/tools/closure_linter/build/lib/closure_linter/tokenutil_test.py new file mode 100644 index 0000000..c7d3854 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/tokenutil_test.py @@ -0,0 +1,297 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for the scopeutil module.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + +import unittest as googletest + +from closure_linter import ecmametadatapass +from closure_linter import javascripttokens +from closure_linter import testutil +from closure_linter import tokenutil + + +class FakeToken(object): + pass + + +class TokenUtilTest(googletest.TestCase): + + def testGetTokenRange(self): + + a = FakeToken() + b = FakeToken() + c = FakeToken() + d = FakeToken() + e = FakeToken() + + a.next = b + b.next = c + c.next = d + + self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d)) + + # This is an error as e does not come after a in the token chain. + self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e)) + + def testTokensToString(self): + + a = FakeToken() + b = FakeToken() + c = FakeToken() + d = FakeToken() + e = FakeToken() + + a.string = 'aaa' + b.string = 'bbb' + c.string = 'ccc' + d.string = 'ddd' + e.string = 'eee' + + a.line_number = 5 + b.line_number = 6 + c.line_number = 6 + d.line_number = 10 + e.line_number = 11 + + self.assertEquals( + 'aaa\nbbbccc\n\n\n\nddd\neee', + tokenutil.TokensToString([a, b, c, d, e])) + + self.assertEquals( + 'ddd\neee\naaa\nbbbccc', + tokenutil.TokensToString([d, e, a, b, c]), + 'Neighboring tokens not in line_number order should have a newline ' + 'between them.') + + def testGetPreviousCodeToken(self): + + tokens = testutil.TokenizeSource(""" +start1. 
// comment + /* another comment */ + end1 +""") + + def _GetTokenStartingWith(token_starts_with): + for t in tokens: + if t.string.startswith(token_starts_with): + return t + + self.assertEquals( + None, + tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1'))) + + self.assertEquals( + '.', + tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string) + + self.assertEquals( + 'start1', + tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('.')).string) + + def testGetNextCodeToken(self): + + tokens = testutil.TokenizeSource(""" +start1. // comment + /* another comment */ + end1 +""") + + def _GetTokenStartingWith(token_starts_with): + for t in tokens: + if t.string.startswith(token_starts_with): + return t + + self.assertEquals( + '.', + tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string) + + self.assertEquals( + 'end1', + tokenutil.GetNextCodeToken(_GetTokenStartingWith('.')).string) + + self.assertEquals( + None, + tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1'))) + + def testGetIdentifierStart(self): + + tokens = testutil.TokenizeSource(""" +start1 . // comment + prototype. /* another comment */ + end1 + +['edge'][case].prototype. + end2 = function() {} +""") + + def _GetTokenStartingWith(token_starts_with): + for t in tokens: + if t.string.startswith(token_starts_with): + return t + + self.assertEquals( + 'start1', + tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string) + + self.assertEquals( + 'start1', + tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string) + + self.assertEquals( + None, + tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2'))) + + def testInsertTokenBefore(self): + + self.AssertInsertTokenAfterBefore(False) + + def testInsertTokenAfter(self): + + self.AssertInsertTokenAfterBefore(True) + + def AssertInsertTokenAfterBefore(self, after): + + new_token = javascripttokens.JavaScriptToken( + 'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1) + + existing_token1 = javascripttokens.JavaScriptToken( + 'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1) + existing_token1.start_index = 0 + existing_token1.metadata = ecmametadatapass.EcmaMetaData() + + existing_token2 = javascripttokens.JavaScriptToken( + ' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1) + existing_token2.start_index = 3 + existing_token2.metadata = ecmametadatapass.EcmaMetaData() + existing_token2.metadata.last_code = existing_token1 + + existing_token1.next = existing_token2 + existing_token2.previous = existing_token1 + + if after: + tokenutil.InsertTokenAfter(new_token, existing_token1) + else: + tokenutil.InsertTokenBefore(new_token, existing_token2) + + self.assertEquals(existing_token1, new_token.previous) + self.assertEquals(existing_token2, new_token.next) + + self.assertEquals(new_token, existing_token1.next) + self.assertEquals(new_token, existing_token2.previous) + + self.assertEquals(existing_token1, new_token.metadata.last_code) + self.assertEquals(new_token, existing_token2.metadata.last_code) + + self.assertEquals(0, existing_token1.start_index) + self.assertEquals(3, new_token.start_index) + self.assertEquals(4, existing_token2.start_index) + + def testGetIdentifierForToken(self): + + tokens = testutil.TokenizeSource(""" +start1.abc.def.prototype. + onContinuedLine + +(start2.abc.def + .hij.klm + .nop) + +start3.abc.def + .hij = function() {}; + +// An absurd multi-liner. +start4.abc.def. + hij. + klm = function() {}; + +start5 . aaa . bbb . 
ccc + shouldntBePartOfThePreviousSymbol + +start6.abc.def ghi.shouldntBePartOfThePreviousSymbol + +var start7 = 42; + +function start8() { + +} + +start9.abc. // why is there a comment here? + def /* another comment */ + shouldntBePart + +start10.abc // why is there a comment here? + .def /* another comment */ + shouldntBePart + +start11.abc. middle1.shouldNotBeIdentifier +""") + + def _GetTokenStartingWith(token_starts_with): + for t in tokens: + if t.string.startswith(token_starts_with): + return t + + self.assertEquals( + 'start1.abc.def.prototype.onContinuedLine', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1'))) + + self.assertEquals( + 'start2.abc.def.hij.klm.nop', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2'))) + + self.assertEquals( + 'start3.abc.def.hij', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3'))) + + self.assertEquals( + 'start4.abc.def.hij.klm', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4'))) + + self.assertEquals( + 'start5.aaa.bbb.ccc', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5'))) + + self.assertEquals( + 'start6.abc.def', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6'))) + + self.assertEquals( + 'start7', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7'))) + + self.assertEquals( + 'start8', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8'))) + + self.assertEquals( + 'start9.abc.def', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9'))) + + self.assertEquals( + 'start10.abc.def', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10'))) + + self.assertIsNone( + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1'))) + + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/build/lib/closure_linter/typeannotation.py b/tools/closure_linter/build/lib/closure_linter/typeannotation.py new file mode 100644 index 0000000..00604c1 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/typeannotation.py @@ -0,0 +1,401 @@ +#!/usr/bin/env python +#*-* coding: utf-8 +"""Closure typeannotation parsing and utilities.""" + + + +from closure_linter import errors +from closure_linter import javascripttokens +from closure_linter.common import error + +# Shorthand +TYPE = javascripttokens.JavaScriptTokenType + + +class TypeAnnotation(object): + """Represents a structured view of a closure type annotation. + + Attribute: + identifier: The name of the type. + key_type: The name part before a colon. + sub_types: The list of sub_types used e.g. for Array.<…> + or_null: The '?' annotation + not_null: The '!' annotation + type_group: If this a a grouping (a|b), but does not include function(a). + return_type: The return type of a function definition. + alias: The actual type set by closurizednamespaceinfo if the identifier uses + an alias to shorten the name. + tokens: An ordered list of tokens used for this type. May contain + TypeAnnotation instances for sub_types, key_type or return_type. + """ + + IMPLICIT_TYPE_GROUP = 2 + + NULLABILITY_UNKNOWN = 2 + + # Frequently used known non-nullable types. + NON_NULLABLE = frozenset([ + 'boolean', 'function', 'number', 'string', 'undefined']) + # Frequently used known nullable types. 
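+  # GetNullability() below treats a bare occurrence of one of these (i.e. no
+  # '!' modifier) as nullable.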
+ NULLABLE_TYPE_WHITELIST = frozenset([ + 'Array', 'Document', 'Element', 'Function', 'Node', 'NodeList', + 'Object']) + + def __init__(self): + self.identifier = '' + self.sub_types = [] + self.or_null = False + self.not_null = False + self.type_group = False + self.alias = None + self.key_type = None + self.record_type = False + self.opt_arg = False + self.return_type = None + self.tokens = [] + + def IsFunction(self): + """Determines whether this is a function definition.""" + return self.identifier == 'function' + + def IsConstructor(self): + """Determines whether this is a function definition for a constructor.""" + key_type = self.sub_types and self.sub_types[0].key_type + return self.IsFunction() and key_type.identifier == 'new' + + def IsRecordType(self): + """Returns True if this type is a record type.""" + return (self.record_type or + bool([t for t in self.sub_types if t.IsRecordType()])) + + def IsVarArgsType(self): + """Determines if the type is a var_args type, i.e. starts with '...'.""" + return self.identifier.startswith('...') or ( + self.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP and + self.sub_types[0].identifier.startswith('...')) + + def IsEmpty(self): + """Returns True if the type is empty.""" + return not self.tokens + + def IsUnknownType(self): + """Returns True if this is the unknown type {?}.""" + return (self.or_null + and not self.identifier + and not self.sub_types + and not self.return_type) + + def Append(self, item): + """Adds a sub_type to this type and finalizes it. + + Args: + item: The TypeAnnotation item to append. + """ + # item is a TypeAnnotation instance, so pylint: disable=protected-access + self.sub_types.append(item._Finalize(self)) + + def __repr__(self): + """Reconstructs the type definition.""" + append = '' + if self.sub_types: + separator = (',' if not self.type_group else '|') + if self.identifier == 'function': + surround = '(%s)' + else: + surround = {False: '{%s}' if self.record_type else '<%s>', + True: '(%s)', + self.IMPLICIT_TYPE_GROUP: '%s'}[self.type_group] + append = surround % separator.join([repr(t) for t in self.sub_types]) + if self.return_type: + append += ':%s' % repr(self.return_type) + append += '=' if self.opt_arg else '' + prefix = '' + ('?' if self.or_null else '') + ('!' if self.not_null else '') + keyword = '%s:' % repr(self.key_type) if self.key_type else '' + return keyword + prefix + '%s' % (self.alias or self.identifier) + append + + def ToString(self): + """Concats the type's tokens to form a string again.""" + ret = [] + for token in self.tokens: + if not isinstance(token, TypeAnnotation): + ret.append(token.string) + else: + ret.append(token.ToString()) + return ''.join(ret) + + def Dump(self, indent=''): + """Dumps this type's structure for debugging purposes.""" + result = [] + for t in self.tokens: + if isinstance(t, TypeAnnotation): + result.append(indent + str(t) + ' =>\n' + t.Dump(indent + ' ')) + else: + result.append(indent + str(t)) + return '\n'.join(result) + + def IterIdentifiers(self): + """Iterates over all identifiers in this type and its subtypes.""" + if self.identifier: + yield self.identifier + for subtype in self.IterTypes(): + for identifier in subtype.IterIdentifiers(): + yield identifier + + def IterTypeGroup(self): + """Iterates over all types in the type group including self. + + Yields: + If this is a implicit or manual type-group: all sub_types. + Otherwise: self + E.g. for @type {Foo.} this will yield only Foo., + for @type {Foo|(Bar|Sample)} this will yield Foo, Bar and Sample. 
+ + """ + if self.type_group: + for sub_type in self.sub_types: + for sub_type in sub_type.IterTypeGroup(): + yield sub_type + else: + yield self + + def IterTypes(self): + """Iterates over each subtype as well as return and key types.""" + if self.return_type: + yield self.return_type + + if self.key_type: + yield self.key_type + + for sub_type in self.sub_types: + yield sub_type + + def GetNullability(self, modifiers=True): + """Computes whether the type may be null. + + Args: + modifiers: Whether the modifiers ? and ! should be considered in the + evaluation. + Returns: + True if the type allows null, False if the type is strictly non nullable + and NULLABILITY_UNKNOWN if the nullability cannot be determined. + """ + + # Explicitly marked nullable types or 'null' are nullable. + if (modifiers and self.or_null) or self.identifier == 'null': + return True + + # Explicitly marked non-nullable types or non-nullable base types: + if ((modifiers and self.not_null) or self.record_type + or self.identifier in self.NON_NULLABLE): + return False + + # A type group is nullable if any of its elements are nullable. + if self.type_group: + maybe_nullable = False + for sub_type in self.sub_types: + nullability = sub_type.GetNullability() + if nullability == self.NULLABILITY_UNKNOWN: + maybe_nullable = nullability + elif nullability: + return True + return maybe_nullable + + # Whitelisted types are nullable. + if self.identifier.rstrip('.') in self.NULLABLE_TYPE_WHITELIST: + return True + + # All other types are unknown (most should be nullable, but + # enums are not and typedefs might not be). + return self.NULLABILITY_UNKNOWN + + def WillAlwaysBeNullable(self): + """Computes whether the ! flag is illegal for this type. + + This is the case if this type or any of the subtypes is marked as + explicitly nullable. + + Returns: + True if the ! flag would be illegal. + """ + if self.or_null or self.identifier == 'null': + return True + + if self.type_group: + return bool([t for t in self.sub_types if t.WillAlwaysBeNullable()]) + + return False + + def _Finalize(self, parent): + """Fixes some parsing issues once the TypeAnnotation is complete.""" + + # Normalize functions whose definition ended up in the key type because + # they defined a return type after a colon. + if self.key_type and self.key_type.identifier == 'function': + current = self.key_type + current.return_type = self + self.key_type = None + # opt_arg never refers to the return type but to the function itself. + current.opt_arg = self.opt_arg + self.opt_arg = False + return current + + # If a typedef just specified the key, it will not end up in the key type. 
+ if parent.record_type and not self.key_type: + current = TypeAnnotation() + current.key_type = self + current.tokens.append(self) + return current + return self + + def FirstToken(self): + """Returns the first token used in this type or any of its subtypes.""" + first = self.tokens[0] + return first.FirstToken() if isinstance(first, TypeAnnotation) else first + + +def Parse(token, token_end, error_handler): + """Parses a type annotation and returns a TypeAnnotation object.""" + return TypeAnnotationParser(error_handler).Parse(token.next, token_end) + + +class TypeAnnotationParser(object): + """A parser for type annotations constructing the TypeAnnotation object.""" + + def __init__(self, error_handler): + self._stack = [] + self._error_handler = error_handler + self._closing_error = False + + def Parse(self, token, token_end): + """Parses a type annotation and returns a TypeAnnotation object.""" + root = TypeAnnotation() + self._stack.append(root) + current = TypeAnnotation() + root.tokens.append(current) + + while token and token != token_end: + if token.type in (TYPE.DOC_TYPE_START_BLOCK, TYPE.DOC_START_BRACE): + if token.string == '(': + if (current.identifier and + current.identifier not in ['function', '...']): + self.Error(token, + 'Invalid identifier for (): "%s"' % current.identifier) + current.type_group = current.identifier != 'function' + elif token.string == '{': + current.record_type = True + current.tokens.append(token) + self._stack.append(current) + current = TypeAnnotation() + self._stack[-1].tokens.append(current) + + elif token.type in (TYPE.DOC_TYPE_END_BLOCK, TYPE.DOC_END_BRACE): + prev = self._stack.pop() + prev.Append(current) + current = prev + + # If an implicit type group was created, close it as well. + if prev.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP: + prev = self._stack.pop() + prev.Append(current) + current = prev + current.tokens.append(token) + + elif token.type == TYPE.DOC_TYPE_MODIFIER: + if token.string == '!': + current.tokens.append(token) + current.not_null = True + elif token.string == '?': + current.tokens.append(token) + current.or_null = True + elif token.string == ':': + current.tokens.append(token) + prev = current + current = TypeAnnotation() + prev.tokens.append(current) + current.key_type = prev + elif token.string == '=': + # For implicit type groups the '=' refers to the parent. + try: + if self._stack[-1].type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP: + self._stack[-1].tokens.append(token) + self._stack[-1].opt_arg = True + else: + current.tokens.append(token) + current.opt_arg = True + except IndexError: + self.ClosingError(token) + elif token.string == '|': + # If a type group has explicitly been opened do a normal append. 
+ # Otherwise we have to open the type group and move the current + # type into it, before appending + if not self._stack[-1].type_group: + type_group = TypeAnnotation() + if current.key_type and current.key_type.identifier != 'function': + type_group.key_type = current.key_type + current.key_type = None + type_group.type_group = TypeAnnotation.IMPLICIT_TYPE_GROUP + # Fix the token order + prev = self._stack[-1].tokens.pop() + self._stack[-1].tokens.append(type_group) + type_group.tokens.append(prev) + self._stack.append(type_group) + self._stack[-1].tokens.append(token) + self.Append(current, error_token=token) + current = TypeAnnotation() + self._stack[-1].tokens.append(current) + elif token.string == ',': + self.Append(current, error_token=token) + current = TypeAnnotation() + self._stack[-1].tokens.append(token) + self._stack[-1].tokens.append(current) + else: + current.tokens.append(token) + self.Error(token, 'Invalid token') + + elif token.type == TYPE.COMMENT: + current.tokens.append(token) + current.identifier += token.string.strip() + + elif token.type in [TYPE.DOC_PREFIX, TYPE.WHITESPACE]: + current.tokens.append(token) + + else: + current.tokens.append(token) + self.Error(token, 'Unexpected token') + + token = token.next + + self.Append(current, error_token=token) + try: + ret = self._stack.pop() + except IndexError: + self.ClosingError(token) + # The type is screwed up, but let's return something. + return current + + if self._stack and (len(self._stack) != 1 or + ret.type_group != TypeAnnotation.IMPLICIT_TYPE_GROUP): + self.Error(token, 'Too many opening items.') + + return ret if len(ret.sub_types) > 1 else ret.sub_types[0] + + def Append(self, type_obj, error_token): + """Appends a new TypeAnnotation object to the current parent.""" + if self._stack: + self._stack[-1].Append(type_obj) + else: + self.ClosingError(error_token) + + def ClosingError(self, token): + """Reports an error about too many closing items, but only once.""" + if not self._closing_error: + self._closing_error = True + self.Error(token, 'Too many closing items.') + + def Error(self, token, message): + """Calls the error_handler to post an error message.""" + if self._error_handler: + self._error_handler.HandleError(error.Error( + errors.JSDOC_DOES_NOT_PARSE, + 'Error parsing jsdoc type at token "%s" (column: %d): %s' % + (token.string, token.start_index, message), token)) diff --git a/tools/closure_linter/build/lib/closure_linter/typeannotation_test.py b/tools/closure_linter/build/lib/closure_linter/typeannotation_test.py new file mode 100644 index 0000000..da9dfa3 --- /dev/null +++ b/tools/closure_linter/build/lib/closure_linter/typeannotation_test.py @@ -0,0 +1,232 @@ +#!/usr/bin/env python +"""Unit tests for the typeannotation module.""" + + + + +import unittest as googletest + +from closure_linter import testutil +from closure_linter.common import erroraccumulator + +CRAZY_TYPE = ('Array.))>') + + +class TypeErrorException(Exception): + """Exception for TypeErrors.""" + + def __init__(self, errors): + super(TypeErrorException, self).__init__() + self.errors = errors + + +class TypeParserTest(googletest.TestCase): + """Tests for typeannotation parsing.""" + + def _ParseComment(self, script): + """Parse a script that contains one comment and return it.""" + accumulator = erroraccumulator.ErrorAccumulator() + _, comments = testutil.ParseFunctionsAndComments(script, accumulator) + if accumulator.GetErrors(): + raise TypeErrorException(accumulator.GetErrors()) + self.assertEquals(1, len(comments)) + return 
comments[0] + + def _ParseType(self, type_str): + """Creates a comment to parse and returns the parsed type.""" + comment = self._ParseComment('/** @type {%s} **/' % type_str) + return comment.GetDocFlags()[0].jstype + + def assertProperReconstruction(self, type_str, matching_str=None): + """Parses the type and asserts the its repr matches the type. + + If matching_str is specified, it will assert that the repr matches this + string instead. + + Args: + type_str: The type string to parse. + matching_str: A string the __repr__ of the parsed type should match. + Returns: + The parsed js_type. + """ + parsed_type = self._ParseType(type_str) + # Use listEqual assertion to more easily identify the difference + self.assertListEqual(list(matching_str or type_str), + list(repr(parsed_type))) + self.assertEquals(matching_str or type_str, repr(parsed_type)) + + # Newlines will be inserted by the file writer. + self.assertEquals(type_str.replace('\n', ''), parsed_type.ToString()) + return parsed_type + + def assertNullable(self, type_str, nullable=True): + parsed_type = self.assertProperReconstruction(type_str) + self.assertEquals(nullable, parsed_type.GetNullability(), + '"%s" should %sbe nullable' % + (type_str, 'not ' if nullable else '')) + + def assertNotNullable(self, type_str): + return self.assertNullable(type_str, nullable=False) + + def testReconstruction(self): + self.assertProperReconstruction('*') + self.assertProperReconstruction('number') + self.assertProperReconstruction('(((number)))') + self.assertProperReconstruction('!number') + self.assertProperReconstruction('?!number') + self.assertProperReconstruction('number=') + self.assertProperReconstruction('number=!?', '?!number=') + self.assertProperReconstruction('number|?string') + self.assertProperReconstruction('(number|string)') + self.assertProperReconstruction('?(number|string)') + self.assertProperReconstruction('Object.') + self.assertProperReconstruction('function(new:Object)') + self.assertProperReconstruction('function(new:Object):number') + self.assertProperReconstruction('function(new:Object,Element):number') + self.assertProperReconstruction('function(this:T,...)') + self.assertProperReconstruction('{a:?number}') + self.assertProperReconstruction('{a:?number,b:(number|string)}') + self.assertProperReconstruction('{c:{nested_element:*}|undefined}') + self.assertProperReconstruction('{handleEvent:function(?):?}') + self.assertProperReconstruction('function():?|null') + self.assertProperReconstruction('null|function():?|bar') + + def testOptargs(self): + self.assertProperReconstruction('number=') + self.assertProperReconstruction('number|string=') + self.assertProperReconstruction('(number|string)=') + self.assertProperReconstruction('(number|string=)') + self.assertProperReconstruction('(number=|string)') + self.assertProperReconstruction('function(...):number=') + + def testIndepth(self): + # Do an deeper check of the crazy identifier + crazy = self.assertProperReconstruction(CRAZY_TYPE) + self.assertEquals('Array.', crazy.identifier) + self.assertEquals(1, len(crazy.sub_types)) + func1 = crazy.sub_types[0] + func2 = func1.return_type + self.assertEquals('function', func1.identifier) + self.assertEquals('function', func2.identifier) + self.assertEquals(3, len(func1.sub_types)) + self.assertEquals(1, len(func2.sub_types)) + self.assertEquals('Object.', func2.sub_types[0].sub_types[0].identifier) + + def testIterIdentifiers(self): + nested_identifiers = self._ParseType('(a|{b:(c|function(new:d):e)})') + for identifier in 
('a', 'b', 'c', 'd', 'e'): + self.assertIn(identifier, nested_identifiers.IterIdentifiers()) + + def testIsEmpty(self): + self.assertTrue(self._ParseType('').IsEmpty()) + self.assertFalse(self._ParseType('?').IsEmpty()) + self.assertFalse(self._ParseType('!').IsEmpty()) + self.assertFalse(self._ParseType('').IsEmpty()) + + def testIsConstructor(self): + self.assertFalse(self._ParseType('').IsConstructor()) + self.assertFalse(self._ParseType('Array.').IsConstructor()) + self.assertTrue(self._ParseType('function(new:T)').IsConstructor()) + + def testIsVarArgsType(self): + self.assertTrue(self._ParseType('...number').IsVarArgsType()) + self.assertTrue(self._ParseType('...Object|Array').IsVarArgsType()) + self.assertTrue(self._ParseType('...(Object|Array)').IsVarArgsType()) + self.assertFalse(self._ParseType('Object|...Array').IsVarArgsType()) + self.assertFalse(self._ParseType('(...Object|Array)').IsVarArgsType()) + + def testIsUnknownType(self): + self.assertTrue(self._ParseType('?').IsUnknownType()) + self.assertTrue(self._ParseType('Foo.').sub_types[0].IsUnknownType()) + self.assertFalse(self._ParseType('?|!').IsUnknownType()) + self.assertTrue(self._ParseType('?|!').sub_types[0].IsUnknownType()) + self.assertFalse(self._ParseType('!').IsUnknownType()) + + long_type = 'function():?|{handleEvent:function(?=):?,sample:?}|?=' + record = self._ParseType(long_type) + # First check that there's not just one type with 3 return types, but three + # top-level types. + self.assertEquals(3, len(record.sub_types)) + + # Now extract all unknown type instances and verify that they really are. + handle_event, sample = record.sub_types[1].sub_types + for i, sub_type in enumerate([ + record.sub_types[0].return_type, + handle_event.return_type, + handle_event.sub_types[0], + sample, + record.sub_types[2]]): + self.assertTrue(sub_type.IsUnknownType(), + 'Type %d should be the unknown type: %s\n%s' % ( + i, sub_type.tokens, record.Dump())) + + def testTypedefNames(self): + easy = self._ParseType('{a}') + self.assertTrue(easy.record_type) + + easy = self.assertProperReconstruction('{a}', '{a:}').sub_types[0] + self.assertEquals('a', easy.key_type.identifier) + self.assertEquals('', easy.identifier) + + easy = self.assertProperReconstruction('{a:b}').sub_types[0] + self.assertEquals('a', easy.key_type.identifier) + self.assertEquals('b', easy.identifier) + + def assertTypeError(self, type_str): + """Asserts that parsing the given type raises a linter error.""" + self.assertRaises(TypeErrorException, self._ParseType, type_str) + + def testParseBadTypes(self): + """Tests that several errors in types don't break the parser.""" + self.assertTypeError('<') + self.assertTypeError('>') + self.assertTypeError('Foo.=') + self.assertTypeError('Foo.>=') + self.assertTypeError('(') + self.assertTypeError(')') + self.assertTypeError('Foo.') + self._ParseType(':') + self._ParseType(':foo') + self.assertTypeError(':)foo') + self.assertTypeError('(a|{b:(c|function(new:d):e') + + def testNullable(self): + self.assertNullable('null') + self.assertNullable('Object') + self.assertNullable('?string') + self.assertNullable('?number') + + self.assertNotNullable('string') + self.assertNotNullable('number') + self.assertNotNullable('boolean') + self.assertNotNullable('function(Object)') + self.assertNotNullable('function(Object):Object') + self.assertNotNullable('function(?Object):?Object') + self.assertNotNullable('!Object') + + self.assertNotNullable('boolean|string') + self.assertNotNullable('(boolean|string)') + + 
self.assertNullable('(boolean|string|null)') + self.assertNullable('(?boolean)') + self.assertNullable('?(boolean)') + + self.assertNullable('(boolean|Object)') + self.assertNotNullable('(boolean|(string|{a:}))') + + def testSpaces(self): + """Tests that spaces don't change the outcome.""" + type_str = (' A < b | ( c | ? ! d e f ) > | ' + 'function ( x : . . . ) : { y : z = } ') + two_spaces = type_str.replace(' ', ' ') + no_spaces = type_str.replace(' ', '') + newlines = type_str.replace(' ', '\n * ') + self.assertProperReconstruction(no_spaces) + self.assertProperReconstruction(type_str, no_spaces) + self.assertProperReconstruction(two_spaces, no_spaces) + self.assertProperReconstruction(newlines, no_spaces) + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/closure_linter.egg-info/PKG-INFO b/tools/closure_linter/closure_linter.egg-info/PKG-INFO index 918e243..8055c15 100644 --- a/tools/closure_linter/closure_linter.egg-info/PKG-INFO +++ b/tools/closure_linter/closure_linter.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: closure-linter -Version: 2.2.6 +Version: 2.3.17 Summary: Closure Linter Home-page: http://code.google.com/p/closure-linter Author: The Closure Linter Authors diff --git a/tools/closure_linter/closure_linter.egg-info/SOURCES.txt b/tools/closure_linter/closure_linter.egg-info/SOURCES.txt index b64d829..a193cdf 100644 --- a/tools/closure_linter/closure_linter.egg-info/SOURCES.txt +++ b/tools/closure_linter/closure_linter.egg-info/SOURCES.txt @@ -1,12 +1,20 @@ README setup.py closure_linter/__init__.py +closure_linter/aliaspass.py +closure_linter/aliaspass_test.py closure_linter/checker.py closure_linter/checkerbase.py +closure_linter/closurizednamespacesinfo.py +closure_linter/closurizednamespacesinfo_test.py closure_linter/ecmalintrules.py closure_linter/ecmametadatapass.py +closure_linter/error_check.py closure_linter/error_fixer.py +closure_linter/error_fixer_test.py +closure_linter/errorrecord.py closure_linter/errorrules.py +closure_linter/errorrules_test.py closure_linter/errors.py closure_linter/fixjsstyle.py closure_linter/fixjsstyle_test.py @@ -18,8 +26,21 @@ closure_linter/javascriptstatetracker.py closure_linter/javascriptstatetracker_test.py closure_linter/javascripttokenizer.py closure_linter/javascripttokens.py +closure_linter/not_strict_test.py +closure_linter/requireprovidesorter.py +closure_linter/requireprovidesorter_test.py +closure_linter/runner.py +closure_linter/runner_test.py +closure_linter/scopeutil.py +closure_linter/scopeutil_test.py closure_linter/statetracker.py +closure_linter/statetracker_test.py +closure_linter/strict_test.py +closure_linter/testutil.py closure_linter/tokenutil.py +closure_linter/tokenutil_test.py +closure_linter/typeannotation.py +closure_linter/typeannotation_test.py closure_linter.egg-info/PKG-INFO closure_linter.egg-info/SOURCES.txt closure_linter.egg-info/dependency_links.txt @@ -30,7 +51,7 @@ closure_linter/common/__init__.py closure_linter/common/error.py closure_linter/common/erroraccumulator.py closure_linter/common/errorhandler.py -closure_linter/common/errorprinter.py +closure_linter/common/erroroutput.py closure_linter/common/filetestcase.py closure_linter/common/htmlutil.py closure_linter/common/lintrunner.py @@ -38,4 +59,5 @@ closure_linter/common/matcher.py closure_linter/common/position.py closure_linter/common/simplefileflags.py closure_linter/common/tokenizer.py -closure_linter/common/tokens.py \ No newline at end of file +closure_linter/common/tokens.py 
+closure_linter/common/tokens_test.py \ No newline at end of file diff --git a/tools/closure_linter/closure_linter/__init__.py b/tools/closure_linter/closure_linter/__init__.py index 4265cc3..1798c8c 100755 --- a/tools/closure_linter/closure_linter/__init__.py +++ b/tools/closure_linter/closure_linter/__init__.py @@ -1 +1,16 @@ #!/usr/bin/env python +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Package indicator for gjslint.""" diff --git a/tools/closure_linter/closure_linter/aliaspass.py b/tools/closure_linter/closure_linter/aliaspass.py new file mode 100644 index 0000000..bb37bfa --- /dev/null +++ b/tools/closure_linter/closure_linter/aliaspass.py @@ -0,0 +1,248 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Pass that scans for goog.scope aliases and lint/usage errors.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + +from closure_linter import ecmametadatapass +from closure_linter import errors +from closure_linter import javascripttokens +from closure_linter import scopeutil +from closure_linter import tokenutil +from closure_linter.common import error + + +# TODO(nnaze): Create a Pass interface and move this class, EcmaMetaDataPass, +# and related classes onto it. + + +def _GetAliasForIdentifier(identifier, alias_map): + """Returns the aliased_symbol name for an identifier. + + Example usage: + >>> alias_map = {'MyClass': 'goog.foo.MyClass'} + >>> _GetAliasForIdentifier('MyClass.prototype.action', alias_map) + 'goog.foo.MyClass.prototype.action' + + >>> _GetAliasForIdentifier('MyClass.prototype.action', {}) + None + + Args: + identifier: The identifier. + alias_map: A dictionary mapping a symbol to an alias. + + Returns: + The aliased symbol name or None if not found. + """ + ns = identifier.split('.', 1)[0] + aliased_symbol = alias_map.get(ns) + if aliased_symbol: + return aliased_symbol + identifier[len(ns):] + + +def _SetTypeAlias(js_type, alias_map): + """Updates the alias for identifiers in a type. + + Args: + js_type: A typeannotation.TypeAnnotation instance. + alias_map: A dictionary mapping a symbol to an alias. 
+ """ + aliased_symbol = _GetAliasForIdentifier(js_type.identifier, alias_map) + if aliased_symbol: + js_type.alias = aliased_symbol + for sub_type in js_type.IterTypes(): + _SetTypeAlias(sub_type, alias_map) + + +class AliasPass(object): + """Pass to identify goog.scope() usages. + + Identifies goog.scope() usages and finds lint/usage errors. Notes any + aliases of symbols in Closurized namespaces (that is, reassignments + such as "var MyClass = goog.foo.MyClass;") and annotates identifiers + when they're using an alias (so they may be expanded to the full symbol + later -- that "MyClass.prototype.action" refers to + "goog.foo.MyClass.prototype.action" when expanded.). + """ + + def __init__(self, closurized_namespaces=None, error_handler=None): + """Creates a new pass. + + Args: + closurized_namespaces: A set of Closurized namespaces (e.g. 'goog'). + error_handler: An error handler to report lint errors to. + """ + + self._error_handler = error_handler + + # If we have namespaces, freeze the set. + if closurized_namespaces: + closurized_namespaces = frozenset(closurized_namespaces) + + self._closurized_namespaces = closurized_namespaces + + def Process(self, start_token): + """Runs the pass on a token stream. + + Args: + start_token: The first token in the stream. + """ + + if start_token is None: + return + + # TODO(nnaze): Add more goog.scope usage checks. + self._CheckGoogScopeCalls(start_token) + + # If we have closurized namespaces, identify aliased identifiers. + if self._closurized_namespaces: + context = start_token.metadata.context + root_context = context.GetRoot() + self._ProcessRootContext(root_context) + + def _CheckGoogScopeCalls(self, start_token): + """Check goog.scope calls for lint/usage errors.""" + + def IsScopeToken(token): + return (token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and + token.string == 'goog.scope') + + # Find all the goog.scope tokens in the file + scope_tokens = [t for t in start_token if IsScopeToken(t)] + + for token in scope_tokens: + scope_context = token.metadata.context + + if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and + scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT): + self._MaybeReportError( + error.Error(errors.INVALID_USE_OF_GOOG_SCOPE, + 'goog.scope call not in global scope', token)) + + # There should be only one goog.scope reference. Register errors for + # every instance after the first. + for token in scope_tokens[1:]: + self._MaybeReportError( + error.Error(errors.EXTRA_GOOG_SCOPE_USAGE, + 'More than one goog.scope call in file.', token)) + + def _MaybeReportError(self, err): + """Report an error to the handler (if registered).""" + if self._error_handler: + self._error_handler.HandleError(err) + + @classmethod + def _YieldAllContexts(cls, context): + """Yields all contexts that are contained by the given context.""" + yield context + for child_context in context.children: + for descendent_child in cls._YieldAllContexts(child_context): + yield descendent_child + + @staticmethod + def _IsTokenInParentBlock(token, parent_block): + """Determines whether the given token is contained by the given block. + + Args: + token: A token + parent_block: An EcmaContext. + + Returns: + Whether the token is in a context that is or is a child of the given + parent_block context. 
+ """ + context = token.metadata.context + + while context: + if context is parent_block: + return True + context = context.parent + + return False + + def _ProcessRootContext(self, root_context): + """Processes all goog.scope blocks under the root context.""" + + assert root_context.type is ecmametadatapass.EcmaContext.ROOT + + # Process aliases in statements in the root scope for goog.module-style + # aliases. + global_alias_map = {} + for context in root_context.children: + if context.type == ecmametadatapass.EcmaContext.STATEMENT: + for statement_child in context.children: + if statement_child.type == ecmametadatapass.EcmaContext.VAR: + match = scopeutil.MatchModuleAlias(statement_child) + if match: + # goog.require aliases cannot use further aliases, the symbol is + # the second part of match, directly. + symbol = match[1] + if scopeutil.IsInClosurizedNamespace(symbol, + self._closurized_namespaces): + global_alias_map[match[0]] = symbol + + # Process each block to find aliases. + for context in root_context.children: + self._ProcessBlock(context, global_alias_map) + + def _ProcessBlock(self, context, global_alias_map): + """Scans a goog.scope block to find aliases and mark alias tokens.""" + alias_map = global_alias_map.copy() + + # Iterate over every token in the context. Each token points to one + # context, but multiple tokens may point to the same context. We only want + # to check each context once, so keep track of those we've seen. + seen_contexts = set() + token = context.start_token + while token and self._IsTokenInParentBlock(token, context): + token_context = token.metadata.context if token.metadata else None + + # Check to see if this token is an alias. + if token_context and token_context not in seen_contexts: + seen_contexts.add(token_context) + + # If this is a alias statement in the goog.scope block. + if (token_context.type == ecmametadatapass.EcmaContext.VAR and + scopeutil.IsGoogScopeBlock(token_context.parent.parent)): + match = scopeutil.MatchAlias(token_context) + + # If this is an alias, remember it in the map. + if match: + alias, symbol = match + symbol = _GetAliasForIdentifier(symbol, alias_map) or symbol + if scopeutil.IsInClosurizedNamespace(symbol, + self._closurized_namespaces): + alias_map[alias] = symbol + + # If this token is an identifier that matches an alias, + # mark the token as an alias to the original symbol. + if (token.type is javascripttokens.JavaScriptTokenType.SIMPLE_LVALUE or + token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER): + identifier = tokenutil.GetIdentifierForToken(token) + if identifier: + aliased_symbol = _GetAliasForIdentifier(identifier, alias_map) + if aliased_symbol: + token.metadata.aliased_symbol = aliased_symbol + + elif token.type == javascripttokens.JavaScriptTokenType.DOC_FLAG: + flag = token.attached_object + if flag and flag.HasType() and flag.jstype: + _SetTypeAlias(flag.jstype, alias_map) + + token = token.next # Get next token diff --git a/tools/closure_linter/closure_linter/aliaspass_test.py b/tools/closure_linter/closure_linter/aliaspass_test.py new file mode 100755 index 0000000..7042e53 --- /dev/null +++ b/tools/closure_linter/closure_linter/aliaspass_test.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for the aliaspass module.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + +import unittest as googletest + +from closure_linter import aliaspass +from closure_linter import errors +from closure_linter import javascriptstatetracker +from closure_linter import testutil +from closure_linter.common import erroraccumulator + + +def _GetTokenByLineAndString(start_token, string, line_number): + for token in start_token: + if token.line_number == line_number and token.string == string: + return token + + +class AliasPassTest(googletest.TestCase): + + def testInvalidGoogScopeCall(self): + start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCOPE_SCRIPT) + + error_accumulator = erroraccumulator.ErrorAccumulator() + alias_pass = aliaspass.AliasPass( + error_handler=error_accumulator) + alias_pass.Process(start_token) + + alias_errors = error_accumulator.GetErrors() + self.assertEquals(1, len(alias_errors)) + + alias_error = alias_errors[0] + + self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, alias_error.code) + self.assertEquals('goog.scope', alias_error.token.string) + + def testAliasedIdentifiers(self): + start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT) + alias_pass = aliaspass.AliasPass(set(['goog', 'myproject'])) + alias_pass.Process(start_token) + + alias_token = _GetTokenByLineAndString(start_token, 'Event', 4) + self.assertTrue(alias_token.metadata.is_alias_definition) + + my_class_token = _GetTokenByLineAndString(start_token, 'myClass', 9) + self.assertIsNone(my_class_token.metadata.aliased_symbol) + + component_token = _GetTokenByLineAndString(start_token, 'Component', 17) + self.assertEquals('goog.ui.Component', + component_token.metadata.aliased_symbol) + + event_token = _GetTokenByLineAndString(start_token, 'Event.Something', 17) + self.assertEquals('goog.events.Event.Something', + event_token.metadata.aliased_symbol) + + non_closurized_token = _GetTokenByLineAndString( + start_token, 'NonClosurizedClass', 18) + self.assertIsNone(non_closurized_token.metadata.aliased_symbol) + + long_start_token = _GetTokenByLineAndString(start_token, 'Event', 24) + self.assertEquals('goog.events.Event.MultilineIdentifier.someMethod', + long_start_token.metadata.aliased_symbol) + + def testAliasedDoctypes(self): + """Tests that aliases are correctly expanded within type annotations.""" + start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT) + tracker = javascriptstatetracker.JavaScriptStateTracker() + tracker.DocFlagPass(start_token, error_handler=None) + + alias_pass = aliaspass.AliasPass(set(['goog', 'myproject'])) + alias_pass.Process(start_token) + + flag_token = _GetTokenByLineAndString(start_token, '@type', 22) + self.assertEquals( + 'goog.events.Event.>', + repr(flag_token.attached_object.jstype)) + + def testModuleAlias(self): + start_token = testutil.TokenizeSourceAndRunEcmaPass(""" +goog.module('goog.test'); +var Alias = goog.require('goog.Alias'); +Alias.use(); +""") + alias_pass = aliaspass.AliasPass(set(['goog'])) + 
alias_pass.Process(start_token) + alias_token = _GetTokenByLineAndString(start_token, 'Alias', 3) + self.assertTrue(alias_token.metadata.is_alias_definition) + + def testMultipleGoogScopeCalls(self): + start_token = testutil.TokenizeSourceAndRunEcmaPass( + _TEST_MULTIPLE_SCOPE_SCRIPT) + + error_accumulator = erroraccumulator.ErrorAccumulator() + + alias_pass = aliaspass.AliasPass( + set(['goog', 'myproject']), + error_handler=error_accumulator) + alias_pass.Process(start_token) + + alias_errors = error_accumulator.GetErrors() + + self.assertEquals(3, len(alias_errors)) + + error = alias_errors[0] + self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, error.code) + self.assertEquals(7, error.token.line_number) + + error = alias_errors[1] + self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code) + self.assertEquals(7, error.token.line_number) + + error = alias_errors[2] + self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code) + self.assertEquals(11, error.token.line_number) + + +_TEST_ALIAS_SCRIPT = """ +goog.scope(function() { +var events = goog.events; // scope alias +var Event = events. + Event; // nested multiline scope alias + +// This should not be registered as an aliased identifier because +// it appears before the alias. +var myClass = new MyClass(); + +var Component = goog.ui.Component; // scope alias +var MyClass = myproject.foo.MyClass; // scope alias + +// Scope alias of non-Closurized namespace. +var NonClosurizedClass = aaa.bbb.NonClosurizedClass; + +var component = new Component(Event.Something); +var nonClosurized = NonClosurizedClass(); + +/** + * A created namespace with a really long identifier. + * @type {events.Event.} + */ +Event. + MultilineIdentifier. + someMethod = function() {}; +}); +""" + +_TEST_SCOPE_SCRIPT = """ +function foo () { + // This goog.scope call is invalid. + goog.scope(function() { + + }); +} +""" + +_TEST_MULTIPLE_SCOPE_SCRIPT = """ +goog.scope(function() { + // do nothing +}); + +function foo() { + var test = goog.scope; // We should not see goog.scope mentioned. +} + +// This goog.scope invalid. There can be only one. +goog.scope(function() { + +}); +""" + + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/closure_linter/checker.py b/tools/closure_linter/closure_linter/checker.py index 4cdac93..1c98417 100755 --- a/tools/closure_linter/closure_linter/checker.py +++ b/tools/closure_linter/closure_linter/checker.py @@ -21,62 +21,88 @@ __author__ = ('robbyw@google.com (Robert Walker)', import gflags as flags +from closure_linter import aliaspass from closure_linter import checkerbase -from closure_linter import ecmametadatapass -from closure_linter import errors +from closure_linter import closurizednamespacesinfo from closure_linter import javascriptlintrules -from closure_linter import javascriptstatetracker -from closure_linter.common import errorprinter -from closure_linter.common import lintrunner -flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'], - 'List of files with relaxed documentation checks. 
Will not ' - 'report errors for missing documentation, some missing ' - 'descriptions, or methods whose @return tags don\'t have a ' - 'matching return statement.') + +flags.DEFINE_list('closurized_namespaces', '', + 'Namespace prefixes, used for testing of' + 'goog.provide/require') +flags.DEFINE_list('ignored_extra_namespaces', '', + 'Fully qualified namespaces that should be not be reported ' + 'as extra by the linter.') class JavaScriptStyleChecker(checkerbase.CheckerBase): """Checker that applies JavaScriptLintRules.""" - def __init__(self, error_handler): + def __init__(self, state_tracker, error_handler): """Initialize an JavaScriptStyleChecker object. Args: - error_handler: Error handler to pass all errors to + state_tracker: State tracker. + error_handler: Error handler to pass all errors to. """ + self._namespaces_info = None + self._alias_pass = None + if flags.FLAGS.closurized_namespaces: + self._namespaces_info = ( + closurizednamespacesinfo.ClosurizedNamespacesInfo( + flags.FLAGS.closurized_namespaces, + flags.FLAGS.ignored_extra_namespaces)) + + self._alias_pass = aliaspass.AliasPass( + flags.FLAGS.closurized_namespaces, error_handler) + checkerbase.CheckerBase.__init__( self, error_handler=error_handler, - lint_rules=javascriptlintrules.JavaScriptLintRules(), - state_tracker=javascriptstatetracker.JavaScriptStateTracker( - closurized_namespaces=flags.FLAGS.closurized_namespaces), - metadata_pass=ecmametadatapass.EcmaMetaDataPass(), - limited_doc_files=flags.FLAGS.limited_doc_files) + lint_rules=javascriptlintrules.JavaScriptLintRules( + self._namespaces_info), + state_tracker=state_tracker) + def Check(self, start_token, limited_doc_checks=False, is_html=False, + stop_token=None): + """Checks a token stream for lint warnings/errors. -class GJsLintRunner(lintrunner.LintRunner): - """Wrapper class to run GJsLint.""" - - def Run(self, filenames, error_handler=None): - """Run GJsLint on the given filenames. + Adds a separate pass for computing dependency information based on + goog.require and goog.provide statements prior to the main linting pass. Args: - filenames: The filenames to check - error_handler: An optional ErrorHandler object, an ErrorPrinter is used if - none is specified. - - Returns: - error_count, file_count: The number of errors and the number of files that - contain errors. + start_token: The first token in the token stream. + limited_doc_checks: Whether to perform limited checks. + is_html: Whether this token stream is HTML. + stop_token: If given, checks should stop at this token. """ - if not error_handler: - error_handler = errorprinter.ErrorPrinter(errors.NEW_ERRORS) + self._lint_rules.Initialize(self, limited_doc_checks, is_html) + + self._state_tracker.DocFlagPass(start_token, self._error_handler) - checker = JavaScriptStyleChecker(error_handler) + if self._alias_pass: + self._alias_pass.Process(start_token) - # Check the list of files. - for filename in filenames: - checker.Check(filename) + # To maximize the amount of errors that get reported before a parse error + # is displayed, don't run the dependency pass if a parse error exists. + if self._namespaces_info: + self._namespaces_info.Reset() + self._ExecutePass(start_token, self._DependencyPass, stop_token) - return error_handler + self._ExecutePass(start_token, self._LintPass, stop_token) + + # If we have a stop_token, we didn't end up reading the whole file and, + # thus, don't call Finalize to do end-of-file checks. 
+ if not stop_token: + self._lint_rules.Finalize(self._state_tracker) + + def _DependencyPass(self, token): + """Processes an individual token for dependency information. + + Used to encapsulate the logic needed to process an individual token so that + it can be passed to _ExecutePass. + + Args: + token: The token to process. + """ + self._namespaces_info.ProcessToken(token, self._state_tracker) diff --git a/tools/closure_linter/closure_linter/checkerbase.py b/tools/closure_linter/closure_linter/checkerbase.py index 123cb72..6679ded 100755 --- a/tools/closure_linter/closure_linter/checkerbase.py +++ b/tools/closure_linter/closure_linter/checkerbase.py @@ -16,26 +16,16 @@ """Base classes for writing checkers that operate on tokens.""" +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)', 'jacobr@google.com (Jacob Richman)') -import traceback - -import gflags as flags -from closure_linter import ecmametadatapass from closure_linter import errorrules -from closure_linter import errors -from closure_linter import javascripttokenizer from closure_linter.common import error -from closure_linter.common import htmlutil -FLAGS = flags.FLAGS -flags.DEFINE_boolean('debug_tokens', False, - 'Whether to print all tokens for debugging.') - -flags.DEFINE_boolean('error_trace', False, - 'Whether to show error exceptions.') class LintRulesBase(object): """Base class for all classes defining the lint rules for a language.""" @@ -61,6 +51,14 @@ class LintRulesBase(object): if errorrules.ShouldReportError(code): self.__checker.HandleError(code, message, token, position, fix_data) + def _SetLimitedDocChecks(self, limited_doc_checks): + """Sets whether doc checking is relaxed for this file. + + Args: + limited_doc_checks: Whether doc checking is relaxed for this file. + """ + self._limited_doc_checks = limited_doc_checks + def CheckToken(self, token, parser_state): """Checks a token, given the current parser_state, for warnings and errors. @@ -73,12 +71,11 @@ class LintRulesBase(object): """ raise TypeError('Abstract method CheckToken not implemented') - def Finalize(self, parser_state, tokenizer_mode): + def Finalize(self, parser_state): """Perform all checks that need to occur after all lines are processed. Args: parser_state: State of the parser after parsing all tokens - tokenizer_mode: Mode of the tokenizer after parsing the entire page Raises: TypeError: If not overridden. @@ -89,8 +86,7 @@ class LintRulesBase(object): class CheckerBase(object): """This class handles checking a LintRules object against a file.""" - def __init__(self, error_handler, lint_rules, state_tracker, - limited_doc_files=None, metadata_pass=None): + def __init__(self, error_handler, lint_rules, state_tracker): """Initialize a checker object. Args: @@ -98,17 +94,13 @@ class CheckerBase(object): lint_rules: LintRules object defining lint errors given a token and state_tracker object. state_tracker: Object that tracks the current state in the token stream. - limited_doc_files: List of filenames that are not required to have - documentation comments. - metadata_pass: Object that builds metadata about the token stream. 
+ """ - self.__error_handler = error_handler - self.__lint_rules = lint_rules - self.__state_tracker = state_tracker - self.__metadata_pass = metadata_pass - self.__limited_doc_files = limited_doc_files - self.__tokenizer = javascripttokenizer.JavaScriptTokenizer() - self.__has_errors = False + self._error_handler = error_handler + self._lint_rules = lint_rules + self._state_tracker = state_tracker + + self._has_errors = False def HandleError(self, code, message, token, position=None, fix_data=None): @@ -122,8 +114,8 @@ class CheckerBase(object): position: The position of the error, defaults to None. fix_data: Metadata used for fixing the error. """ - self.__has_errors = True - self.__error_handler.HandleError( + self._has_errors = True + self._error_handler.HandleError( error.Error(code, message, token, position, fix_data)) def HasErrors(self): @@ -132,106 +124,69 @@ class CheckerBase(object): Returns: True if the style checker has found any errors. """ - return self.__has_errors + return self._has_errors + + def Check(self, start_token, limited_doc_checks=False, is_html=False, + stop_token=None): + """Checks a token stream, reporting errors to the error reporter. + + Args: + start_token: First token in token stream. + limited_doc_checks: Whether doc checking is relaxed for this file. + is_html: Whether the file being checked is an HTML file with extracted + contents. + stop_token: If given, check should stop at this token. + """ - def Check(self, filename): - """Checks the file, printing warnings and errors as they are found. + self._lint_rules.Initialize(self, limited_doc_checks, is_html) + self._ExecutePass(start_token, self._LintPass, stop_token=stop_token) + self._lint_rules.Finalize(self._state_tracker) + + def _LintPass(self, token): + """Checks an individual token for lint warnings/errors. + + Used to encapsulate the logic needed to check an individual token so that it + can be passed to _ExecutePass. Args: - filename: The name of the file to check. + token: The token to check. """ - try: - f = open(filename) - except IOError: - self.__error_handler.HandleFile(filename, None) - self.HandleError(errors.FILE_NOT_FOUND, 'File not found', None) - self.__error_handler.FinishFile() - return - - try: - if filename.endswith('.html') or filename.endswith('.htm'): - self.CheckLines(filename, htmlutil.GetScriptLines(f), True) - else: - self.CheckLines(filename, f, False) - finally: - f.close() - - def CheckLines(self, filename, lines_iter, is_html): - """Checks a file, given as an iterable of lines, for warnings and errors. + self._lint_rules.CheckToken(token, self._state_tracker) + + def _ExecutePass(self, token, pass_function, stop_token=None): + """Calls the given function for every token in the given token stream. + + As each token is passed to the given function, state is kept up to date and, + depending on the error_trace flag, errors are either caught and reported, or + allowed to bubble up so developers can see the full stack trace. If a parse + error is specified, the pass will proceed as normal until the token causing + the parse error is reached. Args: - filename: The name of the file to check. - lines_iter: An iterator that yields one line of the file at a time. - is_html: Whether the file being checked is an HTML file with extracted - contents. + token: The first token in the token stream. + pass_function: The function to call for each token in the token stream. + stop_token: The last token to check (if given). 
- Returns: - A boolean indicating whether the full file could be checked or if checking - failed prematurely. + Raises: + Exception: If any error occurred while calling the given function. """ - limited_doc_checks = False - if self.__limited_doc_files: - for limited_doc_filename in self.__limited_doc_files: - if filename.endswith(limited_doc_filename): - limited_doc_checks = True - break - - state_tracker = self.__state_tracker - lint_rules = self.__lint_rules - state_tracker.Reset() - lint_rules.Initialize(self, limited_doc_checks, is_html) - - token = self.__tokenizer.TokenizeFile(lines_iter) - - parse_error = None - if self.__metadata_pass: - try: - self.__metadata_pass.Reset() - self.__metadata_pass.Process(token) - except ecmametadatapass.ParseError, caught_parse_error: - if FLAGS.error_trace: - traceback.print_exc() - parse_error = caught_parse_error - except Exception: - print 'Internal error in %s' % filename - traceback.print_exc() - return False - - self.__error_handler.HandleFile(filename, token) + self._state_tracker.Reset() while token: - if FLAGS.debug_tokens: - print token - - if parse_error and parse_error.token == token: - # Report any parse errors from above once we find the token. - message = ('Error parsing file at token "%s". Unable to ' - 'check the rest of file.' % token.string) - self.HandleError(errors.FILE_DOES_NOT_PARSE, message, token) - self.__error_handler.FinishFile() - return False - - if FLAGS.error_trace: - state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken()) - else: - try: - state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken()) - except: - self.HandleError(errors.FILE_DOES_NOT_PARSE, - ('Error parsing file at token "%s". Unable to ' - 'check the rest of file.' % token.string), - token) - self.__error_handler.FinishFile() - return False - - # Check the token for style guide violations. - lint_rules.CheckToken(token, state_tracker) - - state_tracker.HandleAfterToken(token) - - # Move to the next token. - token = token.next + # When we are looking at a token and decided to delete the whole line, we + # will delete all of them in the "HandleToken()" below. So the current + # token and subsequent ones may already be deleted here. The way we + # delete a token does not wipe out the previous and next pointers of the + # deleted token. So we need to check the token itself to make sure it is + # not deleted. + if not token.is_deleted: + # End the pass at the stop token + if stop_token and token is stop_token: + return + + self._state_tracker.HandleToken( + token, self._state_tracker.GetLastNonSpaceToken()) + pass_function(token) + self._state_tracker.HandleAfterToken(token) - lint_rules.Finalize(state_tracker, self.__tokenizer.mode) - self.__error_handler.FinishFile() - return True + token = token.next diff --git a/tools/closure_linter/closure_linter/closurizednamespacesinfo.py b/tools/closure_linter/closure_linter/closurizednamespacesinfo.py new file mode 100755 index 0000000..e7cbfd3 --- /dev/null +++ b/tools/closure_linter/closure_linter/closurizednamespacesinfo.py @@ -0,0 +1,578 @@ +#!/usr/bin/env python +# +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Logic for computing dependency information for closurized JavaScript files. + +Closurized JavaScript files express dependencies using goog.require and +goog.provide statements. In order for the linter to detect when a statement is +missing or unnecessary, all identifiers in the JavaScript file must first be +processed to determine if they constitute the creation or usage of a dependency. +""" + + + +import re + +from closure_linter import javascripttokens +from closure_linter import tokenutil + +# pylint: disable=g-bad-name +TokenType = javascripttokens.JavaScriptTokenType + +DEFAULT_EXTRA_NAMESPACES = [ + 'goog.testing.asserts', + 'goog.testing.jsunit', +] + + +class UsedNamespace(object): + """A type for information about a used namespace.""" + + def __init__(self, namespace, identifier, token, alias_definition): + """Initializes the instance. + + Args: + namespace: the namespace of an identifier used in the file + identifier: the complete identifier + token: the token that uses the namespace + alias_definition: a boolean stating whether the namespace is only to used + for an alias definition and should not be required. + """ + self.namespace = namespace + self.identifier = identifier + self.token = token + self.alias_definition = alias_definition + + def GetLine(self): + return self.token.line_number + + def __repr__(self): + return 'UsedNamespace(%s)' % ', '.join( + ['%s=%s' % (k, repr(v)) for k, v in self.__dict__.iteritems()]) + + +class ClosurizedNamespacesInfo(object): + """Dependency information for closurized JavaScript files. + + Processes token streams for dependency creation or usage and provides logic + for determining if a given require or provide statement is unnecessary or if + there are missing require or provide statements. + """ + + def __init__(self, closurized_namespaces, ignored_extra_namespaces): + """Initializes an instance the ClosurizedNamespacesInfo class. + + Args: + closurized_namespaces: A list of namespace prefixes that should be + processed for dependency information. Non-matching namespaces are + ignored. + ignored_extra_namespaces: A list of namespaces that should not be reported + as extra regardless of whether they are actually used. + """ + self._closurized_namespaces = closurized_namespaces + self._ignored_extra_namespaces = (ignored_extra_namespaces + + DEFAULT_EXTRA_NAMESPACES) + self.Reset() + + def Reset(self): + """Resets the internal state to prepare for processing a new file.""" + + # A list of goog.provide tokens in the order they appeared in the file. + self._provide_tokens = [] + + # A list of goog.require tokens in the order they appeared in the file. + self._require_tokens = [] + + # Namespaces that are already goog.provided. + self._provided_namespaces = [] + + # Namespaces that are already goog.required. + self._required_namespaces = [] + + # Note that created_namespaces and used_namespaces contain both namespaces + # and identifiers because there are many existing cases where a method or + # constant is provided directly instead of its namespace. Ideally, these + # two lists would only have to contain namespaces. 
+ + # A list of tuples where the first element is the namespace of an identifier + # created in the file, the second is the identifier itself and the third is + # the line number where it's created. + self._created_namespaces = [] + + # A list of UsedNamespace instances. + self._used_namespaces = [] + + # A list of seemingly-unnecessary namespaces that are goog.required() and + # annotated with @suppress {extraRequire}. + self._suppressed_requires = [] + + # A list of goog.provide tokens which are duplicates. + self._duplicate_provide_tokens = [] + + # A list of goog.require tokens which are duplicates. + self._duplicate_require_tokens = [] + + # Whether this file is in a goog.scope. Someday, we may add support + # for checking scopified namespaces, but for now let's just fail + # in a more reasonable way. + self._scopified_file = False + + # TODO(user): Handle the case where there are 2 different requires + # that can satisfy the same dependency, but only one is necessary. + + def GetProvidedNamespaces(self): + """Returns the namespaces which are already provided by this file. + + Returns: + A list of strings where each string is a 'namespace' corresponding to an + existing goog.provide statement in the file being checked. + """ + return set(self._provided_namespaces) + + def GetRequiredNamespaces(self): + """Returns the namespaces which are already required by this file. + + Returns: + A list of strings where each string is a 'namespace' corresponding to an + existing goog.require statement in the file being checked. + """ + return set(self._required_namespaces) + + def IsExtraProvide(self, token): + """Returns whether the given goog.provide token is unnecessary. + + Args: + token: A goog.provide token. + + Returns: + True if the given token corresponds to an unnecessary goog.provide + statement, otherwise False. + """ + namespace = tokenutil.GetStringAfterToken(token) + + if self.GetClosurizedNamespace(namespace) is None: + return False + + if token in self._duplicate_provide_tokens: + return True + + # TODO(user): There's probably a faster way to compute this. + for created_namespace, created_identifier, _ in self._created_namespaces: + if namespace == created_namespace or namespace == created_identifier: + return False + + return True + + def IsExtraRequire(self, token): + """Returns whether the given goog.require token is unnecessary. + + Args: + token: A goog.require token. + + Returns: + True if the given token corresponds to an unnecessary goog.require + statement, otherwise False. + """ + namespace = tokenutil.GetStringAfterToken(token) + + if self.GetClosurizedNamespace(namespace) is None: + return False + + if namespace in self._ignored_extra_namespaces: + return False + + if token in self._duplicate_require_tokens: + return True + + if namespace in self._suppressed_requires: + return False + + # If the namespace contains a component that is initial caps, then that + # must be the last component of the namespace. + parts = namespace.split('.') + if len(parts) > 1 and parts[-2][0].isupper(): + return True + + # TODO(user): There's probably a faster way to compute this. + for ns in self._used_namespaces: + if (not ns.alias_definition and ( + namespace == ns.namespace or namespace == ns.identifier)): + return False + + return True + + def GetMissingProvides(self): + """Returns the dict of missing provided namespaces for the current file. 
+ + Returns: + Returns a dictionary of key as string and value as integer where each + string(key) is a namespace that should be provided by this file, but is + not and integer(value) is first line number where it's defined. + """ + missing_provides = dict() + for namespace, identifier, line_number in self._created_namespaces: + if (not self._IsPrivateIdentifier(identifier) and + namespace not in self._provided_namespaces and + identifier not in self._provided_namespaces and + namespace not in self._required_namespaces and + namespace not in missing_provides): + missing_provides[namespace] = line_number + + return missing_provides + + def GetMissingRequires(self): + """Returns the dict of missing required namespaces for the current file. + + For each non-private identifier used in the file, find either a + goog.require, goog.provide or a created identifier that satisfies it. + goog.require statements can satisfy the identifier by requiring either the + namespace of the identifier or the identifier itself. goog.provide + statements can satisfy the identifier by providing the namespace of the + identifier. A created identifier can only satisfy the used identifier if + it matches it exactly (necessary since things can be defined on a + namespace in more than one file). Note that provided namespaces should be + a subset of created namespaces, but we check both because in some cases we + can't always detect the creation of the namespace. + + Returns: + Returns a dictionary of key as string and value integer where each + string(key) is a namespace that should be required by this file, but is + not and integer(value) is first line number where it's used. + """ + external_dependencies = set(self._required_namespaces) + + # Assume goog namespace is always available. + external_dependencies.add('goog') + # goog.module is treated as a builtin, too (for goog.module.get). + external_dependencies.add('goog.module') + + created_identifiers = set() + for unused_namespace, identifier, unused_line_number in ( + self._created_namespaces): + created_identifiers.add(identifier) + + missing_requires = dict() + illegal_alias_statements = dict() + + def ShouldRequireNamespace(namespace, identifier): + """Checks if a namespace would normally be required.""" + return ( + not self._IsPrivateIdentifier(identifier) and + namespace not in external_dependencies and + namespace not in self._provided_namespaces and + identifier not in external_dependencies and + identifier not in created_identifiers and + namespace not in missing_requires) + + # First check all the used identifiers where we know that their namespace + # needs to be provided (unless they are optional). + for ns in self._used_namespaces: + namespace = ns.namespace + identifier = ns.identifier + if (not ns.alias_definition and + ShouldRequireNamespace(namespace, identifier)): + missing_requires[namespace] = ns.GetLine() + + # Now that all required namespaces are known, we can check if the alias + # definitions (that are likely being used for typeannotations that don't + # need explicit goog.require statements) are already covered. If not + # the user shouldn't use the alias. 
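As a concrete illustration of the shapes returned here, reusing the illustrative BuildNamespacesInfo helper sketched above (the dict values are taken from the unit tests added later in this patch):

    info = BuildNamespacesInfo(['package.Foo();'], ['package'])
    missing_requires, illegal_alias_stmts = info.GetMissingRequires()
    # missing_requires == {'package.Foo': 1}, i.e. namespace -> first line of use.
    # illegal_alias_stmts maps a namespace to the goog.scope alias token that
    # depends on it, e.g. {'goog.events': <token>} for "var Event = goog.events.Event;".

    info = BuildNamespacesInfo(['package.Foo = function() {};'], ['package'])
    # info.GetMissingProvides() == {'package.Foo': 1}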
+ for ns in self._used_namespaces: + if (not ns.alias_definition or + not ShouldRequireNamespace(ns.namespace, ns.identifier)): + continue + if self._FindNamespace(ns.identifier, self._provided_namespaces, + created_identifiers, external_dependencies, + missing_requires): + continue + namespace = ns.identifier.rsplit('.', 1)[0] + illegal_alias_statements[namespace] = ns.token + + return missing_requires, illegal_alias_statements + + def _FindNamespace(self, identifier, *namespaces_list): + """Finds the namespace of an identifier given a list of other namespaces. + + Args: + identifier: An identifier whose parent needs to be defined. + e.g. for goog.bar.foo we search something that provides + goog.bar. + *namespaces_list: var args of iterables of namespace identifiers + Returns: + The namespace that the given identifier is part of or None. + """ + identifier = identifier.rsplit('.', 1)[0] + identifier_prefix = identifier + '.' + for namespaces in namespaces_list: + for namespace in namespaces: + if namespace == identifier or namespace.startswith(identifier_prefix): + return namespace + return None + + def _IsPrivateIdentifier(self, identifier): + """Returns whether the given identifier is private.""" + pieces = identifier.split('.') + for piece in pieces: + if piece.endswith('_'): + return True + return False + + def IsFirstProvide(self, token): + """Returns whether token is the first provide token.""" + return self._provide_tokens and token == self._provide_tokens[0] + + def IsFirstRequire(self, token): + """Returns whether token is the first require token.""" + return self._require_tokens and token == self._require_tokens[0] + + def IsLastProvide(self, token): + """Returns whether token is the last provide token.""" + return self._provide_tokens and token == self._provide_tokens[-1] + + def IsLastRequire(self, token): + """Returns whether token is the last require token.""" + return self._require_tokens and token == self._require_tokens[-1] + + def ProcessToken(self, token, state_tracker): + """Processes the given token for dependency information. + + Args: + token: The token to process. + state_tracker: The JavaScript state tracker. + """ + + # Note that this method is in the critical path for the linter and has been + # optimized for performance in the following ways: + # - Tokens are checked by type first to minimize the number of function + # calls necessary to determine if action needs to be taken for the token. + # - The most common tokens types are checked for first. + # - The number of function calls has been minimized (thus the length of this + # function. + + if token.type == TokenType.IDENTIFIER: + # TODO(user): Consider saving the whole identifier in metadata. + whole_identifier_string = tokenutil.GetIdentifierForToken(token) + if whole_identifier_string is None: + # We only want to process the identifier one time. If the whole string + # identifier is None, that means this token was part of a multi-token + # identifier, but it was not the first token of the identifier. + return + + # In the odd case that a goog.require is encountered inside a function, + # just ignore it (e.g. dynamic loading in test runners). 
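Once ProcessToken has recorded the file's provides, requires and identifier uses (the branches below), the per-token queries become cheap to answer. A hedged sketch mirroring testIsExtraRequire_objectOnClass from the tests later in this patch, again using the illustrative BuildNamespacesInfo helper assumed earlier:

    info = BuildNamespacesInfo(
        ["goog.require('package.Foo.Enum');",
         'var x = package.Foo.Enum.VALUE1;'],
        ['package'])
    require_token = testutil.TokenizeSource(["goog.require('package.Foo.Enum');"])
    info.IsExtraRequire(require_token)  # True: require package.Foo, not the Enum.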
+ if token.string == 'goog.require' and not state_tracker.InFunction(): + self._require_tokens.append(token) + namespace = tokenutil.GetStringAfterToken(token) + if namespace in self._required_namespaces: + self._duplicate_require_tokens.append(token) + else: + self._required_namespaces.append(namespace) + + # If there is a suppression for the require, add a usage for it so it + # gets treated as a regular goog.require (i.e. still gets sorted). + if self._HasSuppression(state_tracker, 'extraRequire'): + self._suppressed_requires.append(namespace) + self._AddUsedNamespace(state_tracker, namespace, token) + + elif token.string == 'goog.provide': + self._provide_tokens.append(token) + namespace = tokenutil.GetStringAfterToken(token) + if namespace in self._provided_namespaces: + self._duplicate_provide_tokens.append(token) + else: + self._provided_namespaces.append(namespace) + + # If there is a suppression for the provide, add a creation for it so it + # gets treated as a regular goog.provide (i.e. still gets sorted). + if self._HasSuppression(state_tracker, 'extraProvide'): + self._AddCreatedNamespace(state_tracker, namespace, token.line_number) + + elif token.string == 'goog.scope': + self._scopified_file = True + + elif token.string == 'goog.setTestOnly': + + # Since the message is optional, we don't want to scan to later lines. + for t in tokenutil.GetAllTokensInSameLine(token): + if t.type == TokenType.STRING_TEXT: + message = t.string + + if re.match(r'^\w+(\.\w+)+$', message): + # This looks like a namespace. If it's a Closurized namespace, + # consider it created. + base_namespace = message.split('.', 1)[0] + if base_namespace in self._closurized_namespaces: + self._AddCreatedNamespace(state_tracker, message, + token.line_number) + + break + else: + jsdoc = state_tracker.GetDocComment() + if token.metadata and token.metadata.aliased_symbol: + whole_identifier_string = token.metadata.aliased_symbol + elif (token.string == 'goog.module.get' and + not self._HasSuppression(state_tracker, 'extraRequire')): + # Cannot use _AddUsedNamespace as this is not an identifier, but + # already the entire namespace that's required. + namespace = tokenutil.GetStringAfterToken(token) + namespace = UsedNamespace(namespace, namespace, token, + alias_definition=False) + self._used_namespaces.append(namespace) + if jsdoc and jsdoc.HasFlag('typedef'): + self._AddCreatedNamespace(state_tracker, whole_identifier_string, + token.line_number, + namespace=self.GetClosurizedNamespace( + whole_identifier_string)) + else: + is_alias_definition = (token.metadata and + token.metadata.is_alias_definition) + self._AddUsedNamespace(state_tracker, whole_identifier_string, + token, is_alias_definition) + + elif token.type == TokenType.SIMPLE_LVALUE: + identifier = token.values['identifier'] + start_token = tokenutil.GetIdentifierStart(token) + if start_token and start_token != token: + # Multi-line identifier being assigned. Get the whole identifier. + identifier = tokenutil.GetIdentifierForToken(start_token) + else: + start_token = token + # If an alias is defined on the start_token, use it instead. 
+ if (start_token and + start_token.metadata and + start_token.metadata.aliased_symbol and + not start_token.metadata.is_alias_definition): + identifier = start_token.metadata.aliased_symbol + + if identifier: + namespace = self.GetClosurizedNamespace(identifier) + if state_tracker.InFunction(): + self._AddUsedNamespace(state_tracker, identifier, token) + elif namespace and namespace != 'goog': + self._AddCreatedNamespace(state_tracker, identifier, + token.line_number, namespace=namespace) + + elif token.type == TokenType.DOC_FLAG: + flag = token.attached_object + flag_type = flag.flag_type + if flag and flag.HasType() and flag.jstype: + is_interface = state_tracker.GetDocComment().HasFlag('interface') + if flag_type == 'implements' or (flag_type == 'extends' + and is_interface): + identifier = flag.jstype.alias or flag.jstype.identifier + self._AddUsedNamespace(state_tracker, identifier, token) + # Since we process doctypes only for implements and extends, the + # type is a simple one and we don't need any iteration for subtypes. + + def _AddCreatedNamespace(self, state_tracker, identifier, line_number, + namespace=None): + """Adds the namespace of an identifier to the list of created namespaces. + + If the identifier is annotated with a 'missingProvide' suppression, it is + not added. + + Args: + state_tracker: The JavaScriptStateTracker instance. + identifier: The identifier to add. + line_number: Line number where namespace is created. + namespace: The namespace of the identifier or None if the identifier is + also the namespace. + """ + if not namespace: + namespace = identifier + + if self._HasSuppression(state_tracker, 'missingProvide'): + return + + self._created_namespaces.append([namespace, identifier, line_number]) + + def _AddUsedNamespace(self, state_tracker, identifier, token, + is_alias_definition=False): + """Adds the namespace of an identifier to the list of used namespaces. + + If the identifier is annotated with a 'missingRequire' suppression, it is + not added. + + Args: + state_tracker: The JavaScriptStateTracker instance. + identifier: An identifier which has been used. + token: The token in which the namespace is used. + is_alias_definition: If the used namespace is part of an alias_definition. + Aliased symbols need their parent namespace to be available, if it is + not yet required through another symbol, an error will be thrown. + """ + if self._HasSuppression(state_tracker, 'missingRequire'): + return + + namespace = self.GetClosurizedNamespace(identifier) + # b/5362203 If its a variable in scope then its not a required namespace. + if namespace and not state_tracker.IsVariableInScope(namespace): + namespace = UsedNamespace(namespace, identifier, token, + is_alias_definition) + self._used_namespaces.append(namespace) + + def _HasSuppression(self, state_tracker, suppression): + jsdoc = state_tracker.GetDocComment() + return jsdoc and suppression in jsdoc.suppressions + + def GetClosurizedNamespace(self, identifier): + """Given an identifier, returns the namespace that identifier is from. + + Args: + identifier: The identifier to extract a namespace from. + + Returns: + The namespace the given identifier resides in, or None if one could not + be found. + """ + if identifier.startswith('goog.global'): + # Ignore goog.global, since it is, by definition, global. 
+ return None + + parts = identifier.split('.') + for namespace in self._closurized_namespaces: + if not identifier.startswith(namespace + '.'): + continue + + # The namespace for a class is the shortest prefix ending in a class + # name, which starts with a capital letter but is not a capitalized word. + # + # We ultimately do not want to allow requiring or providing of inner + # classes/enums. Instead, a file should provide only the top-level class + # and users should require only that. + namespace = [] + for part in parts: + if part == 'prototype' or part.isupper(): + return '.'.join(namespace) + namespace.append(part) + if part[0].isupper(): + return '.'.join(namespace) + + # At this point, we know there's no class or enum, so the namespace is + # just the identifier with the last part removed. With the exception of + # apply, inherits, and call, which should also be stripped. + if parts[-1] in ('apply', 'inherits', 'call'): + parts.pop() + parts.pop() + + # If the last part ends with an underscore, it is a private variable, + # method, or enum. The namespace is whatever is before it. + if parts and parts[-1].endswith('_'): + parts.pop() + + return '.'.join(parts) + + return None diff --git a/tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py b/tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py new file mode 100755 index 0000000..7aeae21 --- /dev/null +++ b/tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py @@ -0,0 +1,873 @@ +#!/usr/bin/env python +# +# Copyright 2010 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
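Before the unit tests that follow, a few representative entries from their _test_cases table make the namespace-derivation rules of GetClosurizedNamespace above concrete (a sketch; info is constructed the same way testGetClosurizedNamespace constructs it):

    info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
        closurized_namespaces=['package'], ignored_extra_namespaces=[])
    info.GetClosurizedNamespace('goog.global.anything')                     # None
    info.GetClosurizedNamespace('package.subpackage.methodName')            # 'package.subpackage'
    info.GetClosurizedNamespace('package.ClassName.prototype.methodName')   # 'package.ClassName'
    info.GetClosurizedNamespace('package.ClassName.methodName.apply')       # 'package.ClassName'
    info.GetClosurizedNamespace('package.className.privateProperty_')       # 'package.className'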
+ +"""Unit tests for ClosurizedNamespacesInfo.""" + + + +import unittest as googletest +from closure_linter import aliaspass +from closure_linter import closurizednamespacesinfo +from closure_linter import ecmametadatapass +from closure_linter import javascriptstatetracker +from closure_linter import javascripttokens +from closure_linter import testutil +from closure_linter import tokenutil + +# pylint: disable=g-bad-name +TokenType = javascripttokens.JavaScriptTokenType + + +def _ToLineDict(illegal_alias_stmts): + """Replaces tokens with the respective line number.""" + return {k: v.line_number for k, v in illegal_alias_stmts.iteritems()} + + +class ClosurizedNamespacesInfoTest(googletest.TestCase): + """Tests for ClosurizedNamespacesInfo.""" + + _test_cases = { + 'goog.global.anything': None, + 'package.CONSTANT': 'package', + 'package.methodName': 'package', + 'package.subpackage.methodName': 'package.subpackage', + 'package.subpackage.methodName.apply': 'package.subpackage', + 'package.ClassName.something': 'package.ClassName', + 'package.ClassName.Enum.VALUE.methodName': 'package.ClassName', + 'package.ClassName.CONSTANT': 'package.ClassName', + 'package.namespace.CONSTANT.methodName': 'package.namespace', + 'package.ClassName.inherits': 'package.ClassName', + 'package.ClassName.apply': 'package.ClassName', + 'package.ClassName.methodName.apply': 'package.ClassName', + 'package.ClassName.methodName.call': 'package.ClassName', + 'package.ClassName.prototype.methodName': 'package.ClassName', + 'package.ClassName.privateMethod_': 'package.ClassName', + 'package.className.privateProperty_': 'package.className', + 'package.className.privateProperty_.methodName': 'package.className', + 'package.ClassName.PrivateEnum_': 'package.ClassName', + 'package.ClassName.prototype.methodName.apply': 'package.ClassName', + 'package.ClassName.property.subProperty': 'package.ClassName', + 'package.className.prototype.something.somethingElse': 'package.className' + } + + def testGetClosurizedNamespace(self): + """Tests that the correct namespace is returned for various identifiers.""" + namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( + closurized_namespaces=['package'], ignored_extra_namespaces=[]) + for identifier, expected_namespace in self._test_cases.items(): + actual_namespace = namespaces_info.GetClosurizedNamespace(identifier) + self.assertEqual( + expected_namespace, + actual_namespace, + 'expected namespace "' + str(expected_namespace) + + '" for identifier "' + str(identifier) + '" but was "' + + str(actual_namespace) + '"') + + def testIgnoredExtraNamespaces(self): + """Tests that ignored_extra_namespaces are ignored.""" + token = self._GetRequireTokens('package.Something') + namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( + closurized_namespaces=['package'], + ignored_extra_namespaces=['package.Something']) + + self.assertFalse(namespaces_info.IsExtraRequire(token), + 'Should be valid since it is in ignored namespaces.') + + namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( + ['package'], []) + + self.assertTrue(namespaces_info.IsExtraRequire(token), + 'Should be invalid since it is not in ignored namespaces.') + + def testIsExtraProvide_created(self): + """Tests that provides for created namespaces are not extra.""" + input_lines = [ + 'goog.provide(\'package.Foo\');', + 'package.Foo = function() {};' + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + 
self.assertFalse(namespaces_info.IsExtraProvide(token), + 'Should not be extra since it is created.') + + def testIsExtraProvide_createdIdentifier(self): + """Tests that provides for created identifiers are not extra.""" + input_lines = [ + 'goog.provide(\'package.Foo.methodName\');', + 'package.Foo.methodName = function() {};' + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertFalse(namespaces_info.IsExtraProvide(token), + 'Should not be extra since it is created.') + + def testIsExtraProvide_notCreated(self): + """Tests that provides for non-created namespaces are extra.""" + input_lines = ['goog.provide(\'package.Foo\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertTrue(namespaces_info.IsExtraProvide(token), + 'Should be extra since it is not created.') + + def testIsExtraProvide_notCreatedMultipartClosurizedNamespace(self): + """Tests that provides for non-created namespaces are extra.""" + input_lines = ['goog.provide(\'multi.part.namespace.Foo\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['multi.part']) + + self.assertTrue(namespaces_info.IsExtraProvide(token), + 'Should be extra since it is not created.') + + def testIsExtraProvide_duplicate(self): + """Tests that providing a namespace twice makes the second one extra.""" + input_lines = [ + 'goog.provide(\'package.Foo\');', + 'goog.provide(\'package.Foo\');', + 'package.Foo = function() {};' + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + # Advance to the second goog.provide token. + token = tokenutil.Search(token.next, TokenType.IDENTIFIER) + + self.assertTrue(namespaces_info.IsExtraProvide(token), + 'Should be extra since it is already provided.') + + def testIsExtraProvide_notClosurized(self): + """Tests that provides of non-closurized namespaces are not extra.""" + input_lines = ['goog.provide(\'notclosurized.Foo\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertFalse(namespaces_info.IsExtraProvide(token), + 'Should not be extra since it is not closurized.') + + def testIsExtraRequire_used(self): + """Tests that requires for used namespaces are not extra.""" + input_lines = [ + 'goog.require(\'package.Foo\');', + 'var x = package.Foo.methodName();' + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertFalse(namespaces_info.IsExtraRequire(token), + 'Should not be extra since it is used.') + + def testIsExtraRequire_usedIdentifier(self): + """Tests that requires for used methods on classes are extra.""" + input_lines = [ + 'goog.require(\'package.Foo.methodName\');', + 'var x = package.Foo.methodName();' + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertTrue(namespaces_info.IsExtraRequire(token), + 'Should require the package, not the method specifically.') + + def testIsExtraRequire_notUsed(self): + """Tests that requires for unused namespaces are extra.""" + input_lines = ['goog.require(\'package.Foo\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertTrue(namespaces_info.IsExtraRequire(token), + 'Should be extra since it is not used.') + + def 
testIsExtraRequire_notUsedMultiPartClosurizedNamespace(self): + """Tests unused require with multi-part closurized namespaces.""" + + input_lines = ['goog.require(\'multi.part.namespace.Foo\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['multi.part']) + + self.assertTrue(namespaces_info.IsExtraRequire(token), + 'Should be extra since it is not used.') + + def testIsExtraRequire_notClosurized(self): + """Tests that requires of non-closurized namespaces are not extra.""" + input_lines = ['goog.require(\'notclosurized.Foo\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertFalse(namespaces_info.IsExtraRequire(token), + 'Should not be extra since it is not closurized.') + + def testIsExtraRequire_objectOnClass(self): + """Tests that requiring an object on a class is extra.""" + input_lines = [ + 'goog.require(\'package.Foo.Enum\');', + 'var x = package.Foo.Enum.VALUE1;', + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertTrue(namespaces_info.IsExtraRequire(token), + 'The whole class, not the object, should be required.'); + + def testIsExtraRequire_constantOnClass(self): + """Tests that requiring a constant on a class is extra.""" + input_lines = [ + 'goog.require(\'package.Foo.CONSTANT\');', + 'var x = package.Foo.CONSTANT', + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertTrue(namespaces_info.IsExtraRequire(token), + 'The class, not the constant, should be required.'); + + def testIsExtraRequire_constantNotOnClass(self): + """Tests that requiring a constant not on a class is OK.""" + input_lines = [ + 'goog.require(\'package.subpackage.CONSTANT\');', + 'var x = package.subpackage.CONSTANT', + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertFalse(namespaces_info.IsExtraRequire(token), + 'Constants can be required except on classes.'); + + def testIsExtraRequire_methodNotOnClass(self): + """Tests that requiring a method not on a class is OK.""" + input_lines = [ + 'goog.require(\'package.subpackage.method\');', + 'var x = package.subpackage.method()', + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + + self.assertFalse(namespaces_info.IsExtraRequire(token), + 'Methods can be required except on classes.'); + + def testIsExtraRequire_defaults(self): + """Tests that there are no warnings about extra requires for test utils""" + input_lines = ['goog.require(\'goog.testing.jsunit\');'] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['goog']) + + self.assertFalse(namespaces_info.IsExtraRequire(token), + 'Should not be extra since it is for testing.') + + def testGetMissingProvides_provided(self): + """Tests that provided functions don't cause a missing provide.""" + input_lines = [ + 'goog.provide(\'package.Foo\');', + 'package.Foo = function() {};' + ] + + namespaces_info = self._GetNamespacesInfoForScript( + input_lines, ['package']) + + self.assertEquals(0, len(namespaces_info.GetMissingProvides())) + + def testGetMissingProvides_providedIdentifier(self): + """Tests that provided identifiers don't cause a missing provide.""" + input_lines = [ + 'goog.provide(\'package.Foo.methodName\');', + 'package.Foo.methodName = function() {};' + ] + + namespaces_info = 
self._GetNamespacesInfoForScript(input_lines, ['package']) + self.assertEquals(0, len(namespaces_info.GetMissingProvides())) + + def testGetMissingProvides_providedParentIdentifier(self): + """Tests that provided identifiers on a class don't cause a missing provide + on objects attached to that class.""" + input_lines = [ + 'goog.provide(\'package.foo.ClassName\');', + 'package.foo.ClassName.methodName = function() {};', + 'package.foo.ClassName.ObjectName = 1;', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + self.assertEquals(0, len(namespaces_info.GetMissingProvides())) + + def testGetMissingProvides_unprovided(self): + """Tests that unprovided functions cause a missing provide.""" + input_lines = ['package.Foo = function() {};'] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + + missing_provides = namespaces_info.GetMissingProvides() + self.assertEquals(1, len(missing_provides)) + missing_provide = missing_provides.popitem() + self.assertEquals('package.Foo', missing_provide[0]) + self.assertEquals(1, missing_provide[1]) + + def testGetMissingProvides_privatefunction(self): + """Tests that unprovided private functions don't cause a missing provide.""" + input_lines = ['package.Foo_ = function() {};'] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + self.assertEquals(0, len(namespaces_info.GetMissingProvides())) + + def testGetMissingProvides_required(self): + """Tests that required namespaces don't cause a missing provide.""" + input_lines = [ + 'goog.require(\'package.Foo\');', + 'package.Foo.methodName = function() {};' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + self.assertEquals(0, len(namespaces_info.GetMissingProvides())) + + def testGetMissingRequires_required(self): + """Tests that required namespaces don't cause a missing require.""" + input_lines = [ + 'goog.require(\'package.Foo\');', + 'package.Foo();' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(0, len(missing_requires)) + + def testGetMissingRequires_requiredIdentifier(self): + """Tests that required namespaces satisfy identifiers on that namespace.""" + input_lines = [ + 'goog.require(\'package.Foo\');', + 'package.Foo.methodName();' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(0, len(missing_requires)) + + def testGetMissingRequires_requiredNamespace(self): + """Tests that required namespaces satisfy the namespace.""" + input_lines = [ + 'goog.require(\'package.soy.fooTemplate\');', + 'render(package.soy.fooTemplate);' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(0, len(missing_requires)) + + def testGetMissingRequires_requiredParentClass(self): + """Tests that requiring a parent class of an object is sufficient to prevent + a missing require on that object.""" + input_lines = [ + 'goog.require(\'package.Foo\');', + 'package.Foo.methodName();', + 'package.Foo.methodName(package.Foo.ObjectName);' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(0, len(missing_requires)) + + def 
testGetMissingRequires_unrequired(self): + """Tests that unrequired namespaces cause a missing require.""" + input_lines = ['package.Foo();'] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(1, len(missing_requires)) + missing_req = missing_requires.popitem() + self.assertEquals('package.Foo', missing_req[0]) + self.assertEquals(1, missing_req[1]) + + def testGetMissingRequires_provided(self): + """Tests that provided namespaces satisfy identifiers on that namespace.""" + input_lines = [ + 'goog.provide(\'package.Foo\');', + 'package.Foo.methodName();' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(0, len(missing_requires)) + + def testGetMissingRequires_created(self): + """Tests that created namespaces do not satisfy usage of an identifier.""" + input_lines = [ + 'package.Foo = function();', + 'package.Foo.methodName();', + 'package.Foo.anotherMethodName1();', + 'package.Foo.anotherMethodName2();' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(1, len(missing_requires)) + missing_require = missing_requires.popitem() + self.assertEquals('package.Foo', missing_require[0]) + # Make sure line number of first occurrence is reported + self.assertEquals(2, missing_require[1]) + + def testGetMissingRequires_createdIdentifier(self): + """Tests that created identifiers satisfy usage of the identifier.""" + input_lines = [ + 'package.Foo.methodName = function();', + 'package.Foo.methodName();' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(0, len(missing_requires)) + + def testGetMissingRequires_implements(self): + """Tests that a parametrized type requires the correct identifier.""" + input_lines = [ + '/** @constructor @implements {package.Bar} */', + 'package.Foo = function();', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertItemsEqual({'package.Bar': 1}, missing_requires) + + def testGetMissingRequires_objectOnClass(self): + """Tests that we should require a class, not the object on the class.""" + input_lines = [ + 'goog.require(\'package.Foo.Enum\');', + 'var x = package.Foo.Enum.VALUE1;', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(1, len(missing_requires), + 'The whole class, not the object, should be required.') + + def testGetMissingRequires_variableWithSameName(self): + """Tests that we should not goog.require variables and parameters. + + b/5362203 Variables in scope are not missing namespaces. 
+ """ + input_lines = [ + 'goog.provide(\'Foo\');', + 'Foo.A = function();', + 'Foo.A.prototype.method = function(ab) {', + ' if (ab) {', + ' var docs;', + ' var lvalue = new Obj();', + ' // Variable in scope hence not goog.require here.', + ' docs.foo.abc = 1;', + ' lvalue.next();', + ' }', + ' // Since js is function scope this should also not goog.require.', + ' docs.foo.func();', + ' // Its not a variable in scope hence goog.require.', + ' dummy.xyz.reset();', + ' return this.method2();', + '};', + 'Foo.A.prototype.method1 = function(docs, abcd, xyz) {', + ' // Parameter hence not goog.require.', + ' docs.nodes.length = 2;', + ' lvalue.abc.reset();', + '};' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['Foo', + 'docs', + 'lvalue', + 'dummy']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals(2, len(missing_requires)) + self.assertItemsEqual( + {'dummy.xyz': 14, + 'lvalue.abc': 20}, missing_requires) + + def testIsFirstProvide(self): + """Tests operation of the isFirstProvide method.""" + input_lines = [ + 'goog.provide(\'package.Foo\');', + 'package.Foo.methodName();' + ] + + token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + input_lines, ['package']) + self.assertTrue(namespaces_info.IsFirstProvide(token)) + + def testGetWholeIdentifierString(self): + """Tests that created identifiers satisfy usage of the identifier.""" + input_lines = [ + 'package.Foo.', + ' veryLong.', + ' identifier;' + ] + + token = testutil.TokenizeSource(input_lines) + + self.assertEquals('package.Foo.veryLong.identifier', + tokenutil.GetIdentifierForToken(token)) + + self.assertEquals(None, + tokenutil.GetIdentifierForToken(token.next)) + + def testScopified(self): + """Tests that a goog.scope call is noticed.""" + input_lines = [ + 'goog.scope(function() {', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + self.assertTrue(namespaces_info._scopified_file) + + def testScope_unusedAlias(self): + """Tests that an unused alias symbol is illegal.""" + input_lines = [ + 'goog.scope(function() {', + 'var Event = goog.events.Event;', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts)) + + def testScope_usedMultilevelAlias(self): + """Tests that an used alias symbol in a deep namespace is ok.""" + input_lines = [ + 'goog.require(\'goog.Events\');', + 'goog.scope(function() {', + 'var Event = goog.Events.DeepNamespace.Event;', + 'Event();', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({}, illegal_alias_stmts) + + def testScope_usedAlias(self): + """Tests that aliased symbols result in correct requires.""" + input_lines = [ + 'goog.scope(function() {', + 'var Event = goog.events.Event;', + 'var dom = goog.dom;', + 'Event(dom.classes.get);', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, illegal_alias_stmts) + self.assertEquals({'goog.dom.classes': 4, 'goog.events.Event': 4}, + missing_requires) + + def testModule_alias(self): + """Tests that 
goog.module style aliases are supported.""" + input_lines = [ + 'goog.module(\'test.module\');', + 'var Unused = goog.require(\'goog.Unused\');', + 'var AliasedClass = goog.require(\'goog.AliasedClass\');', + 'var x = new AliasedClass();', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + namespaceToken = self._GetRequireTokens('goog.AliasedClass') + self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken), + 'AliasedClass should be marked as used') + unusedToken = self._GetRequireTokens('goog.Unused') + self.assertTrue(namespaces_info.IsExtraRequire(unusedToken), + 'Unused should be marked as not used') + + def testModule_aliasInScope(self): + """Tests that goog.module style aliases are supported.""" + input_lines = [ + 'goog.module(\'test.module\');', + 'var AliasedClass = goog.require(\'goog.AliasedClass\');', + 'goog.scope(function() {', + 'var x = new AliasedClass();', + '});', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + namespaceToken = self._GetRequireTokens('goog.AliasedClass') + self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken), + 'AliasedClass should be marked as used') + + def testModule_getAlwaysProvided(self): + """Tests that goog.module.get is recognized as a built-in.""" + input_lines = [ + 'goog.provide(\'test.MyClass\');', + 'goog.require(\'goog.someModule\');', + 'goog.scope(function() {', + 'var someModule = goog.module.get(\'goog.someModule\');', + 'test.MyClass = function() {};', + '});', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + self.assertEquals({}, namespaces_info.GetMissingRequires()[0]) + + def testModule_requireForGet(self): + """Tests that goog.module.get needs a goog.require call.""" + input_lines = [ + 'goog.provide(\'test.MyClass\');', + 'function foo() {', + ' var someModule = goog.module.get(\'goog.someModule\');', + ' someModule.doSth();', + '}', + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + self.assertEquals({'goog.someModule': 3}, + namespaces_info.GetMissingRequires()[0]) + + def testScope_usedTypeAlias(self): + """Tests aliased symbols in type annotations.""" + input_lines = [ + 'goog.scope(function() {', + 'var Event = goog.events.Event;', + '/** @type {Event} */;', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts)) + + def testScope_partialAlias_typeOnly(self): + """Tests a partial alias only used in type annotations. + + In this example, some goog.events namespace would need to be required + so that evaluating goog.events.bar doesn't throw an error. + """ + input_lines = [ + 'goog.scope(function() {', + 'var bar = goog.events.bar;', + '/** @type {bar.Foo} */;', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts)) + + def testScope_partialAlias(self): + """Tests a partial alias in conjunction with a type annotation. + + In this example, the partial alias is already defined by another type, + therefore the doc-only type doesn't need to be required. 
+ """ + input_lines = [ + 'goog.scope(function() {', + 'var bar = goog.events.bar;', + '/** @type {bar.Event} */;', + 'bar.EventType();' + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({'goog.events.bar.EventType': 4}, missing_requires) + self.assertEquals({}, illegal_alias_stmts) + + def testScope_partialAliasRequires(self): + """Tests partial aliases with correct requires.""" + input_lines = [ + 'goog.require(\'goog.events.bar.EventType\');', + 'goog.scope(function() {', + 'var bar = goog.events.bar;', + '/** @type {bar.Event} */;', + 'bar.EventType();' + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({}, illegal_alias_stmts) + + def testScope_partialAliasRequiresBoth(self): + """Tests partial aliases with correct requires.""" + input_lines = [ + 'goog.require(\'goog.events.bar.Event\');', + 'goog.require(\'goog.events.bar.EventType\');', + 'goog.scope(function() {', + 'var bar = goog.events.bar;', + '/** @type {bar.Event} */;', + 'bar.EventType();' + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({}, illegal_alias_stmts) + event_token = self._GetRequireTokens('goog.events.bar.Event') + self.assertTrue(namespaces_info.IsExtraRequire(event_token)) + + def testScope_partialAliasNoSubtypeRequires(self): + """Tests that partial aliases don't yield subtype requires (regression).""" + input_lines = [ + 'goog.provide(\'goog.events.Foo\');', + 'goog.scope(function() {', + 'goog.events.Foo = {};', + 'var Foo = goog.events.Foo;' + 'Foo.CssName_ = {};' + 'var CssName_ = Foo.CssName_;' + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, _ = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + + def testScope_aliasNamespace(self): + """Tests that an unused alias namespace is not required when available. + + In the example goog.events.Bar is not required, because the namespace + goog.events is already defined because goog.events.Foo is required. 
+ """ + input_lines = [ + 'goog.require(\'goog.events.Foo\');', + 'goog.scope(function() {', + 'var Bar = goog.events.Bar;', + '/** @type {Bar} */;', + 'goog.events.Foo;', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({}, illegal_alias_stmts) + + def testScope_aliasNamespaceIllegal(self): + """Tests that an unused alias namespace is not required when available.""" + input_lines = [ + 'goog.scope(function() {', + 'var Bar = goog.events.Bar;', + '/** @type {Bar} */;', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, missing_requires) + self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts)) + + def testScope_provides(self): + """Tests that aliased symbols result in correct provides.""" + input_lines = [ + 'goog.scope(function() {', + 'goog.bar = {};', + 'var bar = goog.bar;', + 'bar.Foo = {};', + '});' + ] + + namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog']) + missing_provides = namespaces_info.GetMissingProvides() + self.assertEquals({'goog.bar.Foo': 4}, missing_provides) + _, illegal_alias_stmts = namespaces_info.GetMissingRequires() + self.assertEquals({}, illegal_alias_stmts) + + def testSetTestOnlyNamespaces(self): + """Tests that a namespace in setTestOnly makes it a valid provide.""" + namespaces_info = self._GetNamespacesInfoForScript([ + 'goog.setTestOnly(\'goog.foo.barTest\');' + ], ['goog']) + + token = self._GetProvideTokens('goog.foo.barTest') + self.assertFalse(namespaces_info.IsExtraProvide(token)) + + token = self._GetProvideTokens('goog.foo.bazTest') + self.assertTrue(namespaces_info.IsExtraProvide(token)) + + def testSetTestOnlyComment(self): + """Ensure a comment in setTestOnly does not cause a created namespace.""" + namespaces_info = self._GetNamespacesInfoForScript([ + 'goog.setTestOnly(\'this is a comment\');' + ], ['goog']) + + self.assertEquals( + [], namespaces_info._created_namespaces, + 'A comment in setTestOnly should not modify created namespaces.') + + def _GetNamespacesInfoForScript(self, script, closurized_namespaces=None): + _, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript( + script, closurized_namespaces) + + return namespaces_info + + def _GetStartTokenAndNamespacesInfoForScript( + self, script, closurized_namespaces): + + token = testutil.TokenizeSource(script) + return token, self._GetInitializedNamespacesInfo( + token, closurized_namespaces, []) + + def _GetInitializedNamespacesInfo(self, token, closurized_namespaces, + ignored_extra_namespaces): + """Returns a namespaces info initialized with the given token stream.""" + namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( + closurized_namespaces=closurized_namespaces, + ignored_extra_namespaces=ignored_extra_namespaces) + state_tracker = javascriptstatetracker.JavaScriptStateTracker() + + ecma_pass = ecmametadatapass.EcmaMetaDataPass() + ecma_pass.Process(token) + + state_tracker.DocFlagPass(token, error_handler=None) + + alias_pass = aliaspass.AliasPass(closurized_namespaces) + alias_pass.Process(token) + + while token: + state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken()) + namespaces_info.ProcessToken(token, state_tracker) + state_tracker.HandleAfterToken(token) + token = token.next 
+ + return namespaces_info + + def _GetProvideTokens(self, namespace): + """Returns a list of tokens for a goog.require of the given namespace.""" + line_text = 'goog.require(\'' + namespace + '\');\n' + return testutil.TokenizeSource([line_text]) + + def _GetRequireTokens(self, namespace): + """Returns a list of tokens for a goog.require of the given namespace.""" + line_text = 'goog.require(\'' + namespace + '\');\n' + return testutil.TokenizeSource([line_text]) + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/closure_linter/common/__init__.py b/tools/closure_linter/closure_linter/common/__init__.py index 4265cc3..5793043 100755 --- a/tools/closure_linter/closure_linter/common/__init__.py +++ b/tools/closure_linter/closure_linter/common/__init__.py @@ -1 +1,16 @@ #!/usr/bin/env python +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Package indicator for gjslint.common.""" diff --git a/tools/closure_linter/closure_linter/common/error.py b/tools/closure_linter/closure_linter/common/error.py index 0e3b476..4209c23 100755 --- a/tools/closure_linter/closure_linter/common/error.py +++ b/tools/closure_linter/closure_linter/common/error.py @@ -23,7 +23,7 @@ __author__ = ('robbyw@google.com (Robert Walker)', class Error(object): """Object representing a style error.""" - def __init__(self, code, message, token, position, fix_data): + def __init__(self, code, message, token=None, position=None, fix_data=None): """Initialize the error object. Args: diff --git a/tools/closure_linter/closure_linter/common/erroraccumulator.py b/tools/closure_linter/closure_linter/common/erroraccumulator.py index 7bb0c97..55844ba 100755 --- a/tools/closure_linter/closure_linter/common/erroraccumulator.py +++ b/tools/closure_linter/closure_linter/common/erroraccumulator.py @@ -35,7 +35,7 @@ class ErrorAccumulator(errorhandler.ErrorHandler): Args: error: The error object """ - self._errors.append((error.token.line_number, error.code)) + self._errors.append(error) def GetErrors(self): """Returns the accumulated errors. diff --git a/tools/closure_linter/closure_linter/common/erroroutput.py b/tools/closure_linter/closure_linter/common/erroroutput.py new file mode 100644 index 0000000..149738b --- /dev/null +++ b/tools/closure_linter/closure_linter/common/erroroutput.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
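Together with the reworked ErrorAccumulator above, which now stores whole error objects rather than (line, code) tuples, the formatting helpers in this new module let a caller render accumulated errors. A rough sketch, with the lint pass itself elided:

    from closure_linter.common import erroraccumulator
    from closure_linter.common import erroroutput

    accumulator = erroraccumulator.ErrorAccumulator()
    # ... run a lint pass with accumulator registered as the error handler ...
    for err in accumulator.GetErrors():
      # Prints "Line <n>, E:<code>: <message>"; GetUnixErrorOutput(filename, err)
      # produces the "filename:line:(code) message" form instead.
      print erroroutput.GetErrorOutput(err)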
+ +"""Utility functions to format errors.""" + + +__author__ = ('robbyw@google.com (Robert Walker)', + 'ajp@google.com (Andy Perelson)', + 'nnaze@google.com (Nathan Naze)') + + +def GetUnixErrorOutput(filename, error, new_error=False): + """Get a output line for an error in UNIX format.""" + + line = '' + + if error.token: + line = '%d' % error.token.line_number + + error_code = '%04d' % error.code + if new_error: + error_code = 'New Error ' + error_code + return '%s:%s:(%s) %s' % (filename, line, error_code, error.message) + + +def GetErrorOutput(error, new_error=False): + """Get a output line for an error in regular format.""" + + line = '' + if error.token: + line = 'Line %d, ' % error.token.line_number + + code = 'E:%04d' % error.code + + error_message = error.message + if new_error: + error_message = 'New Error ' + error_message + + return '%s%s: %s' % (line, code, error.message) diff --git a/tools/closure_linter/closure_linter/common/errorprinter.py b/tools/closure_linter/closure_linter/common/errorprinter.py deleted file mode 100755 index c975406..0000000 --- a/tools/closure_linter/closure_linter/common/errorprinter.py +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2008 The Closure Linter Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS-IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Linter error handler class that prints errors to stdout.""" - -__author__ = ('robbyw@google.com (Robert Walker)', - 'ajp@google.com (Andy Perelson)') - -from closure_linter.common import error -from closure_linter.common import errorhandler - -Error = error.Error - - -# The error message is of the format: -# Line , E:: message -DEFAULT_FORMAT = 1 - -# The error message is of the format: -# filename:[line number]:message -UNIX_FORMAT = 2 - - -class ErrorPrinter(errorhandler.ErrorHandler): - """ErrorHandler that prints errors to stdout.""" - - def __init__(self, new_errors=None): - """Initializes this error printer. - - Args: - new_errors: A sequence of error codes representing recently introduced - errors, defaults to None. - """ - # Number of errors - self._error_count = 0 - - # Number of new errors - self._new_error_count = 0 - - # Number of files checked - self._total_file_count = 0 - - # Number of files with errors - self._error_file_count = 0 - - # Dict of file name to number of errors - self._file_table = {} - - # List of errors for each file - self._file_errors = None - - # Current file - self._filename = None - - self._format = DEFAULT_FORMAT - - if new_errors: - self._new_errors = frozenset(new_errors) - else: - self._new_errors = frozenset(set()) - - def SetFormat(self, format): - """Sets the print format of errors. - - Args: - format: One of {DEFAULT_FORMAT, UNIX_FORMAT}. - """ - self._format = format - - def HandleFile(self, filename, first_token): - """Notifies this ErrorPrinter that subsequent errors are in filename. - - Sets the current file name, and sets a flag stating the header for this file - has not been printed yet. 
- - Should be called by a linter before a file is style checked. - - Args: - filename: The name of the file about to be checked. - first_token: The first token in the file, or None if there was an error - opening the file - """ - if self._filename and self._file_table[self._filename]: - print - - self._filename = filename - self._file_table[filename] = 0 - self._total_file_count += 1 - self._file_errors = [] - - def HandleError(self, error): - """Prints a formatted error message about the specified error. - - The error message is of the format: - Error #, line #: message - - Args: - error: The error object - """ - self._file_errors.append(error) - self._file_table[self._filename] += 1 - self._error_count += 1 - - if self._new_errors and error.code in self._new_errors: - self._new_error_count += 1 - - def _PrintError(self, error): - """Prints a formatted error message about the specified error. - - Args: - error: The error object - """ - new_error = self._new_errors and error.code in self._new_errors - if self._format == DEFAULT_FORMAT: - line = '' - if error.token: - line = 'Line %d, ' % error.token.line_number - - code = 'E:%04d' % error.code - if new_error: - print '%s%s: (New error) %s' % (line, code, error.message) - else: - print '%s%s: %s' % (line, code, error.message) - else: - # UNIX format - filename = self._filename - line = '' - if error.token: - line = '%d' % error.token.line_number - - error_code = '%04d' % error.code - if new_error: - error_code = 'New Error ' + error_code - print '%s:%s:(%s) %s' % (filename, line, error_code, error.message) - - def FinishFile(self): - """Finishes handling the current file.""" - if self._file_errors: - self._error_file_count += 1 - - if self._format != UNIX_FORMAT: - print '----- FILE : %s -----' % (self._filename) - - self._file_errors.sort(Error.Compare) - - for error in self._file_errors: - self._PrintError(error) - - def HasErrors(self): - """Whether this error printer encountered any errors. - - Returns: - True if the error printer encountered any errors. - """ - return self._error_count - - def HasNewErrors(self): - """Whether this error printer encountered any new errors. - - Returns: - True if the error printer encountered any new errors. - """ - return self._new_error_count - - def HasOldErrors(self): - """Whether this error printer encountered any old errors. - - Returns: - True if the error printer encountered any old errors. - """ - return self._error_count - self._new_error_count - - def PrintSummary(self): - """Print a summary of the number of errors and files.""" - if self.HasErrors() or self.HasNewErrors(): - print ('Found %d errors, including %d new errors, in %d files ' - '(%d files OK).' % ( - self._error_count, - self._new_error_count, - self._error_file_count, - self._total_file_count - self._error_file_count)) - else: - print '%d files checked, no errors found.' % self._total_file_count - - def PrintFileSummary(self): - """Print a detailed summary of the number of errors in each file.""" - keys = self._file_table.keys() - keys.sort() - for filename in keys: - print '%s: %d' % (filename, self._file_table[filename]) diff --git a/tools/closure_linter/closure_linter/common/filetestcase.py b/tools/closure_linter/closure_linter/common/filetestcase.py index ae4b883..7cd83cd 100755 --- a/tools/closure_linter/closure_linter/common/filetestcase.py +++ b/tools/closure_linter/closure_linter/common/filetestcase.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# # Copyright 2007 The Closure Linter Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,6 +25,7 @@ __author__ = ('robbyw@google.com (Robert Walker)', import re +import gflags as flags import unittest as googletest from closure_linter.common import erroraccumulator @@ -41,21 +41,27 @@ class AnnotatedFileTestCase(googletest.TestCase): _EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P[+-]?[0-9]+):)?' r'\s*(?P%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE) - def __init__(self, filename, runner, converter): + def __init__(self, filename, lint_callable, converter): """Create a single file lint test case. Args: filename: Filename to test. - runner: Object implementing the LintRunner interface that lints a file. + lint_callable: Callable that lints a file. This is usually runner.Run(). converter: Function taking an error string and returning an error code. """ googletest.TestCase.__init__(self, 'runTest') self._filename = filename self._messages = [] - self._runner = runner + self._lint_callable = lint_callable self._converter = converter + def setUp(self): + flags.FLAGS.dot_on_next_line = True + + def tearDown(self): + flags.FLAGS.dot_on_next_line = False + def shortDescription(self): """Provides a description for the test.""" return 'Run linter on %s' % self._filename @@ -65,7 +71,7 @@ class AnnotatedFileTestCase(googletest.TestCase): try: filename = self._filename stream = open(filename) - except IOError, ex: + except IOError as ex: raise IOError('Could not find testdata resource for %s: %s' % (self._filename, ex)) @@ -96,10 +102,14 @@ class AnnotatedFileTestCase(googletest.TestCase): return messages def _ProcessFileAndGetMessages(self, filename): - """Trap gpylint's output parse it to get messages added.""" - errors = erroraccumulator.ErrorAccumulator() - self._runner.Run([filename], errors) + """Trap gjslint's output parse it to get messages added.""" + error_accumulator = erroraccumulator.ErrorAccumulator() + self._lint_callable(filename, error_accumulator) + + errors = error_accumulator.GetErrors() + + # Convert to expected tuple format. - errors = errors.GetErrors() - errors.sort() - return errors + error_msgs = [(error.token.line_number, error.code) for error in errors] + error_msgs.sort() + return error_msgs diff --git a/tools/closure_linter/closure_linter/common/tokenizer.py b/tools/closure_linter/closure_linter/common/tokenizer.py index 0234720..9420ea3 100755 --- a/tools/closure_linter/closure_linter/common/tokenizer.py +++ b/tools/closure_linter/closure_linter/common/tokenizer.py @@ -90,7 +90,8 @@ class Tokenizer(object): Returns: The newly created Token object. """ - return tokens.Token(string, token_type, line, line_number, values) + return tokens.Token(string, token_type, line, line_number, values, + line_number) def __TokenizeLine(self, line): """Tokenizes the given line. diff --git a/tools/closure_linter/closure_linter/common/tokens.py b/tools/closure_linter/closure_linter/common/tokens.py index 5eaffa8..4703998 100755 --- a/tools/closure_linter/closure_linter/common/tokens.py +++ b/tools/closure_linter/closure_linter/common/tokens.py @@ -47,7 +47,8 @@ class Token(object): a separate metadata pass. """ - def __init__(self, string, token_type, line, line_number, values=None): + def __init__(self, string, token_type, line, line_number, values=None, + orig_line_number=None): """Creates a new Token object. Args: @@ -58,13 +59,18 @@ class Token(object): values: A dict of named values within the token. For instance, a function declaration may have a value called 'name' which captures the name of the function. 
+ orig_line_number: The line number of the original file this token comes + from. This should be only set during the tokenization process. For newly + created error fix tokens after that, it should be None. """ self.type = token_type self.string = string self.length = len(string) self.line = line self.line_number = line_number + self.orig_line_number = orig_line_number self.values = values + self.is_deleted = False # These parts can only be computed when the file is fully tokenized self.previous = None @@ -123,3 +129,17 @@ class Token(object): return '' % (self.type, self.string, self.values, self.line_number, self.metadata) + + def __iter__(self): + """Returns a token iterator.""" + node = self + while node: + yield node + node = node.next + + def __reversed__(self): + """Returns a reverse-direction token iterator.""" + node = self + while node: + yield node + node = node.previous diff --git a/tools/closure_linter/closure_linter/common/tokens_test.py b/tools/closure_linter/closure_linter/common/tokens_test.py new file mode 100644 index 0000000..01ec89d --- /dev/null +++ b/tools/closure_linter/closure_linter/common/tokens_test.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python +# Copyright 2011 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'nnaze@google.com (Nathan Naze)' + +import unittest as googletest +from closure_linter.common import tokens + + +def _CreateDummyToken(): + return tokens.Token('foo', None, 1, 1) + + +def _CreateDummyTokens(count): + dummy_tokens = [] + for _ in xrange(count): + dummy_tokens.append(_CreateDummyToken()) + return dummy_tokens + + +def _SetTokensAsNeighbors(neighbor_tokens): + for i in xrange(len(neighbor_tokens)): + prev_index = i - 1 + next_index = i + 1 + + if prev_index >= 0: + neighbor_tokens[i].previous = neighbor_tokens[prev_index] + + if next_index < len(neighbor_tokens): + neighbor_tokens[i].next = neighbor_tokens[next_index] + + +class TokensTest(googletest.TestCase): + + def testIsFirstInLine(self): + + # First token in file (has no previous). + self.assertTrue(_CreateDummyToken().IsFirstInLine()) + + a, b = _CreateDummyTokens(2) + _SetTokensAsNeighbors([a, b]) + + # Tokens on same line + a.line_number = 30 + b.line_number = 30 + + self.assertFalse(b.IsFirstInLine()) + + # Tokens on different lines + b.line_number = 31 + self.assertTrue(b.IsFirstInLine()) + + def testIsLastInLine(self): + # Last token in file (has no next). 
+ self.assertTrue(_CreateDummyToken().IsLastInLine()) + + a, b = _CreateDummyTokens(2) + _SetTokensAsNeighbors([a, b]) + + # Tokens on same line + a.line_number = 30 + b.line_number = 30 + self.assertFalse(a.IsLastInLine()) + + b.line_number = 31 + self.assertTrue(a.IsLastInLine()) + + def testIsType(self): + a = tokens.Token('foo', 'fakeType1', 1, 1) + self.assertTrue(a.IsType('fakeType1')) + self.assertFalse(a.IsType('fakeType2')) + + def testIsAnyType(self): + a = tokens.Token('foo', 'fakeType1', 1, 1) + self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2'])) + self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4'])) + + def testRepr(self): + a = tokens.Token('foo', 'fakeType1', 1, 1) + self.assertEquals('', str(a)) + + def testIter(self): + dummy_tokens = _CreateDummyTokens(5) + _SetTokensAsNeighbors(dummy_tokens) + a, b, c, d, e = dummy_tokens + + i = iter(a) + self.assertListEqual([a, b, c, d, e], list(i)) + + def testReverseIter(self): + dummy_tokens = _CreateDummyTokens(5) + _SetTokensAsNeighbors(dummy_tokens) + a, b, c, d, e = dummy_tokens + + ri = reversed(e) + self.assertListEqual([e, d, c, b, a], list(ri)) + + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/closure_linter/ecmalintrules.py b/tools/closure_linter/closure_linter/ecmalintrules.py index a971b44..c07dffc 100755 --- a/tools/closure_linter/closure_linter/ecmalintrules.py +++ b/tools/closure_linter/closure_linter/ecmalintrules.py @@ -23,25 +23,28 @@ __author__ = ('robbyw@google.com (Robert Walker)', import re +import gflags as flags + from closure_linter import checkerbase from closure_linter import ecmametadatapass +from closure_linter import error_check +from closure_linter import errorrules from closure_linter import errors from closure_linter import indentation -from closure_linter import javascripttokens from closure_linter import javascripttokenizer +from closure_linter import javascripttokens from closure_linter import statetracker from closure_linter import tokenutil from closure_linter.common import error -from closure_linter.common import htmlutil -from closure_linter.common import lintrunner from closure_linter.common import position -from closure_linter.common import tokens -import gflags as flags + FLAGS = flags.FLAGS -flags.DEFINE_boolean('strict', False, - 'Whether to validate against the stricter Closure style.') flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow') +# TODO(user): When flipping this to True, remove logic from unit tests +# that overrides this flag. +flags.DEFINE_boolean('dot_on_next_line', False, 'Require dots to be' + 'placed on the next line for wrapped expressions') # TODO(robbyw): Check for extra parens on return statements # TODO(robbyw): Check for 0px in strings @@ -53,8 +56,10 @@ Context = ecmametadatapass.EcmaContext Error = error.Error Modes = javascripttokenizer.JavaScriptModes Position = position.Position +Rule = error_check.Rule Type = javascripttokens.JavaScriptTokenType + class EcmaScriptLintRules(checkerbase.LintRulesBase): """EmcaScript lint style checking rules. @@ -67,14 +72,15 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): language. """ - # Static constants. - MAX_LINE_LENGTH = 80 + # It will be initialized in constructor so the flags are initialized. + max_line_length = -1 + # Static constants. 
MISSING_PARAMETER_SPACE = re.compile(r',\S') - EXTRA_SPACE = re.compile('(\(\s|\s\))') + EXTRA_SPACE = re.compile(r'(\(\s|\s\))') - ENDS_WITH_SPACE = re.compile('\s$') + ENDS_WITH_SPACE = re.compile(r'\s$') ILLEGAL_TAB = re.compile(r'\t') @@ -85,12 +91,18 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)') # Acceptable tokens to remove for line too long testing. - LONG_LINE_IGNORE = frozenset(['*', '//', '@see'] + + LONG_LINE_IGNORE = frozenset( + ['*', '//', '@see'] + ['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE]) + JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([ + '@fileoverview', '@param', '@return', '@returns']) + def __init__(self): """Initialize this lint rule object.""" checkerbase.LintRulesBase.__init__(self) + if EcmaScriptLintRules.max_line_length == -1: + EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength() def Initialize(self, checker, limited_doc_checks, is_html): """Initialize this lint rule object before parsing a new file.""" @@ -107,6 +119,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): Args: last_token: The last token in the line. + state: parser_state object that indicates the current state in the page """ # Start from the last token so that we have the flag object attached to # and DOC_FLAG tokens. @@ -119,8 +132,8 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): while token and token.line_number == line_number: if state.IsTypeToken(token): line.insert(0, 'x' * len(token.string)) - elif token.type in (Type.IDENTIFIER, Type.NORMAL): - # Dots are acceptable places to wrap. + elif token.type in (Type.IDENTIFIER, Type.OPERATOR): + # Dots are acceptable places to wrap (may be tokenized as identifiers). line.insert(0, token.string.replace('.', ' ')) else: line.insert(0, token.string) @@ -130,7 +143,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): line = line.rstrip('\n\r\f') try: length = len(unicode(line, 'utf-8')) - except: + except (LookupError, UnicodeDecodeError): # Unknown encoding. The line length may be wrong, as was originally the # case for utf-8 (see bug 1735846). For now just accept the default # length, but as we find problems we can either add test for other @@ -138,7 +151,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): # false positives at the cost of more false negatives. length = len(line) - if length > self.MAX_LINE_LENGTH: + if length > EcmaScriptLintRules.max_line_length: # If the line matches one of the exceptions, then it's ok. for long_line_regexp in self.GetLongLineExceptions(): @@ -150,43 +163,42 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): parts = set(line.split()) # We allow two "words" (type and name) when the line contains @param - max = 1 + max_parts = 1 if '@param' in parts: - max = 2 + max_parts = 2 # Custom tags like @requires may have url like descriptions, so ignore # the tag, similar to how we handle @see. custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags]) - if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags)) > max): - self._HandleError(errors.LINE_TOO_LONG, + if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags)) + > max_parts): + self._HandleError( + errors.LINE_TOO_LONG, 'Line too long (%d characters).' % len(line), last_token) - def _CheckJsDocType(self, token): + def _CheckJsDocType(self, token, js_type): """Checks the given type for style errors. Args: token: The DOC_FLAG token for the flag whose type to check. 
+ js_type: The flag's typeannotation.TypeAnnotation instance. """ - flag = token.attached_object - type = flag.type - if type and type is not None and not type.isspace(): - pieces = self.TYPE_SPLIT.split(type) - if len(pieces) == 1 and type.count('|') == 1 and ( - type.endswith('|null') or type.startswith('null|')): - self._HandleError(errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL, - 'Prefer "?Type" to "Type|null": "%s"' % type, token) - - for p in pieces: - if p.count('|') and p.count('?'): - # TODO(robbyw): We should do actual parsing of JsDoc types. As is, - # this won't report an error for {number|Array.?}, etc. - self._HandleError(errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE, - 'JsDoc types cannot contain both "?" and "|": "%s"' % p, token) - - if FLAGS.strict and (flag.type_start_token.type != Type.DOC_START_BRACE or - flag.type_end_token.type != Type.DOC_END_BRACE): - self._HandleError(errors.MISSING_BRACES_AROUND_TYPE, - 'Type must always be surrounded by curly braces.', token) + if not js_type: return + + if js_type.type_group and len(js_type.sub_types) == 2: + identifiers = [t.identifier for t in js_type.sub_types] + if 'null' in identifiers: + # Don't warn if the identifier is a template type (e.g. {TYPE|null}. + if not identifiers[0].isupper() and not identifiers[1].isupper(): + self._HandleError( + errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL, + 'Prefer "?Type" to "Type|null": "%s"' % js_type, token) + + # TODO(user): We should report an error for wrong usage of '?' and '|' + # e.g. {?number|string|null} etc. + + for sub_type in js_type.IterTypes(): + self._CheckJsDocType(token, sub_type) def _CheckForMissingSpaceBeforeToken(self, token): """Checks for a missing space at the beginning of a token. @@ -206,7 +218,60 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): errors.MISSING_SPACE, 'Missing space before "%s"' % token.string, token, - Position.AtBeginning()) + position=Position.AtBeginning()) + + def _CheckOperator(self, token): + """Checks an operator for spacing and line style. + + Args: + token: The operator token. + """ + last_code = token.metadata.last_code + + if not self._ExpectSpaceBeforeOperator(token): + if (token.previous and token.previous.type == Type.WHITESPACE and + last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER) and + last_code.line_number == token.line_number): + self._HandleError( + errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string, + token.previous, position=Position.All(token.previous.string)) + + elif (token.previous and + not token.previous.IsComment() and + not tokenutil.IsDot(token) and + token.previous.type in Type.EXPRESSION_ENDER_TYPES): + self._HandleError(errors.MISSING_SPACE, + 'Missing space before "%s"' % token.string, token, + position=Position.AtBeginning()) + + # Check wrapping of operators. + next_code = tokenutil.GetNextCodeToken(token) + + is_dot = tokenutil.IsDot(token) + wrapped_before = last_code and last_code.line_number != token.line_number + wrapped_after = next_code and next_code.line_number != token.line_number + + if FLAGS.dot_on_next_line and is_dot and wrapped_after: + self._HandleError( + errors.LINE_ENDS_WITH_DOT, + '"." 
must go on the following line', + token) + if (not is_dot and wrapped_before and + not token.metadata.IsUnaryOperator()): + self._HandleError( + errors.LINE_STARTS_WITH_OPERATOR, + 'Binary operator must go on previous line "%s"' % token.string, + token) + + def _IsLabel(self, token): + # A ':' token is considered part of a label if it occurs in a case + # statement, a plain label, or an object literal, i.e. is not part of a + # ternary. + + return (token.string == ':' and + token.metadata.context.type in (Context.LITERAL_ELEMENT, + Context.CASE_BLOCK, + Context.STATEMENT)) def _ExpectSpaceBeforeOperator(self, token): """Returns whether a space should appear before the given operator token. @@ -220,13 +285,13 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): if token.string == ',' or token.metadata.IsUnaryPostOperator(): return False + if tokenutil.IsDot(token): + return False + # Colons should appear in labels, object literals, the case of a switch # statement, and ternary operator. Only want a space in the case of the # ternary operator. - if (token.string == ':' and - token.metadata.context.type in (Context.LITERAL_ELEMENT, - Context.CASE_BLOCK, - Context.STATEMENT)): + if self._IsLabel(token): return False if token.metadata.IsUnaryOperator() and token.IsFirstInLine(): @@ -246,10 +311,10 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): last_in_line = token.IsLastInLine() last_non_space_token = state.GetLastNonSpaceToken() - type = token.type + token_type = token.type # Process the line change. - if not self._is_html and FLAGS.strict: + if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION): # TODO(robbyw): Support checking indentation in HTML files. indentation_errors = self._indentation.CheckToken(token, state) for indentation_error in indentation_errors: @@ -258,11 +323,12 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): if last_in_line: self._CheckLineLength(token, state) - if type == Type.PARAMETERS: + if token_type == Type.PARAMETERS: # Find missing spaces in parameter lists. if self.MISSING_PARAMETER_SPACE.search(token.string): + fix_data = ', '.join([s.strip() for s in token.string.split(',')]) self._HandleError(errors.MISSING_SPACE, 'Missing space after ","', - token) + token, position=None, fix_data=fix_data.strip()) # Find extra spaces at the beginning of parameter lists. Make sure # we aren't at the beginning of a continuing multi-line list. @@ -270,54 +336,56 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): space_count = len(token.string) - len(token.string.lstrip()) if space_count: self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("', - token, Position(0, space_count)) + token, position=Position(0, space_count)) - elif (type == Type.START_BLOCK and + elif (token_type == Type.START_BLOCK and token.metadata.context.type == Context.BLOCK): self._CheckForMissingSpaceBeforeToken(token) - elif type == Type.END_BLOCK: - # This check is for object literal end block tokens, but there is no need - # to test that condition since a comma at the end of any other kind of - # block is undoubtedly a parse error. 
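
The wrapping rules enforced by _CheckOperator above boil down to: a binary operator belongs at the end of the previous line, and with --dot_on_next_line a trailing "." is flagged as well. A minimal sketch, using bare line numbers in place of real tokens; check_wrapped_operator is an illustrative name, not linter API.

def check_wrapped_operator(op, prev_line, op_line, next_line,
                           is_unary=False, dot_on_next_line=False):
    """Returns a lint message for a wrapped operator, or None.

    prev_line/op_line/next_line are the line numbers of the previous code
    token, the operator itself and the next code token.
    """
    wrapped_before = prev_line != op_line   # operator starts a new line
    wrapped_after = next_line != op_line    # operator ends its line
    if op == '.':
        if dot_on_next_line and wrapped_after:
            return '"." must go on the following line'
        return None
    if wrapped_before and not is_unary:
        return 'Binary operator must go on previous line "%s"' % op
    return None


# foo
#     + bar   -> a binary operator starting a line is flagged
print(check_wrapped_operator('+', prev_line=1, op_line=2, next_line=2))
# foo.
#     bar     -> flagged only when --dot_on_next_line is set
print(check_wrapped_operator('.', 1, 1, 2, dot_on_next_line=True))
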
+ elif token_type == Type.END_BLOCK: last_code = token.metadata.last_code - if last_code.IsOperator(','): - self._HandleError(errors.COMMA_AT_END_OF_LITERAL, - 'Illegal comma at end of object literal', last_code, - Position.All(last_code.string)) - if state.InFunction() and state.IsFunctionClose(): - is_immediately_called = (token.next and - token.next.type == Type.START_PAREN) if state.InTopLevelFunction(): - # When the function was top-level and not immediately called, check - # that it's terminated by a semi-colon. - if state.InAssignedFunction(): - if not is_immediately_called and (last_in_line or - not token.next.type == Type.SEMICOLON): - self._HandleError(errors.MISSING_SEMICOLON_AFTER_FUNCTION, - 'Missing semicolon after function assigned to a variable', - token, Position.AtEnd(token.string)) - else: + # A semicolons should not be included at the end of a function + # declaration. + if not state.InAssignedFunction(): if not last_in_line and token.next.type == Type.SEMICOLON: - self._HandleError(errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION, + self._HandleError( + errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION, 'Illegal semicolon after function declaration', - token.next, Position.All(token.next.string)) + token.next, position=Position.All(token.next.string)) + + # A semicolon should be included at the end of a function expression + # that is not immediately called or used by a dot operator. + if (state.InAssignedFunction() and token.next + and token.next.type != Type.SEMICOLON): + next_token = tokenutil.GetNextCodeToken(token) + is_immediately_used = (next_token.type == Type.START_PAREN or + tokenutil.IsDot(next_token)) + if not is_immediately_used: + self._HandleError( + errors.MISSING_SEMICOLON_AFTER_FUNCTION, + 'Missing semicolon after function assigned to a variable', + token, position=Position.AtEnd(token.string)) - if (state.InInterfaceMethod() and last_code.type != Type.START_BLOCK): + if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK: self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE, - 'Interface methods cannot contain code', last_code) + 'Interface methods cannot contain code', last_code) elif (state.IsBlockClose() and token.next and token.next.type == Type.SEMICOLON): - self._HandleError(errors.REDUNDANT_SEMICOLON, - 'No semicolon is required to end a code block', - token.next, Position.All(token.next.string)) - - elif type == Type.SEMICOLON: + if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL + and last_code.metadata.context.type != Context.OBJECT_LITERAL): + self._HandleError( + errors.REDUNDANT_SEMICOLON, + 'No semicolon is required to end a code block', + token.next, position=Position.All(token.next.string)) + + elif token_type == Type.SEMICOLON: if token.previous and token.previous.type == Type.WHITESPACE: - self._HandleError(errors.EXTRA_SPACE, 'Extra space before ";"', - token.previous, Position.All(token.previous.string)) + self._HandleError( + errors.EXTRA_SPACE, 'Extra space before ";"', + token.previous, position=Position.All(token.previous.string)) if token.next and token.next.line_number == token.line_number: if token.metadata.context.type != Context.FOR_GROUP_BLOCK: @@ -326,10 +394,11 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): elif token.next.type not in ( Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN): - self._HandleError(errors.MISSING_SPACE, + self._HandleError( + errors.MISSING_SPACE, 'Missing space after ";" in for statement', token.next, - Position.AtBeginning()) + position=Position.AtBeginning()) 
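
The END_BLOCK handling above treats function declarations and assigned function expressions differently. Roughly, the verdicts it implies for a few top-level JavaScript snippets are listed below; this is an illustrative table only, not output of the linter itself.

# Each entry pairs a top-level JavaScript snippet with the outcome the
# checks above imply for the token after the closing "}".
CASES = [
    ('function f() {}', 'ok: declaration needs no trailing ";"'),
    ('function f() {};', 'ILLEGAL_SEMICOLON_AFTER_FUNCTION'),
    ('var f = function() {};', 'ok: assigned expression ends with ";"'),
    ('var f = function() {}', 'MISSING_SEMICOLON_AFTER_FUNCTION'),
    ('var g = function() {}()', 'ok: immediately called, no ";" after "}"'),
]

for snippet, verdict in CASES:
    print('%-26s %s' % (snippet, verdict))
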
last_code = token.metadata.last_code if last_code and last_code.type == Type.SEMICOLON: @@ -338,7 +407,8 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): # NOTE(user): This is not a perfect check, and will not throw an error # for cases like: for (var i = 0;; i < n; i++) {}, but then your code # probably won't work either. - for_token = tokenutil.CustomSearch(last_code, + for_token = tokenutil.CustomSearch( + last_code, lambda token: token.type == Type.KEYWORD and token.string == 'for', end_func=lambda token: token.type == Type.SEMICOLON, distance=None, @@ -346,113 +416,83 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): if not for_token: self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon', - token, Position.All(token.string)) - - elif type == Type.START_PAREN: - if token.previous and token.previous.type == Type.KEYWORD: + token, position=Position.All(token.string)) + + elif token_type == Type.START_PAREN: + # Ensure that opening parentheses have a space before any keyword + # that is not being invoked like a member function. + if (token.previous and token.previous.type == Type.KEYWORD and + (not token.previous.metadata or + not token.previous.metadata.last_code or + not token.previous.metadata.last_code.string or + token.previous.metadata.last_code.string[-1:] != '.')): self._HandleError(errors.MISSING_SPACE, 'Missing space before "("', - token, Position.AtBeginning()) + token, position=Position.AtBeginning()) elif token.previous and token.previous.type == Type.WHITESPACE: before_space = token.previous.previous + # Ensure that there is no extra space before a function invocation, + # even if the function being invoked happens to be a keyword. if (before_space and before_space.line_number == token.line_number and - before_space.type == Type.IDENTIFIER): - self._HandleError(errors.EXTRA_SPACE, 'Extra space before "("', - token.previous, Position.All(token.previous.string)) - - elif type == Type.START_BRACKET: - if (not first_in_line and token.previous.type == Type.WHITESPACE and - last_non_space_token and - last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES): - self._HandleError(errors.EXTRA_SPACE, 'Extra space before "["', - token.previous, Position.All(token.previous.string)) - # If the [ token is the first token in a line we shouldn't complain - # about a missing space before [. This is because some Ecma script - # languages allow syntax like: - # [Annotation] - # class MyClass {...} - # So we don't want to blindly warn about missing spaces before [. - # In the the future, when rules for computing exactly how many spaces - # lines should be indented are added, then we can return errors for - # [ tokens that are improperly indented. - # For example: - # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName = - # [a,b,c]; - # should trigger a proper indentation warning message as [ is not indented - # by four spaces. 
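
A condensed sketch of the START_PAREN decision above: a keyword directly followed by "(" normally wants a space, unless the keyword is really being invoked as a member, as in foo.delete(). The helper below and its string-typed arguments are illustrative, not linter API.

def needs_space_before_paren(prev_type, prev_string, last_code_string):
    """Decides whether a space is required between a keyword and "(".

    prev_type/prev_string describe the token just before "(";
    last_code_string is the code token before that (may be None).
    """
    if prev_type != 'KEYWORD':
        return False
    # "if(x)" or "return(x)"  -> a space is required before "(".
    # "foo.delete()"          -> "delete" acts as a member, no space wanted.
    is_member_invocation = (bool(last_code_string) and
                            last_code_string.endswith('.'))
    return not is_member_invocation


print(needs_space_before_paren('KEYWORD', 'if', None))        # True
print(needs_space_before_paren('KEYWORD', 'delete', 'foo.'))  # False
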
- elif (not first_in_line and token.previous and - not token.previous.type in ( - [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] + - Type.EXPRESSION_ENDER_TYPES)): - self._HandleError(errors.MISSING_SPACE, 'Missing space before "["', - token, Position.AtBeginning()) - - elif type in (Type.END_PAREN, Type.END_BRACKET): + before_space.type == Type.IDENTIFIER or + (before_space.type == Type.KEYWORD and before_space.metadata and + before_space.metadata.last_code and + before_space.metadata.last_code.string and + before_space.metadata.last_code.string[-1:] == '.')): + self._HandleError( + errors.EXTRA_SPACE, 'Extra space before "("', + token.previous, position=Position.All(token.previous.string)) + + elif token_type == Type.START_BRACKET: + self._HandleStartBracket(token, last_non_space_token) + elif token_type in (Type.END_PAREN, Type.END_BRACKET): # Ensure there is no space before closing parentheses, except when # it's in a for statement with an omitted section, or when it's at the # beginning of a line. if (token.previous and token.previous.type == Type.WHITESPACE and not token.previous.IsFirstInLine() and not (last_non_space_token and last_non_space_token.line_number == - token.line_number and + token.line_number and last_non_space_token.type == Type.SEMICOLON)): - self._HandleError(errors.EXTRA_SPACE, 'Extra space before "%s"' % - token.string, token.previous, Position.All(token.previous.string)) - - if token.type == Type.END_BRACKET: - last_code = token.metadata.last_code - if last_code.IsOperator(','): - self._HandleError(errors.COMMA_AT_END_OF_LITERAL, - 'Illegal comma at end of array literal', last_code, - Position.All(last_code.string)) + self._HandleError( + errors.EXTRA_SPACE, 'Extra space before "%s"' % + token.string, token.previous, + position=Position.All(token.previous.string)) - elif type == Type.WHITESPACE: + elif token_type == Type.WHITESPACE: if self.ILLEGAL_TAB.search(token.string): if token.IsFirstInLine(): - self._HandleError(errors.ILLEGAL_TAB, - 'Illegal tab in whitespace before "%s"' % token.next.string, - token, Position.All(token.string)) + if token.next: + self._HandleError( + errors.ILLEGAL_TAB, + 'Illegal tab in whitespace before "%s"' % token.next.string, + token, position=Position.All(token.string)) + else: + self._HandleError( + errors.ILLEGAL_TAB, + 'Illegal tab in whitespace', + token, position=Position.All(token.string)) else: - self._HandleError(errors.ILLEGAL_TAB, + self._HandleError( + errors.ILLEGAL_TAB, 'Illegal tab in whitespace after "%s"' % token.previous.string, - token, Position.All(token.string)) + token, position=Position.All(token.string)) # Check whitespace length if it's not the first token of the line and # if it's not immediately before a comment. if last_in_line: # Check for extra whitespace at the end of a line. 
self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line', - token, Position.All(token.string)) + token, position=Position.All(token.string)) elif not first_in_line and not token.next.IsComment(): if token.length > 1: - self._HandleError(errors.EXTRA_SPACE, 'Extra space after "%s"' % + self._HandleError( + errors.EXTRA_SPACE, 'Extra space after "%s"' % token.previous.string, token, - Position(1, len(token.string) - 1)) - - elif type == Type.OPERATOR: - last_code = token.metadata.last_code + position=Position(1, len(token.string) - 1)) - if not self._ExpectSpaceBeforeOperator(token): - if (token.previous and token.previous.type == Type.WHITESPACE and - last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER)): - self._HandleError(errors.EXTRA_SPACE, - 'Extra space before "%s"' % token.string, token.previous, - Position.All(token.previous.string)) - - elif (token.previous and - not token.previous.IsComment() and - token.previous.type in Type.EXPRESSION_ENDER_TYPES): - self._HandleError(errors.MISSING_SPACE, - 'Missing space before "%s"' % token.string, token, - Position.AtBeginning()) - - # Check that binary operators are not used to start lines. - if ((not last_code or last_code.line_number != token.line_number) and - not token.metadata.IsUnaryOperator()): - self._HandleError(errors.LINE_STARTS_WITH_OPERATOR, - 'Binary operator should go on previous line "%s"' % token.string, - token) - - elif type == Type.DOC_FLAG: + elif token_type == Type.OPERATOR: + self._CheckOperator(token) + elif token_type == Type.DOC_FLAG: flag = token.attached_object if flag.flag_type == 'bug': @@ -462,21 +502,25 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): if not string.isdigit(): self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG, - '@bug should be followed by a bug number', token) + '@bug should be followed by a bug number', token) elif flag.flag_type == 'suppress': if flag.type is None: # A syntactically invalid suppress tag will get tokenized as a normal # flag, indicating an error. - self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX, + self._HandleError( + errors.INCORRECT_SUPPRESS_SYNTAX, 'Invalid suppress syntax: should be @suppress {errortype}. ' 'Spaces matter.', token) - elif flag.type not in state.GetDocFlag().SUPPRESS_TYPES: - self._HandleError(errors.INVALID_SUPPRESS_TYPE, - 'Invalid suppression type: %s' % flag.type, - token) - - elif FLAGS.strict and flag.flag_type == 'author': + else: + for suppress_type in flag.jstype.IterIdentifiers(): + if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES: + self._HandleError( + errors.INVALID_SUPPRESS_TYPE, + 'Invalid suppression type: %s' % suppress_type, token) + + elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and + flag.flag_type == 'author'): # TODO(user): In non strict mode check the author tag for as much as # it exists, though the full form checked below isn't required. 
string = token.next.string @@ -494,12 +538,12 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): if num_spaces < 1: self._HandleError(errors.MISSING_SPACE, 'Missing space after email address', - token.next, Position(result.start(2), 0)) + token.next, position=Position(result.start(2), 0)) elif num_spaces > 1: - self._HandleError(errors.EXTRA_SPACE, - 'Extra space after email address', - token.next, - Position(result.start(2) + 1, num_spaces - 1)) + self._HandleError( + errors.EXTRA_SPACE, 'Extra space after email address', + token.next, + position=Position(result.start(2) + 1, num_spaces - 1)) # Check for extra spaces before email address. Can't be too few, if # not at least one we wouldn't match @author tag. @@ -507,80 +551,61 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): if num_spaces > 1: self._HandleError(errors.EXTRA_SPACE, 'Extra space before email address', - token.next, Position(1, num_spaces - 1)) + token.next, position=Position(1, num_spaces - 1)) elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and not self._limited_doc_checks): if flag.flag_type == 'param': if flag.name is None: self._HandleError(errors.MISSING_JSDOC_PARAM_NAME, - 'Missing name in @param tag', token) + 'Missing name in @param tag', token) if not flag.description or flag.description is None: flag_name = token.type if 'name' in token.values: flag_name = '@' + token.values['name'] - self._HandleError(errors.MISSING_JSDOC_TAG_DESCRIPTION, - 'Missing description in %s tag' % flag_name, token) - else: - self._CheckForMissingSpaceBeforeToken(flag.description_start_token) - - # We want punctuation to be inside of any tags ending a description, - # so strip tags before checking description. See bug 1127192. Note - # that depending on how lines break, the real description end token - # may consist only of stripped html and the effective end token can - # be different. - end_token = flag.description_end_token - end_string = htmlutil.StripTags(end_token.string).strip() - while (end_string == '' and not - end_token.type in Type.FLAG_ENDING_TYPES): - end_token = end_token.previous - if end_token.type in Type.FLAG_DESCRIPTION_TYPES: - end_string = htmlutil.StripTags(end_token.string).rstrip() - - if not (end_string.endswith('.') or end_string.endswith('?') or - end_string.endswith('!')): - # Find the position for the missing punctuation, inside of any html - # tags. - desc_str = end_token.string.rstrip() - while desc_str.endswith('>'): - start_tag_index = desc_str.rfind('<') - if start_tag_index < 0: - break - desc_str = desc_str[:start_tag_index].rstrip() - end_position = Position(len(desc_str), 0) + if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED: self._HandleError( - errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER, - ('%s descriptions must end with valid punctuation such as a ' - 'period.' 
% token.string), - end_token, end_position) + errors.MISSING_JSDOC_TAG_DESCRIPTION, + 'Missing description in %s tag' % flag_name, token) + else: + self._CheckForMissingSpaceBeforeToken(flag.description_start_token) - if flag.flag_type in state.GetDocFlag().HAS_TYPE: + if flag.HasType(): if flag.type_start_token is not None: self._CheckForMissingSpaceBeforeToken( token.attached_object.type_start_token) - if flag.type and flag.type != '' and not flag.type.isspace(): - self._CheckJsDocType(token) - - if type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG): - if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and - token.values['name'] not in FLAGS.custom_jsdoc_tags): - self._HandleError(errors.INVALID_JSDOC_TAG, - 'Invalid JsDoc tag: %s' % token.values['name'], token) + if flag.jstype and not flag.jstype.IsEmpty(): + self._CheckJsDocType(token, flag.jstype) - if (FLAGS.strict and token.values['name'] == 'inheritDoc' and - type == Type.DOC_INLINE_FLAG): - self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC, - 'Unnecessary braces around @inheritDoc', - token) - - elif type == Type.SIMPLE_LVALUE: + if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and ( + flag.type_start_token.type != Type.DOC_START_BRACE or + flag.type_end_token.type != Type.DOC_END_BRACE): + self._HandleError( + errors.MISSING_BRACES_AROUND_TYPE, + 'Type must always be surrounded by curly braces.', token) + + if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG): + if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and + token.values['name'] not in FLAGS.custom_jsdoc_tags): + self._HandleError( + errors.INVALID_JSDOC_TAG, + 'Invalid JsDoc tag: %s' % token.values['name'], token) + + if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and + token.values['name'] == 'inheritDoc' and + token_type == Type.DOC_INLINE_FLAG): + self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC, + 'Unnecessary braces around @inheritDoc', + token) + + elif token_type == Type.SIMPLE_LVALUE: identifier = token.values['identifier'] if ((not state.InFunction() or state.InConstructor()) and - not state.InParentheses() and not state.InObjectLiteralDescendant()): + state.InTopLevel() and not state.InObjectLiteralDescendant()): jsdoc = state.GetDocComment() if not state.HasDocComment(identifier): # Only test for documentation on identifiers with .s in them to @@ -592,45 +617,62 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): self._limited_doc_checks): comment = state.GetLastComment() if not (comment and comment.lower().count('jsdoc inherited')): - self._HandleError(errors.MISSING_MEMBER_DOCUMENTATION, + self._HandleError( + errors.MISSING_MEMBER_DOCUMENTATION, "No docs found for member '%s'" % identifier, - token); + token) elif jsdoc and (not state.InConstructor() or identifier.startswith('this.')): # We are at the top level and the function/member is documented. if identifier.endswith('_') and not identifier.endswith('__'): - if jsdoc.HasFlag('override'): - self._HandleError(errors.INVALID_OVERRIDE_PRIVATE, - '%s should not override a private member.' % identifier, - jsdoc.GetFlag('override').flag_token) # Can have a private class which inherits documentation from a # public superclass. 
- if jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor'): - self._HandleError(errors.INVALID_INHERIT_DOC_PRIVATE, + # + # @inheritDoc is deprecated in favor of using @override, and they + if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor') + and ('accessControls' not in jsdoc.suppressions)): + self._HandleError( + errors.INVALID_OVERRIDE_PRIVATE, + '%s should not override a private member.' % identifier, + jsdoc.GetFlag('override').flag_token) + if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor') + and ('accessControls' not in jsdoc.suppressions)): + self._HandleError( + errors.INVALID_INHERIT_DOC_PRIVATE, '%s should not inherit from a private member.' % identifier, jsdoc.GetFlag('inheritDoc').flag_token) if (not jsdoc.HasFlag('private') and - not ('underscore' in jsdoc.suppressions)): - self._HandleError(errors.MISSING_PRIVATE, + ('underscore' not in jsdoc.suppressions) and not + ((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and + ('accessControls' in jsdoc.suppressions))): + self._HandleError( + errors.MISSING_PRIVATE, 'Member "%s" must have @private JsDoc.' % identifier, token) if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions: - self._HandleError(errors.UNNECESSARY_SUPPRESS, + self._HandleError( + errors.UNNECESSARY_SUPPRESS, '@suppress {underscore} is not necessary with @private', jsdoc.suppressions['underscore']) - elif jsdoc.HasFlag('private'): - self._HandleError(errors.EXTRA_PRIVATE, + elif (jsdoc.HasFlag('private') and + not self.InExplicitlyTypedLanguage()): + # It is convention to hide public fields in some ECMA + # implementations from documentation using the @private tag. + self._HandleError( + errors.EXTRA_PRIVATE, 'Member "%s" must not have @private JsDoc' % identifier, token) - if ((jsdoc.HasFlag('desc') or jsdoc.HasFlag('hidden')) - and not identifier.startswith('MSG_') - and identifier.find('.MSG_') == -1): - # TODO(user): Update error message to show the actual invalid - # tag, either @desc or @hidden. - self._HandleError(errors.INVALID_USE_OF_DESC_TAG, - 'Member "%s" should not have @desc JsDoc' % identifier, - token) + # These flags are only legal on localizable message definitions; + # such variables always begin with the prefix MSG_. + for f in ('desc', 'hidden', 'meaning'): + if (jsdoc.HasFlag(f) + and not identifier.startswith('MSG_') + and identifier.find('.MSG_') == -1): + self._HandleError( + errors.INVALID_USE_OF_DESC_TAG, + 'Member "%s" should not have @%s JsDoc' % (identifier, f), + token) # Check for illegaly assigning live objects as prototype property values. index = identifier.find('.prototype.') @@ -641,28 +683,30 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): if next_code and ( next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or next_code.IsOperator('new')): - self._HandleError(errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE, + self._HandleError( + errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE, 'Member %s cannot have a non-primitive value' % identifier, token) - elif type == Type.END_PARAMETERS: + elif token_type == Type.END_PARAMETERS: # Find extra space at the end of parameter lists. We check the token # prior to the current one when it is a closing paren. 
if (token.previous and token.previous.type == Type.PARAMETERS and self.ENDS_WITH_SPACE.search(token.previous.string)): self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"', - token.previous) + token.previous) jsdoc = state.GetDocComment() if state.GetFunction().is_interface: if token.previous and token.previous.type == Type.PARAMETERS: - self._HandleError(errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS, + self._HandleError( + errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS, 'Interface constructor cannot have parameters', token.previous) elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see') - and not jsdoc.InheritsDocumentation() - and not state.InObjectLiteralDescendant() and not - jsdoc.IsInvalidated()): + and not jsdoc.InheritsDocumentation() + and not state.InObjectLiteralDescendant() and not + jsdoc.IsInvalidated()): distance, edit = jsdoc.CompareParameters(state.GetParams()) if distance: params_iter = iter(state.GetParams()) @@ -677,68 +721,108 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): # Languages that don't allow variables to by typed such as # JavaScript care but languages such as ActionScript or Java # that allow variables to be typed don't care. - self.HandleMissingParameterDoc(token, params_iter.next()) + if not self._limited_doc_checks: + self.HandleMissingParameterDoc(token, params_iter.next()) elif op == 'D': # Deletion self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION, - 'Found docs for non-existing parameter: "%s"' % - docs_iter.next(), token) + 'Found docs for non-existing parameter: "%s"' % + docs_iter.next(), token) elif op == 'S': # Substitution - self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION, - 'Parameter mismatch: got "%s", expected "%s"' % - (params_iter.next(), docs_iter.next()), token) + if not self._limited_doc_checks: + self._HandleError( + errors.WRONG_PARAMETER_DOCUMENTATION, + 'Parameter mismatch: got "%s", expected "%s"' % + (params_iter.next(), docs_iter.next()), token) else: # Equality - just advance the iterators params_iter.next() docs_iter.next() - elif type == Type.STRING_TEXT: + elif token_type == Type.STRING_TEXT: # If this is the first token after the start of the string, but it's at # the end of a line, we know we have a multi-line string. - if token.previous.type in (Type.SINGLE_QUOTE_STRING_START, + if token.previous.type in ( + Type.SINGLE_QUOTE_STRING_START, Type.DOUBLE_QUOTE_STRING_START) and last_in_line: self._HandleError(errors.MULTI_LINE_STRING, - 'Multi-line strings are not allowed', token) - + 'Multi-line strings are not allowed', token) # This check is orthogonal to the ones above, and repeats some types, so # it is a plain if and not an elif. if token.type in Type.COMMENT_TYPES: if self.ILLEGAL_TAB.search(token.string): self._HandleError(errors.ILLEGAL_TAB, - 'Illegal tab in comment "%s"' % token.string, token) + 'Illegal tab in comment "%s"' % token.string, token) trimmed = token.string.rstrip() if last_in_line and token.string != trimmed: # Check for extra whitespace at the end of a line. - self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line', - token, Position(len(trimmed), len(token.string) - len(trimmed))) + self._HandleError( + errors.EXTRA_SPACE, 'Extra space at end of line', token, + position=Position(len(trimmed), len(token.string) - len(trimmed))) # This check is also orthogonal since it is based on metadata. 
if token.metadata.is_implied_semicolon: self._HandleError(errors.MISSING_SEMICOLON, - 'Missing semicolon at end of line', token) + 'Missing semicolon at end of line', token) + + def _HandleStartBracket(self, token, last_non_space_token): + """Handles a token that is an open bracket. + + Args: + token: The token to handle. + last_non_space_token: The last token that was not a space. + """ + if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and + last_non_space_token and + last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES): + self._HandleError( + errors.EXTRA_SPACE, 'Extra space before "["', + token.previous, position=Position.All(token.previous.string)) + # If the [ token is the first token in a line we shouldn't complain + # about a missing space before [. This is because some Ecma script + # languages allow syntax like: + # [Annotation] + # class MyClass {...} + # So we don't want to blindly warn about missing spaces before [. + # In the the future, when rules for computing exactly how many spaces + # lines should be indented are added, then we can return errors for + # [ tokens that are improperly indented. + # For example: + # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName = + # [a,b,c]; + # should trigger a proper indentation warning message as [ is not indented + # by four spaces. + elif (not token.IsFirstInLine() and token.previous and + token.previous.type not in ( + [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] + + Type.EXPRESSION_ENDER_TYPES)): + self._HandleError(errors.MISSING_SPACE, 'Missing space before "["', + token, position=Position.AtBeginning()) + + def Finalize(self, state): + """Perform all checks that need to occur after all lines are processed. + + Args: + state: State of the parser after parsing all tokens - def Finalize(self, state, tokenizer_mode): + Raises: + TypeError: If not overridden. + """ last_non_space_token = state.GetLastNonSpaceToken() # Check last line for ending with newline. - if state.GetLastLine() and not (state.GetLastLine().isspace() or + if state.GetLastLine() and not ( + state.GetLastLine().isspace() or state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()): self._HandleError( errors.FILE_MISSING_NEWLINE, 'File does not end with new line. (%s)' % state.GetLastLine(), last_non_space_token) - # Check that the mode is not mid comment, argument list, etc. - if not tokenizer_mode == Modes.TEXT_MODE: - self._HandleError( - errors.FILE_IN_BLOCK, - 'File ended in mode "%s".' % tokenizer_mode, - last_non_space_token) - try: self._indentation.Finalize() except Exception, e: @@ -748,5 +832,13 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase): last_non_space_token) def GetLongLineExceptions(self): - """Gets a list of regexps for lines which can be longer than the limit.""" + """Gets a list of regexps for lines which can be longer than the limit. + + Returns: + A list of regexps, used as matches (rather than searches). 
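
Finalize's end-of-file check above amounts to requiring that the last physical line carries a trailing newline. A standalone approximation; the function name is illustrative.

def file_missing_newline(source):
    """Returns True when the file content does not end with a newline."""
    if not source:
        return False
    last_line = source.splitlines(True)[-1]  # keep the line endings
    return not last_line.endswith(('\n', '\r'))


print(file_missing_newline('var x = 1;\n'))  # False - properly terminated
print(file_missing_newline('var x = 1;'))    # True  - FILE_MISSING_NEWLINE
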
+ """ return [] + + def InExplicitlyTypedLanguage(self): + """Returns whether this ecma implementation is explicitly typed.""" + return False diff --git a/tools/closure_linter/closure_linter/ecmametadatapass.py b/tools/closure_linter/closure_linter/ecmametadatapass.py index 2c797b3..5062161 100755 --- a/tools/closure_linter/closure_linter/ecmametadatapass.py +++ b/tools/closure_linter/closure_linter/ecmametadatapass.py @@ -115,18 +115,30 @@ class EcmaContext(object): BLOCK_TYPES = frozenset([ ROOT, BLOCK, CASE_BLOCK, FOR_GROUP_BLOCK, IMPLIED_BLOCK]) - def __init__(self, type, start_token, parent): + def __init__(self, context_type, start_token, parent=None): """Initializes the context object. Args: + context_type: The context type. + start_token: The token where this context starts. + parent: The parent context. + + Attributes: type: The context type. start_token: The token where this context starts. + end_token: The token where this context ends. parent: The parent context. + children: The child contexts of this context, in order. """ - self.type = type + self.type = context_type self.start_token = start_token self.end_token = None - self.parent = parent + + self.parent = None + self.children = [] + + if parent: + parent.AddChild(self) def __repr__(self): """Returns a string representation of the context object.""" @@ -137,6 +149,32 @@ class EcmaContext(object): context = context.parent return 'Context(%s)' % ' > '.join(stack) + def AddChild(self, child): + """Adds a child to this context and sets child's parent to this context. + + Args: + child: A child EcmaContext. The child's parent will be set to this + context. + """ + + child.parent = self + + self.children.append(child) + self.children.sort(EcmaContext._CompareContexts) + + def GetRoot(self): + """Get the root context that contains this context, if any.""" + context = self + while context: + if context.type is EcmaContext.ROOT: + return context + context = context.parent + + @staticmethod + def _CompareContexts(context1, context2): + """Sorts contexts 1 and 2 by start token document position.""" + return tokenutil.Compare(context1.start_token, context2.start_token) + class EcmaMetaData(object): """Token metadata for EcmaScript languages. @@ -146,6 +184,11 @@ class EcmaMetaData(object): context: The context this token appears in. operator_type: The operator type, will be one of the *_OPERATOR constants defined below. + aliased_symbol: The full symbol being identified, as a string (e.g. an + 'XhrIo' alias for 'goog.net.XhrIo'). Only applicable to identifier + tokens. This is set in aliaspass.py and is a best guess. + is_alias_definition: True if the symbol is part of an alias definition. + If so, these symbols won't be counted towards goog.requires/provides. 
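
The parent/child bookkeeping added to EcmaContext above can be reduced to the following stripped-down sketch, with an integer position standing in for the start token; this is not the real EcmaContext class.

class Context(object):
    """Stripped-down parse context: a parent pointer plus ordered children."""

    def __init__(self, context_type, start_pos, parent=None):
        self.type = context_type
        self.start_pos = start_pos  # stands in for the start token
        self.parent = None
        self.children = []
        if parent:
            parent.AddChild(self)

    def AddChild(self, child):
        child.parent = self
        self.children.append(child)
        # Keep children ordered by the document position of their start token.
        self.children.sort(key=lambda c: c.start_pos)

    def GetRoot(self):
        context = self
        while context.parent:
            context = context.parent
        return context


root = Context('root', 0)
block = Context('block', 10, parent=root)
statement = Context('statement', 12, parent=block)
print(statement.GetRoot().type)          # root
print([c.type for c in root.children])   # ['block']
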
""" UNARY_OPERATOR = 'unary' @@ -164,6 +207,8 @@ class EcmaMetaData(object): self.is_implied_semicolon = False self.is_implied_block = False self.is_implied_block_close = False + self.aliased_symbol = None + self.is_alias_definition = False def __repr__(self): """Returns a string representation of the context object.""" @@ -172,6 +217,8 @@ class EcmaMetaData(object): parts.append('optype: %r' % self.operator_type) if self.is_implied_semicolon: parts.append('implied;') + if self.aliased_symbol: + parts.append('alias for: %s' % self.aliased_symbol) return 'MetaData(%s)' % ', '.join(parts) def IsUnaryOperator(self): @@ -196,21 +243,21 @@ class EcmaMetaDataPass(object): self._AddContext(EcmaContext.ROOT) self._last_code = None - def _CreateContext(self, type): + def _CreateContext(self, context_type): """Overridable by subclasses to create the appropriate context type.""" - return EcmaContext(type, self._token, self._context) + return EcmaContext(context_type, self._token, self._context) def _CreateMetaData(self): """Overridable by subclasses to create the appropriate metadata type.""" return EcmaMetaData() - def _AddContext(self, type): + def _AddContext(self, context_type): """Adds a context of the given type to the context stack. Args: - type: The type of context to create + context_type: The type of context to create """ - self._context = self._CreateContext(type) + self._context = self._CreateContext(context_type) def _PopContext(self): """Moves up one level in the context stack. @@ -233,7 +280,7 @@ class EcmaMetaDataPass(object): """Pops the context stack until a context of the given type is popped. Args: - stop_types: The types of context to pop to - stops at the first match. + *stop_types: The types of context to pop to - stops at the first match. Returns: The context object of the given type that was popped. @@ -364,10 +411,14 @@ class EcmaMetaDataPass(object): self._AddContext(EcmaContext.SWITCH) elif (token_type == TokenType.KEYWORD and - token.string in ('case', 'default')): + token.string in ('case', 'default') and + self._context.type != EcmaContext.OBJECT_LITERAL): # Pop up to but not including the switch block. while self._context.parent.type != EcmaContext.SWITCH: self._PopContext() + if self._context.parent is None: + raise ParseError(token, 'Encountered case/default statement ' + 'without switch statement') elif token.IsOperator('?'): self._AddContext(EcmaContext.TERNARY_TRUE) @@ -386,9 +437,9 @@ class EcmaMetaDataPass(object): # ternary_false > ternary_true > statement > root elif (self._context.type == EcmaContext.TERNARY_FALSE and self._context.parent.type == EcmaContext.TERNARY_TRUE): - self._PopContext() # Leave current ternary false context. - self._PopContext() # Leave current parent ternary true - self._AddContext(EcmaContext.TERNARY_FALSE) + self._PopContext() # Leave current ternary false context. + self._PopContext() # Leave current parent ternary true + self._AddContext(EcmaContext.TERNARY_FALSE) elif self._context.parent.type == EcmaContext.SWITCH: self._AddContext(EcmaContext.CASE_BLOCK) @@ -444,25 +495,27 @@ class EcmaMetaDataPass(object): is_implied_block = self._context == EcmaContext.IMPLIED_BLOCK is_last_code_in_line = token.IsCode() and ( not next_code or next_code.line_number != token.line_number) - is_continued_identifier = (token.type == TokenType.IDENTIFIER and - token.string.endswith('.')) is_continued_operator = (token.type == TokenType.OPERATOR and not token.metadata.IsUnaryPostOperator()) is_continued_dot = token.string == '.' 
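
The case/default handling above pops the context stack until the enclosing switch is reached and treats a missing switch as a parse error. A simplified list-based sketch; the real pass walks parent pointers rather than a list, and raises its own ParseError type.

def pop_to_switch(context_stack):
    """context_stack lists context types, innermost last. Pops contexts until
    the one directly inside a 'switch' is on top, as for a case/default label.
    """
    while len(context_stack) >= 2 and context_stack[-2] != 'switch':
        context_stack.pop()
    if len(context_stack) < 2:
        raise ValueError('case/default statement without switch statement')
    return context_stack


print(pop_to_switch(['root', 'switch', 'case_block', 'block']))
# ['root', 'switch', 'case_block']
try:
    pop_to_switch(['root', 'block'])
except ValueError as error:
    print(error)  # case/default statement without switch statement
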
next_code_is_operator = next_code and next_code.type == TokenType.OPERATOR - next_code_is_dot = next_code and next_code.string == '.' - is_end_of_block = (token.type == TokenType.END_BLOCK and + is_end_of_block = ( + token.type == TokenType.END_BLOCK and token.metadata.context.type != EcmaContext.OBJECT_LITERAL) is_multiline_string = token.type == TokenType.STRING_TEXT + is_continued_var_decl = (token.IsKeyword('var') and + next_code and + (next_code.type in [TokenType.IDENTIFIER, + TokenType.SIMPLE_LVALUE]) and + token.line_number < next_code.line_number) next_code_is_block = next_code and next_code.type == TokenType.START_BLOCK if (is_last_code_in_line and self._StatementCouldEndInContext() and not is_multiline_string and not is_end_of_block and - not is_continued_identifier and + not is_continued_var_decl and not is_continued_operator and not is_continued_dot and - not next_code_is_dot and not next_code_is_operator and not is_implied_block and not next_code_is_block): @@ -470,7 +523,7 @@ class EcmaMetaDataPass(object): self._EndStatement() def _StatementCouldEndInContext(self): - """Returns whether the current statement (if any) may end in this context.""" + """Returns if the current statement (if any) may end in this context.""" # In the basic statement or variable declaration context, statement can # always end in this context. if self._context.type in (EcmaContext.STATEMENT, EcmaContext.VAR): diff --git a/tools/closure_linter/closure_linter/error_check.py b/tools/closure_linter/closure_linter/error_check.py new file mode 100755 index 0000000..8d657fe --- /dev/null +++ b/tools/closure_linter/closure_linter/error_check.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python +# +# Copyright 2011 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Specific JSLint errors checker.""" + + + +import gflags as flags + +FLAGS = flags.FLAGS + + +class Rule(object): + """Different rules to check.""" + + # Documentations for specific rules goes in flag definition. + BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level' + INDENTATION = 'indentation' + WELL_FORMED_AUTHOR = 'well_formed_author' + NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc' + BRACES_AROUND_TYPE = 'braces_around_type' + OPTIONAL_TYPE_MARKER = 'optional_type_marker' + VARIABLE_ARG_MARKER = 'variable_arg_marker' + UNUSED_PRIVATE_MEMBERS = 'unused_private_members' + UNUSED_LOCAL_VARIABLES = 'unused_local_variables' + + # Rule to raise all known errors. + ALL = 'all' + + # All rules that are to be checked when using the strict flag. E.g. the rules + # that are specific to the stricter Closure style. + CLOSURE_RULES = frozenset([BLANK_LINES_AT_TOP_LEVEL, + INDENTATION, + WELL_FORMED_AUTHOR, + NO_BRACES_AROUND_INHERIT_DOC, + BRACES_AROUND_TYPE, + OPTIONAL_TYPE_MARKER, + VARIABLE_ARG_MARKER]) + + +flags.DEFINE_boolean('strict', False, + 'Whether to validate against the stricter Closure style. 
' + 'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.') +flags.DEFINE_multistring('jslint_error', [], + 'List of specific lint errors to check. Here is a list' + ' of accepted values:\n' + ' - ' + Rule.ALL + ': enables all following errors.\n' + ' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates' + 'number of blank lines between blocks at top level.\n' + ' - ' + Rule.INDENTATION + ': checks correct ' + 'indentation of code.\n' + ' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the ' + '@author JsDoc tags.\n' + ' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': ' + 'forbids braces around @inheritdoc JsDoc tags.\n' + ' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces ' + 'around types in JsDoc tags.\n' + ' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct ' + 'use of optional marker = in param types.\n' + ' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for ' + 'unused private variables.\n' + ' - ' + Rule.UNUSED_LOCAL_VARIABLES + ': checks for ' + 'unused local variables.\n') + + +def ShouldCheck(rule): + """Returns whether the optional rule should be checked. + + Computes different flags (strict, jslint_error, jslint_noerror) to find out if + this specific rule should be checked. + + Args: + rule: Name of the rule (see Rule). + + Returns: + True if the rule should be checked according to the flags, otherwise False. + """ + if rule in FLAGS.jslint_error or Rule.ALL in FLAGS.jslint_error: + return True + # Checks strict rules. + return FLAGS.strict and rule in Rule.CLOSURE_RULES diff --git a/tools/closure_linter/closure_linter/error_fixer.py b/tools/closure_linter/closure_linter/error_fixer.py index 904cf86..88f9c72 100755 --- a/tools/closure_linter/closure_linter/error_fixer.py +++ b/tools/closure_linter/closure_linter/error_fixer.py @@ -16,6 +16,9 @@ """Main class responsible for automatically fixing simple style violations.""" +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + __author__ = 'robbyw@google.com (Robert Walker)' import re @@ -24,6 +27,7 @@ import gflags as flags from closure_linter import errors from closure_linter import javascriptstatetracker from closure_linter import javascripttokens +from closure_linter import requireprovidesorter from closure_linter import tokenutil from closure_linter.common import errorhandler @@ -33,24 +37,46 @@ Type = javascripttokens.JavaScriptTokenType END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$') +# Regex to represent common mistake inverting author name and email as +# @author User Name (user@company) +INVERTED_AUTHOR_SPEC = re.compile(r'(?P\s*)' + r'(?P[^(]+)' + r'(?P\s+)' + r'\(' + r'(?P[^\s]+@[^)\s]+)' + r'\)' + r'(?P.*)') + FLAGS = flags.FLAGS flags.DEFINE_boolean('disable_indentation_fixing', False, 'Whether to disable automatic fixing of indentation.') +flags.DEFINE_list('fix_error_codes', [], 'A list of specific error codes to ' + 'fix. Defaults to all supported error codes when empty. ' + 'See errors.py for a list of error codes.') + class ErrorFixer(errorhandler.ErrorHandler): """Object that fixes simple style errors.""" - def __init__(self, external_file = None): + def __init__(self, external_file=None): """Initialize the error fixer. Args: external_file: If included, all output will be directed to this file instead of overwriting the files the errors are found in. 
""" + errorhandler.ErrorHandler.__init__(self) + self._file_name = None self._file_token = None self._external_file = external_file + try: + self._fix_error_codes = set([errors.ByName(error.upper()) for error in + FLAGS.fix_error_codes]) + except KeyError as ke: + raise ValueError('Unknown error code ' + ke.args[0]) + def HandleFile(self, filename, first_token): """Notifies this ErrorPrinter that subsequent errors are in filename. @@ -59,6 +85,7 @@ class ErrorFixer(errorhandler.ErrorHandler): first_token: The first token in the file. """ self._file_name = filename + self._file_is_html = filename.endswith('.html') or filename.endswith('.htm') self._file_token = first_token self._file_fix_count = 0 self._file_changed_lines = set() @@ -76,6 +103,44 @@ class ErrorFixer(errorhandler.ErrorHandler): for token in tokens: self._file_changed_lines.add(token.line_number) + def _FixJsDocPipeNull(self, js_type): + """Change number|null or null|number to ?number. + + Args: + js_type: The typeannotation.TypeAnnotation instance to fix. + """ + + # Recurse into all sub_types if the error was at a deeper level. + map(self._FixJsDocPipeNull, js_type.IterTypes()) + + if js_type.type_group and len(js_type.sub_types) == 2: + # Find and remove the null sub_type: + sub_type = None + for sub_type in js_type.sub_types: + if sub_type.identifier == 'null': + map(tokenutil.DeleteToken, sub_type.tokens) + self._AddFix(sub_type.tokens) + break + else: + return + + first_token = js_type.FirstToken() + question_mark = Token('?', Type.DOC_TYPE_MODIFIER, first_token.line, + first_token.line_number) + tokenutil.InsertTokenBefore(question_mark, first_token) + js_type.tokens.insert(0, question_mark) + js_type.tokens.remove(sub_type) + js_type.or_null = True + + # Now also remove the separator, which is in the parent's token list, + # either before or after the sub_type, there is exactly one. Scan for it. + for token in js_type.tokens: + if (token and isinstance(token, Token) and + token.type == Type.DOC_TYPE_MODIFIER and token.string == '|'): + tokenutil.DeleteToken(token) + self._AddFix(token) + break + def HandleError(self, error): """Attempts to fix the error. @@ -85,20 +150,33 @@ class ErrorFixer(errorhandler.ErrorHandler): code = error.code token = error.token + if self._fix_error_codes and code not in self._fix_error_codes: + return + if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL: + self._FixJsDocPipeNull(token.attached_object.jstype) + + elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE: + iterator = token.attached_object.type_end_token + if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace(): + iterator = iterator.previous + + ending_space = len(iterator.string) - len(iterator.string.rstrip()) + iterator.string = '%s=%s' % (iterator.string.rstrip(), + ' ' * ending_space) + + # Create a new flag object with updated type info. + token.attached_object = javascriptstatetracker.JsDocFlag(token) + self._AddFix(token) + + elif code == errors.JSDOC_MISSING_VAR_ARGS_TYPE: iterator = token.attached_object.type_start_token if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace(): iterator = iterator.next - leading_space = len(iterator.string) - len(iterator.string.lstrip()) - iterator.string = '%s?%s' % (' ' * leading_space, - iterator.string.lstrip()) - - # Cover the no outer brace case where the end token is part of the type. 
- while iterator and iterator != token.attached_object.type_end_token.next: - iterator.string = iterator.string.replace( - 'null|', '').replace('|null', '') - iterator = iterator.next + starting_space = len(iterator.string) - len(iterator.string.lstrip()) + iterator.string = '%s...%s' % (' ' * starting_space, + iterator.string.lstrip()) # Create a new flag object with updated type info. token.attached_object = javascriptstatetracker.JsDocFlag(token) @@ -116,7 +194,7 @@ class ErrorFixer(errorhandler.ErrorHandler): elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION, errors.REDUNDANT_SEMICOLON, errors.COMMA_AT_END_OF_LITERAL): - tokenutil.DeleteToken(token) + self._DeleteToken(token) self._AddFix(token) elif code == errors.INVALID_JSDOC_TAG: @@ -129,7 +207,10 @@ class ErrorFixer(errorhandler.ErrorHandler): self._AddFix(token) elif code == errors.MISSING_SPACE: - if error.position: + if error.fix_data: + token.string = error.fix_data + self._AddFix(token) + elif error.position: if error.position.IsAtBeginning(): tokenutil.InsertSpaceTokenAfter(token.previous) elif error.position.IsAtEnd(token.string): @@ -143,19 +224,15 @@ class ErrorFixer(errorhandler.ErrorHandler): token.string = error.position.Set(token.string, '') self._AddFix(token) - elif code == errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER: - token.string = error.position.Set(token.string, '.') - self._AddFix(token) - elif code == errors.MISSING_LINE: if error.position.IsAtBeginning(): - tokenutil.InsertLineAfter(token.previous) + tokenutil.InsertBlankLineAfter(token.previous) else: - tokenutil.InsertLineAfter(token) + tokenutil.InsertBlankLineAfter(token) self._AddFix(token) elif code == errors.EXTRA_LINE: - tokenutil.DeleteToken(token) + self._DeleteToken(token) self._AddFix(token) elif code == errors.WRONG_BLANK_LINE_COUNT: @@ -167,29 +244,30 @@ class ErrorFixer(errorhandler.ErrorHandler): should_delete = False if num_lines < 0: - num_lines = num_lines * -1 + num_lines *= -1 should_delete = True - for i in xrange(1, num_lines + 1): + for unused_i in xrange(1, num_lines + 1): if should_delete: # TODO(user): DeleteToken should update line numbers. 
- tokenutil.DeleteToken(token.previous) + self._DeleteToken(token.previous) else: - tokenutil.InsertLineAfter(token.previous) + tokenutil.InsertBlankLineAfter(token.previous) self._AddFix(token) elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING: end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END) if end_quote: - single_quote_start = Token("'", Type.SINGLE_QUOTE_STRING_START, - token.line, token.line_number) - single_quote_end = Token("'", Type.SINGLE_QUOTE_STRING_START, - end_quote.line, token.line_number) + single_quote_start = Token( + "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number) + single_quote_end = Token( + "'", Type.SINGLE_QUOTE_STRING_START, end_quote.line, + token.line_number) tokenutil.InsertTokenAfter(single_quote_start, token) tokenutil.InsertTokenAfter(single_quote_end, end_quote) - tokenutil.DeleteToken(token) - tokenutil.DeleteToken(end_quote) + self._DeleteToken(token) + self._DeleteToken(end_quote) self._AddFix([token, end_quote]) elif code == errors.MISSING_BRACES_AROUND_TYPE: @@ -197,15 +275,15 @@ class ErrorFixer(errorhandler.ErrorHandler): start_token = token.attached_object.type_start_token if start_token.type != Type.DOC_START_BRACE: - leading_space = (len(start_token.string) - - len(start_token.string.lstrip())) + leading_space = ( + len(start_token.string) - len(start_token.string.lstrip())) if leading_space: start_token = tokenutil.SplitToken(start_token, leading_space) # Fix case where start and end token were the same. if token.attached_object.type_end_token == start_token.previous: token.attached_object.type_end_token = start_token - new_token = Token("{", Type.DOC_START_BRACE, start_token.line, + new_token = Token('{', Type.DOC_START_BRACE, start_token.line, start_token.line_number) tokenutil.InsertTokenAfter(new_token, start_token.previous) token.attached_object.type_start_token = new_token @@ -217,7 +295,7 @@ class ErrorFixer(errorhandler.ErrorHandler): # FLAG_ENDING_TYPE token, if there wasn't a starting brace then # the end token is the last token of the actual type. last_type = end_token - if not len(fixed_tokens): + if not fixed_tokens: last_type = end_token.previous while last_type.string.isspace(): @@ -233,7 +311,7 @@ class ErrorFixer(errorhandler.ErrorHandler): tokenutil.SplitToken(last_type, len(last_type.string) - trailing_space) - new_token = Token("}", Type.DOC_END_BRACE, last_type.line, + new_token = Token('}', Type.DOC_END_BRACE, last_type.line, last_type.line_number) tokenutil.InsertTokenAfter(new_token, last_type) token.attached_object.type_end_token = new_token @@ -241,35 +319,86 @@ class ErrorFixer(errorhandler.ErrorHandler): self._AddFix(fixed_tokens) - elif code in (errors.GOOG_REQUIRES_NOT_ALPHABETIZED, - errors.GOOG_PROVIDES_NOT_ALPHABETIZED): - tokens = error.fix_data - strings = map(lambda x: x.string, tokens) - sorted_strings = sorted(strings) + elif code == errors.LINE_STARTS_WITH_OPERATOR: + # Remove whitespace following the operator so the line starts clean. + self._StripSpace(token, before=False) - index = 0 - changed_tokens = [] - for token in tokens: - if token.string != sorted_strings[index]: - token.string = sorted_strings[index] - changed_tokens.append(token) - index += 1 + # Remove the operator. + tokenutil.DeleteToken(token) + self._AddFix(token) + + insertion_point = tokenutil.GetPreviousCodeToken(token) - self._AddFix(changed_tokens) + # Insert a space between the previous token and the new operator. 
+ space = Token(' ', Type.WHITESPACE, insertion_point.line, + insertion_point.line_number) + tokenutil.InsertTokenAfter(space, insertion_point) + + # Insert the operator on the end of the previous line. + new_token = Token(token.string, token.type, insertion_point.line, + insertion_point.line_number) + tokenutil.InsertTokenAfter(new_token, space) + self._AddFix(new_token) + + elif code == errors.LINE_ENDS_WITH_DOT: + # Remove whitespace preceding the operator to remove trailing whitespace. + self._StripSpace(token, before=True) + + # Remove the dot. + tokenutil.DeleteToken(token) + self._AddFix(token) + + insertion_point = tokenutil.GetNextCodeToken(token) + + # Insert the dot at the beginning of the next line of code. + new_token = Token(token.string, token.type, insertion_point.line, + insertion_point.line_number) + tokenutil.InsertTokenBefore(new_token, insertion_point) + self._AddFix(new_token) + + elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED: + require_start_token = error.fix_data + sorter = requireprovidesorter.RequireProvideSorter() + sorter.FixRequires(require_start_token) + + self._AddFix(require_start_token) + + elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED: + provide_start_token = error.fix_data + sorter = requireprovidesorter.RequireProvideSorter() + sorter.FixProvides(provide_start_token) + + self._AddFix(provide_start_token) elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC: if token.previous.string == '{' and token.next.string == '}': - tokenutil.DeleteToken(token.previous) - tokenutil.DeleteToken(token.next) + self._DeleteToken(token.previous) + self._DeleteToken(token.next) self._AddFix([token]) + elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION: + match = INVERTED_AUTHOR_SPEC.match(token.string) + if match: + token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'), + match.group('email'), + match.group('whitespace_after_name'), + match.group('name'), + match.group('trailing_characters')) + self._AddFix(token) + elif (code == errors.WRONG_INDENTATION and - not FLAGS.disable_indentation_fixing): + not FLAGS.disable_indentation_fixing): token = tokenutil.GetFirstTokenInSameLine(token) actual = error.position.start expected = error.position.length - if token.type in (Type.WHITESPACE, Type.PARAMETERS): + # Cases where first token is param but with leading spaces. + if (len(token.string.lstrip()) == len(token.string) - actual and + token.string.lstrip()): + token.string = token.string.lstrip() + actual = 0 + + if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0: token.string = token.string.lstrip() + (' ' * expected) self._AddFix([token]) else: @@ -282,52 +411,205 @@ class ErrorFixer(errorhandler.ErrorHandler): tokenutil.InsertTokenAfter(new_token, token.previous) self._AddFix([token]) - elif code == errors.EXTRA_GOOG_REQUIRE: - fixed_tokens = [] - while token: - if token.type == Type.IDENTIFIER: - if token.string not in ['goog.require', 'goog.provide']: - # Stop iterating over tokens once we're out of the requires and - # provides. - break - if token.string == 'goog.require': - # Text of form: goog.require('required'), skipping past open paren - # and open quote to the string text. - required = token.next.next.next.string - if required in error.fix_data: - fixed_tokens.append(token) - # Want to delete: goog.require + open paren + open single-quote + - # text + close single-quote + close paren + semi-colon = 7. 
- tokenutil.DeleteTokens(token, 7) - token = token.next + elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT, + errors.MISSING_END_OF_SCOPE_COMMENT]: + # Only fix cases where }); is found with no trailing content on the line + # other than a comment. Value of 'token' is set to } for this error. + if (token.type == Type.END_BLOCK and + token.next.type == Type.END_PAREN and + token.next.next.type == Type.SEMICOLON): + current_token = token.next.next.next + removed_tokens = [] + while current_token and current_token.line_number == token.line_number: + if current_token.IsAnyType(Type.WHITESPACE, + Type.START_SINGLE_LINE_COMMENT, + Type.COMMENT): + removed_tokens.append(current_token) + current_token = current_token.next + else: + return + + if removed_tokens: + self._DeleteTokens(removed_tokens[0], len(removed_tokens)) + + whitespace_token = Token(' ', Type.WHITESPACE, token.line, + token.line_number) + start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT, + token.line, token.line_number) + comment_token = Token(' goog.scope', Type.COMMENT, token.line, + token.line_number) + insertion_tokens = [whitespace_token, start_comment_token, + comment_token] + + tokenutil.InsertTokensAfter(insertion_tokens, token.next.next) + self._AddFix(removed_tokens + insertion_tokens) + + elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]: + tokens_in_line = tokenutil.GetAllTokensInSameLine(token) + num_delete_tokens = len(tokens_in_line) + # If line being deleted is preceded and succeed with blank lines then + # delete one blank line also. + if (tokens_in_line[0].previous and tokens_in_line[-1].next + and tokens_in_line[0].previous.type == Type.BLANK_LINE + and tokens_in_line[-1].next.type == Type.BLANK_LINE): + num_delete_tokens += 1 + self._DeleteTokens(tokens_in_line[0], num_delete_tokens) + self._AddFix(tokens_in_line) + + elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]: + missing_namespaces = error.fix_data[0] + need_blank_line = error.fix_data[1] or (not token.previous) + + insert_location = Token('', Type.NORMAL, '', token.line_number - 1) + dummy_first_token = insert_location + tokenutil.InsertTokenBefore(insert_location, token) + + # If inserting a blank line check blank line does not exist before + # token to avoid extra blank lines. + if (need_blank_line and insert_location.previous + and insert_location.previous.type != Type.BLANK_LINE): + tokenutil.InsertBlankLineAfter(insert_location) + insert_location = insert_location.next + + for missing_namespace in missing_namespaces: + new_tokens = self._GetNewRequireOrProvideTokens( + code == errors.MISSING_GOOG_PROVIDE, + missing_namespace, insert_location.line_number + 1) + tokenutil.InsertLineAfter(insert_location, new_tokens) + insert_location = new_tokens[-1] + self._AddFix(new_tokens) + + # If inserting a blank line check blank line does not exist after + # token to avoid extra blank lines. + if (need_blank_line and insert_location.next + and insert_location.next.type != Type.BLANK_LINE): + tokenutil.InsertBlankLineAfter(insert_location) + + tokenutil.DeleteToken(dummy_first_token) + + def _StripSpace(self, token, before): + """Strip whitespace tokens either preceding or following the given token. - self._AddFix(fixed_tokens) + Args: + token: The token. + before: If true, strip space before the token, if false, after it. 
+ """ + token = token.previous if before else token.next + while token and token.type == Type.WHITESPACE: + tokenutil.DeleteToken(token) + token = token.previous if before else token.next + + def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number): + """Returns a list of tokens to create a goog.require/provide statement. + + Args: + is_provide: True if getting tokens for a provide, False for require. + namespace: The required or provided namespaces to get tokens for. + line_number: The line number the new require or provide statement will be + on. + + Returns: + Tokens to create a new goog.require or goog.provide statement. + """ + string = 'goog.require' + if is_provide: + string = 'goog.provide' + line_text = string + '(\'' + namespace + '\');\n' + return [ + Token(string, Type.IDENTIFIER, line_text, line_number), + Token('(', Type.START_PAREN, line_text, line_number), + Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number), + Token(namespace, Type.STRING_TEXT, line_text, line_number), + Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number), + Token(')', Type.END_PAREN, line_text, line_number), + Token(';', Type.SEMICOLON, line_text, line_number) + ] + + def _DeleteToken(self, token): + """Deletes the specified token from the linked list of tokens. + + Updates instance variables pointing to tokens such as _file_token if + they reference the deleted token. + + Args: + token: The token to delete. + """ + if token == self._file_token: + self._file_token = token.next + + tokenutil.DeleteToken(token) + + def _DeleteTokens(self, token, token_count): + """Deletes the given number of tokens starting with the given token. + + Updates instance variables pointing to tokens such as _file_token if + they reference the deleted token. + + Args: + token: The first token to delete. + token_count: The total number of tokens to delete. + """ + if token == self._file_token: + for unused_i in xrange(token_count): + self._file_token = self._file_token.next + + tokenutil.DeleteTokens(token, token_count) def FinishFile(self): """Called when the current file has finished style checking. - Used to go back and fix any errors in the file. + Used to go back and fix any errors in the file. It currently supports both + js and html files. For js files it does a simple dump of all tokens, but in + order to support html file, we need to merge the original file with the new + token set back together. This works because the tokenized html file is the + original html file with all non js lines kept but blanked out with one blank + line token per line of html. """ if self._file_fix_count: + # Get the original file content for html. + if self._file_is_html: + f = open(self._file_name, 'r') + original_lines = f.readlines() + f.close() + f = self._external_file if not f: - print "Fixed %d errors in %s" % (self._file_fix_count, self._file_name) + error_noun = 'error' if self._file_fix_count == 1 else 'errors' + print 'Fixed %d %s in %s' % ( + self._file_fix_count, error_noun, self._file_name) f = open(self._file_name, 'w') token = self._file_token + # Finding the first not deleted token. + while token.is_deleted: + token = token.next + # If something got inserted before first token (e.g. due to sorting) + # then move to start. Bug 8398202. 
+ while token.previous: + token = token.previous char_count = 0 + line = '' while token: - f.write(token.string) + line += token.string char_count += len(token.string) if token.IsLastInLine(): - f.write('\n') + # We distinguish if a blank line in html was from stripped original + # file or newly added error fix by looking at the "org_line_number" + # field on the token. It is only set in the tokenizer, so for all + # error fixes, the value should be None. + if (line or not self._file_is_html or + token.orig_line_number is None): + f.write(line) + f.write('\n') + else: + f.write(original_lines[token.orig_line_number - 1]) + line = '' if char_count > 80 and token.line_number in self._file_changed_lines: - print "WARNING: Line %d of %s is now longer than 80 characters." % ( + print 'WARNING: Line %d of %s is now longer than 80 characters.' % ( token.line_number, self._file_name) char_count = 0 - self._file_changed_lines token = token.next diff --git a/tools/closure_linter/closure_linter/error_fixer_test.py b/tools/closure_linter/closure_linter/error_fixer_test.py new file mode 100644 index 0000000..49f449d --- /dev/null +++ b/tools/closure_linter/closure_linter/error_fixer_test.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for the error_fixer module.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + + + +import unittest as googletest +from closure_linter import error_fixer +from closure_linter import testutil + + +class ErrorFixerTest(googletest.TestCase): + """Unit tests for error_fixer.""" + + def setUp(self): + self.error_fixer = error_fixer.ErrorFixer() + + def testDeleteToken(self): + start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT) + second_token = start_token.next + self.error_fixer.HandleFile('test_file', start_token) + + self.error_fixer._DeleteToken(start_token) + + self.assertEqual(second_token, self.error_fixer._file_token) + + def testDeleteTokens(self): + start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT) + fourth_token = start_token.next.next.next + self.error_fixer.HandleFile('test_file', start_token) + + self.error_fixer._DeleteTokens(start_token, 3) + + self.assertEqual(fourth_token, self.error_fixer._file_token) + +_TEST_SCRIPT = """\ +var x = 3; +""" + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/closure_linter/errorrecord.py b/tools/closure_linter/closure_linter/errorrecord.py new file mode 100644 index 0000000..ce9fb90 --- /dev/null +++ b/tools/closure_linter/closure_linter/errorrecord.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""A simple, pickle-serializable class to represent a lint error.""" + +__author__ = 'nnaze@google.com (Nathan Naze)' + +import gflags as flags + +from closure_linter import errors +from closure_linter.common import erroroutput + +FLAGS = flags.FLAGS + + +class ErrorRecord(object): + """Record-keeping struct that can be serialized back from a process. + + Attributes: + path: Path to the file. + error_string: Error string for the user. + new_error: Whether this is a "new error" (see errors.NEW_ERRORS). + """ + + def __init__(self, path, error_string, new_error): + self.path = path + self.error_string = error_string + self.new_error = new_error + + +def MakeErrorRecord(path, error): + """Make an error record with correctly formatted error string. + + Errors are not able to be serialized (pickled) over processes because of + their pointers to the complex token/context graph. We use an intermediary + serializable class to pass back just the relevant information. + + Args: + path: Path of file the error was found in. + error: An error.Error instance. + + Returns: + _ErrorRecord instance. + """ + new_error = error.code in errors.NEW_ERRORS + + if FLAGS.unix_mode: + error_string = erroroutput.GetUnixErrorOutput( + path, error, new_error=new_error) + else: + error_string = erroroutput.GetErrorOutput(error, new_error=new_error) + + return ErrorRecord(path, error_string, new_error) diff --git a/tools/closure_linter/closure_linter/errorrules.py b/tools/closure_linter/closure_linter/errorrules.py index afb6fa9..b1b72aa 100755 --- a/tools/closure_linter/closure_linter/errorrules.py +++ b/tools/closure_linter/closure_linter/errorrules.py @@ -25,18 +25,48 @@ from closure_linter import errors FLAGS = flags.FLAGS flags.DEFINE_boolean('jsdoc', True, 'Whether to report errors for missing JsDoc.') +flags.DEFINE_list('disable', None, + 'Disable specific error. Usage Ex.: gjslint --disable 1,' + '0011 foo.js.') +flags.DEFINE_integer('max_line_length', 80, 'Maximum line length allowed ' + 'without warning.', lower_bound=1) + +disabled_error_nums = None + + +def GetMaxLineLength(): + """Returns allowed maximum length of line. + + Returns: + Length of line allowed without any warning. + """ + return FLAGS.max_line_length def ShouldReportError(error): """Whether the given error should be reported. - + Returns: - True for all errors except missing documentation errors. For these, - it returns the value of the jsdoc flag. + True for all errors except missing documentation errors and disabled + errors. For missing documentation, it returns the value of the + jsdoc flag. 
""" - return FLAGS.jsdoc or error not in ( + global disabled_error_nums + if disabled_error_nums is None: + disabled_error_nums = [] + if FLAGS.disable: + for error_str in FLAGS.disable: + error_num = 0 + try: + error_num = int(error_str) + except ValueError: + pass + disabled_error_nums.append(error_num) + + return ((FLAGS.jsdoc or error not in ( errors.MISSING_PARAMETER_DOCUMENTATION, errors.MISSING_RETURN_DOCUMENTATION, errors.MISSING_MEMBER_DOCUMENTATION, errors.MISSING_PRIVATE, - errors.MISSING_JSDOC_TAG_THIS) + errors.MISSING_JSDOC_TAG_THIS)) and + (not FLAGS.disable or error not in disabled_error_nums)) diff --git a/tools/closure_linter/closure_linter/errorrules_test.py b/tools/closure_linter/closure_linter/errorrules_test.py new file mode 100644 index 0000000..cb90378 --- /dev/null +++ b/tools/closure_linter/closure_linter/errorrules_test.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# Copyright 2013 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Medium tests for the gjslint errorrules. + +Currently its just verifying that warnings can't be disabled. +""" + + + +import gflags as flags +import unittest as googletest + +from closure_linter import errors +from closure_linter import runner +from closure_linter.common import erroraccumulator + +flags.FLAGS.strict = True +flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js') +flags.FLAGS.closurized_namespaces = ('goog', 'dummy') + + +class ErrorRulesTest(googletest.TestCase): + """Test case to for gjslint errorrules.""" + + def testNoMaxLineLengthFlagExists(self): + """Tests that --max_line_length flag does not exists.""" + self.assertTrue('max_line_length' not in flags.FLAGS.FlagDict()) + + def testGetMaxLineLength(self): + """Tests warning are reported for line greater than 80. + """ + + # One line > 100 and one line > 80 and < 100. So should produce two + # line too long error. + original = [ + 'goog.require(\'dummy.aa\');', + '', + 'function a() {', + ' dummy.aa.i = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13' + ' + 14 + 15 + 16 + 17 + 18 + 19 + 20;', + ' dummy.aa.j = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13' + ' + 14 + 15 + 16 + 17 + 18;', + '}', + '' + ] + + # Expect line too long. + expected = [errors.LINE_TOO_LONG, errors.LINE_TOO_LONG] + + self._AssertErrors(original, expected) + + def testNoDisableFlagExists(self): + """Tests that --disable flag does not exists.""" + self.assertTrue('disable' not in flags.FLAGS.FlagDict()) + + def testWarningsNotDisabled(self): + """Tests warnings are reported when nothing is disabled. 
+ """ + original = [ + 'goog.require(\'dummy.aa\');', + 'goog.require(\'dummy.Cc\');', + 'goog.require(\'dummy.Dd\');', + '', + 'function a() {', + ' dummy.aa.i = 1;', + ' dummy.Cc.i = 1;', + ' dummy.Dd.i = 1;', + '}', + ] + + expected = [errors.GOOG_REQUIRES_NOT_ALPHABETIZED, + errors.FILE_MISSING_NEWLINE] + + self._AssertErrors(original, expected) + + def _AssertErrors(self, original, expected_errors, include_header=True): + """Asserts that the error fixer corrects original to expected.""" + if include_header: + original = self._GetHeader() + original + + # Trap gjslint's output parse it to get messages added. + error_accumulator = erroraccumulator.ErrorAccumulator() + runner.Run('testing.js', error_accumulator, source=original) + error_nums = [e.code for e in error_accumulator.GetErrors()] + + error_nums.sort() + expected_errors.sort() + self.assertListEqual(error_nums, expected_errors) + + def _GetHeader(self): + """Returns a fake header for a JavaScript file.""" + return [ + '// Copyright 2011 Google Inc. All Rights Reserved.', + '', + '/**', + ' * @fileoverview Fake file overview.', + ' * @author fake@google.com (Fake Person)', + ' */', + '' + ] + + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/closure_linter/errors.py b/tools/closure_linter/closure_linter/errors.py index 7c86941..356ee0c 100755 --- a/tools/closure_linter/closure_linter/errors.py +++ b/tools/closure_linter/closure_linter/errors.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,6 +18,7 @@ __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') + def ByName(name): """Get the error code for the given error name. @@ -55,8 +55,11 @@ ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100 LINE_TOO_LONG = 110 LINE_STARTS_WITH_OPERATOR = 120 COMMA_AT_END_OF_LITERAL = 121 +LINE_ENDS_WITH_DOT = 122 MULTI_LINE_STRING = 130 UNNECESSARY_DOUBLE_QUOTED_STRING = 131 +UNUSED_PRIVATE_MEMBER = 132 +UNUSED_LOCAL_VARIABLE = 133 # Requires, provides GOOG_REQUIRES_NOT_ALPHABETIZED = 140 @@ -64,6 +67,8 @@ GOOG_PROVIDES_NOT_ALPHABETIZED = 141 MISSING_GOOG_REQUIRE = 142 MISSING_GOOG_PROVIDE = 143 EXTRA_GOOG_REQUIRE = 144 +EXTRA_GOOG_PROVIDE = 145 +ALIAS_STMT_NEEDS_GOOG_REQUIRE = 146 # JsDoc INVALID_JSDOC_TAG = 200 @@ -89,7 +94,11 @@ UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226 INVALID_AUTHOR_TAG_DESCRIPTION = 227 JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230 JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231 -JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER = 240 +JSDOC_MISSING_OPTIONAL_TYPE = 232 +JSDOC_MISSING_OPTIONAL_PREFIX = 233 +JSDOC_MISSING_VAR_ARGS_TYPE = 234 +JSDOC_MISSING_VAR_ARGS_NAME = 235 +JSDOC_DOES_NOT_PARSE = 236 # TODO(robbyw): Split this in to more specific syntax problems. INCORRECT_SUPPRESS_SYNTAX = 250 INVALID_SUPPRESS_TYPE = 251 @@ -103,6 +112,15 @@ FILE_IN_BLOCK = 301 INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400 INTERFACE_METHOD_CANNOT_HAVE_CODE = 401 +# Comments +MISSING_END_OF_SCOPE_COMMENT = 500 +MALFORMED_END_OF_SCOPE_COMMENT = 501 + +# goog.scope - Namespace aliasing +# TODO(nnaze) Add additional errors here and in aliaspass.py +INVALID_USE_OF_GOOG_SCOPE = 600 +EXTRA_GOOG_SCOPE_USAGE = 601 + # ActionScript specific errors: # TODO(user): move these errors to their own file and move all JavaScript # specific errors to their own file as well. 
@@ -125,7 +143,12 @@ NEW_ERRORS = frozenset([ # Errors added after 2.0.2: WRONG_INDENTATION, MISSING_SEMICOLON, - # Errors added after 2.2.5: - WRONG_BLANK_LINE_COUNT, - EXTRA_GOOG_REQUIRE, + # Errors added after 2.3.9: + JSDOC_MISSING_VAR_ARGS_TYPE, + JSDOC_MISSING_VAR_ARGS_NAME, + # Errors added after 2.3.15: + ALIAS_STMT_NEEDS_GOOG_REQUIRE, + JSDOC_DOES_NOT_PARSE, + LINE_ENDS_WITH_DOT, + # Errors added after 2.3.17: ]) diff --git a/tools/closure_linter/closure_linter/fixjsstyle.py b/tools/closure_linter/closure_linter/fixjsstyle.py index 8782e64..2d65e03 100755 --- a/tools/closure_linter/closure_linter/fixjsstyle.py +++ b/tools/closure_linter/closure_linter/fixjsstyle.py @@ -18,15 +18,23 @@ __author__ = 'robbyw@google.com (Robert Walker)' +import StringIO import sys import gflags as flags -from closure_linter import checker + from closure_linter import error_fixer +from closure_linter import runner from closure_linter.common import simplefileflags as fileflags +FLAGS = flags.FLAGS +flags.DEFINE_list('additional_extensions', None, 'List of additional file ' + 'extensions (not js) that should be treated as ' + 'JavaScript files.') +flags.DEFINE_boolean('dry_run', False, 'Do not modify the file, only print it.') + -def main(argv = None): +def main(argv=None): """Main function. Args: @@ -35,13 +43,24 @@ def main(argv = None): if argv is None: argv = flags.FLAGS(sys.argv) - files = fileflags.GetFileList(argv, 'JavaScript', ['.js']) + suffixes = ['.js'] + if FLAGS.additional_extensions: + suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions] + + files = fileflags.GetFileList(argv, 'JavaScript', suffixes) - style_checker = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer()) + output_buffer = None + if FLAGS.dry_run: + output_buffer = StringIO.StringIO() + + fixer = error_fixer.ErrorFixer(output_buffer) # Check the list of files. for filename in files: - style_checker.Check(filename) + runner.Run(filename, fixer) + if FLAGS.dry_run: + print output_buffer.getvalue() + if __name__ == '__main__': main() diff --git a/tools/closure_linter/closure_linter/fixjsstyle_test.py b/tools/closure_linter/closure_linter/fixjsstyle_test.py index 42e9c59..34de3f8 100755 --- a/tools/closure_linter/closure_linter/fixjsstyle_test.py +++ b/tools/closure_linter/closure_linter/fixjsstyle_test.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# # Copyright 2008 The Closure Linter Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,8 +21,9 @@ import StringIO import gflags as flags import unittest as googletest -from closure_linter import checker from closure_linter import error_fixer +from closure_linter import runner + _RESOURCE_PREFIX = 'closure_linter/testdata' @@ -31,30 +31,584 @@ flags.FLAGS.strict = True flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js') flags.FLAGS.closurized_namespaces = ('goog', 'dummy') + class FixJsStyleTest(googletest.TestCase): """Test case to for gjslint auto-fixing.""" + def setUp(self): + flags.FLAGS.dot_on_next_line = True + + def tearDown(self): + flags.FLAGS.dot_on_next_line = False + def testFixJsStyle(self): - input_filename = None - try: - input_filename = '%s/fixjsstyle.in.js' % (_RESOURCE_PREFIX) + test_cases = [ + ['fixjsstyle.in.js', 'fixjsstyle.out.js'], + ['indentation.js', 'fixjsstyle.indentation.out.js'], + ['fixjsstyle.html.in.html', 'fixjsstyle.html.out.html'], + ['fixjsstyle.oplineend.in.js', 'fixjsstyle.oplineend.out.js']] + for [running_input_file, running_output_file] in test_cases: + print 'Checking %s vs %s' % (running_input_file, running_output_file) + input_filename = None + golden_filename = None + current_filename = None + try: + input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file) + current_filename = input_filename - golden_filename = '%s/fixjsstyle.out.js' % (_RESOURCE_PREFIX) - except IOError, ex: - raise IOError('Could not find testdata resource for %s: %s' % - (self._filename, ex)) + golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file) + current_filename = golden_filename + except IOError as ex: + raise IOError('Could not find testdata resource for %s: %s' % + (current_filename, ex)) - # Autofix the file, sending output to a fake file. - actual = StringIO.StringIO() - style_checker = checker.JavaScriptStyleChecker( - error_fixer.ErrorFixer(actual)) - style_checker.Check(input_filename) + if running_input_file == 'fixjsstyle.in.js': + with open(input_filename) as f: + for line in f: + # Go to last line. + pass + self.assertTrue(line == line.rstrip(), '%s file should not end ' + 'with a new line.' % (input_filename)) + + # Autofix the file, sending output to a fake file. + actual = StringIO.StringIO() + runner.Run(input_filename, error_fixer.ErrorFixer(actual)) + + # Now compare the files. 
+ actual.seek(0) + expected = open(golden_filename, 'r') + + # Uncomment to generate new golden files and run + # open('/'.join(golden_filename.split('/')[4:]), 'w').write(actual.read()) + # actual.seek(0) + + self.assertEqual(actual.readlines(), expected.readlines()) + + def testAddProvideFirstLine(self): + """Tests handling of case where goog.provide is added.""" + original = [ + 'dummy.bb.cc = 1;', + ] + + expected = [ + 'goog.provide(\'dummy.bb\');', + '', + 'dummy.bb.cc = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + original = [ + '', + 'dummy.bb.cc = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testAddRequireFirstLine(self): + """Tests handling of case where goog.require is added.""" + original = [ + 'a = dummy.bb.cc;', + ] + + expected = [ + 'goog.require(\'dummy.bb\');', + '', + 'a = dummy.bb.cc;', + ] + + self._AssertFixes(original, expected, include_header=False) + + original = [ + '', + 'a = dummy.bb.cc;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testDeleteProvideAndAddProvideFirstLine(self): + """Tests handling of case where goog.provide is deleted and added. + + Bug 14832597. + """ + original = [ + 'goog.provide(\'dummy.aa\');', + '', + 'dummy.bb.cc = 1;', + ] + + expected = [ + 'goog.provide(\'dummy.bb\');', + '', + 'dummy.bb.cc = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + original = [ + 'goog.provide(\'dummy.aa\');', + 'dummy.bb.cc = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testDeleteProvideAndAddRequireFirstLine(self): + """Tests handling where goog.provide is deleted and goog.require added. + + Bug 14832597. + """ + original = [ + 'goog.provide(\'dummy.aa\');', + '', + 'a = dummy.bb.cc;', + ] + + expected = [ + 'goog.require(\'dummy.bb\');', + '', + 'a = dummy.bb.cc;', + ] + + self._AssertFixes(original, expected, include_header=False) + + original = [ + 'goog.provide(\'dummy.aa\');', + 'a = dummy.bb.cc;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testDeleteRequireAndAddRequireFirstLine(self): + """Tests handling of case where goog.require is deleted and added. + + Bug 14832597. + """ + original = [ + 'goog.require(\'dummy.aa\');', + '', + 'a = dummy.bb.cc;', + ] + + expected = [ + 'goog.require(\'dummy.bb\');', + '', + 'a = dummy.bb.cc;', + ] + + self._AssertFixes(original, expected, include_header=False) + + original = [ + 'goog.require(\'dummy.aa\');', + 'a = dummy.bb.cc;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testDeleteRequireAndAddProvideFirstLine(self): + """Tests handling where goog.require is deleted and goog.provide added. + + Bug 14832597. 
+ """ + original = [ + 'goog.require(\'dummy.aa\');', + '', + 'dummy.bb.cc = 1;', + ] + + expected = [ + 'goog.provide(\'dummy.bb\');', + '', + 'dummy.bb.cc = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + original = [ + 'goog.require(\'dummy.aa\');', + 'dummy.bb.cc = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testMultipleProvideInsert(self): + original = [ + 'goog.provide(\'dummy.bb\');', + 'goog.provide(\'dummy.dd\');', + '', + 'dummy.aa.ff = 1;', + 'dummy.bb.ff = 1;', + 'dummy.cc.ff = 1;', + 'dummy.dd.ff = 1;', + 'dummy.ee.ff = 1;', + ] + + expected = [ + 'goog.provide(\'dummy.aa\');', + 'goog.provide(\'dummy.bb\');', + 'goog.provide(\'dummy.cc\');', + 'goog.provide(\'dummy.dd\');', + 'goog.provide(\'dummy.ee\');', + '', + 'dummy.aa.ff = 1;', + 'dummy.bb.ff = 1;', + 'dummy.cc.ff = 1;', + 'dummy.dd.ff = 1;', + 'dummy.ee.ff = 1;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testMultipleRequireInsert(self): + original = [ + 'goog.require(\'dummy.bb\');', + 'goog.require(\'dummy.dd\');', + '', + 'a = dummy.aa.ff;', + 'b = dummy.bb.ff;', + 'c = dummy.cc.ff;', + 'd = dummy.dd.ff;', + 'e = dummy.ee.ff;', + ] + + expected = [ + 'goog.require(\'dummy.aa\');', + 'goog.require(\'dummy.bb\');', + 'goog.require(\'dummy.cc\');', + 'goog.require(\'dummy.dd\');', + 'goog.require(\'dummy.ee\');', + '', + 'a = dummy.aa.ff;', + 'b = dummy.bb.ff;', + 'c = dummy.cc.ff;', + 'd = dummy.dd.ff;', + 'e = dummy.ee.ff;', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testUnsortedRequires(self): + """Tests handling of unsorted goog.require statements without header. + + Bug 8398202. + """ + original = [ + 'goog.require(\'dummy.aa\');', + 'goog.require(\'dummy.Cc\');', + 'goog.require(\'dummy.Dd\');', + '', + 'function a() {', + ' dummy.aa.i = 1;', + ' dummy.Cc.i = 1;', + ' dummy.Dd.i = 1;', + '}', + ] - # Now compare the files. + expected = [ + 'goog.require(\'dummy.Cc\');', + 'goog.require(\'dummy.Dd\');', + 'goog.require(\'dummy.aa\');', + '', + 'function a() {', + ' dummy.aa.i = 1;', + ' dummy.Cc.i = 1;', + ' dummy.Dd.i = 1;', + '}', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testMissingExtraAndUnsortedRequires(self): + """Tests handling of missing extra and unsorted goog.require statements.""" + original = [ + 'goog.require(\'dummy.aa\');', + 'goog.require(\'dummy.Cc\');', + 'goog.require(\'dummy.Dd\');', + '', + 'var x = new dummy.Bb();', + 'dummy.Cc.someMethod();', + 'dummy.aa.someMethod();', + ] + + expected = [ + 'goog.require(\'dummy.Bb\');', + 'goog.require(\'dummy.Cc\');', + 'goog.require(\'dummy.aa\');', + '', + 'var x = new dummy.Bb();', + 'dummy.Cc.someMethod();', + 'dummy.aa.someMethod();', + ] + + self._AssertFixes(original, expected) + + def testExtraRequireOnFirstLine(self): + """Tests handling of extra goog.require statement on the first line. + + There was a bug when fixjsstyle quits with an exception. It happened if + - the first line of the file is an extra goog.require() statement, + - goog.require() statements are not sorted. 
+ """ + original = [ + 'goog.require(\'dummy.aa\');', + 'goog.require(\'dummy.cc\');', + 'goog.require(\'dummy.bb\');', + '', + 'var x = new dummy.bb();', + 'var y = new dummy.cc();', + ] + + expected = [ + 'goog.require(\'dummy.bb\');', + 'goog.require(\'dummy.cc\');', + '', + 'var x = new dummy.bb();', + 'var y = new dummy.cc();', + ] + + self._AssertFixes(original, expected, include_header=False) + + def testUnsortedProvides(self): + """Tests handling of unsorted goog.provide statements without header. + + Bug 8398202. + """ + original = [ + 'goog.provide(\'dummy.aa\');', + 'goog.provide(\'dummy.Cc\');', + 'goog.provide(\'dummy.Dd\');', + '', + 'dummy.aa = function() {};' + 'dummy.Cc = function() {};' + 'dummy.Dd = function() {};' + ] + + expected = [ + 'goog.provide(\'dummy.Cc\');', + 'goog.provide(\'dummy.Dd\');', + 'goog.provide(\'dummy.aa\');', + '', + 'dummy.aa = function() {};' + 'dummy.Cc = function() {};' + 'dummy.Dd = function() {};' + ] + + self._AssertFixes(original, expected, include_header=False) + + def testMissingExtraAndUnsortedProvides(self): + """Tests handling of missing extra and unsorted goog.provide statements.""" + original = [ + 'goog.provide(\'dummy.aa\');', + 'goog.provide(\'dummy.Cc\');', + 'goog.provide(\'dummy.Dd\');', + '', + 'dummy.Cc = function() {};', + 'dummy.Bb = function() {};', + 'dummy.aa.someMethod = function();', + ] + + expected = [ + 'goog.provide(\'dummy.Bb\');', + 'goog.provide(\'dummy.Cc\');', + 'goog.provide(\'dummy.aa\');', + '', + 'dummy.Cc = function() {};', + 'dummy.Bb = function() {};', + 'dummy.aa.someMethod = function();', + ] + + self._AssertFixes(original, expected) + + def testNoRequires(self): + """Tests positioning of missing requires without existing requires.""" + original = [ + 'goog.provide(\'dummy.Something\');', + '', + 'dummy.Something = function() {};', + '', + 'var x = new dummy.Bb();', + ] + + expected = [ + 'goog.provide(\'dummy.Something\');', + '', + 'goog.require(\'dummy.Bb\');', + '', + 'dummy.Something = function() {};', + '', + 'var x = new dummy.Bb();', + ] + + self._AssertFixes(original, expected) + + def testNoProvides(self): + """Tests positioning of missing provides without existing provides.""" + original = [ + 'goog.require(\'dummy.Bb\');', + '', + 'dummy.Something = function() {};', + '', + 'var x = new dummy.Bb();', + ] + + expected = [ + 'goog.provide(\'dummy.Something\');', + '', + 'goog.require(\'dummy.Bb\');', + '', + 'dummy.Something = function() {};', + '', + 'var x = new dummy.Bb();', + ] + + self._AssertFixes(original, expected) + + def testOutputOkayWhenFirstTokenIsDeleted(self): + """Tests that autofix output is is correct when first token is deleted. 
+ + Regression test for bug 4581567 + """ + original = ['"use strict";'] + expected = ["'use strict';"] + + self._AssertFixes(original, expected, include_header=False) + + def testGoogScopeIndentation(self): + """Tests Handling a typical end-of-scope indentation fix.""" + original = [ + 'goog.scope(function() {', + ' // TODO(brain): Take over the world.', + '}); // goog.scope', + ] + + expected = [ + 'goog.scope(function() {', + '// TODO(brain): Take over the world.', + '}); // goog.scope', + ] + + self._AssertFixes(original, expected) + + def testMissingEndOfScopeComment(self): + """Tests Handling a missing comment at end of goog.scope.""" + original = [ + 'goog.scope(function() {', + '});', + ] + + expected = [ + 'goog.scope(function() {', + '}); // goog.scope', + ] + + self._AssertFixes(original, expected) + + def testMissingEndOfScopeCommentWithOtherComment(self): + """Tests handling an irrelevant comment at end of goog.scope.""" + original = [ + 'goog.scope(function() {', + "}); // I don't belong here!", + ] + + expected = [ + 'goog.scope(function() {', + '}); // goog.scope', + ] + + self._AssertFixes(original, expected) + + def testMalformedEndOfScopeComment(self): + """Tests Handling a malformed comment at end of goog.scope.""" + original = [ + 'goog.scope(function() {', + '}); // goog.scope FTW', + ] + + expected = [ + 'goog.scope(function() {', + '}); // goog.scope', + ] + + self._AssertFixes(original, expected) + + def testEndsWithIdentifier(self): + """Tests Handling case where script ends with identifier. Bug 7643404.""" + original = [ + 'goog.provide(\'xyz\');', + '', + 'abc' + ] + + expected = [ + 'goog.provide(\'xyz\');', + '', + 'abc;' + ] + + self._AssertFixes(original, expected) + + def testFileStartsWithSemicolon(self): + """Tests handling files starting with semicolon. + + b/10062516 + """ + original = [ + ';goog.provide(\'xyz\');', + '', + 'abc;' + ] + + expected = [ + 'goog.provide(\'xyz\');', + '', + 'abc;' + ] + + self._AssertFixes(original, expected, include_header=False) + + def testCodeStartsWithSemicolon(self): + """Tests handling code in starting with semicolon after comments. + + b/10062516 + """ + original = [ + ';goog.provide(\'xyz\');', + '', + 'abc;' + ] + + expected = [ + 'goog.provide(\'xyz\');', + '', + 'abc;' + ] + + self._AssertFixes(original, expected) + + def _AssertFixes(self, original, expected, include_header=True): + """Asserts that the error fixer corrects original to expected.""" + if include_header: + original = self._GetHeader() + original + expected = self._GetHeader() + expected + + actual = StringIO.StringIO() + runner.Run('testing.js', error_fixer.ErrorFixer(actual), original) actual.seek(0) - expected = open(golden_filename, 'r') - self.assertEqual(actual.readlines(), expected.readlines()) + expected = [x + '\n' for x in expected] + + self.assertListEqual(actual.readlines(), expected) + + def _GetHeader(self): + """Returns a fake header for a JavaScript file.""" + return [ + '// Copyright 2011 Google Inc. 
All Rights Reserved.', + '', + '/**', + ' * @fileoverview Fake file overview.', + ' * @author fake@google.com (Fake Person)', + ' */', + '' + ] if __name__ == '__main__': diff --git a/tools/closure_linter/closure_linter/full_test.py b/tools/closure_linter/closure_linter/full_test.py index f11f235..d0a1557 100755 --- a/tools/closure_linter/closure_linter/full_test.py +++ b/tools/closure_linter/closure_linter/full_test.py @@ -23,7 +23,6 @@ devtools/javascript/gpylint/full_test.py __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') -import re import os import sys import unittest @@ -31,8 +30,9 @@ import unittest import gflags as flags import unittest as googletest -from closure_linter import checker +from closure_linter import error_check from closure_linter import errors +from closure_linter import runner from closure_linter.common import filetestcase _RESOURCE_PREFIX = 'closure_linter/testdata' @@ -40,38 +40,57 @@ _RESOURCE_PREFIX = 'closure_linter/testdata' flags.FLAGS.strict = True flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires') flags.FLAGS.closurized_namespaces = ('goog', 'dummy') -flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js') +flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js', + 'limited_doc_checks.js') +flags.FLAGS.jslint_error = error_check.Rule.ALL # List of files under testdata to test. # We need to list files explicitly since pyglib can't list directories. +# TODO(user): Figure out how to list the directory. _TEST_FILES = [ 'all_js_wrapped.js', 'blank_lines.js', 'ends_with_block.js', + 'empty_file.js', 'externs.js', + 'externs_jsdoc.js', + 'goog_scope.js', 'html_parse_error.html', 'indentation.js', 'interface.js', 'jsdoc.js', + 'limited_doc_checks.js', 'minimal.js', 'other.js', + 'provide_blank.js', + 'provide_extra.js', + 'provide_missing.js', + 'require_alias.js', 'require_all_caps.js', + 'require_blank.js', 'require_extra.js', 'require_function.js', 'require_function_missing.js', 'require_function_through_both.js', 'require_function_through_namespace.js', 'require_interface.js', + 'require_interface_alias.js', + 'require_interface_base.js', 'require_lower_case.js', + 'require_missing.js', 'require_numeric.js', - 'require_provide_ok.js', + 'require_provide_blank.js', 'require_provide_missing.js', + 'require_provide_ok.js', + 'semicolon_missing.js', 'simple.html', 'spaces.js', 'tokenizer.js', 'unparseable.js', - 'utf8.html' - ] + 'unused_local_variables.js', + 'unused_private_members.js', + 'utf8.html', +] class GJsLintTestSuite(unittest.TestSuite): @@ -91,8 +110,11 @@ class GJsLintTestSuite(unittest.TestSuite): test_files = _TEST_FILES for test_file in test_files: resource_path = os.path.join(_RESOURCE_PREFIX, test_file) - self.addTest(filetestcase.AnnotatedFileTestCase(resource_path, - checker.GJsLintRunner(), errors.ByName)) + self.addTest( + filetestcase.AnnotatedFileTestCase( + resource_path, + runner.Run, + errors.ByName)) if __name__ == '__main__': # Don't let main parse args; it happens in the TestSuite. diff --git a/tools/closure_linter/closure_linter/gjslint.py b/tools/closure_linter/closure_linter/gjslint.py index e33bddd..824e025 100755 --- a/tools/closure_linter/closure_linter/gjslint.py +++ b/tools/closure_linter/closure_linter/gjslint.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# # Copyright 2007 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -33,33 +32,201 @@ is in tokenizer.py and checker.py. 
""" __author__ = ('robbyw@google.com (Robert Walker)', - 'ajp@google.com (Andy Perelson)') + 'ajp@google.com (Andy Perelson)', + 'nnaze@google.com (Nathan Naze)',) +import errno +import itertools +import os +import platform +import re import sys import time -from closure_linter import checker -from closure_linter import errors -from closure_linter.common import errorprinter -from closure_linter.common import simplefileflags as fileflags import gflags as flags +from closure_linter import errorrecord +from closure_linter import runner +from closure_linter.common import erroraccumulator +from closure_linter.common import simplefileflags as fileflags + +# Attempt import of multiprocessing (should be available in Python 2.6 and up). +try: + # pylint: disable=g-import-not-at-top + import multiprocessing +except ImportError: + multiprocessing = None FLAGS = flags.FLAGS flags.DEFINE_boolean('unix_mode', False, 'Whether to emit warnings in standard unix format.') flags.DEFINE_boolean('beep', True, 'Whether to beep when errors are found.') flags.DEFINE_boolean('time', False, 'Whether to emit timing statistics.') +flags.DEFINE_boolean('quiet', False, 'Whether to minimize logged messages. ' + 'Most useful for per-file linting, such as that performed ' + 'by the presubmit linter service.') flags.DEFINE_boolean('check_html', False, 'Whether to check javascript in html files.') flags.DEFINE_boolean('summary', False, 'Whether to show an error count summary.') +flags.DEFINE_list('additional_extensions', None, 'List of additional file ' + 'extensions (not js) that should be treated as ' + 'JavaScript files.') +flags.DEFINE_boolean('multiprocess', + platform.system() is 'Linux' and bool(multiprocessing), + 'Whether to attempt parallelized linting using the ' + 'multiprocessing module. Enabled by default on Linux ' + 'if the multiprocessing module is present (Python 2.6+). ' + 'Otherwise disabled by default. ' + 'Disabling may make debugging easier.') +flags.ADOPT_module_key_flags(fileflags) +flags.ADOPT_module_key_flags(runner) + GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time', - '--check_html', '--summary'] + '--check_html', '--summary', '--quiet'] + + + +def _MultiprocessCheckPaths(paths): + """Run _CheckPath over mutltiple processes. + + Tokenization, passes, and checks are expensive operations. Running in a + single process, they can only run on one CPU/core. Instead, + shard out linting over all CPUs with multiprocessing to parallelize. + + Args: + paths: paths to check. + + Yields: + errorrecord.ErrorRecords for any found errors. + """ + + pool = multiprocessing.Pool() + + path_results = pool.imap(_CheckPath, paths) + for results in path_results: + for result in results: + yield result + + # Force destruct before returning, as this can sometimes raise spurious + # "interrupted system call" (EINTR), which we can ignore. + try: + pool.close() + pool.join() + del pool + except OSError as err: + if err.errno is not errno.EINTR: + raise err + + +def _CheckPaths(paths): + """Run _CheckPath on all paths in one thread. + + Args: + paths: paths to check. + + Yields: + errorrecord.ErrorRecords for any found errors. + """ + + for path in paths: + results = _CheckPath(path) + for record in results: + yield record + + +def _CheckPath(path): + """Check a path and return any errors. + + Args: + path: paths to check. + + Returns: + A list of errorrecord.ErrorRecords for any found errors. 
+ """ + + error_handler = erroraccumulator.ErrorAccumulator() + runner.Run(path, error_handler) + + make_error_record = lambda err: errorrecord.MakeErrorRecord(path, err) + return map(make_error_record, error_handler.GetErrors()) -def FormatTime(t): +def _GetFilePaths(argv): + suffixes = ['.js'] + if FLAGS.additional_extensions: + suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions] + if FLAGS.check_html: + suffixes += ['.html', '.htm'] + return fileflags.GetFileList(argv, 'JavaScript', suffixes) + + +# Error printing functions + + +def _PrintFileSummary(paths, records): + """Print a detailed summary of the number of errors in each file.""" + + paths = list(paths) + paths.sort() + + for path in paths: + path_errors = [e for e in records if e.path == path] + print '%s: %d' % (path, len(path_errors)) + + +def _PrintFileSeparator(path): + print '----- FILE : %s -----' % path + + +def _PrintSummary(paths, error_records): + """Print a summary of the number of errors and files.""" + + error_count = len(error_records) + all_paths = set(paths) + all_paths_count = len(all_paths) + + if error_count is 0: + print '%d files checked, no errors found.' % all_paths_count + + new_error_count = len([e for e in error_records if e.new_error]) + + error_paths = set([e.path for e in error_records]) + error_paths_count = len(error_paths) + no_error_paths_count = all_paths_count - error_paths_count + + if (error_count or new_error_count) and not FLAGS.quiet: + error_noun = 'error' if error_count == 1 else 'errors' + new_error_noun = 'error' if new_error_count == 1 else 'errors' + error_file_noun = 'file' if error_paths_count == 1 else 'files' + ok_file_noun = 'file' if no_error_paths_count == 1 else 'files' + print ('Found %d %s, including %d new %s, in %d %s (%d %s OK).' % + (error_count, + error_noun, + new_error_count, + new_error_noun, + error_paths_count, + error_file_noun, + no_error_paths_count, + ok_file_noun)) + + +def _PrintErrorRecords(error_records): + """Print error records strings in the expected format.""" + + current_path = None + for record in error_records: + + if current_path != record.path: + current_path = record.path + if not FLAGS.unix_mode: + _PrintFileSeparator(current_path) + + print record.error_string + + +def _FormatTime(t): """Formats a duration as a human-readable string. Args: @@ -74,7 +241,9 @@ def FormatTime(t): return '%.2fs' % t -def main(argv = None): + + +def main(argv=None): """Main function. 
Args: @@ -82,33 +251,41 @@ def main(argv = None): """ if argv is None: argv = flags.FLAGS(sys.argv) - + if FLAGS.time: - start_time = time.time() + start_time = time.time() suffixes = ['.js'] + if FLAGS.additional_extensions: + suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions] if FLAGS.check_html: suffixes += ['.html', '.htm'] - files = fileflags.GetFileList(argv, 'JavaScript', suffixes) + paths = fileflags.GetFileList(argv, 'JavaScript', suffixes) - error_handler = None - if FLAGS.unix_mode: - error_handler = errorprinter.ErrorPrinter(errors.NEW_ERRORS) - error_handler.SetFormat(errorprinter.UNIX_FORMAT) + if FLAGS.multiprocess: + records_iter = _MultiprocessCheckPaths(paths) + else: + records_iter = _CheckPaths(paths) + + records_iter, records_iter_copy = itertools.tee(records_iter, 2) + _PrintErrorRecords(records_iter_copy) - runner = checker.GJsLintRunner() - result = runner.Run(files, error_handler) - result.PrintSummary() + error_records = list(records_iter) + _PrintSummary(paths, error_records) exit_code = 0 - if result.HasOldErrors(): + + # If there are any errors + if error_records: exit_code += 1 - if result.HasNewErrors(): + + # If there are any new errors + if [r for r in error_records if r.new_error]: exit_code += 2 if exit_code: if FLAGS.summary: - result.PrintFileSummary() + _PrintFileSummary(paths, error_records) if FLAGS.beep: # Make a beep noise. @@ -124,16 +301,16 @@ def main(argv = None): else: fix_args.append(flag) - print """ + if not FLAGS.quiet: + print """ Some of the errors reported by GJsLint may be auto-fixable using the script fixjsstyle. Please double check any changes it makes and report any bugs. The script can be run by executing: -fixjsstyle %s -""" % ' '.join(fix_args) +fixjsstyle %s """ % ' '.join(fix_args) if FLAGS.time: - print 'Done in %s.' % FormatTime(time.time() - start_time) + print 'Done in %s.' % _FormatTime(time.time() - start_time) sys.exit(exit_code) diff --git a/tools/closure_linter/closure_linter/indentation.py b/tools/closure_linter/closure_linter/indentation.py index d740607..d48ad2b 100755 --- a/tools/closure_linter/closure_linter/indentation.py +++ b/tools/closure_linter/closure_linter/indentation.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# # Copyright 2010 The Closure Linter Authors. All Rights Reserved. 
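main() now tees the record stream so records can be printed as they arrive and still be counted for the summary afterwards, and the exit code packs "any errors" and "any new errors" into separate bits. A compressed sketch of that control flow; the Record tuple is illustrative only:

import collections
import itertools
import sys

Record = collections.namedtuple('Record', ['path', 'new_error', 'error_string'])


def run(records_iter):
    # Duplicate the stream: one copy is printed eagerly, one is collected.
    records_iter, records_copy = itertools.tee(records_iter, 2)
    for record in records_copy:
        print(record.error_string)

    error_records = list(records_iter)

    exit_code = 0
    if error_records:                               # bit 1: any errors at all
        exit_code += 1
    if [r for r in error_records if r.new_error]:   # bit 2: any new errors
        exit_code += 2
    return exit_code


if __name__ == '__main__':
    demo = iter([Record('a.js', False, 'a.js:1: style nit'),
                 Record('a.js', True, 'a.js:9: new error')])
    sys.exit(run(demo))
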
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,6 +17,8 @@ __author__ = ('robbyw@google.com (Robert Walker)') +import gflags as flags + from closure_linter import ecmametadatapass from closure_linter import errors from closure_linter import javascripttokens @@ -25,7 +26,6 @@ from closure_linter import tokenutil from closure_linter.common import error from closure_linter.common import position -import gflags as flags flags.DEFINE_boolean('debug_indentation', False, 'Whether to print debugging information for indentation.') @@ -89,7 +89,7 @@ class TokenInfo(object): self.overridden_by = None self.is_permanent_override = False self.is_block = is_block - self.is_transient = not is_block and not token.type in ( + self.is_transient = not is_block and token.type not in ( Type.START_PAREN, Type.START_PARAMETERS) self.line_number = token.line_number @@ -121,7 +121,7 @@ class IndentationRules(object): if self._stack: old_stack = self._stack self._stack = [] - raise Exception("INTERNAL ERROR: indentation stack is not empty: %r" % + raise Exception('INTERNAL ERROR: indentation stack is not empty: %r' % old_stack) def CheckToken(self, token, state): @@ -152,26 +152,40 @@ class IndentationRules(object): self._PopTo(Type.START_BRACKET) elif token_type == Type.END_BLOCK: - self._PopTo(Type.START_BLOCK) + start_token = self._PopTo(Type.START_BLOCK) + # Check for required goog.scope comment. + if start_token: + goog_scope = tokenutil.GoogScopeOrNoneFromStartBlock(start_token.token) + if goog_scope is not None: + if not token.line.endswith('; // goog.scope\n'): + if (token.line.find('//') > -1 and + token.line.find('goog.scope') > + token.line.find('//')): + indentation_errors.append([ + errors.MALFORMED_END_OF_SCOPE_COMMENT, + ('Malformed end of goog.scope comment. Please use the ' + 'exact following syntax to close the scope:\n' + '}); // goog.scope'), + token, + Position(token.start_index, token.length)]) + else: + indentation_errors.append([ + errors.MISSING_END_OF_SCOPE_COMMENT, + ('Missing comment for end of goog.scope which opened at line ' + '%d. End the scope with:\n' + '}); // goog.scope' % + (start_token.line_number)), + token, + Position(token.start_index, token.length)]) elif token_type == Type.KEYWORD and token.string in ('case', 'default'): self._Add(self._PopTo(Type.START_BLOCK)) - elif is_first and token.string == '.': - # This token should have been on the previous line, so treat it as if it - # was there. - info = TokenInfo(token) - info.line_number = token.line_number - 1 - self._Add(info) - elif token_type == Type.SEMICOLON: self._PopTransient() - not_binary_operator = (token_type != Type.OPERATOR or - token.metadata.IsUnaryOperator()) - not_dot = token.string != '.' - if is_first and not_binary_operator and not_dot and token.type not in ( - Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT): + if (is_first and + token_type not in (Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT)): if flags.FLAGS.debug_indentation: print 'Line #%d: stack %r' % (token.line_number, stack) @@ -198,21 +212,21 @@ class IndentationRules(object): indentation_errors.append([ errors.WRONG_INDENTATION, 'Wrong indentation: expected any of {%s} but got %d' % ( - ', '.join( - ['%d' % x for x in expected]), actual), + ', '.join('%d' % x for x in expected if x < 80), actual), token, Position(actual, expected[0])]) self._start_index_offset[token.line_number] = expected[0] - actual # Add tokens that could increase indentation. 
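The indentation pass now insists that a goog.scope block is closed with the exact trailer "}); // goog.scope", distinguishing a malformed closing comment from a missing one. A line-level sketch of that classification, simplified from the token-based check above:

def check_goog_scope_close(line):
    """Classifies the line that closes a goog.scope block.

    Returns None when the closing comment is well formed, otherwise a short
    description mirroring the two errors added in this patch.
    """
    if line.endswith('; // goog.scope\n'):
        return None
    # A '//' comment mentioning goog.scope exists, but not in the exact form.
    if '//' in line and line.find('goog.scope') > line.find('//'):
        return 'malformed end of goog.scope comment; use "}); // goog.scope"'
    return 'missing end of goog.scope comment; close with "}); // goog.scope"'


if __name__ == '__main__':
    print(check_goog_scope_close('});  // goog.scope\n'))   # malformed (extra space)
    print(check_goog_scope_close('});\n'))                  # missing
    print(check_goog_scope_close('}); // goog.scope\n'))    # None, OK
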
if token_type == Type.START_BRACKET: - self._Add(TokenInfo(token=token, + self._Add(TokenInfo( + token=token, is_block=token.metadata.context.type == Context.ARRAY_LITERAL)) elif token_type == Type.START_BLOCK or token.metadata.is_implied_block: self._Add(TokenInfo(token=token, is_block=True)) - elif token_type in (Type.START_PAREN, Type.START_PARAMETERS): + elif token_type in (Type.START_PAREN, Type.START_PARAMETERS): self._Add(TokenInfo(token=token, is_block=False)) elif token_type == Type.KEYWORD and token.string == 'return': @@ -229,12 +243,15 @@ class IndentationRules(object): # Add some tokens only if they appear at the end of the line. is_last = self._IsLastCodeInLine(token) if is_last: + next_code_token = tokenutil.GetNextCodeToken(token) + # Increase required indentation if this is an overlong wrapped statement + # ending in an operator. if token_type == Type.OPERATOR: if token.string == ':': - if (stack and stack[-1].token.string == '?'): + if stack and stack[-1].token.string == '?': # When a ternary : is on a different line than its '?', it doesn't # add indentation. - if (token.line_number == stack[-1].token.line_number): + if token.line_number == stack[-1].token.line_number: self._Add(TokenInfo(token)) elif token.metadata.context.type == Context.CASE_BLOCK: # Pop transient tokens from say, line continuations, e.g., @@ -249,7 +266,6 @@ class IndentationRules(object): # When in an object literal, acts as operator indicating line # continuations. self._Add(TokenInfo(token)) - pass else: # ':' might also be a statement label, no effect on indentation in # this case. @@ -263,13 +279,16 @@ class IndentationRules(object): self._Add(TokenInfo(token)) elif token.metadata.context.type != Context.PARAMETERS: self._PopTransient() - - elif (token.string.endswith('.') - and token_type in (Type.IDENTIFIER, Type.NORMAL)): + # Increase required indentation if this is the end of a statement that's + # continued with an operator on the next line (e.g. the '.'). + elif (next_code_token and next_code_token.type == Type.OPERATOR and + not next_code_token.metadata.IsUnaryOperator()): self._Add(TokenInfo(token)) elif token_type == Type.PARAMETERS and token.string.endswith(','): # Parameter lists. self._Add(TokenInfo(token)) + elif token.IsKeyword('var'): + self._Add(TokenInfo(token)) elif token.metadata.is_implied_semicolon: self._PopTransient() elif token.IsAssignment(): @@ -297,6 +316,12 @@ class IndentationRules(object): def _IsHardStop(self, token): """Determines if the given token can have a hard stop after it. + Args: + token: token to examine + + Returns: + Whether the token can have a hard stop after it. + Hard stops are indentations defined by the position of another token as in indentation lined up with return, (, [, and ?. """ @@ -341,7 +366,15 @@ class IndentationRules(object): # Handle hard stops after (, [, return, =, and ? if self._IsHardStop(token): override_is_hard_stop = (token_info.overridden_by and - self._IsHardStop(token_info.overridden_by.token)) + self._IsHardStop( + token_info.overridden_by.token)) + if token.type == Type.START_PAREN and token.previous: + # For someFunction(...) 
we allow to indent at the beginning of the + # identifier +4 + prev = token.previous + if (prev.type == Type.IDENTIFIER and + prev.line_number == token.line_number): + hard_stops.add(prev.start_index + 4) if not override_is_hard_stop: start_index = token.start_index if token.line_number in self._start_index_offset: @@ -353,7 +386,7 @@ class IndentationRules(object): elif token.string == 'return' and not token_info.overridden_by: hard_stops.add(start_index + 7) - elif (token.type == Type.START_BRACKET): + elif token.type == Type.START_BRACKET: hard_stops.add(start_index + 1) elif token.IsAssignment(): @@ -423,6 +456,31 @@ class IndentationRules(object): if token.type not in Type.NON_CODE_TYPES: return False + def _AllFunctionPropertyAssignTokens(self, start_token, end_token): + """Checks if tokens are (likely) a valid function property assignment. + + Args: + start_token: Start of the token range. + end_token: End of the token range. + + Returns: + True if all tokens between start_token and end_token are legal tokens + within a function declaration and assignment into a property. + """ + for token in tokenutil.GetTokenRange(start_token, end_token): + fn_decl_tokens = (Type.FUNCTION_DECLARATION, + Type.PARAMETERS, + Type.START_PARAMETERS, + Type.END_PARAMETERS, + Type.END_PAREN) + if (token.type not in fn_decl_tokens and + token.IsCode() and + not tokenutil.IsIdentifierOrDot(token) and + not token.IsAssignment() and + not (token.type == Type.OPERATOR and token.string == ',')): + return False + return True + def _Add(self, token_info): """Adds the given token info to the stack. @@ -434,9 +492,35 @@ class IndentationRules(object): return if token_info.is_block or token_info.token.type == Type.START_PAREN: - index = 1 - while index <= len(self._stack): - stack_info = self._stack[-index] + scope_token = tokenutil.GoogScopeOrNoneFromStartBlock(token_info.token) + token_info.overridden_by = TokenInfo(scope_token) if scope_token else None + + if (token_info.token.type == Type.START_BLOCK and + token_info.token.metadata.context.type == Context.BLOCK): + # Handle function() {} assignments: their block contents get special + # treatment and are allowed to just indent by two whitespace. + # For example + # long.long.name = function( + # a) { + # In this case the { and the = are on different lines. But the + # override should still apply for all previous stack tokens that are + # part of an assignment of a block. + + has_assignment = any(x for x in self._stack if x.token.IsAssignment()) + if has_assignment: + last_token = token_info.token.previous + for stack_info in reversed(self._stack): + if (last_token and + not self._AllFunctionPropertyAssignTokens(stack_info.token, + last_token)): + break + stack_info.overridden_by = token_info + stack_info.is_permanent_override = True + last_token = stack_info.token + + index = len(self._stack) - 1 + while index >= 0: + stack_info = self._stack[index] stack_token = stack_info.token if stack_info.line_number == token_info.line_number: @@ -451,24 +535,14 @@ class IndentationRules(object): # a: 10 # }, # 30); + # b/11450054. If a string is not closed properly then close_block + # could be null. 
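Wrapped lines may align with "hard stops" fixed by an earlier token: the column after an opening ( or [, 'return' plus 7, and, new in this patch, the start of the identifier preceding a call's ( plus 4. A much-simplified sketch that collects such candidate columns from one source line (the real logic works on the token stream):

def hard_stop_columns(line):
    """Collects simplified hard-stop columns for one wrapped source line."""
    stops = set()
    if line.lstrip().startswith('return '):
        stops.add(line.index('return') + 7)      # align after 'return '
    for index, char in enumerate(line):
        if char in '([':
            stops.add(index + 1)                 # align just after the opener
        if char == '(':
            # someFunction( ... -> also allow identifier start + 4.
            start = index
            while start > 0 and (line[start - 1].isalnum() or
                                 line[start - 1] in '._$'):
                start -= 1
            if start < index:
                stops.add(start + 4)
    return sorted(stops)


if __name__ == '__main__':
    print(hard_stop_columns('  return goog.array.map(values, ['))
    # [9, 13, 24, 33]
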
close_block = token_info.token.metadata.context.end_token - stack_info.is_permanent_override = \ - close_block.line_number != token_info.token.line_number - elif (token_info.token.type == Type.START_BLOCK and - token_info.token.metadata.context.type == Context.BLOCK and - (stack_token.IsAssignment() or - stack_token.type == Type.IDENTIFIER)): - # When starting a function block, the override can transcend lines. - # For example - # long.long.name = function( - # a) { - # In this case the { and the = are on different lines. But the - # override should still apply. - stack_info.overridden_by = token_info - stack_info.is_permanent_override = True + stack_info.is_permanent_override = close_block and ( + close_block.line_number != token_info.token.line_number) else: break - index += 1 + index -= 1 self._stack.append(token_info) diff --git a/tools/closure_linter/closure_linter/javascriptlintrules.py b/tools/closure_linter/closure_linter/javascriptlintrules.py old mode 100755 new mode 100644 index 6b9f1be..9578009 --- a/tools/closure_linter/closure_linter/javascriptlintrules.py +++ b/tools/closure_linter/closure_linter/javascriptlintrules.py @@ -1,6 +1,5 @@ #!/usr/bin/env python -# -# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# Copyright 2011 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -24,50 +23,46 @@ __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)', 'jacobr@google.com (Jacob Richman)') -import gflags as flags +import re + from closure_linter import ecmalintrules +from closure_linter import error_check from closure_linter import errors from closure_linter import javascripttokenizer from closure_linter import javascripttokens +from closure_linter import requireprovidesorter from closure_linter import tokenutil from closure_linter.common import error from closure_linter.common import position -FLAGS = flags.FLAGS -flags.DEFINE_list('closurized_namespaces', '', - 'Namespace prefixes, used for testing of' - 'goog.provide/require') -flags.DEFINE_list('ignored_extra_namespaces', '', - 'Fully qualified namespaces that should be not be reported ' - 'as extra by the linter.') - # Shorthand Error = error.Error Position = position.Position +Rule = error_check.Rule Type = javascripttokens.JavaScriptTokenType class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules): """JavaScript lint rules that catch JavaScript specific style errors.""" + def __init__(self, namespaces_info): + """Initializes a JavaScriptLintRules instance.""" + ecmalintrules.EcmaScriptLintRules.__init__(self) + self._namespaces_info = namespaces_info + self._declared_private_member_tokens = {} + self._declared_private_members = set() + self._used_private_members = set() + # A stack of dictionaries, one for each function scope entered. Each + # dictionary is keyed by an identifier that defines a local variable and has + # a token as its value. + self._unused_local_variables_by_scope = [] + def HandleMissingParameterDoc(self, token, param_name): """Handle errors associated with a parameter missing a param tag.""" self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION, 'Missing docs for parameter: "%s"' % param_name, token) - def __ContainsRecordType(self, token): - """Check whether the given token contains a record type. 
- - Args: - token: The token being checked - """ - # If we see more than one left-brace in the string of an annotation token, - # then there's a record type in there. - return (token and token.type == Type.DOC_FLAG and - token.attached_object.type is not None and - token.attached_object.type.find('{') != token.string.rfind('{')) - - + # pylint: disable=too-many-statements def CheckToken(self, token, state): """Checks a token, given the current parser_state, for warnings and errors. @@ -75,33 +70,94 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules): token: The current token under consideration state: parser_state object that indicates the current state in the page """ - if self.__ContainsRecordType(token): - # We should bail out and not emit any warnings for this annotation. - # TODO(nicksantos): Support record types for real. - state.GetDocComment().Invalidate() - return # Call the base class's CheckToken function. super(JavaScriptLintRules, self).CheckToken(token, state) # Store some convenience variables - first_in_line = token.IsFirstInLine() - last_in_line = token.IsLastInLine() - type = token.type - - if type == Type.DOC_FLAG: + namespaces_info = self._namespaces_info + + if error_check.ShouldCheck(Rule.UNUSED_LOCAL_VARIABLES): + self._CheckUnusedLocalVariables(token, state) + + if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS): + # Find all assignments to private members. + if token.type == Type.SIMPLE_LVALUE: + identifier = token.string + if identifier.endswith('_') and not identifier.endswith('__'): + doc_comment = state.GetDocComment() + suppressed = doc_comment and ( + 'underscore' in doc_comment.suppressions or + 'unusedPrivateMembers' in doc_comment.suppressions) + if not suppressed: + # Look for static members defined on a provided namespace. + if namespaces_info: + namespace = namespaces_info.GetClosurizedNamespace(identifier) + provided_namespaces = namespaces_info.GetProvidedNamespaces() + else: + namespace = None + provided_namespaces = set() + + # Skip cases of this.something_.somethingElse_. + regex = re.compile(r'^this\.[a-zA-Z_]+$') + if namespace in provided_namespaces or regex.match(identifier): + variable = identifier.split('.')[-1] + self._declared_private_member_tokens[variable] = token + self._declared_private_members.add(variable) + elif not identifier.endswith('__'): + # Consider setting public members of private members to be a usage. + for piece in identifier.split('.'): + if piece.endswith('_'): + self._used_private_members.add(piece) + + # Find all usages of private members. + if token.type == Type.IDENTIFIER: + for piece in token.string.split('.'): + if piece.endswith('_'): + self._used_private_members.add(piece) + + if token.type == Type.DOC_FLAG: flag = token.attached_object if flag.flag_type == 'param' and flag.name_token is not None: self._CheckForMissingSpaceBeforeToken( token.attached_object.name_token) + if flag.type is not None and flag.name is not None: + if error_check.ShouldCheck(Rule.VARIABLE_ARG_MARKER): + # Check for variable arguments marker in type. + if flag.jstype.IsVarArgsType() and flag.name != 'var_args': + self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_NAME, + 'Variable length argument %s must be renamed ' + 'to var_args.' % flag.name, + token) + elif not flag.jstype.IsVarArgsType() and flag.name == 'var_args': + self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_TYPE, + 'Variable length argument %s type must start ' + 'with \'...\'.' 
% flag.name, + token) + + if error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER): + # Check for optional marker in type. + if (flag.jstype.opt_arg and + not flag.name.startswith('opt_')): + self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX, + 'Optional parameter name %s must be prefixed ' + 'with opt_.' % flag.name, + token) + elif (not flag.jstype.opt_arg and + flag.name.startswith('opt_')): + self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE, + 'Optional parameter %s type must end with =.' % + flag.name, + token) + if flag.flag_type in state.GetDocFlag().HAS_TYPE: # Check for both missing type token and empty type braces '{}' - # Missing suppress types are reported separately and we allow enums - # without types. - if (flag.flag_type not in ('suppress', 'enum') and - (flag.type == None or flag.type == '' or flag.type.isspace())): + # Missing suppress types are reported separately and we allow enums, + # const, private, public and protected without types. + if (flag.flag_type not in state.GetDocFlag().CAN_OMIT_TYPE + and (not flag.jstype or flag.jstype.IsEmpty())): self._HandleError(errors.MISSING_JSDOC_TAG_TYPE, 'Missing type in %s tag' % token.string, token) @@ -112,43 +168,64 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules): 'Type should be immediately after %s tag' % token.string, token) - elif type == Type.DOUBLE_QUOTE_STRING_START: - next = token.next - while next.type == Type.STRING_TEXT: + elif token.type == Type.DOUBLE_QUOTE_STRING_START: + next_token = token.next + while next_token.type == Type.STRING_TEXT: if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search( - next.string): + next_token.string): break - next = next.next + next_token = next_token.next else: self._HandleError( errors.UNNECESSARY_DOUBLE_QUOTED_STRING, 'Single-quoted string preferred over double-quoted string.', token, - Position.All(token.string)) + position=Position.All(token.string)) + + elif token.type == Type.END_DOC_COMMENT: + doc_comment = state.GetDocComment() + + # When @externs appears in a @fileoverview comment, it should trigger + # the same limited doc checks as a special filename like externs.js. + if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'): + self._SetLimitedDocChecks(True) - elif type == Type.END_DOC_COMMENT: - if (FLAGS.strict and not self._is_html and state.InTopLevel() and - not state.InBlock()): + if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and + not self._is_html and + state.InTopLevel() and + not state.InNonScopeBlock()): # Check if we're in a fileoverview or constructor JsDoc. - doc_comment = state.GetDocComment() - is_constructor = (doc_comment.HasFlag('constructor') or + is_constructor = ( + doc_comment.HasFlag('constructor') or doc_comment.HasFlag('interface')) - is_file_overview = doc_comment.HasFlag('fileoverview') + # @fileoverview is an optional tag so if the dosctring is the first + # token in the file treat it as a file level docstring. + is_file_level_comment = ( + doc_comment.HasFlag('fileoverview') or + not doc_comment.start_token.previous) # If the comment is not a file overview, and it does not immediately # precede some code, skip it. # NOTE: The tokenutil methods are not used here because of their # behavior at the top of a file. 
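Two Rule-gated checks tie JSDoc parameter names to their annotated types: a variadic '...' type must be named var_args, and an optional '=' type must carry an opt_ prefix (and conversely). A minimal sketch of those pairing rules over plain type strings rather than flag.jstype objects:

def check_param_name(name, type_str):
    """Returns a list of problems for one @param name/type pair.

    type_str is the raw JSDoc type, e.g. '...number' or 'string='.
    """
    problems = []
    is_var_args = type_str.startswith('...')
    is_optional = type_str.endswith('=')

    if is_var_args and name != 'var_args':
        problems.append('variable length argument %s must be renamed '
                        'to var_args' % name)
    elif not is_var_args and name == 'var_args':
        problems.append("variable length argument %s type must start "
                        "with '...'" % name)

    if is_optional and not name.startswith('opt_'):
        problems.append('optional parameter name %s must be prefixed '
                        'with opt_' % name)
    elif not is_optional and name.startswith('opt_'):
        problems.append('optional parameter %s type must end with =' % name)

    return problems


if __name__ == '__main__':
    print(check_param_name('values', '...number'))   # should be var_args
    print(check_param_name('opt_count', 'number'))   # type should end with =
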
- next = token.next - if (not next or - (not is_file_overview and next.type in Type.NON_CODE_TYPES)): + next_token = token.next + if (not next_token or + (not is_file_level_comment and + next_token.type in Type.NON_CODE_TYPES)): + return + + # Don't require extra blank lines around suppression of extra + # goog.require errors. + if (doc_comment.SuppressionOnly() and + next_token.type == Type.IDENTIFIER and + next_token.string in ['goog.provide', 'goog.require']): return # Find the start of this block (include comments above the block, unless # this is a file overview). block_start = doc_comment.start_token - if not is_file_overview: + if not is_file_level_comment: token = block_start.previous while token and token.type in Type.COMMENT_TYPES: block_start = token @@ -170,23 +247,27 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules): error_message = False expected_blank_lines = 0 - if is_file_overview and blank_lines == 0: + # Only need blank line before file overview if it is not the beginning + # of the file, e.g. copyright is first. + if is_file_level_comment and blank_lines == 0 and block_start.previous: error_message = 'Should have a blank line before a file overview.' expected_blank_lines = 1 elif is_constructor and blank_lines != 3: - error_message = ('Should have 3 blank lines before a constructor/' - 'interface.') + error_message = ( + 'Should have 3 blank lines before a constructor/interface.') expected_blank_lines = 3 - elif not is_file_overview and not is_constructor and blank_lines != 2: + elif (not is_file_level_comment and not is_constructor and + blank_lines != 2): error_message = 'Should have 2 blank lines between top-level blocks.' expected_blank_lines = 2 if error_message: - self._HandleError(errors.WRONG_BLANK_LINE_COUNT, error_message, - block_start, Position.AtBeginning(), - expected_blank_lines - blank_lines) + self._HandleError( + errors.WRONG_BLANK_LINE_COUNT, error_message, + block_start, position=Position.AtBeginning(), + fix_data=expected_blank_lines - blank_lines) - elif type == Type.END_BLOCK: + elif token.type == Type.END_BLOCK: if state.InFunction() and state.IsFunctionClose(): is_immediately_called = (token.next and token.next.type == Type.START_PAREN) @@ -202,44 +283,88 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules): self._HandleError( errors.MISSING_RETURN_DOCUMENTATION, 'Missing @return JsDoc in function with non-trivial return', - function.doc.end_token, Position.AtBeginning()) - elif (not function.has_return and function.doc and + function.doc.end_token, position=Position.AtBeginning()) + elif (not function.has_return and + not function.has_throw and + function.doc and function.doc.HasFlag('return') and not state.InInterfaceMethod()): - return_flag = function.doc.GetFlag('return') - if (return_flag.type is None or ( - 'undefined' not in return_flag.type and - 'void' not in return_flag.type and - '*' not in return_flag.type)): + flag = function.doc.GetFlag('return') + valid_no_return_names = ['undefined', 'void', '*'] + invalid_return = flag.jstype is None or not any( + sub_type.identifier in valid_no_return_names + for sub_type in flag.jstype.IterTypeGroup()) + + if invalid_return: self._HandleError( errors.UNNECESSARY_RETURN_DOCUMENTATION, 'Found @return JsDoc on function that returns nothing', - return_flag.flag_token, Position.AtBeginning()) + flag.flag_token, position=Position.AtBeginning()) + + # b/4073735. Method in object literal definition of prototype can + # safely reference 'this'. 
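The blank-line rule distinguishes three cases: a file-level comment is only flagged when it has no preceding blank line and does not open the file, a constructor/interface comment needs exactly three blank lines, and any other top-level block needs two. A compact sketch of that decision over plain counts:

def check_blank_lines(blank_lines, is_file_level_comment, is_constructor,
                      is_file_start=False):
    """Returns an error message or None, mirroring the three cases above."""
    if is_file_level_comment:
        # Only flagged when nothing blank separates it from what precedes it
        # (e.g. a copyright header) and it is not the first thing in the file.
        if blank_lines == 0 and not is_file_start:
            return 'should have a blank line before a file overview'
    elif is_constructor:
        if blank_lines != 3:
            return 'should have 3 blank lines before a constructor/interface'
    elif blank_lines != 2:
        return 'should have 2 blank lines between top-level blocks'
    return None


if __name__ == '__main__':
    print(check_blank_lines(2, False, True))    # constructor wants 3
    print(check_blank_lines(0, True, False))    # file overview needs 1
    print(check_blank_lines(2, False, False))   # OK -> None
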
+ prototype_object_literal = False + block_start = None + previous_code = None + previous_previous_code = None + + # Search for cases where prototype is defined as object literal. + # previous_previous_code + # | previous_code + # | | block_start + # | | | + # a.b.prototype = { + # c : function() { + # this.d = 1; + # } + # } + + # If in object literal, find first token of block so to find previous + # tokens to check above condition. + if state.InObjectLiteral(): + block_start = state.GetCurrentBlockStart() + + # If an object literal then get previous token (code type). For above + # case it should be '='. + if block_start: + previous_code = tokenutil.SearchExcept(block_start, + Type.NON_CODE_TYPES, + reverse=True) + + # If previous token to block is '=' then get its previous token. + if previous_code and previous_code.IsOperator('='): + previous_previous_code = tokenutil.SearchExcept(previous_code, + Type.NON_CODE_TYPES, + reverse=True) + + # If variable/token before '=' ends with '.prototype' then its above + # case of prototype defined with object literal. + prototype_object_literal = (previous_previous_code and + previous_previous_code.string.endswith( + '.prototype')) - if state.InFunction() and state.IsFunctionClose(): - is_immediately_called = (token.next and - token.next.type == Type.START_PAREN) if (function.has_this and function.doc and not function.doc.HasFlag('this') and not function.is_constructor and not function.is_interface and - '.prototype.' not in function.name): + '.prototype.' not in function.name and + not prototype_object_literal): self._HandleError( errors.MISSING_JSDOC_TAG_THIS, 'Missing @this JsDoc in function referencing "this". (' 'this usually means you are trying to reference "this" in ' 'a static function, or you have forgotten to mark a ' 'constructor with @constructor)', - function.doc.end_token, Position.AtBeginning()) + function.doc.end_token, position=Position.AtBeginning()) - elif type == Type.IDENTIFIER: + elif token.type == Type.IDENTIFIER: if token.string == 'goog.inherits' and not state.InFunction(): if state.GetLastNonSpaceToken().line_number == token.line_number: self._HandleError( errors.MISSING_LINE, 'Missing newline between constructor and goog.inherits', token, - Position.AtBeginning()) + position=Position.AtBeginning()) extra_space = state.GetLastNonSpaceToken().next while extra_space != token: @@ -253,7 +378,92 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules): # TODO(robbyw): Test the last function was a constructor. # TODO(robbyw): Test correct @extends and @implements documentation. - elif type == Type.OPERATOR: + elif (token.string == 'goog.provide' and + not state.InFunction() and + namespaces_info is not None): + namespace = tokenutil.GetStringAfterToken(token) + + # Report extra goog.provide statement. + if not namespace or namespaces_info.IsExtraProvide(token): + if not namespace: + msg = 'Empty namespace in goog.provide' + else: + msg = 'Unnecessary goog.provide: ' + namespace + + # Hint to user if this is a Test namespace. + if namespace.endswith('Test'): + msg += (' *Test namespaces must be mentioned in the ' + 'goog.setTestOnly() call') + + self._HandleError( + errors.EXTRA_GOOG_PROVIDE, + msg, + token, position=Position.AtBeginning()) + + if namespaces_info.IsLastProvide(token): + # Report missing provide statements after the last existing provide. 
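goog.provide statements are flagged when empty or when the file never defines the named namespace, with an extra hint for *Test namespaces. A simplified sketch of that classification; the created_namespaces set stands in for the bookkeeping ClosurizedNamespacesInfo does over the token stream:

def check_provide(namespace, created_namespaces):
    """Flags empty or unnecessary goog.provide statements."""
    if not namespace:
        return 'Empty namespace in goog.provide'
    if namespace not in created_namespaces:
        msg = 'Unnecessary goog.provide: ' + namespace
        if namespace.endswith('Test'):
            msg += (' *Test namespaces must be mentioned in the '
                    'goog.setTestOnly() call')
        return msg
    return None


if __name__ == '__main__':
    created = {'dummy.foo', 'dummy.foo.Bar'}
    print(check_provide('dummy.foo', created))         # None, OK
    print(check_provide('dummy.unusedTest', created))  # unnecessary + Test hint
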
+ missing_provides = namespaces_info.GetMissingProvides() + if missing_provides: + self._ReportMissingProvides( + missing_provides, + tokenutil.GetLastTokenInSameLine(token).next, + False) + + # If there are no require statements, missing requires should be + # reported after the last provide. + if not namespaces_info.GetRequiredNamespaces(): + missing_requires, illegal_alias_statements = ( + namespaces_info.GetMissingRequires()) + if missing_requires: + self._ReportMissingRequires( + missing_requires, + tokenutil.GetLastTokenInSameLine(token).next, + True) + if illegal_alias_statements: + self._ReportIllegalAliasStatement(illegal_alias_statements) + + elif (token.string == 'goog.require' and + not state.InFunction() and + namespaces_info is not None): + namespace = tokenutil.GetStringAfterToken(token) + + # If there are no provide statements, missing provides should be + # reported before the first require. + if (namespaces_info.IsFirstRequire(token) and + not namespaces_info.GetProvidedNamespaces()): + missing_provides = namespaces_info.GetMissingProvides() + if missing_provides: + self._ReportMissingProvides( + missing_provides, + tokenutil.GetFirstTokenInSameLine(token), + True) + + # Report extra goog.require statement. + if not namespace or namespaces_info.IsExtraRequire(token): + if not namespace: + msg = 'Empty namespace in goog.require' + else: + msg = 'Unnecessary goog.require: ' + namespace + + self._HandleError( + errors.EXTRA_GOOG_REQUIRE, + msg, + token, position=Position.AtBeginning()) + + # Report missing goog.require statements. + if namespaces_info.IsLastRequire(token): + missing_requires, illegal_alias_statements = ( + namespaces_info.GetMissingRequires()) + if missing_requires: + self._ReportMissingRequires( + missing_requires, + tokenutil.GetLastTokenInSameLine(token).next, + False) + if illegal_alias_statements: + self._ReportIllegalAliasStatement(illegal_alias_statements) + + elif token.type == Type.OPERATOR: + last_in_line = token.IsLastInLine() # If the token is unary and appears to be used in a unary context # it's ok. Otherwise, if it's at the end of the line or immediately # before a comment, it's ok. @@ -262,15 +472,18 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules): if (not token.metadata.IsUnaryOperator() and not last_in_line and not token.next.IsComment() and not token.next.IsOperator(',') - and not token.next.type in (Type.WHITESPACE, Type.END_PAREN, + and not tokenutil.IsDot(token) + and token.next.type not in (Type.WHITESPACE, Type.END_PAREN, Type.END_BRACKET, Type.SEMICOLON, Type.START_BRACKET)): self._HandleError( errors.MISSING_SPACE, 'Missing space after "%s"' % token.string, token, - Position.AtEnd(token.string)) - elif type == Type.WHITESPACE: + position=Position.AtEnd(token.string)) + elif token.type == Type.WHITESPACE: + first_in_line = token.IsFirstInLine() + last_in_line = token.IsLastInLine() # Check whitespace length if it's not the first token of the line and # if it's not immediately before a comment. 
if not last_in_line and not first_in_line and not token.next.IsComment(): @@ -282,114 +495,260 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules): errors.EXTRA_SPACE, 'Extra space after "%s"' % token.previous.string, token, - Position.All(token.string)) + position=Position.All(token.string)) + elif token.type == Type.SEMICOLON: + previous_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, + reverse=True) + if not previous_token: + self._HandleError( + errors.REDUNDANT_SEMICOLON, + 'Semicolon without any statement', + token, + position=Position.AtEnd(token.string)) + elif (previous_token.type == Type.KEYWORD and + previous_token.string not in ['break', 'continue', 'return']): + self._HandleError( + errors.REDUNDANT_SEMICOLON, + ('Semicolon after \'%s\' without any statement.' + ' Looks like an error.' % previous_token.string), + token, + position=Position.AtEnd(token.string)) - def Finalize(self, state, tokenizer_mode): - """Perform all checks that need to occur after all lines are processed.""" - # Call the base class's Finalize function. - super(JavaScriptLintRules, self).Finalize(state, tokenizer_mode) - - # Check for sorted requires statements. - goog_require_tokens = state.GetGoogRequireTokens() - requires = [require_token.string for require_token in goog_require_tokens] - sorted_requires = sorted(requires) - index = 0 - bad = False - for item in requires: - if item != sorted_requires[index]: - bad = True + def _CheckUnusedLocalVariables(self, token, state): + """Checks for unused local variables in function blocks. + + Args: + token: The token to check. + state: The state tracker. + """ + # We don't use state.InFunction because that disregards scope functions. + in_function = state.FunctionDepth() > 0 + if token.type == Type.SIMPLE_LVALUE or token.type == Type.IDENTIFIER: + if in_function: + identifier = token.string + # Check whether the previous token was var. + previous_code_token = tokenutil.CustomSearch( + token, + lambda t: t.type not in Type.NON_CODE_TYPES, + reverse=True) + if previous_code_token and previous_code_token.IsKeyword('var'): + # Add local variable declaration to the top of the unused locals + # stack. + self._unused_local_variables_by_scope[-1][identifier] = token + elif token.type == Type.IDENTIFIER: + # This covers most cases where the variable is used as an identifier. + self._MarkLocalVariableUsed(token.string) + elif token.type == Type.SIMPLE_LVALUE and '.' in identifier: + # This covers cases where a value is assigned to a property of the + # variable. + self._MarkLocalVariableUsed(token.string) + elif token.type == Type.START_BLOCK: + if in_function and state.IsFunctionOpen(): + # Push a new map onto the stack + self._unused_local_variables_by_scope.append({}) + elif token.type == Type.END_BLOCK: + if state.IsFunctionClose(): + # Pop the stack and report any remaining locals as unused. + unused_local_variables = self._unused_local_variables_by_scope.pop() + for unused_token in unused_local_variables.values(): + self._HandleError( + errors.UNUSED_LOCAL_VARIABLE, + 'Unused local variable: %s.' % unused_token.string, + unused_token) + elif token.type == Type.DOC_FLAG: + # Flags that use aliased symbols should be counted. + flag = token.attached_object + js_type = flag and flag.jstype + if flag and flag.flag_type in state.GetDocFlag().HAS_TYPE and js_type: + self._MarkAliasUsed(js_type) + + def _MarkAliasUsed(self, js_type): + """Marks aliases in a type as used. 
+ + Recursively iterates over all subtypes in a jsdoc type annotation and + tracks usage of aliased symbols (which may be local variables). + Marks the local variable as used in the scope nearest to the current + scope that matches the given token. + + Args: + js_type: The jsdoc type, a typeannotation.TypeAnnotation object. + """ + if js_type.alias: + self._MarkLocalVariableUsed(js_type.identifier) + for sub_type in js_type.IterTypes(): + self._MarkAliasUsed(sub_type) + + def _MarkLocalVariableUsed(self, identifier): + """Marks the local variable as used in the relevant scope. + + Marks the local variable in the scope nearest to the current scope that + matches the given identifier as used. + + Args: + identifier: The identifier representing the potential usage of a local + variable. + """ + identifier = identifier.split('.', 1)[0] + # Find the first instance of the identifier in the stack of function scopes + # and mark it used. + for unused_local_variables in reversed( + self._unused_local_variables_by_scope): + if identifier in unused_local_variables: + del unused_local_variables[identifier] break - index += 1 - if bad: + def _ReportMissingProvides(self, missing_provides, token, need_blank_line): + """Reports missing provide statements to the error handler. + + Args: + missing_provides: A dictionary of string(key) and integer(value) where + each string(key) is a namespace that should be provided, but is not + and integer(value) is first line number where it's required. + token: The token where the error was detected (also where the new provides + will be inserted. + need_blank_line: Whether a blank line needs to be inserted after the new + provides are inserted. May be True, False, or None, where None + indicates that the insert location is unknown. + """ + + missing_provides_msg = 'Missing the following goog.provide statements:\n' + missing_provides_msg += '\n'.join(['goog.provide(\'%s\');' % x for x in + sorted(missing_provides)]) + missing_provides_msg += '\n' + + missing_provides_msg += '\nFirst line where provided: \n' + missing_provides_msg += '\n'.join( + [' %s : line %d' % (x, missing_provides[x]) for x in + sorted(missing_provides)]) + missing_provides_msg += '\n' + + self._HandleError( + errors.MISSING_GOOG_PROVIDE, + missing_provides_msg, + token, position=Position.AtBeginning(), + fix_data=(missing_provides.keys(), need_blank_line)) + + def _ReportMissingRequires(self, missing_requires, token, need_blank_line): + """Reports missing require statements to the error handler. + + Args: + missing_requires: A dictionary of string(key) and integer(value) where + each string(key) is a namespace that should be required, but is not + and integer(value) is first line number where it's required. + token: The token where the error was detected (also where the new requires + will be inserted. + need_blank_line: Whether a blank line needs to be inserted before the new + requires are inserted. May be True, False, or None, where None + indicates that the insert location is unknown. 
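_CheckUnusedLocalVariables keeps a stack of per-function dictionaries: a 'var' declaration adds an entry, a later use of the identifier removes it from the nearest enclosing scope, and whatever is left when the function closes is reported. A self-contained sketch replaying that bookkeeping over a simplified event stream (the event tuples stand in for the real token walk):

def find_unused_locals(events):
    """Reports local variables that are declared but never used.

    events is a list of ('enter', None), ('exit', None), ('declare', name)
    or ('use', name) tuples.
    """
    scopes = []          # stack of {identifier: declaration site}
    unused = []

    for kind, name in events:
        if kind == 'enter':
            scopes.append({})
        elif kind == 'declare':
            scopes[-1][name] = name
        elif kind == 'use':
            base = name.split('.', 1)[0]
            # Mark used in the nearest enclosing scope that declared it.
            for scope in reversed(scopes):
                if base in scope:
                    del scope[base]
                    break
        elif kind == 'exit':
            unused.extend(scopes.pop().keys())

    return unused


if __name__ == '__main__':
    demo = [('enter', None), ('declare', 'a'), ('declare', 'b'),
            ('use', 'a.length'), ('exit', None)]
    print(find_unused_locals(demo))   # ['b']
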
+ """ + + missing_requires_msg = 'Missing the following goog.require statements:\n' + missing_requires_msg += '\n'.join(['goog.require(\'%s\');' % x for x in + sorted(missing_requires)]) + missing_requires_msg += '\n' + + missing_requires_msg += '\nFirst line where required: \n' + missing_requires_msg += '\n'.join( + [' %s : line %d' % (x, missing_requires[x]) for x in + sorted(missing_requires)]) + missing_requires_msg += '\n' + + self._HandleError( + errors.MISSING_GOOG_REQUIRE, + missing_requires_msg, + token, position=Position.AtBeginning(), + fix_data=(missing_requires.keys(), need_blank_line)) + + def _ReportIllegalAliasStatement(self, illegal_alias_statements): + """Reports alias statements that would need a goog.require.""" + for namespace, token in illegal_alias_statements.iteritems(): self._HandleError( - errors.GOOG_REQUIRES_NOT_ALPHABETIZED, - 'goog.require classes must be alphabetized. The correct code is:\n' + - '\n'.join(map(lambda x: 'goog.require(\'%s\');' % x, - sorted_requires)), - goog_require_tokens[index], - position=Position.AtBeginning(), - fix_data=goog_require_tokens) - - # Check for sorted provides statements. - goog_provide_tokens = state.GetGoogProvideTokens() - provides = [provide_token.string for provide_token in goog_provide_tokens] - sorted_provides = sorted(provides) - index = 0 - bad = False - for item in provides: - if item != sorted_provides[index]: - bad = True - break - index += 1 + errors.ALIAS_STMT_NEEDS_GOOG_REQUIRE, + 'The alias definition would need the namespace \'%s\' which is not ' + 'required through any other symbol.' % namespace, + token, position=Position.AtBeginning()) - if bad: + def Finalize(self, state): + """Perform all checks that need to occur after all lines are processed.""" + # Call the base class's Finalize function. + super(JavaScriptLintRules, self).Finalize(state) + + if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS): + # Report an error for any declared private member that was never used. + unused_private_members = (self._declared_private_members - + self._used_private_members) + + for variable in unused_private_members: + token = self._declared_private_member_tokens[variable] + self._HandleError(errors.UNUSED_PRIVATE_MEMBER, + 'Unused private member: %s.' % token.string, + token) + + # Clear state to prepare for the next file. + self._declared_private_member_tokens = {} + self._declared_private_members = set() + self._used_private_members = set() + + namespaces_info = self._namespaces_info + if namespaces_info is not None: + # If there are no provide or require statements, missing provides and + # requires should be reported on line 1. + if (not namespaces_info.GetProvidedNamespaces() and + not namespaces_info.GetRequiredNamespaces()): + missing_provides = namespaces_info.GetMissingProvides() + if missing_provides: + self._ReportMissingProvides( + missing_provides, state.GetFirstToken(), None) + + missing_requires, illegal_alias = namespaces_info.GetMissingRequires() + if missing_requires: + self._ReportMissingRequires( + missing_requires, state.GetFirstToken(), None) + if illegal_alias: + self._ReportIllegalAliasStatement(illegal_alias) + + self._CheckSortedRequiresProvides(state.GetFirstToken()) + + def _CheckSortedRequiresProvides(self, token): + """Checks that all goog.require and goog.provide statements are sorted. + + Note that this method needs to be run after missing statements are added to + preserve alphabetical order. + + Args: + token: The first token in the token stream. 
+ """ + sorter = requireprovidesorter.RequireProvideSorter() + first_provide_token = sorter.CheckProvides(token) + if first_provide_token: + new_order = sorter.GetFixedProvideString(first_provide_token) self._HandleError( errors.GOOG_PROVIDES_NOT_ALPHABETIZED, 'goog.provide classes must be alphabetized. The correct code is:\n' + - '\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x, - sorted_provides)), - goog_provide_tokens[index], + new_order, + first_provide_token, position=Position.AtBeginning(), - fix_data=goog_provide_tokens) + fix_data=first_provide_token) - if FLAGS.closurized_namespaces: - # Check that we provide everything we need. - provided_namespaces = state.GetProvidedNamespaces() - missing_provides = provided_namespaces - set(provides) - if missing_provides: - self._HandleError( - errors.MISSING_GOOG_PROVIDE, - 'Missing the following goog.provide statements:\n' + - '\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x, - sorted(missing_provides))), - state.GetFirstToken(), position=Position.AtBeginning(), - fix_data=missing_provides) - - # Compose a set of all available namespaces. Explicitly omit goog - # because if you can call goog.require, you already have goog. - available_namespaces = (set(requires) | set(provides) | set(['goog']) | - provided_namespaces) - - # Check that we require everything we need. - missing_requires = set() - for namespace_variants in state.GetUsedNamespaces(): - # Namespace variants is a list of potential things to require. If we - # find we're missing one, we are lazy and choose to require the first - # in the sequence - which should be the namespace. - if not set(namespace_variants) & available_namespaces: - missing_requires.add(namespace_variants[0]) - - if missing_requires: - self._HandleError( - errors.MISSING_GOOG_REQUIRE, - 'Missing the following goog.require statements:\n' + - '\n'.join(map(lambda x: 'goog.require(\'%s\');' % x, - sorted(missing_requires))), - state.GetFirstToken(), position=Position.AtBeginning(), - fix_data=missing_requires) - - # Check that we don't require things we don't actually use. - namespace_variants = state.GetUsedNamespaces() - used_namespaces = set() - for a, b in namespace_variants: - used_namespaces.add(a) - used_namespaces.add(b) - - extra_requires = set() - for i in requires: - baseNamespace = i.split('.')[0] - if (i not in used_namespaces and - baseNamespace in FLAGS.closurized_namespaces and - i not in FLAGS.ignored_extra_namespaces): - extra_requires.add(i) - - if extra_requires: - self._HandleError( - errors.EXTRA_GOOG_REQUIRE, - 'The following goog.require statements appear unnecessary:\n' + - '\n'.join(map(lambda x: 'goog.require(\'%s\');' % x, - sorted(extra_requires))), - state.GetFirstToken(), position=Position.AtBeginning(), - fix_data=extra_requires) + first_require_token = sorter.CheckRequires(token) + if first_require_token: + new_order = sorter.GetFixedRequireString(first_require_token) + self._HandleError( + errors.GOOG_REQUIRES_NOT_ALPHABETIZED, + 'goog.require classes must be alphabetized. The correct code is:\n' + + new_order, + first_require_token, + position=Position.AtBeginning(), + fix_data=first_require_token) + def GetLongLineExceptions(self): + """Gets a list of regexps for lines which can be longer than the limit. + + Returns: + A list of regexps, used as matches (rather than searches). 
+ """ + return [ + re.compile(r'(var .+\s*=\s*)?goog\.require\(.+\);?\s*$'), + re.compile(r'goog\.(provide|module|setTestOnly)\(.+\);?\s*$'), + re.compile(r'[\s/*]*@visibility\s*{.*}[\s*/]*$'), + ] diff --git a/tools/closure_linter/closure_linter/javascriptstatetracker.py b/tools/closure_linter/closure_linter/javascriptstatetracker.py index 9cce376..e0a42f6 100755 --- a/tools/closure_linter/closure_linter/javascriptstatetracker.py +++ b/tools/closure_linter/closure_linter/javascriptstatetracker.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -36,7 +35,8 @@ class JsDocFlag(statetracker.DocFlag): including braces. type_end_token: The last token specifying the flag JS type, including braces. - type: The JavaScript type spec. + type: The type spec string. + jstype: The type spec, a TypeAnnotation instance. name_token: The token specifying the flag name. name: The flag name description_start_token: The first token in the description. @@ -50,18 +50,10 @@ class JsDocFlag(statetracker.DocFlag): # TODO(robbyw): determine which of these, if any, should be illegal. EXTENDED_DOC = frozenset([ 'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link', - 'protected', 'notypecheck', 'throws']) + 'meaning', 'provideGoog', 'throws']) LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC - def __init__(self, flag_token): - """Creates the JsDocFlag object and attaches it to the given start token. - - Args: - flag_token: The starting token of the flag. - """ - statetracker.DocFlag.__init__(self, flag_token) - class JavaScriptStateTracker(statetracker.StateTracker): """JavaScript state tracker. @@ -70,25 +62,15 @@ class JavaScriptStateTracker(statetracker.StateTracker): functionality needed for JavaScript. """ - def __init__(self, closurized_namespaces=''): - """Initializes a JavaScript token stream state tracker. - - Args: - closurized_namespaces: An optional list of namespace prefixes used for - testing of goog.provide/require. - """ + def __init__(self): + """Initializes a JavaScript token stream state tracker.""" statetracker.StateTracker.__init__(self, JsDocFlag) - self.__closurized_namespaces = closurized_namespaces def Reset(self): - """Resets the state tracker to prepare for processing a new page.""" + self._scope_depth = 0 + self._block_stack = [] super(JavaScriptStateTracker, self).Reset() - self.__goog_require_tokens = [] - self.__goog_provide_tokens = [] - self.__provided_namespaces = set() - self.__used_namespaces = [] - def InTopLevel(self): """Compute whether we are at the top level in the class. @@ -100,23 +82,26 @@ class JavaScriptStateTracker(statetracker.StateTracker): Returns: Whether we are at the top level in the class. """ - return not self.InParentheses() + return self._scope_depth == self.ParenthesesDepth() + + def InFunction(self): + """Returns true if the current token is within a function. - def GetGoogRequireTokens(self): - """Returns list of require tokens.""" - return self.__goog_require_tokens + This js-specific override ignores goog.scope functions. - def GetGoogProvideTokens(self): - """Returns list of provide tokens.""" - return self.__goog_provide_tokens + Returns: + True if the current token is within a function. 
+ """ + return self._scope_depth != self.FunctionDepth() - def GetProvidedNamespaces(self): - """Returns list of provided namespaces.""" - return self.__provided_namespaces + def InNonScopeBlock(self): + """Compute whether we are nested within a non-goog.scope block. - def GetUsedNamespaces(self): - """Returns list of used namespaces, is a list of sequences.""" - return self.__used_namespaces + Returns: + True if the token is not enclosed in a block that does not originate from + a goog.scope statement. False otherwise. + """ + return self._scope_depth != self.BlockDepth() def GetBlockType(self, token): """Determine the block type given a START_BLOCK token. @@ -128,111 +113,38 @@ class JavaScriptStateTracker(statetracker.StateTracker): Returns: Code block type for current token. """ - last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None, - True) + last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, reverse=True) if last_code.type in (Type.END_PARAMETERS, Type.END_PAREN, Type.KEYWORD) and not last_code.IsKeyword('return'): return self.CODE else: return self.OBJECT_LITERAL + def GetCurrentBlockStart(self): + """Gets the start token of current block. + + Returns: + Starting token of current block. None if not in block. + """ + if self._block_stack: + return self._block_stack[-1] + else: + return None + def HandleToken(self, token, last_non_space_token): """Handles the given token and updates state. Args: token: The token to handle. - last_non_space_token: + last_non_space_token: The last non space token encountered """ + if token.type == Type.START_BLOCK: + self._block_stack.append(token) + if token.type == Type.IDENTIFIER and token.string == 'goog.scope': + self._scope_depth += 1 + if token.type == Type.END_BLOCK: + start_token = self._block_stack.pop() + if tokenutil.GoogScopeOrNoneFromStartBlock(start_token): + self._scope_depth -= 1 super(JavaScriptStateTracker, self).HandleToken(token, last_non_space_token) - - if token.IsType(Type.IDENTIFIER): - if token.string == 'goog.require': - class_token = tokenutil.Search(token, Type.STRING_TEXT) - self.__goog_require_tokens.append(class_token) - - elif token.string == 'goog.provide': - class_token = tokenutil.Search(token, Type.STRING_TEXT) - self.__goog_provide_tokens.append(class_token) - - elif self.__closurized_namespaces: - self.__AddUsedNamespace(token.string) - if token.IsType(Type.SIMPLE_LVALUE) and not self.InFunction(): - identifier = token.values['identifier'] - - if self.__closurized_namespaces: - namespace = self.GetClosurizedNamespace(identifier) - if namespace and identifier == namespace: - self.__provided_namespaces.add(namespace) - if (self.__closurized_namespaces and - token.IsType(Type.DOC_FLAG) and - token.attached_object.flag_type == 'implements'): - # Interfaces should be goog.require'd. - doc_start = tokenutil.Search(token, Type.DOC_START_BRACE) - interface = tokenutil.Search(doc_start, Type.COMMENT) - self.__AddUsedNamespace(interface.string) - - def __AddUsedNamespace(self, identifier): - """Adds the namespace of an identifier to the list of used namespaces. - - Args: - identifier: An identifier which has been used. - """ - namespace = self.GetClosurizedNamespace(identifier) - - if namespace: - # We add token.string as a 'namespace' as it is something that could - # potentially be provided to satisfy this dependency. - self.__used_namespaces.append([namespace, identifier]) - - def GetClosurizedNamespace(self, identifier): - """Given an identifier, returns the namespace that identifier is from. 
- - Args: - identifier: The identifier to extract a namespace from. - - Returns: - The namespace the given identifier resides in, or None if one could not - be found. - """ - parts = identifier.split('.') - for part in parts: - if part.endswith('_'): - # Ignore private variables / inner classes. - return None - - if identifier.startswith('goog.global'): - # Ignore goog.global, since it is, by definition, global. - return None - - for namespace in self.__closurized_namespaces: - if identifier.startswith(namespace + '.'): - last_part = parts[-1] - if not last_part: - # TODO(robbyw): Handle this: it's a multi-line identifier. - return None - - if last_part in ('apply', 'inherits', 'call'): - # Calling one of Function's methods usually indicates use of a - # superclass. - parts.pop() - last_part = parts[-1] - - for i in xrange(1, len(parts)): - part = parts[i] - if part.isupper(): - # If an identifier is of the form foo.bar.BAZ.x or foo.bar.BAZ, - # the namespace is foo.bar. - return '.'.join(parts[:i]) - if part == 'prototype': - # If an identifier is of the form foo.bar.prototype.x, the - # namespace is foo.bar. - return '.'.join(parts[:i]) - - if last_part.isupper() or not last_part[0].isupper(): - # Strip off the last part of an enum or constant reference. - parts.pop() - - return '.'.join(parts) - - return None diff --git a/tools/closure_linter/closure_linter/javascriptstatetracker_test.py b/tools/closure_linter/closure_linter/javascriptstatetracker_test.py old mode 100755 new mode 100644 index e4288b7..76dabd2 --- a/tools/closure_linter/closure_linter/javascriptstatetracker_test.py +++ b/tools/closure_linter/closure_linter/javascriptstatetracker_test.py @@ -1,7 +1,6 @@ #!/usr/bin/env python # -# Copyright 2010 The Closure Linter Authors. All Rights Reserved. -# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -14,40 +13,266 @@ # See the License for the specific language governing permissions and # limitations under the License. 
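The reworked JavaScriptStateTracker above replaces the old namespace bookkeeping with goog.scope depth tracking: blocks opened by goog.scope raise a scope depth that InFunction/InNonScopeBlock compare against the ordinary block depth. A toy stand-in for that state machine, with a simplified (kind, text) token shape:

class ScopeDepthTracker(object):
    """Tracks how deep we are inside goog.scope(function() { ... }) blocks."""

    def __init__(self):
        self._scope_depth = 0
        self._block_stack = []          # True for goog.scope blocks

    def handle(self, token):
        kind, text = token              # e.g. ('START_BLOCK', 'goog.scope')
        if kind == 'START_BLOCK':
            is_scope = text == 'goog.scope'
            self._block_stack.append(is_scope)
            if is_scope:
                self._scope_depth += 1
        elif kind == 'END_BLOCK':
            if self._block_stack.pop():
                self._scope_depth -= 1

    def in_non_scope_block(self, block_depth):
        # Mirrors InNonScopeBlock: only goog.scope wrappers don't count.
        return self._scope_depth != block_depth


if __name__ == '__main__':
    tracker = ScopeDepthTracker()
    tracker.handle(('START_BLOCK', 'goog.scope'))
    print(tracker.in_non_scope_block(block_depth=1))   # False: only goog.scope
    tracker.handle(('START_BLOCK', 'function body'))
    print(tracker.in_non_scope_block(block_depth=2))   # True: inside a real block
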
-"""Unit tests for JavaScriptStateTracker.""" +"""Unit tests for the javascriptstatetracker module.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header +__author__ = ('nnaze@google.com (Nathan Naze)') import unittest as googletest -from closure_linter import javascriptstatetracker - -class JavaScriptStateTrackerTest(googletest.TestCase): - - __test_cases = { - 'package.CONSTANT' : 'package', - 'package.methodName' : 'package', - 'package.subpackage.methodName' : 'package.subpackage', - 'package.ClassName.something' : 'package.ClassName', - 'package.ClassName.Enum.VALUE.methodName' : 'package.ClassName.Enum', - 'package.ClassName.CONSTANT' : 'package.ClassName', - 'package.ClassName.inherits' : 'package.ClassName', - 'package.ClassName.apply' : 'package.ClassName', - 'package.ClassName.methodName.apply' : 'package.ClassName', - 'package.ClassName.methodName.call' : 'package.ClassName', - 'package.ClassName.prototype.methodName' : 'package.ClassName', - 'package.ClassName.privateMethod_' : None, - 'package.ClassName.prototype.methodName.apply' : 'package.ClassName' - } - - def testGetClosurizedNamespace(self): - stateTracker = javascriptstatetracker.JavaScriptStateTracker(['package']) - for identifier, expected_namespace in self.__test_cases.items(): - actual_namespace = stateTracker.GetClosurizedNamespace(identifier) - self.assertEqual(expected_namespace, actual_namespace, - 'expected namespace "' + str(expected_namespace) + - '" for identifier "' + str(identifier) + '" but was "' + - str(actual_namespace) + '"') + +from closure_linter import javascripttokens +from closure_linter import testutil +from closure_linter import tokenutil + + +_FUNCTION_SCRIPT = """\ +var a = 3; + +function foo(aaa, bbb, ccc) { + var b = 4; +} + + +/** + * JSDoc comment. + */ +var bar = function(ddd, eee, fff) { + +}; + + +/** + * Verify that nested functions get their proper parameters recorded. + */ +var baz = function(ggg, hhh, iii) { + var qux = function(jjj, kkk, lll) { + }; + // make sure that entering a new block does not change baz' parameters. 
+ {}; +}; + +""" + + +class FunctionTest(googletest.TestCase): + + def testFunctionParse(self): + functions, _ = testutil.ParseFunctionsAndComments(_FUNCTION_SCRIPT) + self.assertEquals(4, len(functions)) + + # First function + function = functions[0] + self.assertEquals(['aaa', 'bbb', 'ccc'], function.parameters) + + start_token = function.start_token + end_token = function.end_token + + self.assertEquals( + javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION, + function.start_token.type) + + self.assertEquals('function', start_token.string) + self.assertEquals(3, start_token.line_number) + self.assertEquals(0, start_token.start_index) + + self.assertEquals('}', end_token.string) + self.assertEquals(5, end_token.line_number) + self.assertEquals(0, end_token.start_index) + + self.assertEquals('foo', function.name) + + self.assertIsNone(function.doc) + + # Second function + function = functions[1] + self.assertEquals(['ddd', 'eee', 'fff'], function.parameters) + + start_token = function.start_token + end_token = function.end_token + + self.assertEquals( + javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION, + function.start_token.type) + + self.assertEquals('function', start_token.string) + self.assertEquals(11, start_token.line_number) + self.assertEquals(10, start_token.start_index) + + self.assertEquals('}', end_token.string) + self.assertEquals(13, end_token.line_number) + self.assertEquals(0, end_token.start_index) + + self.assertEquals('bar', function.name) + + self.assertIsNotNone(function.doc) + + # Check function JSDoc + doc = function.doc + doc_tokens = tokenutil.GetTokenRange(doc.start_token, doc.end_token) + + comment_type = javascripttokens.JavaScriptTokenType.COMMENT + comment_tokens = filter(lambda t: t.type is comment_type, doc_tokens) + + self.assertEquals('JSDoc comment.', + tokenutil.TokensToString(comment_tokens).strip()) + + # Third function + function = functions[2] + self.assertEquals(['ggg', 'hhh', 'iii'], function.parameters) + + start_token = function.start_token + end_token = function.end_token + + self.assertEquals( + javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION, + function.start_token.type) + + self.assertEquals('function', start_token.string) + self.assertEquals(19, start_token.line_number) + self.assertEquals(10, start_token.start_index) + + self.assertEquals('}', end_token.string) + self.assertEquals(24, end_token.line_number) + self.assertEquals(0, end_token.start_index) + + self.assertEquals('baz', function.name) + self.assertIsNotNone(function.doc) + + # Fourth function (inside third function) + function = functions[3] + self.assertEquals(['jjj', 'kkk', 'lll'], function.parameters) + + start_token = function.start_token + end_token = function.end_token + + self.assertEquals( + javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION, + function.start_token.type) + + self.assertEquals('function', start_token.string) + self.assertEquals(20, start_token.line_number) + self.assertEquals(12, start_token.start_index) + + self.assertEquals('}', end_token.string) + self.assertEquals(21, end_token.line_number) + self.assertEquals(2, end_token.start_index) + + self.assertEquals('qux', function.name) + self.assertIsNone(function.doc) + + + +class CommentTest(googletest.TestCase): + + def testGetDescription(self): + comment = self._ParseComment(""" + /** + * Comment targeting goog.foo. + * + * This is the second line. + * @param {number} foo The count of foo. 
+ */ + target;""") + + self.assertEqual( + 'Comment targeting goog.foo.\n\nThis is the second line.', + comment.description) + + def testCommentGetTarget(self): + self.assertCommentTarget('goog.foo', """ + /** + * Comment targeting goog.foo. + */ + goog.foo = 6; + """) + + self.assertCommentTarget('bar', """ + /** + * Comment targeting bar. + */ + var bar = "Karate!"; + """) + + self.assertCommentTarget('doThing', """ + /** + * Comment targeting doThing. + */ + function doThing() {}; + """) + + self.assertCommentTarget('this.targetProperty', """ + goog.bar.Baz = function() { + /** + * Comment targeting targetProperty. + */ + this.targetProperty = 3; + }; + """) + + self.assertCommentTarget('goog.bar.prop', """ + /** + * Comment targeting goog.bar.prop. + */ + goog.bar.prop; + """) + + self.assertCommentTarget('goog.aaa.bbb', """ + /** + * Comment targeting goog.aaa.bbb. + */ + (goog.aaa.bbb) + """) + + self.assertCommentTarget('theTarget', """ + /** + * Comment targeting symbol preceded by newlines, whitespace, + * and parens -- things we ignore. + */ + (theTarget) + """) + + self.assertCommentTarget(None, """ + /** + * @fileoverview File overview. + */ + (notATarget) + """) + + self.assertCommentTarget(None, """ + /** + * Comment that doesn't find a target. + */ + """) + + self.assertCommentTarget('theTarget.is.split.across.lines', """ + /** + * Comment that addresses a symbol split across lines. + */ + (theTarget.is.split + .across.lines) + """) + + self.assertCommentTarget('theTarget.is.split.across.lines', """ + /** + * Comment that addresses a symbol split across lines. + */ + (theTarget.is.split. + across.lines) + """) + + def _ParseComment(self, script): + """Parse a script that contains one comment and return it.""" + _, comments = testutil.ParseFunctionsAndComments(script) + self.assertEquals(1, len(comments)) + return comments[0] + + def assertCommentTarget(self, target, script): + comment = self._ParseComment(script) + self.assertEquals(target, comment.GetTargetIdentifier()) + if __name__ == '__main__': googletest.main() - diff --git a/tools/closure_linter/closure_linter/javascripttokenizer.py b/tools/closure_linter/closure_linter/javascripttokenizer.py index 097d3fd..2ee5b81 100755 --- a/tools/closure_linter/closure_linter/javascripttokenizer.py +++ b/tools/closure_linter/closure_linter/javascripttokenizer.py @@ -51,7 +51,7 @@ class JavaScriptTokenizer(tokenizer.Tokenizer): """ # Useful patterns for JavaScript parsing. - IDENTIFIER_CHAR = r'A-Za-z0-9_$.'; + IDENTIFIER_CHAR = r'A-Za-z0-9_$' # Number patterns based on: # http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html @@ -92,6 +92,9 @@ class JavaScriptTokenizer(tokenizer.Tokenizer): # like in email addresses in the @author tag. DOC_COMMENT_TEXT = re.compile(r'([^*{}\s]@|[^*{}@]|\*(?!/))+') DOC_COMMENT_NO_SPACES_TEXT = re.compile(r'([^*{}\s]@|[^*{}@\s]|\*(?!/))+') + # Match anything that is allowed in a type definition, except for tokens + # needed to parse it (and the lookahead assertion for "*/"). + DOC_COMMENT_TYPE_TEXT = re.compile(r'([^*|!?=<>(){}:,\s]|\*(?!/))+') # Match the prefix ' * ' that starts every line of jsdoc. Want to include # spaces after the '*', but nothing else that occurs after a '*', and don't @@ -141,13 +144,25 @@ class JavaScriptTokenizer(tokenizer.Tokenizer): # delete, in, instanceof, new, typeof - included as operators. # this - included in identifiers. # null, undefined - not included, should go in some "special constant" list. 
- KEYWORD_LIST = ['break', 'case', 'catch', 'continue', 'default', 'do', 'else', - 'finally', 'for', 'if', 'return', 'switch', 'throw', 'try', 'var', - 'while', 'with'] - # Match a keyword string followed by a non-identifier character in order to - # not match something like doSomething as do + Something. - KEYWORD = re.compile('(%s)((?=[^%s])|$)' % ( - '|'.join(KEYWORD_LIST), IDENTIFIER_CHAR)) + KEYWORD_LIST = [ + 'break', + 'case', + 'catch', + 'continue', + 'default', + 'do', + 'else', + 'finally', + 'for', + 'if', + 'return', + 'switch', + 'throw', + 'try', + 'var', + 'while', + 'with', + ] # List of regular expressions to match as operators. Some notes: for our # purposes, the comma behaves similarly enough to a normal operator that we @@ -155,19 +170,62 @@ class JavaScriptTokenizer(tokenizer.Tokenizer): # characters - this may not match some very esoteric uses of the in operator. # Operators that are subsets of larger operators must come later in this list # for proper matching, e.g., '>>' must come AFTER '>>>'. - OPERATOR_LIST = [',', r'\+\+', '===', '!==', '>>>=', '>>>', '==', '>=', '<=', - '!=', '<<=', '>>=', '<<', '>>', '>', '<', r'\+=', r'\+', - '--', '\^=', '-=', '-', '/=', '/', r'\*=', r'\*', '%=', '%', - '&&', r'\|\|', '&=', '&', r'\|=', r'\|', '=', '!', ':', '\?', - r'\bdelete\b', r'\bin\b', r'\binstanceof\b', r'\bnew\b', - r'\btypeof\b', r'\bvoid\b'] + OPERATOR_LIST = [ + ',', + r'\+\+', + '===', + '!==', + '>>>=', + '>>>', + '==', + '>=', + '<=', + '!=', + '<<=', + '>>=', + '<<', + '>>', + '=>', + '>', + '<', + r'\+=', + r'\+', + '--', + r'\^=', + '-=', + '-', + '/=', + '/', + r'\*=', + r'\*', + '%=', + '%', + '&&', + r'\|\|', + '&=', + '&', + r'\|=', + r'\|', + '=', + '!', + ':', + r'\?', + r'\^', + r'\bdelete\b', + r'\bin\b', + r'\binstanceof\b', + r'\bnew\b', + r'\btypeof\b', + r'\bvoid\b', + r'\.', + ] OPERATOR = re.compile('|'.join(OPERATOR_LIST)) WHITESPACE = re.compile(r'\s+') SEMICOLON = re.compile(r';') # Technically JavaScript identifiers can't contain '.', but we treat a set of - # nested identifiers as a single identifier. - NESTED_IDENTIFIER = r'[a-zA-Z_$][%s.]*' % IDENTIFIER_CHAR + # nested identifiers as a single identifier, except for trailing dots. + NESTED_IDENTIFIER = r'[a-zA-Z_$]([%s]|\.[a-zA-Z_$])*' % IDENTIFIER_CHAR IDENTIFIER = re.compile(NESTED_IDENTIFIER) SIMPLE_LVALUE = re.compile(r""" @@ -181,13 +239,33 @@ class JavaScriptTokenizer(tokenizer.Tokenizer): # beginning of the line, after whitespace, or after a '{'. The look-behind # check is necessary to not match someone@google.com as a flag. DOC_FLAG = re.compile(r'(^|(?<=\s))@(?P[a-zA-Z]+)') - # To properly parse parameter names, we need to tokenize whitespace into a - # token. - DOC_FLAG_LEX_SPACES = re.compile(r'(^|(?<=\s))@(?P%s)\b' % - '|'.join(['param'])) + # To properly parse parameter names and complex doctypes containing + # whitespace, we need to tokenize whitespace into a token after certain + # doctags. All statetracker.HAS_TYPE that are not listed here must not contain + # any whitespace in their types. + DOC_FLAG_LEX_SPACES = re.compile( + r'(^|(?<=\s))@(?P%s)\b' % + '|'.join([ + 'const', + 'enum', + 'extends', + 'final', + 'implements', + 'param', + 'private', + 'protected', + 'public', + 'return', + 'type', + 'typedef' + ])) DOC_INLINE_FLAG = re.compile(r'(?<={)@(?P[a-zA-Z]+)') + DOC_TYPE_BLOCK_START = re.compile(r'[<(]') + DOC_TYPE_BLOCK_END = re.compile(r'[>)]') + DOC_TYPE_MODIFIERS = re.compile(r'[!?|,:=]') + # Star followed by non-slash, i.e a star that does not end a comment. 
# This is used for TYPE_GROUP below. SAFE_STAR = r'(\*(?!/))' @@ -201,145 +279,165 @@ class JavaScriptTokenizer(tokenizer.Tokenizer): Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG), Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE), - Matcher(DOC_FLAG, Type.DOC_FLAG), + + # Encountering a doc flag should leave lex spaces mode. + Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE), # Tokenize braces so we can find types. Matcher(START_BLOCK, Type.DOC_START_BRACE), Matcher(END_BLOCK, Type.DOC_END_BRACE), - Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)] - - - # The token matcher groups work as follows: it is an list of Matcher objects. - # The matchers will be tried in this order, and the first to match will be - # returned. Hence the order is important because the matchers that come first - # overrule the matchers that come later. - JAVASCRIPT_MATCHERS = { - # Matchers for basic text mode. - JavaScriptModes.TEXT_MODE: [ - # Check a big group - strings, starting comments, and regexes - all - # of which could be intertwined. 'string with /regex/', - # /regex with 'string'/, /* comment with /regex/ and string */ (and so on) - Matcher(START_DOC_COMMENT, Type.START_DOC_COMMENT, - JavaScriptModes.DOC_COMMENT_MODE), - Matcher(START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT, - JavaScriptModes.BLOCK_COMMENT_MODE), - Matcher(END_OF_LINE_SINGLE_LINE_COMMENT, - Type.START_SINGLE_LINE_COMMENT), - Matcher(START_SINGLE_LINE_COMMENT, Type.START_SINGLE_LINE_COMMENT, - JavaScriptModes.LINE_COMMENT_MODE), - Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START, - JavaScriptModes.SINGLE_QUOTE_STRING_MODE), - Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START, - JavaScriptModes.DOUBLE_QUOTE_STRING_MODE), - Matcher(REGEX, Type.REGEX), - - # Next we check for start blocks appearing outside any of the items above. - Matcher(START_BLOCK, Type.START_BLOCK), - Matcher(END_BLOCK, Type.END_BLOCK), - - # Then we search for function declarations. - Matcher(FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION, - JavaScriptModes.FUNCTION_MODE), - - # Next, we convert non-function related parens to tokens. - Matcher(OPENING_PAREN, Type.START_PAREN), - Matcher(CLOSING_PAREN, Type.END_PAREN), - - # Next, we convert brackets to tokens. - Matcher(OPENING_BRACKET, Type.START_BRACKET), - Matcher(CLOSING_BRACKET, Type.END_BRACKET), - - # Find numbers. This has to happen before operators because scientific - # notation numbers can have + and - in them. - Matcher(NUMBER, Type.NUMBER), - - # Find operators and simple assignments - Matcher(SIMPLE_LVALUE, Type.SIMPLE_LVALUE), - Matcher(OPERATOR, Type.OPERATOR), - - # Find key words and whitespace - Matcher(KEYWORD, Type.KEYWORD), - Matcher(WHITESPACE, Type.WHITESPACE), - - # Find identifiers - Matcher(IDENTIFIER, Type.IDENTIFIER), - - # Finally, we convert semicolons to tokens. - Matcher(SEMICOLON, Type.SEMICOLON)], - - - # Matchers for single quote strings. - JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [ - Matcher(SINGLE_QUOTE_TEXT, Type.STRING_TEXT), - Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END, - JavaScriptModes.TEXT_MODE)], - - - # Matchers for double quote strings. - JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [ - Matcher(DOUBLE_QUOTE_TEXT, Type.STRING_TEXT), - Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END, - JavaScriptModes.TEXT_MODE)], - - - # Matchers for block comments. - JavaScriptModes.BLOCK_COMMENT_MODE: [ - # First we check for exiting a block comment. 
- Matcher(END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT, - JavaScriptModes.TEXT_MODE), - - # Match non-comment-ending text.. - Matcher(BLOCK_COMMENT_TEXT, Type.COMMENT)], - - - # Matchers for doc comments. - JavaScriptModes.DOC_COMMENT_MODE: COMMON_DOC_MATCHERS + [ - Matcher(DOC_COMMENT_TEXT, Type.COMMENT)], - JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: COMMON_DOC_MATCHERS + [ - Matcher(WHITESPACE, Type.COMMENT), - Matcher(DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)], + # And some more to parse types. + Matcher(DOC_TYPE_BLOCK_START, Type.DOC_TYPE_START_BLOCK), + Matcher(DOC_TYPE_BLOCK_END, Type.DOC_TYPE_END_BLOCK), - # Matchers for single line comments. - JavaScriptModes.LINE_COMMENT_MODE: [ - # We greedy match until the end of the line in line comment mode. - Matcher(ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)], - - - # Matchers for code after the function keyword. - JavaScriptModes.FUNCTION_MODE: [ - # Must match open paren before anything else and move into parameter mode, - # otherwise everything inside the parameter list is parsed incorrectly. - Matcher(OPENING_PAREN, Type.START_PARAMETERS, - JavaScriptModes.PARAMETER_MODE), - Matcher(WHITESPACE, Type.WHITESPACE), - Matcher(IDENTIFIER, Type.FUNCTION_NAME)], - - - # Matchers for function parameters - JavaScriptModes.PARAMETER_MODE: [ - # When in function parameter mode, a closing paren is treated specially. - # Everything else is treated as lines of parameters. - Matcher(CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS, - JavaScriptModes.TEXT_MODE), - Matcher(PARAMETERS, Type.PARAMETERS, JavaScriptModes.PARAMETER_MODE)]} + Matcher(DOC_TYPE_MODIFIERS, Type.DOC_TYPE_MODIFIER), + Matcher(DOC_COMMENT_TYPE_TEXT, Type.COMMENT), + Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)] # When text is not matched, it is given this default type based on mode. # If unspecified in this map, the default default is Type.NORMAL. JAVASCRIPT_DEFAULT_TYPES = { - JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT, - JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT + JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT, + JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT } - def __init__(self, parse_js_doc = True): + @classmethod + def BuildMatchers(cls): + """Builds the token matcher group. + + The token matcher groups work as follows: it is a list of Matcher objects. + The matchers will be tried in this order, and the first to match will be + returned. Hence the order is important because the matchers that come first + overrule the matchers that come later. + + Returns: + The completed token matcher group. + """ + # Match a keyword string followed by a non-identifier character in order to + # not match something like doSomething as do + Something. + keyword = re.compile('(%s)((?=[^%s])|$)' % ( + '|'.join(cls.KEYWORD_LIST), cls.IDENTIFIER_CHAR)) + return { + + # Matchers for basic text mode. + JavaScriptModes.TEXT_MODE: [ + # Check a big group - strings, starting comments, and regexes - all + # of which could be intertwined. 
'string with /regex/', + # /regex with 'string'/, /* comment with /regex/ and string */ (and + # so on) + Matcher(cls.START_DOC_COMMENT, Type.START_DOC_COMMENT, + JavaScriptModes.DOC_COMMENT_MODE), + Matcher(cls.START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT, + JavaScriptModes.BLOCK_COMMENT_MODE), + Matcher(cls.END_OF_LINE_SINGLE_LINE_COMMENT, + Type.START_SINGLE_LINE_COMMENT), + Matcher(cls.START_SINGLE_LINE_COMMENT, + Type.START_SINGLE_LINE_COMMENT, + JavaScriptModes.LINE_COMMENT_MODE), + Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START, + JavaScriptModes.SINGLE_QUOTE_STRING_MODE), + Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START, + JavaScriptModes.DOUBLE_QUOTE_STRING_MODE), + Matcher(cls.REGEX, Type.REGEX), + + # Next we check for start blocks appearing outside any of the items + # above. + Matcher(cls.START_BLOCK, Type.START_BLOCK), + Matcher(cls.END_BLOCK, Type.END_BLOCK), + + # Then we search for function declarations. + Matcher(cls.FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION, + JavaScriptModes.FUNCTION_MODE), + + # Next, we convert non-function related parens to tokens. + Matcher(cls.OPENING_PAREN, Type.START_PAREN), + Matcher(cls.CLOSING_PAREN, Type.END_PAREN), + + # Next, we convert brackets to tokens. + Matcher(cls.OPENING_BRACKET, Type.START_BRACKET), + Matcher(cls.CLOSING_BRACKET, Type.END_BRACKET), + + # Find numbers. This has to happen before operators because + # scientific notation numbers can have + and - in them. + Matcher(cls.NUMBER, Type.NUMBER), + + # Find operators and simple assignments + Matcher(cls.SIMPLE_LVALUE, Type.SIMPLE_LVALUE), + Matcher(cls.OPERATOR, Type.OPERATOR), + + # Find key words and whitespace. + Matcher(keyword, Type.KEYWORD), + Matcher(cls.WHITESPACE, Type.WHITESPACE), + + # Find identifiers. + Matcher(cls.IDENTIFIER, Type.IDENTIFIER), + + # Finally, we convert semicolons to tokens. + Matcher(cls.SEMICOLON, Type.SEMICOLON)], + + # Matchers for single quote strings. + JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [ + Matcher(cls.SINGLE_QUOTE_TEXT, Type.STRING_TEXT), + Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END, + JavaScriptModes.TEXT_MODE)], + + # Matchers for double quote strings. + JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [ + Matcher(cls.DOUBLE_QUOTE_TEXT, Type.STRING_TEXT), + Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END, + JavaScriptModes.TEXT_MODE)], + + # Matchers for block comments. + JavaScriptModes.BLOCK_COMMENT_MODE: [ + # First we check for exiting a block comment. + Matcher(cls.END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT, + JavaScriptModes.TEXT_MODE), + + # Match non-comment-ending text.. + Matcher(cls.BLOCK_COMMENT_TEXT, Type.COMMENT)], + + # Matchers for doc comments. + JavaScriptModes.DOC_COMMENT_MODE: cls.COMMON_DOC_MATCHERS + [ + Matcher(cls.DOC_COMMENT_TEXT, Type.COMMENT)], + + JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: cls.COMMON_DOC_MATCHERS + [ + Matcher(cls.WHITESPACE, Type.COMMENT), + Matcher(cls.DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)], + + # Matchers for single line comments. + JavaScriptModes.LINE_COMMENT_MODE: [ + # We greedy match until the end of the line in line comment mode. + Matcher(cls.ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)], + + # Matchers for code after the function keyword. + JavaScriptModes.FUNCTION_MODE: [ + # Must match open paren before anything else and move into parameter + # mode, otherwise everything inside the parameter list is parsed + # incorrectly. 
+ Matcher(cls.OPENING_PAREN, Type.START_PARAMETERS, + JavaScriptModes.PARAMETER_MODE), + Matcher(cls.WHITESPACE, Type.WHITESPACE), + Matcher(cls.IDENTIFIER, Type.FUNCTION_NAME)], + + # Matchers for function parameters + JavaScriptModes.PARAMETER_MODE: [ + # When in function parameter mode, a closing paren is treated + # specially. Everything else is treated as lines of parameters. + Matcher(cls.CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS, + JavaScriptModes.TEXT_MODE), + Matcher(cls.PARAMETERS, Type.PARAMETERS, + JavaScriptModes.PARAMETER_MODE)]} + + def __init__(self, parse_js_doc=True): """Create a tokenizer object. Args: parse_js_doc: Whether to do detailed parsing of javascript doc comments, or simply treat them as normal comments. Defaults to parsing JsDoc. """ - matchers = self.JAVASCRIPT_MATCHERS + matchers = self.BuildMatchers() if not parse_js_doc: # Make a copy so the original doesn't get modified. matchers = copy.deepcopy(matchers) @@ -362,4 +460,4 @@ class JavaScriptTokenizer(tokenizer.Tokenizer): name of the function. """ return javascripttokens.JavaScriptToken(string, token_type, line, - line_number, values) + line_number, values, line_number) diff --git a/tools/closure_linter/closure_linter/javascripttokens.py b/tools/closure_linter/closure_linter/javascripttokens.py index f46d4e1..f5815d2 100755 --- a/tools/closure_linter/closure_linter/javascripttokens.py +++ b/tools/closure_linter/closure_linter/javascripttokens.py @@ -53,6 +53,9 @@ class JavaScriptTokenType(tokens.TokenType): DOC_START_BRACE = 'doc {' DOC_END_BRACE = 'doc }' DOC_PREFIX = 'comment prefix: * ' + DOC_TYPE_START_BLOCK = 'Type <' + DOC_TYPE_END_BLOCK = 'Type >' + DOC_TYPE_MODIFIER = 'modifier' SIMPLE_LVALUE = 'lvalue=' KEYWORD = 'keyword' OPERATOR = 'operator' @@ -62,14 +65,17 @@ class JavaScriptTokenType(tokens.TokenType): SINGLE_QUOTE_STRING_START, SINGLE_QUOTE_STRING_END, DOUBLE_QUOTE_STRING_START, DOUBLE_QUOTE_STRING_END, STRING_TEXT]) - COMMENT_TYPES = frozenset([START_SINGLE_LINE_COMMENT, COMMENT, + COMMENT_TYPES = frozenset([ + START_SINGLE_LINE_COMMENT, COMMENT, START_BLOCK_COMMENT, START_DOC_COMMENT, END_BLOCK_COMMENT, END_DOC_COMMENT, DOC_START_BRACE, DOC_END_BRACE, - DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX]) + DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX, + DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER]) FLAG_DESCRIPTION_TYPES = frozenset([ - DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE]) + DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE, + DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER]) FLAG_ENDING_TYPES = frozenset([DOC_FLAG, END_DOC_COMMENT]) diff --git a/tools/closure_linter/closure_linter/not_strict_test.py b/tools/closure_linter/closure_linter/not_strict_test.py new file mode 100755 index 0000000..c92c13e --- /dev/null +++ b/tools/closure_linter/closure_linter/not_strict_test.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# +# Copyright 2011 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
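# A minimal tokenization sketch, not part of the patched sources. It assumes
# only the JavaScriptTokenizer()/TokenizeFile() interface that runner.py in
# this patch relies on; as the BuildMatchers docstring above notes, matchers
# are tried in order and the first one to match wins.
import StringIO

from closure_linter import javascripttokenizer

tokenizer = javascripttokenizer.JavaScriptTokenizer()
start_token = tokenizer.TokenizeFile(
    StringIO.StringIO('var add = (a, b) => a + b;\n'))
for token in start_token:
  # Each token carries its type, text and position in the source.
  print token.line_number, token.type, repr(token.string)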
+ +"""Tests for gjslint --nostrict. + +Tests errors that can be thrown by gjslint when not in strict mode. +""" + + + +import os +import sys +import unittest + +import gflags as flags +import unittest as googletest + +from closure_linter import errors +from closure_linter import runner +from closure_linter.common import filetestcase + +_RESOURCE_PREFIX = 'closure_linter/testdata' + +flags.FLAGS.strict = False +flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires') +flags.FLAGS.closurized_namespaces = ('goog', 'dummy') +flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js', + 'limited_doc_checks.js') + + +# List of files under testdata to test. +# We need to list files explicitly since pyglib can't list directories. +_TEST_FILES = [ + 'not_strict.js' + ] + + +class GJsLintTestSuite(unittest.TestSuite): + """Test suite to run a GJsLintTest for each of several files. + + If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in + testdata to test. Otherwise, _TEST_FILES is used. + """ + + def __init__(self, tests=()): + unittest.TestSuite.__init__(self, tests) + + argv = sys.argv and sys.argv[1:] or [] + if argv: + test_files = argv + else: + test_files = _TEST_FILES + for test_file in test_files: + resource_path = os.path.join(_RESOURCE_PREFIX, test_file) + self.addTest(filetestcase.AnnotatedFileTestCase(resource_path, + runner.Run, + errors.ByName)) + +if __name__ == '__main__': + # Don't let main parse args; it happens in the TestSuite. + googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite') diff --git a/tools/closure_linter/closure_linter/requireprovidesorter.py b/tools/closure_linter/closure_linter/requireprovidesorter.py new file mode 100755 index 0000000..e7e08a1 --- /dev/null +++ b/tools/closure_linter/closure_linter/requireprovidesorter.py @@ -0,0 +1,329 @@ +#!/usr/bin/env python +# +# Copyright 2011 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains logic for sorting goog.provide and goog.require statements. + +Closurized JavaScript files use goog.provide and goog.require statements at the +top of the file to manage dependencies. These statements should be sorted +alphabetically, however, it is common for them to be accompanied by inline +comments or suppression annotations. In order to sort these statements without +disrupting their comments and annotations, the association between statements +and comments/annotations must be maintained while sorting. + + RequireProvideSorter: Handles checking/fixing of provide/require statements. +""" + + + +from closure_linter import javascripttokens +from closure_linter import tokenutil + +# Shorthand +Type = javascripttokens.JavaScriptTokenType + + +class RequireProvideSorter(object): + """Checks for and fixes alphabetization of provide and require statements. 
+ + When alphabetizing, comments on the same line or comments directly above a + goog.provide or goog.require statement are associated with that statement and + stay with the statement as it gets sorted. + """ + + def CheckProvides(self, token): + """Checks alphabetization of goog.provide statements. + + Iterates over tokens in given token stream, identifies goog.provide tokens, + and checks that they occur in alphabetical order by the object being + provided. + + Args: + token: A token in the token stream before any goog.provide tokens. + + Returns: + The first provide token in the token stream. + + None is returned if all goog.provide statements are already sorted. + """ + provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide') + provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens) + sorted_provide_strings = sorted(provide_strings) + if provide_strings != sorted_provide_strings: + return provide_tokens[0] + return None + + def CheckRequires(self, token): + """Checks alphabetization of goog.require statements. + + Iterates over tokens in given token stream, identifies goog.require tokens, + and checks that they occur in alphabetical order by the dependency being + required. + + Args: + token: A token in the token stream before any goog.require tokens. + + Returns: + The first require token in the token stream. + + None is returned if all goog.require statements are already sorted. + """ + require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require') + require_strings = self._GetRequireOrProvideTokenStrings(require_tokens) + sorted_require_strings = sorted(require_strings) + if require_strings != sorted_require_strings: + return require_tokens[0] + return None + + def FixProvides(self, token): + """Sorts goog.provide statements in the given token stream alphabetically. + + Args: + token: The first token in the token stream. + """ + self._FixProvidesOrRequires( + self._GetRequireOrProvideTokens(token, 'goog.provide')) + + def FixRequires(self, token): + """Sorts goog.require statements in the given token stream alphabetically. + + Args: + token: The first token in the token stream. + """ + self._FixProvidesOrRequires( + self._GetRequireOrProvideTokens(token, 'goog.require')) + + def _FixProvidesOrRequires(self, tokens): + """Sorts goog.provide or goog.require statements. + + Args: + tokens: A list of goog.provide or goog.require tokens in the order they + appear in the token stream. i.e. the first token in this list must + be the first goog.provide or goog.require token. + """ + strings = self._GetRequireOrProvideTokenStrings(tokens) + sorted_strings = sorted(strings) + + # Make a separate pass to remove any blank lines between goog.require/ + # goog.provide tokens. + first_token = tokens[0] + last_token = tokens[-1] + i = last_token + while i != first_token and i is not None: + if i.type is Type.BLANK_LINE: + tokenutil.DeleteToken(i) + i = i.previous + + # A map from required/provided object name to tokens that make up the line + # it was on, including any comments immediately before it or after it on the + # same line. + tokens_map = self._GetTokensMap(tokens) + + # Iterate over the map removing all tokens. + for name in tokens_map: + tokens_to_delete = tokens_map[name] + for i in tokens_to_delete: + tokenutil.DeleteToken(i) + + # Save token to rest of file. Sorted token will be inserted before this. + rest_of_file = tokens_map[strings[-1]][-1].next + + # Re-add all tokens in the map in alphabetical order. 
+ insert_after = tokens[0].previous + for string in sorted_strings: + for i in tokens_map[string]: + if rest_of_file: + tokenutil.InsertTokenBefore(i, rest_of_file) + else: + tokenutil.InsertTokenAfter(i, insert_after) + insert_after = i + + def _GetRequireOrProvideTokens(self, token, token_string): + """Gets all goog.provide or goog.require tokens in the given token stream. + + Args: + token: The first token in the token stream. + token_string: One of 'goog.provide' or 'goog.require' to indicate which + tokens to find. + + Returns: + A list of goog.provide or goog.require tokens in the order they appear in + the token stream. + """ + tokens = [] + while token: + if token.type == Type.IDENTIFIER: + if token.string == token_string: + tokens.append(token) + elif token.string not in [ + 'goog.provide', 'goog.require', 'goog.setTestOnly']: + # These 3 identifiers are at the top of the file. So if any other + # identifier is encountered, return. + # TODO(user): Once it's decided what ordering goog.require + # should use, add 'goog.module' to the list above and implement the + # decision. + break + token = token.next + + return tokens + + def _GetRequireOrProvideTokenStrings(self, tokens): + """Gets a list of strings corresponding to the given list of tokens. + + The string will be the next string in the token stream after each token in + tokens. This is used to find the object being provided/required by a given + goog.provide or goog.require token. + + Args: + tokens: A list of goog.provide or goog.require tokens. + + Returns: + A list of object names that are being provided or required by the given + list of tokens. For example: + + ['object.a', 'object.c', 'object.b'] + """ + token_strings = [] + for token in tokens: + if not token.is_deleted: + name = tokenutil.GetStringAfterToken(token) + token_strings.append(name) + return token_strings + + def _GetTokensMap(self, tokens): + """Gets a map from object name to tokens associated with that object. + + Starting from the goog.provide/goog.require token, searches backwards in the + token stream for any lines that start with a comment. These lines are + associated with the goog.provide/goog.require token. Also associates any + tokens on the same line as the goog.provide/goog.require token with that + token. + + Args: + tokens: A list of goog.provide or goog.require tokens. + + Returns: + A dictionary that maps object names to the tokens associated with the + goog.provide or goog.require of that object name. For example: + + { + 'object.a': [JavaScriptToken, JavaScriptToken, ...], + 'object.b': [...] + } + + The list of tokens includes any comment lines above the goog.provide or + goog.require statement and everything after the statement on the same + line. For example, all of the following would be associated with + 'object.a': + + /** @suppress {extraRequire} */ + goog.require('object.a'); // Some comment. + """ + tokens_map = {} + for token in tokens: + object_name = tokenutil.GetStringAfterToken(token) + # If the previous line starts with a comment, presume that the comment + # relates to the goog.require or goog.provide and keep them together when + # sorting. + first_token = token + previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token) + while (previous_first_token and + previous_first_token.IsAnyType(Type.COMMENT_TYPES)): + first_token = previous_first_token + previous_first_token = tokenutil.GetFirstTokenInPreviousLine( + first_token) + + # Find the last token on the line. 
+ last_token = tokenutil.GetLastTokenInSameLine(token) + + all_tokens = self._GetTokenList(first_token, last_token) + tokens_map[object_name] = all_tokens + return tokens_map + + def _GetTokenList(self, first_token, last_token): + """Gets a list of all tokens from first_token to last_token, inclusive. + + Args: + first_token: The first token to get. + last_token: The last token to get. + + Returns: + A list of all tokens between first_token and last_token, including both + first_token and last_token. + + Raises: + Exception: If the token stream ends before last_token is reached. + """ + token_list = [] + token = first_token + while token != last_token: + if not token: + raise Exception('ran out of tokens') + token_list.append(token) + token = token.next + token_list.append(last_token) + + return token_list + + def GetFixedRequireString(self, token): + """Get fixed/sorted order of goog.require statements. + + Args: + token: The first token in the token stream. + + Returns: + A string for correct sorted order of goog.require. + """ + return self._GetFixedRequireOrProvideString( + self._GetRequireOrProvideTokens(token, 'goog.require')) + + def GetFixedProvideString(self, token): + """Get fixed/sorted order of goog.provide statements. + + Args: + token: The first token in the token stream. + + Returns: + A string for correct sorted order of goog.provide. + """ + return self._GetFixedRequireOrProvideString( + self._GetRequireOrProvideTokens(token, 'goog.provide')) + + def _GetFixedRequireOrProvideString(self, tokens): + """Sorts goog.provide or goog.require statements. + + Args: + tokens: A list of goog.provide or goog.require tokens in the order they + appear in the token stream. i.e. the first token in this list must + be the first goog.provide or goog.require token. + + Returns: + A string for sorted goog.require or goog.provide statements + """ + + # A map from required/provided object name to tokens that make up the line + # it was on, including any comments immediately before it or after it on the + # same line. + tokens_map = self._GetTokensMap(tokens) + sorted_strings = sorted(tokens_map.keys()) + + new_order = '' + for string in sorted_strings: + for i in tokens_map[string]: + new_order += i.string + if i.IsLastInLine(): + new_order += '\n' + + return new_order diff --git a/tools/closure_linter/closure_linter/requireprovidesorter_test.py b/tools/closure_linter/closure_linter/requireprovidesorter_test.py new file mode 100644 index 0000000..fecb6d0 --- /dev/null +++ b/tools/closure_linter/closure_linter/requireprovidesorter_test.py @@ -0,0 +1,155 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
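# A minimal check-then-fix sketch for RequireProvideSorter, not part of the
# patched sources; it assumes the same TokenizeSourceAndRunEcmaPass helper
# that the tests below use to build a token stream.
from closure_linter import requireprovidesorter
from closure_linter import testutil

token = testutil.TokenizeSourceAndRunEcmaPass([
    "goog.require('package.b');",
    "goog.require('package.a');",
])
sorter = requireprovidesorter.RequireProvideSorter()
if sorter.CheckRequires(token) is not None:
  # CheckRequires returns the first goog.require token when they are unsorted.
  sorter.FixRequires(token)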
+ +"""Unit tests for RequireProvideSorter.""" + + + +import unittest as googletest +from closure_linter import javascripttokens +from closure_linter import requireprovidesorter +from closure_linter import testutil + +# pylint: disable=g-bad-name +TokenType = javascripttokens.JavaScriptTokenType + + +class RequireProvideSorterTest(googletest.TestCase): + """Tests for RequireProvideSorter.""" + + def testGetFixedProvideString(self): + """Tests that fixed string constains proper comments also.""" + input_lines = [ + 'goog.provide(\'package.xyz\');', + '/** @suppress {extraprovide} **/', + 'goog.provide(\'package.abcd\');' + ] + + expected_lines = [ + '/** @suppress {extraprovide} **/', + 'goog.provide(\'package.abcd\');', + 'goog.provide(\'package.xyz\');' + ] + + token = testutil.TokenizeSourceAndRunEcmaPass(input_lines) + + sorter = requireprovidesorter.RequireProvideSorter() + fixed_provide_string = sorter.GetFixedProvideString(token) + + self.assertEquals(expected_lines, fixed_provide_string.splitlines()) + + def testGetFixedRequireString(self): + """Tests that fixed string constains proper comments also.""" + input_lines = [ + 'goog.require(\'package.xyz\');', + '/** This is needed for scope. **/', + 'goog.require(\'package.abcd\');' + ] + + expected_lines = [ + '/** This is needed for scope. **/', + 'goog.require(\'package.abcd\');', + 'goog.require(\'package.xyz\');' + ] + + token = testutil.TokenizeSourceAndRunEcmaPass(input_lines) + + sorter = requireprovidesorter.RequireProvideSorter() + fixed_require_string = sorter.GetFixedRequireString(token) + + self.assertEquals(expected_lines, fixed_require_string.splitlines()) + + def testFixRequires_removeBlankLines(self): + """Tests that blank lines are omitted in sorted goog.require statements.""" + input_lines = [ + 'goog.provide(\'package.subpackage.Whatever\');', + '', + 'goog.require(\'package.subpackage.ClassB\');', + '', + 'goog.require(\'package.subpackage.ClassA\');' + ] + expected_lines = [ + 'goog.provide(\'package.subpackage.Whatever\');', + '', + 'goog.require(\'package.subpackage.ClassA\');', + 'goog.require(\'package.subpackage.ClassB\');' + ] + token = testutil.TokenizeSourceAndRunEcmaPass(input_lines) + + sorter = requireprovidesorter.RequireProvideSorter() + sorter.FixRequires(token) + + self.assertEquals(expected_lines, self._GetLines(token)) + + def fixRequiresTest_withTestOnly(self, position): + """Regression-tests sorting even with a goog.setTestOnly statement. + + Args: + position: The position in the list where to insert the goog.setTestOnly + statement. Will be used to test all possible combinations for + this test. + """ + input_lines = [ + 'goog.provide(\'package.subpackage.Whatever\');', + '', + 'goog.require(\'package.subpackage.ClassB\');', + 'goog.require(\'package.subpackage.ClassA\');' + ] + expected_lines = [ + 'goog.provide(\'package.subpackage.Whatever\');', + '', + 'goog.require(\'package.subpackage.ClassA\');', + 'goog.require(\'package.subpackage.ClassB\');' + ] + input_lines.insert(position, 'goog.setTestOnly();') + expected_lines.insert(position, 'goog.setTestOnly();') + + token = testutil.TokenizeSourceAndRunEcmaPass(input_lines) + + sorter = requireprovidesorter.RequireProvideSorter() + sorter.FixRequires(token) + + self.assertEquals(expected_lines, self._GetLines(token)) + + def testFixRequires_withTestOnly(self): + """Regression-tests sorting even after a goog.setTestOnly statement.""" + + # goog.setTestOnly at first line. 
+ self.fixRequiresTest_withTestOnly(position=0) + + # goog.setTestOnly after goog.provide. + self.fixRequiresTest_withTestOnly(position=1) + + # goog.setTestOnly before goog.require. + self.fixRequiresTest_withTestOnly(position=2) + + # goog.setTestOnly after goog.require. + self.fixRequiresTest_withTestOnly(position=4) + + def _GetLines(self, token): + """Returns an array of lines based on the specified token stream.""" + lines = [] + line = '' + while token: + line += token.string + if token.IsLastInLine(): + lines.append(line) + line = '' + token = token.next + return lines + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/closure_linter/runner.py b/tools/closure_linter/closure_linter/runner.py new file mode 100644 index 0000000..04e7fa4 --- /dev/null +++ b/tools/closure_linter/closure_linter/runner.py @@ -0,0 +1,198 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Main lint function. Tokenizes file, runs passes, and feeds to checker.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = 'nnaze@google.com (Nathan Naze)' + +import traceback + +import gflags as flags + +from closure_linter import checker +from closure_linter import ecmalintrules +from closure_linter import ecmametadatapass +from closure_linter import error_check +from closure_linter import errors +from closure_linter import javascriptstatetracker +from closure_linter import javascripttokenizer + +from closure_linter.common import error +from closure_linter.common import htmlutil +from closure_linter.common import tokens + +flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'], + 'List of files with relaxed documentation checks. Will not ' + 'report errors for missing documentation, some missing ' + 'descriptions, or methods whose @return tags don\'t have a ' + 'matching return statement.') +flags.DEFINE_boolean('error_trace', False, + 'Whether to show error exceptions.') +flags.ADOPT_module_key_flags(checker) +flags.ADOPT_module_key_flags(ecmalintrules) +flags.ADOPT_module_key_flags(error_check) + + +def _GetLastNonWhiteSpaceToken(start_token): + """Get the last non-whitespace token in a token stream.""" + ret_token = None + + whitespace_tokens = frozenset([ + tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE]) + for t in start_token: + if t.type not in whitespace_tokens: + ret_token = t + + return ret_token + + +def _IsHtml(filename): + return filename.endswith('.html') or filename.endswith('.htm') + + +def _Tokenize(fileobj): + """Tokenize a file. + + Args: + fileobj: file-like object (or iterable lines) with the source. + + Returns: + The first token in the token stream and the ending mode of the tokenizer. 
+ """ + tokenizer = javascripttokenizer.JavaScriptTokenizer() + start_token = tokenizer.TokenizeFile(fileobj) + return start_token, tokenizer.mode + + +def _IsLimitedDocCheck(filename, limited_doc_files): + """Whether this this a limited-doc file. + + Args: + filename: The filename. + limited_doc_files: Iterable of strings. Suffixes of filenames that should + be limited doc check. + + Returns: + Whether the file should be limited check. + """ + for limited_doc_filename in limited_doc_files: + if filename.endswith(limited_doc_filename): + return True + return False + + +def Run(filename, error_handler, source=None): + """Tokenize, run passes, and check the given file. + + Args: + filename: The path of the file to check + error_handler: The error handler to report errors to. + source: A file-like object with the file source. If omitted, the file will + be read from the filename path. + """ + if not source: + try: + source = open(filename) + except IOError: + error_handler.HandleFile(filename, None) + error_handler.HandleError( + error.Error(errors.FILE_NOT_FOUND, 'File not found')) + error_handler.FinishFile() + return + + if _IsHtml(filename): + source_file = htmlutil.GetScriptLines(source) + else: + source_file = source + + token, tokenizer_mode = _Tokenize(source_file) + + error_handler.HandleFile(filename, token) + + # If we did not end in the basic mode, this a failed parse. + if tokenizer_mode is not javascripttokenizer.JavaScriptModes.TEXT_MODE: + error_handler.HandleError( + error.Error(errors.FILE_IN_BLOCK, + 'File ended in mode "%s".' % tokenizer_mode, + _GetLastNonWhiteSpaceToken(token))) + + # Run the ECMA pass + error_token = None + + ecma_pass = ecmametadatapass.EcmaMetaDataPass() + error_token = RunMetaDataPass(token, ecma_pass, error_handler, filename) + + is_limited_doc_check = ( + _IsLimitedDocCheck(filename, flags.FLAGS.limited_doc_files)) + + _RunChecker(token, error_handler, + is_limited_doc_check, + is_html=_IsHtml(filename), + stop_token=error_token) + + error_handler.FinishFile() + + +def RunMetaDataPass(start_token, metadata_pass, error_handler, filename=''): + """Run a metadata pass over a token stream. + + Args: + start_token: The first token in a token stream. + metadata_pass: Metadata pass to run. + error_handler: The error handler to report errors to. + filename: Filename of the source. + + Returns: + The token where the error occurred (if any). + """ + + try: + metadata_pass.Process(start_token) + except ecmametadatapass.ParseError, parse_err: + if flags.FLAGS.error_trace: + traceback.print_exc() + error_token = parse_err.token + error_msg = str(parse_err) + error_handler.HandleError( + error.Error(errors.FILE_DOES_NOT_PARSE, + ('Error parsing file at token "%s". Unable to ' + 'check the rest of file.' 
+ '\nError "%s"' % (error_token, error_msg)), error_token)) + return error_token + except Exception: # pylint: disable=broad-except + traceback.print_exc() + error_handler.HandleError( + error.Error( + errors.FILE_DOES_NOT_PARSE, + 'Internal error in %s' % filename)) + + +def _RunChecker(start_token, error_handler, + limited_doc_checks, is_html, + stop_token=None): + + state_tracker = javascriptstatetracker.JavaScriptStateTracker() + + style_checker = checker.JavaScriptStyleChecker( + state_tracker=state_tracker, + error_handler=error_handler) + + style_checker.Check(start_token, + is_html=is_html, + limited_doc_checks=limited_doc_checks, + stop_token=stop_token) diff --git a/tools/closure_linter/closure_linter/runner_test.py b/tools/closure_linter/closure_linter/runner_test.py new file mode 100644 index 0000000..da5857d --- /dev/null +++ b/tools/closure_linter/closure_linter/runner_test.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python +# +# Copyright 2008 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for the runner module.""" + +__author__ = ('nnaze@google.com (Nathan Naze)') + +import StringIO + + +import mox + + +import unittest as googletest + +from closure_linter import errors +from closure_linter import runner +from closure_linter.common import error +from closure_linter.common import errorhandler +from closure_linter.common import tokens + + +class LimitedDocTest(googletest.TestCase): + + def testIsLimitedDocCheck(self): + self.assertTrue(runner._IsLimitedDocCheck('foo_test.js', ['_test.js'])) + self.assertFalse(runner._IsLimitedDocCheck('foo_bar.js', ['_test.js'])) + + self.assertTrue(runner._IsLimitedDocCheck( + 'foo_moo.js', ['moo.js', 'quack.js'])) + self.assertFalse(runner._IsLimitedDocCheck( + 'foo_moo.js', ['woof.js', 'quack.js'])) + + +class RunnerTest(googletest.TestCase): + + def setUp(self): + self.mox = mox.Mox() + + def testRunOnMissingFile(self): + mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler) + + def ValidateError(err): + return (isinstance(err, error.Error) and + err.code is errors.FILE_NOT_FOUND and + err.token is None) + + mock_error_handler.HandleFile('does_not_exist.js', None) + mock_error_handler.HandleError(mox.Func(ValidateError)) + mock_error_handler.FinishFile() + + self.mox.ReplayAll() + + runner.Run('does_not_exist.js', mock_error_handler) + + self.mox.VerifyAll() + + def testBadTokenization(self): + mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler) + + def ValidateError(err): + return (isinstance(err, error.Error) and + err.code is errors.FILE_IN_BLOCK and + err.token.string == '}') + + mock_error_handler.HandleFile('foo.js', mox.IsA(tokens.Token)) + mock_error_handler.HandleError(mox.Func(ValidateError)) + mock_error_handler.HandleError(mox.IsA(error.Error)) + mock_error_handler.FinishFile() + + self.mox.ReplayAll() + + source = StringIO.StringIO(_BAD_TOKENIZATION_SCRIPT) + runner.Run('foo.js', mock_error_handler, source) + + self.mox.VerifyAll() + + 
+_BAD_TOKENIZATION_SCRIPT = """ +function foo () { + var a = 3; + var b = 2; + return b + a; /* Comment not closed +} +""" + + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/closure_linter/scopeutil.py b/tools/closure_linter/closure_linter/scopeutil.py new file mode 100644 index 0000000..a7ca9b6 --- /dev/null +++ b/tools/closure_linter/closure_linter/scopeutil.py @@ -0,0 +1,206 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools to match goog.scope alias statements.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + +import itertools + +from closure_linter import ecmametadatapass +from closure_linter import tokenutil +from closure_linter.javascripttokens import JavaScriptTokenType + + + +def IsGoogScopeBlock(context): + """Whether the given context is a goog.scope block. + + This function only checks that the block is a function block inside + a goog.scope() call. + + TODO(nnaze): Implement goog.scope checks that verify the call is + in the root context and contains only a single function literal. + + Args: + context: An EcmaContext of type block. + + Returns: + Whether the context is a goog.scope block. + """ + + if context.type != ecmametadatapass.EcmaContext.BLOCK: + return False + + if not _IsFunctionLiteralBlock(context): + return False + + # Check that this function is contained by a group + # of form "goog.scope(...)". + parent = context.parent + if parent and parent.type is ecmametadatapass.EcmaContext.GROUP: + + last_code_token = parent.start_token.metadata.last_code + + if (last_code_token and + last_code_token.type is JavaScriptTokenType.IDENTIFIER and + last_code_token.string == 'goog.scope'): + return True + + return False + + +def _IsFunctionLiteralBlock(block_context): + """Check if a context is a function literal block (without parameters). + + Example function literal block: 'function() {}' + + Args: + block_context: An EcmaContext of type block. + + Returns: + Whether this context is a function literal block. + """ + + previous_code_tokens_iter = itertools.ifilter( + lambda token: token not in JavaScriptTokenType.NON_CODE_TYPES, + reversed(block_context.start_token)) + + # Ignore the current token + next(previous_code_tokens_iter, None) + + # Grab the previous three tokens and put them in correct order. + previous_code_tokens = list(itertools.islice(previous_code_tokens_iter, 3)) + previous_code_tokens.reverse() + + # There aren't three previous tokens. 
+ if len(previous_code_tokens) is not 3: + return False + + # Check that the previous three code tokens are "function ()" + previous_code_token_types = [token.type for token in previous_code_tokens] + if (previous_code_token_types == [ + JavaScriptTokenType.FUNCTION_DECLARATION, + JavaScriptTokenType.START_PARAMETERS, + JavaScriptTokenType.END_PARAMETERS]): + return True + + return False + + +def IsInClosurizedNamespace(symbol, closurized_namespaces): + """Match a goog.scope alias. + + Args: + symbol: An identifier like 'goog.events.Event'. + closurized_namespaces: Iterable of valid Closurized namespaces (strings). + + Returns: + True if symbol is an identifier in a Closurized namespace, otherwise False. + """ + for ns in closurized_namespaces: + if symbol.startswith(ns + '.'): + return True + + return False + + +def _GetVarAssignmentTokens(context): + """Returns the tokens from context if it is a var assignment. + + Args: + context: An EcmaContext. + + Returns: + If a var assignment, the tokens contained within it w/o the trailing + semicolon. + """ + if context.type != ecmametadatapass.EcmaContext.VAR: + return + + # Get the tokens in this statement. + if context.start_token and context.end_token: + statement_tokens = tokenutil.GetTokenRange(context.start_token, + context.end_token) + else: + return + + # And now just those tokens that are actually code. + is_non_code_type = lambda t: t.type not in JavaScriptTokenType.NON_CODE_TYPES + code_tokens = filter(is_non_code_type, statement_tokens) + + # Pop off the semicolon if present. + if code_tokens and code_tokens[-1].IsType(JavaScriptTokenType.SEMICOLON): + code_tokens.pop() + + if len(code_tokens) < 4: + return + + if (code_tokens[0].IsKeyword('var') and + code_tokens[1].IsType(JavaScriptTokenType.SIMPLE_LVALUE) and + code_tokens[2].IsOperator('=')): + return code_tokens + + +def MatchAlias(context): + """Match an alias statement (some identifier assigned to a variable). + + Example alias: var MyClass = proj.longNamespace.MyClass. + + Args: + context: An EcmaContext of type EcmaContext.VAR. + + Returns: + If a valid alias, returns a tuple of alias and symbol, otherwise None. + """ + code_tokens = _GetVarAssignmentTokens(context) + if code_tokens is None: + return + + if all(tokenutil.IsIdentifierOrDot(t) for t in code_tokens[3:]): + # var Foo = bar.Foo; + alias, symbol = code_tokens[1], code_tokens[3] + # Mark both tokens as an alias definition to not count them as usages. + alias.metadata.is_alias_definition = True + symbol.metadata.is_alias_definition = True + return alias.string, tokenutil.GetIdentifierForToken(symbol) + + +def MatchModuleAlias(context): + """Match an alias statement in a goog.module style import. + + Example alias: var MyClass = goog.require('proj.longNamespace.MyClass'). + + Args: + context: An EcmaContext. + + Returns: + If a valid alias, returns a tuple of alias and symbol, otherwise None. 
+ """ + code_tokens = _GetVarAssignmentTokens(context) + if code_tokens is None: + return + + if(code_tokens[3].IsType(JavaScriptTokenType.IDENTIFIER) and + code_tokens[3].string == 'goog.require'): + # var Foo = goog.require('bar.Foo'); + alias = code_tokens[1] + symbol = tokenutil.GetStringAfterToken(code_tokens[3]) + if symbol: + alias.metadata.is_alias_definition = True + return alias.string, symbol diff --git a/tools/closure_linter/closure_linter/scopeutil_test.py b/tools/closure_linter/closure_linter/scopeutil_test.py new file mode 100644 index 0000000..722a953 --- /dev/null +++ b/tools/closure_linter/closure_linter/scopeutil_test.py @@ -0,0 +1,222 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for the scopeutil module.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + + +import unittest as googletest + +from closure_linter import ecmametadatapass +from closure_linter import scopeutil +from closure_linter import testutil + + +def _FindContexts(start_token): + """Depth first search of all contexts referenced by a token stream. + + Includes contexts' parents, which might not be directly referenced + by any token in the stream. + + Args: + start_token: First token in the token stream. + + Yields: + All contexts referenced by this token stream. + """ + + seen_contexts = set() + + # For each token, yield the context if we haven't seen it before. + for token in start_token: + + token_context = token.metadata.context + contexts = [token_context] + + # Also grab all the context's ancestors. + parent = token_context.parent + while parent: + contexts.append(parent) + parent = parent.parent + + # Yield each of these contexts if we've not seen them. 
+ for context in contexts: + if context not in seen_contexts: + yield context + + seen_contexts.add(context) + + +def _FindFirstContextOfType(token, context_type): + """Returns the first statement context.""" + for context in _FindContexts(token): + if context.type == context_type: + return context + + +def _ParseAssignment(script): + start_token = testutil.TokenizeSourceAndRunEcmaPass(script) + statement = _FindFirstContextOfType( + start_token, ecmametadatapass.EcmaContext.VAR) + return statement + + +class StatementTest(googletest.TestCase): + + def assertAlias(self, expected_match, script): + statement = _ParseAssignment(script) + match = scopeutil.MatchAlias(statement) + self.assertEquals(expected_match, match) + + def assertModuleAlias(self, expected_match, script): + statement = _ParseAssignment(script) + match = scopeutil.MatchModuleAlias(statement) + self.assertEquals(expected_match, match) + + def testSimpleAliases(self): + self.assertAlias( + ('foo', 'goog.foo'), + 'var foo = goog.foo;') + + self.assertAlias( + ('foo', 'goog.foo'), + 'var foo = goog.foo') # No semicolon + + def testAliasWithComment(self): + self.assertAlias( + ('Component', 'goog.ui.Component'), + 'var Component = /* comment */ goog.ui.Component;') + + def testMultilineAlias(self): + self.assertAlias( + ('Component', 'goog.ui.Component'), + 'var Component = \n goog.ui.\n Component;') + + def testNonSymbolAliasVarStatements(self): + self.assertAlias(None, 'var foo = 3;') + self.assertAlias(None, 'var foo = function() {};') + self.assertAlias(None, 'var foo = bar ? baz : qux;') + + def testModuleAlias(self): + self.assertModuleAlias( + ('foo', 'goog.foo'), + 'var foo = goog.require("goog.foo");') + self.assertModuleAlias( + None, + 'var foo = goog.require(notastring);') + + +class ScopeBlockTest(googletest.TestCase): + + @staticmethod + def _GetBlocks(source): + start_token = testutil.TokenizeSourceAndRunEcmaPass(source) + for context in _FindContexts(start_token): + if context.type is ecmametadatapass.EcmaContext.BLOCK: + yield context + + def assertNoBlocks(self, script): + blocks = list(self._GetBlocks(script)) + self.assertEquals([], blocks) + + def testNotBlocks(self): + # Ensure these are not considered blocks. + self.assertNoBlocks('goog.scope(if{});') + self.assertNoBlocks('goog.scope(for{});') + self.assertNoBlocks('goog.scope(switch{});') + self.assertNoBlocks('goog.scope(function foo{});') + + def testNonScopeBlocks(self): + + blocks = list(self._GetBlocks('goog.scope(try{});')) + self.assertEquals(1, len(blocks)) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + + blocks = list(self._GetBlocks('goog.scope(function(a,b){});')) + self.assertEquals(1, len(blocks)) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + + blocks = list(self._GetBlocks('goog.scope(try{} catch(){});')) + # Two blocks: try and catch. 
+ self.assertEquals(2, len(blocks)) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + + blocks = list(self._GetBlocks('goog.scope(try{} catch(){} finally {});')) + self.assertEquals(3, len(blocks)) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop())) + + +class AliasTest(googletest.TestCase): + + def setUp(self): + self.start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT) + + def testMatchAliasStatement(self): + matches = set() + for context in _FindContexts(self.start_token): + match = scopeutil.MatchAlias(context) + if match: + matches.add(match) + + self.assertEquals( + set([('bar', 'baz'), + ('foo', 'this.foo_'), + ('Component', 'goog.ui.Component'), + ('MyClass', 'myproject.foo.MyClass'), + ('NonClosurizedClass', 'aaa.bbb.NonClosurizedClass')]), + matches) + + def testMatchAliasStatement_withClosurizedNamespaces(self): + + closurized_namepaces = frozenset(['goog', 'myproject']) + + matches = set() + for context in _FindContexts(self.start_token): + match = scopeutil.MatchAlias(context) + if match: + unused_alias, symbol = match + if scopeutil.IsInClosurizedNamespace(symbol, closurized_namepaces): + matches.add(match) + + self.assertEquals( + set([('MyClass', 'myproject.foo.MyClass'), + ('Component', 'goog.ui.Component')]), + matches) + +_TEST_SCRIPT = """ +goog.scope(function() { + var Component = goog.ui.Component; // scope alias + var MyClass = myproject.foo.MyClass; // scope alias + + // Scope alias of non-Closurized namespace. + var NonClosurizedClass = aaa.bbb.NonClosurizedClass; + + var foo = this.foo_; // non-scope object property alias + var bar = baz; // variable alias + + var component = new Component(); +}); + +""" + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/closure_linter/statetracker.py b/tools/closure_linter/closure_linter/statetracker.py old mode 100755 new mode 100644 index 5630c17..52e3639 --- a/tools/closure_linter/closure_linter/statetracker.py +++ b/tools/closure_linter/closure_linter/statetracker.py @@ -24,6 +24,7 @@ import re from closure_linter import javascripttokenizer from closure_linter import javascripttokens from closure_linter import tokenutil +from closure_linter import typeannotation # Shorthand Type = javascripttokens.JavaScriptTokenType @@ -39,7 +40,8 @@ class DocFlag(object): including braces. type_end_token: The last token specifying the flag type, including braces. - type: The type spec. + type: The type spec string. + jstype: The type spec, a TypeAnnotation instance. name_token: The token specifying the flag name. name: The flag name description_start_token: The first token in the description. @@ -53,34 +55,47 @@ class DocFlag(object): STANDARD_DOC = frozenset([ 'author', 'bug', + 'classTemplate', + 'consistentIdGenerator', 'const', 'constructor', 'define', 'deprecated', + 'dict', 'enum', 'export', + 'expose', 'extends', 'externs', 'fileoverview', + 'idGenerator', 'implements', 'implicitCast', 'interface', + 'lends', 'license', + 'ngInject', # This annotation is specific to AngularJS. 
'noalias', 'nocompile', 'nosideeffects', 'override', 'owner', + 'package', 'param', 'preserve', 'private', + 'protected', + 'public', 'return', 'see', + 'stableIdGenerator', + 'struct', 'supported', 'template', 'this', 'type', 'typedef', + 'unrestricted', ]) ANNOTATION = frozenset(['preserveTry', 'suppress']) @@ -89,51 +104,127 @@ class DocFlag(object): # Includes all Closure Compiler @suppress types. # Not all of these annotations are interpreted by Closure Linter. + # + # Specific cases: + # - accessControls is supported by the compiler at the expression + # and method level to suppress warnings about private/protected + # access (method level applies to all references in the method). + # The linter mimics the compiler behavior. SUPPRESS_TYPES = frozenset([ 'accessControls', + 'ambiguousFunctionDecl', + 'checkDebuggerStatement', 'checkRegExp', + 'checkStructDictInheritance', 'checkTypes', 'checkVars', + 'const', + 'constantProperty', 'deprecated', 'duplicate', + 'es5Strict', + 'externsValidation', + 'extraProvide', + 'extraRequire', 'fileoverviewTags', + 'globalThis', + 'internetExplorerChecks', 'invalidCasts', 'missingProperties', + 'missingProvide', + 'missingRequire', + 'missingReturn', 'nonStandardJsDocs', 'strictModuleDepCheck', + 'suspiciousCode', + 'tweakValidation', + 'typeInvalidation', + 'undefinedNames', 'undefinedVars', 'underscore', 'unknownDefines', + 'unnecessaryCasts', + 'unusedPrivateMembers', 'uselessCode', 'visibility', - 'with']) + 'with', + ]) HAS_DESCRIPTION = frozenset([ - 'define', 'deprecated', 'desc', 'fileoverview', 'license', 'param', - 'preserve', 'return', 'supported']) + 'define', + 'deprecated', + 'desc', + 'fileoverview', + 'license', + 'param', + 'preserve', + 'return', + 'supported', + ]) + # Docflags whose argument should be parsed using the typeannotation parser. HAS_TYPE = frozenset([ - 'define', 'enum', 'extends', 'implements', 'param', 'return', 'type', - 'suppress']) + 'const', + 'define', + 'enum', + 'extends', + 'final', + 'implements', + 'mods', + 'package', + 'param', + 'private', + 'protected', + 'public', + 'return', + 'suppress', + 'type', + 'typedef', + ]) - TYPE_ONLY = frozenset(['enum', 'extends', 'implements', 'suppress', 'type']) + # Docflags for which it's ok to omit the type (flag without an argument). + CAN_OMIT_TYPE = frozenset([ + 'const', + 'enum', + 'final', + 'package', + 'private', + 'protected', + 'public', + 'suppress', # We'll raise a separate INCORRECT_SUPPRESS_SYNTAX instead. + ]) + + # Docflags that only take a type as an argument and should not parse a + # following description. + TYPE_ONLY = frozenset([ + 'const', + 'enum', + 'extends', + 'implements', + 'package', + 'suppress', + 'type', + ]) HAS_NAME = frozenset(['param']) EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$') EMPTY_STRING = re.compile(r'^\s*$') - def __init__(self, flag_token): + def __init__(self, flag_token, error_handler=None): """Creates the DocFlag object and attaches it to the given start token. Args: flag_token: The starting token of the flag. + error_handler: An optional error handler for errors occurring while + parsing the doctype. """ self.flag_token = flag_token self.flag_type = flag_token.string.strip().lstrip('@') # Extract type, if applicable. 
self.type = None + self.jstype = None self.type_start_token = None self.type_end_token = None if self.flag_type in self.HAS_TYPE: @@ -142,28 +233,37 @@ class DocFlag(object): if brace: end_token, contents = _GetMatchingEndBraceAndContents(brace) self.type = contents + self.jstype = typeannotation.Parse(brace, end_token, + error_handler) self.type_start_token = brace self.type_end_token = end_token elif (self.flag_type in self.TYPE_ONLY and - flag_token.next.type not in Type.FLAG_ENDING_TYPES): + flag_token.next.type not in Type.FLAG_ENDING_TYPES and + flag_token.line_number == flag_token.next.line_number): + # b/10407058. If the flag is expected to be followed by a type then + # search for type in same line only. If no token after flag in same + # line then conclude that no type is specified. self.type_start_token = flag_token.next self.type_end_token, self.type = _GetEndTokenAndContents( self.type_start_token) if self.type is not None: self.type = self.type.strip() + self.jstype = typeannotation.Parse(flag_token, self.type_end_token, + error_handler) # Extract name, if applicable. self.name_token = None self.name = None if self.flag_type in self.HAS_NAME: # Handle bad case, name could be immediately after flag token. - self.name_token = _GetNextIdentifierToken(flag_token) + self.name_token = _GetNextPartialIdentifierToken(flag_token) # Handle good case, if found token is after type start, look for - # identifier after type end, since types contain identifiers. + # a identifier (substring to cover cases like [cnt] b/4197272) after + # type end, since types contain identifiers. if (self.type and self.name_token and tokenutil.Compare(self.name_token, self.type_start_token) > 0): - self.name_token = _GetNextIdentifierToken(self.type_end_token) + self.name_token = _GetNextPartialIdentifierToken(self.type_end_token) if self.name_token: self.name = self.name_token.string @@ -191,6 +291,13 @@ class DocFlag(object): self.description_end_token, self.description = ( _GetEndTokenAndContents(interesting_token)) + def HasType(self): + """Returns whether this flag should have a type annotation.""" + return self.flag_type in self.HAS_TYPE + + def __repr__(self): + return '' % (self.flag_type, repr(self.jstype)) + class DocComment(object): """JavaScript doc comment object. @@ -207,14 +314,21 @@ class DocComment(object): Args: start_token: The first token in the doc comment. """ - self.__params = {} - self.ordered_params = [] - self.__flags = {} + self.__flags = [] self.start_token = start_token self.end_token = None self.suppressions = {} self.invalidated = False + @property + def ordered_params(self): + """Gives the list of parameter names as a list of strings.""" + params = [] + for flag in self.__flags: + if flag.flag_type == 'param' and flag.name: + params.append(flag.name) + return params + def Invalidate(self): """Indicate that the JSDoc is well-formed but we had problems parsing it. @@ -228,28 +342,27 @@ class DocComment(object): """Test whether Invalidate() has been called.""" return self.invalidated - def AddParam(self, name, param_type): - """Add a new documented parameter. - - Args: - name: The name of the parameter to document. - param_type: The parameter's declared JavaScript type. - """ - self.ordered_params.append(name) - self.__params[name] = param_type - def AddSuppression(self, token): """Add a new error suppression flag. Args: token: The suppression flag token. 
""" - #TODO(user): Error if no braces - brace = tokenutil.SearchUntil(token, [Type.DOC_START_BRACE], - [Type.DOC_FLAG]) - if brace: - end_token, contents = _GetMatchingEndBraceAndContents(brace) - self.suppressions[contents] = token + flag = token and token.attached_object + if flag and flag.jstype: + for suppression in flag.jstype.IterIdentifiers(): + self.suppressions[suppression] = token + + def SuppressionOnly(self): + """Returns whether this comment contains only suppression flags.""" + if not self.__flags: + return False + + for flag in self.__flags: + if flag.flag_type != 'suppress': + return False + + return True def AddFlag(self, flag): """Add a new document flag. @@ -257,7 +370,7 @@ class DocComment(object): Args: flag: DocFlag object. """ - self.__flags[flag.flag_type] = flag + self.__flags.append(flag) def InheritsDocumentation(self): """Test if the jsdoc implies documentation inheritance. @@ -265,10 +378,7 @@ class DocComment(object): Returns: True if documentation may be pulled off the superclass. """ - return (self.HasFlag('inheritDoc') or - (self.HasFlag('override') and - not self.HasFlag('return') and - not self.HasFlag('param'))) + return self.HasFlag('inheritDoc') or self.HasFlag('override') def HasFlag(self, flag_type): """Test if the given flag has been set. @@ -279,7 +389,10 @@ class DocComment(object): Returns: True if the flag is set. """ - return flag_type in self.__flags + for flag in self.__flags: + if flag.flag_type == flag_type: + return True + return False def GetFlag(self, flag_type): """Gets the last flag of the given type. @@ -290,7 +403,101 @@ class DocComment(object): Returns: The last instance of the given flag type in this doc comment. """ - return self.__flags[flag_type] + for flag in reversed(self.__flags): + if flag.flag_type == flag_type: + return flag + + def GetDocFlags(self): + """Return the doc flags for this comment.""" + return list(self.__flags) + + def _YieldDescriptionTokens(self): + for token in self.start_token: + + if (token is self.end_token or + token.type is javascripttokens.JavaScriptTokenType.DOC_FLAG or + token.type not in javascripttokens.JavaScriptTokenType.COMMENT_TYPES): + return + + if token.type not in [ + javascripttokens.JavaScriptTokenType.START_DOC_COMMENT, + javascripttokens.JavaScriptTokenType.END_DOC_COMMENT, + javascripttokens.JavaScriptTokenType.DOC_PREFIX]: + yield token + + @property + def description(self): + return tokenutil.TokensToString( + self._YieldDescriptionTokens()) + + def GetTargetIdentifier(self): + """Returns the identifier (as a string) that this is a comment for. + + Note that this uses method uses GetIdentifierForToken to get the full + identifier, even if broken up by whitespace, newlines, or comments, + and thus could be longer than GetTargetToken().string. + + Returns: + The identifier for the token this comment is for. + """ + token = self.GetTargetToken() + if token: + return tokenutil.GetIdentifierForToken(token) + + def GetTargetToken(self): + """Get this comment's target token. + + Returns: + The token that is the target of this comment, or None if there isn't one. + """ + + # File overviews describe the file, not a token. 
+ if self.HasFlag('fileoverview'): + return + + skip_types = frozenset([ + Type.WHITESPACE, + Type.BLANK_LINE, + Type.START_PAREN]) + + target_types = frozenset([ + Type.FUNCTION_NAME, + Type.IDENTIFIER, + Type.SIMPLE_LVALUE]) + + token = self.end_token.next + while token: + if token.type in target_types: + return token + + # Handles the case of a comment on "var foo = ...' + if token.IsKeyword('var'): + next_code_token = tokenutil.CustomSearch( + token, + lambda t: t.type not in Type.NON_CODE_TYPES) + + if (next_code_token and + next_code_token.IsType(Type.SIMPLE_LVALUE)): + return next_code_token + + return + + # Handles the case of a comment on "function foo () {}" + if token.type is Type.FUNCTION_DECLARATION: + next_code_token = tokenutil.CustomSearch( + token, + lambda t: t.type not in Type.NON_CODE_TYPES) + + if next_code_token.IsType(Type.FUNCTION_NAME): + return next_code_token + + return + + # Skip types will end the search. + if token.type not in skip_types: + return + + token = token.next def CompareParameters(self, params): """Computes the edit distance and list from the function params to the docs. @@ -360,7 +567,8 @@ class DocComment(object): Returns: A string representation of this object. """ - return '' % (str(self.__params), str(self.__flags)) + return '' % ( + str(self.ordered_params), str(self.__flags)) # @@ -409,28 +617,25 @@ def _GetMatchingEndBraceAndContents(start_brace): return token, ''.join(contents) -def _GetNextIdentifierToken(start_token): - """Searches for and returns the first identifier at the beginning of a token. +def _GetNextPartialIdentifierToken(start_token): + """Returns the first token having identifier as substring after a token. - Searches each token after the start to see if it starts with an identifier. - If found, will split the token into at most 3 piecies: leading whitespace, - identifier, rest of token, returning the identifier token. If no identifier is - found returns None and changes no tokens. Search is abandoned when a - FLAG_ENDING_TYPE token is found. + Searches each token after the start to see if it contains an identifier. + If found, token is returned. If no identifier is found returns None. + Search is abandoned when a FLAG_ENDING_TYPE token is found. Args: start_token: The token to start searching after. Returns: - The identifier token is found, None otherwise. + The token found containing identifier, None otherwise. """ token = start_token.next - while token and not token.type in Type.FLAG_ENDING_TYPES: - match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.match( + while token and token.type not in Type.FLAG_ENDING_TYPES: + match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.search( token.string) - if (match is not None and token.type == Type.COMMENT and - len(token.string) == len(match.group(0))): + if match is not None and token.type == Type.COMMENT: return token token = token.next @@ -455,7 +660,8 @@ def _GetEndTokenAndContents(start_token): last_line = iterator.line_number last_token = None contents = '' - while not iterator.type in Type.FLAG_ENDING_TYPES: + doc_depth = 0 + while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0: if (iterator.IsFirstInLine() and DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)): # If we have a blank comment line, consider that an implicit @@ -470,6 +676,17 @@ def _GetEndTokenAndContents(start_token): # only a doc comment prefix or whitespace. 
break + # b/2983692 + # don't prematurely match against a @flag if inside a doc flag + # need to think about what is the correct behavior for unterminated + # inline doc flags + if (iterator.type == Type.DOC_START_BRACE and + iterator.next.type == Type.DOC_INLINE_FLAG): + doc_depth += 1 + elif (iterator.type == Type.DOC_END_BRACE and + doc_depth > 0): + doc_depth -= 1 + if iterator.type in Type.FLAG_DESCRIPTION_TYPES: contents += iterator.string last_token = iterator @@ -501,6 +718,9 @@ class Function(object): is_constructor: If the function is a constructor. name: The name of the function, whether given in the function keyword or as the lvalue the function is assigned to. + start_token: First token of the function (the function' keyword token). + end_token: Last token of the function (the closing '}' token). + parameters: List of parameter names. """ def __init__(self, block_depth, is_assigned, doc, name): @@ -509,9 +729,13 @@ class Function(object): self.is_constructor = doc and doc.HasFlag('constructor') self.is_interface = doc and doc.HasFlag('interface') self.has_return = False + self.has_throw = False self.has_this = False self.name = name self.doc = doc + self.start_token = None + self.end_token = None + self.parameters = None class StateTracker(object): @@ -538,7 +762,7 @@ class StateTracker(object): self._block_depth = 0 self._is_block_close = False self._paren_depth = 0 - self._functions = [] + self._function_stack = [] self._functions_by_name = {} self._last_comment = None self._doc_comment = None @@ -548,6 +772,24 @@ class StateTracker(object): self._last_line = None self._first_token = None self._documented_identifiers = set() + self._variables_in_scope = [] + + def DocFlagPass(self, start_token, error_handler): + """Parses doc flags. + + This pass needs to be executed before the aliaspass and we don't want to do + a full-blown statetracker dry run for these. + + Args: + start_token: The token at which to start iterating + error_handler: An error handler for error reporting. + """ + if not start_token: + return + doc_flag_types = (Type.DOC_FLAG, Type.DOC_INLINE_FLAG) + for token in start_token: + if token.type in doc_flag_types: + token.attached_object = self._doc_flag(token, error_handler) def InFunction(self): """Returns true if the current token is within a function. @@ -555,7 +797,7 @@ class StateTracker(object): Returns: True if the current token is within a function. """ - return bool(self._functions) + return bool(self._function_stack) def InConstructor(self): """Returns true if the current token is within a constructor. @@ -563,7 +805,7 @@ class StateTracker(object): Returns: True if the current token is within a constructor. """ - return self.InFunction() and self._functions[-1].is_constructor + return self.InFunction() and self._function_stack[-1].is_constructor def InInterfaceMethod(self): """Returns true if the current token is within an interface method. @@ -572,10 +814,10 @@ class StateTracker(object): True if the current token is within an interface method. """ if self.InFunction(): - if self._functions[-1].is_interface: + if self._function_stack[-1].is_interface: return True else: - name = self._functions[-1].name + name = self._function_stack[-1].name prototype_index = name.find('.prototype.') if prototype_index != -1: class_function_name = name[0:prototype_index] @@ -591,7 +833,7 @@ class StateTracker(object): Returns: True if the current token is within a top level function. 
""" - return len(self._functions) == 1 and self.InTopLevel() + return len(self._function_stack) == 1 and self.InTopLevel() def InAssignedFunction(self): """Returns true if the current token is within a function variable. @@ -599,7 +841,7 @@ class StateTracker(object): Returns: True if if the current token is within a function variable """ - return self.InFunction() and self._functions[-1].is_assigned + return self.InFunction() and self._function_stack[-1].is_assigned def IsFunctionOpen(self): """Returns true if the current token is a function block open. @@ -607,8 +849,8 @@ class StateTracker(object): Returns: True if the current token is a function block open. """ - return (self._functions and - self._functions[-1].block_depth == self._block_depth - 1) + return (self._function_stack and + self._function_stack[-1].block_depth == self._block_depth - 1) def IsFunctionClose(self): """Returns true if the current token is a function block close. @@ -616,8 +858,8 @@ class StateTracker(object): Returns: True if the current token is a function block close. """ - return (self._functions and - self._functions[-1].block_depth == self._block_depth) + return (self._function_stack and + self._function_stack[-1].block_depth == self._block_depth) def InBlock(self): """Returns true if the current token is within a block. @@ -659,6 +901,30 @@ class StateTracker(object): """ return bool(self._paren_depth) + def ParenthesesDepth(self): + """Returns the number of parens surrounding the token. + + Returns: + The number of parenthesis surrounding the token. + """ + return self._paren_depth + + def BlockDepth(self): + """Returns the number of blocks in which the token is nested. + + Returns: + The number of blocks in which the token is nested. + """ + return self._block_depth + + def FunctionDepth(self): + """Returns the number of functions in which the token is nested. + + Returns: + The number of functions in which the token is nested. + """ + return len(self._function_stack) + def InTopLevel(self): """Whether we are at the top level in the class. @@ -752,7 +1018,8 @@ class StateTracker(object): Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX): f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT], None, True) - if f and f.attached_object.type_start_token is not None: + if (f and f.attached_object.type_start_token is not None and + f.attached_object.type_end_token is not None): return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and tokenutil.Compare(t, f.attached_object.type_end_token) < 0) return False @@ -763,8 +1030,8 @@ class StateTracker(object): Returns: The current Function object. """ - if self._functions: - return self._functions[-1] + if self._function_stack: + return self._function_stack[-1] def GetBlockDepth(self): """Return the block depth. @@ -786,6 +1053,29 @@ class StateTracker(object): """Return the very first token in the file.""" return self._first_token + def IsVariableInScope(self, token_string): + """Checks if string is variable in current scope. + + For given string it checks whether the string is a defined variable + (including function param) in current state. + + E.g. if variables defined (variables in current scope) is docs + then docs, docs.length etc will be considered as variable in current + scope. This will help in avoding extra goog.require for variables. + + Args: + token_string: String to check if its is a variable in current scope. + + Returns: + true if given string is a variable in current scope. 
+ """ + for variable in self._variables_in_scope: + if (token_string == variable + or token_string.startswith(variable + '.')): + return True + + return False + def HandleToken(self, token, last_non_space_token): """Handles the given token and updates state. @@ -808,6 +1098,12 @@ class StateTracker(object): # by language. self._block_types.append(self.GetBlockType(token)) + # When entering a function body, record its parameters. + if self.InFunction(): + function = self._function_stack[-1] + if self._block_depth == function.block_depth + 1: + function.parameters = self.GetParams() + # Track block depth. elif type == Type.END_BLOCK: self._is_block_close = not self.InObjectLiteral() @@ -833,21 +1129,23 @@ class StateTracker(object): self._doc_comment.end_token = token elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG): - flag = self._doc_flag(token) - token.attached_object = flag + # Don't overwrite flags if they were already parsed in a previous pass. + if token.attached_object is None: + flag = self._doc_flag(token) + token.attached_object = flag + else: + flag = token.attached_object self._doc_comment.AddFlag(flag) - if flag.flag_type == 'param' and flag.name: - self._doc_comment.AddParam(flag.name, flag.type) - elif flag.flag_type == 'suppress': + if flag.flag_type == 'suppress': self._doc_comment.AddSuppression(token) elif type == Type.FUNCTION_DECLARATION: last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None, True) doc = None - # Only functions outside of parens are eligible for documentation. - if not self._paren_depth: + # Only top-level functions are eligible for documentation. + if self.InTopLevel(): doc = self._doc_comment name = '' @@ -861,8 +1159,7 @@ class StateTracker(object): # my.function.foo. # bar = function() ... identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True) - while identifier and identifier.type in ( - Type.IDENTIFIER, Type.SIMPLE_LVALUE): + while identifier and tokenutil.IsIdentifierOrDot(identifier): name = identifier.string + name # Traverse behind us, skipping whitespace and comments. while True: @@ -877,14 +1174,22 @@ class StateTracker(object): next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2) function = Function(self._block_depth, is_assigned, doc, name) - self._functions.append(function) + function.start_token = token + + self._function_stack.append(function) self._functions_by_name[name] = function + # Add a delimiter in stack for scope variables to define start of + # function. This helps in popping variables of this function when + # function declaration ends. 
+ self._variables_in_scope.append('') + elif type == Type.START_PARAMETERS: self._cumulative_params = '' elif type == Type.PARAMETERS: self._cumulative_params += token.string + self._variables_in_scope.extend(self.GetParams()) elif type == Type.KEYWORD and token.string == 'return': next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES) @@ -893,6 +1198,22 @@ class StateTracker(object): if function: function.has_return = True + elif type == Type.KEYWORD and token.string == 'throw': + function = self.GetFunction() + if function: + function.has_throw = True + + elif type == Type.KEYWORD and token.string == 'var': + function = self.GetFunction() + next_token = tokenutil.Search(token, [Type.IDENTIFIER, + Type.SIMPLE_LVALUE]) + + if next_token: + if next_token.type == Type.SIMPLE_LVALUE: + self._variables_in_scope.append(next_token.values['identifier']) + else: + self._variables_in_scope.append(next_token.string) + elif type == Type.SIMPLE_LVALUE: identifier = token.values['identifier'] jsdoc = self.GetDocComment() @@ -906,7 +1227,7 @@ class StateTracker(object): # Detect documented non-assignments. next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES) - if next_token.IsType(Type.SEMICOLON): + if next_token and next_token.IsType(Type.SEMICOLON): if (self._last_non_space_token and self._last_non_space_token.IsType(Type.END_DOC_COMMENT)): self._documented_identifiers.add(token.string) @@ -926,7 +1247,6 @@ class StateTracker(object): if function: function.has_this = True - def HandleAfterToken(self, token): """Handle updating state after a token has been checked. @@ -952,7 +1272,17 @@ class StateTracker(object): if self.InFunction() and self.IsFunctionClose(): # TODO(robbyw): Detect the function's name for better errors. - self._functions.pop() + function = self._function_stack.pop() + function.end_token = token + + # Pop all variables till delimiter ('') those were defined in the + # function being closed so make them out of scope. + while self._variables_in_scope and self._variables_in_scope[-1]: + self._variables_in_scope.pop() + + # Pop delimiter + if self._variables_in_scope: + self._variables_in_scope.pop() elif type == Type.END_PARAMETERS and self._doc_comment: self._doc_comment = None diff --git a/tools/closure_linter/closure_linter/statetracker_test.py b/tools/closure_linter/closure_linter/statetracker_test.py new file mode 100755 index 0000000..494dc64 --- /dev/null +++ b/tools/closure_linter/closure_linter/statetracker_test.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
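For reference, a minimal sketch of how the reworked DocComment flag API above behaves: flags are now kept as an ordered list, ordered_params is derived from that list, and GetFlag returns the most recent matching flag. The _StubFlag helper is a made-up stand-in for statetracker.DocFlag, in the spirit of the fake flag object used by the unit tests that follow; this is an illustrative sketch, not part of the vendored patch.

# Illustrative sketch only (Python 2, matching the codebase).
# _StubFlag is a hypothetical stand-in for statetracker.DocFlag.
from closure_linter import statetracker


class _StubFlag(object):

  def __init__(self, flag_type, name=None):
    self.flag_type = flag_type
    self.name = name


comment = statetracker.DocComment(None)
comment.AddFlag(_StubFlag('param', 'foo'))
comment.AddFlag(_StubFlag('param', 'bar'))

assert comment.ordered_params == ['foo', 'bar']  # Derived from the flag list.
assert comment.GetFlag('param').name == 'bar'    # Last matching flag wins.
assert comment.HasFlag('param')
assert not comment.SuppressionOnly()  # Non-suppress flags are present.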
+ +"""Unit tests for the statetracker module.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + + + +import unittest as googletest + +from closure_linter import javascripttokens +from closure_linter import statetracker +from closure_linter import testutil + + +class _FakeDocFlag(object): + + def __repr__(self): + return '@%s %s' % (self.flag_type, self.name) + + +class IdentifierTest(googletest.TestCase): + + def testJustIdentifier(self): + a = javascripttokens.JavaScriptToken( + 'abc', javascripttokens.JavaScriptTokenType.IDENTIFIER, 'abc', 1) + + st = statetracker.StateTracker() + st.HandleToken(a, None) + + +class DocCommentTest(googletest.TestCase): + + @staticmethod + def _MakeDocFlagFake(flag_type, name=None): + flag = _FakeDocFlag() + flag.flag_type = flag_type + flag.name = name + return flag + + def testDocFlags(self): + comment = statetracker.DocComment(None) + + a = self._MakeDocFlagFake('param', 'foo') + comment.AddFlag(a) + + b = self._MakeDocFlagFake('param', '') + comment.AddFlag(b) + + c = self._MakeDocFlagFake('param', 'bar') + comment.AddFlag(c) + + self.assertEquals( + ['foo', 'bar'], + comment.ordered_params) + + self.assertEquals( + [a, b, c], + comment.GetDocFlags()) + + def testInvalidate(self): + comment = statetracker.DocComment(None) + + self.assertFalse(comment.invalidated) + self.assertFalse(comment.IsInvalidated()) + + comment.Invalidate() + + self.assertTrue(comment.invalidated) + self.assertTrue(comment.IsInvalidated()) + + def testSuppressionOnly(self): + comment = statetracker.DocComment(None) + + self.assertFalse(comment.SuppressionOnly()) + comment.AddFlag(self._MakeDocFlagFake('suppress')) + self.assertTrue(comment.SuppressionOnly()) + comment.AddFlag(self._MakeDocFlagFake('foo')) + self.assertFalse(comment.SuppressionOnly()) + + def testRepr(self): + comment = statetracker.DocComment(None) + comment.AddFlag(self._MakeDocFlagFake('param', 'foo')) + comment.AddFlag(self._MakeDocFlagFake('param', 'bar')) + + self.assertEquals( + '', + repr(comment)) + + def testDocFlagParam(self): + comment = self._ParseComment(""" + /** + * @param {string} [name] Name of customer. + */""") + flag = comment.GetFlag('param') + self.assertEquals('string', flag.type) + self.assertEquals('string', flag.jstype.ToString()) + self.assertEquals('[name]', flag.name) + + def _ParseComment(self, script): + """Parse a script that contains one comment and return it.""" + _, comments = testutil.ParseFunctionsAndComments(script) + self.assertEquals(1, len(comments)) + return comments[0] + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/closure_linter/strict_test.py b/tools/closure_linter/closure_linter/strict_test.py new file mode 100755 index 0000000..2634456 --- /dev/null +++ b/tools/closure_linter/closure_linter/strict_test.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# Copyright 2013 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for gjslint --strict. + +Tests errors that can be thrown by gjslint when in strict mode. +""" + + + +import unittest + +import gflags as flags +import unittest as googletest + +from closure_linter import errors +from closure_linter import runner +from closure_linter.common import erroraccumulator + +flags.FLAGS.strict = True + + +class StrictTest(unittest.TestCase): + """Tests scenarios where strict generates warnings.""" + + def testUnclosedString(self): + """Tests warnings are reported when nothing is disabled. + + b/11450054. + """ + original = [ + 'bug = function() {', + ' (\'foo\'\');', + '};', + '', + ] + + expected = [errors.FILE_DOES_NOT_PARSE, errors.MULTI_LINE_STRING, + errors.FILE_IN_BLOCK] + self._AssertErrors(original, expected) + + def _AssertErrors(self, original, expected_errors): + """Asserts that the error fixer corrects original to expected.""" + + # Trap gjslint's output parse it to get messages added. + error_accumulator = erroraccumulator.ErrorAccumulator() + runner.Run('testing.js', error_accumulator, source=original) + error_nums = [e.code for e in error_accumulator.GetErrors()] + + error_nums.sort() + expected_errors.sort() + self.assertListEqual(error_nums, expected_errors) + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/closure_linter/testdata/all_js_wrapped.js b/tools/closure_linter/closure_linter/testdata/all_js_wrapped.js new file mode 100644 index 0000000..6eb3b38 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/all_js_wrapped.js @@ -0,0 +1,5 @@ +(function($) { + // My code goes here. + // linter should not throw random exceptions because the file starts with + // an open paren. Regression test for bug 2966755. +})(jQuery); diff --git a/tools/closure_linter/closure_linter/testdata/blank_lines.js b/tools/closure_linter/closure_linter/testdata/blank_lines.js new file mode 100644 index 0000000..1dc3da2 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/blank_lines.js @@ -0,0 +1,104 @@ +/** + * @fileoverview This is a file overview with no lines above it, at the top of + * the file (GOOD). + */ +/** // WRONG_BLANK_LINE_COUNT + * @fileoverview This is a file overview with no lines above it (BAD). + */ + +/** + * @fileoverview This is a file overview with one line above it (GOOD). + */ + + +/** + * @fileoverview This is a file overview with two lines above it (GOOD). + */ + +/** // WRONG_BLANK_LINE_COUNT + * A constructor with 1 line above it (BAD). + * @constructor + */ +function someFunction() {} + + +/** // WRONG_BLANK_LINE_COUNT + * A constructor with 2 lines above it (BAD). + * @constructor + */ +function someFunction() {} + + + +/** + * A constructor with 3 lines above it (GOOD). + * @constructor + */ +function someFunction() {} + + + + +/** // WRONG_BLANK_LINE_COUNT + * A constructor with 4 lines above it (BAD). + * @constructor + */ +function someFunction() {} + +/** // WRONG_BLANK_LINE_COUNT + * Top level block with 1 line above it (BAD). + */ +function someFunction() {} + + +/** + * Top level block with 2 lines above it (GOOD). + */ +function someFunction() {} + + + +/** // WRONG_BLANK_LINE_COUNT + * Top level block with 3 lines above it (BAD). + */ +function someFunction() {} + + +// -1: EXTRA_SPACE +/** + * Top level block with 2 lines above it, one contains whitespace (GOOD). + */ +function someFunction() {} + + +// This comment should be ignored. +/** + * Top level block with 2 lines above it (GOOD). 
+ */ +function someFunction() {} + +// Should not check jsdocs which are inside a block. +var x = { + /** + * @constructor + */ +}; + +/** + * This jsdoc-style comment should not be required to have two lines above it + * since it does not immediately precede any code. + */ +// This is a comment. + +/** + * This jsdoc-style comment should not be required to have two lines above it + * since it does not immediately precede any code. + */ +/** + * This is a comment. + */ + +/** + * This jsdoc-style comment should not be required to have two lines above it + * since it does not immediately precede any code. + */ diff --git a/tools/closure_linter/closure_linter/testdata/bugs.js b/tools/closure_linter/closure_linter/testdata/bugs.js new file mode 100644 index 0000000..7352005 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/bugs.js @@ -0,0 +1,43 @@ +// Copyright 2007 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A file full of known bugs - this file serves only as a reference and is not +// tested in any way. + +/** + * @param {{foo} x This is a bad record type. + * @param {{foo}} y This is a good record type with bad spacing. + * @param {{foo}} This is a good record type with no parameter name. + */ +function f(x, y, z) { +} + + +// Should report extra space errors. +var magicProps = { renderRow: 0 }; + +// No error reported here for missing space before {. +if (x){ +} + +// Should have a "brace on wrong line" error. +if (x) +{ +} + +// We could consider not reporting it when wrapping makes it necessary, as in: +if (aLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongCondition) + { + // Code here. +} diff --git a/tools/closure_linter/closure_linter/testdata/empty_file.js b/tools/closure_linter/closure_linter/testdata/empty_file.js new file mode 100644 index 0000000..e69de29 diff --git a/tools/closure_linter/closure_linter/testdata/ends_with_block.js b/tools/closure_linter/closure_linter/testdata/ends_with_block.js new file mode 100644 index 0000000..40aa872 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/ends_with_block.js @@ -0,0 +1,19 @@ +// Copyright 2009 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Regression test for an old crasher. 
+ +if (x) { + alert('Hello world'); +} diff --git a/tools/closure_linter/closure_linter/testdata/externs.js b/tools/closure_linter/closure_linter/testdata/externs.js new file mode 100644 index 0000000..94e2ad3 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/externs.js @@ -0,0 +1,34 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * Externs files are treated specially. We don't require documentation or + * return statements in functions when they are documented. + * + * @externs + */ + + +function VXMLBaseElement() {} + + +/** + * Should not complain about return tag with no return statement in + * externs.js file. + * @param {string} attrName The name of the attribute. + * @return {string} + */ +VXMLBaseElement.prototype.getAttribute = function(attrName) {}; + +VXMLBaseElement.prototype.undocumentedMethod = function() {}; diff --git a/tools/closure_linter/closure_linter/testdata/externs_jsdoc.js b/tools/closure_linter/closure_linter/testdata/externs_jsdoc.js new file mode 100644 index 0000000..926593f --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/externs_jsdoc.js @@ -0,0 +1,37 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Tests that the externs tag within a fileoverview comment is + * sufficient to identify an externs file. + * @externs + * + * Externs files are treated specially. We don't require documentation or + * return statements in functions when they are documented. + * + */ + + +function VXMLBaseElement() {} + + +/** + * Should not complain about return tag with no return statement in + * an externs file. + * @param {string} attrName The name of the attribute. + * @return {string} + */ +VXMLBaseElement.prototype.getAttribute = function(attrName) {}; + +VXMLBaseElement.prototype.undocumentedMethod = function() {}; diff --git a/tools/closure_linter/closure_linter/testdata/file_level_comment.js b/tools/closure_linter/closure_linter/testdata/file_level_comment.js new file mode 100644 index 0000000..8658115 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/file_level_comment.js @@ -0,0 +1,13 @@ +/** + * File level comment as the first thing in a file (GOOD). + * @license Copyright 2009 SomeThirdParty. + */ +/** // WRONG_BLANK_LINE_COUNT + * Comment block that is not the first thing in a file (BAD). + * @license Copyright 2009 SomeThirdParty. 
+ */ + +/** // WRONG_BLANK_LINE_COUNT + * Top level comment with a single line above it (BAD). + * @license Copyright 2009 SomeThirdParty. + */ diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.in.html b/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.in.html new file mode 100644 index 0000000..c341bb9 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.in.html @@ -0,0 +1,52 @@ + + + + + +Tests + + + + + + + + + + + diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.out.html b/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.out.html new file mode 100644 index 0000000..bb9a16f --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.out.html @@ -0,0 +1,51 @@ + + + + + +Tests + + + + + + + + + + + diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.in.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.in.js new file mode 100644 index 0000000..3a1ccb1 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.in.js @@ -0,0 +1,293 @@ +// Copyright 2008 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +/** + * @fileoverview Autofix test script. + * + * @author robbyw@google.com (Robby Walker) + * @author robbyw@google.com (Robby Walker) + * @author robbyw@google.com(Robby Walker) + * @author robbyw@google.com + * @author robbyw@google.com Robby + */ + +goog.provide('w'); +goog.provide('Y'); +goog.provide('X'); +goog.provide('Z'); + +// Some comment about why this is suppressed top. +/** @suppress {extraRequire} */ +goog.require('dummy.NotUsedTop'); // Comment top. +goog.require('dummy.Bb'); +/** @suppress {extraRequire} */ +// Some comment about why this is suppressed different. +goog.require('dummy.NotUsedDifferentComment'); +goog.require('dummy.Cc'); +// Some comment about why this is suppressed middle. +/** @suppress {extraRequire} */ +goog.require('dummy.NotUsedMiddle'); // Comment middle. +goog.require('dummy.Dd'); +goog.require('dummy.aa'); +// Some comment about why this is suppressed bottom. +/** @suppress {extraRequire} */ +goog.require('dummy.NotUsedBottom'); // Comment bottom. + +var x = new dummy.Bb(); +dummy.Cc.someMethod(); +dummy.aa.someMethod(); + + +/** + * @param {number|null} badTypeWithExtraSpace |null -> ?. + * @returns {number} returns -> return. + */ +x.y = function( badTypeWithExtraSpace) { +} + + +/** @type {function():null|Array.} only 2nd |null -> ? */ +x.badType; + + +/** @type {Array.|null} only 2nd |null -> ? */ +x.wickedType; + + +/** @type { string | null } null -> ? 
*/ +x.nullWithSpace; + +spaceBeforeSemicolon = 10 ; +spaceBeforeParen = 10 +(5 * 2); +arrayNoSpace =[10]; +arrayExtraSpace [10] = 10; +spaceBeforeClose = ([10 ] ); +spaceAfterStart = ( [ 10]); +extraSpaceAfterPlus = 10 + 20; +extraSpaceBeforeOperator = x ++; +extraSpaceBeforeOperator = x --; +extraSpaceBeforeComma = x(y , z); +missingSpaceBeforeOperator = x+ y; +missingSpaceAfterOperator = x +y; +missingBothSpaces = x+y; +equalsSpacing= 10; +equalsSpacing =10; +equalsSpacing=10; +equalsSpacing=[10]; +reallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongName=1000; + +"string should be single quotes"; + +// Regression test for interaction between space fixing and semicolon fixing - +// previously the fix for the missing space caused the function to be seen as +// a non-assigned function and then its semicolon was being stripped. +x=function() { +}; + +/** + * Missing a newline. + * @constructor + * @extends {a.b.c} + */ +x.y.z = function() { +};goog.inherits(x.y.z, a.b.c); + +/** + * Extra blank line. + * @constructor + * @extends {a.b.c} + */ +x.y.z = function() { +}; + +goog.inherits(x.y.z, a.b.c); + +/** + * Perfect! + * @constructor + * @extends {a.b.c} + */ +x.y.z = function() { +}; +goog.inherits(x.y.z, a.b.c); + +// Whitespace at end of comment. +var removeWhiteSpaceAtEndOfLine; + +/** + * Whitespace at EOL (here and the line of code and the one below it). + * @type {string} + * @param {string} Description with whitespace at EOL. + */ +x = 10; + +/** + * @type number + */ +foo.bar = 3; + +/** + * @enum {boolean + */ +bar.baz = true; + +/** + * @extends Object} + */ +bar.foo = x; + +/** + * @type function(string, boolean) : void + */ +baz.bar = goog.nullFunction; + +/** {@inheritDoc} */ +baz.baz = function() { +}; + +TR_Node.splitDomTreeAt(splitNode, clone, /** @type Node */ (quoteNode)); + +x = [1, 2, 3,]; +x = { + a: 1, +}; + +if (x) { +}; + +for (i = 0;i < 10; i++) { +} +for (i = 0; i < 10;i++) { +} +for ( i = 0; i < 10; i++) { +} +for (i = 0 ; i < 10; i++) { +} +for (i = 0; i < 10 ; i++) { +} +for (i = 0; i < 10; i++ ) { +} +for (i = 0; i < 10; i++) { +} +for (i = 0; i < 10; i++) { +} +for (i = 0 ;i < 10; i++) { +} + +var x = 10 +var y = 100; + + +/** + * This is to test the ability to add or remove a = in type to mark optional + * parameters. + * @param {number=} firstArg Incorrect the name should start with opt_. Don't + * handle the fix (yet). + * @param {function(string=):number} opt_function This should end with a =. + * @param {function(number)} opt_otherFunc This should end with a =. + * @param {string} opt_otherArg Incorrect this should be string=. + * @param {{string, number}} opt_recordArg Incorrect this should + * be {string, number}=. + */ +function someFunction(firstArg, opt_function, opt_otherFunc, opt_otherArg, + opt_recordArg) { +} + + +/** + * This is to test the ability to add '...' in type with variable arguments. + * @param {number} firstArg First argument. + * @param {string} var_args This should start with '...'. + */ +function varArgFunction(firstArg, var_args) { +} + + +/** + * This is to test the ability to add '...' in type with variable arguments. + * @param {number} firstArg First argument. + * @param {{a, b}} var_args This should start with '...'. 
+ */ +function varArgRecordTypeFunction(firstArg, var_args) { +} + +var indent = 'correct'; + indent = 'too far'; +if (indent) { +indent = 'too short'; +} +indent = function() { + return a + + b; +}; + + +/** + * Regression test, must insert whitespace before the 'b' when fixing + * indentation. Its different from below case of bug 3473113 as has spaces + * before parameter which was not working in part of the bug fix. + */ +indentWrongSpaces = function( + b) { +}; + + +/** + * Regression test, must insert whitespace before the 'b' when fixing + * indentation. + * @bug 3473113 + */ +indent = function( +b) { +}; + + + +/** + * This is to test the ability to remove multiple extra lines before a top-level + * block. + */ +function someFunction() {} +/** + * This is to test the ability to add multiple extra lines before a top-level + * block. + */ +function someFunction() {} + + +// This is a comment. +/** + * This is to test that blank lines removed before a top level block skips any + * comments above the block. + */ +function someFunction() {} +// This is a comment. +/** + * This is to test that blank lines added before a top level block skips any + * comments above the block. + */ +function someFunction() {} + + +/** + * Parameters don't have proper spaces. + * @param {number} a + * @param {number} b + * @param {number} d + * @param {number} e + * @param {number} f + */ +function someFunction(a, b,d, e, f) { +} + +// File does not end with newline \ No newline at end of file diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.indentation.out.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.indentation.out.js new file mode 100644 index 0000000..4d7c385 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.indentation.out.js @@ -0,0 +1,465 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Test file for indentation. + * @author robbyw@google.com (Robert Walker) + */ + +goog.provide('goog.editor.SeamlessField'); +goog.provide('goog.something'); + +goog.require('goog.events.KeyCodes'); +goog.require('goog.userAgent'); + +// Some good indentation examples. + +var x = 10; +var y = 'some really really really really really really really long string', + z = 14; +if (x == 10) { + x = 12; +} +if (x == 10 || + x == 12) { + x = 14; +} +if (x == 14) { + if (z >= x) { + y = 'test'; + } +} +x = x + + 10 + ( + 14 + ); +something = + 5; +var arr = [ + 1, 2, 3]; +var arr2 = [ + 1, + 2, + 3]; +var obj = { + a: 10, + b: 20 +}; +callAFunction(10, [100, 200], + 300); +callAFunction([ + 100, + 200 +], +300); +callAFunction('abc' + + 'def' + + 'ghi'); + +x.reallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongName + .someMember = 10; + + +// confused on allowed indentation in continued function assignments vs overlong +// wrapped function calls. 
+some.sample() // LINE_ENDS_WITH_DOT + .then(function(response) { + return 1; + }); + + +/** + * Some function. + * @return {number} The number ten. + */ +goog.something.x = function() { + return 10 + + 20; +}; + + +/** + * Some function. + * @param {number} longParameterName1 Some number. + * @param {number} longParameterName2 Some number. + * @param {number} longParameterName3 Some number. + * @return {number} Sum number. + */ +goog.something.y = function(longParameterName1, longParameterName2, + longParameterName3) { + return longParameterName1 + longParameterName2 + longParameterName3; +}; + + +/** + * Some function. + * @param {number} longParameterName1 Some number. + * @param {number} longParameterName2 Some number. + * @param {number} longParameterName3 Some number. + * @return {number} Sum number. + */ +goog.something.z = function(longParameterName1, longParameterName2, + longParameterName3) { + return longParameterName1 + longParameterName2 + longParameterName3; +}; + +if (opt_rootTagName) { + doc.appendChild(doc.createNode(3, + opt_rootTagName, + opt_namespaceUri || '')); +} + + +/** + * For a while this errored because the function call parens were overriding + * the other opening paren. + */ +goog.something.q = function() { + goog.something.x(a.getStartNode(), + a.getStartOffset(), a.getEndNode(), a.getEndOffset()); +}; + +function doSomething() { + var titleElement = goog.something(x, // UNUSED_LOCAL_VARIABLE + y); +} + +switch (x) { + case 10: + y = 100; + break; + + // This should be allowed. + case 20: + if (y) { + z = 0; + } + break; + + // This should be allowed, + // even with mutliple lines. + case 30: + if (y) { + z = 0; + } + break; + + case SadThatYouSwitch + .onSomethingLikeThis: + z = 10; + + case 40: + z = 20; + + default: + break; +} + +// Description of if case. +if (x) { + +// Description of else case should be allowed at this indent. +// Multiple lines is ok. +} else { + +} + + +/** @inheritDoc */ +goog.editor.SeamlessField.prototype.setupMutationEventHandlersGecko = + function() { + var x = 10; + x++; +}; + + +// Regression test for '.' at the end confusing the indentation checker if it is +// not considered to be part of the identifier. +/** @inheritDoc */ +goog.editor.SeamlessField.prototype + .setupMutationEventHandlersGecko = function() { + // -2: LINE_ENDS_WITH_DOT + var x = 10; + x++; +}; + +var someReallyReallyLongVariableName = + y ? /veryVeryVeryVeryVeryVeryVeryVeryLongRegex1/gi : + /slightlyLessLongRegex2/gi; + +var somethingOrOther = z ? + a : + b; + +var z = x ? y : + 'bar'; + +var z = x ? + y : + a; + +var z = z ? + a ? b : c : + d ? e : f; + +var z = z ? + a ? b : + c : + d ? e : f; + +var z = z ? + a ? + b : + c : + d ? e : f; + +var z = z ? + a ? b : c : + d ? e : + f ? g : h; + +var z = z ? + a + + i ? + b + + j : c : + d ? e : + f ? g : h; + + +if (x) { + var block = + // some comment + // and some more comment + (e.keyCode == goog.events.KeyCodes.TAB && !this.dispatchBeforeTab_(e)) || + // #2: to block a Firefox-specific bug where Macs try to navigate + // back a page when you hit command+left arrow or comamnd-right arrow. + // See https://bugzilla.mozilla.org/show_bug.cgi?id=341886 + // get Firefox to fix this. + (goog.userAgent.GECKO && e.metaKey && + (e.keyCode == goog.events.KeyCodes.LEFT || + e.keyCode == goog.events.KeyCodes.RIGHT)); +} + +if (x) { +} + +var somethingElse = { + HAS_W3C_RANGES: goog.userAgent.GECKO || goog.userAgent.WEBKIT || + goog.userAgent.OPERA, + + // A reasonably placed comment. 
+ SOME_KEY: goog.userAgent.IE +}; + +var x = { + ySomethingReallyReallyLong: + 'foo', + z: 'bar' +}; + +// Some bad indentation. + +var a = 10; // WRONG_INDENTATION +var b = 10, + c = 12; // WRONG_INDENTATION +x = x + + 10; // WRONG_INDENTATION +if (x == 14) { + x = 15; // WRONG_INDENTATION + x = 16; // WRONG_INDENTATION +} + +var longFunctionName = function(opt_element) { + return opt_element ? + new z(q(opt_element)) : 100; + // -1: WRONG_INDENTATION +}; + +longFunctionName(a, b, c, + d, e, f); // WRONG_INDENTATION +longFunctionName(a, b, + c, // WRONG_INDENTATION + d); // WRONG_INDENTATION + +x = a ? b : + c; // WRONG_INDENTATION +y = a ? + b : c; // WRONG_INDENTATION + +switch (x) { + case 10: + break; // WRONG_INDENTATION + case 20: // WRONG_INDENTATION + break; + default: // WRONG_INDENTATION + break; +} + +while (true) { + x = 10; // WRONG_INDENTATION + break; // WRONG_INDENTATION +} + +function foo() { + return entryUrlTemplate + .replace( + '${authorResourceId}', + this.sanitizer_.sanitize(authorResourceId)); +} + +return [new x( + 10)]; +return [ + new x(10)]; + +return [new x( + 10)]; // WRONG_INDENTATION +return [new x( + 10)]; // WRONG_INDENTATION + +return {x: y( + z)}; +return { + x: y(z) +}; + +return {x: y( + z)}; // WRONG_INDENTATION +return {x: y( + z)}; // WRONG_INDENTATION + +return /** @type {Window} */ (x( + 'javascript:"' + encodeURI(loadingMessage) + '"')); // WRONG_INDENTATION + +x = { + y: function() {} +}; + +x = { + y: foo, + z: bar + + baz // WRONG_INDENTATION +}; + +x({ + a: b +}, +10); + +z = function(arr, f, val, opt_obj) { + x(arr, function(val, index) { + rval = f.call(opt_obj, rval, val, index, arr); + }); +}; + +var xyz = [100, + 200, + 300]; + +var def = [100, + 200]; // WRONG_INDENTATION + +var ghi = [100, + 200]; // WRONG_INDENTATION + +var abcdefg = ('a' + + 'b'); + +var x9 = z('7: ' + + x(x)); // WRONG_INDENTATION + +function abc() { + var z = d('div', // UNUSED_LOCAL_VARIABLE + { + a: 'b' + }); +} + +abcdefg('p', {x: 10}, + 'Para 1'); + +function bar1() { + return 3 + + 4; // WRONG_INDENTATION +} + +function bar2() { + return 3 + // WRONG_INDENTATION + 4; // WRONG_INDENTATION +} + +function bar3() { + return 3 + // WRONG_INDENTATION + 4; +} + +// Regression test for unfiled bug. Wrongly going into implied block after else +// when there was an explicit block (was an else if) caused false positive +// indentation errors. +if (true) { +} else if (doc.foo( + doc.getBar(baz))) { + var x = 3; +} + +// Regression tests for function indent + 4. +// (The first example is from the styleguide.) +if (veryLongFunctionNameA( + veryLongArgumentName) || + veryLongFunctionNameB( + veryLongArgumentName)) { + veryLongFunctionNameC(veryLongFunctionNameD( + veryLongFunctioNameE( + veryLongFunctionNameF))); +} + +if (outer(middle( + inner(first)))) {} +if (outer(middle( + inner(second)), + outer_second)) {} +if (nested.outer( + first)) {} +if (nested.outer(nested.middle( + first))) {} +if (nested + .outer(nested.middle( + first))) {} +if (nested.outer(first + .middle( + second), + third)) {} + +// goog.scope should not increase indentation. 
+goog.scope(function() { +var x = 5; +while (x > 0) { + --x; +} +}); // goog.scope + + +goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE +// +1: UNUSED_LOCAL_VARIABLE +var x = 5; // WRONG_INDENTATION +}); // goog.scope + +goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE +var x = 5; // UNUSED_LOCAL_VARIABLE +}); // goog.scope + +goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE +var x = 5; // UNUSED_LOCAL_VARIABLE +}); // goog.scope diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.in.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.in.js new file mode 100644 index 0000000..974af91 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.in.js @@ -0,0 +1,21 @@ +// Correct dot placement: +var x = window.some() + .method() + .calls(); + +// Wrong dots: +window. + some(). + // With a comment in between. + method(). + calls(); + +// Wrong plus operator: +var y = 'hello' + + 'world' + // With a comment in between. + + '!'; + +// Correct plus operator (untouched): +var y = 'hello' + + 'world'; diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.out.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.out.js new file mode 100644 index 0000000..c03e117 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.out.js @@ -0,0 +1,21 @@ +// Correct dot placement: +var x = window.some() + .method() + .calls(); + +// Wrong dots: +window + .some() + // With a comment in between. + .method() + .calls(); + +// Wrong plus operator: +var y = 'hello' + + 'world' + + // With a comment in between. + '!'; + +// Correct plus operator (untouched): +var y = 'hello' + + 'world'; diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.out.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.out.js new file mode 100644 index 0000000..37fe2b8 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.out.js @@ -0,0 +1,310 @@ +// Copyright 2008 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Autofix test script. + * + * @author robbyw@google.com (Robby Walker) + * @author robbyw@google.com (Robby Walker) + * @author robbyw@google.com (Robby Walker) + * @author robbyw@google.com (Robby Walker) + * @author robbyw@google.com (Robby) + * @author robbyw@google.com + * @author robbyw@google.com Robby + */ + +goog.provide('X'); +goog.provide('Y'); +goog.provide('Z'); +goog.provide('w'); + +goog.require('dummy.Bb'); +goog.require('dummy.Cc'); +// Some comment about why this is suppressed bottom. +/** @suppress {extraRequire} */ +goog.require('dummy.NotUsedBottom'); // Comment bottom. +/** @suppress {extraRequire} */ +// Some comment about why this is suppressed different. +goog.require('dummy.NotUsedDifferentComment'); +// Some comment about why this is suppressed middle. 
+/** @suppress {extraRequire} */ +goog.require('dummy.NotUsedMiddle'); // Comment middle. +// Some comment about why this is suppressed top. +/** @suppress {extraRequire} */ +goog.require('dummy.NotUsedTop'); // Comment top. +goog.require('dummy.aa'); + +var x = new dummy.Bb(); +dummy.Cc.someMethod(); +dummy.aa.someMethod(); + + +/** + * @param {?number} badTypeWithExtraSpace |null -> ?. + * @return {number} returns -> return. + */ +x.y = function(badTypeWithExtraSpace) { +}; + + +/** @type {function():null|Array.} only 2nd |null -> ? */ +x.badType; + + +/** @type {?Array.} only 2nd |null -> ? */ +x.wickedType; + + +/** @type {? string } null -> ? */ +x.nullWithSpace; + +spaceBeforeSemicolon = 10; +spaceBeforeParen = 10 + (5 * 2); +arrayNoSpace = [10]; +arrayExtraSpace[10] = 10; +spaceBeforeClose = ([10]); +spaceAfterStart = ([10]); +extraSpaceAfterPlus = 10 + 20; +extraSpaceBeforeOperator = x++; +extraSpaceBeforeOperator = x--; +extraSpaceBeforeComma = x(y, z); +missingSpaceBeforeOperator = x + y; +missingSpaceAfterOperator = x + y; +missingBothSpaces = x + y; +equalsSpacing = 10; +equalsSpacing = 10; +equalsSpacing = 10; +equalsSpacing = [10]; +reallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongName = 1000; + +'string should be single quotes'; + +// Regression test for interaction between space fixing and semicolon fixing - +// previously the fix for the missing space caused the function to be seen as +// a non-assigned function and then its semicolon was being stripped. +x = function() { +}; + + + +/** + * Missing a newline. + * @constructor + * @extends {a.b.c} + */ +x.y.z = function() { +}; +goog.inherits(x.y.z, a.b.c); + + + +/** + * Extra blank line. + * @constructor + * @extends {a.b.c} + */ +x.y.z = function() { +}; +goog.inherits(x.y.z, a.b.c); + + + +/** + * Perfect! + * @constructor + * @extends {a.b.c} + */ +x.y.z = function() { +}; +goog.inherits(x.y.z, a.b.c); + +// Whitespace at end of comment. +var removeWhiteSpaceAtEndOfLine; + + +/** + * Whitespace at EOL (here and the line of code and the one below it). + * @type {string} + * @param {string} Description with whitespace at EOL. + */ +x = 10; + + +/** + * @type {number} + */ +foo.bar = 3; + + +/** + * @enum {boolean} + */ +bar.baz = true; + + +/** + * @extends {Object} + */ +bar.foo = x; + + +/** + * @type {function(string, boolean) : void} + */ +baz.bar = goog.nullFunction; + + +/** @inheritDoc */ +baz.baz = function() { +}; + +TR_Node.splitDomTreeAt(splitNode, clone, /** @type {Node} */ (quoteNode)); + +x = [1, 2, 3,]; +x = { + a: 1, +}; + +if (x) { +} + +for (i = 0; i < 10; i++) { +} +for (i = 0; i < 10; i++) { +} +for (i = 0; i < 10; i++) { +} +for (i = 0; i < 10; i++) { +} +for (i = 0; i < 10; i++) { +} +for (i = 0; i < 10; i++) { +} +for (i = 0; i < 10; i++) { +} +for (i = 0; i < 10; i++) { +} +for (i = 0; i < 10; i++) { +} + +var x = 10; +var y = 100; + + +/** + * This is to test the ability to add or remove a = in type to mark optional + * parameters. + * @param {number=} firstArg Incorrect the name should start with opt_. Don't + * handle the fix (yet). + * @param {function(string=):number=} opt_function This should end with a =. + * @param {function(number)=} opt_otherFunc This should end with a =. + * @param {string=} opt_otherArg Incorrect this should be string=. + * @param {{string, number}=} opt_recordArg Incorrect this should + * be {string, number}=. 
+ */ +function someFunction(firstArg, opt_function, opt_otherFunc, opt_otherArg, + opt_recordArg) { +} + + +/** + * This is to test the ability to add '...' in type with variable arguments. + * @param {number} firstArg First argument. + * @param {...string} var_args This should start with '...'. + */ +function varArgFunction(firstArg, var_args) { +} + + +/** + * This is to test the ability to add '...' in type with variable arguments. + * @param {number} firstArg First argument. + * @param {...{a, b}} var_args This should start with '...'. + */ +function varArgRecordTypeFunction(firstArg, var_args) { +} + +var indent = 'correct'; +indent = 'too far'; +if (indent) { + indent = 'too short'; +} +indent = function() { + return a + + b; +}; + + +/** + * Regression test, must insert whitespace before the 'b' when fixing + * indentation. Its different from below case of bug 3473113 as has spaces + * before parameter which was not working in part of the bug fix. + */ +indentWrongSpaces = function( + b) { +}; + + +/** + * Regression test, must insert whitespace before the 'b' when fixing + * indentation. + * @bug 3473113 + */ +indent = function( + b) { +}; + + +/** + * This is to test the ability to remove multiple extra lines before a top-level + * block. + */ +function someFunction() {} + + +/** + * This is to test the ability to add multiple extra lines before a top-level + * block. + */ +function someFunction() {} + + +// This is a comment. +/** + * This is to test that blank lines removed before a top level block skips any + * comments above the block. + */ +function someFunction() {} + + +// This is a comment. +/** + * This is to test that blank lines added before a top level block skips any + * comments above the block. + */ +function someFunction() {} + + +/** + * Parameters don't have proper spaces. + * @param {number} a + * @param {number} b + * @param {number} d + * @param {number} e + * @param {number} f + */ +function someFunction(a, b, d, e, f) { +} + +// File does not end with newline diff --git a/tools/closure_linter/closure_linter/testdata/goog_scope.js b/tools/closure_linter/closure_linter/testdata/goog_scope.js new file mode 100644 index 0000000..aa655d8 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/goog_scope.js @@ -0,0 +1,63 @@ +// Copyright 2011 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Tests provides/requires in the presence of goog.scope. + * There should be no errors for missing provides or requires. + * + * @author nicksantos@google.com (Nick Santos) + */ + +goog.provide('goog.something.Something'); + +goog.require('goog.util.Else'); + +goog.scope(function() { +var Else = goog.util.Else; +var something = goog.something; + +/** // WRONG_BLANK_LINE_COUNT + * This is a something. + * @constructor + */ +something.Something = function() { + /** + * This is an else. 
+ * @type {Else} + */ + this.myElse = new Else(); + + /** @type {boolean} */ + this.private_ = false; // MISSING_PRIVATE, UNUSED_PRIVATE_MEMBER +}; + +/** // WRONG_BLANK_LINE_COUNT + * // +3: MISSING_PRIVATE + * Missing private. + */ +something.withTrailingUnderscore_ = 'should be declared @private'; + +/** // WRONG_BLANK_LINE_COUNT + * Does nothing. + */ +something.Something.prototype.noOp = function() {}; + + +/** + * Does something. + * Tests for included semicolon in function expression in goog.scope. + */ +something.Something.prototype.someOp = function() { +} // MISSING_SEMICOLON_AFTER_FUNCTION +}); // goog.scope diff --git a/tools/closure_linter/closure_linter/testdata/html_parse_error.html b/tools/closure_linter/closure_linter/testdata/html_parse_error.html new file mode 100644 index 0000000..df61da1 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/html_parse_error.html @@ -0,0 +1,32 @@ + + + + + GJsLint HTML JavaScript extraction tests + + + Text outside the script tag should not be linted as JavaScript. + Stray closing brace: } + +
Some more non-JavaScript text with missing whitespace: (a+b).
+ + diff --git a/tools/closure_linter/closure_linter/testdata/indentation.js b/tools/closure_linter/closure_linter/testdata/indentation.js new file mode 100644 index 0000000..10d2ad0 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/indentation.js @@ -0,0 +1,465 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Test file for indentation. + * @author robbyw@google.com (Robert Walker) + */ + +goog.provide('goog.editor.SeamlessField'); +goog.provide('goog.something'); + +goog.require('goog.events.KeyCodes'); +goog.require('goog.userAgent'); + +// Some good indentation examples. + +var x = 10; +var y = 'some really really really really really really really long string', + z = 14; +if (x == 10) { + x = 12; +} +if (x == 10 || + x == 12) { + x = 14; +} +if (x == 14) { + if (z >= x) { + y = 'test'; + } +} +x = x + + 10 + ( + 14 + ); +something = + 5; +var arr = [ + 1, 2, 3]; +var arr2 = [ + 1, + 2, + 3]; +var obj = { + a: 10, + b: 20 +}; +callAFunction(10, [100, 200], + 300); +callAFunction([ + 100, + 200 +], +300); +callAFunction('abc' + + 'def' + + 'ghi'); + +x.reallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongName + .someMember = 10; + + +// confused on allowed indentation in continued function assignments vs overlong +// wrapped function calls. +some.sample(). // LINE_ENDS_WITH_DOT + then(function(response) { + return 1; + }); + + +/** + * Some function. + * @return {number} The number ten. + */ +goog.something.x = function() { + return 10 + + 20; +}; + + +/** + * Some function. + * @param {number} longParameterName1 Some number. + * @param {number} longParameterName2 Some number. + * @param {number} longParameterName3 Some number. + * @return {number} Sum number. + */ +goog.something.y = function(longParameterName1, longParameterName2, + longParameterName3) { + return longParameterName1 + longParameterName2 + longParameterName3; +}; + + +/** + * Some function. + * @param {number} longParameterName1 Some number. + * @param {number} longParameterName2 Some number. + * @param {number} longParameterName3 Some number. + * @return {number} Sum number. + */ +goog.something.z = function(longParameterName1, longParameterName2, + longParameterName3) { + return longParameterName1 + longParameterName2 + longParameterName3; +}; + +if (opt_rootTagName) { + doc.appendChild(doc.createNode(3, + opt_rootTagName, + opt_namespaceUri || '')); +} + + +/** + * For a while this errored because the function call parens were overriding + * the other opening paren. + */ +goog.something.q = function() { + goog.something.x(a.getStartNode(), + a.getStartOffset(), a.getEndNode(), a.getEndOffset()); +}; + +function doSomething() { + var titleElement = goog.something(x, // UNUSED_LOCAL_VARIABLE + y); +} + +switch (x) { + case 10: + y = 100; + break; + + // This should be allowed. + case 20: + if (y) { + z = 0; + } + break; + + // This should be allowed, + // even with mutliple lines. 
+ case 30: + if (y) { + z = 0; + } + break; + + case SadThatYouSwitch + .onSomethingLikeThis: + z = 10; + + case 40: + z = 20; + + default: + break; +} + +// Description of if case. +if (x) { + +// Description of else case should be allowed at this indent. +// Multiple lines is ok. +} else { + +} + + +/** @inheritDoc */ +goog.editor.SeamlessField.prototype.setupMutationEventHandlersGecko = + function() { + var x = 10; + x++; +}; + + +// Regression test for '.' at the end confusing the indentation checker if it is +// not considered to be part of the identifier. +/** @inheritDoc */ +goog.editor.SeamlessField.prototype. + setupMutationEventHandlersGecko = function() { + // -2: LINE_ENDS_WITH_DOT + var x = 10; + x++; +}; + +var someReallyReallyLongVariableName = + y ? /veryVeryVeryVeryVeryVeryVeryVeryLongRegex1/gi : + /slightlyLessLongRegex2/gi; + +var somethingOrOther = z ? + a : + b; + +var z = x ? y : + 'bar'; + +var z = x ? + y : + a; + +var z = z ? + a ? b : c : + d ? e : f; + +var z = z ? + a ? b : + c : + d ? e : f; + +var z = z ? + a ? + b : + c : + d ? e : f; + +var z = z ? + a ? b : c : + d ? e : + f ? g : h; + +var z = z ? + a + + i ? + b + + j : c : + d ? e : + f ? g : h; + + +if (x) { + var block = + // some comment + // and some more comment + (e.keyCode == goog.events.KeyCodes.TAB && !this.dispatchBeforeTab_(e)) || + // #2: to block a Firefox-specific bug where Macs try to navigate + // back a page when you hit command+left arrow or comamnd-right arrow. + // See https://bugzilla.mozilla.org/show_bug.cgi?id=341886 + // get Firefox to fix this. + (goog.userAgent.GECKO && e.metaKey && + (e.keyCode == goog.events.KeyCodes.LEFT || + e.keyCode == goog.events.KeyCodes.RIGHT)); +} + +if (x) { +} + +var somethingElse = { + HAS_W3C_RANGES: goog.userAgent.GECKO || goog.userAgent.WEBKIT || + goog.userAgent.OPERA, + + // A reasonably placed comment. + SOME_KEY: goog.userAgent.IE +}; + +var x = { + ySomethingReallyReallyLong: + 'foo', + z: 'bar' +}; + +// Some bad indentation. + + var a = 10; // WRONG_INDENTATION +var b = 10, + c = 12; // WRONG_INDENTATION +x = x + + 10; // WRONG_INDENTATION +if (x == 14) { + x = 15; // WRONG_INDENTATION + x = 16; // WRONG_INDENTATION +} + +var longFunctionName = function(opt_element) { + return opt_element ? + new z(q(opt_element)) : 100; + // -1: WRONG_INDENTATION +}; + +longFunctionName(a, b, c, + d, e, f); // WRONG_INDENTATION +longFunctionName(a, b, + c, // WRONG_INDENTATION + d); // WRONG_INDENTATION + +x = a ? b : + c; // WRONG_INDENTATION +y = a ? 
+ b : c; // WRONG_INDENTATION + +switch (x) { + case 10: + break; // WRONG_INDENTATION + case 20: // WRONG_INDENTATION + break; +default: // WRONG_INDENTATION + break; +} + +while (true) { + x = 10; // WRONG_INDENTATION + break; // WRONG_INDENTATION +} + +function foo() { + return entryUrlTemplate + .replace( + '${authorResourceId}', + this.sanitizer_.sanitize(authorResourceId)); +} + +return [new x( + 10)]; +return [ + new x(10)]; + +return [new x( + 10)]; // WRONG_INDENTATION +return [new x( + 10)]; // WRONG_INDENTATION + +return {x: y( + z)}; +return { + x: y(z) +}; + +return {x: y( + z)}; // WRONG_INDENTATION +return {x: y( + z)}; // WRONG_INDENTATION + +return /** @type {Window} */ (x( +'javascript:"' + encodeURI(loadingMessage) + '"')); // WRONG_INDENTATION + +x = { + y: function() {} +}; + +x = { + y: foo, + z: bar + + baz // WRONG_INDENTATION +}; + +x({ + a: b +}, +10); + +z = function(arr, f, val, opt_obj) { + x(arr, function(val, index) { + rval = f.call(opt_obj, rval, val, index, arr); + }); +}; + +var xyz = [100, + 200, + 300]; + +var def = [100, + 200]; // WRONG_INDENTATION + +var ghi = [100, + 200]; // WRONG_INDENTATION + +var abcdefg = ('a' + + 'b'); + +var x9 = z('7: ' + +x(x)); // WRONG_INDENTATION + +function abc() { + var z = d('div', // UNUSED_LOCAL_VARIABLE + { + a: 'b' + }); +} + +abcdefg('p', {x: 10}, + 'Para 1'); + +function bar1() { + return 3 + + 4; // WRONG_INDENTATION +} + +function bar2() { + return 3 + // WRONG_INDENTATION + 4; // WRONG_INDENTATION +} + +function bar3() { + return 3 + // WRONG_INDENTATION + 4; +} + +// Regression test for unfiled bug. Wrongly going into implied block after else +// when there was an explicit block (was an else if) caused false positive +// indentation errors. +if (true) { +} else if (doc.foo( + doc.getBar(baz))) { + var x = 3; +} + +// Regression tests for function indent + 4. +// (The first example is from the styleguide.) +if (veryLongFunctionNameA( + veryLongArgumentName) || + veryLongFunctionNameB( + veryLongArgumentName)) { + veryLongFunctionNameC(veryLongFunctionNameD( + veryLongFunctioNameE( + veryLongFunctionNameF))); +} + +if (outer(middle( + inner(first)))) {} +if (outer(middle( + inner(second)), + outer_second)) {} +if (nested.outer( + first)) {} +if (nested.outer(nested.middle( + first))) {} +if (nested + .outer(nested.middle( + first))) {} +if (nested.outer(first + .middle( + second), + third)) {} + +// goog.scope should not increase indentation. +goog.scope(function() { +var x = 5; +while (x > 0) { + --x; +} +}); // goog.scope + + +goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE +// +1: UNUSED_LOCAL_VARIABLE + var x = 5; // WRONG_INDENTATION +}); // goog.scope + +goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE +var x = 5; // UNUSED_LOCAL_VARIABLE +}); // MISSING_END_OF_SCOPE_COMMENT + +goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE +var x = 5; // UNUSED_LOCAL_VARIABLE +}); // malformed goog.scope comment // MALFORMED_END_OF_SCOPE_COMMENT diff --git a/tools/closure_linter/closure_linter/testdata/interface.js b/tools/closure_linter/closure_linter/testdata/interface.js new file mode 100644 index 0000000..7daeee3 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/interface.js @@ -0,0 +1,89 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Test file for interfaces. + * @author robbyw@google.com (Robert Walker) + */ + +goog.provide('sample.BadInterface'); +goog.provide('sample.GoodInterface'); + + + +/** + * Sample interface to demonstrate correct style. + * @interface + */ +sample.GoodInterface = function() { +}; + + +/** + * Legal methods can take parameters and have a return type. + * @param {string} param1 First parameter. + * @param {Object} param2 Second parameter. + * @return {number} Some return value. + */ +sample.GoodInterface.prototype.legalMethod = function(param1, param2) { +}; + + +/** + * Legal methods can also take no parameters and return nothing. + */ +sample.GoodInterface.prototype.legalMethod2 = function() { + // Comments should be allowed. +}; + + +/** + * Legal methods can also be omitted, even with params and return values. + * @param {string} param1 First parameter. + * @param {Object} param2 Second parameter. + * @return {number} Some return value. + */ +sample.GoodInterface.prototype.legalMethod3; + + +/** + * Legal methods can also be set to abstract, even with params and return + * values. + * @param {string} param1 First parameter. + * @param {Object} param2 Second parameter. + * @return {number} Some return value. + */ +sample.GoodInterface.prototype.legalMethod4 = goog.abstractMethod; + + + +/** + * Sample interface to demonstrate style errors. + * @param {string} a This is illegal. + * @interface + */ +sample.BadInterface = function(a) { // INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS + this.x = a; // INTERFACE_METHOD_CANNOT_HAVE_CODE +}; + + +/** + * It is illegal to include code in an interface method. + * @param {string} param1 First parameter. + * @param {Object} param2 Second parameter. + * @return {number} Some return value. + */ +sample.BadInterface.prototype.illegalMethod = function(param1, param2) { + return 10; // INTERFACE_METHOD_CANNOT_HAVE_CODE +}; diff --git a/tools/closure_linter/closure_linter/testdata/jsdoc.js b/tools/closure_linter/closure_linter/testdata/jsdoc.js new file mode 100644 index 0000000..d62fd3c --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/jsdoc.js @@ -0,0 +1,1455 @@ +// Copyright 2007 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Errors related to JsDoc. 
+ * + * @author robbyw@google.com (Robby Walker) + * + * @author robbyw@google.com (Robby Walker) // EXTRA_SPACE, EXTRA_SPACE + * @author robbyw@google.com(Robby Walker) // MISSING_SPACE + * + * @author robbyw@google.com () // INVALID_AUTHOR_TAG_DESCRIPTION + * @author robbyw@google.com // INVALID_AUTHOR_TAG_DESCRIPTION + * + * @owner ajp@google.com (Andy Perelson) + * @badtag // INVALID_JSDOC_TAG + * @customtag This tag is passed as a flag in full_test.py + * @requires anotherCustomTagPassedInFromFullTestThatShouldAllowASingleWordLongerThan80Lines + * @requires firstWord, secondWordWhichShouldMakeThisLineTooLongSinceThereIsAFirstWord + * @wizmodule + * @wizModule // INVALID_JSDOC_TAG + */ +// -4: LINE_TOO_LONG + +goog.provide('MyClass'); +goog.provide('goog.NumberLike'); +goog.provide('goog.math.Vec2.sum'); + +goog.require('goog.array'); +goog.require('goog.color'); +goog.require('goog.dom.Range'); +goog.require('goog.math.Matrix'); +goog.require('goog.math.Vec2'); + + +/** + * Test the "no compilation should be done after annotation processing" tag. + * @nocompile + */ + + +/** + * @returns // INVALID_JSDOC_TAG + * @params // INVALID_JSDOC_TAG + * @defines // INVALID_JSDOC_TAG + * @nginject // INVALID_JSDOC_TAG + * @wizAction // INVALID_JSDOC_TAG + */ +function badTags() { +} + + +// +4: MISSING_JSDOC_TAG_DESCRIPTION +/** + * @license Description. + * @preserve Good tag, missing punctuation + * @preserve + */ +function goodTags() { + /** @preserveTry */ + try { + hexColor = goog.color.parse(value).hex; + } catch (ext) { + // Regression test. The preserveTry tag was incorrectly causing a warning + // for a missing period at the end of tag description. Parsed as + // flag: preserve, description: Try. + } +} + + +/** + * Some documentation goes here. + * + * @param {Object} object Good docs. + * @ngInject + * @wizaction + */ +function good(object) { +} + + +/** + * Some documentation goes here. + * @param {function(string, string) : string} f A function. + */ +function setConcatFunc(f) { +} + + +/** + * Some docs. + */ +function missingParam(object) { // MISSING_PARAMETER_DOCUMENTATION +} + + +/** + * @return {number} Hiya. + * @override + */ +function missingParamButInherit(object) { + return 3; +} + + +/** + * @inheritDoc + */ +function missingParamButInherit(object) { +} + + +/** + * @override + */ +function missingParamButOverride(object) { +} + + +// +2: UNNECESSARY_BRACES_AROUND_INHERIT_DOC +/** + * {@inheritDoc} + */ +function missingParamButInherit(object) { +} + + +/** + * Some docs. + * + * @param {Object} object Docs. + */ +function mismatchedParam(elem) { // WRONG_PARAMETER_DOCUMENTATION + /** @param {number} otherElem */ + function nestedFunction(elem) { // WRONG_PARAMETER_DOCUMENTATION + }; +} + + +/** + * @return {boolean} A boolean primitive. + */ +function goodReturn() { + return something; +} + + +/** + * @return {some.long.type.that.will.make.the.description.start.on.next.line} + * An object. + */ +function anotherGoodReturn() { + return something; +} + + +// +2: MISSING_JSDOC_TAG_TYPE +/** + * @return false. + */ +function missingReturnType() { + return something; +} + + +// +2: MISSING_SPACE +/** + * @return{type} + */ +function missingSpaceOnReturnType() { + return something; +} + + +// +2: MISSING_JSDOC_TAG_TYPE +/** + * @return + */ +function missingReturnType() { + return something; +} + +class.missingDocs = function() { // MISSING_MEMBER_DOCUMENTATION +}; + + +/** + * No return doc needed. 
+ */ +function okMissingReturnDoc() { + return; +} + + +// +2: UNNECESSARY_RETURN_DOCUMENTATION +/** + * @return {number} Unnecessary return doc. + */ +function unnecessaryMissingReturnDoc() { +} + + +/** + * The "suppress" causes the compiler to ignore the 'debugger' statement. + * @suppress {checkDebuggerStatement} + */ +function checkDebuggerStatementWithSuppress() { + debugger; +} + + +/** + * Return doc is present, but the function doesn't have a 'return' statement. + * The "suppress" causes the compiler to ignore the error. + * @suppress {missingReturn} + * @return {string} + */ +function unnecessaryMissingReturnDocWithSuppress() { + if (false) { + return ''; + } else { + // Missing return statement in this branch. + } +} + + +// +3: MISSING_JSDOC_TAG_TYPE +// +2: UNNECESSARY_RETURN_DOCUMENTATION +/** + * @return + */ +function unnecessaryMissingReturnNoType() { +} + + +/** + * @return {undefined} Ok unnecessary return doc. + */ +function okUnnecessaryMissingReturnDoc() { +} + + +/** + * @return {*} Ok unnecessary return doc. + */ +function okUnnecessaryMissingReturnDoc2() { +} + + +/** + * @return {void} Ok unnecessary return doc. + */ +function okUnnecessaryMissingReturnDoc3() { +} + + +/** + * This function doesn't return anything, but it does contain the string return. + */ +function makeSureReturnTokenizesRight() { + fn(returnIsNotSomethingHappeningHere); +} + + +/** + * @return {number|undefined} Ok unnecessary return doc. + */ +function okUnnecessaryMissingReturnDoc3() { +} + + +/** + * @return {number} Ok unnecessary return doc. + */ +function okUnnecessaryReturnWithThrow() { + throw 'foo'; +} + + +/** @inheritDoc */ +function okNoReturnWithInheritDoc() { + return 10; +} + + +/** @override */ +function okNoReturnWithOverride() { + return 10; +} + + +/** + * No return doc. + */ // MISSING_RETURN_DOCUMENTATION +function badMissingReturnDoc() { + return 10; +} + + + +/** + * Constructor so we should not have a return jsdoc tag. + * @constructor + */ +function OkNoReturnWithConstructor() { + return this; +} + + +/** + * Type of array is known, so the cast is unnecessary. + * @suppress {unnecessaryCasts} + */ +function unnecessaryCastWithSuppress() { + var numberArray = /** @type {!Array.} */ ([]); + /** @type {number} */ (goog.array.peek(numberArray)); +} + + + +/** + * Make sure the 'unrestricted' annotation is accepted. + * @constructor @unrestricted + */ +function UnrestrictedClass() {} + + + +/** + * Check definition of fields in constructors. + * @constructor + */ +function AConstructor() { + /** + * A field. + * @type {string} + * @private + */ + this.isOk_ = 'ok'; + + // +5: MISSING_PRIVATE + /** + * Another field. + * @type {string} + */ + this.isBad_ = 'missing private'; + + /** + * This is ok, but a little weird. + * @type {number} + * @private + */ + var x = this.x_ = 10; + + // At first, this block mis-attributed the first typecast as a member doc, + // and therefore expected it to contain @private. + if (goog.math.Matrix.isValidArray(/** @type {Array} */ (m))) { + this.array_ = goog.array.clone(/** @type {Array.>} */ (m)); + } + + // Use the private and local variables we've defined so they don't generate a + // warning. + var y = [ + this.isOk_, + this.isBad_, + this.array_, + this.x_, + y, + x + ]; +} + + +/** + * @desc This message description is allowed. + */ +var MSG_YADDA_YADDA_YADDA = 'A great message!'; + + +/** + * @desc So is this one. + * @hidden + * @meaning Some unusual meaning. 
+ */ +x.y.z.MSG_YADDA_YADDA_YADDA = 'A great message!'; + + +/** + * @desc But desc can only apply to messages. + */ +var x = 10; // INVALID_USE_OF_DESC_TAG + + +/** + * Same with hidden. + * @hidden + */ +var x = 10; // INVALID_USE_OF_DESC_TAG + + +/** + * Same with meaning. + * @meaning Some unusual meaning. + */ +var x = 10; // INVALID_USE_OF_DESC_TAG + + +// +9: MISSING_SPACE +// +9: MISSING_JSDOC_TAG_TYPE +// +10: OUT_OF_ORDER_JSDOC_TAG_TYPE +// +10: MISSING_JSDOC_TAG_TYPE, MISSING_SPACE +/** + * Lots of problems in this documentation. + * + * @param {Object} q params b & d are missing descriptions. + * @param {Object} a param d is missing a type (oh my). + * @param {Object}b + * @param d + * @param {Object} x param desc. + * @param z {type} Out of order type. + * @param{} y Empty type and missing space. + * @param {Object} omega mis-matched param. + */ +function manyProblems(a, b, c, d, x, z, y, alpha) { + // -1: MISSING_PARAMETER_DOCUMENTATION, EXTRA_PARAMETER_DOCUMENTATION + // -2: WRONG_PARAMETER_DOCUMENTATION +} + + +/** + * Good docs + * + * @param {really.really.really.really.really.really.really.long.type} good + * My param description. + * @param {really.really.really.really.really.really.really.really.long.type} + * okay My param description. + * @param + * {really.really.really.really.really.really.really.really.really.really.long.type} + * fine Wow that's a lot of wrapping. + */ +function wrappedParams(good, okay, fine) { +} + + +// +4: MISSING_JSDOC_TAG_TYPE +// +3: MISSING_JSDOC_PARAM_NAME +/** + * Really bad + * @param + */ +function reallyBadParam(a) { // MISSING_PARAMETER_DOCUMENTATION +} + + +/** + * Some docs. + * + * @private + */ +class.goodPrivate_ = function() { +}; + + +/** + * Some docs. + */ +class.missingPrivate_ = function() { // MISSING_PRIVATE +}; + + +/** + * Some docs. + * + * @private + */ +class.extraPrivate = function() { // EXTRA_PRIVATE +}; + + +/** + * Anything ending with two underscores is not treated as private. + */ +class.__iterator__ = function() { +}; + + +/** + * Some docs. + * @package + */ +class.goodPackage = function() { +}; + + +/** + * Some docs. + * @package + */ +class.badPackage_ = function() { // MISSING_PRIVATE +}; + + +/** + * Some docs. + * @protected + */ +class.goodProtected = function() { +}; + + +/** + * Some docs. + * @protected + */ +class.badProtected_ = function() { // MISSING_PRIVATE +}; + + +/** + * Example of a legacy name. + * @protected + * @suppress {underscore} + */ +class.dom_ = function() { + /** @suppress {with} */ + with ({}) {} +}; + + +/** + * Legacy names must be protected. + * @suppress {underscore} + */ +class.dom_ = function() { +}; + + +/** + * Allow compound suppression. + * @private + */ +class.dom_ = function() { + /** @suppress {visibility|with} */ + with ({}) {} +}; + + +/** + * Allow compound suppression. + * @private + */ +class.dom_ = function() { + /** @suppress {visibility,with} */ + with ({}) {} +}; + + +// +4: UNNECESSARY_SUPPRESS +/** + * Some docs. + * @private + * @suppress {underscore} + */ +class.unnecessarySuppress_ = function() { +}; + + +/** + * Some docs. + * @public + */ +class.goodProtected = function() { +}; + + +/** + * Some docs. + * @public + */ +class.badProtected_ = function() { // MISSING_PRIVATE +}; + + +/** + * Example of a legacy name. + * @public + * @suppress {underscore} + */ +class.dom_ = function() { +}; + + +// +5: JSDOC_PREFER_QUESTION_TO_PIPE_NULL +// +7: JSDOC_PREFER_QUESTION_TO_PIPE_NULL +/** + * Check JsDoc type annotations. 
+ * @param {Object?} good A good one. + * @param {Object|null} bad A bad one. + * @param {Object|Element?} ok1 This is acceptable. + * @param {Object|Element|null} right The right way to do the above. + * @param {null|Object} bad2 Another bad one. + * @param {Object?|Element} ok2 Not good but acceptable. + * @param {Array.?} complicated A good one that was reported as + * bad. See bug 1154506. + */ +class.sampleFunction = function(good, bad, ok1, right, bad2, ok2, + complicated) { +}; + + +/** + * @return {Object?} A good return. + */ +class.goodReturn = function() { + return something; +}; + + +/** @type {Array.} // JSDOC_PREFER_QUESTION_TO_PIPE_NULL */ +class.badType; + + +/** + * For template types, the ?TYPE notation is not parsed correctly by the + * compiler, so don't warn here. + * @type {Array.} + * @template TYPE + */ +class.goodTemplateType; + + +// As the syntax may look ambivalent: The function returns just null. +/** @type {function():null|Object} */ +class.goodType; + + +/** @type {function():(null|Object)} // JSDOC_PREFER_QUESTION_TO_PIPE_NULL */ +class.badType; + + +// As the syntax may look ambivalent: The function returns just Object. +/** @type {function():Object|null} // JSDOC_PREFER_QUESTION_TO_PIPE_NULL */ +class.badType; + + +/** @type {(function():Object)|null} // JSDOC_PREFER_QUESTION_TO_PIPE_NULL */ +class.badType; + + +/** @type {function(null,Object)} */ +class.goodType; + + +/** @type {{a:null,b:Object}} */ +class.goodType; + + +// +2: JSDOC_PREFER_QUESTION_TO_PIPE_NULL +/** + * @return {Object|null} A bad return. + */ +class.badReturn = function() { + return something; +}; + + +/** + * @return {Object|Element?} An not so pretty return, but acceptable. + */ +class.uglyReturn = function() { + return something; +}; + + +/** + * @return {Object|Element|null} The right way to do the above. + */ +class.okReturn = function() { + return something; +}; + + +// +2: MISSING_SPACE, MISSING_SPACE +/** + * @return{mytype}Something. + */ +class.missingSpacesReturn = function() { + return something; +}; + + +/** + * A good type in the new notation. + * @type {Object?} + */ +class.otherGoodType = null; + + +/** + * A complex type that should allow both ? and |. + * @bug 1570763 + * @type {function(number?, Object|undefined):void} + */ +class.complexGoodType = goog.nullFunction; + + +/** + * A complex bad type that we can catch, though there are many we can't. + * Its acceptable. + * @type {Array.|string?} + */ +class.complexBadType = x || 'foo'; + + +/** + * A strange good type that caught a bad version of type checking from + * other.js, so I added it here too just because. + * @type {number|string|Object|Element|Array.|null} + */ +class.aStrangeGoodType = null; + + +/** + * A type that includes spaces. + * @type {function() : void} + */ +class.assignedFunc = goog.nullFunction; + + +// +4: JSDOC_PREFER_QUESTION_TO_PIPE_NULL +// +3: MISSING_BRACES_AROUND_TYPE +/** + * A bad type. + * @type Object|null + */ +class.badType = null; + + +// +3: JSDOC_PREFER_QUESTION_TO_PIPE_NULL +/** + * A bad type, in the new notation. + * @type {Object|null} + */ +class.badType = null; + + +/** + * An not pretty type, but acceptable. + * @type {Object|Element?} + */ +class.uglyType = null; + + +/** + * The right way to do the above. + * @type {Object|Element|null} + */ +class.okType = null; + + +/** + * @type {boolean} Is it okay to have a description here? + */ +class.maybeOkType = null; + + +/** + * A property whose type will be infered from the right hand side since it is + * constant. 
+ * @const + */ +class.okWithoutType = 'stout'; + + +/** + * Const property without type and text in next line. b/10407058. + * @const + * TODO(user): Nothing to do, just for scenario. + */ +class.okWithoutType = 'string'; + + +/** + * Another constant property, but we should use the type tag if the type can't + * be inferred. + * @type {string} + * @const + */ +class.useTypeWithConst = functionWithUntypedReturnValue(); + + +/** + * Another constant property, but using type with const if the type can't + * be inferred. + * @const {string} + */ +class.useTypeWithConst = functionWithUntypedReturnValue(); + + +// +3: MISSING_BRACES_AROUND_TYPE +/** + * Constant property without proper type. + * @const string + */ +class.useImproperTypeWithConst = functionWithUntypedReturnValue(); + + +/** + * @define {boolean} A define. + */ +var COMPILED = false; + + +// +2: MISSING_JSDOC_TAG_TYPE +/** + * @define A define without type info. + */ +var UNTYPED_DEFINE = false; + + +// +4: MISSING_JSDOC_TAG_DESCRIPTION, MISSING_SPACE +/** + * A define without a description and missing a space. + * + * @define{boolean} + */ +var UNDESCRIBED_DEFINE = false; + + +// Test where to check for docs. +/** + * Docs for member object. + * @type {Object} + */ +x.objectContainingFunctionNeedsNoDocs = { + x: function(params, params) {} +}; + +if (test) { + x.functionInIfBlockNeedsDocs = function() { // MISSING_MEMBER_DOCUMENTATION + x.functionInFunctionNeedsNoDocs = function() { + }; + }; +} else { + x.functionInElseBlockNeedsDocs = function() { // MISSING_MEMBER_DOCUMENTATION + x.functionInFunctionNeedsNoDocs = function() { + }; + }; +} + + +/** + * Regression test. + * @param {goog.math.Vec2} a + * @param {goog.math.Vec2} b + * @return {goog.math.Vec2} The sum vector. + */ +goog.math.Vec2.sum = function(a, b) { + return new goog.math.Vec2(a.x + b.x, a.y + b.y); +}; + + +// +6: JSDOC_MISSING_OPTIONAL_PREFIX +// +8: JSDOC_MISSING_OPTIONAL_PREFIX +// +8: JSDOC_MISSING_OPTIONAL_TYPE +// +8: JSDOC_MISSING_OPTIONAL_TYPE +/** + * Optional parameters test. + * @param {number=} numberOptional The name should be prefixed by opt_. + * @param {function(number=)} funcOk Ok. + * @param {number} numberOk The type is ok. + * @param {function(string=):number=} funcOpt Param name need opt_ prefix. + * @param {string} opt_stringMissing The type miss an ending =. + * @param {function(number=)} opt_func The type miss an ending =. + * @param {string=} opt_ok The type is ok. + * @param {function(string=):number=} opt_funcOk Type is ok. + */ +goog.math.Vec2.aFunction = function( + numberOptional, funcOk, numberOk, funcOpt, opt_stringMissing, opt_func, + opt_ok, opt_funcOk) { +}; + + +/** + * Good documentation! + * + * @override + */ +class.goodOverrideDocs = function() { +}; + + +/** + * Test that flags embedded in docs don't trigger ends with invalid character + * error. + * @bug 2983692 + * @deprecated Please use the {@code @hidden} annotation. + */ +function goodEndChar() { +} + + +/** + * Test that previous case handles unballanced doc tags. + * @param {boolean} a Whether we should honor '{' characters in the string. + */ +function goodEndChar2(a) { +} + + +/** + * Regression test for braces in description invalidly being matched as types. + * This caused a false error for missing punctuation because the bad token + * caused us to incorrectly calculate the full description. 
+ * @bug 1406513 + * @return {Object|undefined} A hash containing the attributes for the found url + * as in: {url: "page1.html", title: "First page"} + * or undefined if no match was found. + */ +x.z.a = function() { + return a; +}; + + +/** + * @bug 1492606 HTML parse error for JSDoc descriptions grashed gjslint. + * @param {string} description a long email or common name, e.g., + * "John Doe " or "Birthdays Calendar" + */ +function calendar(description) { +} + + +/** + * @bug 1492606 HTML parse error for JSDoc descriptions grashed gjslint. + * @param {string} description a long email or common name, e.g., + * "John Doe " or "Birthdays Calendar". + */ +function calendar(description) { +} + + +/** + * Regression test for invoked functions, this code used to report missing + * param and missing return errors. + * @type {number} + */ +x.y.z = (function(x) { + return x + 1; +})(); + + +/** + * Test for invoked function as part of an expression. It should not return + * an error for missing docs for x. + */ +goog.currentTime = something.Else || (function(x) { + //... +})(10); + + +/** + * @type boolean //MISSING_BRACES_AROUND_TYPE + */ +foo.bar = true; + + +/** + * @enum {null //MISSING_BRACES_AROUND_TYPE + */ +bar.foo = null; + + +/** + * @extends Object} //MISSING_BRACES_AROUND_TYPE + */ // JSDOC_DOES_NOT_PARSE +bar.baz = x; + + +/** @inheritDoc */ // INVALID_INHERIT_DOC_PRIVATE +x.privateFoo_ = function() { // MISSING_PRIVATE +}; + + +/** + * Does bar. + * @override // INVALID_OVERRIDE_PRIVATE + */ +x.privateBar_ = function() { // MISSING_PRIVATE +}; + + +/** + * Inherits private baz_ method (evil, wrong behavior, but we have no choice). + * @override + * @suppress {accessControls} + */ +x.prototype.privateBaz_ = function() { +}; + + +/** + * This looks like a function but it's a function call. + * @type {number} + */ +test.x = function() { + return 3; +}(); + + +/** + * Invalid reference to this. + */ // MISSING_JSDOC_TAG_THIS +test.x.y = function() { + var x = this.x; // UNUSED_LOCAL_VARIABLE +}; + + +/** + * Invalid write to this. + */ // MISSING_JSDOC_TAG_THIS +test.x.y = function() { + this.x = 10; +}; + + +/** + * Invalid standalone this. + */ // MISSING_JSDOC_TAG_THIS +test.x.y = function() { + some.func.call(this); +}; + + +/** + * Invalid reference to this. + */ // MISSING_JSDOC_TAG_THIS +function a() { + var x = this.x; // UNUSED_LOCAL_VARIABLE +} + + +/** + * Invalid write to this. + */ // MISSING_JSDOC_TAG_THIS +function b() { + this.x = 10; +} + + +/** + * Invalid standalone this. + */ // MISSING_JSDOC_TAG_THIS +function c() { + some.func.call(this); +} + + +/** + * Ok to do any in a prototype. + */ +test.prototype.x = function() { + var x = this.x; + this.y = x; + some.func.call(this); +}; + + +/** + * Ok to do any in a prototype that ends in a hex-like number. + */ +test.prototype.getColorX2 = function() { + var x = this.x; + this.y = x; + some.func.call(this); +}; + + +/** + * Ok to do any in a function with documented this usage. + * @this {test.x.y} Object bound to this via goog.bind. + */ +function a() { + var x = this.x; + this.y = x; + some.func.call(this); +} + + +/** + * Ok to do any in a function with documented this usage. + * @this {test.x.y} Object bound to this via goog.bind. + */ +test.x.y = function() { + var x = this.x; + this.y = x; + some.func.call(this); +}; + + +/** + * Regression test for bug 1220601. Wrapped function declarations shouldn't + * cause need for an (at)this flag, which I can't write out or it would get + * parsed as being here. 
+ * @param {Event} e The event. + */ +detroit.commands.ChangeOwnerCommand + .prototype.handleDocumentStoreCompleteEvent = function(e) { + this.x = e.target; +}; + + + +/** + * Ok to do any in a constructor. + * @constructor + */ +test.x.y = function() { + this.y = x; + var x = this.y; // UNUSED_LOCAL_VARIABLE + some.func.call(this); +}; + +// Test that anonymous function doesn't throw an error. +window.setTimeout(function() { + var x = 10; // UNUSED_LOCAL_VARIABLE +}, 0); + + +/** + * @bug 1234567 + */ +function testGoodBug() { +} + + +/** + * @bug 1234567 Descriptions are allowed. + */ +function testGoodBugWithDescription() { +} + + +// +2: NO_BUG_NUMBER_AFTER_BUG_TAG +/** + * @bug Wrong + */ +function testBadBugNumber() { +} + + +// +2: NO_BUG_NUMBER_AFTER_BUG_TAG +/** + * @bug Wrong + */ +function testMissingBugNumber() { +} + + + +/** + * @interface + */ +function testInterface() { +} + + + +/** + * @implements {testInterface} + * @constructor + */ +function testImplements() { +} + + +/** + * Function that has an export jsdoc tag. + * @export + */ +function testExport() { +} + + +/** + * Declare and doc this member here, without assigning to it. + * @bug 1473402 + * @type {number} + */ +x.declareOnly; + +if (!someCondition) { + x.declareOnly = 10; +} + + +/** + * JsDoc describing array x.y as an array of function(arg). The missing + * semicolon caused the original bug. + * @type {Array.} + */ +x.y = [] // MISSING_SEMICOLON +x.y[0] = function(arg) {}; +x.y[1] = function(arg) {}; + + +/** + * Regression test for unfiled bug where descriptions didn't properly exclude + * the star-slash that must end doc comments. + * @return {Function} A factory method. + */ +x.y.foo = function() { + /** @return {goog.dom.Range} A range. */ + return function() { + return goog.dom.Range.createRangeFromNothing(); + }; +}; + + +// +4: INCORRECT_SUPPRESS_SYNTAX +// +4: INVALID_SUPPRESS_TYPE +/** + * Docs... + * @suppress + * @suppress {fake} + */ +class.x = 10; + + +/** + * These docs are OK. They used to not parse the identifier due to the use of + * array indices. + * @bug 1640846 + * @private + */ +window['goog']['forms']['Validation'].prototype.form_ = null; + + +/** + * Check JsDoc multiline type annotations. + * @param {string| + * number} multiline description. + */ +function testMultiline(multiline) { +} + + +/** + * Check JsDoc nosideeffects annotations. + * @nosideeffects + */ +function testNoSideEffects() { +} + + +/** + * @enum {google.visualization.DateFormat|google.visualization.NumberFormat| + * google.visualization.PatternFormat} + */ +MultiLineEnumTypeTest = { + FOO: 1, + BAR: 2, + BAZ: 3 +}; + + +/** + * @enum {google.visualization.DateFormat|google.visualization.NumberFormat|google.visualization.PatternFormat} + */ +AllowedLongLineEnum = { + CAT: 1, + DOG: 2, + RAT: 3 +}; + + +/** + * Typeless enum test + * @enum + */ +TypelessEnumTest = { + OK: 0, + CHECKING: 1, + DOWNLOADING: 2, + FAILURE: 3 +}; + +// Regression test for bug 1880803, shouldn't need to document assignments to +// prototype. +x.prototype = {}; + +y + .prototype = {}; + +x.y + .z.prototype = {}; + +x.myprototype = {}; // MISSING_MEMBER_DOCUMENTATION + +x.prototype.y = 5; // MISSING_MEMBER_DOCUMENTATION + +x.prototype + .y.z = {}; // MISSING_MEMBER_DOCUMENTATION + + +/** @typedef {(string|number)} */ +goog.NumberLike; + + +/** + * Something from the html5 externs file. + * @type {string} + * @implicitCast + */ +CanvasRenderingContext2D.prototype.fillStyle; + + + +/** + * Regression test. 
+ * @bug 2994247 + * @inheritDoc + * @extends {Bar} + * @constructor + * @private + */ +Foo_ = function() { +}; + + +/** + * @param {function(this:T,...)} fn The function. + * @param {T} obj The object. + * @template T + */ +function bind(fn, obj) { +} + + + +/** + * @constructor + * @classTemplate T + */ +function MyClass() { +} + + +foo(/** @lends {T} */ ({foo: 'bar'})); + + + +/** + * @param {*} x . + * @constructor + * @struct + */ +function StructMaker(x) { this.x = x; } + +var structObjLit = /** @struct */ { x: 123 }; + + + +/** + * @param {*} x . + * @constructor + * @dict + */ +function DictMaker(x) { this['x'] = x; } + +var dictObjLit = /** @dict */ { x: 123 }; + + +/** + * @idGenerator + * @param {string} x . + * @return {string} . + */ +function makeId(x) { + return ''; +} + + +/** + * @consistentIdGenerator + * @param {string} x . + * @return {string} . + */ +function makeConsistentId(x) { + return ''; +} + + +/** + * @stableIdGenerator + * @param {string} x . + * @return {string} . + */ +function makeStableId(x) { + return ''; +} + + +/** + * Test to make sure defining object with object literal doest not produce + * doc warning for @this. + * Regression test for b/4073735. + */ +var Foo = function(); +Foo.prototype = { + /** + * @return {number} Never. + */ + method: function() { + return this.method(); + } +}; + +/** Regression tests for annotation types with spaces. */ + + +/** @enum {goog.events.Event} */ +var Bar; + + + +/** + * @constructor + * @implements {goog.dom.Range} + */ +var Foo = function() { + /** @final {goog.events.Event} */ + this.bar = null; +}; + +/* Regression tests for not ending block comments. Keep at end of file! **/ +/** + * When there are multiple asteriks. In the failure case we would get an + * error that the file ended mid comment, with no end comment token***/ +/** + * Was a separate bug 2950646 when the closing bit was on it's own line + * because the ending star was being put into a different token type: DOC_PREFIX + * rather than DOC_COMMENT. + **/ diff --git a/tools/closure_linter/closure_linter/testdata/limited_doc_checks.js b/tools/closure_linter/closure_linter/testdata/limited_doc_checks.js new file mode 100644 index 0000000..701cce9 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/limited_doc_checks.js @@ -0,0 +1,29 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Test file for limited doc checks. + */ + + +/** + * Don't require documentation of parameters. 
+ * @param {boolean} + * @param {boolean} c + * @param {boolean} d No check for punctuation + * @bug 3259564 + */ +x.y = function(a, b, c, d) { + return a; +}; diff --git a/tools/closure_linter/closure_linter/testdata/minimal.js b/tools/closure_linter/closure_linter/testdata/minimal.js new file mode 100644 index 0000000..6dbe733 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/minimal.js @@ -0,0 +1 @@ +function f(x) {} // Regression test for old parsing bug. diff --git a/tools/closure_linter/closure_linter/testdata/not_strict.js b/tools/closure_linter/closure_linter/testdata/not_strict.js new file mode 100644 index 0000000..f8ede3d --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/not_strict.js @@ -0,0 +1,42 @@ +/** // _WRONG_BLANK_LINE_COUNT + * @fileoverview This file has errors that could trigger both in strict and non + * strict mode. The errors beginning with _ should not be triggered when strict + * flag is false. + * // -1: _INVALID_AUTHOR_TAG_DESCRIPTION + */ + +/** // _WRONG_BLANK_LINE_COUNT + * A constructor with 1 line above it (BAD). + * // +1: MISSING_JSDOC_TAG_TYPE + * @param a A parameter. + * @privtae // INVALID_JSDOC_TAG + * @constructor + */ +function someFunction(a) { + /** +1: _MISSING_BRACES_AROUND_TYPE + * @type number + */ + this.a = 0; + someReallyReallyReallyReallyReallyReallyReallyReallyLongiName = quiteBigValue; // LINE_TOO_LONG + if (this.a == 0) { + // _WRONG_INDENTATION + return // MISSING_SEMICOLON + } +}; // ILLEGAL_SEMICOLON_AFTER_FUNCTION + + +// +1: _UNNECESSARY_BRACES_AROUND_INHERIT_DOC +/** {@inheritDoc} */ +function someFunction.prototype.toString() { +} + + +/** + * When not strict, there is no problem with optional markers in types. + * @param {string=} a A string. + * @param {string} aOk An other string. + * @param {number} opt_b An optional number. + * @param {number=} opt_bOk An other optional number. + */ +someFunction.optionalParams = function(a, aOk, opt_b, opt_bOk) { +}; diff --git a/tools/closure_linter/closure_linter/testdata/other.js b/tools/closure_linter/closure_linter/testdata/other.js new file mode 100644 index 0000000..1e424ce --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/other.js @@ -0,0 +1,459 @@ +// Copyright 2007 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Miscellaneous style errors. + * + * @author robbyw@google.com (Robby Walker) + */ + +goog.provide('goog.dom'); + +goog.require('goog.events.EventHandler'); + +var this_is_a_really_long_line = 100000000000000000000000000000000000000000000000; // LINE_TOO_LONG + +// Declaration in multiple lines. +// Regression test for b/3009648 +var + a, + b = 10; + +// http://this.comment.should.be.allowed/because/it/is/a/URL/that/can't/be/broken/up + + +/** + * Types are allowed to be long even though they contain spaces. 
+ * @type {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType} + */ +x.z = 1000; + + +/** + * Params are also allowed to be long even though they contain spaces. + * @param {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType} fn + * The function to call. + */ +x.z = function(fn) { +}; + + +/** + * Visibility tags are allowed to have type, therefore they allowed to be long. + * @private {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType} + */ +x.z_ = 1000; + + +/** + * Visibility tags are allowed to have type, therefore they allowed to be long. + * @public {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType} + */ +x.z = 1000; + + +/** + * Visibility tags are allowed to have type, therefore they allowed to be long. + * @protected {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType} + */ +x.z = 1000; + + +/** + * Visibility tags are allowed to have type, therefore they allowed to be long. + * @package {function(ReallyReallyReallyReallyLongType,AnotherExtremelyLongType):LongReturnType} + */ +x.z = 1000; + +// +2: LINE_TOO_LONG +var x = + a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.tooLongEvenThoughNoSpaces; + +// +1: LINE_TOO_LONG +getSomeExtremelyLongNamedFunctionWowThisNameIsSoLongItIsAlmostUnbelievable().dispose(); + + +/** + * @param {number|string|Object|Element|Array.|null} aReallyReallyReallyStrangeParameter + * @param {number|string|Object|Element|goog.a.really.really.really.really.really.really.really.really.long.Type|null} shouldThisParameterWrap + * @return {goog.a.really.really.really.really.really.really.really.really.long.Type} + */ +x.y = function(aReallyReallyReallyStrangeParameter, shouldThisParameterWrap) { + return something; +}; + + +/** + * @type {goog.a.really.really.really.really.really.really.really.really.long.Type?} + */ +x.y = null; + +function doesEndWithSemicolon() { +}; // ILLEGAL_SEMICOLON_AFTER_FUNCTION + +function doesNotEndWithSemicolon() { +} + +doesEndWithSemicolon = function() { + // +1: UNUSED_LOCAL_VARIABLE + var shouldEndWithSemicolon = function() { + } // MISSING_SEMICOLON_AFTER_FUNCTION +}; + +doesNotEndWithSemicolon = function() { +} // MISSING_SEMICOLON_AFTER_FUNCTION + +doesEndWithSemicolon['100'] = function() { +}; + +doesNotEndWithSemicolon['100'] = function() { +} // MISSING_SEMICOLON_AFTER_FUNCTION + +if (some_flag) { + function doesEndWithSemicolon() { + }; // ILLEGAL_SEMICOLON_AFTER_FUNCTION + + function doesNotEndWithSemicolon() { + } + + doesEndWithSemicolon = function() { + }; + + doesNotEndWithSemicolon = function() { + } // MISSING_SEMICOLON_AFTER_FUNCTION +} + +// No semicolon for expressions that are immediately called. +var immediatelyCalledFunctionReturnValue = function() { +}(); + + +/** + * Regression test for function expressions treating semicolons wrong. + * @bug 1044052 + */ +goog.now = Date.now || function() { + //... +}; + + +/** + * Regression test for function expressions treating semicolons wrong. + * @bug 1044052 + */ +goog.now = Date.now || function() { + //... +} // MISSING_SEMICOLON_AFTER_FUNCTION + + +/** + * Function defined in ternary operator + * @bug 1413743 + * @param {string} id The ID of the element. + * @return {Element} The matching element. + */ +goog.dom.$ = document.getElementById ? 
+ function(id) { + return document.getElementById(id); + } : + function(id) { + return document.all[id]; + }; + + +/** + * Test function in object literal needs no semicolon. + * @type {Object} + */ +x.y = { + /** + * @return {number} Doc the inner function too. + */ + a: function() { + return 10; + } +}; + +// Semicolon required at end of object literal. +var throwObjectLiteral = function() { + throw { + x: 0, + y: 1 + } // MISSING_SEMICOLON +}; + +var testRegex = /(\([^\)]*\))|(\[[^\]]*\])|({[^}]*})|(<[^&]*>)/g; +var testRegex2 = /abc/gimsx; + +var x = 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + + 20; // LINE_STARTS_WITH_OPERATOR + +var x = 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + + -20; // unary minus is ok + +var x = z++ + + 20; // LINE_STARTS_WITH_OPERATOR + +var x = z. // LINE_ENDS_WITH_DOT + y(); + +// Regression test: This line was incorrectly not reporting an error +var marginHeight = x.layout.getSpacing_(elem, 'marginTop') + + x.layout.getSpacing_(elem, 'marginBottom'); +// -1: LINE_STARTS_WITH_OPERATOR + +// Regression test: This line was correctly reporting an error +x.layout.setHeight(elem, totalHeight - paddingHeight - borderHeight + - marginHeight); // LINE_STARTS_WITH_OPERATOR + +// Regression test: This line was incorrectly reporting spacing and binary +// operator errors +if (i == index) { +} +++i; + +var twoSemicolons = 10;; // REDUNDANT_SEMICOLON + +if (i == index) { +} else; // REDUNDANT_SEMICOLON +i++; + +do; // REDUNDANT_SEMICOLON +{ +} while (i == index); + +twoSemicolons = 10; +// A more interesting example of two semicolons + ; // EXTRA_SPACE, WRONG_INDENTATION, REDUNDANT_SEMICOLON + + +/** @bug 1598895 */ +for (;;) { + // Do nothing. +} + +for (var x = 0, foo = blah(), bar = {};; x = update(x)) { + // A ridiculous case that should probably never happen, but I suppose is + // valid. +} + +var x = "allow'd double quoted string"; +var x = "unnecessary double quotes string"; // UNNECESSARY_DOUBLE_QUOTED_STRING +// +1: MULTI_LINE_STRING, UNNECESSARY_DOUBLE_QUOTED_STRING, +var x = "multi-line unnecessary double quoted \ + string."; + + +// Regression test: incorrectly reported missing doc for variable used in global +// scope. +/** + * Whether the "Your browser isn't fully supported..." warning should be shown + * to the user; defaults to false. + * @type {boolean} + * @private + */ +init.browserWarning_ = false; + +init.browserWarning_ = true; + +if (someCondition) { + delete this.foo_[bar]; +} + +// Commas at the end of literals used to be forbidden. +x = [1, 2, 3,]; +x = [1, 2, 3, /* A comment */]; +x = [ + 1, + 2, + 3, +]; +x = { + a: 1, +}; + +// Make sure we don't screw up typing for Lvalues and think b:c is a type value +// pair. +x = a ? b : c = 34; +x = a ? b:c; // MISSING_SPACE, MISSING_SPACE +x = (a ? b:c = 34); // MISSING_SPACE, MISSING_SPACE + +if (x) { + x += 10; +}; // REDUNDANT_SEMICOLON + + +/** + * Bad assignment of array to prototype. + * @type {Array} + */ +x.prototype.badArray = []; // ILLEGAL_PROTOTYPE_MEMBER_VALUE + + +/** + * Bad assignment of object to prototype. + * @type {Object} + */ +x.prototype.badObject = {}; // ILLEGAL_PROTOTYPE_MEMBER_VALUE + + +/** + * Bad assignment of class instance to prototype. + * @type {goog.events.EventHandler} + */ +x.prototype.badInstance = new goog.events.EventHandler(); +// -1: ILLEGAL_PROTOTYPE_MEMBER_VALUE + +// Check that some basic structures cause no errors. 
+x = function() { + try { + } finally { + y = 10; + } +}; + +switch (x) { + case 10: + break; + case 20: + // Fallthrough. + case 30: + break; + case 40: { + break; + } + default: + break; +} + +do { + x += 10; +} while (x < 100); + +do { + x += 10; +} while (x < 100) // MISSING_SEMICOLON + +// Missing semicolon checks. +x = 10 // MISSING_SEMICOLON +x = someOtherVariable // MISSING_SEMICOLON +x = fnCall() // MISSING_SEMICOLON +x = {a: 10, b: 20} // MISSING_SEMICOLON +x = [10, 20, 30] // MISSING_SEMICOLON +x = (1 + 2) // MISSING_SEMICOLON +x = { + a: [ + 10, 20, (30 + + 40) + ] +} // MISSING_SEMICOLON +x = a + .b + .c(). // LINE_ENDS_WITH_DOT + d; + +// Test that blocks without braces don't generate incorrect semicolon and +// indentation errors. TODO: consider disallowing blocks without braces. +if (x) + y = 10; + +if (x) + y = 8 // MISSING_SEMICOLON + +// Regression test for bug 2973408, bad missing semi-colon error when else +// is not followed by an opening brace. +if (x) + y = 3; +else + z = 4; + +// We used to erroneously report a missing semicolon error. +if (x) +{ +} + +while (x) + y = 10; + +for (x = 0; x < 10; x++) + y += 10; + z += 10; // WRONG_INDENTATION + +var x = 100 // MISSING_SEMICOLON + +// Also regression test for bug 2973407 Parse error on nested ternary statments. +foo = bar ? baz ? 1 : 2 : 3 // MISSING_SEMICOLON +foo = bar ? 1 : baz ? 2 : 3; +bar ? 1 : baz ? 2 : bat ? 3 : 4; +bar ? 1 : baz ? bat ? 3 : 4 : baq ? 5 : 6; +foo = bar ? 1 : 2; + +foo = { + str: bar ? baz ? blah ? 1 : 2 : 3 : 4 +} // MISSING_SEMICOLON + + +// Regression tests for bug 2969408 GJsLint doesn't like labeled statements. +mainLoop: while (!y) { +} + +myLabel1: myLabel2: var x; + +for (var i = 0; i < n; i++) { + myLabel3: + while (true) { + break myLabel3; + } +} + +myLabelA : myLabelB : x > y ? 0 : 1; // EXTRA_SPACE, EXTRA_SPACE, EXTRA_SPACE + +// Regression test for bug 4269466. +var a = new Scheme({default: 0}); +switch (foo) { + default: + var a = new Scheme({default: 0}); + break; +} + + +/** @private Some text is allowed after tag */ +x.y_ = function() { +}; + + +/** @private Some text is allowed after tag but not the long oneeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee.*/ // LINE_TOO_LONG +x.y_ = function() { +}; + + +/** @private {number} Some text is allowed after tag */ +x.z_ = 200; + + +/** @private {number} Some text is allowed after tag but not the long oneeeeeeeeeeeeeeee. */ // LINE_TOO_LONG +x.z_ = 200; + +// Regression tests for b/16298424. +var z = function() {}.bind(); +window.alert(function() {}.bind()); +function() { +}.bind(); +var y = function() { +}.bind(); +var y = function() { + } + .bind(); + +/* comment not closed // FILE_MISSING_NEWLINE, FILE_IN_BLOCK \ No newline at end of file diff --git a/tools/closure_linter/closure_linter/testdata/provide_blank.js b/tools/closure_linter/closure_linter/testdata/provide_blank.js new file mode 100644 index 0000000..a4e0716 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/provide_blank.js @@ -0,0 +1,29 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Checks that missing provides are reported at the first require + * when there are no other provides in the file. + */ + +goog.require('dummy.package.ClassName'); // MISSING_GOOG_PROVIDE + + + +/** + * @constructor + */ +dummy.Something = function() {}; + +var x = new dummy.package.ClassName(); diff --git a/tools/closure_linter/closure_linter/testdata/provide_extra.js b/tools/closure_linter/closure_linter/testdata/provide_extra.js new file mode 100644 index 0000000..3370950 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/provide_extra.js @@ -0,0 +1,39 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed 2to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Checks for extra goog.provides. + * + */ + +goog.provide(''); // EXTRA_GOOG_PROVIDE + +goog.provide('dummy.AnotherThingTest'); // ok since mentioned in setTestOnly +goog.provide('dummy.AnotherTrulyLongNamespaceToMakeItExceedEightyCharactersThingTest'); + +goog.provide('dummy.Something'); +goog.provide('dummy.Something'); // EXTRA_GOOG_PROVIDE +goog.provide('dummy.SomethingElse'); // EXTRA_GOOG_PROVIDE + +goog.provide('dummy.YetAnotherThingTest'); // EXTRA_GOOG_PROVIDE + +goog.setTestOnly('dummy.AnotherThingTest'); +goog.setTestOnly('dummy.AnotherTrulyLongNamespaceToMakeItExceedEightyCharactersThingTest'); + + + +/** + * @constructor + */ +dummy.Something = function() {}; diff --git a/tools/closure_linter/closure_linter/testdata/provide_missing.js b/tools/closure_linter/closure_linter/testdata/provide_missing.js new file mode 100644 index 0000000..42de489 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/provide_missing.js @@ -0,0 +1,40 @@ +// Copyright 2011 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// We are missing a provide of goog.something.Else. +// -15: MISSING_GOOG_PROVIDE + +/** + * @fileoverview Tests missing provides and the usage of the missing provide + * suppression annotation. + * + */ + + + +/** + * Constructor for Something. + * @constructor + * @suppress {missingProvide} + */ +goog.something.Something = function() {}; + + + +/** + * Constructor for Else. We should get an error about providing this, but not + * about the constructor for Something. 
+ * @constructor + */ +goog.something.Else = function() {}; diff --git a/tools/closure_linter/closure_linter/testdata/require_alias.js b/tools/closure_linter/closure_linter/testdata/require_alias.js new file mode 100644 index 0000000..804b2ed --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_alias.js @@ -0,0 +1,14 @@ +// We are missing a require of goog.sample.UsedType +goog.provide('goog.something.Else'); // +1: MISSING_GOOG_REQUIRE + + +goog.scope(function() { +var unused = goog.events.unused; // UNUSED_LOCAL_VARIABLE +var used = goog.events.used; // ALIAS_STMT_NEEDS_GOOG_REQUIRE +var UsedType = goog.sample.UsedType; +var other = goog.sample.other; + + +/** @type {used.UsedAlias|other.UsedAlias} */ +goog.something.Else = UsedType.create(); +}); // goog.scope diff --git a/tools/closure_linter/closure_linter/testdata/require_all_caps.js b/tools/closure_linter/closure_linter/testdata/require_all_caps.js new file mode 100644 index 0000000..49344f2 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_all_caps.js @@ -0,0 +1,30 @@ +// Copyright 2008 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview A should come before B. + * + */ + +goog.provide('XX'); // GOOG_PROVIDES_NOT_ALPHABETIZED +goog.provide('ZZ'); +goog.provide('YY'); + +goog.require('dummy.AA'); // GOOG_REQUIRES_NOT_ALPHABETIZED +goog.require('dummy.CC'); +goog.require('dummy.BB'); + +dummy.AA(); +dummy.CC(); +dummy.BB(); diff --git a/tools/closure_linter/closure_linter/testdata/require_blank.js b/tools/closure_linter/closure_linter/testdata/require_blank.js new file mode 100644 index 0000000..060781c --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_blank.js @@ -0,0 +1,29 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Checks that missing requires are reported just after the last + * provide when there are no other requires in the file. 
+ */ + +goog.provide('dummy.Something'); // +1: MISSING_GOOG_REQUIRE + + + +/** + * @constructor + */ +dummy.Something = function() {}; + +var x = new dummy.package.ClassName(); diff --git a/tools/closure_linter/closure_linter/testdata/require_extra.js b/tools/closure_linter/closure_linter/testdata/require_extra.js new file mode 100644 index 0000000..3ee39c7 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_extra.js @@ -0,0 +1,35 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Checks for extra goog.requires. + * + */ + +goog.require(''); // EXTRA_GOOG_REQUIRE +goog.require('dummy.Aa'); +goog.require('dummy.Aa.CONSTANT'); // EXTRA_GOOG_REQUIRE +goog.require('dummy.Aa.Enum'); // EXTRA_GOOG_REQUIRE +goog.require('dummy.Bb'); +goog.require('dummy.Ff'); // EXTRA_GOOG_REQUIRE +goog.require('dummy.Gg'); // EXTRA_GOOG_REQUIRE +goog.require('dummy.cc'); +goog.require('dummy.cc'); // EXTRA_GOOG_REQUIRE +goog.require('dummy.hh'); // EXTRA_GOOG_REQUIRE + +new dummy.Aa(); +dummy.Bb.someMethod(); +dummy.cc(); +var x = dummy.Aa.Enum.VALUE; +var y = dummy.Aa.CONSTANT; diff --git a/tools/closure_linter/closure_linter/testdata/require_function.js b/tools/closure_linter/closure_linter/testdata/require_function.js new file mode 100644 index 0000000..532bb67 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_function.js @@ -0,0 +1,22 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Description of this file. + */ + +goog.require('goog.mobile.paging.getPage'); + + +goog.mobile.paging.getPage(); diff --git a/tools/closure_linter/closure_linter/testdata/require_function_missing.js b/tools/closure_linter/closure_linter/testdata/require_function_missing.js new file mode 100644 index 0000000..33bec21 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_function_missing.js @@ -0,0 +1,24 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// -14: MISSING_GOOG_REQUIRE + +/** + * @fileoverview Description of this file. + */ + + + +goog.mobile.paging.getPage(); +goog.mobile.paging.getOtherPage(); diff --git a/tools/closure_linter/closure_linter/testdata/require_function_through_both.js b/tools/closure_linter/closure_linter/testdata/require_function_through_both.js new file mode 100644 index 0000000..d9525ec --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_function_through_both.js @@ -0,0 +1,23 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Description of this file. + */ + +goog.require('goog.mobile.paging'); +goog.require('goog.mobile.paging.getPage'); + + +goog.mobile.paging.getPage(); diff --git a/tools/closure_linter/closure_linter/testdata/require_function_through_namespace.js b/tools/closure_linter/closure_linter/testdata/require_function_through_namespace.js new file mode 100644 index 0000000..55628fc --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_function_through_namespace.js @@ -0,0 +1,22 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Description of this file. + */ + +goog.require('goog.mobile.paging'); + + +goog.mobile.paging.getPage(); diff --git a/tools/closure_linter/closure_linter/testdata/require_interface.js b/tools/closure_linter/closure_linter/testdata/require_interface.js new file mode 100644 index 0000000..d6e8302 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_interface.js @@ -0,0 +1,31 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Contains a test to verify that interfaces implemented in a file + * are goog.require'd. + * + */ + +// We're missing a goog.require of goog.something.SomeInterface. +goog.provide('goog.something.SomeClass'); // +1: MISSING_GOOG_REQUIRE + + + +/** + * Constructor for SomeClass. + * @constructor + * @implements {goog.something.SomeInterface} + */ +goog.something.SomeClass = function() {}; diff --git a/tools/closure_linter/closure_linter/testdata/require_interface_alias.js b/tools/closure_linter/closure_linter/testdata/require_interface_alias.js new file mode 100644 index 0000000..c71b29c --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_interface_alias.js @@ -0,0 +1,34 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Contains a test to verify that aliased interfaces + * are goog.require'd. + */ + +// We're missing a goog.require of goog.something.SomeInterface. +goog.provide('goog.something.SomeClass'); // +1: MISSING_GOOG_REQUIRE + +goog.scope(function() { +var something = goog.something; + + + +/** + * Constructor for SomeClass. + * @constructor + * @implements {something.SomeInterface} + */ +something.SomeClass = function() {}; +}); // goog.scope diff --git a/tools/closure_linter/closure_linter/testdata/require_interface_base.js b/tools/closure_linter/closure_linter/testdata/require_interface_base.js new file mode 100644 index 0000000..c8bb1f6 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_interface_base.js @@ -0,0 +1,31 @@ +// Copyright 2011 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Contains a test to verify that parent interfaces + * implemented are goog.require'd. + * + */ + +// We're missing a goog.require of goog.something.BaseInterface. +goog.provide('goog.something.SomeInterface'); // +1: MISSING_GOOG_REQUIRE + + + +/** + * Constructor for SomeInterface. 
+ * @interface + * @extends {goog.something.BaseInterface} + */ +goog.something.SomeInterface = function() {}; diff --git a/tools/closure_linter/closure_linter/testdata/require_lower_case.js b/tools/closure_linter/closure_linter/testdata/require_lower_case.js new file mode 100644 index 0000000..c1fff4a --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_lower_case.js @@ -0,0 +1,30 @@ +// Copyright 2008 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview The B should come before the b. + * + */ + +goog.provide('x'); // GOOG_PROVIDES_NOT_ALPHABETIZED +goog.provide('X'); +goog.provide('Y'); + +goog.require('dummy.bb'); // GOOG_REQUIRES_NOT_ALPHABETIZED +goog.require('dummy.Bb'); +goog.require('dummy.Cc'); + +var x = dummy.bb.a(); +var y = dummy.Bb.a(); +var z = dummy.Cc.a(); diff --git a/tools/closure_linter/closure_linter/testdata/require_missing.js b/tools/closure_linter/closure_linter/testdata/require_missing.js new file mode 100644 index 0000000..3539c94 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_missing.js @@ -0,0 +1,40 @@ +// Copyright 2011 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Tests missing requires around the usage of the require + * suppression annotation. + * + */ + +// We are missing a require of goog.foo. +goog.provide('goog.something.Else'); // +1: MISSING_GOOG_REQUIRE + + + +/** + * Constructor for Else. + * @constructor + */ +goog.something.Else = function() { + /** @suppress {missingRequire} */ + this.control.createConstructorMock( + goog.foo.bar, 'Baz'); + + // Previous suppress should only be scoped to that statement. + this.control.createConstructorMock( + goog.foo.bar, 'Baz'); + + this.control.invoke(goog.foo.bar, 'Test'); +}; diff --git a/tools/closure_linter/closure_linter/testdata/require_numeric.js b/tools/closure_linter/closure_linter/testdata/require_numeric.js new file mode 100644 index 0000000..29d8377 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_numeric.js @@ -0,0 +1,30 @@ +// Copyright 2008 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Numbers should come before letters. + * + */ + +goog.provide('xa'); // GOOG_PROVIDES_NOT_ALPHABETIZED +goog.provide('x1'); +goog.provide('xb'); + +goog.require('dummy.aa'); // GOOG_REQUIRES_NOT_ALPHABETIZED +goog.require('dummy.a1'); +goog.require('dummy.ab'); + +dummy.aa.a; +dummy.a1.a; +dummy.ab.a; diff --git a/tools/closure_linter/closure_linter/testdata/require_provide_blank.js b/tools/closure_linter/closure_linter/testdata/require_provide_blank.js new file mode 100644 index 0000000..0e0c188 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_provide_blank.js @@ -0,0 +1,31 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// -14: MISSING_GOOG_PROVIDE +// -15: MISSING_GOOG_REQUIRE + +/** + * @fileoverview Checks that missing requires and provides are reported at the + * top of the file when there are no existing goog.requires or provides in the + * file. + */ + + + +/** + * @constructor + */ +dummy.Something = function() {}; + +var x = new dummy.package.ClassName(); diff --git a/tools/closure_linter/closure_linter/testdata/require_provide_missing.js b/tools/closure_linter/closure_linter/testdata/require_provide_missing.js new file mode 100644 index 0000000..a56f4d0 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_provide_missing.js @@ -0,0 +1,76 @@ +// Copyright 2008 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview The same code as require_provide_ok, but missing a provide + * and a require call. + * + */ + +goog.provide('goog.something'); // +1: MISSING_GOOG_PROVIDE +// Missing provide of goog.something.Else and goog.something.SomeTypeDef. + +goog.require('goog.Class'); +goog.require('goog.package'); // +1: MISSING_GOOG_REQUIRE +// Missing requires of goog.Class.Enum and goog.otherThing.Class.Enum. 
+ + +var x = new goog.Class(); +goog.package.staticFunction(); + +var y = goog.Class.Enum.VALUE; + + +/** + * @typedef {string} + */ +goog.something.SomeTypeDef; + + +/** + * Private variable. + * @type {number} + * @private + */ +goog.something.private_ = 10; + + +/** + * Use private variables defined in this file so they don't cause a warning. + */ +goog.something.usePrivateVariables = function() { + var x = [ + goog.something.private_, + x + ]; +}; + + +/** + * Static function. + */ +goog.something.staticFunction = function() { +}; + + + +/** + * Constructor for Else. + * @constructor + */ +goog.something.Else = function() { + // Bug 1801608: Provide goog.otherThing.Class.Enum isn't missing. + var enum = goog.otherThing.Class.Enum; + goog.otherThing.Class.Enum = enum; +}; diff --git a/tools/closure_linter/closure_linter/testdata/require_provide_ok.js b/tools/closure_linter/closure_linter/testdata/require_provide_ok.js new file mode 100644 index 0000000..01ddafe --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/require_provide_ok.js @@ -0,0 +1,214 @@ +// Copyright 2008 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview There is nothing wrong w/ this javascript. + * + */ +goog.module('goog.super.long.DependencyNameThatForcesMethodDefinitionToSpanMultipleLinesFooBar'); +goog.provide('goog.something'); +goog.provide('goog.something.Else'); +goog.provide('goog.something.Else.Enum'); +/** @suppress {extraProvide} */ +goog.provide('goog.something.Extra'); +goog.provide('goog.something.SomeTypeDef'); +goog.provide('goog.somethingelse.someMethod'); +goog.provide('goog.super.long.DependencyNameThatForcesTheLineToBeOverEightyCharacters'); +goog.provide('notInClosurizedNamespacesSoNotExtra'); + +goog.require('dummy.foo'); +goog.require('dummy.foo.someSpecificallyRequiredMethod'); +goog.require('goog.Class'); +/** @suppress {extraRequire} */ +goog.require('goog.extra.require'); +goog.require('goog.package'); +goog.require('goog.package.ClassName'); +goog.require('goog.package.OtherClassName'); +/** @suppress {extraRequire} Legacy dependency on enum */ +goog.require('goog.package.OuterClassName.InnerClassName'); +goog.require('goog.super.long.DependencyNameThatForcesMethodDefinitionToSpanMultipleLinesFooBar'); +goog.require('goog.super.long.DependencyNameThatForcesTheLineToBeOverEightyCharacters2'); +goog.require('goog.super.long.DependencyNameThatForcesTheLineToBeOverEightyCharacters3'); +goog.require('notInClosurizedNamespacesSoNotExtra'); + +dummy.foo.someMethod(); +dummy.foo.someSpecificallyRequiredMethod(); + + +// Regression test for bug 3473189. Both of these 'goog.provide' tokens should +// be completely ignored by alphabetization checks. 
+if (typeof goog != 'undefined' && typeof goog.provide == 'function') { + goog.provide('goog.something.SomethingElse'); +} + + +var x = new goog.Class(); +goog.package.staticFunction(); + +var y = goog.Class.Enum.VALUE; + + +// This should not trigger a goog.require. +var somethingPrivate = goog.somethingPrivate.PrivateEnum_.VALUE; + + +/** + * This method is provided directly, instead of its namespace. + */ +goog.somethingelse.someMethod = function() {}; + + +/** + * Defining a private property on a required namespace should not trigger a + * provide of that namespace. Yes, people actually do this. + * @private + */ +goog.Class.privateProperty_ = 1; + + +/** + * @typedef {string} + */ +goog.something.SomeTypeDef; + + +/** + * @typedef {string} + * @private + */ +goog.something.SomePrivateTypeDef_; + + +/** + * Some variable that is declared but not initialized. + * @type {string|undefined} + * @private + */ +goog.something.somePrivateVariable_; + + +/** + * Private variable. + * @type {number} + * @private + */ +goog.something.private_ = 10; + + +/** + * Use private variables defined in this file so they don't cause a warning. + */ +goog.something.usePrivateVariables = function() { + var x = [ + goog.something.private_, + goog.Class.privateProperty_, + x + ]; +}; + + + +/** + * A really long class name to provide and usage of a really long class name to + * be required. + * @constructor + */ +goog.super.long.DependencyNameThatForcesTheLineToBeOverEightyCharacters = + function() { + var x = new goog.super.long. // LINE_ENDS_WITH_DOT + DependencyNameThatForcesTheLineToBeOverEightyCharacters2(); + var x = new goog.super.long + .DependencyNameThatForcesTheLineToBeOverEightyCharacters3(); + // Use x to avoid a warning. + var x = [x]; +}; + + +/** + * A really long class name to to force a method definition to be greater than + * 80 lines. We should be grabbing the whole identifier regardless of how many + * lines it is on. + */ +goog.super.long + .DependencyNameThatForcesMethodDefinitionToSpanMultipleLinesFooBar + .prototype.someMethod = function() { +}; + + +/** + * Static function. + */ +goog.something.staticFunction = function() { + // Tests that namespace usages are identified using 'namespace.' not just + // 'namespace'. + googSomething.property; + dummySomething.property; + goog.package.ClassName // A comment in between the identifier pieces. + .IDENTIFIER_SPLIT_OVER_MULTIPLE_LINES; + goog.package.OtherClassName.property = 1; + + // Test case where inner class needs to be required explicitly. + new goog.package.OuterClassName.InnerClassName(); + + // Don't just use goog.bar for missing namespace, hard coded to never require + // goog since it's never provided. + control.createConstructorMock( + /** @suppress {missingRequire} */ goog.foo.bar, 'Baz'); + + goog.require('goog.shouldBeIgnored'); +}; + + + +/** + * Constructor for Else. + * @constructor + */ +goog.something.Else = function() { + /** @suppress {missingRequire} */ + this.control.createConstructorMock(goog.foo.bar, 'Baz'); +}; + + +/** + * Enum attached to Else. Should not need to be provided explicitly, but + * should not generate an extra require warning either. + * @enum {number} + */ +goog.something.Else.Enum = { + 'key': 1 +}; + + +/** + * Sample of a typedef. This should not need a provide as it is an inner + * element like an enum. + * + * @typedef {{val1: string, val2: boolean, val3: number}} + */ +goog.something.Else.Typedef; + + + +/** + * Constructor for SomethingElse. 
+ * @constructor + */ +goog.something.SomethingElse = function() {}; + + +/** + * @suppress {missingProvide} + */ +goog.suppress.someMethod = function() {}; diff --git a/tools/closure_linter/closure_linter/testdata/semicolon_missing.js b/tools/closure_linter/closure_linter/testdata/semicolon_missing.js new file mode 100644 index 0000000..5601db8 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/semicolon_missing.js @@ -0,0 +1,18 @@ +/** + * @fileoverview This is for regression testing of scenario where semicolon is + * missing at EOF. b/10801776. + */ + +goog.provide('dummy.foo.DATA'); + +/** + * @type {string} + * @const + * + * For repeating the bug blank comment line above this is needed. + */ + +// +3: MISSING_SEMICOLON +dummy.foo.DATA = + 'SFSDFSDdfgdfgdftreterterterterterggsdfsrrwerwerwsfwerwerwere55454ss' + + 'SFSDFSDdfgdfgdftretertertertertergg' diff --git a/tools/closure_linter/closure_linter/testdata/simple.html b/tools/closure_linter/closure_linter/testdata/simple.html new file mode 100644 index 0000000..42ebab9 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/simple.html @@ -0,0 +1,33 @@ + + + + + GJsLint HTML JavaScript extraction tests + + + + + + + diff --git a/tools/closure_linter/closure_linter/testdata/spaces.js b/tools/closure_linter/closure_linter/testdata/spaces.js new file mode 100644 index 0000000..85a36e5 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/spaces.js @@ -0,0 +1,354 @@ +// Copyright 2007 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Errors relating to whitespace. + * + * @author robbyw@google.com (Robby Walker) + */ + +if(needs_space) { // MISSING_SPACE +} + +if ( too_much_space) { // EXTRA_SPACE +} + +if (different_extra_space ) { // EXTRA_SPACE +} + +switch(needs_space) { // MISSING_SPACE +} + +var x = 'if(not_an_error)'; + +var y = afjkljkl + ajklasdflj + ajkadfjkasdfklj + aadskfasdjklf + jkasdfa + ( + kasdfkjlasdfjkl / jklasdfjklasdfjkl); + +x = 5+ 8; // MISSING_SPACE +x = 5 +8; // MISSING_SPACE +x= 5; // MISSING_SPACE +x = 6; // EXTRA_SPACE +x = 7; // EXTRA_SPACE +x = 6 + 2; // EXTRA_SPACE +x += 10; + +throw Error('Selector not supported yet('+ opt_selector + ')'); // MISSING_SPACE +throw Error('Selector not supported yet(' +opt_selector + ')'); // MISSING_SPACE +throw Error( + 'Selector not supported yet' + + '(' +(opt_selector ? 'foo' : 'bar') + ')'); // MISSING_SPACE + +x++; +x ++; // EXTRA_SPACE +x++ ; // EXTRA_SPACE +y = a + ++b; +for (var i = 0; i < 10; ++i) { +} + +// We omit the update section of the for loop to test that a space is allowed +// in this special case. +for (var part; part = parts.shift(); ) { +} + +if (x == y) { +} + +x = 10; // no error here +x = -1; +x++; +++x; + +x = bool ? 
-1 : -1; + +x = {a: 10}; +x = {a:10}; // MISSING_SPACE + +x = !!y; + +x >>= 0; +x <<= 10; + +x[100] = 10; +x[ 100] = 10; // EXTRA_SPACE +x[100 ] = 10; // EXTRA_SPACE +x [100] = 10; // EXTRA_SPACE +x[10]= 5; // MISSING_SPACE +var x = []; +x = [[]]; +x = [[x]]; +x = [[[x, y]]]; +var craziness = ([1, 2, 3])[1]; +var crazinessError = ([1, 2, 3]) [1]; // EXTRA_SPACE +var multiArray = x[1][2][3][4]; +var multiArrayError = x[1] [2][3][4]; // EXTRA_SPACE + +array[aReallyLooooooooooooooooooooooooooooongIndex1][ + anotherVeryLoooooooooooooooooooooooooooooooooooongIndex +] = 10; + +if (x) { + array[aReallyLooooooooooooooooooooooooooooongIndex1][ + anotherVeryLoooooooooooooooooooooooooooooooooooongIndex + ] = 10; +} + + +/** + * Docs. + * @param {Number} x desc. + * @return {boolean} Some boolean value. + */ +function functionName( x) { // EXTRA_SPACE + return !!x; +} + + +/** + * Docs. + * @param {Number} x desc. + */ +function functionName(x ) { // EXTRA_SPACE + return; +} + + +/** + * Docs. + * @param {Number} x desc. + * @param {Number} y desc. + */ +function functionName(x,y) { // MISSING_SPACE +} + + +/** + * Docs. + * @param {Number} x desc. + * @param {Number} y desc. + */ +function functionName(x, y) { +} + + +/** + * Docs. + */ +function functionName() { // EXTRA_SPACE +} + + +/** + * Docs. + */ +function functionName(){ // MISSING_SPACE +} + +functionName (); // EXTRA_SPACE + + +/** + * Docs. + */ +function functionName () { // EXTRA_SPACE +} + + +/** + * Docs. + */ +var foo = function () { // EXTRA_SPACE +}; + + + +/** + * Missing a newline. + * @constructor + * @extends {a.b.c} + */ +x.y.z = function() { +};goog.inherits(x.y.z, a.b.c); // MISSING_LINE + + + +/** + * Extra space. + * @constructor + * @extends {a.b.c} + */ +x.y.z = function() { +}; + goog.inherits(x.y.z, a.b.c); // WRONG_INDENTATION + + + +/** + * Extra blank line. + * @constructor + * @extends {a.b.c} + */ +x.y.z = function() { +}; + +goog.inherits(x.y.z, a.b.c); // -1: EXTRA_LINE + + + +/** + * Perfect! + * @constructor + * @extends {a.b.c} + */ +x.y.z = function() { +}; +goog.inherits(x.y.z, a.b.c); + +if (flag) { + /** + * Also ok! + * @constructor + * @extends {a.b.c} + */ + x.y.z = function() { + }; + goog.inherits(x.y.z, a.b.c); +} + + +/** + * Docs. + */ +x.finally = function() { +}; + +x.finally(); +x + .finally(); +x.finally (); // EXTRA_SPACE +x + .finally (); // EXTRA_SPACE +try { +} finally (e) { +} +try { +} finally(e) { // MISSING_SPACE +} + +functionName(x , y); // EXTRA_SPACE +functionName(x,y); // MISSING_SPACE +functionName(x, y); + +var really_really_really_really_really_really_really_really_really_long_name = + 2; + +var current = arr[cursorRead++]; + +var x = -(y + z); + +// Tab before + +var foo + 3; // ILLEGAL_TAB +if (something) { + var x = 4; // ILLEGAL_TAB +} + +// +1: ILLEGAL_TAB +// Tab <-- in a comment. + + +// +3: ILLEGAL_TAB +// +3: ILLEGAL_TAB +/** + * An inline flag with a tab {@code asdfasd}. + * @return {string} Illegal <-- tab in a doc description. + */ +function x() { + return ''; +} + + +// +2: ILLEGAL_TAB +/** + * @type {tabBeforeMe} + */ + +// +1: EXTRA_SPACE +var whitespaceAtEndOfLine; + +// +1: EXTRA_SPACE +// Whitespace at end of comment. + + +// +4: EXTRA_SPACE +// +4: EXTRA_SPACE +// +4: EXTRA_SPACE +// +4: EXTRA_SPACE +/* + * Whitespace at EOL. + * @type {string} + * @param {string} Description with whitespace at EOL. + */ +x = 10; + + +/** + * @param {?{foo, bar: number}} x This is a valid annotation. + * @return {{baz}} This is also a valid annotation. 
+ */ +function recordTypeFunction(x) { + return x; +} + +if (y) { + // Colons are difficult. + y = x ? 1 : 2; + y = x ? 1: 2; // MISSING_SPACE + + x = { + b: 'Good', + d : 'Space before colon is bad', // EXTRA_SPACE + f: abc ? def : ghi // These colons should be treated differently + }; + + x = {language: langCode}; // EXTRA_SPACE +} + +// 1094445 - should produce missing space error before +. +// +1: MISSING_SPACE +throw Error('Selector not supported yet ('+ opt_selector + ')'); + +// This code is ok. +for (i = 0; i < len; ++i) { +} + +for (i = 0;i < 10; i++) { // MISSING_SPACE +} +for (i = 0; i < 10;i++) { // MISSING_SPACE +} +for ( i = 0; i < 10; i++) { // EXTRA_SPACE +} +for (i = 0 ; i < 10; i++) { // EXTRA_SPACE +} +for (i = 0; i < 10 ; i++) { // EXTRA_SPACE +} +for (i = 0; i < 10; i++ ) { // EXTRA_SPACE +} +for (i = 0; i < 10; i++) { // EXTRA_SPACE +} +for (i = 0; i < 10; i++) { // EXTRA_SPACE +} +for (i = 0 ;i < 10; i++) { // EXTRA_SPACE, MISSING_SPACE +} + +// Regression test for bug 3508480, parse error when tab as last token. +// +1: ILLEGAL_TAB, EXTRA_SPACE diff --git a/tools/closure_linter/closure_linter/testdata/tokenizer.js b/tools/closure_linter/closure_linter/testdata/tokenizer.js new file mode 100644 index 0000000..1fbcf4b --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/tokenizer.js @@ -0,0 +1,78 @@ +// Copyright 2008 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Errors relating to tokenizing. + * + * @author robbyw@google.com (Robby Walker) + */ + +// Regression test: if regular expressions parse incorrectly this will emit an +// error such as: Missing space after '/' +x = /[^\']/; // and all the other chars + +// Regression test: if regular expressions parse incorrectly this will emit an +// error such as: Missing space before + +var regExp = fromStart ? / ^[\t\r\n]+/ : /[ \t\r\n]+$/; + +// Regression test for bug 1032312: test for correct parsing of multiline +// strings +// +2: MULTI_LINE_STRING +var RG_MONTH_EVENT_TEMPLATE_SINGLE_QUOTE = new Template( + '\ +
>>' and '>>>=' that weren't +// recognized as operators. +a -= b; a -= c; a ^= c >>> 13; a >>>= 1; + +// Regression test as xor was not allowed on the end of a line. +x = 1000 ^ + 45; + +// Regression test for proper number parsing. If parsed incorrectly, some of +// these notations can lead to missing spaces errors. +var x = 1e-6 + 1e+6 + 0. + .5 + 0.5 + 0.e-6 + .5e-6 + 0.5e-6 + 0x123abc + + 0X1Ab3 + 1E7; + +// Regression test for keyword parsing - making sure the fact that the "do" +// keyword is a part of the identifier below doesn't break anything. +this.undoRedoManager_.undo(); + +// Regression test for regex as object value not matching. +x = {x: /./}; + +// Regression test for regex as last array element not matching. +x = [/./]; + +// Syntax tests for ES6: +x = x => x; diff --git a/tools/closure_linter/closure_linter/testdata/unparseable.js b/tools/closure_linter/closure_linter/testdata/unparseable.js new file mode 100644 index 0000000..e842614 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/unparseable.js @@ -0,0 +1,44 @@ +// Copyright 2008 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + + +/** + * Constructs and initializes a new good object. + * @constructor + */ +goog.good = function() { +}; + + +/** + * Makes this good object go bad. + * @param {number} badnessLevel How bad this object is going. + */ +goog.good.prototype.goBad = function() { // EXTRA_PARAMETER_DOCUMENTATION +}; + +if (x) + // Cannot parse ending block because beginning block is missing. +} // FILE_DOES_NOT_PARSE + + +/** + * Unecessary return documentation error is not reported because file checking + * stopped at token causing parse error. + * + * @return {boolean} Whether reform was sucessful. + */ +goog.good.prototype.reform = function() { +}; diff --git a/tools/closure_linter/closure_linter/testdata/unused_local_variables.js b/tools/closure_linter/closure_linter/testdata/unused_local_variables.js new file mode 100644 index 0000000..e9e51a1 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/unused_local_variables.js @@ -0,0 +1,88 @@ +// Copyright 2013 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Checks that unused local variables result in an error. + */ + +goog.provide('dummy.Something'); + + + +/** + * @constructor + */ +dummy.Something = function() { + // This variable isn't really used, but we can't tell for sure. 
+ var usedVariable = []; + usedVariable.length = 1; + + var variableUsedInAClosure = []; + var functionUsedByInvoking = function() { + variableUsedInAClosure[1] = 'abc'; + }; + functionUsedByInvoking(); + + var variableUsedTwoLevelsDeep = []; + var firstLevelFunction = function() { + function() { + variableUsedTwoLevelsDeep.append(1); + } + }; + firstLevelFunction(); + + // This variable isn't being declared so is unchecked. + undeclaredLocal = 1; + + var unusedVariable; + + // Check that using a variable as member name doesn't trigger + // usage. + this.unusedVariable = 0; + this.unusedVariable = this.unusedVariable + 1; + + // Check that declaring a variable twice doesn't trigger + // usage. + var unusedVariable; // UNUSED_LOCAL_VARIABLE + + var unusedVariableWithReassignment = []; // UNUSED_LOCAL_VARIABLE + unusedVariableWithReassignment = 'a'; + + var unusedFunction = function() {}; // UNUSED_LOCAL_VARIABLE + + var unusedHiddenVariable = 1; // UNUSED_LOCAL_VARIABLE + firstLevelFunction = function() { + // This variable is actually used in the function below, but hides the outer + // variable with the same name. + var unusedHiddenVariable = 1; + function() { + delete unusedHiddenVariable; + } + }; +}; + + +goog.scope(function() { +var unusedAlias = dummy.Something; // UNUSED_LOCAL_VARIABLE +var UsedTypeAlias = dummy.Something; +var AnotherUsedTypeAlias = dummy.Something; + + +/** @protected {AnotherUsedTypeAlias.Something|UsedTypeAlias} */ +var usedAlias = dummy.Something; +new usedAlias(); +}); // goog.scope + +// Unused top level variables are not checked. +var unusedTopLevelVariable; diff --git a/tools/closure_linter/closure_linter/testdata/unused_private_members.js b/tools/closure_linter/closure_linter/testdata/unused_private_members.js new file mode 100644 index 0000000..76c0865 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/unused_private_members.js @@ -0,0 +1,205 @@ +// Copyright 2010 The Closure Linter Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview Checks that an unused private members result in an error. 
+ */ + +goog.provide('dummy.Something'); + + + +/** + * @constructor + */ +dummy.Something = function() { + /** + * @type {number} + * @private + */ + this.normalVariable_ = 1; + + // +5: UNUSED_PRIVATE_MEMBER + /** + * @type {number} + * @private + */ + this.unusedVariable_ = 1; + + /** + * @type {number} + * @private + * @suppress {unusedPrivateMembers} + */ + this.suppressedUnusedVariable_ = 1; +}; + + +/** + * @type {number} + * @private + */ +dummy.Something.NORMAL_CONSTANT_ = 1; + + +// +5: UNUSED_PRIVATE_MEMBER +/** + * @type {number} + * @private + */ +dummy.Something.UNUSED_CONSTANT_ = 1; + + +/** + * @type {number} + * @private + * @suppress {unusedPrivateMembers} + */ +dummy.Something.SUPPRESSED_UNUSED_CONSTANT_ = 1; + + +/** + * @type {number} + * @private + */ +dummy.Something.normalStaticVariable_ = 1; + + +// +5: UNUSED_PRIVATE_MEMBER +/** + * @type {number} + * @private + */ +dummy.Something.unusedStaticVariable_ = 1; + + +/** + * @type {number} + * @private + * @suppress {unusedPrivateMembers} + */ +dummy.Something.suppressedUnusedStaticVariable_ = 1; + + +/** + * @type {number} + * @private + */ +dummy.Something.prototype.normalVariableOnPrototype_ = 1; + + +// +5: UNUSED_PRIVATE_MEMBER +/** + * @type {number} + * @private + */ +dummy.Something.prototype.unusedVariableOnPrototype_ = 1; + + +/** + * @type {number} + * @private + * @suppress {unusedPrivateMembers} + */ +dummy.Something.prototype.suppressedUnusedVariableOnPrototype_ = 1; + + +/** + * Check edge cases that should not be reported. + */ +dummy.Something.prototype.checkFalsePositives = function() { + this.__iterator__ = 1; + this.normalVariable_.unknownChainedVariable_ = 1; + othernamespace.unusedVariable_ = 1; + + this.element_ = 1; + this.element_.modifyPublicMember = 1; + + /** @suppress {underscore} */ + this.suppressedUnderscore_ = true; +}; + + +/** + * Use all the normal variables. + */ +dummy.Something.prototype.useAllTheThings = function() { + var x = [ + dummy.Something.NORMAL_CONSTANT_, + this.normalStaticVariable_, + this.normalVariable_, + this.normalVariableOnPrototype_, + dummy.Something.normalStaticMethod_(), + this.normalMethod_(), + x + ]; +}; + + +// +5: UNUSED_PRIVATE_MEMBER +/** + * Unused static method. + * @private + */ +dummy.Something.unusedStaticMethod_ = function() { + // Do nothing. +}; + + +/** + * Unused static method. + * @private + * @suppress {unusedPrivateMembers} + */ +dummy.Something.suppressedUnusedStaticMethod_ = function() { + // Do nothing. +}; + + +/** + * Normal static method. + * @private + */ +dummy.Something.normalStaticMethod_ = function() { + // Do nothing. +}; + + +// +5: UNUSED_PRIVATE_MEMBER +/** + * Unused non-static method. + * @private + */ +dummy.Something.prototype.unusedMethod_ = function() { + // Do nothing. +}; + + +/** + * Unused non-static method that is suppressed. + * @private + * @suppress {unusedPrivateMembers} + */ +dummy.Something.prototype.suppressedUnusedMethod_ = function() { + // Do nothing. +}; + + +/** + * Normal non-static method. + * @private + */ +dummy.Something.prototype.normalMethod_ = function() { + // Do nothing. 
+}; diff --git a/tools/closure_linter/closure_linter/testdata/utf8.html b/tools/closure_linter/closure_linter/testdata/utf8.html new file mode 100644 index 0000000..29517d0 --- /dev/null +++ b/tools/closure_linter/closure_linter/testdata/utf8.html @@ -0,0 +1,26 @@ + + + + + + diff --git a/tools/closure_linter/closure_linter/testutil.py b/tools/closure_linter/closure_linter/testutil.py new file mode 100644 index 0000000..f7084ee --- /dev/null +++ b/tools/closure_linter/closure_linter/testutil.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utility functions for testing gjslint components.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + +import StringIO + +from closure_linter import ecmametadatapass +from closure_linter import javascriptstatetracker +from closure_linter import javascripttokenizer + + +def TokenizeSource(source): + """Convert a source into a string of tokens. + + Args: + source: A source file as a string or file-like object (iterates lines). + + Returns: + The first token of the resulting token stream. + """ + + if isinstance(source, basestring): + source = StringIO.StringIO(source) + + tokenizer = javascripttokenizer.JavaScriptTokenizer() + return tokenizer.TokenizeFile(source) + + +def TokenizeSourceAndRunEcmaPass(source): + """Tokenize a source and run the EcmaMetaDataPass on it. + + Args: + source: A source file as a string or file-like object (iterates lines). + + Returns: + The first token of the resulting token stream. + """ + start_token = TokenizeSource(source) + ecma_pass = ecmametadatapass.EcmaMetaDataPass() + ecma_pass.Process(start_token) + return start_token + + +def ParseFunctionsAndComments(source, error_handler=None): + """Run the tokenizer and tracker and return comments and functions found. + + Args: + source: A source file as a string or file-like object (iterates lines). + error_handler: An error handler. + + Returns: + The functions and comments as a tuple. 
+ """ + start_token = TokenizeSourceAndRunEcmaPass(source) + + tracker = javascriptstatetracker.JavaScriptStateTracker() + if error_handler is not None: + tracker.DocFlagPass(start_token, error_handler) + + functions = [] + comments = [] + for token in start_token: + tracker.HandleToken(token, tracker.GetLastNonSpaceToken()) + + function = tracker.GetFunction() + if function and function not in functions: + functions.append(function) + + comment = tracker.GetDocComment() + if comment and comment not in comments: + comments.append(comment) + + tracker.HandleAfterToken(token) + + return functions, comments diff --git a/tools/closure_linter/closure_linter/tokenutil.py b/tools/closure_linter/closure_linter/tokenutil.py index 6ed5f7f..11e3ccc 100755 --- a/tools/closure_linter/closure_linter/tokenutil.py +++ b/tools/closure_linter/closure_linter/tokenutil.py @@ -19,15 +19,17 @@ __author__ = ('robbyw@google.com (Robert Walker)', 'ajp@google.com (Andy Perelson)') -from closure_linter.common import tokens -from closure_linter import javascripttokens - import copy +import StringIO + +from closure_linter.common import tokens +from closure_linter.javascripttokens import JavaScriptToken +from closure_linter.javascripttokens import JavaScriptTokenType # Shorthand -JavaScriptToken = javascripttokens.JavaScriptToken Type = tokens.TokenType + def GetFirstTokenInSameLine(token): """Returns the first token in the same line as token. @@ -42,6 +44,58 @@ def GetFirstTokenInSameLine(token): return token +def GetFirstTokenInPreviousLine(token): + """Returns the first token in the previous line as token. + + Args: + token: Any token in the line. + + Returns: + The first token in the previous line as token, or None if token is on the + first line. + """ + first_in_line = GetFirstTokenInSameLine(token) + if first_in_line.previous: + return GetFirstTokenInSameLine(first_in_line.previous) + + return None + + +def GetLastTokenInSameLine(token): + """Returns the last token in the same line as token. + + Args: + token: Any token in the line. + + Returns: + The last token in the same line as token. + """ + while not token.IsLastInLine(): + token = token.next + return token + + +def GetAllTokensInSameLine(token): + """Returns all tokens in the same line as the given token. + + Args: + token: Any token in the line. + + Returns: + All tokens on the same line as the given token. + """ + first_token = GetFirstTokenInSameLine(token) + last_token = GetLastTokenInSameLine(token) + + tokens_in_line = [] + while first_token != last_token: + tokens_in_line.append(first_token) + first_token = first_token.next + tokens_in_line.append(last_token) + + return tokens_in_line + + def CustomSearch(start_token, func, end_func=None, distance=None, reverse=False): """Returns the first token where func is True within distance of this token. 
@@ -77,14 +131,14 @@ def CustomSearch(start_token, func, end_func=None, distance=None, else: while token and (distance is None or distance > 0): - next = token.next - if next: - if func(next): - return next - if end_func and end_func(next): + next_token = token.next + if next_token: + if func(next_token): + return next_token + if end_func and end_func(next_token): return None - token = next + token = next_token if distance is not None: distance -= 1 @@ -123,7 +177,6 @@ def SearchExcept(start_token, token_types, distance=None, reverse=False): reverse: When true, search the tokens before this one instead of the tokens after it - Returns: The first token of any type in token_types within distance of this token, or None if no such token is found. @@ -162,6 +215,13 @@ def DeleteToken(token): Args: token: The token to delete """ + # When deleting a token, we do not update the deleted token itself to make + # sure the previous and next pointers are still pointing to tokens which are + # not deleted. Also it is very hard to keep track of all previously deleted + # tokens to update them when their pointers become invalid. So we add this + # flag that any token linked list iteration logic can skip deleted node safely + # when its current token is deleted. + token.is_deleted = True if token.previous: token.previous.next = token.next @@ -173,19 +233,62 @@ def DeleteToken(token): following_token.metadata.last_code = token.metadata.last_code following_token = following_token.next -def DeleteTokens(token, tokenCount): + +def DeleteTokens(token, token_count): """Deletes the given number of tokens starting with the given token. Args: token: The token to start deleting at. - tokenCount: The total number of tokens to delete. + token_count: The total number of tokens to delete. """ - for i in xrange(1, tokenCount): + for i in xrange(1, token_count): DeleteToken(token.next) DeleteToken(token) + +def InsertTokenBefore(new_token, token): + """Insert new_token before token. + + Args: + new_token: A token to be added to the stream + token: A token already in the stream + """ + new_token.next = token + new_token.previous = token.previous + + new_token.metadata = copy.copy(token.metadata) + + if new_token.IsCode(): + old_last_code = token.metadata.last_code + following_token = token + while (following_token and + following_token.metadata.last_code == old_last_code): + following_token.metadata.last_code = new_token + following_token = following_token.next + + token.previous = new_token + if new_token.previous: + new_token.previous.next = new_token + + if new_token.start_index is None: + if new_token.line_number == token.line_number: + new_token.start_index = token.start_index + else: + previous_token = new_token.previous + if previous_token: + new_token.start_index = (previous_token.start_index + + len(previous_token.string)) + else: + new_token.start_index = 0 + + iterator = new_token.next + while iterator and iterator.line_number == new_token.line_number: + iterator.start_index += len(new_token.string) + iterator = iterator.next + + def InsertTokenAfter(new_token, token): - """Insert new_token after token + """Insert new_token after token. Args: new_token: A token to be added to the stream @@ -221,6 +324,21 @@ def InsertTokenAfter(new_token, token): iterator = iterator.next +def InsertTokensAfter(new_tokens, token): + """Insert multiple tokens after token. 
+ + Args: + new_tokens: An array of tokens to be added to the stream + token: A token already in the stream + """ + # TODO(user): It would be nicer to have InsertTokenAfter defer to here + # instead of vice-versa. + current_token = token + for new_token in new_tokens: + InsertTokenAfter(new_token, current_token) + current_token = new_token + + def InsertSpaceTokenAfter(token): """Inserts a space token after the given token. @@ -228,28 +346,44 @@ def InsertSpaceTokenAfter(token): token: The token to insert a space token after Returns: - A single space token""" + A single space token + """ space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line, token.line_number) InsertTokenAfter(space_token, token) -def InsertLineAfter(token): +def InsertBlankLineAfter(token): """Inserts a blank line after the given token. Args: token: The token to insert a blank line after Returns: - A single space token""" + A single space token + """ blank_token = JavaScriptToken('', Type.BLANK_LINE, '', token.line_number + 1) - InsertTokenAfter(blank_token, token) - # Update all subsequent ine numbers. - blank_token = blank_token.next - while blank_token: - blank_token.line_number += 1 - blank_token = blank_token.next + InsertLineAfter(token, [blank_token]) + + +def InsertLineAfter(token, new_tokens): + """Inserts a new line consisting of new_tokens after the given token. + + Args: + token: The token to insert after. + new_tokens: The tokens that will make up the new line. + """ + insert_location = token + for new_token in new_tokens: + InsertTokenAfter(new_token, insert_location) + insert_location = new_token + + # Update all subsequent line numbers. + next_token = new_tokens[-1].next + while next_token: + next_token.line_number += 1 + next_token = next_token.next def SplitToken(token, position): @@ -275,6 +409,10 @@ def SplitToken(token, position): def Compare(token1, token2): """Compares two tokens and determines their relative order. + Args: + token1: The first token to compare. + token2: The second token to compare. + Returns: A negative integer, zero, or a positive integer as the first token is before, equal, or after the second in the token stream. @@ -283,3 +421,277 @@ def Compare(token1, token2): return token1.line_number - token2.line_number else: return token1.start_index - token2.start_index + + +def GoogScopeOrNoneFromStartBlock(token): + """Determines if the given START_BLOCK is part of a goog.scope statement. + + Args: + token: A token of type START_BLOCK. + + Returns: + The goog.scope function call token, or None if such call doesn't exist. + """ + if token.type != JavaScriptTokenType.START_BLOCK: + return None + + # Search for a goog.scope statement, which will be 5 tokens before the + # block. Illustration of the tokens found prior to the start block: + # goog.scope(function() { + # 5 4 3 21 ^ + + maybe_goog_scope = token + for unused_i in xrange(5): + maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and + maybe_goog_scope.previous else None) + if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope': + return maybe_goog_scope + + +def GetTokenRange(start_token, end_token): + """Returns a list of tokens between the two given, inclusive. + + Args: + start_token: Start token in the range. + end_token: End token in the range. + + Returns: + A list of tokens, in order, from start_token to end_token (including start + and end). Returns none if the tokens do not describe a valid range. 
+ """ + + token_range = [] + token = start_token + + while token: + token_range.append(token) + + if token == end_token: + return token_range + + token = token.next + + +def TokensToString(token_iterable): + """Convert a number of tokens into a string. + + Newlines will be inserted whenever the line_number of two neighboring + strings differ. + + Args: + token_iterable: The tokens to turn to a string. + + Returns: + A string representation of the given tokens. + """ + + buf = StringIO.StringIO() + token_list = list(token_iterable) + if not token_list: + return '' + + line_number = token_list[0].line_number + + for token in token_list: + + while line_number < token.line_number: + line_number += 1 + buf.write('\n') + + if line_number > token.line_number: + line_number = token.line_number + buf.write('\n') + + buf.write(token.string) + + return buf.getvalue() + + +def GetPreviousCodeToken(token): + """Returns the code token before the specified token. + + Args: + token: A token. + + Returns: + The code token before the specified token or None if no such token + exists. + """ + + return CustomSearch( + token, + lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES, + reverse=True) + + +def GetNextCodeToken(token): + """Returns the next code token after the specified token. + + Args: + token: A token. + + Returns: + The next code token after the specified token or None if no such token + exists. + """ + + return CustomSearch( + token, + lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES, + reverse=False) + + +def GetIdentifierStart(token): + """Returns the first token in an identifier. + + Given a token which is part of an identifier, returns the token at the start + of the identifier. + + Args: + token: A token which is part of an identifier. + + Returns: + The token at the start of the identifier or None if the identifier was not + of the form 'a.b.c' (e.g. "['a']['b'].c"). + """ + + start_token = token + previous_code_token = GetPreviousCodeToken(token) + + while (previous_code_token and ( + previous_code_token.IsType(JavaScriptTokenType.IDENTIFIER) or + IsDot(previous_code_token))): + start_token = previous_code_token + previous_code_token = GetPreviousCodeToken(previous_code_token) + + if IsDot(start_token): + return None + + return start_token + + +def GetIdentifierForToken(token): + """Get the symbol specified by a token. + + Given a token, this function additionally concatenates any parts of an + identifying symbol being identified that are split by whitespace or a + newline. + + The function will return None if the token is not the first token of an + identifier. + + Args: + token: The first token of a symbol. + + Returns: + The whole symbol, as a string. + """ + + # Search backward to determine if this token is the first token of the + # identifier. If it is not the first token, return None to signal that this + # token should be ignored. + prev_token = token.previous + while prev_token: + if (prev_token.IsType(JavaScriptTokenType.IDENTIFIER) or + IsDot(prev_token)): + return None + + if (prev_token.IsType(tokens.TokenType.WHITESPACE) or + prev_token.IsAnyType(JavaScriptTokenType.COMMENT_TYPES)): + prev_token = prev_token.previous + else: + break + + # A "function foo()" declaration. 
+ if token.type is JavaScriptTokenType.FUNCTION_NAME: + return token.string + + # A "var foo" declaration (if the previous token is 'var') + previous_code_token = GetPreviousCodeToken(token) + + if previous_code_token and previous_code_token.IsKeyword('var'): + return token.string + + # Otherwise, this is potentially a namespaced (goog.foo.bar) identifier that + # could span multiple lines or be broken up by whitespace. We need + # to concatenate. + identifier_types = set([ + JavaScriptTokenType.IDENTIFIER, + JavaScriptTokenType.SIMPLE_LVALUE + ]) + + assert token.type in identifier_types + + # Start with the first token + symbol_tokens = [token] + + if token.next: + for t in token.next: + last_symbol_token = symbol_tokens[-1] + + # A dot is part of the previous symbol. + if IsDot(t): + symbol_tokens.append(t) + continue + + # An identifier is part of the previous symbol if the previous one was a + # dot. + if t.type in identifier_types: + if IsDot(last_symbol_token): + symbol_tokens.append(t) + continue + else: + break + + # Skip any whitespace + if t.type in JavaScriptTokenType.NON_CODE_TYPES: + continue + + # This is the end of the identifier. Stop iterating. + break + + if symbol_tokens: + return ''.join([t.string for t in symbol_tokens]) + + +def GetStringAfterToken(token): + """Get string after token. + + Args: + token: Search will be done after this token. + + Returns: + String if found after token else None (empty string will also + return None). + + Search until end of string as in case of empty string Type.STRING_TEXT is not + present/found and don't want to return next string. + E.g. + a = ''; + b = 'test'; + When searching for string after 'a' if search is not limited by end of string + then it will return 'test' which is not desirable as there is a empty string + before that. + + This will return None for cases where string is empty or no string found + as in both cases there is no Type.STRING_TEXT. + """ + string_token = SearchUntil(token, JavaScriptTokenType.STRING_TEXT, + [JavaScriptTokenType.SINGLE_QUOTE_STRING_END, + JavaScriptTokenType.DOUBLE_QUOTE_STRING_END]) + if string_token: + return string_token.string + else: + return None + + +def IsDot(token): + """Whether the token represents a "dot" operator (foo.bar).""" + return token.type is JavaScriptTokenType.OPERATOR and token.string == '.' + + +def IsIdentifierOrDot(token): + """Whether the token is either an identifier or a '.'.""" + return (token.type in [JavaScriptTokenType.IDENTIFIER, + JavaScriptTokenType.SIMPLE_LVALUE] or + IsDot(token)) diff --git a/tools/closure_linter/closure_linter/tokenutil_test.py b/tools/closure_linter/closure_linter/tokenutil_test.py new file mode 100644 index 0000000..c7d3854 --- /dev/null +++ b/tools/closure_linter/closure_linter/tokenutil_test.py @@ -0,0 +1,297 @@ +#!/usr/bin/env python +# +# Copyright 2012 The Closure Linter Authors. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for the scopeutil module.""" + +# Allow non-Google copyright +# pylint: disable=g-bad-file-header + +__author__ = ('nnaze@google.com (Nathan Naze)') + +import unittest as googletest + +from closure_linter import ecmametadatapass +from closure_linter import javascripttokens +from closure_linter import testutil +from closure_linter import tokenutil + + +class FakeToken(object): + pass + + +class TokenUtilTest(googletest.TestCase): + + def testGetTokenRange(self): + + a = FakeToken() + b = FakeToken() + c = FakeToken() + d = FakeToken() + e = FakeToken() + + a.next = b + b.next = c + c.next = d + + self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d)) + + # This is an error as e does not come after a in the token chain. + self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e)) + + def testTokensToString(self): + + a = FakeToken() + b = FakeToken() + c = FakeToken() + d = FakeToken() + e = FakeToken() + + a.string = 'aaa' + b.string = 'bbb' + c.string = 'ccc' + d.string = 'ddd' + e.string = 'eee' + + a.line_number = 5 + b.line_number = 6 + c.line_number = 6 + d.line_number = 10 + e.line_number = 11 + + self.assertEquals( + 'aaa\nbbbccc\n\n\n\nddd\neee', + tokenutil.TokensToString([a, b, c, d, e])) + + self.assertEquals( + 'ddd\neee\naaa\nbbbccc', + tokenutil.TokensToString([d, e, a, b, c]), + 'Neighboring tokens not in line_number order should have a newline ' + 'between them.') + + def testGetPreviousCodeToken(self): + + tokens = testutil.TokenizeSource(""" +start1. // comment + /* another comment */ + end1 +""") + + def _GetTokenStartingWith(token_starts_with): + for t in tokens: + if t.string.startswith(token_starts_with): + return t + + self.assertEquals( + None, + tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1'))) + + self.assertEquals( + '.', + tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string) + + self.assertEquals( + 'start1', + tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('.')).string) + + def testGetNextCodeToken(self): + + tokens = testutil.TokenizeSource(""" +start1. // comment + /* another comment */ + end1 +""") + + def _GetTokenStartingWith(token_starts_with): + for t in tokens: + if t.string.startswith(token_starts_with): + return t + + self.assertEquals( + '.', + tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string) + + self.assertEquals( + 'end1', + tokenutil.GetNextCodeToken(_GetTokenStartingWith('.')).string) + + self.assertEquals( + None, + tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1'))) + + def testGetIdentifierStart(self): + + tokens = testutil.TokenizeSource(""" +start1 . // comment + prototype. /* another comment */ + end1 + +['edge'][case].prototype. 
+ end2 = function() {} +""") + + def _GetTokenStartingWith(token_starts_with): + for t in tokens: + if t.string.startswith(token_starts_with): + return t + + self.assertEquals( + 'start1', + tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string) + + self.assertEquals( + 'start1', + tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string) + + self.assertEquals( + None, + tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2'))) + + def testInsertTokenBefore(self): + + self.AssertInsertTokenAfterBefore(False) + + def testInsertTokenAfter(self): + + self.AssertInsertTokenAfterBefore(True) + + def AssertInsertTokenAfterBefore(self, after): + + new_token = javascripttokens.JavaScriptToken( + 'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1) + + existing_token1 = javascripttokens.JavaScriptToken( + 'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1) + existing_token1.start_index = 0 + existing_token1.metadata = ecmametadatapass.EcmaMetaData() + + existing_token2 = javascripttokens.JavaScriptToken( + ' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1) + existing_token2.start_index = 3 + existing_token2.metadata = ecmametadatapass.EcmaMetaData() + existing_token2.metadata.last_code = existing_token1 + + existing_token1.next = existing_token2 + existing_token2.previous = existing_token1 + + if after: + tokenutil.InsertTokenAfter(new_token, existing_token1) + else: + tokenutil.InsertTokenBefore(new_token, existing_token2) + + self.assertEquals(existing_token1, new_token.previous) + self.assertEquals(existing_token2, new_token.next) + + self.assertEquals(new_token, existing_token1.next) + self.assertEquals(new_token, existing_token2.previous) + + self.assertEquals(existing_token1, new_token.metadata.last_code) + self.assertEquals(new_token, existing_token2.metadata.last_code) + + self.assertEquals(0, existing_token1.start_index) + self.assertEquals(3, new_token.start_index) + self.assertEquals(4, existing_token2.start_index) + + def testGetIdentifierForToken(self): + + tokens = testutil.TokenizeSource(""" +start1.abc.def.prototype. + onContinuedLine + +(start2.abc.def + .hij.klm + .nop) + +start3.abc.def + .hij = function() {}; + +// An absurd multi-liner. +start4.abc.def. + hij. + klm = function() {}; + +start5 . aaa . bbb . ccc + shouldntBePartOfThePreviousSymbol + +start6.abc.def ghi.shouldntBePartOfThePreviousSymbol + +var start7 = 42; + +function start8() { + +} + +start9.abc. // why is there a comment here? + def /* another comment */ + shouldntBePart + +start10.abc // why is there a comment here? + .def /* another comment */ + shouldntBePart + +start11.abc. 
middle1.shouldNotBeIdentifier +""") + + def _GetTokenStartingWith(token_starts_with): + for t in tokens: + if t.string.startswith(token_starts_with): + return t + + self.assertEquals( + 'start1.abc.def.prototype.onContinuedLine', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1'))) + + self.assertEquals( + 'start2.abc.def.hij.klm.nop', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2'))) + + self.assertEquals( + 'start3.abc.def.hij', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3'))) + + self.assertEquals( + 'start4.abc.def.hij.klm', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4'))) + + self.assertEquals( + 'start5.aaa.bbb.ccc', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5'))) + + self.assertEquals( + 'start6.abc.def', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6'))) + + self.assertEquals( + 'start7', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7'))) + + self.assertEquals( + 'start8', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8'))) + + self.assertEquals( + 'start9.abc.def', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9'))) + + self.assertEquals( + 'start10.abc.def', + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10'))) + + self.assertIsNone( + tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1'))) + + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/closure_linter/typeannotation.py b/tools/closure_linter/closure_linter/typeannotation.py new file mode 100644 index 0000000..00604c1 --- /dev/null +++ b/tools/closure_linter/closure_linter/typeannotation.py @@ -0,0 +1,401 @@ +#!/usr/bin/env python +#*-* coding: utf-8 +"""Closure typeannotation parsing and utilities.""" + + + +from closure_linter import errors +from closure_linter import javascripttokens +from closure_linter.common import error + +# Shorthand +TYPE = javascripttokens.JavaScriptTokenType + + +class TypeAnnotation(object): + """Represents a structured view of a closure type annotation. + + Attribute: + identifier: The name of the type. + key_type: The name part before a colon. + sub_types: The list of sub_types used e.g. for Array.<…> + or_null: The '?' annotation + not_null: The '!' annotation + type_group: If this a a grouping (a|b), but does not include function(a). + return_type: The return type of a function definition. + alias: The actual type set by closurizednamespaceinfo if the identifier uses + an alias to shorten the name. + tokens: An ordered list of tokens used for this type. May contain + TypeAnnotation instances for sub_types, key_type or return_type. + """ + + IMPLICIT_TYPE_GROUP = 2 + + NULLABILITY_UNKNOWN = 2 + + # Frequently used known non-nullable types. + NON_NULLABLE = frozenset([ + 'boolean', 'function', 'number', 'string', 'undefined']) + # Frequently used known nullable types. 
+ NULLABLE_TYPE_WHITELIST = frozenset([ + 'Array', 'Document', 'Element', 'Function', 'Node', 'NodeList', + 'Object']) + + def __init__(self): + self.identifier = '' + self.sub_types = [] + self.or_null = False + self.not_null = False + self.type_group = False + self.alias = None + self.key_type = None + self.record_type = False + self.opt_arg = False + self.return_type = None + self.tokens = [] + + def IsFunction(self): + """Determines whether this is a function definition.""" + return self.identifier == 'function' + + def IsConstructor(self): + """Determines whether this is a function definition for a constructor.""" + key_type = self.sub_types and self.sub_types[0].key_type + return self.IsFunction() and key_type.identifier == 'new' + + def IsRecordType(self): + """Returns True if this type is a record type.""" + return (self.record_type or + bool([t for t in self.sub_types if t.IsRecordType()])) + + def IsVarArgsType(self): + """Determines if the type is a var_args type, i.e. starts with '...'.""" + return self.identifier.startswith('...') or ( + self.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP and + self.sub_types[0].identifier.startswith('...')) + + def IsEmpty(self): + """Returns True if the type is empty.""" + return not self.tokens + + def IsUnknownType(self): + """Returns True if this is the unknown type {?}.""" + return (self.or_null + and not self.identifier + and not self.sub_types + and not self.return_type) + + def Append(self, item): + """Adds a sub_type to this type and finalizes it. + + Args: + item: The TypeAnnotation item to append. + """ + # item is a TypeAnnotation instance, so pylint: disable=protected-access + self.sub_types.append(item._Finalize(self)) + + def __repr__(self): + """Reconstructs the type definition.""" + append = '' + if self.sub_types: + separator = (',' if not self.type_group else '|') + if self.identifier == 'function': + surround = '(%s)' + else: + surround = {False: '{%s}' if self.record_type else '<%s>', + True: '(%s)', + self.IMPLICIT_TYPE_GROUP: '%s'}[self.type_group] + append = surround % separator.join([repr(t) for t in self.sub_types]) + if self.return_type: + append += ':%s' % repr(self.return_type) + append += '=' if self.opt_arg else '' + prefix = '' + ('?' if self.or_null else '') + ('!' if self.not_null else '') + keyword = '%s:' % repr(self.key_type) if self.key_type else '' + return keyword + prefix + '%s' % (self.alias or self.identifier) + append + + def ToString(self): + """Concats the type's tokens to form a string again.""" + ret = [] + for token in self.tokens: + if not isinstance(token, TypeAnnotation): + ret.append(token.string) + else: + ret.append(token.ToString()) + return ''.join(ret) + + def Dump(self, indent=''): + """Dumps this type's structure for debugging purposes.""" + result = [] + for t in self.tokens: + if isinstance(t, TypeAnnotation): + result.append(indent + str(t) + ' =>\n' + t.Dump(indent + ' ')) + else: + result.append(indent + str(t)) + return '\n'.join(result) + + def IterIdentifiers(self): + """Iterates over all identifiers in this type and its subtypes.""" + if self.identifier: + yield self.identifier + for subtype in self.IterTypes(): + for identifier in subtype.IterIdentifiers(): + yield identifier + + def IterTypeGroup(self): + """Iterates over all types in the type group including self. + + Yields: + If this is a implicit or manual type-group: all sub_types. + Otherwise: self + E.g. for @type {Foo.} this will yield only Foo., + for @type {Foo|(Bar|Sample)} this will yield Foo, Bar and Sample. 
+ + """ + if self.type_group: + for sub_type in self.sub_types: + for sub_type in sub_type.IterTypeGroup(): + yield sub_type + else: + yield self + + def IterTypes(self): + """Iterates over each subtype as well as return and key types.""" + if self.return_type: + yield self.return_type + + if self.key_type: + yield self.key_type + + for sub_type in self.sub_types: + yield sub_type + + def GetNullability(self, modifiers=True): + """Computes whether the type may be null. + + Args: + modifiers: Whether the modifiers ? and ! should be considered in the + evaluation. + Returns: + True if the type allows null, False if the type is strictly non nullable + and NULLABILITY_UNKNOWN if the nullability cannot be determined. + """ + + # Explicitly marked nullable types or 'null' are nullable. + if (modifiers and self.or_null) or self.identifier == 'null': + return True + + # Explicitly marked non-nullable types or non-nullable base types: + if ((modifiers and self.not_null) or self.record_type + or self.identifier in self.NON_NULLABLE): + return False + + # A type group is nullable if any of its elements are nullable. + if self.type_group: + maybe_nullable = False + for sub_type in self.sub_types: + nullability = sub_type.GetNullability() + if nullability == self.NULLABILITY_UNKNOWN: + maybe_nullable = nullability + elif nullability: + return True + return maybe_nullable + + # Whitelisted types are nullable. + if self.identifier.rstrip('.') in self.NULLABLE_TYPE_WHITELIST: + return True + + # All other types are unknown (most should be nullable, but + # enums are not and typedefs might not be). + return self.NULLABILITY_UNKNOWN + + def WillAlwaysBeNullable(self): + """Computes whether the ! flag is illegal for this type. + + This is the case if this type or any of the subtypes is marked as + explicitly nullable. + + Returns: + True if the ! flag would be illegal. + """ + if self.or_null or self.identifier == 'null': + return True + + if self.type_group: + return bool([t for t in self.sub_types if t.WillAlwaysBeNullable()]) + + return False + + def _Finalize(self, parent): + """Fixes some parsing issues once the TypeAnnotation is complete.""" + + # Normalize functions whose definition ended up in the key type because + # they defined a return type after a colon. + if self.key_type and self.key_type.identifier == 'function': + current = self.key_type + current.return_type = self + self.key_type = None + # opt_arg never refers to the return type but to the function itself. + current.opt_arg = self.opt_arg + self.opt_arg = False + return current + + # If a typedef just specified the key, it will not end up in the key type. 
+ if parent.record_type and not self.key_type: + current = TypeAnnotation() + current.key_type = self + current.tokens.append(self) + return current + return self + + def FirstToken(self): + """Returns the first token used in this type or any of its subtypes.""" + first = self.tokens[0] + return first.FirstToken() if isinstance(first, TypeAnnotation) else first + + +def Parse(token, token_end, error_handler): + """Parses a type annotation and returns a TypeAnnotation object.""" + return TypeAnnotationParser(error_handler).Parse(token.next, token_end) + + +class TypeAnnotationParser(object): + """A parser for type annotations constructing the TypeAnnotation object.""" + + def __init__(self, error_handler): + self._stack = [] + self._error_handler = error_handler + self._closing_error = False + + def Parse(self, token, token_end): + """Parses a type annotation and returns a TypeAnnotation object.""" + root = TypeAnnotation() + self._stack.append(root) + current = TypeAnnotation() + root.tokens.append(current) + + while token and token != token_end: + if token.type in (TYPE.DOC_TYPE_START_BLOCK, TYPE.DOC_START_BRACE): + if token.string == '(': + if (current.identifier and + current.identifier not in ['function', '...']): + self.Error(token, + 'Invalid identifier for (): "%s"' % current.identifier) + current.type_group = current.identifier != 'function' + elif token.string == '{': + current.record_type = True + current.tokens.append(token) + self._stack.append(current) + current = TypeAnnotation() + self._stack[-1].tokens.append(current) + + elif token.type in (TYPE.DOC_TYPE_END_BLOCK, TYPE.DOC_END_BRACE): + prev = self._stack.pop() + prev.Append(current) + current = prev + + # If an implicit type group was created, close it as well. + if prev.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP: + prev = self._stack.pop() + prev.Append(current) + current = prev + current.tokens.append(token) + + elif token.type == TYPE.DOC_TYPE_MODIFIER: + if token.string == '!': + current.tokens.append(token) + current.not_null = True + elif token.string == '?': + current.tokens.append(token) + current.or_null = True + elif token.string == ':': + current.tokens.append(token) + prev = current + current = TypeAnnotation() + prev.tokens.append(current) + current.key_type = prev + elif token.string == '=': + # For implicit type groups the '=' refers to the parent. + try: + if self._stack[-1].type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP: + self._stack[-1].tokens.append(token) + self._stack[-1].opt_arg = True + else: + current.tokens.append(token) + current.opt_arg = True + except IndexError: + self.ClosingError(token) + elif token.string == '|': + # If a type group has explicitly been opened do a normal append. 
+ # Otherwise we have to open the type group and move the current + # type into it, before appending + if not self._stack[-1].type_group: + type_group = TypeAnnotation() + if current.key_type and current.key_type.identifier != 'function': + type_group.key_type = current.key_type + current.key_type = None + type_group.type_group = TypeAnnotation.IMPLICIT_TYPE_GROUP + # Fix the token order + prev = self._stack[-1].tokens.pop() + self._stack[-1].tokens.append(type_group) + type_group.tokens.append(prev) + self._stack.append(type_group) + self._stack[-1].tokens.append(token) + self.Append(current, error_token=token) + current = TypeAnnotation() + self._stack[-1].tokens.append(current) + elif token.string == ',': + self.Append(current, error_token=token) + current = TypeAnnotation() + self._stack[-1].tokens.append(token) + self._stack[-1].tokens.append(current) + else: + current.tokens.append(token) + self.Error(token, 'Invalid token') + + elif token.type == TYPE.COMMENT: + current.tokens.append(token) + current.identifier += token.string.strip() + + elif token.type in [TYPE.DOC_PREFIX, TYPE.WHITESPACE]: + current.tokens.append(token) + + else: + current.tokens.append(token) + self.Error(token, 'Unexpected token') + + token = token.next + + self.Append(current, error_token=token) + try: + ret = self._stack.pop() + except IndexError: + self.ClosingError(token) + # The type is screwed up, but let's return something. + return current + + if self._stack and (len(self._stack) != 1 or + ret.type_group != TypeAnnotation.IMPLICIT_TYPE_GROUP): + self.Error(token, 'Too many opening items.') + + return ret if len(ret.sub_types) > 1 else ret.sub_types[0] + + def Append(self, type_obj, error_token): + """Appends a new TypeAnnotation object to the current parent.""" + if self._stack: + self._stack[-1].Append(type_obj) + else: + self.ClosingError(error_token) + + def ClosingError(self, token): + """Reports an error about too many closing items, but only once.""" + if not self._closing_error: + self._closing_error = True + self.Error(token, 'Too many closing items.') + + def Error(self, token, message): + """Calls the error_handler to post an error message.""" + if self._error_handler: + self._error_handler.HandleError(error.Error( + errors.JSDOC_DOES_NOT_PARSE, + 'Error parsing jsdoc type at token "%s" (column: %d): %s' % + (token.string, token.start_index, message), token)) diff --git a/tools/closure_linter/closure_linter/typeannotation_test.py b/tools/closure_linter/closure_linter/typeannotation_test.py new file mode 100755 index 0000000..da9dfa3 --- /dev/null +++ b/tools/closure_linter/closure_linter/typeannotation_test.py @@ -0,0 +1,232 @@ +#!/usr/bin/env python +"""Unit tests for the typeannotation module.""" + + + + +import unittest as googletest + +from closure_linter import testutil +from closure_linter.common import erroraccumulator + +CRAZY_TYPE = ('Array.))>') + + +class TypeErrorException(Exception): + """Exception for TypeErrors.""" + + def __init__(self, errors): + super(TypeErrorException, self).__init__() + self.errors = errors + + +class TypeParserTest(googletest.TestCase): + """Tests for typeannotation parsing.""" + + def _ParseComment(self, script): + """Parse a script that contains one comment and return it.""" + accumulator = erroraccumulator.ErrorAccumulator() + _, comments = testutil.ParseFunctionsAndComments(script, accumulator) + if accumulator.GetErrors(): + raise TypeErrorException(accumulator.GetErrors()) + self.assertEquals(1, len(comments)) + return comments[0] + + def 
_ParseType(self, type_str): + """Creates a comment to parse and returns the parsed type.""" + comment = self._ParseComment('/** @type {%s} **/' % type_str) + return comment.GetDocFlags()[0].jstype + + def assertProperReconstruction(self, type_str, matching_str=None): + """Parses the type and asserts the its repr matches the type. + + If matching_str is specified, it will assert that the repr matches this + string instead. + + Args: + type_str: The type string to parse. + matching_str: A string the __repr__ of the parsed type should match. + Returns: + The parsed js_type. + """ + parsed_type = self._ParseType(type_str) + # Use listEqual assertion to more easily identify the difference + self.assertListEqual(list(matching_str or type_str), + list(repr(parsed_type))) + self.assertEquals(matching_str or type_str, repr(parsed_type)) + + # Newlines will be inserted by the file writer. + self.assertEquals(type_str.replace('\n', ''), parsed_type.ToString()) + return parsed_type + + def assertNullable(self, type_str, nullable=True): + parsed_type = self.assertProperReconstruction(type_str) + self.assertEquals(nullable, parsed_type.GetNullability(), + '"%s" should %sbe nullable' % + (type_str, 'not ' if nullable else '')) + + def assertNotNullable(self, type_str): + return self.assertNullable(type_str, nullable=False) + + def testReconstruction(self): + self.assertProperReconstruction('*') + self.assertProperReconstruction('number') + self.assertProperReconstruction('(((number)))') + self.assertProperReconstruction('!number') + self.assertProperReconstruction('?!number') + self.assertProperReconstruction('number=') + self.assertProperReconstruction('number=!?', '?!number=') + self.assertProperReconstruction('number|?string') + self.assertProperReconstruction('(number|string)') + self.assertProperReconstruction('?(number|string)') + self.assertProperReconstruction('Object.') + self.assertProperReconstruction('function(new:Object)') + self.assertProperReconstruction('function(new:Object):number') + self.assertProperReconstruction('function(new:Object,Element):number') + self.assertProperReconstruction('function(this:T,...)') + self.assertProperReconstruction('{a:?number}') + self.assertProperReconstruction('{a:?number,b:(number|string)}') + self.assertProperReconstruction('{c:{nested_element:*}|undefined}') + self.assertProperReconstruction('{handleEvent:function(?):?}') + self.assertProperReconstruction('function():?|null') + self.assertProperReconstruction('null|function():?|bar') + + def testOptargs(self): + self.assertProperReconstruction('number=') + self.assertProperReconstruction('number|string=') + self.assertProperReconstruction('(number|string)=') + self.assertProperReconstruction('(number|string=)') + self.assertProperReconstruction('(number=|string)') + self.assertProperReconstruction('function(...):number=') + + def testIndepth(self): + # Do an deeper check of the crazy identifier + crazy = self.assertProperReconstruction(CRAZY_TYPE) + self.assertEquals('Array.', crazy.identifier) + self.assertEquals(1, len(crazy.sub_types)) + func1 = crazy.sub_types[0] + func2 = func1.return_type + self.assertEquals('function', func1.identifier) + self.assertEquals('function', func2.identifier) + self.assertEquals(3, len(func1.sub_types)) + self.assertEquals(1, len(func2.sub_types)) + self.assertEquals('Object.', func2.sub_types[0].sub_types[0].identifier) + + def testIterIdentifiers(self): + nested_identifiers = self._ParseType('(a|{b:(c|function(new:d):e)})') + for identifier in ('a', 'b', 'c', 'd', 
'e'): + self.assertIn(identifier, nested_identifiers.IterIdentifiers()) + + def testIsEmpty(self): + self.assertTrue(self._ParseType('').IsEmpty()) + self.assertFalse(self._ParseType('?').IsEmpty()) + self.assertFalse(self._ParseType('!').IsEmpty()) + self.assertFalse(self._ParseType('').IsEmpty()) + + def testIsConstructor(self): + self.assertFalse(self._ParseType('').IsConstructor()) + self.assertFalse(self._ParseType('Array.').IsConstructor()) + self.assertTrue(self._ParseType('function(new:T)').IsConstructor()) + + def testIsVarArgsType(self): + self.assertTrue(self._ParseType('...number').IsVarArgsType()) + self.assertTrue(self._ParseType('...Object|Array').IsVarArgsType()) + self.assertTrue(self._ParseType('...(Object|Array)').IsVarArgsType()) + self.assertFalse(self._ParseType('Object|...Array').IsVarArgsType()) + self.assertFalse(self._ParseType('(...Object|Array)').IsVarArgsType()) + + def testIsUnknownType(self): + self.assertTrue(self._ParseType('?').IsUnknownType()) + self.assertTrue(self._ParseType('Foo.').sub_types[0].IsUnknownType()) + self.assertFalse(self._ParseType('?|!').IsUnknownType()) + self.assertTrue(self._ParseType('?|!').sub_types[0].IsUnknownType()) + self.assertFalse(self._ParseType('!').IsUnknownType()) + + long_type = 'function():?|{handleEvent:function(?=):?,sample:?}|?=' + record = self._ParseType(long_type) + # First check that there's not just one type with 3 return types, but three + # top-level types. + self.assertEquals(3, len(record.sub_types)) + + # Now extract all unknown type instances and verify that they really are. + handle_event, sample = record.sub_types[1].sub_types + for i, sub_type in enumerate([ + record.sub_types[0].return_type, + handle_event.return_type, + handle_event.sub_types[0], + sample, + record.sub_types[2]]): + self.assertTrue(sub_type.IsUnknownType(), + 'Type %d should be the unknown type: %s\n%s' % ( + i, sub_type.tokens, record.Dump())) + + def testTypedefNames(self): + easy = self._ParseType('{a}') + self.assertTrue(easy.record_type) + + easy = self.assertProperReconstruction('{a}', '{a:}').sub_types[0] + self.assertEquals('a', easy.key_type.identifier) + self.assertEquals('', easy.identifier) + + easy = self.assertProperReconstruction('{a:b}').sub_types[0] + self.assertEquals('a', easy.key_type.identifier) + self.assertEquals('b', easy.identifier) + + def assertTypeError(self, type_str): + """Asserts that parsing the given type raises a linter error.""" + self.assertRaises(TypeErrorException, self._ParseType, type_str) + + def testParseBadTypes(self): + """Tests that several errors in types don't break the parser.""" + self.assertTypeError('<') + self.assertTypeError('>') + self.assertTypeError('Foo.=') + self.assertTypeError('Foo.>=') + self.assertTypeError('(') + self.assertTypeError(')') + self.assertTypeError('Foo.') + self._ParseType(':') + self._ParseType(':foo') + self.assertTypeError(':)foo') + self.assertTypeError('(a|{b:(c|function(new:d):e') + + def testNullable(self): + self.assertNullable('null') + self.assertNullable('Object') + self.assertNullable('?string') + self.assertNullable('?number') + + self.assertNotNullable('string') + self.assertNotNullable('number') + self.assertNotNullable('boolean') + self.assertNotNullable('function(Object)') + self.assertNotNullable('function(Object):Object') + self.assertNotNullable('function(?Object):?Object') + self.assertNotNullable('!Object') + + self.assertNotNullable('boolean|string') + self.assertNotNullable('(boolean|string)') + + 
self.assertNullable('(boolean|string|null)') + self.assertNullable('(?boolean)') + self.assertNullable('?(boolean)') + + self.assertNullable('(boolean|Object)') + self.assertNotNullable('(boolean|(string|{a:}))') + + def testSpaces(self): + """Tests that spaces don't change the outcome.""" + type_str = (' A < b | ( c | ? ! d e f ) > | ' + 'function ( x : . . . ) : { y : z = } ') + two_spaces = type_str.replace(' ', ' ') + no_spaces = type_str.replace(' ', '') + newlines = type_str.replace(' ', '\n * ') + self.assertProperReconstruction(no_spaces) + self.assertProperReconstruction(type_str, no_spaces) + self.assertProperReconstruction(two_spaces, no_spaces) + self.assertProperReconstruction(newlines, no_spaces) + +if __name__ == '__main__': + googletest.main() diff --git a/tools/closure_linter/dist/closure_linter-2.3.17-py2.7.egg b/tools/closure_linter/dist/closure_linter-2.3.17-py2.7.egg new file mode 100644 index 0000000000000000000000000000000000000000..e9a847ca28b84234457ad20e9baf2664149d7de2 GIT binary patch literal 315656 zcma&NQ*0+t`z~7B?yt6O+qP}no!Yi-+qUga%_*n0&V0$q`Tp5^XXjk3n{~JHuH<@YHGK%k9@nKd20|6C+009C1-I~}sxVSl+8Q5CcyP7#O7#LXDTe%t- z&^vmWB@NndF(QfHyhl~~S^-NiJ5y&RUvyL`iqO-R3&pjxq_w3=rIpm2-E=GC5(;^d zcyNB0PmYGSywLMC$x9g+jxPZ4=o;AFUAJ3++pAYCaJ7fJSo%Cd=q?2jv>25CpqlE1 ze~~_IBvW`4az|nuHl9SalmmL}PK1%-XR4}7*QZeWC%o7J?UOJB6hgjqqxpYFj9B^R ztV7s=dnp_Fp%`J(j&klNTJonHfrvEl%Y0$6m8iy*kn2$3Jg> zy*Lh-wC!1p678jITsQ8bzq}Fcp*MYm)p!oY_jDKpnW`^^<<(}tf+v-8%{hc8o#72k z_5gXwpyzia>2{r~iCE5r=A>~p9M4^quRjK9M9Yj@AW+Xx{DevuQN|tRSG}`rHq@CJ z3;mX@&D!frR!EB=e1}y0|3>VF#3&B(6#G&5Z?u#D4$l8|#3o`hjW9!uNMm=ZkfnsL zU`vfM$?h6=(4q^IClOL`)$<^7(m`J>@-^1|zcRp$U`t!lY{nM5>+7nb+ zjU;R)AI>R$uv79?)h%TM^2JGuFQb{}41-3PZ*8)IIf_6n8bD^sKhQK&R)XcDQYH~S zsZ-%Tt1CT~G{iBeW7>r|h$0Szm1USf8%ALJ?Giaj4Ys}bIa>3mncEEiec69LuQQyY zMFA`j5H-pF@nuG~Rz@z4MlLS@es8PxjthVb#s5$JLLy`sH?REt8C`i=t5X`!R!IP9 zx^rjQNKAlK7`8j9?1cLF%ccFVn2yZGhU8U+h4FX}1Cw%tlI zwKn}0{f21k-s*=@<;HaegI@FHQbSBJGLBZmb+1a%*>@bmIP<~aC4mT9 z2!wSnYH2FwHYyfEje)}c({tsGO#BZ*>r4$YAH053WR#X!)$R`*#|<@{wnH9u^(ea% zAdF7ahaMzZU%653#vW6vR&7i!29?3KPVFW3Fld;&$%YE^C{Q-Ycec;057!tPZssq# zRPFrSJp1;ojN}$4H3oTDIVy$vaLnMX$$c==GPav(VaZAByLIyVgPLZ%BO4^j6BeZA zeI51H7zS%#mnz}FcDK5F=S7pItZKS?aK}mK$_w8%C17oJUd|Q;AkVv`){o@mgOb_r zGlj5Z{M%Q~oNq2~0W*S+Bc~sCZd}<7%Ncs{_VnR%M_1VXI?bPF)Q^|j=caKmL-4JL z`b)qGB$3OAl8rsFDB(cQ63Aq$XH1Y$=jj4e6`x?8L}v z9y~khHu}SsRlkU+S{Q&l)oL>&HGIj?<~u-+67Qw7xkpvd z^4~zyt#a#AtNMT}6&6tpaZ)iAkdd=ZL_IgqH_{mNVS)4F=p~Q`m-tLDTlZmDhb7%w z9OnjK6vb#5A+C$9fSdW)fjI4f;$#9q&lhDA8!*5nWXPEp9MptWx$xms$XjBQ* z{MA;6Wm8~mFPkNoIkso3hEWfao@f$`gh$+n zX&lr*P-no}M0Ym6n$6~b_h^Xd=i%svO=)%+<2KEHsEWGY@Mu4cSa5gisiRS=+|eH4 z70W++4JV|J?-0;meowvr{;qrJtd$Ood4CeVVBx69Q7U**8uFqz!@dlrK^D=!A#A8y zKl{kRt`Ld^VopTu6Q}V`w<-plRzt7`6F2(G2m0v?HAJ+k9~dm0cQPb|P?i?S!$Eql z_WF438GpRPi3X7GFxPZPS$iu27hkVd}pI>`wM4))(?C3n?K(T?FLVuq%%OahE zlE3-E8wq-nONnnVioXD#NlF#+!zqa32zWtvC=$k68N8rkNrqP5Xsnn9`h^@Kqp}Q* zhVR^QF1hE19s0&YNFw2NW+I~tn_$8bjqP`ut*qt7vvi3|BK zKp@`LhOzh;&!aYX2=dJ^DZNuVNixcKmn9RD+ACI8L3tiE|o2wxEnk6TOm@L=-L3bR=m3iQ0HUkKA(Ni z&O(9aODJ4b=>Rp5GIp-H?tRPX3E5I|4zMQnmt4vRbJ?H*)` z*+aSix;uzi!NF%5)=NILxpFB6QvO1ba@2w-mi1aCsF8=;Nat?CVy9F=oytlx(~gBQ zg=Jn;w+d`$vl7jMnZ^LOK2i926BmNUkzvk`8J)T^AP2#3o7my8PKN8La8+XCEb$)P2* z98cE4x)!4w$4;6vGh@=lz1)=~+%*vVhM>czo`cl-5|N+fj|l79*;|AhmbJ>B*dR4L z5w2&yTU4VL<1>;%+1L3##6C}e?u0!&yxW^>-O-}u1)5FmUD%Dev8<41Kp8UlR$}Xj z_mJk??X$>_w`XQgK3vkG+PyogceX6O_zgrapHO!HT%HmNeZXB&g?Y~cMbNvtPTn#< z7!?%0vzA>H9IA^N22WUM^LT7i%|H}V^o}}6VyAm4pJ)}|>r7dEk2>uM>cEy8hCT8Zt zd5@Mjs-ezviewISW)uplwD7X1!NltTlW$wzvK7(u8xl84b`EQZ1tpa@{ItjMAAdBp 
zX%lfswNRtp?l2WY*^aQdWvwe`Tob8S=C7Q4ngL%5tkMPqXT&HfS2p zw?YwCm>Ms~mCh)*0T}`1%mz-KK%)p!BJEJoR;2j0j3^^O?Dq0tbgzRqur}0Zl7j+9 zjTX`fZct@Gm~l?nYcw5rUjHM*N8h6dA zw}>UvoTrf?!1Mhq=5bIqCBuLi?n|m5>fYXV#TX=2Yic@c`mvZB`uhC$=I_3U7c) z_KBD{$STr?KM~VYq>Qsoxns~t>k~V*`NI(COSL3}Opodji5>t#%CG422^HJc{$*G2 z{bW6C86wW))z zQH0X`FU6r9<{L8v%#JV>*6kzvVzy@#Ai#829yjQsc~lWRKm&kp>t!Sb3O!II6U53` zvx+i=t%U5z!z>jCir`HI*H4B-oe8~)O=Ia}|nyy-=ky*K(hdbS`ck3mRk!JITbh_pX1>@2T8 z{=k4#K(ti7(fw3O+(=JXf8197PyP5m90jHEz>Mmje(dtEVEHf5`oDH8|6gD+HgYlh zPf<;4&tZ!b#qY&nP=>%5G)nnuc`zL4kKA(YO_P*Sx5ibnpiIlGIz}rg`uNBC-b_Nu zq~@ev;0~|Wk|XECY<>JpcfX#8xIw*{3tbk)8veCBTd_y6$CY}k1YJANw4e9S_l<2U z>Q$S1*>cd%^cc*nIUbZvaU#Dpt zy!)5$E2F_I0m1$`NjBf-*f*d5YXTFn<{7XTmu^xo3bz5edu9wS&njn^*p5E`Yptq(I*wP9U5dn02 z`X&LlAw;K4kgFYpul<6DcHZb3sx4q!1yVBp^a8WA4agJw9)rJa9H8tLwPD0)-=<-s zuV?hGdkqHZBVJszpE4#kXut8QT>bR!`T8f#^&~;dwnx#XknUs3hS9<~GgkB7W3sj^dqXoxSj&Zb-`=B+H0gYBGrwq!!))5)P%# zTI3lRkKaM*t{cBSY#DCJz{|gx^yXiyfRE+9Golm&_a#oN{;o zwmxmh2EtGkV+JAg-uojc>8S}A3D^)OG|Qg#6h7s2R~1Qi^cc5jIS4dvE^%khwViG| zsL3E1%e=va)ke2YRIy3mXwSpnF7eHFP5xZ%6BBK2P$h zY5pJnDe}K%!wtg;AfzvK>)l8R*|Fu+RbVq%d;7OS*p^@W+-o}3YS5zrx(9IgXnkab zN1JI9Ggc#zGs0CKS?B&t_Q1(RE1v;9Zmj~0_!^qH@|F^@gE#Xc> zdF$dPr_Ak+)K>L5w;SSn*!f!DmdQ_PKp&bRhKYo8$sBcACsG=~_tn?*PL$E3 zGuU?UC9jbPK$dm8T>-aTqBAzp0;uaj*TIr2_Ie41X;vOdj<6M_15{MQ4-oHxB-+|% zr(^3=tH1!~W4;{9uHBG;#4Tdp&}U&CQz-5mYZ+UFcBp7jKb7a|23-Gx#HCsOR}3h6 zD{MfZRD?zsYQ34RFrK$Y&Mv_*YO)FjwzjpgH2gDN$Tcf&J?%!Y<`U5?{w1myNb{gr zhKFL(xU2H}=&$-OAv~!UGBpcHGT(u6ft7@|(3iIy;cdmIN76k3@+ZR@>)fF9VXf#u;GoF8^3s>lPPo$l1a~=&qt~LN=HF9E zSZN=B=gUEHuui1P$wJ8{Y7&L9>$$|Q?M^QrLubP_e@rbz>RShZ%mhr{I%Af|fvt8v(=x&g_Z_hruNUct))#nm+p9#X zv$C?OxmaU@0k0U(wrg4bE1i6~X;1R>u!ub$eZb6eD^J|jO=-3KUGwCD+7iy7acmyL zp%+nN|Nd*Y49o(-LL^{0j|-d1gTi+~0n@ebaNNZj|pl^Cm^8ZkFj=iElTXa3PZQHhO-eccm+qP}nwr$(CZTG$=&FhCIZGXegPS&1# ztTAg#&&a2-2yu(v6@9FASw@&6PC{L?5Pggx*vQE7Kq$@>QS?^P*ctwSE^YOUIkBbm zG|Ga_8o4?J%Zv~-7&JVSfUZHm{9&Pq3uK7|`!rCZ;M5JSLL#b`VAGql|ck!z~<_}%lui$2Ar02BZbym8L97HhVI;MVKaV`Isf7xH3EtUB) zCu!P~AbUJ{?9TWizTiQDYTh*jryMNmFVM?dIN?OEwnnGzhx1`!B`R)_`q6R2FyV1uhSW`PXSiE5~k_5m(bXm)P9u0+*H&jNl!jh zTJ0Qxq6Bj5hclDK=}5QhWxDnK^{}`5`?2XmQqfEI&6k(^1It>E0KQ}s1}r~BO>`69 zL~NpCKZlC9qbAxxWWsAdSNO`BuR5FtRuefJ#DRK{7|4qFs&kunql11=TF>4UrQ#zs z-K{{4S)9KjHEFLqR!GDs$reBVM|4&tzc@Wa?8H$eNNk)Zk<7P-sk^#YxG~a;g^Tk; zKjAd-kG1%_4BWR@5fpK5Fy(Yl`EI`A{(cwhkquhzSZReq`AlOb%^5?~gW^2aGltHB ze8D-*lnvQ$es8|G@U$P63W~d8Qu#tl?db=M%L~bPRBY^;&Sb}H-R&p_&05Wpi^x?! zkI`z|AiU8Q>(44`uJceo)l}>cD+eI_WgQ>h$G|Rjjqpi#3hjD=LB>i-^fJe|zACc{ z6X`Pvwe9FQ%M_V0M}D*+^PBUTE4~B$ri2@%NZ-ZHy^m`shQ8`un(0KBB)p*Is-Obr z+y*#tv(%_CMgVrS0Cut@)-K*~INK(3Ln~j;^G^2l>&+ICB2{-aS>mQ*#$|&7V3O+a zwbVse%34k?#uYDwnI##-o;YlG5V}Ek@(JHg@k#OMXI`jWbAGChk3Ra7pI79A6w8gGRjpanICDpNB|$`^H&T7uzlB_=6!CWC_*e(45e z_}9KqLp}JUCREGylmlU^DT;Dn(@`w}E};Zk0Q**N*c-K*B>n2T9S=?xJAiMcIuEFcw&t2stn!-4oYD6(+*$d@#La;XLPrTyQ={L zDByx=IVE+=rofCfP*89Z>SQX3=0boRCUe>i{09Nu5Y-f#hdOBN#BbO|>{DqP(_m!W z3KB6!SwypiOHVYp9}eg9Bcgcc6KUj#E|#k@amn3ryemG)X2yNe<|t24*7?&n?Kb1_ zV~kUfdEEpx$?dBuf}dZg8rAfF9Hvj9)YirzA}t|4f^qQ?~{GD zfu?cM;;r&s+`{emL_{0yN1efQvp|@*%Or*AuD+p;p}fHyK~Urd!cgq#i9e!cv}$?e z=05N_d5?n5RkUuLyMAKp*cPtp-P+!nJMPMv&8zltF0SZ>ugCmQ2wm1SK+zH`xbW(OhFk z&=+}+@Kv$E6<+El%EuYGrfia?7J&hq=r7Z55S+g3F)ZUMr$eUs-ezqpO)sSvt98u> zxCNglql}wPLt+w0XK-!YR@+(+w<)*A1f#uZ>X%+A?^kU++{8hStRjq7(>fl3M9Quc zRu89V^1vupU+tNfRl{uW#p{Dx_e! 
zDi|IilG+Rv6S6_NmC4QJ7EU|lrCt&OO-8d%#=X%&%&$Z?lt|+|yndEhX{r%XXmf}1 zc=%oL=f@RMr;i6;@O-uTgF0xVH^5{O_l0i)<97p8fT2kus8W%wP0XIP_4EMRKcvoK zx#b6VNwI{6i3!mfnyzo8nNt#d7>pEEBV-H%a8NRHL?!X^@Kt)E565$8Am^pr6|j^V zVy#f2(eWu<-A2v^gA(6V!Sp+a=iaasDr#F{v@0@qzj1W5qu8SH9F_21hyb zR69^_bwz-9)v~A;G4IuCak`{}N?M*&U8rO}uAW?j+TAmCClW^a@UrF?KOF=X=5`NN z-^@e%CdNHGSY7pnjff-dq%a8_7R9LWwb97auvcd7PiG9mdc;G2eS}nD4ifBM+Fm7T z7bE)vX+e!zE_Uiw{hrlya7L`$?+NIT+3b7~Ki$Gx$m>hN#&rkAw-~k9wxs=rIUe{g zN@Uw)8<|J^kg>YGsSEuhfLi{xP4P2*bMX^Z!7-a9a?zSeahQO%` ztBJb4h@qJNQ%Ay<(=d8!kZO)rm2>&fj(6qDRzQBQJs`vU%MALSTB1=WLoEUQNWV z`$22w!TP|f@9o+p$+kXVbtZ`@>%1f|^M#`H+PV%=x&e!Gq4Qv`DAUtKL4235yoM&R*A&>hA{A_mF9@svT~q!=oLyzl`BCRQ04YJH=fJ|Oeb_xNSA zKQUrK>`gSodNP|b_Pw>7Urh~+Japv&B&?Kn?$g!;vSyLfg0h|UpuB2|OmcNewBqG% z98zX%tBp-W+2)ukLnu$iCJ9Y~c9MF3D$&xlDLdn|`--F|_r>;N^#os-uiA-(id6S- z6sDbK#8&e3O-k9j-aO1@K|&}h(x#{>DMBSdr`c0?3V1kT#$KynXf9(uGwu+Phw=f2 zapo<=<0tP~Q`e)Sdx#QdDda~Ls+2?-=i0`PHeq{SKZ2R$B`gm-Jslrvzw0#MyCN?^ zHp7>u&$Km{pkAcFp8s7%big;hnuH%Ha|KQ9?yKk3kL=1FsnT1WN3fmdnVUFx?7r?_ zS(aU)!Beb3d}euyqeEYcFD)}aC7O33sBhaugg=*9JSouMvQ&??PpfM4x(#(-sK4MF zb+J>SfwSu0p1OV@ zKp^1oaq&&Bp6e_yZ=YX5(~h>N%4M=&dQMJ`k1M=fk7v*F@uNltzuJ5E+EryI(QWpV z+>mJfdg}Q_4N|Rc*BiRMXX%ir>X~)_z&{t!(Mcd6Sp{5f$Xbuo7qpHwIC3wcWfRh*)NEF_KE?bDOylN*ab zD9P=t*vR7b?b*CWl3XJbZTHAYkNk{wofO7TkWPeqel--W1PC-aKv2YPkwdr0c}1;feJmBOVqZIf(?ZNv zp<-{+z?>`BI_|$N~~O+9ui&7Q8`$kv;n=KCQ~AtPf-a0eeT+vHRieu}8_XWua}Wf>Jq@gKr=Q&t`0&OQ%0s2i&ac z4w}U`?l0E{FCIG;29$Lvq++2PzMU`{5&iu99Rj+Yk<0K>drAn=645%lH`|7XfMS+$Q0#8Q_^m`7Xd&>iql zT5KN#J|CRl?oCt!5c}0*vO!%-s|G1+{vebBb~Ef)TW_Ekg{g1>Uv-P48T>*mPa{dIW#dH>|`^>{qpc@J9m z+5Wa9jCivMsVYUxPl`0AqT0b9(e+u->aN*eIV%w84kkPdPH@9YP$KxYoR{Y9?}(ez zubsD~{wItAkKa_5lez_U&N=clp|c$_UKOA@^)HvKLH4;*o=OYOvY}WMuvm!E7)JS$ za$1NZ0AID-rTiUW*AFxUeU2?uX>x%e3DhiqFR#iEC*A`}5u8{3XdO=j zKfdMe4qP6j1lde6P|P0fqh`$nx`B}S7}fb7Y4AI0D0vY)gDtUWDc0~vhG^!u{?$$> z^xf93hcWMUw00z};_s|7MEbHMtztcyMH}ZwQf}U(Gplbst6RL0l+ryA{vDUwyE@qO zru!sbko)ha#%nM9t^Bu}?^pM5vwt;Sz%TG~KTZPLcb7O40d4V|C>xlboKk}Ot92!C z|1>`}R}=bE-6b|kb;YG>1+;}m9!*>ko7D|MHc{C zez{hKay_%HK)}4!BjBj~(hnqCPb68So&(vC!B;;?Aa^MMSj;8;M=AiGs!wW1hEcK_ zR{gLI1D)icV@!Vk^&J(4uY`Jz2FV-T5ByfOK|8Jy z+%7F(W6^xly%{vJhkP1Ek#3O!RS;byx6xgvGoURoVaQ%ra>5H9C?$cL>;$R# zW2Ot$5}_VwAbUZG_*4~Z`LYZc&lpmbbTcs~aQe`{q%W!&n~V%PIDvD(xVfRPLPrn= zU@{(zVDvz42YI$bXBk^+!%EqjJ$-b=4utER>hWd2?(o>f7-D+e!YF_i)QkoP=fu13 z-z*;9cI3ESObX|S&1?@@B|{f544T0?sxau#c-hWmsIv`XX>3=dA(fu{U)7!2f~M7=T?#D9%)B65E4J<1TGl++ShTa9}07$=<9;I9=>0h@?c;YfsSHVe{mc2}cY*>X39R;0C)@bk+lJLUz;s--q| zvH}x&h*Mw2tpZ7Ih!U3+u*5wGLVEs{`b97_S->B*48aRlO7;XTORe=@1bkH_uU?X# zn3`o93qKvhmB6AtB+5yZsu%Fwt>OAsl8d~*H`@uZ^GcF^KDBI#=5&3`&!|_H3y(b$ zqojHoD_(kNzZLMCgNak_c+=t1LBU0%QAt+9dJH2-2B!n`h>{Zt)W#?^peRua0&+~) zIoC|eZ`|fgVpRH0h|-XSA&5!6RJzbvUz-F1%Pmis1TlzU;(9g*eNnbeLwM>75P8ne zzbK--Rm*6SkCTmAAt^6OFxg_N)PID950Xqm7H$h6Fv)4rC-#dtM)jojEu?N9kw)zZIF@Z13q`-nT4gZNjhmq`_V`-=f&J7P`umsAwlPcVM*!!81ryRc zO%5}>5b6H7qn-mX(RdKK^`^VTVh$??L4}J#XuNBTI1S0-RqB34ofRX!SF_Mu8} z93H=N;lubP5~xIT``P)oS~e(@q`88#O&a#RXUMv6C2k@Su9=e7zvisZ<%^CRH~Q+1 za$Pz7Ov~aM_ z1Ny7aD4xXX_mofNEb7EmDP{(dY&A(~C1}2PBA_sf(xF-G9?@vuN(~CDS4~kRqQlZf<|UX*j}$7%N%poyC<3V!ye3l_OZyabu=JDjp12he3h*Q;TLiCn0mS3} z7Q{gj&L%12E#=PTAu+iN?sE|u$a$sJc^S$ehCTyRKDmGqBHL_O*QUp(azW4*N~KtQc}J1~k#3X(jGe(6CF8Y+7ZSAQTv4 zD24%2QLu4olv}thX}SpQ+7#oSa}X`5+GU37Dx1MvZ-&TL;g--{o<$3SfC{Bz4(saI zYUCuYU$RUtLA6+av6uLNQDB9f9E7OPWJE~N^s}GMgN(EbATE_8Xy$&*sBTQKE|0s> z?l5Mu>-*!sS8E4wQ2*Na3^7hNn+{`KOT6}Hx2E<`BRLl$)NfK0-0ASapq#2r9VJxw z+fzDT&@^dNB40S5JJqp%+xEa|T1h~f42ln<3SNRjqk7OXu@(d$9w3Qzi{Cp9Av%h{ 
z;WDbPKHIom=y5v`og2;|CqReiC==mDmvK^MTIEHnt3SK2yhv^UNv8OykVu;SZK)}V zp_OV85IQt}D*&(@g+szVAV2{>+}X#34UD?P+58evU`zoUJ?WQ-^f;%d%VHVH~)Q%8>`iP}9qe+8?%$_8t!LL{Wo{2e5?nI0MUC z6z(2UEYBA;G`U}jo{I~~6(tloDQl}tvtnl!&E@}2mKrxhc0MZk9m{H8n$}Z$h@ADV zptEdpUNrlq+)}MGd8{G0bHxVdcYOn;Z+@lkcNbFGNAQP@e@~ZWz!QgAgG~BP&_#VB znREpkipCt9zQ@I#HE@r7!&Fu(%O4%mA70<)Vyi;rR5DbSBWke#a@x;BZeOe7UO_+^ zErLQ_X@cPihWlfm%dHBJfmK$hpX5H!@BZ2gs63hs_KGjSf4@zWDTDKxLC#RJ9SYe- zT^6s2!8qWgov%sZg>Yev*?mgGyy1gU@@6C)BW=fUqi7;&V;ztMDQd|ra4=5HglDn+ zi2xASh;29bW%HeVqNTpqn{TG*ZhYEJ+kN8r;<6>YZeoclNi@T~?6<#3J4alc;4)Z( zuV!XWjEM$aB;E?;lvpB!7>HzdY*$EPLi`uciN|}r@BZA01jiSii3!Krx2!y#0Y!YI zJ6*Ln%lCL-6iW(QGzXYBBD=VFvje`v4%b#L7aSrjziN2-Wh%Y3x#9W8=y;h`VbMjr zxJS)8=2hy13AkEko3)mL0PvQ?MSp$e2>D@Yb;(#7ODMUib?XfB;GJkP0=L;vxXQ<* zG&@4eldRZb*?K}m!IUQ)@(MfC9I>r_nQRMGxMAxgiKM=c0XosRYPrCt$r?(N5>p*LAnMx0En$qwHyFLg! zKIpkFNCGtK>XwBhaW!?dHL-I1*mG3IK=c#{=N&~0vAN4tL*P!Pfnhq0Sa~eCs_2j% zYfyW(d|I59DzZ7egKA)-pcPps4r^J&LP&{hwrx)rl`3fI8Sf z4d{3Dfh`3)<~aj2%NCjAOh&na8GdDEdFCc1W)>>=WLYsL!u*on%EYV?GwviGUE&S= zR*S?+y0LSGoR(ecL29yClMQ`Yue5vlP7PJh!XL6eFyD#4*gjDrKcR2;6ex#Z7#ioT z4~3xTZ}Y1hIU@-(qhBL5=DfQY(m;+n7)~)K^gi+z7`lZ!YiOm-AIg_Gk%M!8oS`?~ zY1ah(#w9!xJ3Kioo!nJVR5N8Wl!)S#oOj$Trl1FYnZs>xZX{QaiVsPp``$gXj7LN> zkJtrJCZrtjxAA3B??r5EcI9h)vvzcU?o(pX@N^K!IyZIi@b;3a@+Q(?xK^E#4@T2s~}Tm7c9bCXmJT0Os+fj@bU zJNgS!oQg))ez24;Q$o%_%T?KK%~V@3c;nzhX?FGY~CmdQ~@TjC5#FVWkYh_KD0gja}%b@K3tWQlmU)Dm|;rTRZK$nKJ zWSCB4dpvQ_S(RMPQi?CVAYqOF0{ejCG#SVmF?`VaCw5L8S*Uj+hbcp zZrCq=fz84@o8!^m;c8*HbfT5#h|v+jZw!1fz`-g;3;1-ozCAJ5muFL;@d}3n`@c>f zox0l6mD>7X_P*P#*MPYVi+m%0DEIm-yMhHMjV|szxN_I@^b1z!Wylv-UR>SY($L^( z{K^p0KHJ>=5LEn1DwtB!Na&}ku?Y5UHLQTa8v$V`4VHn1BjmwFr9O9m{dq(q>nsOXqN0w(UscU@J1v%>Z>l#%?tocDtgHTOwAyUgnN^o^>?#mN310 zfsdVdDBJr{-#;>`LhT2cM=D19(%aQo-11GxFsbBIWM;13gvo-V3SPCPFHy4{;s20A z#=xzGG*a1D^G8#DZW%3tl|C z3suDy=P$-*02xGx0p))&tZ4A234$GHRnM70@9^ycmg+Wv*=AVUOXzp800CM(h1Z(K z1zhDxR=ixZEcFsY%7RM7Eb%&C@BtZr5#CRsAr$MGT<|ASO1H}_QD8$heFKQ;pt*Bl zRx$T(s%~Z0T+pM6iUd-jg}b^@2QNwikjYvhg;#&-=i(B{E(yrv_Mo%Y^<1#<7aa&v zb1FWo4J7;Kn^lgIxX&EoY@dPdfZ466!=cID+MCpRjPb@S9S<z z?H~yx8xadS-_2&VxSGe?zZ~#wr6j@Smb=T3Tq{P>3n;b0X|Y32ypv|NnNhS?IBdgm zm4~US$q_wW@%`H`0{-fQ=OAb%@hYrzC*X%vzsg4UPV0c+b9``*x~UtNYbYGCL=-J;_u&`I=Tgz*5@2B9=QYAQtIFPNv%%)T7gB zRpeVgNB#~wU{dcTgX?^Pi$}N$ezb}CW!MNS_sSAWQ{tXhwNZ_m%iTB)>ns*%danFQ zWB~u3oxRrY+KTjTwy`^KrHt9BWqZp5Mg`q__9_slz3HPgW2Fmvy#q4`3~LrIM@|6b zG6H#ZXxf7G%kGMA)i7tGuD*E^nL_grQ`XTrc?6}V!I-E)>w!>~;Xv)l-H@s+VIEaC zoNv+?&tRA!;N%Ug?tT4er?q0}IKHwo>Z%G{XwsqXS+;BK1|sd49>KS3^6B)1o8nNG z8=O$vtB@aAUUu=a4!z!yjb5d4w9Htj8pc&ecf7k-&Gv?vFv*5|(aA@!w!iA`E zfmH(S?IbdQtfL}qXa>cFchXlY`p2(vG@}h%6oUF&+kY}2xo(+km|@^w(}45UGL~<1 zR1c%Xi;ZJOxo0wKixq#+pAT%QA7opTk}6HQ{CBBHcZq8@v%5Nccu2G+>IH0=0f6j^ zK6QU07O$-yh$%Ptm@Th)`UL6f<&I@P2(rqSKd0 zhc;9mu&)p6N&maPGQfg;x%b%53^sO2J!&2Fw`PO;`EKT1P`1|Ai^C18yGwa-2L0K%zU8r0$sS2j=bdb7YaPkb=PQUSbhCXBP5x6l^`deR;PK znk`hYEY%=~$8GJp^)+dM2ne?oof0RLZW|m%UISwBqawmnO19(vDbK z_BlPD8w+CVZT5{^k4a!Tmo4t76*I1Y{~2~y_?9LZW04iZ#KJGaiWt3X94qInWGb*3es z&aaoR_nx($P``|d@3p?4cHN{;xj^4N;{5Hx=;C3UPOL$O5jli5*xGbqIg~9>2QGpb zZS)V`id%G0gS25cVQo4{Ghud81%?rug#(zVH)&?d%Z&Fm2(Q6!^Vbb_6ZeZ!4^06% zSkQ+Ueey7bHK4e)#?66q7NA<+f=BPTllGrAw{1R_vkzV#&Bo%odzDh%0&qgNvD%{_ z-k&+!X2H7r4I8?%$<0#76N9^&H#YoE#WLZZPj&?JdgKsX&2!s$Zuyq$J<9vw-wbW1 zoHeAx+zzeU{0Xd<%dG)(_QH75P3M}tv$1DvP+>MlEku^4i_f&&)<*6P4abr zyVmmzS9Z4Z4W7J_9(TmG*X{X5<4n`}`C3}Q;Yt##u&qLaK!6zqH!NEZUaJoqW}V{N zPsT^0fn&y+)pIIS!n+GMZK`u6`^7jlDqZqtvl~Fw!C@t53&l62uK1v?I(ilC#@crB z(o2ct=P~Ih?MTw9jpiZ-B*#~Gku^APx+t=9kC`lIte%s}-krx}2M`y%;O1=t=xTKk 
z3(G+S33G7}34M7ALD~fjL>JM)L>WtG4Mw7#-l^y$)HE+*3}I#^z;+X{p4H7_Wsp8( z-!*$q}Ny zDFZQGQ>4__UrehqHtwWfQ2=H27KD8hmO$pPqJR+#g~793bFA+Ee@N++#$yCjPdZnY z&A(c#xNJ5Dt$-}dwXg-}9L?1c^v7t%nOR{&LvAFg?>s#b1YtB62l1F;!TvR}0+edYlh$NYm>E$i3>?ac83^wcBS(P3uWi%QJ~V z=2t$y{sYw4tp3tWpAdEQ0V`5szil!#6=Bz}8Ca00>wJLs4?MD>(c{^Gd`e_@$okeb z?}vQ#@Of-G3V_#k&a+;;ZJzZTfgbkq|j_fKTGm5MRh3oY^M!9XGfQN7} z}PfLdDCkmZE=w6ag!%|?JdBXkwt9B ztP;62F1fLAeppM^Aa{J*t0U%MJup|I%Q#cuD14C@B9Kbvs2(qit<{-?1q?3U<1wk(f zVjd4R-_JssNpefgo364e=b1iM9!TGZavVrM9x&%fR!$q^2p@kq6(vY<7XCCXm#vE$ z1GSa3$pmBz#bu@`8o^n(07O%v*_PsTPcwwy={(#f9$?hZ#|q7Zf^h<~$ctGeb6_}L zLjL|M^FTp`RMM_8w~^~MWg5yucD`(WO(V$(%>X2JLHp}8rZsSEZaOY^Y=9&dMW$dM z#4n4UmoD{SY#CmMK}h0IQJzUOwEE(*{uz(IbYnU=-ByVr3tKI3vTnJexu;j)V2O+) z%?(P-+|(6=Px3?4Zr#zTXy&`2@F*<9W5f?pnC00FXcUgueYuvu2wXtQg-PE%I%(Nj z0N6jt>oUs;&kfG)NCL01pQ=_vx82pUqwQ|l-Ls^v)zP!iR_|n7j5uAmJ$N!y(|#kx zK7pZQwm8V`fl+SA>QraTKgM+$peUK`NVI<&wpPuW7=VaA)us>zJ>15Cgf3-oz(N-V zxS6LgPB4Buq8%wn6205%DFqBzBjNX{KGUKkA4VlI*W%?GYAt6SEnIGfR9T4HR&Lkn zQS!{~UUoA-vEakR$yoolLU| z@jIv%XTI}$o?s-xYB~Np+%7WzMsL}f?Pm_Z#+aDkMgypHsy|lN9o3!n^#<=T`p{ z+9`JE!hk>2P{4_C-q441l*jN=BPl|uWlTLqw}cFh8-H1ek=OUs{Ts4cC;-wz|B3@( z;WpFb&cB)6Yruos4~V-~dPtFqP`!^C^wBuWQXs~QQum3t@SJN}Ss0G>#Y2$$nJjy- zs&l1oG<99#M9h|-iuZBA4MA|VKRf0es50^jqNp812}HC|@{5ZzN8yuAD~ru%u|x#BA4V~nG<3z7r5n3{m&(Vl##eP^ zxVy`WwhhIufPjYES2Ow;jly;skMVI)Kt#|;rrbJu`Lt3Do^&4eoj$=d zx#$^cswjn~=0lLYY(px-^3QS2@%B^QzusSySPp|rQS%X06JT|P<%&}O?R;u)B3gZe zUWJnVX0`y8)|nn${(MW6=es9>wuDx$uUPtbC`5D;Ae86eN7cj*6?y~5m}8(MqDW`q zYl<&r;%5QiUq>O&jY6f>3xCi`+d@#mNP)uRqX@iz>VeLxxXdQMSI^GUySRpW4~p(_ zC`VEJuS)arLR4c(;-3uv@zeiGxYn*M!MnUhjSsCXKC4(mj61MqE@)^)GhD?%ZJ^(^ zo?Ft|Sq~iIfz_0|I(VticB{XKmVGD3Iz5b1_VNxnI0gS=!XVfJHGz>B_&h;`;w!hL z(+wx#B(+v|LaZ8HJ;VdHnZ8OyS&#P%gf>x#f`z*kJOHtF=Lra5-JT3?70w?PG&An$ zK#IcVlYK02)7t&Ys9$psK$@466yZgnXRoGjg9wnMt4+noc1D1x9!Jl0if=S!q*;vw zK$(&ZOw|XOIN@N8$pE!_ZK)fHz1Lk@Ky1uR?vjM~R9Q`7E02);R%0znC5%4BD$-2> zNKL%Bm8#%8+eLj_>$va6p#EaIqvlQiT533sHQ1YLOLFLj5%eCJ&+KW3WL zN!a+{@dOnvOYs~lL-B=3i%<$< zCI?k$PiAdT`C}L*DQtU~fbmO%`QY*6f9*^g<|#%wZip=k7NLZPrg&i%L;e$YIchvb zO@h%~Ux-Y5oc7)H5B#2f7oEt#K4Y3eYe8!0$e3U85;ql?ni)ZAT%73xzP1Z9G-T5Pfi z-IMx}y6a=aw-gR6XOBjep4=}vQLdSfT3y#XjlCFZPoQu4#Ked6Bk;xNvM>Ay>YO#u ztj((lb6UV0KIysL4JW%h7%u;;P;fkaSN zcLA2_X(ImQVeAbNHjv<|>%OYdACmwbQF-gNz^Wyf5FvvthYSg2zjSn_j#i3w7}!QI z5}9h_2;{g-_{2*PnA*&LRQ;`h*uZ@F9LN7)f}OFacdI2=-#l-@*=tyHm73J`fOPCe ziI*xs!6R<^I?LVp(BFKl#YdiWf4MrnK#eeQYosxd%aC-OLbN7DH@KH@4SO!2YDO-S z>OXQ88cogB%JjPDWpZ2t&2+B}A)+{W`W<{$8I zOP1Pqr7$*#4<)ZJqDXW8<<10L*FD;V#CWTzJQ!Y*AHwF&a;W^*r$x3lXSHTfjSEtD zvd}C{azEzP*f`D!ot_`ZvNW3`FTOqGM3?wYIwn9oI#kzbZM%n~Y!gUr2payO5-ug9WzR5VlN#d`y5uBleM2LxmgiLjhT?LAu&^hARlRpLBCoL51JYH)5&it zsCb1;3MlVq9l{>YVJa~Q@|3@&3lT6oK0I)U2!V0mA)0>@peB${k^w*zfmgrVl%F2D zVI-ElDv0FqA}CYZ_TMZr)5Z!{N8JNlM-~cusZ5JsuorYlaKehV{UEp2DlP1IDP9UC zQSXpO?itEQ{zb~$rWXEUy+g%#{CyuFZ-fa!iz6dIc{!Nnv?X`@IZ6n_Pa}g*+X#;= z3*hp>=Mp|=vqhdJXl>7b7PFFAoDB&Rv&M6U4s!U|lA*%)a6v-g8G@Hu;0T6lH@Tj+zIKLY~l4 zw}?NKnAVis8_Fg0L($iISWmZTV&a#ds54H{_RXcDZi9b`ExcBmArsS-x6w7=ZErZcgz)dEm{Eu=}VM zY7v>5vGLOe{KkOGgr&g;{9D8t`82lgmC*Svj;|PYy+5x1CuCOA-GJ{GrV2XE{(JLg&z(nU4FZqtcNcjj9ZGM7^xPY=$MO8mFQi1+s zNSG}_su*}qR8MzQ$vl0SL07U>VbrGbM<#;~Dfdgy*lGYYr|a*|<3S$Pi=+X4L?9%} zU6u{HJ4A(7ICTA(e!7WU9r@4?h};|D%bPjkaiYxo;K2`MkHe;o?2|8Is$SnxY{p6` zk5qol?VV)5-W;jpmaWcvxVpXQe=fcA&lan*M|rb>PcI0M4C$Ce?xQIJ(d&@AnKwzD^9ZS@qGtxe;FnzF zk1xDvpYqj1#z~c4K^1$WO!#Q_mCH)YTJ}}79nI5k0Zk`2U%40u6h&}Amo*V%PBR_I zmOdpoItr6!+)NV4xEN*Jn#Cl~e{b&CS%}K&smA+(dF|SlD>z$chlGj5>3!w*EoH;! 
zk7!HkX-j_X4j0E@&arb|%u@P=Vl#rGqk5WN`(pR!jfKl8RqlwZ6*5?|wX=@uF!Xj= z9d`HeXLYxJ!7l3edkG%MWU0)lenLDox#EJwCnHnlv-WfnEoI4o4Rho3{hgLL6*5Aw zT(&Uj^h79P9sqK)WzTw^#{s*#s+gwwy1#GF2)30Gj^S1K)Jl~lY>~&OaDE$ld5z_m zP}>#5fA1-~msu_4P)*BtZQJ??E)?&ExbcEau^e8U65d+iBFIIiKxsU zBz<7YRrsLXj&OX%(MWvawEw+71c6oyIJA7D!p{9CW%Ty>gv6pj&eA zX=WriBpav`A-b(}xRZiry^)GIk=Uob-kN-d$mZtIP#Wa!@1t=p8%lFMnJ{(^^=~10 zryiA0E3C9>w0{(iX5_b|vtTz@4dpra1H*mg5=h9U=5PwlS`HS4#>4YtIA|suF%?xk z2cR-SX-8zpCcM}RhIcoHM%Vpnm(JPv;OBAYpZ(00@AcUDQ{PVct3JINY8A&)DLw}= z^>Hq?nu?qEk-((cQ)2+ka%wB_KbYt?uOMRGI8Bst0@q2bz5sPfRM_6Tig+4}c^Z$j zSt&dW61wFQd$Rh;M%;LhV$t*e>L#gC*SXI|FL-yDF~fl2YEtTvE~$t|Vx(e+6jgES z)`EAGwH@NC4!9vp6bETQ(vL`(sr<~r@xp}UK{FHwpqV0*(&FY9Rkd}G7L;R)-18?M z-H|joEo2$;kCaDgJ7Ap22h&2Ra9SerR4>D0hZTRddS}-6&O&rek=Wgmnh^#P{=T)D zMm?vqB-SoFA@RJl^UP8b=YqhSA|A27Fqdk^q5q_Z0d%~LcYRudRAe99kN?q z)K1$gG>oNrtWZQFvXKQvUYAlDdKg9g)S{e1lIJq4qP@*g{4L@tsN@n7dA6qD6E6=` z#m%)|aUb2&YU_%ae6{!AsBi>jw{u5M<)VYt{vk2I_Y04>WOHcwc^^&Xgscb!lp)l-gT{XTxLEd^1UhIm`C&Y<=-VIOXqCN2?!veR`>;85@;LU zFNmN_M(29wFqtX%t&j@#I%UaJv&&OwEsZJ{YFA*G7;Y#3LH}>a{vU{boWU>f3kd+= zDF^_7{{KB>|6gqh*E(8`n{5d`ueE(#@Cha;TrXMMp+=3)IHT)@y!D9IjqU<7IoW)~g`C9bF{8q{@vSY`4F4AX%f%#Mz!4I70-|@zX=by2)nW+^IoFsI26{g$K>BWlzyRF$@#dh%drhl02 zf4<~1$fGU`@z(gD;D<0T!~=CP8dI0@D<=m%kTzLY-VAVQZ;YG4NOyOga?R&78vhEO zMwEp+kj+CG)m;OG9ZJ+Lj4CWVJUoQ2!Hx5?--BBrq%`>*CR=st=P1j)_ve0wk?;WV zB`7!rMkPQuAxa7a*qJhTyEMD-g#Ig;k`ZH6J|XB&sDN{-5?;C5EePwsX`^ zwAXzq9t4oUZgrk#Q_R70-qI&Rpsv3wIO^{ZhD|pzg}#UEh8ff&ePf%O$PKKQ`Or)- zW1YKYuuE08Tqg*4w;SN<2~SXV3?Nf1gYA#j30VsB;TTz&N0kk^;t!s%EtaAT3ep0+6P1~vKUeTPbNn5F_9ccg3dla80b&u_#>8{eM{AhKwFu9us0{;{!Hh4RrUm!txG(DIIQ%kJ>v3`zm6K4F@pT7+BvdA&ih%zYsv1Hj zGg# zt0}7+w7>_+L+1Vz7F2m|n#sN|*hOWtQ;3o`n|edfl;c$hTucq7quEd%q6w)}#IGCg zkdHFIgs`yiXsvufDf`1vP+8okO+~snWW%PXoCBf8!bV0nE8$~(cy#6 zSO{~yVu@t%ZE##BdiYe zXyi6tyRX|k;y52Jsdvf$pzIskbJ2oyV>@4L+qP{xJGO1xw#^;ewv!#(w$-OcJ-DO$ zAFAqE>*b#Sv#rQ5;OFCnqpcaBx6#Mr2r?n%xp`{^^^oYA>BM#^hQ5!{A#MwW}bj0xL>|g#u_Kry~ zr+k^m9h~>t!As=$cbl)N3ghob^0NJqd4K79OqIz=(0)dn5=Rr1cC%!}G3hJX6%IPY zjPE`kyjVS0+Pk?J=PE!+vuld!xB3-_!wLiW?#dSbb9-x#>1bUDhLtqejl~i4H^D?F zu=XHPp!sGK)dfk^9!fgLO0~rXsRgd^ea3wp!J5z`WOnx06H8jLCD4STxauPsBolHX zma0;XfwMAUtqec22R0Ey{ir8wue7V3OSA*4>HP_aMKl>YR1>}6ytJwyIt)kOUZK0F zhnKbRB>1*-pkm8@{+0_Q9Nc}S8~g((oJJcdD+w*vAkC($1c`d3O6 z<+V^xj1*dW*GRMxhvOeP@%?CiCR(@iNRkYmyivsgN~VMjcw#Rc&Gt^>exvGtDfBhN zdE%A?cq_HuxOPt3GkoDvvE+U-)LjZ5TV%!8{6JUHFeZkW!=M+fgK46fZU}bGe%oMs z9*JMxARM69C1-R2JumFT=%$i~pEvn9!PT~N7Ez8uX?5TiwYAQt!j_q+u(G9LR?W7I zSU}5rHNwwnl+~Q>e=w&GbO>o#T)cM?2Q{on+vs}lGK4WT0%>lNOsGGIXsBPfg;`l& zgwbG2#|Vvdh-WSD!-$gJB0C&zht1=3a1b#`4X@(?L}D9*ZskKH7RzWiq^{;xZV}lF z0Nv~E4bnV}W0Qjpyof={8{9B?s^}OL{z+mp@ED#eeWNajTmDPb@D47d$BpY3FS+Mk zqkE>Fy><>-w{D;gVBzC_Tm%fv^x(f7;u4R_se+LY-4;*4W{gk{;a{TiUrT~%4NN&I zaJ2h$4Rgdod(^p13wM|^0d;=YLg)%#fhL;&^k+DEw zR(c=pqG^?#R~q|A)y`N}yQZgfexJ4e-6a>ESMXtT_qQNRw9xe z{*=hSNf944E$p6Yl8(IN{+~gq%x@}n3PN0xVfFEbPA$Lpf)}XV`p&yy#P6e^AD3mLVB;;->UJS} ze{d^E=Z&v~waqCPm+$y9kAAq(5S;)Gx~*J<4Km$MHZdQ+HQR4*9iHGHrYN?PJ`*bE zrCcF4-2TUNkNZgiH%03I*lGWe^~kFTaCnpo1<`G55!ESJUfIi>YO=n&d$%3dP_48- zMQ5ST61j@FQRXxtL*p@|Z+*$hnlh0|CxgJJPjNh;F*d-O9hzoE#xj4@qEID?hN;qZ zzY)*#38xinGW^lIB#1*PEB~`xN1^sxF;EzYw`wxeQfRO(vq2s8iea-*R|i!s4viSu z`e^-v-*Qc{$!+fhGt-33F-zfN*B_{sgSVC|2+kz)2YNZ11fciCe+WZ|h+#YN>Fxo4 zGQyrHZfl7v!0=bJ))(^RyJ7rC=jvI#uMWIi*DzK_Y)^!Jt!Ix2RKvYRfBS6Jf6B|+ zxxD%>Wo=2F0oBdst%QPr{y-dloGq&)wv1J44wqUwvgS?}v!~a2 zsHEG#OG%^1B&}$#7bYC#6m5&t0@Ni7jTKev{D#)W#bDDjM?Ii=p;gpnM-?saN->5= zB&^(fZ5zsRC6BHzqN}N*37%2m%dcnrnwjdP{xEu$K%m}^PFqG~K2Xkb=|^c4QgLuM 
zlGx3W`1D|U3+r8|_vBW<9@fj~`Um6?cWB1!g?YRj)JM&9Wd?nMwmI{@hT<2J+DfQE zb)t^zxR)9suPqqTK}tJe2jqq0wgt;!>Xg~9KG^BR(~c+r(=qVA?j+GPk{+@}pbzQw zI{7Y6lcT4IrgmM-%RLX|g+nE*$HORS^rFgx!E9fDl;!X_ zkM@;kJvdBKX`!y-+Pl<#sI#U&2*0;vluiB~O%>UkHey6bbtO%EB{f>I=J?JpmC~)~ zQ(~sGS9N&NuSUqniNwxAl+e)fktQr!Fh{l){LpcedEy122tiQ9ViOWgNADZ~GS%h& z6NHK76MGgXcjPYiGCG8=d~b9Pr)}9;EMJ~8T;0#@D6Zo?OoP<9-PD|A_0?^Q6f}V3 z7V2s=cW2W4iz+~o9xc2EQ?PA&CJo+OTj|Up!V=%tYtRTyu^BHf35aNtn!2D_7^&su zcL0({|9m1R3e=RFRS~YlrVf{#Ip1Su-(RTU$~qr)e3YL+QZQ9;%vW+daq~8bOd}Y~ zF=U+6&Xnk9pE14BWdaqI1uZ93`IYQ)FT z#Foy4roT^mq44)6usW8Kx{P3JE|CG518#vG$CI+^y_mD%3q{fE2r#|r#(Gl=M%M2mPabtpsvCXPE%NIJnV*RRDeRXxwRnnC6#FkFWVmA zioSU}a~7lPo!YcfH(ATxJ-L2~yWoGOq<&aHB=;CN-2~ZsYen0{rr^g*e`PU@w>@4z zGU)F^F-gBhm^5xs*Z*m$WWfHV;>`5!6J9)O_qVJg-j#4Ru5_&8UW|>n$#XmwjN4r^ z0B#_2SX57q=Z-ZvXtzGvxthUd&1s6u?~>YEXA(m47%F`kgNuC&3=w8@Y_N&!YkWI0 z9X1dxpb|-wrtTQTaNuV}-StOJ*ZgtLCTXy7|Nhpfo~z*9k1tnp)X3$+W5Zs{KBqV8Ei@+N(U)vY z<69f=ZEz~qd_3h(DpN{*(e+o99v@G*siC85XgzDZ$c>gX?L?G0ZT}pkyaNP;31rfJ ze7wy>7zth$Q!vQ%_W$)=@QH|*cyQFX#}>G^ zqd!IuygJ>9mGN)@N7Nq*el=o|;f@DBK_A|q+`>N|Vb=HlhZ8<4YBvJg&}cea$>W&8 z@Yy^~rPqi;{!`JjRYVL!d0ued9V%RV(pt>F1n`w-r#lr0;oJ!O@*@Yn`=L7qj^K%T z?R4>8cr|HyQJqlK*RKgCry#&&KmVG`CHzIM-MdGJh%Glv=Mxaeif@=d?{;Om^Q@ zfVH*ABkZzFN>@y@q_4QnGK0(xY2lW=@eMAY=ldo2KOpUY)v5}l*i0_}#iEHt0|7Dq z{{v~p+j=q%TjE}J)6uw5C{&YP+DV1sa?)vGNj8?(sbrU8o*ikF;3afAEnQ7}K%_$O zz+M4~qB@gyVmhg+w)&~MNiy^uCDf@pt;!uG%2H|w*{V)kGW6avL~4~=NmVvKzqgRs zm=p zEbBkPVKB6s#?_^-{35YD^ot&uyZ)S&67d`^J`3K}9&c73O35?G{o_9~) zXuNx-&W5jj6PdLK+j!!12~6h@y`YZIxSiM-+Xd3y8o(mCVlwa>Url|IOyil(y1f+i zYyXNexgEd{nPhZzd4Oy>fY`5d{K>DhM0&?3gHCxha&UQ5t<%>}- za@cN?c2&=ZwG(RD4uB&_^)wAVBMizFIpmVev)Ei^-0u^T#?Vd{qU-MO-kyF3@pJ;- zfN8e>pc+uX(qZ_9>z^ZRmHWUt5ZK%^$JD{gz1|Zb`7%%Wy*d&w{xZk->K@41{xV1G zYft~ZIu!VP?*Y968OJ!4vwFV!2?kU!o+5!mWH@JlBplGesAd`~8$@tPP}awCNl*c4 z@sgL0Il^2S%x(fO4QF`(K>JMcfw|L}1^t-L@`?r+&hpAYjnDMp1)cIr28hn`Dh3SB zR)Dp!94VM9^I0qyT!UFHnC^)jX8`AT&MT!u1lO&~9;5TD${yW`Zb86Aj=j+=%%?m6&aoUznC`KhTjf396Cdfo$LS8SK-lRH zxj=g+^Dqzs##01vev?@}7@useKB2(q=?2O>#nOr!`qq7cOXI!-yrLEt_rgPjniuKNEHU4$^I4&skWpW4qh; z?DW;vN@jK4ZS zBgGsC-p3!n`1Z!BhP^L)>FJT6nXZ}xA81327tdvt^z!F@USG%q8{Dhe&WWi&mSRLjWqLvjfb~Aa^TcW46HY$BSB?fr#>4lnV=JRd!`is`07B7#^ zIt#Bn1oM+~dWFFLe?uz1>58hlcB%81iD}F<``%nwX>;n!(IP4{^JJxkaQ>jmOz%`L zWd%u?!acgABTmR{DIk(G@DR>56?oVkef72dV3*?fQfA zG*t$ecGsbH)(m-QZ(5=Gsc-8*o1Zn~F?BnpuLY}_x&6KTIm!nl+KnWOLD)jo;Mxh5 zX&tQT%Vyfou+V%UgVBkujYBBCzlp7p=+#U&Z|5d6KK#YOqX%o zpae|+KGlSjl=&TqV^L8d2Oy?od{o(TkSx;sxVjZeIb)NhR$IkjLB=8%8v$aClq`o$rEbP#FE$13 zOezO}&d+@Ff^?7sJ25Do5>-!77C*7Hl;&WjktQhN*%7QjgYLbqU`8O+=j7p} zXY`Go@KbtYeEKcp1EBPeYB)EJWqMggop_7{*;SLNuIa>!mPx51{VR{g=YeY?}u85c1SQ z3lRWgzLAB6%=fU@ZY0o8^oP-*pA*(+4DPFU(=K1{?Hs?PK3lPGHj9*+*;tV&7{Jdv z-RV}{Aj%$!I9I@=b+L8YzeVb6_c6LWz4bJ6PGv1~cW%I*pYvk1WDS(>7ca#ELfVmV zJmPyvhs!^Uj4%L_4qv6Jr`lDF*By}UI2mG+e^snNUWr7pYo*te$$+W4C~#T+-KQ;g zC5$qG-e%LDVITf|enPAyF*J(Y{NK9zz{zpzfM+KF^=(7;XXMIj#@o=>cqiAxOx4GE ze#5rDXSq`3bw3nVevrqulYL&_+hvroUY-VMu&j;*S2iJ4$|Ik61X8d$mLWZ<;u`2( zUhQ*W-IKgWjKRFjeGL7eMIF6%0pFUl9rI^9N`TI!_Ts_2(Db_!;?R%ZnkQ`gpGh3_ z3a8Lhr+|?+a?SH{o%fe3aj~*=DazHU8}Nnee{UFh6w-v1DDBSttN0b$*V#hyQHe^l z3S}t4q>5S9VA?Qon9+4G=Jj7tVDW9iJNMR-OnN@d9{6IGKM=&QdXxuLYZbVcSR+QM zSgkS7I4AgZ(JVSGYHPP^Sxj-_p0j|Q14fxq>%b=^nv~K^kvM-(&GvkH z5px=5lQnnWo5fl7)&;HU(YbfVnD1sICJo2R|6pK2%|Jw8z#Hso59!6S+WJmS+_rUz z@S{pD|GKj%&S4xbu8og=nFwS&QalB6#&bFbk;KjQ(oNT|BHkLWd%#vJ6!KZvG=sYY zmu}Mu)*W=FzK`3z|K}6gcofpH(3Zl1jy5S;wZ_E1?a!NKgy<}y4Y%Em26Dv~AJ$n8 z^R6^}AbX3b4Li?X&)~iZf}eD4-yB!(E7|=UXOiwl6P8<XH6nx2;Ey`EJsUssuM 
z+cr;(=?Y`C7Xg1%PezC?>sr!Kudp`{N4x%u+=F=I9eY=xQ3oUlp1RxEI2_I7)MgRERxr-&A6 z<*c3IKiLV*7bPQIYB|EF=-RTmrJrJ zr6eZlqW~?l&+}f{(AB6f?df*Jq!{Z@EIjemzeVl}0j1<~l5|yq9W{!*NdZ|r{*cvY z-{T}?$P{&=iFlEMlJaqt(Ks9>xk9n+%yqR$h&h-C zqPpR9^jCiu&cL(`tzJN~YOyP}AvV~piley@&vSoJTbrTy?Os;dKH~MX^*N$kKsfra||g_ZoJ$Z}FD?AY@mjT#i%Sv+6 z8qp10k)Y9JbEU?dJ05sTyzok-F&RnkB#CWu2TMX}Qc`#Sv?-OoP zSwQZ!#ApkchJ01Gkpg?%dvsTmt(vC;Z$2{gX{*0Hwa`y0tCj359+tRuf!zj*0E6p-0J(pO|u;9s)aP}?Qn65cH#6vl3&U3imE#+eVzN2MUh1FsY1qX zLN~LV{Q%%}^L%RIdw9LH||5;7NR)(RU-4dnfi}%n^Ro&2Z7??L?zXc$SBZ+ndBqkLSlFYwsJNZm5>iTtL&04B4Xy2rS~ zWZ&84e2TG{b}65+S`d@XK++F!rc$39^uko$hJ&_kYqwz>UMrCu$>iObd}vZ-N8(5# zZIdTal(ntk*!Xaob>P!EOtrIPK)|yUZMe4Jv;mo56S8|<=qN0Fk)Adpcj?v%M8U6$ zw$aX9IQx{YXv11Y@DGxORxyM52fMw2QtRG$%9hjTyzhHRHqA|F& zwx4E|^w(-cYlA+)6$a@?L>YB89qlNVt7N=6S>o7QScO^BJOTrguybD{z{O=2ys(C{ zV9l>9^Je{nvUa&P0^zxkl44pSHrs^R?0QKX!T~w6gI8>uy*#H@Uup=#Ri__Cg)V05 zwqmF!Xhe84XvhKUcmN?}31SblwjyIX*odV5xaa!Ym9;fb?~%aPBWfOE$Vm_A;YH7x zGy+!8?gFk;d2~xc%`k?MUg4;Aq@lNAhesf@BwS@=5A2mI8p0|W$|w?Xmwu~@K_SaU z0XKBf@6Wfgd8kx6TmA|`Jn$KKNnwnkROIS>^CBqUEY3&L4D z0S<_^OCb)G@P>(-l(k5-T0EH9Mg(`CKy;FVP53 zL*)LtyRYc+x%{=F;MG<*`8LiEvZF`hke%5ZtesC#!W4sg;qCn~Qe1v3&?gAd=|DZO zUF1?~3!~hKV`3w?3>x{p(PhQ8p7eS#jCNw?7#=y(sR=z-Wr-hpp1|z2JxX1Z15K)0 zfyXkGw~92AEx`=ktzoOe(5i^zWUu#+2S6=>l+9|@$oQtYi}fsDVON> z4xDy=`@)jE5h0y8i%Y%f%i<52mO9T2c4>`U3&*?ENM`d60u0X#{3d?|66??V0}M}* z{L=W-k^LF9JX+ts(e5ibBXC>}n~dD zJFHGiekSA2c-x$Nez96D2|XM*kKt1wyd6Aospi zMrH!_w_NmrInXd3A#wmT{&`c<*#8|EQw~vY({)R)lZIq_)^rcuZk>uldXKl`(04W! zkFo`0g@s}Z$iPrR8}(DL*rjFOv!0}MtYtq>r<+^Sa&qQ{JB@V3Bf;BD-1W8m5^FkS zZy5x$tBK?5KW_6aAuiipmBz*805#NUr;*oLZpd!#Hq?oc@^s=}yRG%4Va^;GIIg)b zcdRrWsbmYZGA+BlC8#MHW#<0wI(0iK&Y3Y9_iTBCMV}kfnLvyH* z-D6)e+#8|Y>}suc>%E0X0HY#^|7;CC7~^CNtEbbvHK|-W-Su9O}6=^ z__6gB$JWC*jmt44@Rxc7qA!-`Wc_;HP&stW$+*j3CA_w`hWZxdJp6lNQ0_)jWbz;~ za+7yKw2mK&;AZNZdyf&-{8)L?uLC<)Z=@#>C zs71CHDE1~iFei-8lBn2#B#TqMh)B5%uka@Z|1Q{uvOl^9(6DrUQztk9(n^^!1S>)-wqiXB$+Q}SM9WhcCsS>AdbGn!6oHtYPuwa$ zoQ>tZedoA+m!Pn>)og%{Kx_zT#zO_z*UTsQEmWNrg@$ypuy$ZUS&Aw{2rOusz=EsE z3TH}N7$1lfF)4iFYs2K`8?T&f_=kiqz+2CPjgD2pM7`}#K%AH<)&=#csqG?^{;~`)7E$ZqC+$9ldV^{|jd3t7e^_5l zq_-iL5;Dr_=o+pf)~(b3UF6!@D%1xTHU=9R*8uK%6@`Y^{bRh~l&YBi!D-P0gf1Eb zM=&R}X|ezcKU0DX%@R|P_;;C;nKEbMJe{2DLZZfa0m{)a^+7VWbYXPx6}yw^IM$W{t6i9i5W)|iANXmqBmC*qZs6_5xU z!yhH4PEk7Gv0Iqqpg?#P9O&F;bEXO6&+dkd9Y-+B5L1<+ff2yslGxRNNUe>p(!52L zO|c>v0bV81Et*1t0Rzg;bQHngCGCvu(fOnL!^HbSuKdoZ)M2H1hEp{(`Z7p=7Sf7Q zHHtqk*CtY-gfv)9NEK&^Yjxlqxa}>J8d2i?NsMfrrp@Nc&lI1P;HzCF_|0;Smy^|x zaf!*9-aIRD9ycir->@!Z&N5uuheELNYSrMk!g2=JrwoI3$Ep2$A%-zbVDOw)!Ler8 zotJ9y&5!?MAQD28r3l|MX-2MyAh!1RVtIGTx&DCZc{a;O} zjOKp;Nhp3YIs%SPd;Qel@h| zWKLSM;y{~2LvTw7qzin*T~paV82ovP4egO34G)WbU^+3H!66Y|-a$}1eXIR)w%WS; zJ21p{9Yl>%3qJH#zXQq@ubAsjkQgvgSlZ$Eh(JG8N^DTy9~^4+HVuh=dv8~$pX(3- z{2g-PFPn^?uxd~j$Y0y=CfgMz7(|V+eNJgA@jZ76T=)6Dh6{Smu|AGH8$dpbZeaOU zbsG4$y3JhOD#d(E(~o2$N}KH1ATmvZC7T}iNDw(q_y^(6rjklAgt~HUT$+p_2d2B< z1!$alY$Owc7ll>M>HuN+M)OZXWc-Bj4?BeTDNCA>2k6MN(#Ysbx!XL#z&tZZiZ0+WCx(iHTdgrxs{ypnzaF9Hz;Hl{ku3`n!H62om?XS2bHt5d zNNYA>g%>Ksn|#1NT~~<%4i-k_r$(4$i7$8wv{AQT7rHGE#s#0@_xI7~`(eFlb5n<* zV8KhO(+Ayg>PoB6eGxCFOTlpXX~qsBsfP`CV*SaT@AYyc331>Qz1B~~v> z);)!aduNM%V%TjHO{;bEU!^thB=7VCYmS{0q>h&dqh%NH9!U|*#wkt%kzB_Fgi6b*^&I@_ZKF|+0YVJVYfql<_rH*7jnhVM8KdLwa z2rOcJ%j;oHX>*|H5Xh9;V_Mx1mJE&VEif6Xw24?*jj&V2^TN_PqR$YDM>FNK~{+g)v}H^B+)-68&=KnA&*N? 
z{B^l6xgLHD^d{8Bw2{^u{s?9htgig;SOmZt(|scKq(TstCy$o62$ z!uoLW=#<}FMs-glrcJBp3dPEn2$Q?Aqy(`ISTZ2F=Izq1ZL-S(NI|;Wv^oRP);6uo zqmiHp&8*f|uACiEO`5MfAH+uY1x!BzPIx2|gl?G?Kt+%Kx`A0Hhb!rkC?po}-GXk^ zCexNw65!=N^W2UKW;LJTf4dI%<-5&!1=9nKKbRS}PCS?#`_GR>=p=i!6!9nV-)CE3`qSON6 zSbF1dgmO;T$^*{$;vNTYbc5kdqw0VSe%dOOPbN(g+OnBW-cm67@1e^@EI24Q7X(G- zV?3OsB#3(9PmMzcvFOtr7lakfk%9^(Lr8n@wFxMp?pA$(dB^LJmdnV*E%OOFk|mH7 zUcw%xkt6`hob*W^l?9_I7`7Q}n_G3KtWY`_ow6FTll5YZv2@9AQqhF&NA$VhB;YC7 z0aN0{5QrD^>q}e46%W6HDS?d6+_xDeYzKNJ+|4WQ7rbi)UAN1$@ zxIE(>VDr`+li8WOiy5JA;qFunfu3QD_4-&}fLhZZGLzchFD{0bEr1bd3iz)SfgLm` zUyRX0Ca?HAz@V{=Y=!sniSBE~3z`&UvAZkFk}SZc6s&InnKVr>5y}w+#*unKoRF>y z$nuaH7F{vo$tb~gA#0h1pA84Lgn8(`V+>}k(? z1Hbc*k*jpt&2t|2buV^W0{f@5#CO$zo^?t5^AM8^tmasNyjb2h`(*57?CLdseN0`Z z1urMgm$l?#(|`IWcJuk;`=(2NH{WGPedB2B2wHPU#?#z_8T1mSW{nM}<}PRis5=u2 z8F`eJbp9VW>N7>r#IECF@*jpy#xg&9<(u28@AN))o%}6%WX!KNVM-f>L`v{01}D?- z9;G?wlnX(+|MCWiskEX6C_WVuwd_C+@4g!DHQ&W^D)&Xqpwi z%AcGL;IXFpdTp{g(~*lATU`#ct+OTkM^NKdp)Du4HxkNU4&lg}?NR!T{Vp zT(TzjU}>jid5*nu=08^`J}zIlm!*Ts)UlXhNG}z7Y6xebF7+8iI!SSwqzKydu@OpB z;e|BIJs-Iu_PWB}nj&08;whG`DqomE3ces#nEslh8hZCE-54zLT)Ilw8iCk*Ea174 z4<)XwpgKnJ!(+1w6@HCJ6CH>1M_U-(Syq+Fj3RS>5k2^}Lk+mucy5N-A+7}<3$@RL zg?ovy)?k@9U0&sKRf}f<-tcHYKnOq9A9QEJ;UPng9$x zkGsFW@xQkM+IgJ8>GOqeaI@{&#zwG6W&`At@T520@PAI1%S{kX;PnY5g}fL+5yeKf z=4&KA!$1DvLxO@3(84B0J;6=%YV^{;+<|#VXmygr3Sn?T$C?67>Bm?E?zB0b+s)q` z2F{_Jp?i3TdL?fTJ7Fo})Y*M!fHgA}>VH2Yk?bEQV#(SoN#3X>1UrbN>sk>Fxdbd0 z#rg*)%;L=NMdKrB#nqZUH`(7;&W|4VX`jP-9 zFUtdVBr||dhu)GgBVbOQe6mX(583HgVI)bwo+<8>Mn#duAX13Ezao4JP2r*gi;DeT z&O!<~;!h~<3Aj*Z)T=SKASaLs6L-mZ6RtE_r3wmt$UcEgJ+7vK5BD>bzhMCMg*Ufi zM6NB4)kf%^>IRcP2jNDd6o8(CH$caNy=nk22?AM9n-qy3NBX2KISkPAI3-iR!#ohQ zexNDDEal=i+jJU$5{u^y*}|u?AIo({lLcWg=$kkPV*N{>Z2YBCZMrk^Nx}K?WZwuF zn=!AU;Y>|8ZTH$P)@C%n51q0RDuzyky>PA9NmiD9hxfve8TVr?-#Uc-BaUa~%MJm; zCg26@9FS9M_-AvFdfiCC_zvbd+gihK)~6&d3NMVN0Za=y`B8eUd5DIsEWdQgg?r!{53CB$s3*^g77i2#)O|0Q5P~Q9+9F4A?2Aem27MYJW|0i14uDqyojq~z zD9=mA+GAJ(!*xBv6D{&fwJm#+`6mo|3~8JOn8dZs*`mR}BTd|)X5`npLbET5$e02- z&cbp8y&3^>5gd0KzW|Zl2U><++XRpb1K3L;tL{0Toc$%%{5@J(Ngun<&Mw|57^c}T zTV%88(lAOP7-}F|W41(w!(!H_dNEz+I!S(S)3=%4*OCkU7rM?qf~WlY8cUrLr42Y8 zzfkF~nHGnce#D!&{HWskrs5KZ(bUnlTbt!B!3N~sU}Y)cMsTht3{8*n7yJ_YFi^sn$4Nf0&{C1RRTZY_5u0x9n)F^MLp zu#16z(sK0K{piYS@2JD?n&Tx}yIMiYP^O7CnuqQZ8o@lNNrX|7QV^OP31gJXD&ny` z@&Z^Tw22%xdXC~yj@=6EOK4>;X-dqn=C&2aQ7c*zSz3tt@!2Yv)eVHy;Y{1M3^`RI zA5AIz^&eJjy!Ji#Ji(zO?BJ@CHXoDG0W(o7LO0Uqg%}N~Tf}M1N7vZfn>_?=xsU%K zG@h)oE|~bXf0T9Z=&;`(QEct@%}1sX)Yfb^Io&DcLd{rqI0tKO`!6C_z1|Fl#$eX7@#Y{G)H0VgTtm>rbTTBUP+$<2_jWEI)0@DG z({o8_Vtm-9pjcuZov}7@$vd*u$|{ z|4jHTx`eI3T^yN}5x2?Jqgn+y=aFn_rtLmzT)#s? 
z70EnYuCx0oT!WO#we{7|)d_3B?lYU2+-SuabkQ;sO4#%q2=juAlBMi99S_t&ZrT(W z09%531knStGJ^2EUSyJh;4+fQNeaAW?E1S8%^I6g124|UG)%c|i*yR1Vs$7Ey8JVe zd@<$~MeEBNew$q|@a)!Mmg(4YMGIEkjbW^6j#0IOI=#PGB74zvfMB;N1^Z-;Hrn9U zd<9?6?mY%v>ee{mDH$OVY(zE2m9IVTctPO*P`BYEaxj;K7Rq7hMI?STESC_5sXw!( zAtCx{cvVdoy8%vl^+NO})C!Rjc$qjb>P0K+&Aaajuv0T zS_?VB(Txl@$TNrL+jeboXiMF_N4QtNoPHUqO+*%6*ej}qMzY$8DM7~BzuOTafdOJ3 zr^OxvdOQxP9=kVDd{}pP>-%JR4Z*10p+JLu&y_8p=`DXn*2N5hU)z0krD9tBT?T_* zpaX~$$14{4{`H+X>SW`D$Z53)XxRd)V|0mTL8o^%H9w)+n)ibS%0DeahQAT7UqMt%FZ8|X0EkX;!mVkBXjk)Rw;w*2@`UmwRuUigNZ}VQ|!@l?TOe5!C z>`>+#nTRfHq(P@|a|#K0yKr*~W~)dJ#2KQFm?L1GsH#@S8<3T!AVyN$BqA*q?LsOV zUDD)w-B)UK-C!{{1u~_Jv?sX8?E{~=VGZ(}S^rTL0^+d@rF|#l)nrz;dZ$a(T<(P094=?=`&Zlh+jACo@TZu~RnEf(l^-Pi*lRSi7z-mpCWlRuKctLr%{U4|64R-(2tbS1B0-J60-hM4UD}mvbwVTD&)Hk>L;gB^kNUPgN+RX1K2Os6qTxWwxjwF z{I-A{I7%dJSK>O$(}S004U~}_OzqtgG)u6xB?tm}mBzuF5sacV|011hKyBJ~_7r)= z+%xhGM9VG62WcPKKB>umCY5CIH~#G-*vaaRjkBpWR4@{X$1sm05& zY(O_J5F*h5<)?M}1AYF;Oka=RuZ$@IoTkZ(H#f<0)U-89+~o7DXB*y*Q&^l*A7W&V z=qdsD|3lb0b!WnbTlS4@+qP}nwr$(CQ?YHkVpeS1wv+z)vU{8{&dvT0&t7ZIX-!Kg zABfX@dqdkOFZ>Dm`{irK(cb&^D1Lne!$N)cA673#W_ zq`;5fo$kAr`}E!_YFr#TntU|2mBjFXzsLlKyV-Rbo66W1#Pp8~g&8mvUeea~UDH1# zk^-RhZbV$IS#MkkE;iM=wbY8t9P=%PTK#^So2|?1^L&S5QyH>paW_ZDMeSR#_Ev~B zjj+0ww6M23Z>Vn)@^r-2tSh;~==|*Jlqv6#5*;=ln}zj4hX-FE%wM(CX&|*ntcYb; zTq4`B*=mnkZnO`q7~g&t%atNTzk=|wON$P=@x3-|kW{1kjGVqEST|m_GV3Vf4g4_A zD>f#03Ob39@E?P{?;1pmz~TCgTf@G2Q-@;T9EvQSnb_@0>Dmt9)_qQocR%4)H)w}j zthyd0<;Gf^V@6Y}uf<+BXh;dcrTtP|fUwIwN|1U5MZ!*$h85W8L7JlvD;-fsbL?OSf{CeFVN~HGpC(hh)IT|gxUW_7FxO;uya}a zHCwl2)6YwxR%47=e86FrSBp5?CcEta$ipKm?rgJ_=CPX#@v^P>hpjB9DjVVUXecd~ zQa7Y~GcMJ90EQ41qh=>!lQ}cd^_$koM_JbHcXgaZPzB*0>pkX8*}ke3kG#Ed5g&nh zsh(#z-^d*&ey$uR`yQ6;ajFU}Y>c-_Jv@qXio-K$Z(EFY>ZncJYX8`QsjlQaIx0)` z-~e+zc71w`tw{!UdwCunDsASom;>4P_D|ZWHVNk>aWBTfCWh0sZ4q-lw@`cg*mu5e zyY@e{>(<5AVAl7fGV`lEsE@W^@KHI+*qOGe`udM z0X9<<78FeOO56LLv4$)!&f@0ru8kOQ@h}<6;nvjX-KU^?Kw^4b@OV(fcB@+*Di2~6 z{E~_3@17|q_WsYn+e5JOzwi6{5UEoFH919m_UVh8(y(2QlWhl3e5Z#9ipj9Q*e3F4 znm|cYOEe7*qw310zH`w+I68R^KU^#nb>1WMtyy|@uP$gB?48$0!KIMHNjs7&xVat*es!-Z+rL#r?)0cj zmTIIi(3>yD{UjMOLl1;!z%EiGsZ?TVMEptM)xCd$x=keLgcWT(GTTD5@+Qo^YK`Ih zp;U(@b2oq|2Q^(KPsg6>7Kt7vS%Hr1igYc?wa5~dq8!_mgOSv~)`NUc)%O)n+CH7yf!#eM z+9w=bFy%5D2?BpqK;O@$W+)919Q>pXicM+uRO92m-25s`#!zJajj@^V{U*X`Rdnl? 
zfPZ+QUC7&3LJDR&NP{pg4vqFeC>sE^!ACZ|?cpXYCj zz8OOx;Wu!z#WW^>+5}_mCIfraz*JW!w^eG4Xf{tH7onx8kqT?bc(hi5XH{=?G>K7l z2wf{$FJn1F-2LUgkN?y2Q{dv?LAl3oQcb8aLQ40t?wF{44GW8*p0^yEf1-fLrF9Aq zjQ!T9bOaZo*Oc@p1`J33P!42m&faH)kRMNtX^OyV5yr_&60fg*di;R@h1#?k6&~HB zq2H3NAv!W|P~YXb;cNLJ>z#DX*JGJYsSlyzMXfo!uF^T|xpv2bw*J?sxXP_TX2!Bq zCPR{LXiF3c*M#q~ZspxC_ff-uaNwgut*AQJoniE;+vG0`wm!W4#Menr+XJc1{Mktl z5gghk%C0qufwIB?r5tIBR})@-D5&)(;;CWzbMuNAM1EcmYSycQImZBk4h zX+=3N(!>^1Cs8MB?o72if$;_hi&=-4b-HW&-cE?ffR&TP)sS`N+0%hWf4jdpR!Y>} z#d@qoa;cCT9Mw3wTCTTlbBz7_w3ZHceLNR(h*-}N!!m1vab=URLvNF(?CryO7wL8p z$=$fHU5vv{7EhNi^EV5gaZ{Y>o-I(b>rB%@b_v6jJDMT8INMw8F>5@3&pf{QY0u0W z36xQ1y!3>P?yKgsJB;Oc9CoBjR(w3~o=Pa)b?e@U{!1}ysChn?6NjzxXSwr&sS{Jb z7Fu4bIu1T#|8R#wfJ+~E z>vKqPGW2jcz8dQRXp5r@MvQn7XdeHeOr?1co+~M8#ysR#TJmuFq(j;Qj{?nNx4>ri&&_sz+)eS>H+^QL;k5&QvS*VgId z>DL`0XoXSYm($d9w>i)LC>C569J7OsbstRS;~rhKWu&9hAA+u}t%q{7(sSJ@?Vxr1 zJ?`W;*Bl!j3kikZ0qG=~C7G{mDF+{4FhMK>({9IKYpA8C-VI|RF75Lh@W{b!Ku>o4 zt1S(ync7UL@=J8+(SBJ~kMrBmeY?fDPwf{{Qop&9GMyEvw_8!+It?Lf*S4q2x zLKVc|xUhEd4xEHUcmDf@8#F<+)?MA$u}TpqRj`(0kut}qN2v^S$OR`gOGi2TB)Zhp zLe&LxsF@H|8P?ElyvD!SAw3Y1z03m$eNy2IRH40aRHNKS%%VGcllC$-A*lPLqG^~C|#!>AvU(IXp6VBkz8q%AFZLnl?|FRM^Sh&Z* z`0(H}VBes(cnDu#T|qeGdx513Qmi~;&if$9pD%5dI~&y!_{IBq@&{OW-^?W-tXl0b zs#UG{b|eOa?sW!PGxm1}G|9o5a$Umd4G|s#Vl_!r?bK0lOkNF&52U(pCkJHo+8%+| z^;4uXI|XdJ4!5(mg9R){-+UnfyRpa5RJ~&cyayAGLZmA|lFJiSu`(~r zWaDQhXHuTIh8Sn`voFgo+ zUrz%asU=j8Tp~X}TS%Vy70OIbg!+k=W_fbn@T!!Y7lg@)NdCWZGNMo;WjPxHnN%fj z@$7$8@)-J?0v<}oJ2V+^eIvrYRmx4Y0?KpybY(kqf49QA#z8YRCmwD%nb*6$c&cLs zu_Tw)0vKR~=43M4EC5iEep&Mx*!MjUQ+yw>l{$dSgple;_*enS-3{Mwaq=SUwww}H znwQC4VZzR(jVy&8S z1!G2E6@1ZB0dJAt*ta<5=^sECdOEIN2MRWjCBX$HFVw@&R`=wa=wEIoivqQHqZTF$ z7={$))dn#4d5*L^eN0+6aEVI7unkw-o=cDKzXtFWnh}}g;^IEtTMw?D?`>_RYA)s4 z#`ZQR^JW1Hk7~~*p{*_;FFm{}@feEwI2NNE)E?G*#toIR8q6i{ZcU`C@>t_g+w|OW z_Ns;gX*O7a4}>E@XGPV1sEl^JF5kw2^=)j7A$S$TOC2o_6s|x|Df?W^S=kxicP93m zES?^C+f9y|+=!(f+wQ5-9AA(K_y3aCgXs096#rStytAd9#~u%1E&M&>O{U?;_vWm; zd6HujON8IKqt$*MbG$;YrF+5@)iOpQv(%$~kTDI<2w12+I00>y(K}&E1JaQoqJ&m) zJGkrm*t{THx7Db9Qj2LWl69MQvM3*Sxc(1q{`g<|EEXX54AqB3Qnm#H@EU)n!t^Aw zJW21!%A;wwd*I6msyiEmSbsiyqShp%vW`a=I9YpWI~?P|@jYgPad-Vv#Q41E&=&ih zr?~h-x5VeBF<1gfZ>7K!qzH1ZR=`X=k7-v9VO4irGojoiC;|6BhEl1vptbCAx=isN{#-fzZ&7gTkazlBxD$(l`=YBCDOOrGf@;oE z4!mns1Fm1yXER#cIDES8Sg~X|WAaaw^eoy{SfU z+A=i@1?;)m)wQgk2UH2|S_X$V2c;(iH8fZY7bi;V z6e@EUBYTVRjc_aXCFUgpGI_Wh0=2RDa9t%>CA_G^LHGA683my?!AwUj zt6EQ}<66Skre2nNh(Ti8Wo}<2I20*3Itq9Zp|kOcxEy!Gvk)iQ3+!}<4Y8Q|vh9$T z1ucXecsbvY8wgjdoNlEVVd}@S8+Vq~mi^@m)&NK0zf;<-xQ*PXILdxP=+=hY=(@Ei z4rfc9|D7>6(0LCbr#{?Q@#l;{4XNs z@eM2^3!xBHq-`$A#J!Xg|4|MxRnnnujx-fjwXT^?l1k&0TGXU8mz$b2T`Puhr;XS&FXD+_ipyn@73-1?(g-hwHwi|+1@Ml7kG9S zVjJS+9WT)en~_E6Hkme6W1jMPi{`8%H;uMlQ7+o0R-sW}_QSSuZ}uZ!>MhbGU(T!& zKgEhm&TXl0z2@z)FTwHWus6Z!W9n?oGwNQa#|L{A}Btp!yQ6U%BJTB5D2{20WAU^s_WP`+rkCi_M`4!WCER0W1OqZC8aE$`A zg(NEw6GdnW#&nADcdBE!n2U6c0#vyKt3u54;@~;Pv{j?O_!`RKM06QFp!%ic&5URV zZ* zo{)*h7aWaaha)0B{67$K%XM|jYfT|U4G-VlM>*@^u->Y<-4G}jE%h`qYh{nx`9X`@_e00)&Hc87P{BYJg+TT)X3*Zq^XQ$_5M$9sPxJ$et|%uX|5H z5C~+1 zyyhqZ!i3$(OZBv$xmc+os7}yINC1rdilQW1mIw~buAShqx#>iND zje^Qad##+3iSCL@8eo!N7D^v`LCdUs2ej9)R7YbT76wHYBHknW#Jjp#efgLUb6I%{ z)?nPwk}WuyD?8m%$D#X8K1Vy$Q-X0RY10G&v*9TS$=^Su7EXW5&7H#WveL9w%Vz`Q zz4UC&i#TS1B`g53cb)En_;g3l8V>dZs7m{(H^pi!Jzzpy{DTv_}6F-E?}qgX|lQ* z<2EreG+IH=_jG%6Ah*1}@9~EAr?hCZ*+pTu53`8UGkQb64>u7R4mZM3pN|0a`wauE z-DCFtmM!NKh^DRd4G^v9>>;e2JgFK7Mi7@f5m60wB z%XGP$YF{8?q4JN|HSn>ycn2_7J9i8S5tF{VkfIhHVq{OcXn|x>SNpu{#eY)4LrzMN zQjtiLgq3UT8Wt(uMOy&rA}h2e1c9AReo6Ye^H#0nrX-EiXBP^+)+b<4tAvA=9Z4c+ zaU*9${tT%AabwM<CB?BRIUAn*DCW?| 
zc4Hl!(?zX#HxhSS)JDW4NYw)5zw3tWkGFX~c*C2~GyIjVR_;1KZmG_YSo?m`xvPv8 zP^CWb1RTO)V=IWOt7V-2qc;D)NcJoe#mj)N9eYsZ%745SVyD5gz_AD7fzdpvFDWyO z867{-zzu+C3^Uc@-bLq)K)JEH;pn~>!!fEPa|MaBYh`c%9lD!!7vRc7!qk&^aG0rh zXCz)Z)M$_%S+srd=65bKFk2Y6jL9ie$jp0<>?J@1*EIC^^DVSpaEFr2pFj$a@;6Ym zJU|E6l?Y?4g;wxW^N)U9Uj}kkw=kHVr5{`O`8o-^%7MN5+iq;ObFn$R90)6faxiuR zZf#I$=EaE8jeIm4BYKvfCJc8dk;!upD;DAB9%wvd=o}CZylFVD`AF7h=y)gyrMDw( z?;yeA9zlGcjJgxs{fiJfRuvcN4KVhP@@4?ERu@-nPp_keG_HG0h<(d1l237H+$mdw zR8{w^jmbtr5(o&GwBV$$nV=UZQC5m+5AI7*n)BS$=+q}R z@EOU`%U&Olgp)GvQ{6_WfuJg{w!CI2C#@K(UpPiZ(aUN|RDpv>>4 zB*^gb$G08CH9$Gp6p?+-@aZZ2)IP}??D)a~OoK8kiwRP$=!(W0E?ZE)oOc5UdOW8_ zj`|%MifxQTOGw5G!(OLfv2MFU#7L0t(x>k>@i^p}v;$9Z2((tn8?4hlKhNc932uqC zgxTlw5$IuCzvKO%Oj>wZ3CyD}N$EwQb*}mIk*LbD#cT*#t6d=@fadf77t6dwjqv&C zrq$^;e+r5~BUo)9u4U8ZMbv|IiHvk*_Mt zy(-wuk8ASX1^FmWn)-4%18?^$Y2RAg&?lFB@Xt!WpzmC*VbR+0;=}mT zH%LwXFa$Q6wiu5V^a5)&84h}PJ@^}zO>~okdrs0IsTV~&yGF$|Dei*5tRczTwIKfSmXqK06e;4f^U@IiRy__j@} zqN@Ppnx-X6N&30o*rNbaoR1)zFHciv(Kf~C1P8md9ZW$N|CJ(ePE0tg2a+dz;F z+j5VM9fvS%&3rRtyT_qmeZrR=Bq3W(bR=4-OP~24hlb3uwpS-`tVLa_hP1hvn^O{D zMo_d3XeXl5%``w3iq=%bRhEhHnVd=N3-w`Mnl&fzC=>f?%t4eMD05fH(^4+_re?gP ztd&KZpG}08d7iCk*$MBCqkQeCJ2+N)X}d(Un6(M=+}1S7LB&X$jF}k*%AZE3lqX0L z-7{!kb_0nv=#!=&!x3;t_!=P#91Cs<(G0Fr!nUx0wFiXkVQDOlzsFbAWDX=H?rL7_-mT z3g2=b(|L5bO65KLXO+)DzCF6RS3HGm>S#>(CSP=y%T7C+B>YUQH>g--6i*TMzI>h^ z5c!b!(kfS^M7Y1#j!FP)g#>YD1*FW5T2)SaTV`Q;@koRGc$X>%ip*;WN9P`=|BS){ zaNUxgQ_8!lA%F;qZR9EGoQ@b}9;ddHgo&8rKV>>ih<6i(BfIcSWz}73ymF%*2!nLg z1qS`j3`O`rB0r{cm8X;ArlafG#HL;}^-|Y<`&o5F2$`kjE8fo$EX$xq`3RTx`c+_^ zi+MXrL9_1cf}kFN4!V{K0B)TN$6QOrOhmmn`dkK6s|ZtKU}}Q$Ca{>VR2#c#*t<8n^GxX%*M~NUz{Muw-6>h5AEJQjZ;{qxV$%Q*eT!;m6*FUJfjmkj8k#!0DizZK_LJL(UC7`SGu z%!t@wrKIWb0`RGxcegpJmU?0)lXWoss-R+vc8z6-SACoX6u2I`tI~WhM)=_pE-KqN z&fHw-BrxUGMs1q%as_dT8d83uTF$p4{)q^!Lr-XZQmqw)U^rV#U6#%92Aqpgr#N*l zZVfT{vg-L)V3oR!STQ+GH6IkgnOM<3PaP~m*G<995#BO($V$A`|TP^88=2L)LQ|0o#jJEny$FgW`e;-w)DW3~bWIG$8XS&6IMj0(Y zf|bN+1!Z4FtdeUz;>qttTe`tTM=0KF2KeqV-&VZsIvp&*<<6SL2(!yO7!9wk!`L+X zTX)bcvPIz~NzIW{Cjc$RS1AaLxsU!vSfz||)&eI+q_$96(CQEe0gM~iL#th(_!($0 zyQbfqt4MvWNJm&ZxHieTdBa>(pZc0#eQq%k$&k`?1d2ocV%9YT=OL%Hi1HBsijhEt zh4dUiMEM;fD)pWh<%Viu+h@6#N3DXqLv-0|{rC}Q96u!G@=O6pFfBxZ1(}X@U0v4n z++{#s`g9&*gqL=M%W!_Nm2AF3Nq)(bG4vt^Bc32OMpKS3V)GJRf3KutA!F8*if8DA znIp{(*Eq+JQ2p&zZc`AgGp7OO&hPm}#W3gZPrR!;kAf|;3-cAlSn{ai8>Yz=CSCC12 zY8#X$FFFB(XUh)tCzvktb~q(}k^7^+U;DGgc;?Sa_ZW~+{w5dvqmZZBZmt@odD2zn z+0oe+z>o*MJtf>F>q*)6G)wJ|p_NL+R+F0%h5EhEXCum-*3ZDDK{!b8K~%0(3DP@@ zO0ECZ*iHuf_4C-WfYQ?;j$-_=oa9?woIg7)Cag& zi4!LLc!&Co|Bc__^o)6l$OV)q;J~QLSsRYaQ|o<1@I{{Y#STBg4L$`BWgbgf{JggK z`mqSXIYbT-IRUJFmDVgOzSQXlSuS6OjGP!{$;0Avh4Tx>6~-6H779tA9x5O|Yq%BG z@Fu!{xyc5atA1i|%@-*PZeAdNSDl@v%!~w*EIWT=mYl^{caKth49yU=NlIp5X|2_z z>iju?ipj5BG%313-|XRk>N^?AjCt(REN|BNx#}KS>BpLS97!(RTqk8L8b9TR$dM@;nm$LCU=3<95X31QYZnM|K6&uzeC?a%!F#PlpM} zE>=*V>yRD-lea&d2}Oh(Y+EH3I#iLbpC+^;G^fbn`cgeGeDhmMrAh|fgZ}hEC&L2< zycqc>4=G6L?10;p!khNkSnIFO%Vmhb{tP8ez4FqIcGiJ&jUk-1T8J+lx2O9_AY zvyKln-VSw2Z8$t#~hoEU{5{uy9KZk&D-zzSPEKAu@d>9R^!oD4EReyaF( z!FWmS2oN?vfTFuDLhaYW_cwbu zP2b}IS-WO{{v;DVC8$vHO>Z)x_6dVv3kPKVCB4cxf0h(7H`re#0sC|Tj_DJnfvzt` z>ZQ2S@DjcI$RGn&Z8|WyeH9ssfKzr%&1#^?1J5UARp2Y_Xr1KA( zAY1=(fb!Xwoc-J!^oK}Y!E^Fd7UtrVQdEaH-q_iDucz*!q3M!QDuw*0hyElNj(EwA zy1ZT%dix8$6!K^a>5ym$;pQkk&(QFP*qT-s6{X^@1R2Z$_<`RMuW4KTywpf)Vwg&l zup+<^xIJQz3T&ROGt9ITnRdRq9>yoC43e#Q{@2b(gFCX1FO;`0H<|axhBe&)D=Ga_ zLIf|JCg0mv^zJ8~89ObCYKo+_m3FskL}2hGAcOPq7V7t);p%FNWX$#DIMnpxb5LIj zu>Ji28OxW>l9-pOP?F5yCB*d$M~+hGFEH7kZJ=QO0|a*s;uYzPlgLL0KN}fhg)8|s 
z9p4_&)sJlXj6mh1e)S9CrlW4Cnvz@}_D{X3jdJ(*in7tWaZZ|H`v*;6Lo_@DXs>Hf_qxcDM+ z{+kau;tC2&96(~l-5%6IYkjP{qKUCa;L0ir-k3SFTv(FUbK1P;;jcp2Z!c&MLYN%)Ae!rDWjk%2?%f~L+XQRFfls;&ykENSykm} zs0=BGRQp;w92nO$LH?=yz3n*~y6RpdoqB(q+3&C-2*J##sYRqef+)DYp22F^CSiJ& zhkxeyG^{Fl|Mqm~zJrz<@d6lk&oTLhMaJmQ^Sx$)%Am#1(kH;tTM}JRYZWm=gW5L+ zjznbY&y3wql1M@7@m-2%L}|ImTi6prZyY!R69}rj4xHFRToS@aq)-QPcqa$0E)MM$ z^muq$@N#9ga>kua9Hs<+n*mUF)JMn|M!vHMwypO7P3T=I4jp%#%|gqTJ@CdGK1~W8 zjWF*!fTsJeH1OK;**1n`dePMSevbU=XV6ZYFy@LbqZ=$ED$dol2YbK1&jC*YYV(@Pv!H9k}rc=cDk|)OcNx>^+Qj{7q#JWAl>Eafpby2#Aw`0G5`2L zGH4M|3aTe)Qv=v)w_R^ULgT={z6fb3wKSF)D?}F6yQ$yurv7xMa8c+ zJW|f@!S_92t^^ZDc%SraBKiy~WF}+K^xee1l?rDe-5iD61zpMP7>#vLU7VBQ>^{in!o~h7zIw9gLlHu`z~9Z*FX+b z)WLV$uu!r$vT&!!4R{G8lf17FRKSX*#AiSF*pmopSc`~k>vqFD6YAKmJFUk17=xMt z2h^iWDmQlosBs5`AUaUFWRK^>DYj`q!LU>A=fEOU=3`P81pvL@IX}p$4TosW(vYa- zB}W4KD2BzhOf+4@_?o9*d_X+NHPeEhdeJ3l)Fn4kej#6B$niy}lQ2STdi^s)Tdi_g znmIa3_p$y=XO2d_I~1wga2cpPsY9HryJZy`XI8SDK7U#!6DmxzsRd4+kM`&CDrVU5 z>F~M+etj03Tam`r7Y^-URSD;pMb_w!nd`f_UGm76k(%=cAWfYoll+G-!#u5t&8+{A z(9L2;i5fA0xjsstu_jUk7w%$EnmXO*fm?Yi6%qE%v1l8Iv<1Ooxfh8tb)QJTZnjEk#wjx~V6Q zsGXv>{M3&L&<1ISlI5Q*4jo_qEAfe|r>@P2J$^ z9K;3F)o|w+q7IL3IAk8#RJ`SrPGgm{HEU^6X<9vPuMXSkOMdZqvX5 z9r---s-w$JsSHadQG*9?NBQ~ZFcNv~kAiVV-Z`Y%E)2r^R7Iu`>Xi_gyC)Zor@Z~5 zN-NOnv2KZFV_jGkVKL!Gh^dQ$#9`Ir)OOu%Q`$XPjbSwm^L^vN8*p^u7)8tKX|q?= z4LE43WmXNfJe?W}z~{z|YyL6dfD_kubqV7~nm?o8t!;)Lm~_YiaI(a_On1%8(<1Z(MrnQ^RDW+nHhm3o2n-VUCF zqP`=K+S8|2J&Jpg6){DpD-wzD^DJyrs`FgTH}T>=Tl-4Q$;ru1GdlvCg=ztD36aQX ziiYB>5EEgq=;j|3C*x&F4vU{pFEb;WczzVZ$IqA)HDjch6NeN!>N#klhKuJFpb|T` z2y;=kdI-ZO6Oxh?rd63ELE?_bI~pWDp{m$Mp(g+i`r-zjr>vuOWsU@@M}ILpj-fl# zkvoa4nB9hOUerqO8ViTB#48}nNju&9={4+AW1ZoELQM(PsmVO`=&;-IRZ=vYdRm$^ znK25pmyx?iIv=4}>e@xZnOQofhE-w&iTakc16v&}yCtUoY?Q(Zdw z&nd_f?MQ=GP!*{fZT&~@{lbtWl+`P2yA+v;M%L6#>YN9gzLp~-?Mbjlf`BxNgB@tT zY4G+zhir(FA@v_x@t1m|dQmzfq4he>1ibc>N|Za4tGFDD6dd)Gcmi$ofos&X?{^kThnA(^M)~o+os`%!*SaN5VL|_33$7lUOvY5PF|Ejrhala`p>$4 z@8F~?QytUVMCCVkk0$||sEXD>={7IUb|~pD-RLZql=apYJOR*AEgMFz zMDY^-NjOtjdFv#~lDWcpobao8s{=5HPk8Q&=zTt7dy_g|P^o09HNcTGYfVxGT41mS zv!hN~X7q0;$Su4J6!JXTci_)mEi*0Xw>6i=S)9A=6(rMiCufT@0;}v{ugwkqXd(wA zG;Pgc2f3wCIah_>dFaM{f=+DC8Ex3nh5pQyLR(lwjb;SqLvL{kOjJ-1b|^212zC$N zBRWE|V`hPScH}2f<*twu;sWP%%ybku>g4#@On3~zR@nfJ!_(n~^7`75f_G%Bi7S6# zO$b((oN9<%>KG+LPPyv?cnSvm_N2={d&DX2T3z$HR=_RQ7}=nYzn#lK0PTW6#Ie=$u0(7!67JN&Y?7BDLYv>L5MWk_ zXJz^Rj5N%h<2P1OlUpOi?? 
zQM0^!pZD3iTl=70$iAs(PJ-_pE*@bfA^LyGmR@CxHd(!_N(a|;xnV=;mS$0?u)ROn zg|cP*`wt>51I|>?m-hW)5(ba{B*uFBEJ@AtFz;Q@pZ?tbQ#GI0E+0xFAu{hl`UXpa zC5=fy@`RALK>7tH_)BxN4{|MaI6Ko>NdO6_d0J!l-A(B{a_`f-3J^%LuQKS_~Es zG}tIX@aKJ-v=zT<&Z4K|^?-HE{c1}xwd7bMpeVWfZ18-d0O7^031x4TNi&4OoB&p- z)*g$fq_#6d3>*?(;Pt_;5Kp)k6FN*8f6n6^6;!SV&Q-f~s%39?&;(4DXV&5H9cv61 zLYA;@h$C3Px9@Zc#k)pCD8}`o?a;?F_F`=;v8tz}_Tl=ftjBExM+b`f0~Iy!NCAG5 zlB61m6DjlTQbgnF!Th0RJwBLqc}(dp@WQ+Jcz9q(X^Y+8HkyB{=(71VBnIsVajc%k zN9;!H3^4%#EwUwHRI2#gwHG!$k9m~3l&$6R&9^S1%j3LQ&9|kfk4N|go=sOXoDWxf z!q8V;#y1uFIvCTR<4+bo?uxJKMeSK8}JxJmb_z9R4`IGRgeFdr!P>yu1xi;f?2|t8s`3m%fWsF z--jYLaJ)bd+22;Ay@B>E>4oK7zA+al{(Y@)27n7omYVfT4Y6$st;Hhg&<5*f9-JKE zrkob90rUGN&)S?vs@#9l9ZO#dpQmGF^!#XDWmzUn8Fx=&!ftnv|pyq^(W*m#hv9Yy`3qY zf6)adrjO*0y$@#5ettiFuW9u=F&QkKM`TlOgNzytmX5~A+8hl=UK?@-`BW%`-!}b4 z+fdX0s3WKKU0cj)Xiy?&7e!F%`(6PlVC7V-S;YX#dq7FVlH+6bMVxk zYScBx=8q3|OBxuv{{{_Ef!3h>$vQ*ko}AN>ZcKWRBRW()^mauF!H$l@-NISa4q!GN z=BNDUT~FKzkuUBn9rSO$$uguIywxP2ADTca;mBG0Ddp$y|4WLips+{;M*{%V-0JtEsec`5F0{ zwXn6T$0K*OCJlkUucT@99KDjFwUZn@GOY<0_O-!-9>_I9xpmMCblX9gyxn_FLQIW` zU<b@mt|5hzCRY|F-j54rsB$wy)&EFZM5@ve?N)n4xFZxF((k@VVPzA4d(HZTZ8 zseyE@5k72URO)K=bYSbj)|U^R1~Og0r%j$~hOf^;yh=BWjDf z2(rYv%O25oO>_0ofD!jhr7%y}f7T#QIb$5-77R-%olN-ZcyBG70nOh4?`fnqE)G1s zTY69Kz?T(U5g7X7Py6EsWHru)Oqe?I;lP#K@)}_rUQW&1vi61V?lcZOW4su-`mI~w zdV-DJeagkZfh0pcB$94NOd&pPx(Ui#w@`+oAv=d&;H;HISGhHeBM=h^E6~Tv~RGqa<=l z|AjQXoRGp4F9|Uk*Kf9`z<7N3f34RcvRAJx1nMX(mx0S*`*n_jaHfTm3$J0 z|DQ{eM?}d?f91|Ql!;)j7vN~DKz10>6vfsE_cd?HMCr>Igusjw{x|9+u*!W?Z zON;S4?OO0gxrd&$QPcJjcSPZoq$HC;3zJ++Iozmy@4ShD=xH5W6};s4L))~6A|Z&6 zMDvfR)kCg8WF@QGoUxFocqOOEsocn4 zk;nk(VxHO;RWepz=3lI-k^uuKb)oXZ2J zc%8QWCzYDMA|5@6Oi2$f*1q>Z8S^;J_9}OFw0S;LrGFKzTO$S2?|3NfTI7Oc4H={n zUkqT&hUWT5!U|!&YQv#&sNS(9iStpA-c%#y$`woM2z!tScH$`ZKPTQ6n%pR2gMZ9qbK4fp?!8y75RyGM;Q%a1Ziz8wigD^H)==TU~f}4 z2pBg|#ys>MGiN_Kj|uthB!bS(0A8LAAINg#_~DRRHc-;v)7yh&42#LYVKBD32bPyY z+G7_6!5}vYgABnuM@J?Fp#i|!2lPR-FmY1kOeMbTyPYu)ppS6C;i01c@wuU`^q>5* z4C_?u`e~-b0Qe|1kU!b=LWm`Le(8bF!Gz3aBc2T=M9e@Wffenh{+>-u0;!9r%*iqN zN#;_iOy+Lzp_ar*a;{%WG0J>wF#%NcBZGQATz&m+jhd`gNdrlWnQ-RfQvYyTZ-J+g zxIk0YV09lbUbaq*yOl19^d)TKgQxctu1VXlxUdOveFN~V%3WA8Vfo>OK2$)P=XGF2 zGzMf;@#(ld0)nVWBQP)@rBW0_@iaG#cHjIdoTHup!jnVd5ZDCUYEqVj2GLT@4sxzK zz8srJY6|&3;;A$?eRfCxIV<{fR@-&T{;nFj{H!~-xfSdx=n05~eBQ=f)_@hr6hf7@ z*|uCy#OZ**g^p8ziysTg6)iKM^{Ma)C?rUzCB?BGQ`kTXLV{ z(U(dyMJ7!#(7=2t6=A7y@^6-b^=bg19Srv=XI7t{G>YTtfBmS@E2L+cCIAf(#xppw zRhAT2aP1yRE7bzadv;FG|6r!;82WcW=LnNuV(M|IKP@m8#}dw=NXI)9FFQi=6)?&?RyuQzic4TjQAer9v~o|=mOnUytyxC z7_r9!I3#&j!Y0alUWEPNZHW;XWVcjw)lJAdfs`E}H>41#1RknIqTZb3((5P{zUr#;*niOYHAWYS zEjJK`jK7FSZCJ}Snby@AqIxunYxvuHmZgf?c}mvqeg9?DB!`LAqCv9Bu4ELxypTCO zGtgp}4T=kMqhOHo(l_!6)eU?wLL6>SWwaAbQaoF-Rm98|=248u76D<%u>{>J=c~sb zQXeu?6;BI}oph$W`ag`FLzHO2vTe(@ZQHhO+qP}nwr$(CZQ~SA8SmWI?fmiIYwvE3 zjMyt9=2XEyQUt{upc9^{DFGkVH;to@9Ja|h;;>*EQ0>F|(l~(XLZWqu+@X6l$|78k zhArtJO~GUvyAYQF)39ZA2zgQQhaP87pqU>-<^jqeriOk877AqofkMfP%H;tGqBn7J z)VNu9PMlIs(*~f%A5eOZl8o8BLXtwP(800aJRP(JNMa**@zXvfUQ9HI?8Lp92f?HV zey>7ra7^=(WwuGa1a<htZzhsUE2;M{jzMac7&LQ}Hb}q}Y^C^!`S+)d?&Ol_+E4@_XWyLk;oD6C z?k`1B4QKdZlngDQ`ft-QXPul1F{Xr|a@#0&q4CacY+2J+5W zlxU#Ug3vvUF1r;-U*t=!&B=oNu-kl8HN;*3EkE4kdObc*2_wkfK(G`B&IBCKDdmcE zAi;P;JG>mjYOaS%{S)npWT-3PqK*yap(c3v)>tgAk%}wGkj-cRWcGnbAPfUj3k?q8Tlw2+xUbKIacG7}wNH1J3n!}%|10U8 zp&djIB6hlf^pbePM360e%33oeROy0n#x?~B-PHMqL&8jKMy&FM?&$NX_9c0 zcU)BH)x%K-=9sc$fr$p_tkBt1xZ{*lK&afK&tT_d_vJOx50tHOu)I*PpdwEDik|dS z$3(=*hG_Dx@a;yJ?426-LQ{Y%(ru@<)En)SQ8v2pc35M*Vyw_R9o4TMy$Z`KjZ&x< z&&7Z1Z1B_vIo>>W4^u-KJ|EEuy=<`OlVfHOomia7j@cU99_ztlU^1 
z*>s#F<`s^fB7Y*$`GCu_>lFEjML5r7x%VbJa}X;6r{Fv=$LsX4VlY2p8W}#uY#)5e z#Ar0I7EBi4!d4*46oPU3$^~Nwn6%`r9PpgwUKvJrdbI_O?uB5!(0H8o>)PuGG_E@I0==X30-MmX*Lm}+cI!Xf@+I= zZrSUq1xH+eNbsMjttcJDS-mA?=6#-0j48IExw8g2U+RIxsnHK$Wqi>c4hp6an$4f* zR;3+aKGq<13I!f(cYO3XAV!9iW^n;N~vK`EQ zJ931AX(5!Gd60o8d34lDV>88I#9qF2SDuHr4cs9|lvm|~x)xozwiq+dLU0j}K2Mc*c^MW=?VLwHAvJ`XWG=m}TAk5(WPvx}Gf5zxg_YaKWr z6$DkFaK9#~Y3KKlS+GG>;-2r_Rl+vSDpIT&X11`p)4;uFeJTd)3Dm2FtGC?P=X}V3 zPnA@1I~fI?{ps^TQMKCx;X9_lZ=Fzy;5)PE;i*5r^(L5K#>~?%k!VhbpPx8xq|;>j zq1AjjxrL30Gnswj39_I^t{j=2Z>Y~7!J*H$YUWcGM%x^F=>RPCZIWA}V86uK+bKw1 za17>}o|YKF0j0?j=76sWzQucNzz@^siziPJ}E>C5j*&VIka)>e{^XdN4M zd`42w(;C&M`FKMLwH9j1YLV(bVytVm+!&|?HK_Iz#S@o4%R*fXG=aH8z+y`-){~k7 zIF5AtIXB*}T5ZD)E1>Ic^E%a=gp@AgPwI1!6}XBh4Yta3v6&9+P5mVbFBLi$822~yqA?nxflEshYE@Kc($bF!ol>PX`{bJPn=of&0#*T zsnUAR?+I(?%AL5z=3-JWYenxTyIAKq-vx9qQ{07w5vnF+(m*G@KnckXU!Fx%TnSs6 zrHrM3vf18w z4K@Ctnw$RIrhZ_~>5BX&2{}_Q6_{i+BQ|^D?Ywd8?Qr6qK9wE$6qWSKo?{TbbyHyKOE$`{}>F zTr5{2lPs7-E(3ezr7-Wj`89j9^;HqxN8y3pZJDA?CVujP9-iQnX-WX2tk1OnP|Y;W+pU+dVm(7-n8OdmCB>D#@Euh^*mnhN84W{J;5(_FE;$- znfi8w5O>B3Q}2hX(J-3%o$gr{-iY0Q-uT?9EhRjGF$(X zH^Z4*LKlq7Sg|YmMf5n3i;aC>-KuZrh&);&M70dsZOdgW?1L*;|7z}uLc8Vw28Zt> zJ?mmQQ|Q29NseT9uSD-kmzzT!I*|JYFWH4~1N+OKA)WxN(K*d_dBv2@iIA!TKAA7b zl?h`f;qWGn+!2eP_xA;Qk zmG5#N%HbC_qUi1~VNN!WIVxLl3ODJtvr&gmk-|NHSS49$lSbu=`_>;gvc;yHhWecB z81dg)3Ie{_0u#T8koZav)(UKC^Nr}Q?A%)kLer(SuzvVT*j zp@jE_Fx2@QEuzj}g}6yog?|zLjlfhqSqo%otOlv&&y=~Q+QZBLK>s&Ibwywhdp4AD zm_P#nsOJL!ApSq4sK&kA(hfM|uDs3sH$Vge2>_CbNnuHpK`gB#rrMGh@-0B>Oe6@= zGjyUzPE2?=FadyQ+y^r>f)oL#OsOSPl~Kn{%5#LBC(e?jsZ}Ql)_hK)r#@9q3#6*= zI7*c2P7b7XQ-k3ocgwrXzzHceCZne>FV8)GyLW#--Pob{aq7P>_J2y2Q6KdI`{)PR zf5=es5T+Hdj5ZPSD0_k~GL5PU(x8V1&WU)@N1YTg`~+=MMw~P~NDtLVsGcdJ-9tPG zMsiOP4^>0-OV<#0wUG5xTi0B!EAGWquQTj$k@q?*=J5tAze!NozefJVbuNo@?zg< zGP`vcjYqmp+unXIh93MK=+^`mNS35v+wr!O(ZFEtuQUpr4<@-FW%bZx6M=_8O6vP9yCeTWO3Y2I$0c=f=KI{U8c&jm5WFS`TPC4-m*SxJlUpk;4N za^2U0{`2lz)~Z*nsYWbWWMG=`_Q2}V^ft>{4P2no>bCYhQC_EMW7y6eL9|~jTy7^A z1>kq?ASF+fH~=Vo1KDUE1zyJRaxq8(q`u20ZcZxq-6lCV0nA5>Xto-u`ZjW2-p=iMoi%l4fqHgJ<~PJ}Zp%a|5bZ zFdm=U#UgaK!nj?u;Xb*@CCqPsdE}#mm~68SRrVwW6AHn3K<>}C$hDZ}<(NM13M{ZJ zj0MC67)I_XQWh2)?KEl&wT(hrwT0|POu=RaBnxt}ktYuA_#+|<<}~sUH&GgSC_7jXdOCg$5s44`ri|&f7AOOu9Y_-$pG{R$Xh72LN&g;-2IoE-`B8lp~Aiba@E7 zY$|yOyKw6A5I=DbvPlKNqom52vcw915ftO7T1V5NJ2I0pX7q~lr!{aw+S}n(clT}c zhC3;mjiF>LGP_icxRn^fH6IA3GkyP^Zv`I!xpzR++jq!dXI;QOucQh}806)s6#b1| z(wFP*hWi_$D`Z8Q{P8`CjqNMYelqO_;tdYLh`Sw1XE%X1ftDS;p!R&svMllf$#vZR zQ@erCB*s+${@-1Tb;+VY`3#les_p1+5KvmM|%PRwddEE8rIlJEm!BFX9R9az?^?hZ1L2bacLm=(||WVy#W3X5kDWo9|A_Qv)bUx zXcQ$cC4xrOx-$j+p#h>89{ujvC?_xpg3N9h*Ov+>Dz7V&1RlhuJu4S43y(b)v2_j%0t3LHUU{KD`>4z z#j>2}j>WLm_~mQwRFZelYg2UIP?typjqnTk+S`UW1&0;QdCFI6gvNqO!fh!UgJl_u zdb4~z3@DeKAQ?XZiboqo4IG~+a`54;i<%n~RNZwY|Fnr~JJ%?QGpt*(;rNU=UZtko z^;@;s(oL|%`R~}$&`-11O`zTL^m_JtW$v)sb~}z}XWX*gjRK5n*Qe`Q&bYfF5~jPB zbS(BP=-ej(1*VOL`;J^Mvt))!u=OlxTe3`zx7ts+o%BrZ+UynNk6&KaabNn~%NGoM zIY_cu%V%M`9g83?wm|;HhR!<}q4t7Jx8x^^k!a=4=9Qad@ltwIeG#Bng}#~+;*p6u z3!rxO;dqRm9+hx#;nXPpSW%A>o%aj!q`x{0a#_aZRjXGXt|4SGY{>1*b--r1*o7HN zcF$RF3!R8%#HgLOLzqZvRZi2Ok0cVZ6l>p55jx>5B8tz)rDVM9ayfPvllO4=v_2{S zFrK)%wb+4$;5|$X!86IXT`N_fbb4yziu^K9KJjbE5wJRPZ!{iDutShU{QHL> z0N~G~8_FK=hlQC5AXr9GB4|Mta2gjOcN5YAHdHldB5{9}54C3$D4~j3sWUCJY}df5 zV;w2AscI!f2I8vHtcjk5R8wX;wW-o|Bn&&c`6SQvg5JM9QYEH%&PGx%{X&yxjuit< z6+E0zZ9(>%KrERveTN{Prvx!@*<|@PZu+)K#xTB)lN0XAWq4ZsfA>)i$(`JL(9cDL z#OGW4^OPV8OS9R;4xo>pizt`RdaED@Zd07$T8T zu0?^$w>}bV$*GP=qD;pu86@7K{j}(Z4NsjK7%RTsILaTSM^Bm2&smMk0fA__sygAt17a$vb&G4HAqwiAbMXT+@2d(ZTnls0Bkf 
[base85-encoded binary patch payload (git binary hunk); line structure lost in extraction, data omitted]
ztMj73Y7F_dVeUy@mFm91mC3#B>hv>hZ0hRA#Aee6rV8$DObk|>ldwqEsV)w5=JN)s zfjf#o=yv#bg6u*pe_wZ&F0-JmQO>l(td%0O(V1n*SZR&3 zPt_~#t@bjr;Sp$XvMWqD64`2%zC)i&MNXTeg>zG)jxH8l$B7qGz1RowK6;AwnYVsI2edZB!v`P6&txqV;khXD&AomQ4Id(X| zMvFF&SgA6N@yzhFXS^AI!*u@YVLQ$;XFk?0!&(q1o1*ke@mxuNd48kx`YJB`l!@$i zR+?*0irnTj^%gK#tNaE4kH*Fo434}OG3E;_mT`dkc?n;Z8IwjN^RZLI3LE#t!~Q<# z=SYMG_3{#~TFJR;;g92`)}SRfao|;89T*8VM=Qd5E-a7s9a!DNT~K7;J5f+@WkqZk z3;7-l$0p!sN6Ke+!pMOA%3ZPW>{fZ|J_x-ZtKPU-vhQ)92&3G#4woz2dlrJdc0rxVn`vVap&zt@iab-ExvmpcD{{=P=IJ z9EH@hGcFa>%@7|HF?@+w6wvafs6uJA^nC6|eelR87F@juD26a2rPzdeYgGx+?&WLA z@ybW)Wy!?vBRdIh^lMgMXgJW$zKtdlX0xY|!dC=Y`Gg*m+%%`oOlhhcy{5H-zm4IU zyuj_nupKzn80oN0h^^6Ln;OsQuO8+HSl9Y==rCote@@5`Cf@i$Kt1?}t4Yro^n++<+^9_WKdIy>Ch4Fk$$iA@^sK zHc1bOI&8C#!z@=HAS-}=;~l zpe2QK51x93(()8~?n=Y|FtQ{Bl!3$@QiQx7ao!+ac2();($8f+3WSswk^YARi)}My&-W9}9nd6G z))1+F*PZgMGud_<)1JunLC9{dpm>XZgdwjL`vU7mY{eg(WK!}iWCV3E@4}jsjgn1u zys;h=QNn|Ky*$~nF7B`#s#6=qvW&&DQJjo**oem~L7{k6L#8{ZWW*%*v#xeyQe*kM z*kIBDFCs&@@D_Lrwf5R+E`07RM>gpOraN>YScpPNWm$%0>9+AAKbp%sK~kMPsJOqe zw~0VS`Bir#5PTx3fd`RVZs-vEq@d;iw9jQa9sFCRZw-eYl;8x9z{`pNls@^bMt2~Z zh}(p4{I`83e*jwUqFyf%?&d87Ae||tzsUmWa!><8NdC1Ft zG>#%NsV#U^lm*0Xqr9jz3W+O*@V=@=&Z9iZ#>ShJ=#gz;&T9cZ)=Pgy6$Bxy)yBFCtJ- zqA4a<<6%sgH3uAmE$WPjn7lA~9KS!R|6HePAuwj>PRjsS*PQ=5UNqt}nUbjN<~2Kq z-+0>mgB*eW4lWLZ>wWJ;E@>Q0A7@LI{`GTk=C%ysiSdonD{-EqT2M0ZBvH-0ac~Y+ zgW$aTdw6-;!3XoMtps;m(w|&_!GNOXmK>a#+{LvU-ssIossdc%QoJ;RPZUc$i$nK1~%&+G%Bwu9|1-zT`Eev5Y23&U$1+o6VC(?&RNDjoKf zK0);|Y?y~t8y4~Kn%rm&T-U*K^0|uNd=Q9VDqAyJJc(^h$3H<-ZQbO!i*NZM`03WE z-LQOAZ}8Nzz<2&Oef{DVc@LDpc#|+63C%u%)BydRZ!v}EzQ@J%u|{$Us@FuG%zp#q)(oklNeaY*r;gY*L-tXfYRi_A zitfP6^BbiOaJg&1-7NWCs_ZsyC5Aa^hLF{0ib(s*E>NOwoiP25(JasACN->7k>8*# z&d^`;{5J@#=Q5F;r7mXh(D2Y0!TIQT5K6;@eZ2mI|7Mgb3L$ODiC6*q&hl?1h>}`Lfwcdoj$Gk zaI~)vs9ycxLzVGFkuKeLPtGP6@Dgx?$FF%8AFHBU(AJ>=Son0Iun7t9&^;mLz!Kx; z8Wzjp7h9=B87 z_#ScJ5I7cOt^|0xui5{w+rHG+aCb$AW?*psRW@O_5jL<(BmV~&-AJC8CRo@DQ|n-< z%KQ~05OoU~NZ-*V2gQ`lw!-9Z0jH&&qi7yK9ROoc%Lxg}*ZsO`ec&pWM~}SI2f{=> z?NBUInhG745?dj>RHMay0my9z`GK#EXYUJa6vYG2gjI>h3vHP#5jq6r0|o2Cyc6B; zJBkT(Pyl!;Dx_fxwx9IIUW;P5ZTjtC_vQ)}ExrETdVT+M*RyaE+PfmH=8(tYBP0v@ z4fSg&`I-9l+$&dB>qB}`B@?>)=kX8F@TKa(QctG9)cA8Ie$R=aG1R5*T7fG{EpM+| z$hb474H1!$QM0@)ufL}EhmB1JxwlEk3=+x;mC^7|C-ZoKF^u4e=`3!(YA7$hHoy*j zWkOi%hgBhlB-9^WA>20GeSNEP`RK6$W`3!iRBT00#Ox~5oqQ!GE$QEizIITTgeO4Z zMzGyATz%!mU#4cy;&QlfsWrWGu-S{CcH8yzR3fXUPh@3$a-t=8f^VPTI28*3W3t#Z zbRqga#a;94ZGKzBf0-^M73#7?;sPC8x#jq(BE-*%AfjNC#LuNQf>nu}jNHwE{6-+5RMlG#EW^lvdOqhj| zI>o*?Z}s>yT%f74T0S4OV720peQP)$hZFEg7q*$* zLlCd-v!EWEXR}4>UMqIx?s~BN5t`hDIdAKZE&Y*Z85ub+))JDL!peypqhjNsh>ZhV z^F(P0;UDSEF`i|cO6ED8ONubGaGWawEdX}M1_j!LY)9RVNG$4twO}Qb?9&z*=Oc7a zz!lg)NdrmY3tyu%XIl}cVqWWSOiMtvvs@$;u}F%$UOnv!^%=WC`lTz>-vqsT{92sI z!W9MsaSg{38}o1fX^&tBj?2ZY4sKgil2w$f6-MZI{+igkk8XKvQ_5icuOllLNI{%Y z0f7YGtZTnC9Xn=KNKZQu4zUG^YF{6sl-m?1Cc^YID4jq*xJ~V&-)aUDlpnAzXM5^; z$g9w6xPE@d>$ux5vzZeK!g71{@>G_+<%U)Ajq(nxjp_Fd_sta&U!Dm9AAG^u+RfxH zg}xPFW=k9B!k`AyW&|(|ChlVAZUdm?tjMy7-t2h(N_c}-W6pBvF{eQ%^ZqhpFv7ZszZIGdeTl>b?+ZaxMdy@5 z{?pcy9} zhNtHJqVI*s(eK4BidwcR@?P(@feTBKQ@Up9rf;MDVk?Vx1n2ll9pMSsgB(FAxTJeX z573r+jjv_T+Wo@ADC)?`E26NWE7)l+l7u2U7JJ@EFhCc%I2Dta8Y)p5JPZ|+A_Rrc z+h7TAJh~z}`Ji_=5!!m2uk%Kzu#6MvfWl8`X)~972K@MkY=H8g;##wsG}K}`K86)l zaBfXpBi7C}ZwB14oNsY|0x_FL^vK+;fadXH>|`C3z6-6ti&$JpQnP(sUJc1PQikDZuE+5-K6o9pRt=<~7XDN$CCr>AJBH3Thfd}rtgG!e{+sMg@SDemtGmCW5sX|@}pNlR&n z>_(+xj@2Rz;S6bg@V4}v8TDtJ8F2M?;i42ewQPNAB!zN<*b98-yH@#Mah!8S+iVs^ zGGrCCMyE^VK-LeJN&B}Ua)|SSUd9L!FkE9o#0;kLb`ZeKpiN;1iz-bc$w-bj&#CKz z?K}UD_Wt2u5ggrcncOX?&QVxX&pB~{AH)sD@*)zbX;dqx0oyL8jwfNZRa(A>UanK} 
zLG3^d7$r2Qius~!f`{{c1R`E56?Y0A1E`OHhL41|Xh^>+pZ_KNoHUmUn$s8CEI(Ep zuHz3nhWicqNOE1i1#$oj_7ICL!9-s%cj8YFZvgRY0W65&>#(lLzAzqnVg64Xn_V0X zM0RlOEAvXi-X_-4EG$I$>vAk;T4X|sG$vAbDT{rICtK$+Fg@rd%=kbt^5aJk35m`X zi4OPy<+`a#pH?8>Df=s~hfZR*6JUS%pnU?3{e(zWgb<;6U>sAn$Jn`R&^CM_P*#0r zw0nFYm~28KQ!yssoeywv41<6o;Y04BI=2sjiRKItl1Nf{6q->u3FjY?M+i2wF1Rvr zV$&VLHk4Q)hOa1GOB%rHc_KVy2pbp4#Hg$kQIj04=AZBaS-+*gmhdfi z_0AnF59^#Vo#dauwxBoNuP?54_~o7Yn>Av>{`ci=I>NtszP48X3UFUJ+D9DOu0p{c zh`v7K{m-B9$K$t%VTt&u+rz*LnVG-5ist>Yw=4b{lcg~2!22f9$@mFWUJU^+?1my1 zPCy5rub+m~R4Oo8(u9ytkxoEXOSWAuf8ACn+vX4(&V=Vy+jaYEvW@dkN4SsVV(+_3 z;9GoH&t@=;k161*va4ahN{{(qJ8THP6zhYlqubTNcoWYAo&VdY453gtTONFNaJK5g z`!;zZCcR#`h853zvQFkG*}ZMq``WO6^Z4>t>huj-tWAke8OhwBCtrbifAY))EkO~5 z-Rk)a%C6tmPR5nY9L#Yh7Cyf2xwa3m9e_qTy4Kv=&Ms{{p^&>0PaJ0vL6>{es^Mx2X*c)~xM{36kW+-_3k3KKo{nq2uSk<7G;ynbx9ATBZzr zm`${BIM)G=uRonF2|AslLq2u@zu?`FIkVUk5NBV+Wz ze&X>3N&(!c=vwKxS~L{{RVX#7o{f~ygM~H*RjpV2=q=QP+(zhhGKtbu=}@61pzeva zv8+myq;mg_!q8)_za1GI6fOc9mfb;`{1TH2qAoqtkaTg|rCZ>pJ98xpXp&SD(IgcN zS!GNv6+<`w0@{nn-W!Xx1$R})+z4w10YF~+b2L#f!Mt#TXu`OZ$h#XCimjtVdn5C@ zEVicfr^<|d&qtuI6Ie^=E8${Y?BaVE5i|Jl3>L)N* z-G7LQxN!wvr%;BwiB&j}_h&a)m|Xx39ALx>E9 zK^!gW9cP=P5-ZwZBGTe_SZM>MyUij<6|Ec^W%VhNkPK5;WVcKnPiEUY>mrg&g@S`uzsIo=riWyeMM}CWJZ=)Z-7WlG$+!4#hT`m24WI0c^1jm zF&SzH^!q>ys9zsiQ44ib|B5XUs;y@aQ_{}x`K?~(RA7}?)B5Gf>oo2a|6;y!Z;_Zj z=uC3=4}EPjKD*RzLHERRSM3U=ipva?y>b0&JL}FZfzaKI)QxW^_#1e{#@bMKFhteI zqX{wN z)?mtQg7*r1+n8D32dhHxETmN9GWwSjA$R8^z)Ag^eL&DA?0l_@4GJ(7_PywuYI z6%}Fws1+s2Yb7sN(Koh-EIZqxS8&s9by{nU5s7YP5v7yPVKK*fSOlGaWV#W3O>5wK zAXzP8vP|`j4+8C=6Y$YE+7@%ygqG4RIClR|seGDmxY|n6Vx`u~m>YwZDl|#PXJ@F0 zLv03yqw=zTmUZ=BpUVh|)rHDU0V%bzM1qY0G;gSu8><$tp^QbVjvuO4jpLsYz9Z^& z$R30KtIRZ|70HVI(~`bb`4m%lFGXQ{0knMIShYp>9W$<%0=1yy34c2eMwxwSQ?tzy zTp2|B`s?WwTQL`OWhvP;{*e@3+`wNt@BGrSuh#%!yVKD1qYX9Iv1A zI6QLhvlRMFIozr7vC=bn-UcCccHWe%mQa7CRr`k%K$)~{>lXdWwyN5YX+3JVDwSI) zXk!@Ga-La#D=EXz42@is&%BU$Bl0ZSuPoE8isa~yYLBW3GPOn7^6GMN(;1z&4aHC<-*59zMxFoLt;AQKj(|CI!yi z2Z?n3<@#qjkFi;nP!$#{2q^hX^9t6I70Nm>mNC6p%d(;dVnq{oB5=wEN-~QE3?*m} zGZ=+rgX@H=4Wxx~=9H7P`H;#2Fun%*jfL3UQ!i&%7%itl7KLp{p>F^c3&V8@*c(uV zNeypbcSL%WaKnv+ms3EYUirYLh=!N{i3V@26;iDHZ1bf8p&(a19sKX!|K;)|DK10+ z0O^0m|Nkdk`M-+0#;si1+Srp$q>v+{8}DF!_2AgSAYx6u*7fu{G{FoT1dYTWWFf9# zF9cxHST{2hY}Pr>NcrvGLEj-fr<8Xej|perF+HcW^pdJCEi6ZEoAiuAyvnPptGlcJ z`TPS|PyYS=cUGQDAN-9zc%$^&^&b!dKxx1U5P}c^5kXfOR)DB}gFqqr-h*AGt8f)A z#1;Y(FT@Jff=BTp*%@}ANAV&waT}JyNBJDQ4$MVX-~cQ_p8?z$BKth32iOPLLzN2> zh|=5O97QG9LJeMVqH{~?dPhd<(zH~A6=&vW$60oz?XI)Ms-tW{9}_25cdH32XN4JW zy+p0mVbxi8LB54x;(9$jy)7?C-L6xa71^w_WZd0j!Ik$mR^iW(+xY>kjL8)Pm#m_5 zYh_(#tgh3I*Zz(5&(wf`>|NHlHpr%Hzu3f|>7uswYN)`}^W1hJDT;?8YVaF=1w*uQ zcUwg(+s=X|@)r}<9T)=113$ooXuNe~>#_h-c&JDdN+|V-@M<>W3KPd_ZfCLIUI}S!4Hd5T&f3?l73*EhDw(IS48f{3wBCa(qiH{A&UKiy#V}od0YlJj zy)=uCdya6nUXg)+*@#|HYt}169F0h0ELFn2k|wRs2gFfmmNI}3UXhAC`Lk^QRJwXC zwACGy_ zTvxK6=44&@;Uc$|Klqt?BGZ+8g<`a>#F1dhQ|Jdg~Nr+PpMHc#o0F!%(;GkcH# zG|#|MGtdm9r*OatMo)Fm6P%91F>+vvFs#r4bOaO;O+XhY028Dxr~~`}9C0P0c@aLO zI$9AJ2}0c93_DXzQlcEpPtk?FGvErkXQH5dKoLv~)iZhk3mV7hp&XcwiKBYJ5!{aA z5jx0-z=+_8fJ{IpxC6!rN(A^1;t3bgO>i6djyNJua2xCaaAb}EU+4q|b{~3JEmv)r|bbPShCEM$RY{wgk@;Y$OPu7N9Ld==$^5ITp&NwyGkHGV}{ZJ zC|Ew_JyEa)kN%)efW5B5APxzv+6H;@uEzpc-azmWQHF(Jzavu6u^y4y1L)tVyJ-^1 z>xVCBs0M%GKHJt|NGn_Pt^In-C$}Hs6HLG$rZLUgD&j)371m+L7+tuj98r>l@luS; zEGi7;{@qM36)e|ZRKpSQzeb{rnAzBrWJiv+^4o9j%GxIhw^qmQcQN=TG-MH~I3_m+ zGB|&7D993c`}%w3=0<)TVVA*^9Q^Yk(vL*1)3D=fvoB&_Dkoz(li1Ic0D7ALe%y?4 z!EI!0Ny*aM$#liT!F2CpCN+tRenUXTFI76G;b{Bw>(SU{;;eO}etH?8e4% zvCBA8Ux>XkUb-zeb13E{VEk(4NOT1!`2uB$9sbopPpcRie0@u{*3LU;usbg>4wN^%@I&$uG0354VIk^j!!(CY 
zj|G|J{O$QC{9pQRNKRS;$?hjUJGd$Qkfa5ajb@p%=wV|Arf$R-2XT7>4_nD-DE0`roG5OuHk%eEQ+KN$g?-hntb?+a zjtu__57CaR>>;q-&~)dXsY(Vey`9&z`&RR%Z1M}o&<=C9>MYb#f0U-fKh;$mEv};L zDiM5`7H?TY8_#u$vA0b5O#LlcG^rBSe!G#MvGYu#E#AUe<;ilpDvJiUvi&3~gGR2& zIVUso@C<^l(#XwCjp#lUJk-N$W@@~}Cq^Rc!cz2VT~1mn-#P7ta@z7Dik#F?vFo;y zGSZj6QJGnWi*&aOB&iSj?St&yQF?9H$iPM>&Z3^e+)hR(IoFl|IuS@x~w;}B5zDpcK*FClUFya=IZo;_%;%R!&My&(NzYn zL7W2CZba>V(RqS}@e#7!>pR3iS~r_5wv0`h;THOz?WD6MF5pXrc z>?pIRwSP~=(ahm{w$^GJzr|RaC4^MYjD9x7YAe;t{?*om%y&YFsF$LXSKqvG;P|vt zy|RH{{DT|TICeZgDeO||}@c2L;v&y_|KsPF)>2x>>L0aj2!=>r9tJXBIkT zzJW%o*eXds9rgQj@FaHJ%i_I^C)v?PUL>8;{gy2Yd$pQo-g*HfBWrJgdV=n-pD$<6 z*+?@V?sfuw^8;*K?@MB5?{>U7n~oKAnNk8%CXXf;ZsJVxd_{q+7XUZm*4A;KA32OS zgcwD5SvuWbDA7GDh#LMu00ed^Ff*06=Qy(ByWiLw^Is7F{(<-8l>M)aWWKTE<6*WF zf@68+zcK*+X%A-@{(J%WM&Akoj||3Pz!}gnod^RuCNrXGZj=N!Ovm*g6!lD}S^!VL z#&zHfSjJ*N8Q3wMhyyz&GqV6@DU8KX$VT^S0R}OQ#(*;%#$&)47{*2+l`szuNB0Bu z*#}*d0Ie#ww)t@3x{zN{YCOu|9wkS};2v}B9!WBZHp=hs+%addrSr}}cnYJuq7TLa zL?GxTtgcBht&%=7^0-Es7k<_B@ZVEz_QHLUyyqE5`Iuh}3){slKM~kb?`1_LJf~gc z;A1?q?-y+@{(s;P1?j{L2TCiRV&pz`F*tFFrZ zZjEOjS*Atr^jmf2sMUSFyq&>$VeDl~IrI^t8md%nkx#hMx)isT?TSIR5zCyhodpMV zx0`U%+eVXQ9~u}kT{DOZEv3oCgv-p`E+IepWt3vN!vB~SUrb%99q+XXW?y9O13|Yq z{zK$eiz}%u+H&<7x!CRaUVHmuVJxvw7)1}iOe!A4Yz-T2X#2+(vd4Xi<<3bOGykz2 zX3D-al-IYeV6hx;yY#90Vfwk1Rnw=ar8#@_!EfkEnER8Cp1$m5=|@@0Twd?RhdFnm z?A3?D!Efg2`F!akRK0vblRqLg6enz%dyfZN=VDfvqD%9@*zA9V%j5J)*9wB8R1Keu zap`;Cp!101_R^P?}+0q)++k~9Cb8whN>ZOC`F zR6YHynVQ3d3n^g@DF?pm{62hE%Itjzu{jLa3=+3_`G~`24n^ErN^8yK-Yo=l@f>B6 z^EdYt&eoeXHD6!qsuP%a;v1nsNs$eMOyz;PpP;Rw zkjJF|RhZ>9PhBD|pZ%V(tr5UJ^jGIWI~|0f8YSz^U+mVfZNvEyg;k{({1Tt_JSQsJ zemV3ZGt2SPum16^Y};kx__#xfgZ>N5^8(SUHH>0C?JEgIHF0XY`|lw)hpWIuBXnSV zw~!#&SR%_^H>(a>MH-A%v_Ez`rLQX3^gE#4G{KT&V$sU`#L!2xy|GWyZZtB@f&tI> zXDQk1n``e-&90?1^nA5lC#h0PunD~V61{RKr!Ob0F*p~aE7hnv5_fA$Or8gAI?Y|9 zx11FOX=O*JZ(Zu&&`@9GO`2NT3(N>nVc^1v3m^9i6*75AeY{g;=OU_hGz(c1Bt#AC zpx(PGe$hpW3u+@j;|WYqQU(s>wKv^qJX?H!WM7Qqejjsd2CCSz^az@Zn9+u-z9bXK z8cDabB;~xUbC0Nrx&~gW9mC7*_8_T|Nh!0Ka^cdD?EVFq#GRO0MJNDqZ4cCXEsY-2 zmO$*jBSP5RhQ_U3xQ0D5jh0(PS@d1M12na!>sA3#>Y|)h@BL{NnB7`!kBcecL9;T5 zFjch}j#s8QZmt+hsj*>YN@eCmw2vYLUNj=pa!*}rmUr&HctPe>V#@S?xed|U*jo7einJ31+xF`evp7`Qe37gU6a5?`4766bfWGw5vv38Qe^{oE10MGmT^+g(>+Oiya3b}=jGErpeAoz_nZ}QmU21x~RL715#?k}GQn5_GZyiHYpSbGKp<4|As}Ygd!UQyH{hkvZDC$lq z;)Xk>i%(CeYaS=D>#^wx(Mzfmi{qgG$3?kD1y)l zl5*($V>Ah@dw~&xj3kB8%rqxp*K?XOb~;FUKFs2ACCEvH5|@T9XVq zvx#*?5*+%cf+&RnmKg5aa%dT52!m{rC-z@?DM!uFwIZ!Dg&^}mokC&^bC^ZgJIa2U zDo%q8W({AwW?G(WclD~2`IHtfVzXTev8lVki;TBPUcsoDlLzS#O^i9#gh^Iy#z6%sgelq315tIOYF4U}dwrs$41MF3lNOy|Tw+UP!qMN3QA3SX^jXRTpu$A&OMbe<&19ZOfN*vf$iNxXBv z5}@1y=MtO?NfArMVn)Jg>M}2u+gjy4I-5Nhm$?PF=Ybe>XF3#o6#s5Lp)id~M0b>{ zWl@l%R5fY;fV&7!zU1_!CE1g-0bM%Ev-@qh&pw(I!xw*kUUSx?Wxxe;z9MSS{8q{(O6k@ zmdhI5BDZ2(sJe8D-A$`ZZ`rrnjNw^Mv+!r-t%yPLAIL>7PhxkAk#F8+_XZqby~rk9 z5alVlwz}%vtpv6^*s38Y@NL-6u;0hrM7+zmr+&)UK95Fj7Fs;6)HPuf=SeUUVHIgpS1lw6xQd{as zv@wI#eOmfdffO4^VTR40J77)0xU;kP=$Jkp(Xw6lBglGL|U2dnV{f?MR21qt@ zl&HNT#GU7l9gJmS&}y<6oR@Sn-P`4=gPNfC>6H=@D4tswp><0sBPj(*qL5X?hhH;` zWe{+WQl(OiDO*r8(E7?wvRmt;cRE@muE6DXHD%-V+G#N+m#N093YRm^M$cou*I-+o zn$&{ToR-`2W1?R4&Xn1nHP9Bfb}U;Bu8t}aSal;-uMW{c?ZGzK3M^Q91*SZUMLBN7 zf6=k7kb89mdYjpN|N4ph>8XX_MvBlra(kaQ-#*uwzsSz6d`b8?3mESXRqy}VeVkcT z_~;<-!N7P;jjHh(#NKwe`+jU$jC!j@r-h}a(O>Kj5a&657769ZTv}#=3|rw z=bJnL5z4)4INAWDF9N{ZOrj2z|IFIz|7yqT@m@G@{CP!*{gw~U>u+NA@cxu7hxvsp z_GBq)7^Y?3on70xOSI;`ZDm}bl9ZzMO4}`~`CC~kUaN)JYKytr7XH59f;(q2j)cnO z(@|gj;$q6*zCJKH%8?&Sn=NFep`S!rW)`WP1{t=?zoy((dC+R~D05rjOTgLu5J^7@a|#kb_~3HX4kd~C 
zMEgOAmQQ!}zArd_lqB688qxmL(`fKb-2@etrKR)a#B5|^=)83CvR^?f@Ovpzl7pXr zrQ*u8w=_;W@X0LxU^Gg*DW~WW%_cSU9sHcC9Os}KB4o_|{L7a>!(qP1z0di*G*Og5$vYD?F$6Bs1!mi%e7UxlQ}5cFtMig%}La z1SC&c%)hXYzvAxg@~a1>plmK}RBx}^k3Q5|3IOwj;Imd(_JW1(93dzH#< zH9x>VpBY)?C=>(PDvONi7tqug8MS=XIKwfs335FiOMSs_V!$h2QS1EcpJ(Du-Ik`M zZSSsvGgv(ktp0v$&xbVoo#gyZ7+MeY0X3BI?TP#Isr9mRehjk~OM$;qewX`?JXPjX zjAG~|%isTEX#Y=WkfO5AGwvVnNadfYo%sK2*EMd%&{f8sY$O%%F^=i?{fp7y3jvel z`%&c-a~0CU!9?Il?bt@1szjTWhyYP*We!T#SIwiN_m~m>omfKkA@3=ry7!pm^5#d- zAeqBOoa*J>Rqp+1?A9d?{F{UGwZ8wWRxA9DIk3O}&dde`N`M#_C=-AMzzT6gf&&wP z6A*$0#0%gZydak0DFW*oydr%tUQocQQThrnPoBsl2?nxO$qt_pFF&mU7IxdH!ul{0|6qr z00{*&6@*iwkgy+Iunb^WsIE*PPJ!eP#i&4B;*(R3FejiD&L5Hr$OPpAGC@5cR!}Fb z71D%sLD}FOKv&E^s0z#k^+8=gk60De1ogpPV2@xGR|NJ!U4V~h71;##!Cjz_& zdw?&X6Ws;30X^WCbQ50%w}B`Hx4}7}qylq+S)d;PD>xJ31$Y5IAeV#_^n$!VobX(* zUNFxb=a37@1?2^tAp8Z>&@)&dyii!|0#Gbw-XPDs3vYt^fWGK00KCv%aL>RNa*ZoQ z(z~XW24*~l8RR^tknR$y%#$gk3xO+4*QN&a*o5*!kkyZr6fEv|Dp)H(6{1XvexR3( z6{HH&1xyPf!u>(mkK}}?q9^jivi!ij^4zNYAik*2Fz1%99!Oak)%n4?lIE08)(|yv zcLLsV`%c_LwmuEAq4C~hG?3-RBJ<~B^|Udsu(CJD<{rm|WILT?krr&&JZQ53{AivX z8LTYwovw`;FX4!zyFLN-;eQ6lU)*o_!li7$R&bg%Xnq3TL zFlfuTsSF;y`dNfsR#>*=mKO@|^ArQ8`pn@ze)-1w*qO-#5j=EQwC?gEMF$Iu*wuLW z1@+%s-UF+(V$s5~zOC@NZu0aF9d56ViPx2RCU2Cqbmz&TwV_g*VYE4}l(o1%BY-Pi zw7 zq0CS9pelcu|5e?ZLkG3?Z_=sPYL!|YH;jG$V4pcC5=)~^ZCC$m6OWNjI=115uU^Zx z;)wfs7XRYCIJP^nsYY=QUi=eTlRI6n9mtjj_wV3hb5;5>)*rR8{ zy))J}+I_U^<<)hQuHoc_7J68@&GW|ykX@QUl8=q&tzAQ{^-liz1PQ&O;piVfE9DAB z*9BczZ*qH-HqL3F3#V~mM+4NAWMF#4HOtoHr`l+>%0`q>b9W+j?IZM1CPl%rPp3g> z7G>*3Y0ghO`n$p*KxoSg|HDdVk4nAD7sgL<`qj)X*tcQN0E`N?32(Ob+Mtw0(;Fxz z)S!Ibs`@(43X|XYU`6-I?&BhPW|@4tHm-hKbJxb~%69#kN7ov8a{AV=lu^fW$gjfN zW_i&mS*1@nLCR;$-U;9kX83@6su(hac~a#_?osQ}%gN{-Nr@xDoU6RUT1rG$8IbQh zs{tPdi(II~ohW4c-x5q@!f{R27(XO6V|qhho(3abeQ~o=nOb5rtwv+$=>Y#g!!FeS z#4Fy?Fz?LeN_*Me)o0ISY%HYW-fGct)8-wr+So4SUx1cPetzO^oK9#4&c>+o7{jN z@=Ma;j?omLPkNzYD7Wf^+1AarA=Sc=x^bJSurj0oe0GfxZpdrQ4>Iv-fnZEz#B=O6 z4A`opF|Mc=Yf)ih&2Ro{#7VWjoL^`0LRqk+IIVx%91OJshFU_|^v|ON;mMB`Ke<4O z3zHGnuH8m5`g7Vnry|Q$;3(~qEcEbs{KS|(KPk4N_(P(LNDTr0;`um9iTwEQT!b#s zz@`RgDr~om!DSKCpzBadv0ODhYUF(lMy%4qGjXK{QW;QvmWPkQ6~6MlSu20cB%$Ln zH$Nv_-^5r;7OoxMJjwYok#8JW(t(gSD;mi}+iH@EG7CA?3CkISF@yj2`5;G*`NRG@&ip*v6t{Bd!rBN_ij?l+IRR4%}zf0sIRvb}kf4`Q?x+RaBk)!pS zF0rRWJZ{>19gZ#Ak1dznuDP6-zbB*K`B1&;pYe@XlN&E8e?HS8?pNV@$T1Ek&tbs+ zW?MqkslRtnJ^YH#&$Hmqj}W(#;2qNX+$LrU&lK-N`?+?!iF3QscKCuT9-v3W9CQ~# z(#3AMSG?;x7z9xh{@kfn|3s=F12*ht4U|RCGiTFDv9)@#=~i6R0_`JIAZ?J{(*k-< zjm_^`+a-}HNv;yIrbxxb9lP7On7A~IOupOv&XD%>hL_8degL-* zqXTs#+*tH2KIj=V)6-Do^qvi({srAtdB2NQL23xhliTxp`EMcuip|8eItTy&+&>P& zznW|SV;g&CS0___8%sMEQzv>mdl!9Y7bi<&m;Z$`%2m~MK4L@pry=Ylfg6Nn%<`Fq zgQq3X+Jd1eRnm4Q;RyNr&Zo)(NtWHL$Tzv%uKsYmonMy=o?IK$J_}ru zQ@#0FyIj?{iWw{4)=stM@%eXk6s)Q(aj@4lCE2CCE-h66ZFEJ5dCEbDacN1A-z3Wv z9~_I3Dsg8jl`3O+ykn#iZB7kK=KCeDQ5Ih~KSy@KSg?!V?4f&XWJf_YlH2mA$_<4& zwJ3IyeB?|lB`>996wRbAM3&?}Zac^xgBUKvkA-S_1GWJ4jqIOdh!2Ga47b1Z_29s` zb?cG{wInuf64WQ!IWXhOn@vx062Y;xbEVHEruwT*5v{-Ei9`kWfcbG{NJ$FR#u& zZKLkSqD3#-u;$WO_7yz%xUuTWo-u#xE?c|?`{mBneUd0r>k#Kl|Lf0?75iP_e6IWB) zsbG_#Cfew4jEVt-+!wc8P5XzrXr0c>h2G6@x5)pCYaEWTYZ8A@8ve(eQ|OH0{Y4a( zFdON?;xTK|pInlCIjdPRcD%r#B6a@l4@$Ib=e$#^nUI?gtc+s%2zGTzwAO=Flg{Y+ zGIq>i!maQ%?me^JL~O&*9{_s%I{kS(HVW;iLGFnfx1iIy?>i*15B0ir+r@i6=1w#L|Ela9cFj zaG6)(aKONNH_%d!t7*B$ZoD->zJf!Av_!zL#7WqZ2H_;fEmx-f=U_0ln>V3+(Hm$4 zAJcg&P}Y-RC46NgMDA&1(GEXI#TLl3=`h9X;}Aa|#23=?1K|-xtpm==1AxJgrzrk4 zBSTqJWQ=qmxfKLvlm{v=R%y4rc<1kB!TN@CTcU(AoyeO8L{TDGCCHXbtS%p(al5zE zzCT6|e7{y^k!a~}eYn<7QT-Pt#fyW{)Pk9;~hyf0Z!#UlgZ@Z7d#u*CO& 
zBeR4z=}{T{n@Yz2E?WN=Y4iV_O2(}!vv%8J2zW|Jw984fq-r_O;JZM)6alp3=d>2V zWN|nhtT{nWi)w-b3p=QyVf;|;nA)MfPIv|O%i8!A>v>uQ7UpS1HYDvuq=Ta~aBtb0 znf7I`-rIkl+V$mpGTesrKMRT z)C(+yI3mL=U1{luK3G+<6SNU(20vh3qQz(vY!P4~)CPA0e#?=%2iU7Rp}+4+^?~}} z0>Rv|-NPcVMz9$Mb>MnHJAmFFcYq(k-P83j5&i=Id)-32*CvpOxCI(F&c3I-TfCrV zh()3JtN4po8i?dfRFY(qbV3thf>RcCb6t|D;(1;jXq#@9c&160ndKGrp*JN5I_2pW zw&de^YeS`!W)>AnNj=|8a%^M}?tkpO!@iK+kIKn0$d zGCL(81)s_xBShLU&Ak6oKIC8OZ(R&Ze?rHEB)K}6Ypy>{3=`JGOm)AKWJKmCPtlH_ zz`oD|Bug<{*$##{h52IrJYOIE%`Ax_V+x}@$m0gAt1jm8H^mg0Y!o}uo3R)~k_OH= zyn{0xm>zzwXT$qFDHMr1;Womg>~xz(rKK>LZJMz(#}n!B7{L)S&OnJC@fq4k)v}aB z0j zOz3otuphoOpz__M4Hj^K^l#{lugH}ttP*sC2}KV4VG)?Tz8_?|YmsCNkRo!!A9Pos z9vcJ?lVGjj3Q%-d1zRhORzOAYIx7d$fQL`)cf4l)OXZYtLd2T=(szn2B3M*i_ z30tRKgxI=jm()|zY>M%7nA-J91(2N7*Ua)Vtq7(Wj>Hkyu;R0Q&M)CNZF3KY_h2nT zjJe57VRF>PTgRy9#|92?4LGJ7;v3vum_K`zO5jy;o=p5>UN4d>btMtxS%)r&7Ya`l z-i2LRq@Qlms;#c^6EzdQhe{*9Lu=7KQPY-)V?J*w#KK^aXXcMS=68bzcY|<=6nm(G z4PSE1dT(LXlFZrMD1*X6J0v>CZ6Aq-nAK&bEwOoxfle1Kda6UNk)auO+SD8@Q5h?! zwMyJwSWjGQzZ?cm&zyux5^BRt=!T?v8AbL|))IKE!7Sh%86KiF+$zb_i!z;@fm60s zu{v=DO;^9GI7`6W9~o(`S`-lFnh!)7BYFg^$WoJfG5t;nRWEO06{jkbO?%#Bf%&A8 zX;yJuU4{@J1JR`vrN!g(Vq1=x605p!M(jx_?L`E2fy4Rtd|oRvJovzuea^D7s^JqC z2>PyqIq=kJ%9t*Qf9ZkFo%|IaZxIvK{KP~m+F^WTKe@pA%1keDb7*L-Tm9ga0eWa{8#?`COY>TK`y?VDdNEn%jS!17K-C*6cPG=NRT@dNpdMi9?^bp*_cU4l#_DVueSlCCxx;y zP8?6!zry%`8K_kGvZhT)lKx5S(W+U`aNgn9EVFH$I(1O5$JOigo3m04%N(kvbQ_^E zP>IcsklD_t4URa?);m=jtqMf`2#NwMbS4x?q(o|Dh&U7hV;skaQX8p+U*Qv}GRUvS z_fap7!-1My7~dc$ZI+X3KGR7p2Bk;>LR4)DjRr@ID4K&NLqjgPqeY}v8{k=q2b>BSm%D#g*07n>)~*Q5?I z=6=#fg(%ab{Zg61HBYq1m<%Y7oDG;yu+q+B>74VOb4MqOGs4~u5fX1a>^j9L>oDDfi!!>OF4&t zQ*42hKy*Fga8R`GaumxPVc@0<%e~GYzuNJn6l|fLEQpqUZYr= z(S*jpytJP{Mq@?D1d>QnfN1c0ggnnysc4NG00osID#nj(M$d`>oFk5J5^9%X0S3Zk z&?;0Lc3CZ!2fD|i4`|>Q%RGr%IVVZdBF_)?x$-fl{9O$m?jxfs3GkfPGM7e+m!lPs?X>@dh=*Q!gm^rU1#= z3Prk^_k|&VN(p9Hp%E9Lp)rm$a&oXA3(PnZY;51~!x(9rQdC&r#tl~H&;sBM_qV&Y zxT&FXjVTpSCjx_Fp`%bBL5sa(0I2FmdN8hF5dr8za0@(zSQLzIfV2SO1G2&Y;j55~ zS(E+%5LnuGpg{M5or6X!2;hL|<}LbMNjU$P$#A=)-AABjtmrOaaE~Jy^-K$j^Z$^xHfogrij7TJsE?WSX9Yp zgWF8>K3p6q4#i~Aq8nH31IaR(Iz$ zM-+V18IE}3S>HgE=$mDwS-d;MQhnUzx;2EZzdOd`>z3~fZ~ywZT~v+WImA1iVBQ_h zVY@Z7^sV)N=kd5*6jbUKH!gIFKMmaC#iE9NyZ78U#yfj-^xqrssc{RVF5Ye%W$qu^ zwhe7vy>3KPAJu|{+gJBI*$ZRwHdDdjsJMp5grmY$`z#kIbqa$@#r=aDu%g$!sYy_& zD?kyW#Mt6d%T+2_9oMHq^QWwOOReb&-$D>{p8L>*2_f*p7HdE_fQYQY!X6<9`fOqu zhb`qbHL;YqsD(%pRnTIc(0lxj&}0Y{bKGQ1&-&uTx|i&**ejQBNj70b;7nUB9jOPkk%Fblm$N}603I?a5Q}M|H?M!V&kPS({3a?KB3i5vFy>Qo z4PKy_0pkHL0`RaZ@q9;IST=e?cQ?%MScqo11oix0S20zK{awy`tzxxs%bYJtnY^4! zMqUqZcTZ-H{9GMx^eT7kPkzq31zW5<=!6Z@#m1?YRWec;EW*p0@T$60^KGL)HAK+? 
zyt-r$Zv){=xhtt(kSIO^A z&92F+)>HGV|AVr3U=jt2mUG9pdB(PF+qP}nwr$(CZJTF2XKZC|s#3YRsl1m|?ccC# zt?pi5r_I+aR7HH1;lfe^WsBTvi_)G_lDiRrZVO<*>-&P*n z32hfk!l;^V+o(|*c7#s^*-So1v)b|L??ESQkR6rer+v;;O0tAKT~>bQ8`&GEQhzkX z(x62+X%oVbPIxu_02N3Vzp zw&uiG_Lwwlk5abVRg=feKF2G+u%9|<=juO8Go4*8+bMPm6A8qh>OX3>O#|6@tRwv0<-PYzdKukwDQ;N zafNg|bc}V>-2^OIbu!i+C#rCaxUQz_y=81vFq{1uco}Q=-Mp1Q>G1VmzLhXv`N$PTof)Hx}ebW(E!}Ks770ukSm&D zF_3gqG?!E4td)D-h4z>djnfx7@Z7NEjZTbF32+dp`;-P2B4ho$TtUhVevyG_th0sxTx?;7m?VVf|z)|_#|5ktM%(7Co;qmpts^cUB*5Qlvqid&o357@e78Hy~3)H7ok{NH7`}evT!wJ;i0-FMFO8D5re~NzeTqX6@tTIe~7a$ zjKog6-M##mu|YTSSA7ou=q)9>f0jVA3pkMPqMpsx`UKCS`R zU2~eMzTYqhXp4d#dN1lx*O*JhGy;f=ydekkBqg1^iPbyTpwH|I(kSnra=Myn))Xpn zhlc)@EDC8Xp5z~4uimLGg=rHMGT-*GmqPQpg_t6vJ>>0QbQEG#Q7#@dEixG*E+T28 zLe&l=;YQTgT*w%oYiy1T+jAGB-A6!tH`$0%(xL2Nl8=H^A#L2|Ix!)4g!>p|4$lD3 z0%`%7;2L`4K-+T&PA1(;k?#JTjzAZJ?hIt-$84>*_Znp20MuTb>~KT4T6>40*NGz^#T`hF^#HzCGYF$Cb}V5Dkn3FMWfx6EVcA?qA{Egwem)> z9xj&JCeJ7LIQRCr?Hxgk3B~N>p7Agclq#)rD;y?)g#yS0LvPQ#I3}#9#NCPKK=K1i zX|iJW%Yw?ik450i+r$|5W^A0HEdJ{Vi~3i_`3_{W%up;q&Pc0(kAFjh|I7!y1iWzz z^AiR>gBZrfulf(rVxX`V)vV&8tnj#J2g3C)4K{fy>YIUHG^|?$@HDJzB#_K26}v)W zmyx)t8{cul`77yc2P5xj(-L&_nI-1oUl#PlL1a9To*_F2h{r+ZmPY~D^q%4ceV*fz zjtrjrI{-i@z1q|>3<o^Ya4#>9C~}t@P+Bq5)DzN;YWr0tf?(4-O&~ z%xo`v(~f&N#%gKTw2P^;diE}X@%WwH&+sqS^pUDEOKB2yaY~Kb#%o9QF+xS<3hN;4 zdl3Wf14g1FP|l+FLS?{~f@_(n0g!g_%Lwx2UD(GziSh1w1WvQO;s4ac2^ZQ745@1c z(QHZ&&V6)e4C5ro}&cS!D=+IwF6dkoET$o>Z@E`*K{6Jics~}u4Bl{q@qP& z?+65jhBE8eDX`kZCgW!Hna*gryfRtoaPEIQFzlDAx7dNP3q5)O(86MZam=5C<|3bk zZuCGn3l-(zIwuX5K?{_T{pB|+`nSF;DAS0@2ZDheBDoA;NG=GAop6~!Bc@udAP1c~zovB~KbX7I|id(nXXb;;n z#O59{P-%mYJX3-XH=$g=8IVdyTJ7hy~X=ZG+%*1))F2UgvC{r0lROhroJbd=K69sc2zz5 zekt5Di+02NO@YKy(~zzu{tjhR6N+VqtFf$7R!g-EN`nuE*BbXd)5;X~+R3tjFCEkT zm$@bB%m7F}q+#48S@nd2!drQRO-`HZfK^`F$rZW&rYrWjS@YTO#~kab(5qC zB{{-~4~X^D8KpdMP%fKx0ZN2V0wH`xK2qK1Gk%-dq@ZK>i8WuGA?!CBtiZ)W8D(zf z%Hm{N3cW$-Ku#uJ4KDj0S^cqGG2X%%$~L|m4!x5nOcUptwmcC96jHu3C<*TJI&3v1 zz^lS8b*Y!iLhyi!zky233oYBE++?Y`;W^W3N~7+?^f1EVp4U{1UMFNV>j~N0qpe=Q zUD$e7p|U3gJTYJS=!TGXN6R~K?{5zWbyeSgxF;`%=n!3php!DUxyh<7cEV(Q6NZZO z1<@(UsH1N=OYN!*%H;1ZT#_h;{}@LPjXNbW8@ABia4;rk^0>9aU+~j=WIX*+TwqP@B|ZuX z!jF-Vqkm=X6(*k1BU<*nyr|(kMJqLVhvKqxuZ-xci0fVu;x{>~u&yXKpnRi0R}hRm)qe89;0t2X{`Oq!e{@B8AclDv53P_h9@(pufouP$|}kk zj0beawf)P0=*A3pV8-!`$UDMzqP$RB(w^vblP#dzLv>uI!vG{4oE97g7zI;GvYeLr z=2??K+A+l%{K+Jn5p)ak1k4zgZw*(ewvuI|>Y@d6Ze7f*?x>`@q;E&lRRKH(>n{GD z?flbO9O(yM5!sc+Xe>CXGf*RuC!P#Y zfoH+mSsffW9EqFhuhY3|cDu)T=~?{WpzTBJ3lwjdeI1+zbW@lqcs)~UYHy|>ls31; z)~uLn%^1%#%-wYqQa*S`a-5l_ui|Ec@cL%X39HGM2LMSFsMrcJR|jNy{Lagp?~e;T zGL;B`3E;AlM#vz-4`GcJ;P!!2D+CXAeHmc#>EGnvTIKevW3y_G6&h4&I2(Ixz2L<) zQK4xkv}ggFdV;-6rhQE!TvIACBpCa+9t*>u`XwUyDJI${IuadV5&f0J=Uesr35 z(z8)Cwuf80V|@FIK9Akd{t}AZ(@@?A`t^(TPZxPeN-qj_iD*F468F`Q1~M!Qz@ji7 zax4@?j4w{2f)Yat41ye`WU=O!1LW`|q;C@wRfrybjQ2krW#LSnfN{>T6goxQKry%# zp@=S#rLW-XMLOSk`KUQB4g@Tty20d|5X5L~;U*!2P=z*dkwz_Vb@Yx&J|#hF&EMlm-O=X#Z^zr~1F!p#Ddm z#9s7c~LHjMx=7Gp?pAXWM;2H=gLG>bi;0i&GJtZ;b!zNhKT zu_OeT_p}PJ(TsRzg12Omga%xCtN%6A5~mGIR@BI#8Cp%=xCN7IVx`BzW$sJZDOvD( zZ3hwqB`8v#v36qJ5lMYAK!~+8y-};bFi@6cu^T6YKYF4SZ7Hn$gfLyY@FJ2!2??H= zCy^#7w__(Y|D7QaWzK zM!6!l`E$SEJwiV1xDrwT%jja-^OjfrJ`k;WNgOo1RLBj6~Ot_{$a;4 z*U}H`L~ctZ9&3yhj2(7za--Coh}N;Y;~0Nvl!BkpaDI4XV6KNHqqkv=N|$H-qsL8c zt~>2UbxP}Z?*Yiwi+~3q5e|k6e?XtOe8Ut7gsG92G6&rapHi4FFqf3QiJ>PY6(}H0 zDkT9yqwno39byBGHL8)UCd~H zM$aMz^P^Hf&;=)4DESxE9Pf#z%pB zv~G3q`mvc%uMg5MjFCx`KfR?zkIQetI2)CL>Mej3*YMZq?ujho+_#Zg{OKQ);AA92 z`GVZMfefk;6d{9q=fZ5NdDfuzVJM1=@(p92QM)cVC$-2GT9SeOu#h})G>TlEJ~!ts 
z3c5cTjQnSExjybMMdJb-KRpA9o}oJ6i6p>*E^Jl0fb_b7T|GHEGG`^|?;N}*_XDU* zZgHMDba4gh>^ynw8U4Pu+%+2j+Q>yo68GI=IYA;x>|@BF9^pSZiMUCli`s8ovlh=q zKhyr^Kyg??;s?Aa?63q^JEiZh%yRJVadK8CkG{D|vd?vf%b`-k z%@h)o+S7zWq$FMX7F`h&3d+)Zmg#U*i$@3)}xMHTHj%kcWW7K`XY}(|Jp^( z3fSRfdI?2jxhdFFjG9$f$$bkHs1DIJw1tmrBzGJPSiqYy0?2&nhyZ;Vl@x+<%*3M3 z-n632_kU@IHn1JEzySNj{{Ir9sQ=$SWuu++Jv(d>gs~Uj^@yclkawYGN)xJhOTvq0 zI%Udo%SANha!D5nm4mdUjW!2Xj=heyOJ<4)1Sq=h+nZayyGd+1`n?XWEs3RM-eU3f zC)1bbv8!2cQ`g$vJSQf)fLYWdGLp9=9^DgZd;lqety3Jnu z`R5KDn#*<}whFQrbE2f$)yl9bt`VFzW4)Ye zg}`df0@$^v@!!r}g$8EUH48b{rsZ|*Vxm6WXP1-P9+|641;w zFaYCcgZZr1u<0%aHVl74wNuC4?f3Du4q906V~TD1SqGH3#i|#mFBy zdyeK8y4`T8nokDW31V7upM@$0X4wr8(QhsLqv#66{8IPH{bdLo*H-YbfpcPoPLd58 zlVqxlrxp^i6wVayM|IiEbcm>G!&J?ahcz%VrYq$YGh$(zs%-mu>*SF&MJ9$zUBNq3 zQ1b7OgDEm(aPM*Bk^vYeQvo&cR_|elYF8xtvZ)m6j?8rehn>0B6)2eDj}N#;x|mza zeafFsf9u)nP7~>Ss&QW)kPN+h=VW^LX=^A1(P97$!BhDyPQ#@Ua)dMJ%K%OU^&QXg_VW>_e|GZs=;yJQrkVl?;H6}bqwCeX1X5n3^MhD}Cs#Y~c`V)%?Q?lWYO zHWVDq$7#$lud)R#fhwYeMD9OCa|S zryFM1xE|fIvp}ydN$P=+|JtE=z-eO)>_XesxG7=`3lbLxR~U=U-8@mLLLly^gq zIg{FX6AuIQ+&2IpuphO$6<_07thMvFMw{G`j)kwD%aeJD`X}oi5o2ks&?kl_M6QJb zu&FCpbtBi{#aJe3{y;hchTfucdvo|2G})FMl=&+pGejLO&q4(%eg_Lh1Iz4g(pka{ zD){CRWoYSyj}#{pZIGH7&z+7m@pQWj1a}Nkwhd)}vJifqcr;TrJ>3gl3Ew_eLlA^2 zs`u1xJW;raG?oMirZfFbcJ#?r-e$v?)o@6_ z`!Kmyt{H-R$6oygK-%t9a>JU<7Ak(&I~lhbqK|o}gyFb>AM!ke$W>l6@kz&<_qDsU)a(9125!*sx4MYIZ(t1N<5D&`u! zU)WcPI3!i&pyov=;vciqqW@@neD9g8+xy&YY2}j7R*cq)2i&|tl;|D<=wQLd_~7sv zxrOVcly*J!8SnqWUjEWB1JtWK(Amy%`Tj34td{Ay&A8u)Xb1)X!0-F7;dMtBTigF0 zyed)ut>9%x`mNv{kWv^2LNTes^P#V|J#60PbVcY&)48Gr(JZuKVLT_QVEc$aNO8zYBf?+=12=G`)%~!~1oABWK)T zaLkEb86PdpSerpS1Qc8nIZi(}0USwr-;a_sM;*gIL@b4&Moj`LC!ie^dJs#Bec~#3 zuScpv%KnD(CFn_RuVAYK@&gD9t`JYdgFwqnfdY)0L0Cm?7TiN@T_!OqLD<^GUBqo* zoOd46QK}p$`l(QfASEM^8Tbj8t181G*y6D9Y16#>R9E}_;UgT=k`RAcwJlj$HSy}n zoD)YoVCZzhHJ70%-1vi<0iu@%F6gE&FPNrpD*GZVF1VIBkc(!1t82=6bQw?Hu;@jJ zX)eK3V$w?O3{7M9hkXYHpqrcwedz7?=1YJV{`ANLg7eQ40hTKhvISpyMj;%VxJ^a0$KjKR9<8 zynx|xnAJ3?Ox;7+G;GlU_PHn{OGeJv3IEsZrM#=PmIV}n#Dv}C3s?ZYkf;6^?tUq8 zndV|ht;ORCO^rIoPB4Uiz|R8@D#$TpEp7+fsIp`l9d@$8M4Ay)4gS&|l6I5KQe~y# z%j)Xn{ql}4nTDoF-+8l2q>St4Ky@bJBUrx+PFy)L{0iqewvMSqc2tRIiNeqL&-1boSFdp2{h&Eregd`EN4K+tBtAc& z$awQ(nokNM1sfDkgRlt0TznxoM545q$Vi@LB~H4%vD}~sqLFJ z9i}IL5|hEg0L_3w(JO*fHlc3R)|LxgNYY!h-lq-`dEF9mudozH6v$e zQ+Tw0Y;ozwav*B3CtWN&DYn`8XdkX32AJ=HNHQPmZGd3cb@Lp?vppdc3gaVIp7g8f zRt|_;CW{wc$`!gXt(Q*h5pf0v17VD9O)JEi8e6@Jb%Bhz)>+YtN2Gs~uuXg$@n###$uhoeQuc-srPW}awq1&UtjouM`AIpag!rs8=}x)F({?CGzI zIg$)ACV`2wj}jRkG_1Ip{$c*CZ7?0#h1q~RrA*lZXJB7PTTh{Q^|$&vLY8dxC_rZc ztX}Y9=AzlE6tfDqDo(hIfapWuhd!PB>2%Bil+U4HND8qf&UPq>B@YIlsPEpxS-syE zG)ODhAC6r~2Vd3r&}7PunMZXi1QmE(`P5q}g-wT?!E`99TQhGEC2yQUYddr0#vEDN zAneWY8B+tnyP?mm2ARI`o%b2A8D(l=cm}DlexOFl@&2bzp8=)bUO^MMYF=R(=)Bpa zSe_zdOnms>OdMl=%23J$wzmko0}%;n$WgB`-B?|X&WEl>=GLe4$5BUQOy0#OHz z>rg{pE>6Hu%;+8L(`bVcUXb|^>;s-SpC;Bvci6QtQ|_mUv;*i%(|u&|>A~3by~<;= z5gueT-EKPF{aQ{-nCH#S*rzp+!7vVlnJf>qgK;dn<|Z`sM};}I3>#& z4|PxhQRS(3Reua^Ap7t5^RXRYZE!5ATnIL``fS_tjCfP~XZng~m8I`r%g1vLTDmJo zx_U}Dj;N_P`20biYU^A;$kRM*xGZ#dO}W{})Q6zerSp*l8%deR&ZH(w>^h()tkxj7 zc0G>m{?2bq_y4S2{zv))0;71G90~xy01W^D^Z#RrF{)ADam1ED{f9Y`STf#vH9Wi5 z=$r~mZe4gaC>Zc7Tn{@Q#t>yCv}P+dutb|_E{+#rnslOWQG5V3-t<%5f}ymqDc>=o zxGnEq!)7~qY9=NDpszQM`FM89x#i{Ec-L&VLI1t^zB_H5Ys};RcGdlVW0F(FG=dwOx=B9)REoO1 z@lUs|X`R#i2o$txdi!+XR8lRKG|IY_6(NdQ2omG~>NAVFX$ol7vbxa#7E5j8{%>}n zz$eh^%8Gsm7ERl10M>G^_&aOa7k;Z|)64vd#-00NFkQ@`+T9K3gHrmC(~c~R5Ka$r zqRW8z-@P-A6z_d8%02DNp}$)Q1hE?A%f}!eiiJ)ueC6S~7s;Q;#A5V_&O8{8n)8ZY z^as+%J;)zWz8S37vXp1WU|J 
z1$LMeh;95k9pEqKI+GphKb(mF>J#H>4DJp^BjnH=n~@+N44ZK4gyq2e7Y*WRNRz z{Rv`U!cgD%sFvBI+8#WRo`-l8yDz#>|LzLPK6x$2MdL{vQr@HS<{Jfffjs))Jw+n# z2tFnfc1h94dgray7$zwGI^DiN5Z$pwBYjUBLiyUTjyDMGGM~`<-~-QEyM79}POhJ* z-841faD}*mx>`035lZn1BA)cULioMN(Lgw2tN201jR`ueBgJvoyJru-#J=xSaRlNz zITC~s-!_&iSKNj^nZgz+AFyUgP%A@X?J#?QexpqaM0Ap?ZITfc%nyCYwQj|%?v56L zMZmkUX3Fj4T$=_xl4Gb&W!-{Ol}?thNFZadCDCV>ayd8Q7v)zGXf6u*g2e?nV1T~B z1BDmA&p#l0c;6(&UFLav!{3VwzlhYi?ZqK+$o^vTNftlirx=K@&V+aT-)PlM!J~o5 zw#PuV+VOGE%i4ztX^qjZvR#%m>r<;iv$V}-I^hyPyczMq1Jgv=V#H;wR_u+H3^`XA z0*raBWF91k1BP$&p7kY%8IGt8fHA~SwO-4hv;F*!;1rA z#1;`D4nlc2R$C6N*|ok$f&u-(g%SVt=+Zj%COjW=ex5F;;J~a_IM6}CgMur(My}yA z_5Z7ILx2o6AhgVhVPxB}TSJb_E%|22I_ z3FcP>ut$oN=4w8Z%zw?91RPMfLU55W-Gq@Z3TH8SFlHTE{yW^~qGE_a#RE^+Blr>Gn}KJ-WsI!ra(eANI% zMq4K+L=k?VgyW{u<4& zN;h_#$utB&9CY%IFN_$hLGJ~DdC${e1Tr+=4<}rvvb$Z?YuqJBMf|N0kT` zF*MyV#sQb{n0W;J6l?|LM+We-*YcGo>xU_c8i`jARG#>4|J;;G3Y&m%2Y}n&wb@6s z*hl8%y>Oun6*=R@IzoZ9TOnkC-Q0PW9kTh?x>K`5mz7m)Nfj-$L8h|C&~rMYjt_$M z<5rKsv6rVDR^qgBfQch!3AF6K>za3M%Njnsq_`#XaxnX^s0!EY-;3PjO6C$ zBR)h?AW;J8?_3Q4tyDrZxKFMx7F{ zG%0Vt>{0;UmU?J>AIGr%N{e~G)||zfc!W77GqT`Az0qNL81kf5Wof&rr^eNQA`;VK z)=>K^k&R9LfvrZ3DlE_TG*WxEWGpE9gctognC9EUgME^jf#D?>q`E|Kdj&EJ5FCjP zdmm@th#SY?^a>~NvV6zWvDa~IFe!-UjxBSpYRJb$JNFbAk1$4l>LZ~?4~+(kN7G4gR#7+ zM9)FrKTR82Q`)XBtB@G;DSug-7$l(67sq|hzF|Q2lY2ResAn|*r=Wb3tcB2h*gj^3 zx4_!OiLYrul4ot2A#7&|{B^k07&ObPaw-gOJpGU&VAde>D|lQ|b-yb%fN4&=4Z6u4 zTCIAOI@#AgVNQ;|WDTjCyhHk1jMc_rTW&G{+$LaXM_MmEjaI_&9dg2`KXS{(R-z4a zBxm2A#&KdzB1@8(0A*a4|GGWZXuP6s(MPzz>EoZ#kURq{9A4NcZjfs7|HYGHM1(ta^44!o)G0OXP45gddrjG$J~g+E z_(L}>pb;dulKEynq1zDYLG1(mI+21V`L<|i(Q$9_cgXKQxiJ4>jkDc8+OhtgQj@XSu?j)z=d8ET<`(8O4?B2|!7 zume9*nNFErz?gsSK+3e#u?^^8jikT4!48}Q+DUk z9A3(xVT3W9w@(;3=jFdlB^lIK;`$j;Z0}=D$|mN`kFNwvpkzqMm>{p?sB(uo_*(^) zstlK04sEc_47(r&NrE0yzrAV)7YW5vw{)&8VK7ig@b}8`Oj6R3Mp^SOMq=D4!>CmS z16bLT=ef-s$-bAzOO)Qx5&2q4sfM9LP8e^ zhHh1_n74%cFt(E~JXvau5BbjcZ>_J?#SKFy+u|s%i8<;djXb!;l z$>x7@YwJY<7FXBA+{_Ixe4Z|_L&zBQrSE}uasrS0)dkjsMmuG5y}0VVq@;g<{?}^; z^#on<_}?s!F)RSUpZ~uP$EZbB%IVkW{kM~1%wsbqD+mLIrl?F2Fzk>>2?q$J6#=b+ z6oEBSA(&S4^iN#ZqXpSjY#-#!_!|-27aaT>M9o$^zv~4NybKC1RvAl1?nRofFSp^_ zdgbT+XsrNzV{Gu3iYMheHUN%sVBS9tum`diI3JvlF-L=;@zxDe#5gB~psL>rQba-@ zPRpbdqQFu@^;-(48m7Qgl9q1UU!Jh2Qkq3?NvH#?5o!TFTHXi82b>M$&87*~=McI; zjX|~Frr!*8kMufaZ!GwUJynCa74A=s+pIR;6KB632V`TFaW?#?YJD|qG4@n}*vPB& z(}UcO3@=|9rGxJ88gdC~`(Z`K&cBZL`IxIewR;fXqjGgN`T4c`_($p^6I7f$^d(aq zd?Z;M5n*ozw8B}7bhL@F{Gd2y>Gjs~)-yb;1@S(z4TRwloT=dPYSyL9VDP zFNt8WMkP3rAJ-O%^Oge3OeX=@c_e0%h!O=8rOlTg9hbO>4Z*{4k}(duvvinBxFf6bjlOlsj}iycdD6R%RQ%bkEonDr>E1e!OF}W4Oye58_cUQ zr3<9gma%cj9Zcu!SLsK^E9dNmxmj)lp-_|=m(H0HLLvYGK#dWk8xnE;aa*9iY!a8!$bK>W@TrVVBeg`mWHV%n_xyNsx#7XI zu21?}-xcyBVb#b~`cOY06Fs~S2ZIV048tNqfPgKaK0899!9az=)~a@Jn3S!g_q)qs zH+vWqXDOK`W#C{n7OM#-UbyhP+LR?|AB&I7T)-LcQx7!5h^=^+2$m9LjCEuZk%yB5D=VhPZejKM_um;+27AI{EH_;HRI^2v`s~j2Fj5xe)1tDBuiwATEa$_*dOQ{P=$D$#QIVs)O(M!-gQVJg<+tE;c7ERxY6OmU`GL` zqRavoxII`|SB`KiyMlE(voDC5b1}If5M(}rrkkWt6}ThRCRJUQS}d0)liEyOU7Hdd z$i7cMV)?tY^FSepY%Ea9%IWePAraD9dG;am+z=_MkSRv#G=aK$Q|d>Rblu7MCVr=C zLGNomK<6J=fKE3!{uf4o0-`h0UM8^Z63@(8P6J{{N}kcU{9vdH2MTLV4(m=HvlofJ z2ld+)ewC~<45oP2HaA+{J#C>KKY`#n-xjF!zq4 z!S5Jg*CG)rJgz39zy>|Rf_2u+>H^bh;gp-jplH$A*M)Jc>5){UU7BPV5SAf8T3`g+ z5uFhu=k{rIrj$9gJiaMSXDVw*KGGf4;MPKQ4^j0zMwEtgE`_WW zW7`!Tl{Z<((pWHO6wjA3B#_*9L`H=-G3v677M;9G3!59;ailuHJj0x~h3}O@fQf8R z-u7*!UHf8)>!0Zs$=219AB>Zd6d1D5d}maMJ~XjsV!BYPEI$9s@VKHOW+(G&uzsQg z0AT;`f4P&9oxO>RvxW69++s$3Cu#E+ZgE;Sp9p@%IhrK%9HCRWYLK4I1|toaq-;s` z7fIBgN%UUw9^v`o`{gp#*c$l3<;9VE8l!6EB2Mq8b0jN|7Mg?(oube9GvL^(2eJF9 
z^eGfZxHs+GvuL8LjjxaB0hTC8x?GTvQL@7{F&Gp=3N@a7Y8*ubHG^^F7+4D|-yoSZ znyQr;R4=d{4Aww436@H&^Oz}#5;eOUdKU>Ebknxt`UeW@xe#BglT6!OksORtQAE{Z z6yhVHUOOo{N%+?;DEakR(=K8-Mkxe{dO1-I3n;{u;^z*wDrv>aS!uK8L@&GDJUX_n zfh?^#*8FE6R(t+Vo40%U8D!ERBtFTb)CTi)%zIFL}})H7#W5*HYcXh zEdSkMozzIIT?F|&ybP?x)QAEnvoeLR9 z88wk|X#}(}#3dg`Cg)a&@ zxT&KzYq*k$N`;qt8lH9$m) z2Mb>_x2bAL#7|q-3}@1UmW?FQ>NLrutpK|ve@d=Kdx=KpoSB6L-8%p-x;cII?VZzr=9OwCOc;!xruW8UOxX5{08rs;-m^jkI{0WbAU#bFae42rcZ~)B zie3%s>%WdC05XX~Rl5aQf*7BA29YG}2rITJXRVB`S8+gazG;_CgK1t)@#2**Pt~|i zs){gE+!0Hm8lbXbB7vfZGUrfF zHxR5~AxgFiVIV9C{R7E?DM$-0OLRZdic@Bq8q;x3usAKZ0J0RGQKN-@WW@Z zVsOO)lB3v|7TU;=NvO0LPpSq`#asE5HE*BPCh!j9qgIl~I;(IVLkNbi*?5oN`!&c= zg150wjPoy0@?+okj)kh?P~%T;F3vA!y+BoUe)f-|Lb>LvN^~PYB5)Y^{4nF|Vaueq z4Z4&iW{25NMkJ6UN7~b!F%CSb+K~JAlPg3|+cY5pN;KC@@iM{rN9%qe3;Lq#dU1Ry z)-)%zBN(=ufEpoM+-4`+bQ=ILyR z2M{Dsm_2?LIJ=bu<&gx5IQP_S>?0O;@IXJeW)%!D%M~Mfq?2ZsFn++bB1WzZgj6UY z7#I8Oo!AwvS2EAiVwhm$v;G>NIGMZ3-s||I8KqGA)EbT%RS=L1HzsG9%tZTrJ(Gn* z57oY`R6jgQEfcs49d@29Fm{csvdd+eni04sTtncITiRfxc;;YycdNU>7BgbTzKXvd zm%0yJ*%Org@uc(4A10I1mJjiH?sC!luyf=+rru^d+|H?M)SW-JfPgp_jZ7h3s1LK| zv{vsUdB{}p<(TI{?vCZG`+}aYT=E$?eQgS> zrFe~uoSnF^MvLYS&TL=PBi+gkfzVwmLmoN5V)w29Sv1CoLVJXcFO4qjyJzF*uK24A zr!I^wq;|ex3?unOTy0}BMJ49whh}^MjF<@t(hj`FjjJqYW9pPTK> zP`F-IGW_6q*;IaL{{Q1yD z_LoaAaN(+BI9T{)QvEl+z9~i&VB4~7+qP}nwr$(iY1`eWZQHhO+cxLiN#4B7Wb%^w zsQ;>uwf9ncl;f#P28;7vWXa*`#7U8k6$S6jGqbZpTM(If;0nu>{Y>ET*lWj!2grk( z=_UXFJy9kx0)k8Y%O?%_$B^Ru&nU#WS!327dlWTy`xri3#S2&3V=M{4eBR@VkWGfB|zGe5v4n3lH`T z`}#SPPqJlanx-|1(HG~bzEd9F({(>Resm2DgF>QB`T^&JZa`(|Dg8XDKkuX!$r<`3 z6GV^b+UDV0MKB2p{I z;S79{TeU;LIB1a_=y4v^*A>F-2euaa&_BTb*O`F8%Sv$#oIo zM44p-OR}r^Z-2ax+`33GD(T$Z;zkDs<)HaI-)#0X{h@~;je$Kz4S-{JoUUkzMI9R>ZhDI-(;cWr2CJU-VJ>#q#>dW!UQDcwZBnb) zVSKEU8);(_?jMpf+{J7Qz!DH-K@nIw{{q!fT4A$L4iL*%aax) zzU`2^JdJhZ*ogK(94$&wj&YTToTF<(vWXPFwkwUdca*0MFyKswaa4uDK|p482{8_J z(bF=u2zv1DFD*TJ8^r9%Lfzo3rKI@9*c6wpNkm*lz1yt+*t(i#*8R6K?qN=;Q ztphiK!OcmSPyzR?FB+`k>o!Ex3E8%dbj9jczw@lUwz;-t&$*e+WqC$882klHg!uB# z=jOWG{^S?Uvf!w*P*i7D?DwTaVL6bQgBG&sQ%45umxbD;Gw&x5+A%K16%V2!q>0hM z7-P5tS(n&V=1`;{!tS)Sz6im_pv`UhCxE*fa&xvWHDs@ogSl|Qc3brXlI@(fTfB*% z%)GwNAj2-#Ax_I5J%!V&Hi!=d1-TcQQPAaTn#M+d+E{a(?}Q@~*Si>BJ1enm3Oxs` zM1g^TSV?rudICiNx?tdL&w@0SfE>&vhTct8aJ!xNz}V7{wH zdU0()hk#S8{dbHFYH3Rab2ao_N3`WfQ+Y6(m5Y!6u<)WozKl3><|~p!V`)OyXfR7I zemi7%HrJVW`jT&}$R!VncDHlMin+P`BM$&E5HtYPV((VObNmqO-qL5Lr4fpUBZfG{ z^oysYrr#xlxyo>6R0W$JSW~)!WBWeT{cmS^z@9Iuz!d14gdv^};$Bx^M)jVv5=OTG zj@fBL{VZ0C8j1=&;yc&Gh8Wr$KD2LI{dO6BL})n~Y6QB+@Vrpat$Su?b#*tc0z3TPmvS@Tm(<5NrJ(gY`an10D&$YLh8k4zQ|FJI~9KrG;M z5y#{SAeum?RZPhCFND1S;@oygV<^Ul$S=e(XOAjG(#~Jqn1Nx#T4KhUYs@3?)=r^RB^#y*3 zl^?p^yn`2}fNDhhH_xCX5r;=Vf9J|13q;n*Lc_4&>TeD2{m`KH2C7ui$3=S00j9?k zW9hvFQ4B6@^)qg+O5otlwY^cuha#E5Hu=pgg=F2XHcahPjKKR_1ChV@pcifGfmEh4 zZnM~Bt8z+rYj*_seM7Ya8EPY>B)ZvYtfz^e@1S1DjEO6p_OEVQgVdUtdL@sshL$Lm zN#m|11|$4Lq%$mhv5_fj3ta3`K9K?HRYn!S3LUe{+!y0^WQL+7<0CdMhD~&IKXa*k zAe)GjC!nPLfh5(d4Ldo&wLis`hecI0`cGVArs0n3WAOUKD%VhUUw~QI;Z;5f(bD4P zk6{WsU0gzzdh`Chh6HE=!z2L;s7J-V*xAbohu;+^T>qBi7Ub44@@Tz%{nHN@fAxx+ zQ(umA&U2vmk<^DPQ@44-VU1y+^7>yH-zbp!EN8#AE$h0GJKqKOEonl12I`Xrz~A-- z@!~jX_57}H{r^T_{~-}K7|x@Q|5eRdV*SfJ{T~q6fANUZnwm+QY)F2mwP!h=2pJ0s ze;(Ot%42Ba8Y|YYRwY^{k6r5tkP_?i0k8nXSBf5X7h!|KV>z#Itohf7=0{iEgZaK! 
z^H{tX(4G2@VNtX@m6$kS%i;l6vNRh0O2N`Df zGbLbkoClv_XN{8Yl!|n)as?@wNm?lkHR@c3FO&GXjyE7k_a(o0Lqr@7;^YcSDGV#6 zG#Y6oO!HZAL?M8o6P)+{D(h(=ASpG|IrW=N7!qGil-~ zRWYxBYiep|!-Hi_lbONURy{ezt0$Y?Iq`I4%h1mv)E{~hJKMon{FI|$+2+^>qC!xZ zI)`L5{^JsD$~kj2MFalBC8qf@SQ)a*I437U8aJn>xs7VG^00JB2mBZz$%QR8-amol z!j>Ld;+uA1&;B?+n}OYit*#D^+xT#XR$61|%I5wH?W=_3>kimI;{9UE<+%kG*U!iL zX`c(h7le8+Ms24&!)V*r$_b@WO&dh z%UCWqDQoN&eX@~ra)v^AMlC!4fyN$t@!%Tq0y&hYWTXly8%vAXwQW zaDA-)gL;L}MluWd;9QW#V9;>buI);b914UJ=rKJBE^W3MRQD1HeEqv7rO@sbXH^pU z6Igsh@Xav%+L$!bYLG(1ip`zyChuRDVM#jAQYo$@>VTFZEoF@!H&isOn)-8rlRY$` zwo$(+np$ZBzpaTHzm*|gHnhs|RK+#cPUcb9TIW7pR5o&gfvzAaJljqxtb7cl=B2U` z8NK_VW>R3B?zQ5tS2&xQCfY}MnYJ+Ls#}CLV8O|IfxI(ZC-Q6}YvVbTx&37% zuk(#VCb7%Z62(O>Zf^m*o+52=0cp1*=+T4nxD)b?vBR%OXS#lS#%mnmuHx(Ioias9 zld#HxM`-WJI#gvc>oDdEn$+{hTw%b-6ZZvscsF)Qy&4KoZQS3Nf<1b)^{6sgS(Hef zD3#+j0CyXwf!+NU7NyIH>NsZTacI(nd_kqiHs5yma!x^m;9HAISHo{R0PwCIP}V+m z)7X2Ih9p*h`mSFo3xWCD&H&luY`azG6z*gD3_GefzvQuS^;uz-W zz1Cg)S+-IA-;rMXeH^}&zpuPq>dE1KohbTg;Xgo#xl2PHz+55zDshb`g8dxCnA?O$ z3y^jlRIDN9?Na{Tz12S=*MdcB-Re}2J(7MWEA<81TYV95ZtbJ+X;OXl{rM{?hdC$Q?&KDRv1!cW2=2T}2|LR&{61ISfZnXe+)&*G z%5BLv2fDQKZMy9-;DFeH?$}x`xbj`Km4f=kdDcJq8ecD;^;=w0(fMycR>ACdF3+R~ zzU@i_UM<~V3q|IK3F2=(l>-;Hr0+EnUvuHvlXKvQ0T-YPd8X4_IM^NjEp0r;I%P!B z1!vOxtgunfr}M2aOYewEneY2>bIR$q$>=U{5@M^rk#_`y-K6qATGBtYv3;`o2i4Zl zui&?%_7g|^myG;8TLfMnxTD_BSqBDAe$&YkW@(F>Ot`vn`m=80E{);r(qiiQ+Nz%3 zqy8QfSA;fV)7|O;Cs6Crq~berSaE~f?3>T;8a*sDAaBBJgRr4$kUEMT+Cw+Fg1cN6 zt`f2FJbt^6Ftshjk&Fqot)XhagWG>YMxXl$pj`iTkgGA zT=$f_frq)M1}tUI0Ts|OXl>#)p5+R+=4g)w+lPGGM~;9MNOIpK`Yd+PN9TfZ9Bzew z67+RR>mbd8KSQ69j%PwC&T!Q6b~KaCOdzZRK)=l}KM z{U566zaWBfx2Cl7<`^O#fKNn|#rOlY7B!<0IiqPUC3jYZEX}QODt_^V)+JE0TD27} z3Iq)07=eIcSOSG~iWFyxupQ?qd{GP4sm=qs)7b79YpYJ;4IYn#VoOSLI}ztVmv_&5 zNoT3ySEbtbEB|lDe#6mh!0zcC@^8S9u?@6RjIHI=VupF0fVnmpFS>?;!8Xw*6%*{E z=tSC@n&h!`GJRt;)6wLWJHrynW64?mKo=CRN`|`WmI1f926hQYaA+;&rgkekJ zTJq)=irvzo?>V>``@;vIeFVP1AcG6s+i4EPg}@(d*XfTA;?|rwo$v?IU;evRz16(@ z?9h---O|b8L(YYBPNA|)sF}>zn#dA{!1MZ{;Y6k{sJp=&1pgr|t+j-a7yn5(Oz$_N zzE~HxvPRsP>$9sdgK)4*i>tB#cy2Imss`UATr|zEY+ST8Il#aA)-u8V6<)8#E$zJ_ zDe(GR2I8b{5c)CQG>snkfBzE8Dby(s;ka#oqi$~Hm}d0_n(2zq*96tUd&=8o+47?; zpltowwq>@;l0!(hLy_u%&l~BQHpc7NV%7#;WHfzB3@?dibnl3Ux;;ld#a{>=Q8DtesALMLnWb*yT%tXY+A^Y=&wNo!iBBqy$hAWG;oJ%^nz7aZM#NLQPcLh3_g( zJ%wIBDZmil#UaOa{)YH@Z#>qA7Q(v97^NSw>f? 
zfC!D;)njAdmkjTKw?$U+ch!tV37QyTIYmo=2ZBfePD$J*fW={Jf30T)c`CEW3rGsk}c2LY#sqz=mqk^T&yKJ;dy?( zX&+dV#fKs zHqh@VcYD}wMHJ@(sb!!n`Z3zsCmitQKeKOe~m%?}QABy4A>k9+TeYnK(r}1v~LO7UBjz3f?a) zG<_J9)`LoC33wT9GdjP&L);-y&c9s+>uOEgh6r+@Q)mUkv${c?_OfjkUTKs zAn3*=>QN4q3Rxy}C6K*%Ac)*dBC5kElBr*n(zp({<$4(J0p;PB6#J#;ei(`!glk@C z#CQz5Xo~gTq^o!NvpYtg<2mwni(`0r^hxhf)~6}c==BYpZrlI`-=4&`2CAiHn?a+3 z>tfPuu|358i%L6E<9$1rT-Jx<96nM>Qsyb21V-G0OarNYll3x4=PS%vrL#Irs}K#H zCX}XAtUZr%U~uW?n!v<sEthBBH|>gQwY&%SG`Qoy1I3I z1IkH=YOk4J4E^C*WzB^0DvRdJW(;Enoj7WtXICJR+qUoJ(o(ruQn2KXEPp$&s9$^R z$1cvK9_1p@!b?G3q26|KU(hNUQTon_KZfVjwDN#g#~|_xMG`!rplEoS!Cs<2dG;Hp7gfmyz^`OuJH9^0|^Q+~7DM zoJ;Ty<{kjERI0N~u%1ylW=auDU~@}<>~24ErNrGDc3dX5oAa`kcZ*<{eE}Eja?{>J zGB=ui ziycfK!u|P8aQiSt2G<6d=Q)ReB0J zwXyO)jtxLj_J$uZvG%U9QcDEBS-{EBuVdS^$^@|MU88RH`)Lrop^JgGk0nTeVY+=# z_igbu9G2NO=DYRl_EG=DPIAMyi;whCe65FP0pWR=I}!vNL#*{|BAu>nN`G>J;k@N) zoW$s0lG-%2JNyl`4Si>jZgbDXr0VcrK${%T$i&eBI~e2xBHednM7`mI1r95b{UK^IedMh$J2LLsj@hiRE%#$~BluT%)a~ zu~IHmoQiq;y?<7|#7TX%KI#7(KmTXNhR2oF+)DufpqusomCZW47`m9cI2jsS|L;VL zXU0Vd|UoPq(fGgnu`RC({_&0l4g#y=BgW0E+UN4bse;3nc)tJ zd|_aQkxCOk>glZfH&r!PHME24v*t77;M=FlTd-en0-jXsdKo-XMK&W{X@G>~1G35D z_`j!4%Mn!wk526qjYa9w_wlT)EYV;UWo6t;mwX$R+-*vW*+w z&lmgqhRJg_;-<|WDV3?um51Z~^W($w(&>!P?+Ql^HKL@y-Z}qVJ?n)MY7F_FOu?2X zkB`oTvU8(?7n276-Jd}Gx{(i)c8BPbk_PkR z1E?{?{Pa{g{L@XsJc_!J(UrWA+bEYqE>g11+1GJaRB2Ymg(5{g=_`R!iPajk%vf*C zU>BtUrE+N-$t=$#trZJ%a&CKqx5bdW#$edlNO4_+lKb}jE_{uFQS@y5| zO`fAqo=aMmjJ8eQN5tZ&Gf*6CAilZViUZL5h;61wyMVt!jAQ-6z@fg=-T<<$udb#Z zLBM+6Ccg$z&(JYe<`3f0a=TVb+-Df^YsQ-pcw5~x^EC;@1If3o5|HA zQ)t2i`ptAvg%c*SssPji=mTl8-XmQ5{#+rx623iYcaNSZuYFeBYniH6D;E8EMzn0K z2XH{>KG$TNU8U^F1^hRr_NRzW=$;*3$C!}N|VjpAb^=>y>@mC=4-e4sMpZT@|wR4>IL9d z>*($^!^c6saV_ZXBVJm=?wWt{MAJF`t{JJcBXDsa@A!x3$@UTtB}y`|s+e6xJ8K_x zFIM#0j;zgi)Pen2^7(Oi%#9uy0dWmL^p6oRO)P&!qsl}UHbz+U(6ahn5U*!IbLU?I z;;nw!F?;}jV}5EA;2QWioDYMA3wcBSX z%OLpWCJFRmnipt~_+RL=1zrvGI)OdYov&JO7x3)tY`s{;x4XEM;c?Sv2yUq)A@7n*b-A)+bl`c=%2I}kHd^)!)~7%cD#Zu<%QRSFP9G0Ki$ zCDl-cB`W}ZwB+TPor6PCf>L6L2^6emqDex$4rA>Pn{f}JW+YN2oCr`7P3(Y zuFPEA!EEoss)NmY$x!`K_Ln5*vsFnWx~312uRLWt0Qhwqe$t@1=xKXCsU6CbDdxRZ zNf}nUM}yDK+ZQ+wwmy(#yZ!P6Yrx! 
z^?b<+Yrn=XPOHy1PWh~uQf-j(Uy3ZjYBwUZ$<%x>6mmU*q?R2+L9aM3YylZUBeQ;3deZUmbaArfXEw6mG!wzb zOyniOs-r%~C+l46J<*f32IwyEDQbZ}J82V4)uULM02OyrFL5|3MXG~jV4D$rd{HI3 z$<;z31nE#mMI1JSheCkV2m}NVL+WeNWtwO_!%K_9?Fp#UFI}UkyF(pRzVYoE->b(m z^=K!!yLhD(u4ofQ|EfTeaCapn4%or#l7Js3ou7Y>AHQ(CI^yIt#njRF#ScQ%JQrT= z+oqdHXcnZlfySUDB?T`&J{AT~mUnOctF3Lv`9&``WRetT^hu~LpmA!uOyUj^r;xIW zMeo>WJuk3~vQQ=gIV+iZd;t8zF&Bv7ySSKixZ48|PRkX8Xa?PgeKt~`nsonJsL(-- zq$V4?^H=wWjhDms_vh@$$%}`xe*{%iT_NI3z5Dkw?E~KbebB$p|2Ln#j+c&ZmYke_ zWW%TuR|V64$R!h1lR4)lkaY(pW88p3z=FP#a)VnyX`P=Eno}VRLV*;X3i7iu1k-kH zE){wsy~uLz3S0>BqHD=2VY|>Q#;d+w5TVYox$Kq2Nhlb!naoV2qPB&@h`KPdfH&}w{ckNb3(j8tRB;$dL$2u%?P%MrIEN1H=!Exeihh}wwIAd?>Y<9f`GcU3byW8QRc26#ojF+c^Nedig;cqfMx6~eJOu-9DHnz zFtE`VzN@c0&yS;*ixY%Nb;wcR4Rm9%<&~y}o1!p63p5Mi62s_uT!ZIdmPhTi$U>rT zz|ZKYCxO;8S1#pi0nx!P~d30gbD`t=*kQFBj97 z{(j%1tG^HS#Eg#7fXkwDVCN4)E6hnBTyuV)u4w|7LH!kf3{w8=CoA{*BQ+JK4tg+INap#!Tsm#M2P{`hUNa7kDVATJLEincotSJ9((wT zd!4|5kvC$_dE+LusHQaTH|MwJh&9w3wHkfT;|)xdY~wzF0RJe|1kMCL$-2-WBC#m5 zc1?B-M=lyQQ`i$BTiwEsB9pY!Xc7a{mh|dcFlp5!GPML%P6ST$flWMIGvege7!4TXuQLjS09+#PJ)dTR_%iR3Qu#tM<`9Xj z;Xj)yPMcy*&aPTu5Z+8!;evX1tCL3fm=)Wj~>Uy-m(7g zjh^Yq3C!C%`YqC4b6eT1`bKe{;_;%mLz&;yO*3SnC>=@WvWc@1o zew9)mL&Ir0Mh0DapusK%^*K>tD~fKPEfT{BM2%CQ(k>gu9dY?adZTsBJt1@k3Z;mvpVA)*uCe@~vhA_G|&2r1kj;5GSfjqsXRm4vX)l#GMu?(K$En=Lh3ow5*&aa>lEa27s+g3 z=@17k_96_M+#v#QZQz9fws8aWNg8U`vIUU&xa5#4e{#_~WUmo_&GZ$_4#t94Ke$Gz^or_?7D&C1_NLV<-;ho61xJp zNm47!PFGEdzcJdzYIX}n^Mcr;as6@cAp z0bvV~T4|_5+13fnop)G5>%)HwQ||pIO>*(Ljzg1C>=l6QK>8FiH;u)jdz!I8fpS>U z_W5g?m$ipy5^DiiJLLp6F1k<|4h5AnP1~StIBnn5ZJw$}l;Gg9d+=P}bRhD;{8jz0 zyxshs>>l)2?aJfYPH&80i%m-=#8mi8Z>g2qY3L=0+^A}an&1YUsOaY2OqKfoxF!mY z`o<2;*hZ7YQFHnj9BB~$$-MjlD=deYlRPidwfM(D6v3So^!gt$d#F?jhI&mmIK0|#&!M9?b6;V6A_Nhk80Vu@pe_;@+UE}}JYQ5aR^nGm5D7*U_ z*ca7=10OM88(+n+lJN>u#LM_fT81LePV*%D>!3pn>uFYshA2|M>JBDnUz$U^)Ctq| z=f$A6y#5qFmL`jGETaH%JC36hSOPl$tGI`Rk)d8}84!=tWe&gd0Jmf)-6C5HKQv<@ zk5R!~qKjaEp^33tv%Mes4D&5r`Ebd7-KrvbvmOD&mh|&&@#x{~z{bac+=1k0>8svg*dI;2>TnlEcKJXU0TC{Uboryi>VsZZ!4f^QT<}kLHXLLFPY|VNx!@ z0=yi2Ty*u~%Mdkdw$Pa%?9QOkn9bTw4xo10u*sY^dEb+>gHIu`9OQf+#_#j(?l5~R z+}ZWr*`9|F--%zqFFr#rWc`ljJ5`q+0P&n&O*qZ0Y7n>;V^fDBO0XAbY`Duf1K!2b z`yz~{x4=x5rpn~SMA*<&_?(?BkdH|wYcj9+0rQTb2~yz}rK_wJ)EpY4?I;Lk^NP%u zaJ3-r;kD*28`275z%)9kg-PpV{)C_iNu+5;V&PU%#=jyajTK?HwCRb5nvx@-uV*V8 z0~uQfG|TAP~ya71NiisLD!f)54}Rvsnj5OiEVB zF(O~~ohbIyA>5|Q)zuLuhvlrNnqhnqvn>`dF6$)N?Czr^?4#-UYDTVD+@lJ2i9(+a zxbS*eKHWyeHKi>Cw*Uh?yZS4ZS?RR{-}^A

6-v2J5BEr#Si5Y+MV3T3lcWQ@ZB3 zu>#7@o{y)4>1L1q(ID;)E7lM-hM?8>uWQZ5Sis>_4p8&bUlkL*5hKJT3eb4?RgSVluT7lg0`FJZHZD(YhHSen5a&ccuaRM8npkBhQ?hd$&j zny(?WWwz#3ZnlrnPdM9HK7t!NmKRZLceU5uzUlhoflj`}B0_ zj#Z;Qt%34JgH2Czk(S|?xE^e(n^TTqm(-)cHQ_#Ev20n{MKZ3e|BTK;rnWI@V&eBa zj{T;LI}NpXiM4gF=BK)G}HN`f;5A$p~*E3?)o#ZHun?*4#4PNB+v?TkUM-n}(<5j-g}9wdJ0}R2istFW zIl*6n@r^*tM>1PI%y{Q`6YqAR#4W5+d{9ANuyh(CSz^Rz%PNQcHpqZ4W4{g1)&+5e zdAc$&fb5=a!N8#yzCIAyEiYo@18{|CoG6o38ml7Mghn~wJs>+wqz`jUK&kNYFXPr5 zBG`^TuGNXyd|0id$1GVwa8}MJ_ceF&yxO>K<_(*^ubiyxTFndP>)C#-v*b;u>rP@L zXt>ZrY*XR=UWOqA9g^GvYp{#}9DEQ$CUCvR04VwEJ7Ekg$ft<*!5C#TdB*5@bV=N^ z0{fSuo#9Q488Iuz=P$l)YxnXexC$v~U?$CT?La>O0^G0ZQm4ux?~hiu$Jj)0c3T*c zdiGD{Q;|KEYzA9S%ISs(AV>_-c+-KHrDs#_BZEa~bY9A!epSX z{OZ|c*_Fu^6vbBHPAQ)olDX{Wkxtbr;xfziI^y>78-)J(T71T=&eJ7H7)BOInbR2Y zzZd&otc->>H&~_WqNUF0^UPM_eW${EFMxgqH0t<<<$UE3u+L{hBqBh@K~$xhN?ZG%UMaSBYn0Y z&b~0n2F)>yNP1h$gNE_Q9#^z4yVr8Dx(L9`O|ODXAi_n!eu!dMg))@y64sCpGfvg= z-$mDJLEtqnK8d-Aee4`$v$~ET3$OVA?aH?8e#$ehK>2%5UnMn`TTFy>&i5Dg&>eGUxIdof;Oc$I9iz)RZp_oWD64xvz^Ma-Z6fqR|Lq0p+0kA~J}K>14Y=#P)I z3Z0l)zTs|!A!WHL>uFTC|9$#*YW=+`{i!A4ItIaNsd-W&vB{vjp68^Wx|O`gz5uw( zz{$jW^-}1}z&>znI^hD0@v1#FRnuiHp+w^MBUO;%6_Hhofc8y5Y1hO+nk?OurcMzX zuJ;o=JX*&g7g*X4X<<8^52GTn#oQmb@i(~x5bx;QaoYC{+{FU4TkeA#u}jE6rbbxi zjk)kn2+LUsb0@)wYD^vLHJ^Tzp5ErNH1^@3xzRptD!5sCW`k6AYR&l6qE(a)wv->0 z&6C$rEy(l@729Y=g-dYP3+v3&K(NQ#n;3UX=p+&DebZU~l>CU)-Ps#!#DV$FJ~;W6 zLKw?`1@MOyAkC8SCqYl4ubLqdkXDy-af41(W}BlRT21101lF(GD#2l`82 ziP3-ajjt{|3sTE=f>Waf&#J}{%%&$TE2pcp03haX^H$PA4~HspqZfndY64bS+=|qp z8PYOKu~L1@31cgi2Nk>Ew1s+>Zk6(IiD&UT2}yW^c{%pI6A8ZErY9mziKw=)^m@6O zkUlp9J;o(X2PlY&Lfw9Mp9VUd0|5}@A#UsuuUp&D z@hwK)x$fmf)d_YY+~^d?R$T3Ric}{P?^~!!u_E#uPp6tQ6~OD7vPjb6QXXkOg9D7k z#YC*7hz4Xthlz?~dbdPlC~neV!s7ZdIIBT-=;7q)!pA2@*gB*Ql(YP1ymLvw(?z8S zu(s+B8233lX8<6jcm1fr7d-ahV_p_`yQ-0%B)+o|IDYphD6$;fgkBvs2=u-J!tCF6`%ZPfOItw-V;2KKJ`4->3ZUMfXH{%6)nTZL2=p-2aBsC;c=D9*d{ z{p_jr3s`K7m*h$f8)KoOF4}xem>2^nEXC+-TD?8s&!GxFWbPYN@#Jnq9bpRc3zt67 z&#GsT)6)`JEr{$=fy`VyJ2O^WpJ!p-mg7{3PAa5cLeIwXY_YklW(U0}GOO>1Tgj_r zilGwqL$xI@J!SsaM29I0ZEpX#-KjXJs4ULB0+`qJkVWokgAiHZgI6^))Yz<(oN7o0 zo!l^DIjNI;R?$N|g};OrQdfu&ls-@*K9p!q7{}s;qRQof2-;FT!x`KqK*^edE1`9> z5>pYk?==-A?y%zAzKYT~>aR)W5(M5)rNCt$@(dBp^HVp*j-gIUUX5Roqjz%Z@GAZ! 
zC#q^CLELaik><)m98&P4W8Hw2xC&KN=cp3XOhr(;O6K|}(bdS>x2?h5_FOC7#l<&< zv>`R8L)woz?FceXuMaQnJ9BDL{~f;r$u-`f&4axR>&4h_rEjjFMld~rL_13~% zbv}|SP_Fw)cD-yEDFZtPG9bO~9cVB8Mn4PVi05WWx@&6|n7pelhgzZlUmWiQ|LTxx z54)+NHX3yd|5ShEn}uX*-GnFYy9|t`vHPZZbHYJE=vi!cwQa4>X!Mxog9q96N``~J zlVg7TomNA6HBDxXI#4Zn_3D7PYKFd&3KsaMRloin_r5<>Tph>Nhf9Az<->O3AXeyy zvjnmEHJIBfp#~liM36pO`{Kc#e(^-F5E9xiE9Kz&zp>2n0Y&EeND|MktTVsWudY_@ z&Xhkf7O!z`4-yeKm18n&I zOd%M`6KRhV_N>MGm3CuJAVl_DW0i#D!?j4j#wFv zv*%1-I8>9_CF+}O9G0iM7}+?MtF0#pmv>E0#(V_-KXzM;EYPOS+ODXr$C5^+93|pEnRnU(Y-Az1V zIMb0-cUj7TNTF|XmwfIxyKQ|syhd0gbuCiQA6Rj4ivuL6m@Fh_{rE^yf%?|~r)ULD ztRoJaqOMC8Q=!?{Q#XXej4~qlZpzfYS`1NJVct)c|lcv)ufr4mlnZU`pit1#>Ub<KCh}arjC99JOPhQfvOLXTJstgk=Ds-OJv@hK&rUP@`$-1*LDCWr9;tmo?$*^WN5A z$n3^9X|lXZnI1W@^gEUA-QB-@X`SVMmLdD)rRHEYmt_q#;<_@FhtaBk(L;c$h74z^KJA>be0c~#inCvW~caSk6vwe)#OOol^!d#y#n|k zb%eQgCTBZktX(|i>DS@$Tkb@s{ni#{SQ9)cc3qXVd*}2HakRl>F>M+J_0eW2tVE+J zy8IJEb%E9~x#2n%OkvNaBW@mNB(thEj;qT)Mm-IE>E|V+lErQ`7nkCB19o=77{wp8 zswX+DrdlaWX3gIdk@U4So?+s*u&*tj)Qw4>dA_<22?cc386A_)H&^Y>#2Ub|Qd)Y= zdc#wUNUEX#AV7+ap}i+(^Qx+!3<|xuY~=kqj6FmP0<5g@dU8*J)PT*`a{>~a4<}D^ zsHYZJ&s3=)`p>`!o##MvqkdhN9&n=h(Xp*^YSY#L_vFh|K!ExVZ(@mq_e5yXoq(d2 z|N5z`xgOVA#-}CpV$TT9Fuz+U>#HxdYl`{?N({C$_C#sK`Y><7IMBWk0T11Fix=96 z2cbGBD|t2juWR`-pGrJo(g?63!sdhxd;w&g+LA5^7O04!Km{_Tk@68$xAB#<`VSv@ zIO#Jw@i(D$CEuJ#r;2vBxc9*T>a6$`6?Y(9gTGZ=5VPe}{7(xnV8mZ_K4mX0Hw-eHg>)tN7$y{K@7ck}5e=rnD_un;w;x*>1(JryW3}rFieWZ$ zHOF_Fijn9O&?{!)NPdD>(l#7<-H-qojfNIZxDf~eX9Yy5q0fb@FrWf#DeHoV9<*pf zTF730N+_fVin@(TGzq~r-Sq+iV*@3OMJ!VX9<3N(6io=s5809O@;@h2rjb@_;4FMZ z+Jf);rZP<8h8cWD*N>cFX3dLVnvV!_ z;zzS#aU7mN9n$ue?(h%fg@SoJ!i~^!()Mc*3CqEMKvS8jC2?bG6oZ*ky>%J7$81OX za6~Gjc@Y)P9xhe%*wK!>%L2Mw##N3ry;0yzY^JC)isDWd(~8)#nrxxIY*Y`RjG>#Q zrsV1(bz?Swo%ZSd(NN z)N-t>&rbd~e{*qBRy>tJv456;9r$#z=BHj9Ero-M9Rgy>pU%$$CGpI@}q zY)$oci|T+ce^1RigbP4O5UDaZ=Q`?dMD(hVo*UB;(e$sWK*ugxRtg1l7>2Xj91MOh zFF8ONB+#?y2_#+`yQyUOsff&>Pqwz#qqeiynsCK?Q%&x9qLY_##bw#a+cGfh8KF18 zJe9jDk;+z#HlB1^HRCSQ?^UC&8A33gDj0aE-C{Dz!ynknl{JGLO9b+*{y3`nNT78A z#LAYlo$&nUi^Fi)QPo+4+0>O8FF{=o1@4oC{9+q0?qE{4w_qYiKw@|Ko^V(H*rNrD zhmHPhH&&9`{RzjfE=p_qIFA0jxn@+qQR{Ab1xe!Nb4jE4=SJQFF8G&}&b?r4X#Z*q z;%P~!xdwKjBwg42A=|Q+KP(<(PoG#~5gPoF2jE`pifqAa>1y9mVXVBX$>~nUoq)gW z$IKpBELD6Pm|m%i9)x&vnTUX3!Sz%^Xn<&Sx!OvqUH2H-l1O7qRxXUu>xwq@a{kbAmE>*nR zUHv;-K5C=36mP^_I7`%#{}CVZ;E`EPj-dTED!OyObO7n=NKHuM*|;q?wqX&EZ7XW+ zrw^Peils4nI4Pks=ppn1PWEC+?8vx>(^bs*W3cDeqa{klGQCN_BfAaMz9w?)7RX8% z+t`Vs2@MY+K+NAWslC#$>~#`=ASb)wJ9oCd`5HqpjOM{H%l{;Pm=>~w>r{Xokh=V5 ze(Qc5y9Py(?PQu+w2t(p^J!QIQ+u=}mujtTEG24kp86h}yXdVUQF;gIVXImCOrUZ2 zt*M2YJUBI|P~PGi-S0~!<8SVudF-TTT9NF#!2N8;{%2gWz^V8RkU@^PQY@5GZfR;oHI0Fyd$1Wkxx~QTMJf)8!)|_;PkV z&pCvrScf&ci|y%!miG_I+7}SW*2h2ZWYWwZ(_ZEOLh_oD(ezVoL8)MUA`wTQqRy(C zXBU$4sjOVeNv3I_oOlg0Zb$iJo?ldko_w8ruwhDosB1>pQ>S5^@#m5>?2%T$B#}Au za&zxrEEbJU>vVdJOYk?d@%+ys@Zm{7Di_AF0DR{R`G@qwMd7lvi74)zX)s0Pn9wLf zKzSIIi1+2Nho;3!_v|FvN}P!|nzv*QLMS(!Y;*A(hcdDzV01<{K^rsz?N$hc$U!Kw zxnA~$lGe*=a-X@3>__FoJuQ6rFmsb(p`%(tSuk)HQrx&cIV&tf$ z7rw;0WcxHHL;ug{qe+jSpCj&so59?TanJ}Puzw(FEU)uTtOwYw;GH~oghk{lFvvdM zjbyz3ek^xt@w1@?5N&(9*7S5@AjqUePVssKV7w89s?lUjnF@_d{*Q(+RRl>__OLWy zNGe4|iCQh;*Kv}bM@_w$(@C{Kdh)YpjgO^E6L+F(izHlVPWJWkW^L!T%AjxJcSG^_ z_mjXtr<(ekL7{JM!QtuC)hVS@UPGpXt0h}yjVeLIUTB3X=g2j#@^r;pNRNRM_EoI+ zR%y-WxHs)K>cU&;=JTyrL+@5Cb**UiWBEDJS!VK5V+yRtz)!YfEyer!BhVRgdTHSJ zJl{FY(Zx@)!-um$qGKoeOd$Tv!pCvL+B5bRa(P#Kf2`-plDu5Mnulfr zH$8t3wtr@btAuytsFv|iUta+t1A_H~b)R*VwVE}Q)tl9vJ}zPENSH*}M2JMN-2;)* zL$w1k#YeIuEQP0Z`!m@`v_m4rr_VW#>b2e34#1zk4Vm&8@2o)iY6Rdf+)kRr)3~7m z@KK5sor1#_-nT*l0WGIy*eEV&QW_q 
zGk$f>bJVX?fS+*ZIjUE`Nxr)6vXswg=RCC=m&qLE8*qR;jfZ$g(_~Ni_G!xJU+2^0 z&q8Md+`5cl6f91h{t@2RdS;s!@hBtRbFA?jQQQHKE zu$k*AUzwt}N@oKlf+E%!0Gqx8YvJwbFtD0FW?$^Na?$xi_oVtVj{DoR~w};T+~(*hQ%4x#z1K9=k+>om2|S?9`O_fhT{|PH#Z(gdzv1jhD}>Pl&8e9*_ekG0 zi{qkEyWU0eU|jekN&RuwN>}gq*;7kP5f!p={-|T1)1^Xk zhPv-UsxEvEUKx$zx=3`e=U77s_DWrtqQRcoO~eXbHO}UPGNO11KkS2>a)#Z*=Y-_( zAxwlN3%Z4^PRi(>7_xfKkIVGKGcp1b3BZiFiVY!w#ilWGsOe@bGc}Ddmq~ItXFrtfO0ZaqqU8D_ zy*Q54F7Jh8)70v`O-75j+7AE08h&FkW=MIey{MaW;N1J2_y={dh{$C;YDDlqL;@C0 z)-)HH`F%elMQm(_r=`c@>OzHr#f_ty+T~=Sf~^_6=RjT)5-KP*X;}Ab?_Pzxu(`pG z$RXIh6kpgg*|6giTBSP9sztY(Q@rbsmob8lg3WgqGV^0sg6Cn&7F78pzO%}aRag(* ztJey{%VX?4W>Ei>DRhu!NR-lvY)J5`y*#i!Lyz@dhTKFUL(?ev3YJ2-eLC;6U+zY) z?Vm7Cmt(P6j?A~qL8{I$mX6TPbtDW-hUZedEIN9%hn+@O`+DS-8xCJf;osLkT~ZjS zl98Sju%95}C3xi#`%HvB%OYaB`pX5q zk*E*LkXPT$`Y5979L8vaYoWkMA)z1r3o*J-Gj=hVy82Bvr+YZ5ggbmA!l`z}PKYSD z*Z6oYZJq2tnguSp`xe;B?5q+7bg#bvQMjySvhYg?BeRr?YM%oxpGfA0;-h6iN{zwB zRPqT$0-kEs3=&9@pH0eHTd1i>dTeyLgfZt?Hab@U7{5kvB!|Ecp(J6s+@|J`R|R7d z7AekGJ^h4oBHkCeWHzF2oiBENyqxK3NT+$psOYOKnb64qgDjmYx$9(!yT7^pj~7G}5zS2=ze`rEUjvz`0uO5(UCUOa-b`U3@;|^LKox zEE2}T51gpDDYXwQT&G+or+}LmJAWzgH0o1TInvcn=n4^5PGDbE(p zt{`y#c8nUACqD3j3>&u#Iv4@#{Fq^;>1KVbxevP{cQj(+cLI zlIeSL7uW|>YP!D7VBtptfSD;YV2^Eru2~jTT#-8{3fZm~Ktfk4&?Ah|YanL+qwwyR zN>~$KhExz)IOE<;5e%C_V}kWRlnwy4A{3_=FtVX8ACO_0!41p4Y193MKiuecRy0v; zE45}Tf?>CJ-=Rip9tpcnufj+c-sbpl`B-u}dJeEK6A{#FUE6e+6%+u*hiQZfunY!0+PQj2h3$&=nfJ#f9DHmB9nN0%N|fYKUR?cMxJBVxM$0_39UD^` z7zQ=61|7a$?*0wVR`&j>IHYkH=u@JsQv@LK1Y-a;UH;9#8*RuvW^g3cMGN?Yc1{ir z6b^1j7L0uiYkR-$gO_*Jj$_I^&Ty^I9*hFnIIW>B!5T8)xoD(*NC#oq8S6$&)wW^Z z5|t|mGF33CZM?L(qz(hsGZ2c2c6Z%SN3hEV$(D?A z<`P4bbKTK&q;L=w5(XalX1Yhk@n&>979_m2lG$ZUX|}R4?gJ!+c{8yRZETd((vpjEj zMt2xtL#qa9i?-33-@>Wv+r}6fby&xzLy#HfEYkuV{T~0!<$O17K~cuDTv6gR`;Yl{)DcKT_I{m&K2CCi=|r z;z&x!aOq8d9BZs-m~BWJ*D_gkW%O69#Pwn1aG0=`t&z5RJ*Hw?zt%TcKY<^&y%szN zOErEjcT(IxuqMoF^rVk-c1_c1S!6X&nJ-(?KPWdPnSF?hN&`m~77Rs~0}64~%JeBR zX>-n;Js49?3Y+n6rm!Yh%P?FPp3V}D8?ozaB0tFf%h?IlSxDRFE?Y6ZPWw6v=+ujb zo2jh6$4I%&Xk>Ur@zqnUm3W0NptbN#!HDkd3Hdd-|83pgn5ps7f2f|dstVCTj-36= z)R#`OT#Rj|`qL4j%t!@Pg690iRg!Wo8>3eM6B75YIZ}g((ofb->ZJhzMsD27drIU{ zm3bqaH-53c-iw&1!-Q~PiMxBU5xcqYJIvE>*v|OjDnQuYb z;#79(ij{fEKFK+Qtx-KnGgThi{AZLWFizi!V-uOwrZZi!!t@YV&YQvgY82UJiqL^!TBouw z;^bd9PeqMW_XdRCGPRxuRr5a(RMuIPLigvO4%&f<{$pgjjmYz zA-ju!P`@EOJTSIh{4SdSa>CRs-c4%PlahkpxgsIR?!4O}P%6X^G`)Q!q_oDUQ40vR z*_Mhh$$yO2)7`%mpw$QA+QRGObn}f>`YN-7Nw#b4*>bY7ld+sU*AwZ#uvo3HMbcq} zSs_JAY0dI9d){GCda7;guXawF@{gQ{0co}P?}%X#+ss3qn;Us*h1!M2LI#LJpD{HY zc}##LR_;&dViE%{LV^M%0e9je4^*1?%}`<$+24A;*~n(f`Lbwft7Ksk^u@vDKX&|* zXbO|Uk-DOp#@*QMiKWj;(i<3{8=VWHO5vnV{-MdOMuRVcKjBz(|FU~F>eE{5T`+u< zhAO;KZJ15=Zk>$(vqTc5Z?#^rHhoV?6g8Am^zPTx+=sS7zfd`&%!*C0vW= zv6Le_CvTQAJ3G^%WSd$(;1}R$Y?{7APd?^op1%lhSXw7M3B%ymg7>}zQYs;2`hVCZ)b;pYvpBeT6ZqB;L>`_vG`0;PN7dSZ z8qTg4_5KTk^<;%op{x9czou@;S!3T?+n_P-${(yd=wz?15;FI140MOl?8X9~f$z%$ zURnYX2c={Vylnvvd=fl%UuTYMzsDgU&XhqM_2PMR8*rRj=9Hp|{x9DLx@%NuMw`o~ zmC}hj1x6*9`>-yvx~MG~0p*VBO^R9{oserW*N`Q@E_{lvI#rq(4|$;hn_!}Tkjcdx zV3teyIbFSbZi+sbR(nOAQ^#{DQ~NmG#z+JFNypmv)e=Jen`EyD2#{9%8E$sL{Yns9 zd7wb~3PRQQF|v1ZKcIF$aPm1=T>g@29AJ%iT&_|9YM})o%osUf?G^2!QC%+ zM7cDX&DJw|ykC@TOqOVr()aMvRQt_5WxkG|6sC38=m?#`bOh1!?nECN>fIV+a3Q*U zfo{N9n3plgrFhlZb&n|$+Y7lCtr{Y0@UEL`PlA@J(t`EF=PSeO*74{pnO$9ep~rvY z2EM?fQp}-tP-_oO7}%08J$^Cn0BQzs*)w4~g#Gh+(|D3i0l-OJYe-y+aN43vnpU+L zB+g>6eqsY+QD-6YPvtXIicxki;5@^fqgd|W2X>`QZ{dIhm#%Vz{?7UxlUxUkk=gI+ z&Q>pA#(J#;R7NfkmrOr*m@9L2L!_RIK+Zf+7{VuO3s;*XRIuYF^=WPdj>9rq>){id z)aDq1f9SxZcWs}pzvbX2TW}t5*h=`UG5Uk3l@j{SKO!dW+w|vUUtnf$RsrO<>+S@6 
zp>~jZ(2?CzJ0VfqLZiA6*!JH8y$}<1E(b_H(C4DJCAY#JkytW6f5lZw2Fzga6$^S~ z)2Z*o{Nu=M%sLPA%S{=i|$!#oLdi&p@KZQJvH&v*QLE+(cx z%wq<(fEgBh0%A$TJP2-!sA0VmaqCz(OnsQ=KyA^|p?{}giNicUW(#)%stb$c;<+mc-R}hVyeX4IChI#>brL;mRQWa-L|kcVLM@Wn{dJ0 z=Vgm`J$f%ZEPa&H025_bh|H`Rjq`r8(aC;hK5ENPev^tWjJS9hoFROVnVzuh8g z=I@P&CUU#AV_V4a;saYNQMOx2hUtQs3(2-nM@s%cVJ<7f#9*!p+rp&(iP>BuhIu>Y z(yOi8Jxl*k@#jJ;Oa$iIt1ZOsKM{Yd#KI(CuISk! will be interpreted as having a relative path from -the current working directory rather than from the place the file was -included from: - myPythonScript.py --flagfile=config/somefile.cfg - -If somefile.cfg includes further --flagfile= directives, these will be -referenced relative to the original CWD, not from the directory the -including flagfile was found in! - -The caveat applies to people who are including a series of nested files -in a different dir than they are executing out of. Relative path names -are always from CWD, not from the directory of the parent include -flagfile. We do now support '~' expanded directory names. - -Absolute path names ALWAYS work! - - -EXAMPLE USAGE: - - import gflags - FLAGS = gflags.FLAGS - - # Flag names are globally defined! So in general, we need to be - # careful to pick names that are unlikely to be used by other libraries. - # If there is a conflict, we'll get an error at import time. - gflags.DEFINE_string('name', 'Mr. President', 'your name') - gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0) - gflags.DEFINE_boolean('debug', False, 'produces debugging output') - gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender') - - def main(argv): - try: - argv = FLAGS(argv) # parse flags - except gflags.FlagsError, e: - print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS) - sys.exit(1) - if FLAGS.debug: print 'non-flag arguments:', argv - print 'Happy Birthday', FLAGS.name - if FLAGS.age is not None: - print 'You are a %s, who is %d years old' % (FLAGS.gender, FLAGS.age) - - if __name__ == '__main__': - main(sys.argv) - - -KEY FLAGS: - -As we already explained, each module gains access to all flags defined -by all the other modules it transitively imports. In the case of -non-trivial scripts, this means a lot of flags ... For documentation -purposes, it is good to identify the flags that are key (i.e., really -important) to a module. Clearly, the concept of "key flag" is a -subjective one. When trying to determine whether a flag is key to a -module or not, assume that you are trying to explain your module to a -potential user: which flags would you really like to mention first? - -We'll describe shortly how to declare which flags are key to a module. -For the moment, assume we know the set of key flags for each module. -Then, if you use the app.py module, you can use the --helpshort flag to -print only the help for the flags that are key to the main module, in a -human-readable format. - -NOTE: If you need to parse the flag help, do NOT use the output of ---help / --helpshort. That output is meant for human consumption, and -may be changed in the future. Instead, use --helpxml; flags that are -key for the main module are marked there with a yes element. - -The set of key flags for a module M is composed of: - -1. Flags defined by module M by calling a DEFINE_* function. - -2. Flags that module M explictly declares as key by using the function - - DECLARE_key_flag() - -3. 
Key flags of other modules that M specifies by using the function - - ADOPT_module_key_flags() - - This is a "bulk" declaration of key flags: each flag that is key for - becomes key for the current module too. - -Notice that if you do not use the functions described at points 2 and 3 -above, then --helpshort prints information only about the flags defined -by the main module of our script. In many cases, this behavior is good -enough. But if you move part of the main module code (together with the -related flags) into a different module, then it is nice to use -DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort -lists all relevant flags (otherwise, your code refactoring may confuse -your users). - -Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own -pluses and minuses: DECLARE_key_flag is more targeted and may lead a -more focused --helpshort documentation. ADOPT_module_key_flags is good -for cases when an entire module is considered key to the current script. -Also, it does not require updates to client scripts when a new flag is -added to the module. - - -EXAMPLE USAGE 2 (WITH KEY FLAGS): - -Consider an application that contains the following three files (two -auxiliary modules and a main module): - -File libfoo.py: - - import gflags - - gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start') - gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.') - - ... some code ... - -File libbar.py: - - import gflags - - gflags.DEFINE_string('bar_gfs_path', '/gfs/path', - 'Path to the GFS files for libbar.') - gflags.DEFINE_string('email_for_bar_errors', 'bar-team@google.com', - 'Email address for bug reports about module libbar.') - gflags.DEFINE_boolean('bar_risky_hack', False, - 'Turn on an experimental and buggy optimization.') - - ... some code ... - -File myscript.py: - - import gflags - import libfoo - import libbar - - gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.') - - # Declare that all flags that are key for libfoo are - # key for this module too. - gflags.ADOPT_module_key_flags(libfoo) - - # Declare that the flag --bar_gfs_path (defined in libbar) is key - # for this module. - gflags.DECLARE_key_flag('bar_gfs_path') - - ... some code ... - -When myscript is invoked with the flag --helpshort, the resulted help -message lists information about all the key flags for myscript: ---num_iterations, --num_replicas, --rpc2, and --bar_gfs_path (in -addition to the special flags --help and --helpshort). - -Of course, myscript uses all the flags declared by it (in this case, -just --num_replicas) or by any of the modules it transitively imports -(e.g., the modules libfoo, libbar). E.g., it can access the value of -FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key -flag for myscript. - - -OUTPUT FOR --helpxml: - -The --helpxml flag generates output with the following structure: - - - - PROGRAM_BASENAME - MAIN_MODULE_DOCSTRING - ( - [yes] - DECLARING_MODULE - FLAG_NAME - FLAG_HELP_MESSAGE - DEFAULT_FLAG_VALUE - CURRENT_FLAG_VALUE - FLAG_TYPE - [OPTIONAL_ELEMENTS] - )* - - -Notes: - -1. The output is intentionally similar to the output generated by the -C++ command-line flag library. The few differences are due to the -Python flags that do not have a C++ equivalent (at least not yet), -e.g., DEFINE_list. - -2. New XML elements may be added in the future. - -3. DEFAULT_FLAG_VALUE is in serialized form, i.e., the string you can -pass for this flag on the command-line. 
E.g., for a flag defined -using DEFINE_list, this field may be foo,bar, not ['foo', 'bar']. - -4. CURRENT_FLAG_VALUE is produced using str(). This means that the -string 'false' will be represented in the same way as the boolean -False. Using repr() would have removed this ambiguity and simplified -parsing, but would have broken the compatibility with the C++ -command-line flags. - -5. OPTIONAL_ELEMENTS describe elements relevant for certain kinds of -flags: lower_bound, upper_bound (for flags that specify bounds), -enum_value (for enum flags), list_separator (for flags that consist of -a list of values, separated by a special token). - -6. We do not provide any example here: please use --helpxml instead. -""" - -import cgi -import getopt -import os -import re -import string -import sys - -# Are we running at least python 2.2? -try: - if tuple(sys.version_info[:3]) < (2,2,0): - raise NotImplementedError("requires python 2.2.0 or later") -except AttributeError: # a very old python, that lacks sys.version_info - raise NotImplementedError("requires python 2.2.0 or later") - -# If we're not running at least python 2.2.1, define True, False, and bool. -# Thanks, Guido, for the code. -try: - True, False, bool -except NameError: - False = 0 - True = 1 - def bool(x): - if x: - return True - else: - return False - -# Are we running under pychecker? -_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules - - -def _GetCallingModule(): - """Returns the name of the module that's calling into this module. - - We generally use this function to get the name of the module calling a - DEFINE_foo... function. - """ - # Walk down the stack to find the first globals dict that's not ours. - for depth in range(1, sys.getrecursionlimit()): - if not sys._getframe(depth).f_globals is globals(): - globals_for_frame = sys._getframe(depth).f_globals - module_name = _GetModuleObjectAndName(globals_for_frame)[1] - if module_name is not None: - return module_name - raise AssertionError("No module was found") - - -def _GetThisModuleObjectAndName(): - """Returns: (module object, module name) for this module.""" - return _GetModuleObjectAndName(globals()) - - -# module exceptions: -class FlagsError(Exception): - """The base class for all flags errors.""" - pass - - -class DuplicateFlag(FlagsError): - """Raised if there is a flag naming conflict.""" - pass - - -class DuplicateFlagCannotPropagateNoneToSwig(DuplicateFlag): - """Special case of DuplicateFlag -- SWIG flag value can't be set to None. - - This can be raised when a duplicate flag is created. Even if allow_override is - True, we still abort if the new value is None, because it's currently - impossible to pass None default value back to SWIG. See FlagValues.SetDefault - for details. - """ - pass - - -# A DuplicateFlagError conveys more information than a -# DuplicateFlag. Since there are external modules that create -# DuplicateFlags, the interface to DuplicateFlag shouldn't change. -class DuplicateFlagError(DuplicateFlag): - - def __init__(self, flagname, flag_values): - self.flagname = flagname - message = "The flag '%s' is defined twice." 
% self.flagname - flags_by_module = flag_values.FlagsByModuleDict() - for module in flags_by_module: - for flag in flags_by_module[module]: - if flag.name == flagname or flag.short_name == flagname: - message = message + " First from " + module + "," - break - message = message + " Second from " + _GetCallingModule() - DuplicateFlag.__init__(self, message) - - -class IllegalFlagValue(FlagsError): - """The flag command line argument is illegal.""" - pass - - -class UnrecognizedFlag(FlagsError): - """Raised if a flag is unrecognized.""" - pass - - -# An UnrecognizedFlagError conveys more information than an -# UnrecognizedFlag. Since there are external modules that create -# DuplicateFlags, the interface to DuplicateFlag shouldn't change. -class UnrecognizedFlagError(UnrecognizedFlag): - def __init__(self, flagname): - self.flagname = flagname - UnrecognizedFlag.__init__( - self, "Unknown command line flag '%s'" % flagname) - - -# Global variable used by expvar -_exported_flags = {} -_help_width = 80 # width of help output - - -def GetHelpWidth(): - """Returns: an integer, the width of help lines that is used in TextWrap.""" - return _help_width - - -def CutCommonSpacePrefix(text): - """Removes a common space prefix from the lines of a multiline text. - - If the first line does not start with a space, it is left as it is and - only in the remaining lines a common space prefix is being searched - for. That means the first line will stay untouched. This is especially - useful to turn doc strings into help texts. This is because some - people prefer to have the doc comment start already after the - apostrophy and then align the following lines while others have the - apostrophies on a seperately line. - - The function also drops trailing empty lines and ignores empty lines - following the initial content line while calculating the initial - common whitespace. - - Args: - text: text to work on - - Returns: - the resulting text - """ - text_lines = text.splitlines() - # Drop trailing empty lines - while text_lines and not text_lines[-1]: - text_lines = text_lines[:-1] - if text_lines: - # We got some content, is the first line starting with a space? - if text_lines[0] and text_lines[0][0].isspace(): - text_first_line = [] - else: - text_first_line = [text_lines.pop(0)] - # Calculate length of common leading whitesppace (only over content lines) - common_prefix = os.path.commonprefix([line for line in text_lines if line]) - space_prefix_len = len(common_prefix) - len(common_prefix.lstrip()) - # If we have a common space prefix, drop it from all lines - if space_prefix_len: - for index in xrange(len(text_lines)): - if text_lines[index]: - text_lines[index] = text_lines[index][space_prefix_len:] - return '\n'.join(text_first_line + text_lines) - return '' - - -def TextWrap(text, length=None, indent='', firstline_indent=None, tabs=' '): - """Wraps a given text to a maximum line length and returns it. - - We turn lines that only contain whitespaces into empty lines. We keep - new lines and tabs (e.g., we do not treat tabs as spaces). 
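-
-  A typical call (an illustrative sketch; the exact output depends on
-  GetHelpWidth()) mirrors how the help renderer below uses this function:
-
-    wrapped = TextWrap(flag.help, indent='    ', firstline_indent='  ')
-
-  i.e. wrap the help text to the help width, indenting continuation lines
-  more deeply than the first line.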
- - Args: - text: text to wrap - length: maximum length of a line, includes indentation - if this is None then use GetHelpWidth() - indent: indent for all but first line - firstline_indent: indent for first line; if None, fall back to indent - tabs: replacement for tabs - - Returns: - wrapped text - - Raises: - FlagsError: if indent not shorter than length - FlagsError: if firstline_indent not shorter than length - """ - # Get defaults where callee used None - if length is None: - length = GetHelpWidth() - if indent is None: - indent = '' - if len(indent) >= length: - raise FlagsError('Indent must be shorter than length') - # In line we will be holding the current line which is to be started - # with indent (or firstline_indent if available) and then appended - # with words. - if firstline_indent is None: - firstline_indent = '' - line = indent - else: - line = firstline_indent - if len(firstline_indent) >= length: - raise FlagsError('First iline indent must be shorter than length') - - # If the callee does not care about tabs we simply convert them to - # spaces If callee wanted tabs to be single space then we do that - # already here. - if not tabs or tabs == ' ': - text = text.replace('\t', ' ') - else: - tabs_are_whitespace = not tabs.strip() - - line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE) - - # Split the text into lines and the lines with the regex above. The - # resulting lines are collected in result[]. For each split we get the - # spaces, the tabs and the next non white space (e.g. next word). - result = [] - for text_line in text.splitlines(): - # Store result length so we can find out whether processing the next - # line gave any new content - old_result_len = len(result) - # Process next line with line_regex. For optimization we do an rstrip(). - # - process tabs (changes either line or word, see below) - # - process word (first try to squeeze on line, then wrap or force wrap) - # Spaces found on the line are ignored, they get added while wrapping as - # needed. - for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()): - # If tabs weren't converted to spaces, handle them now - if current_tabs: - # If the last thing we added was a space anyway then drop - # it. But let's not get rid of the indentation. - if (((result and line != indent) or - (not result and line != firstline_indent)) and line[-1] == ' '): - line = line[:-1] - # Add the tabs, if that means adding whitespace, just add it at - # the line, the rstrip() code while shorten the line down if - # necessary - if tabs_are_whitespace: - line += tabs * len(current_tabs) - else: - # if not all tab replacement is whitespace we prepend it to the word - word = tabs * len(current_tabs) + word - # Handle the case where word cannot be squeezed onto current last line - if len(line) + len(word) > length and len(indent) + len(word) <= length: - result.append(line.rstrip()) - line = indent + word - word = '' - # No space left on line or can we append a space? - if len(line) + 1 >= length: - result.append(line.rstrip()) - line = indent - else: - line += ' ' - # Add word and shorten it up to allowed line length. Restart next - # line with indent and repeat, or add a space if we're done (word - # finished) This deals with words that caanot fit on one line - # (e.g. indent + word longer than allowed line length). 
- while len(line) + len(word) >= length: - line += word - result.append(line[:length]) - word = line[length:] - line = indent - # Default case, simply append the word and a space - if word: - line += word + ' ' - # End of input line. If we have content we finish the line. If the - # current line is just the indent but we had content in during this - # original line then we need to add an emoty line. - if (result and line != indent) or (not result and line != firstline_indent): - result.append(line.rstrip()) - elif len(result) == old_result_len: - result.append('') - line = indent - - return '\n'.join(result) - - -def DocToHelp(doc): - """Takes a __doc__ string and reformats it as help.""" - - # Get rid of starting and ending white space. Using lstrip() or even - # strip() could drop more than maximum of first line and right space - # of last line. - doc = doc.strip() - - # Get rid of all empty lines - whitespace_only_line = re.compile('^[ \t]+$', re.M) - doc = whitespace_only_line.sub('', doc) - - # Cut out common space at line beginnings - doc = CutCommonSpacePrefix(doc) - - # Just like this module's comment, comments tend to be aligned somehow. - # In other words they all start with the same amount of white space - # 1) keep double new lines - # 2) keep ws after new lines if not empty line - # 3) all other new lines shall be changed to a space - # Solution: Match new lines between non white space and replace with space. - doc = re.sub('(?<=\S)\n(?=\S)', ' ', doc, re.M) - - return doc - - -def _GetModuleObjectAndName(globals_dict): - """Returns the module that defines a global environment, and its name. - - Args: - globals_dict: A dictionary that should correspond to an environment - providing the values of the globals. - - Returns: - A pair consisting of (1) module object and (2) module name (a - string). Returns (None, None) if the module could not be - identified. - """ - # The use of .items() (instead of .iteritems()) is NOT a mistake: if - # a parallel thread imports a module while we iterate over - # .iteritems() (not nice, but possible), we get a RuntimeError ... - # Hence, we use the slightly slower but safer .items(). - for name, module in sys.modules.items(): - if getattr(module, '__dict__', None) is globals_dict: - if name == '__main__': - # Pick a more informative name for the main module. - name = sys.argv[0] - return (module, name) - return (None, None) - - -def _GetMainModule(): - """Returns the name of the module from which execution started.""" - for depth in range(1, sys.getrecursionlimit()): - try: - globals_of_main = sys._getframe(depth).f_globals - except ValueError: - return _GetModuleObjectAndName(globals_of_main)[1] - raise AssertionError("No module was found") - - -class FlagValues: - """Registry of 'Flag' objects. - - A 'FlagValues' can then scan command line arguments, passing flag - arguments through to the 'Flag' objects that it owns. It also - provides easy access to the flag values. Typically only one - 'FlagValues' object is needed by an application: gflags.FLAGS - - This class is heavily overloaded: - - 'Flag' objects are registered via __setitem__: - FLAGS['longname'] = x # register a new flag - - The .value attribute of the registered 'Flag' objects can be accessed - as attributes of this 'FlagValues' object, through __getattr__. 
Both - the long and short name of the original 'Flag' objects can be used to - access its value: - FLAGS.longname # parsed flag value - FLAGS.x # parsed flag value (short name) - - Command line arguments are scanned and passed to the registered 'Flag' - objects through the __call__ method. Unparsed arguments, including - argv[0] (e.g. the program name) are returned. - argv = FLAGS(sys.argv) # scan command line arguments - - The original registered Flag objects can be retrieved through the use - of the dictionary-like operator, __getitem__: - x = FLAGS['longname'] # access the registered Flag object - - The str() operator of a 'FlagValues' object provides help for all of - the registered 'Flag' objects. - """ - - def __init__(self): - # Since everything in this class is so heavily overloaded, the only - # way of defining and using fields is to access __dict__ directly. - - # Dictionary: flag name (string) -> Flag object. - self.__dict__['__flags'] = {} - # Dictionary: module name (string) -> list of Flag objects that are defined - # by that module. - self.__dict__['__flags_by_module'] = {} - # Dictionary: module name (string) -> list of Flag objects that are - # key for that module. - self.__dict__['__key_flags_by_module'] = {} - - # Set if we should use new style gnu_getopt rather than getopt when parsing - # the args. Only possible with Python 2.3+ - self.UseGnuGetOpt(False) - - def UseGnuGetOpt(self, use_gnu_getopt=True): - self.__dict__['__use_gnu_getopt'] = use_gnu_getopt - - def IsGnuGetOpt(self): - return self.__dict__['__use_gnu_getopt'] - - def FlagDict(self): - return self.__dict__['__flags'] - - def FlagsByModuleDict(self): - """Returns the dictionary of module_name -> list of defined flags. - - Returns: - A dictionary. Its keys are module names (strings). Its values - are lists of Flag objects. - """ - return self.__dict__['__flags_by_module'] - - def KeyFlagsByModuleDict(self): - """Returns the dictionary of module_name -> list of key flags. - - Returns: - A dictionary. Its keys are module names (strings). Its values - are lists of Flag objects. - """ - return self.__dict__['__key_flags_by_module'] - - def _RegisterFlagByModule(self, module_name, flag): - """Records the module that defines a specific flag. - - We keep track of which flag is defined by which module so that we - can later sort the flags by module. - - Args: - module_name: A string, the name of a Python module. - flag: A Flag object, a flag that is key to the module. - """ - flags_by_module = self.FlagsByModuleDict() - flags_by_module.setdefault(module_name, []).append(flag) - - def _RegisterKeyFlagForModule(self, module_name, flag): - """Specifies that a flag is a key flag for a module. - - Args: - module_name: A string, the name of a Python module. - flag: A Flag object, a flag that is key to the module. - """ - key_flags_by_module = self.KeyFlagsByModuleDict() - # The list of key flags for the module named module_name. - key_flags = key_flags_by_module.setdefault(module_name, []) - # Add flag, but avoid duplicates. - if flag not in key_flags: - key_flags.append(flag) - - def _GetFlagsDefinedByModule(self, module): - """Returns the list of flags defined by a module. - - Args: - module: A module object or a module name (a string). - - Returns: - A new list of Flag objects. Caller may update this list as he - wishes: none of those changes will affect the internals of this - FlagValue object. 
- """ - if not isinstance(module, str): - module = module.__name__ - - return list(self.FlagsByModuleDict().get(module, [])) - - def _GetKeyFlagsForModule(self, module): - """Returns the list of key flags for a module. - - Args: - module: A module object or a module name (a string) - - Returns: - A new list of Flag objects. Caller may update this list as he - wishes: none of those changes will affect the internals of this - FlagValue object. - """ - if not isinstance(module, str): - module = module.__name__ - - # Any flag is a key flag for the module that defined it. NOTE: - # key_flags is a fresh list: we can update it without affecting the - # internals of this FlagValues object. - key_flags = self._GetFlagsDefinedByModule(module) - - # Take into account flags explicitly declared as key for a module. - for flag in self.KeyFlagsByModuleDict().get(module, []): - if flag not in key_flags: - key_flags.append(flag) - return key_flags - - def AppendFlagValues(self, flag_values): - """Appends flags registered in another FlagValues instance. - - Args: - flag_values: registry to copy from - """ - for flag_name, flag in flag_values.FlagDict().iteritems(): - # Each flags with shortname appears here twice (once under its - # normal name, and again with its short name). To prevent - # problems (DuplicateFlagError) with double flag registration, we - # perform a check to make sure that the entry we're looking at is - # for its normal name. - if flag_name == flag.name: - self[flag_name] = flag - - def RemoveFlagValues(self, flag_values): - """Remove flags that were previously appended from another FlagValues. - - Args: - flag_values: registry containing flags to remove. - """ - for flag_name in flag_values.FlagDict(): - self.__delattr__(flag_name) - - def __setitem__(self, name, flag): - """Registers a new flag variable.""" - fl = self.FlagDict() - if not isinstance(flag, Flag): - raise IllegalFlagValue(flag) - if not isinstance(name, type("")): - raise FlagsError("Flag name must be a string") - if len(name) == 0: - raise FlagsError("Flag name cannot be empty") - # If running under pychecker, duplicate keys are likely to be - # defined. Disable check for duplicate keys when pycheck'ing. - if (fl.has_key(name) and not flag.allow_override and - not fl[name].allow_override and not _RUNNING_PYCHECKER): - raise DuplicateFlagError(name, self) - short_name = flag.short_name - if short_name is not None: - if (fl.has_key(short_name) and not flag.allow_override and - not fl[short_name].allow_override and not _RUNNING_PYCHECKER): - raise DuplicateFlagError(short_name, self) - fl[short_name] = flag - fl[name] = flag - global _exported_flags - _exported_flags[name] = flag - - def __getitem__(self, name): - """Retrieves the Flag object for the flag --name.""" - return self.FlagDict()[name] - - def __getattr__(self, name): - """Retrieves the 'value' attribute of the flag --name.""" - fl = self.FlagDict() - if not fl.has_key(name): - raise AttributeError(name) - return fl[name].value - - def __setattr__(self, name, value): - """Sets the 'value' attribute of the flag --name.""" - fl = self.FlagDict() - fl[name].value = value - return value - - def _FlagIsRegistered(self, flag_obj): - """Checks whether a Flag object is registered under some name. - - Note: this is non trivial: in addition to its normal name, a flag - may have a short name too. In self.FlagDict(), both the normal and - the short name are mapped to the same flag object. 
E.g., calling - only "del FLAGS.short_name" is not unregistering the corresponding - Flag object (it is still registered under the longer name). - - Args: - flag_obj: A Flag object. - - Returns: - A boolean: True iff flag_obj is registered under some name. - """ - flag_dict = self.FlagDict() - # Check whether flag_obj is registered under its long name. - name = flag_obj.name - if flag_dict.get(name, None) == flag_obj: - return True - # Check whether flag_obj is registered under its short name. - short_name = flag_obj.short_name - if (short_name is not None and - flag_dict.get(short_name, None) == flag_obj): - return True - # The flag cannot be registered under any other name, so we do not - # need to do a full search through the values of self.FlagDict(). - return False - - def __delattr__(self, flag_name): - """Deletes a previously-defined flag from a flag object. - - This method makes sure we can delete a flag by using - - del flag_values_object. - - E.g., - - flags.DEFINE_integer('foo', 1, 'Integer flag.') - del flags.FLAGS.foo - - Args: - flag_name: A string, the name of the flag to be deleted. - - Raises: - AttributeError: When there is no registered flag named flag_name. - """ - fl = self.FlagDict() - if flag_name not in fl: - raise AttributeError(flag_name) - - flag_obj = fl[flag_name] - del fl[flag_name] - - if not self._FlagIsRegistered(flag_obj): - # If the Flag object indicated by flag_name is no longer - # registered (please see the docstring of _FlagIsRegistered), then - # we delete the occurences of the flag object in all our internal - # dictionaries. - self.__RemoveFlagFromDictByModule(self.FlagsByModuleDict(), flag_obj) - self.__RemoveFlagFromDictByModule(self.KeyFlagsByModuleDict(), flag_obj) - - def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj): - """Removes a flag object from a module -> list of flags dictionary. - - Args: - flags_by_module_dict: A dictionary that maps module names to lists of - flags. - flag_obj: A flag object. - """ - for unused_module, flags_in_module in flags_by_module_dict.iteritems(): - # while (as opposed to if) takes care of multiple occurences of a - # flag in the list for the same module. - while flag_obj in flags_in_module: - flags_in_module.remove(flag_obj) - - def SetDefault(self, name, value): - """Changes the default value of the named flag object.""" - fl = self.FlagDict() - if not fl.has_key(name): - raise AttributeError(name) - fl[name].SetDefault(value) - - def __contains__(self, name): - """Returns True if name is a value (flag) in the dict.""" - return name in self.FlagDict() - - has_key = __contains__ # a synonym for __contains__() - - def __iter__(self): - return self.FlagDict().iterkeys() - - def __call__(self, argv): - """Parses flags from argv; stores parsed flags into this FlagValues object. - - All unparsed arguments are returned. Flags are parsed using the GNU - Program Argument Syntax Conventions, using getopt: - - http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt - - Args: - argv: argument list. Can be of any type that may be converted to a list. - - Returns: - The list of arguments not parsed as options, including argv[0] - - Raises: - FlagsError: on any parsing error - """ - # Support any sequence type that can be converted to a list - argv = list(argv) - - shortopts = "" - longopts = [] - - fl = self.FlagDict() - - # This pre parses the argv list for --flagfile=<> options. 
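- # (Illustrative: an argv of ['prog', '--flagfile=f.cfg', '--x=1'] becomes
- # ['prog', <the flag lines read from f.cfg>, '--x=1'] before getopt ever
- # sees it; see ReadFlagsFromFiles below.)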
- argv = argv[:1] + self.ReadFlagsFromFiles(argv[1:], force_gnu=False) - - # Correct the argv to support the google style of passing boolean - # parameters. Boolean parameters may be passed by using --mybool, - # --nomybool, --mybool=(true|false|1|0). getopt does not support - # having options that may or may not have a parameter. We replace - # instances of the short form --mybool and --nomybool with their - # full forms: --mybool=(true|false). - original_argv = list(argv) # list() makes a copy - shortest_matches = None - for name, flag in fl.items(): - if not flag.boolean: - continue - if shortest_matches is None: - # Determine the smallest allowable prefix for all flag names - shortest_matches = self.ShortestUniquePrefixes(fl) - no_name = 'no' + name - prefix = shortest_matches[name] - no_prefix = shortest_matches[no_name] - - # Replace all occurences of this boolean with extended forms - for arg_idx in range(1, len(argv)): - arg = argv[arg_idx] - if arg.find('=') >= 0: continue - if arg.startswith('--'+prefix) and ('--'+name).startswith(arg): - argv[arg_idx] = ('--%s=true' % name) - elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg): - argv[arg_idx] = ('--%s=false' % name) - - # Loop over all of the flags, building up the lists of short options - # and long options that will be passed to getopt. Short options are - # specified as a string of letters, each letter followed by a colon - # if it takes an argument. Long options are stored in an array of - # strings. Each string ends with an '=' if it takes an argument. - for name, flag in fl.items(): - longopts.append(name + "=") - if len(name) == 1: # one-letter option: allow short flag type also - shortopts += name - if not flag.boolean: - shortopts += ":" - - longopts.append('undefok=') - undefok_flags = [] - - # In case --undefok is specified, loop to pick up unrecognized - # options one by one. - unrecognized_opts = [] - args = argv[1:] - while True: - try: - if self.__dict__['__use_gnu_getopt']: - optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts) - else: - optlist, unparsed_args = getopt.getopt(args, shortopts, longopts) - break - except getopt.GetoptError, e: - if not e.opt or e.opt in fl: - # Not an unrecognized option, reraise the exception as a FlagsError - raise FlagsError(e) - # Handle an unrecognized option. - unrecognized_opts.append(e.opt) - # Remove offender from args and try again - for arg_index in range(len(args)): - if ((args[arg_index] == '--' + e.opt) or - (args[arg_index] == '-' + e.opt) or - args[arg_index].startswith('--' + e.opt + '=')): - args = args[0:arg_index] + args[arg_index+1:] - break - else: - # We should have found the option, so we don't expect to get - # here. We could assert, but raising the original exception - # might work better. - raise FlagsError(e) - - for name, arg in optlist: - if name == '--undefok': - flag_names = arg.split(',') - undefok_flags.extend(flag_names) - # For boolean flags, if --undefok=boolflag is specified, then we should - # also accept --noboolflag, in addition to --boolflag. - # Since we don't know the type of the undefok'd flag, this will affect - # non-boolean flags as well. - # NOTE: You shouldn't use --undefok=noboolflag, because then we will - # accept --nonoboolflag here. We are choosing not to do the conversion - # from noboolflag -> boolflag because of the ambiguity that flag names - # can start with 'no'. 
- undefok_flags.extend('no' + name for name in flag_names) - continue - if name.startswith('--'): - # long option - name = name[2:] - short_option = 0 - else: - # short option - name = name[1:] - short_option = 1 - if fl.has_key(name): - flag = fl[name] - if flag.boolean and short_option: arg = 1 - flag.Parse(arg) - - # If there were unrecognized options, raise an exception unless - # the options were named via --undefok. - for opt in unrecognized_opts: - if opt not in undefok_flags: - raise UnrecognizedFlagError(opt) - - if unparsed_args: - if self.__dict__['__use_gnu_getopt']: - # if using gnu_getopt just return the program name + remainder of argv. - return argv[:1] + unparsed_args - else: - # unparsed_args becomes the first non-flag detected by getopt to - # the end of argv. Because argv may have been modified above, - # return original_argv for this region. - return argv[:1] + original_argv[-len(unparsed_args):] - else: - return argv[:1] - - def Reset(self): - """Resets the values to the point before FLAGS(argv) was called.""" - for f in self.FlagDict().values(): - f.Unparse() - - def RegisteredFlags(self): - """Returns: a list of the names and short names of all registered flags.""" - return self.FlagDict().keys() - - def FlagValuesDict(self): - """Returns: a dictionary that maps flag names to flag values.""" - flag_values = {} - - for flag_name in self.RegisteredFlags(): - flag = self.FlagDict()[flag_name] - flag_values[flag_name] = flag.value - - return flag_values - - def __str__(self): - """Generates a help string for all known flags.""" - return self.GetHelp() - - def GetHelp(self, prefix=''): - """Generates a help string for all known flags.""" - helplist = [] - - flags_by_module = self.FlagsByModuleDict() - if flags_by_module: - - modules = flags_by_module.keys() - modules.sort() - - # Print the help for the main module first, if possible. - main_module = _GetMainModule() - if main_module in modules: - modules.remove(main_module) - modules = [main_module] + modules - - for module in modules: - self.__RenderOurModuleFlags(module, helplist) - - self.__RenderModuleFlags('gflags', - _SPECIAL_FLAGS.FlagDict().values(), - helplist) - - else: - # Just print one long list of flags. - self.__RenderFlagList( - self.FlagDict().values() + _SPECIAL_FLAGS.FlagDict().values(), - helplist, prefix) - - return '\n'.join(helplist) - - def __RenderModuleFlags(self, module, flags, output_lines, prefix=""): - """Generates a help string for a given module.""" - if not isinstance(module, str): - module = module.__name__ - output_lines.append('\n%s%s:' % (prefix, module)) - self.__RenderFlagList(flags, output_lines, prefix + " ") - - def __RenderOurModuleFlags(self, module, output_lines, prefix=""): - """Generates a help string for a given module.""" - flags = self._GetFlagsDefinedByModule(module) - if flags: - self.__RenderModuleFlags(module, flags, output_lines, prefix) - - def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""): - """Generates a help string for the key flags of a given module. - - Args: - module: A module object or a module name (a string). - output_lines: A list of strings. The generated help message - lines will be appended to this list. - prefix: A string that is prepended to each generated help line. - """ - key_flags = self._GetKeyFlagsForModule(module) - if key_flags: - self.__RenderModuleFlags(module, key_flags, output_lines, prefix) - - def ModuleHelp(self, module): - """Describe the key flags of a module. 
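-
-    For example (illustrative, reusing the libfoo module from the module
-    docstring above), print FLAGS.ModuleHelp(libfoo) prints help text for
-    libfoo's key flags only.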
- - Args: - module: A module object or a module name (a string). - - Returns: - string describing the key flags of a module. - """ - helplist = [] - self.__RenderOurModuleKeyFlags(module, helplist) - return '\n'.join(helplist) - - def MainModuleHelp(self): - """Describe the key flags of the main module. - - Returns: - string describing the key flags of a module. - """ - return self.ModuleHelp(_GetMainModule()) - - def __RenderFlagList(self, flaglist, output_lines, prefix=" "): - fl = self.FlagDict() - special_fl = _SPECIAL_FLAGS.FlagDict() - flaglist = [(flag.name, flag) for flag in flaglist] - flaglist.sort() - flagset = {} - for (name, flag) in flaglist: - # It's possible this flag got deleted or overridden since being - # registered in the per-module flaglist. Check now against the - # canonical source of current flag information, the FlagDict. - if fl.get(name, None) != flag and special_fl.get(name, None) != flag: - # a different flag is using this name now - continue - # only print help once - if flagset.has_key(flag): continue - flagset[flag] = 1 - flaghelp = "" - if flag.short_name: flaghelp += "-%s," % flag.short_name - if flag.boolean: - flaghelp += "--[no]%s" % flag.name + ":" - else: - flaghelp += "--%s" % flag.name + ":" - flaghelp += " " - if flag.help: - flaghelp += flag.help - flaghelp = TextWrap(flaghelp, indent=prefix+" ", - firstline_indent=prefix) - if flag.default_as_str: - flaghelp += "\n" - flaghelp += TextWrap("(default: %s)" % flag.default_as_str, - indent=prefix+" ") - if flag.parser.syntactic_help: - flaghelp += "\n" - flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help, - indent=prefix+" ") - output_lines.append(flaghelp) - - def get(self, name, default): - """Returns the value of a flag (if not None) or a default value. - - Args: - name: A string, the name of a flag. - default: Default value to use if the flag value is None. - """ - - value = self.__getattr__(name) - if value is not None: # Can't do if not value, b/c value might be '0' or "" - return value - else: - return default - - def ShortestUniquePrefixes(self, fl): - """Returns: dictionary; maps flag names to their shortest unique prefix.""" - # Sort the list of flag names - sorted_flags = [] - for name, flag in fl.items(): - sorted_flags.append(name) - if flag.boolean: - sorted_flags.append('no%s' % name) - sorted_flags.sort() - - # For each name in the sorted list, determine the shortest unique - # prefix by comparing itself to the next name and to the previous - # name (the latter check uses cached info from the previous loop). 
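- # (Illustrative: for the flag names 'frame' and 'fruit', the shortest
- # unique prefixes computed here are 'fra' and 'fru' respectively.)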
- shortest_matches = {} - prev_idx = 0 - for flag_idx in range(len(sorted_flags)): - curr = sorted_flags[flag_idx] - if flag_idx == (len(sorted_flags) - 1): - next = None - else: - next = sorted_flags[flag_idx+1] - next_len = len(next) - for curr_idx in range(len(curr)): - if (next is None - or curr_idx >= next_len - or curr[curr_idx] != next[curr_idx]): - # curr longer than next or no more chars in common - shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1] - prev_idx = curr_idx - break - else: - # curr shorter than (or equal to) next - shortest_matches[curr] = curr - prev_idx = curr_idx + 1 # next will need at least one more char - return shortest_matches - - def __IsFlagFileDirective(self, flag_string): - """Checks whether flag_string contain a --flagfile= directive.""" - if isinstance(flag_string, type("")): - if flag_string.startswith('--flagfile='): - return 1 - elif flag_string == '--flagfile': - return 1 - elif flag_string.startswith('-flagfile='): - return 1 - elif flag_string == '-flagfile': - return 1 - else: - return 0 - return 0 - - def ExtractFilename(self, flagfile_str): - """Returns filename from a flagfile_str of form -[-]flagfile=filename. - - The cases of --flagfile foo and -flagfile foo shouldn't be hitting - this function, as they are dealt with in the level above this - function. - """ - if flagfile_str.startswith('--flagfile='): - return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip()) - elif flagfile_str.startswith('-flagfile='): - return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip()) - else: - raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str) - - def __GetFlagFileLines(self, filename, parsed_file_list): - """Returns the useful (!=comments, etc) lines from a file with flags. - - Args: - filename: A string, the name of the flag file. - parsed_file_list: A list of the names of the files we have - already read. MUTATED BY THIS FUNCTION. - - Returns: - List of strings. See the note below. - - NOTE(springer): This function checks for a nested --flagfile= - tag and handles the lower file recursively. It returns a list of - all the lines that _could_ contain command flags. This is - EVERYTHING except whitespace lines and comments (lines starting - with '#' or '//'). - """ - line_list = [] # All line from flagfile. - flag_line_list = [] # Subset of lines w/o comments, blanks, flagfile= tags. - try: - file_obj = open(filename, 'r') - except IOError, e_msg: - print e_msg - print 'ERROR:: Unable to open flagfile: %s' % (filename) - return flag_line_list - - line_list = file_obj.readlines() - file_obj.close() - parsed_file_list.append(filename) - - # This is where we check each line in the file we just read. - for line in line_list: - if line.isspace(): - pass - # Checks for comment (a line that starts with '#'). - elif line.startswith('#') or line.startswith('//'): - pass - # Checks for a nested "--flagfile=" flag in the current file. - # If we find one, recursively parse down into that file. - elif self.__IsFlagFileDirective(line): - sub_filename = self.ExtractFilename(line) - # We do a little safety check for reparsing a file we've already done. - if not sub_filename in parsed_file_list: - included_flags = self.__GetFlagFileLines(sub_filename, - parsed_file_list) - flag_line_list.extend(included_flags) - else: # Case of hitting a circularly included file. 
- print >>sys.stderr, ('Warning: Hit circular flagfile dependency: %s' - % sub_filename) - else: - # Any line that's not a comment or a nested flagfile should get - # copied into 2nd position. This leaves earlier arguements - # further back in the list, thus giving them higher priority. - flag_line_list.append(line.strip()) - return flag_line_list - - def ReadFlagsFromFiles(self, argv, force_gnu=True): - """Processes command line args, but also allow args to be read from file. - Args: - argv: A list of strings, usually sys.argv[1:], which may contain one or - more flagfile directives of the form --flagfile="./filename". - Note that the name of the program (sys.argv[0]) should be omitted. - force_gnu: If False, --flagfile parsing obeys normal flag semantics. - If True, --flagfile parsing instead follows gnu_getopt semantics. - *** WARNING *** force_gnu=False may become the future default! - - Returns: - - A new list which has the original list combined with what we read - from any flagfile(s). - - References: Global gflags.FLAG class instance. - - This function should be called before the normal FLAGS(argv) call. - This function scans the input list for a flag that looks like: - --flagfile=. Then it opens , reads all valid key - and value pairs and inserts them into the input list between the - first item of the list and any subsequent items in the list. - - Note that your application's flags are still defined the usual way - using gflags DEFINE_flag() type functions. - - Notes (assuming we're getting a commandline of some sort as our input): - --> Flags from the command line argv _should_ always take precedence! - --> A further "--flagfile=" CAN be nested in a flagfile. - It will be processed after the parent flag file is done. - --> For duplicate flags, first one we hit should "win". - --> In a flagfile, a line beginning with # or // is a comment. - --> Entirely blank lines _should_ be ignored. - """ - parsed_file_list = [] - rest_of_args = argv - new_argv = [] - while rest_of_args: - current_arg = rest_of_args[0] - rest_of_args = rest_of_args[1:] - if self.__IsFlagFileDirective(current_arg): - # This handles the case of -(-)flagfile foo. In this case the - # next arg really is part of this one. - if current_arg == '--flagfile' or current_arg == '-flagfile': - if not rest_of_args: - raise IllegalFlagValue('--flagfile with no argument') - flag_filename = os.path.expanduser(rest_of_args[0]) - rest_of_args = rest_of_args[1:] - else: - # This handles the case of (-)-flagfile=foo. - flag_filename = self.ExtractFilename(current_arg) - new_argv[0:0] = self.__GetFlagFileLines(flag_filename, parsed_file_list) - else: - new_argv.append(current_arg) - # Stop parsing after '--', like getopt and gnu_getopt. - if current_arg == '--': - break - # Stop parsing after a non-flag, like getopt. - if not current_arg.startswith('-'): - if not force_gnu and not self.__dict__['__use_gnu_getopt']: - break - - if rest_of_args: - new_argv.extend(rest_of_args) - - return new_argv - - def FlagsIntoString(self): - """Returns a string with the flags assignments from this FlagValues object. - - This function ignores flags whose value is None. Each flag - assignment is separated by a newline. - - NOTE: MUST mirror the behavior of the C++ function - CommandlineFlagsIntoString from google3/base/commandlineflags.cc. 
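-
-    (Illustrative: with the string flag from the module docstring example
-    set, the output would contain a line such as '--name=Mr. President',
-    one assignment per line.)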
- """ - s = '' - for flag in self.FlagDict().values(): - if flag.value is not None: - s += flag.Serialize() + '\n' - return s - - def AppendFlagsIntoFile(self, filename): - """Appends all flags assignments from this FlagInfo object to a file. - - Output will be in the format of a flagfile. - - NOTE: MUST mirror the behavior of the C++ version of - AppendFlagsIntoFile from google3/base/commandlineflags.cc. - """ - out_file = open(filename, 'a') - out_file.write(self.FlagsIntoString()) - out_file.close() - - def WriteHelpInXMLFormat(self, outfile=None): - """Outputs flag documentation in XML format. - - NOTE: We use element names that are consistent with those used by - the C++ command-line flag library, from - google3/base/commandlineflags_reporting.cc. We also use a few new - elements (e.g., ), but we do not interfere / overlap with - existing XML elements used by the C++ library. Please maintain this - consistency. - - Args: - outfile: File object we write to. Default None means sys.stdout. - """ - outfile = outfile or sys.stdout - - outfile.write('\n') - outfile.write('\n') - indent = ' ' - _WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]), - indent) - - usage_doc = sys.modules['__main__'].__doc__ - if not usage_doc: - usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0] - else: - usage_doc = usage_doc.replace('%s', sys.argv[0]) - _WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent) - - # Get list of key flags for the main module. - key_flags = self._GetKeyFlagsForModule(_GetMainModule()) - - # Sort flags by declaring module name and next by flag name. - flags_by_module = self.FlagsByModuleDict() - all_module_names = list(flags_by_module.keys()) - all_module_names.sort() - for module_name in all_module_names: - flag_list = [(f.name, f) for f in flags_by_module[module_name]] - flag_list.sort() - for unused_flag_name, flag in flag_list: - is_key = flag in key_flags - flag.WriteInfoInXMLFormat(outfile, module_name, - is_key=is_key, indent=indent) - - outfile.write('\n') - outfile.flush() -# end of FlagValues definition - - -# The global FlagValues instance -FLAGS = FlagValues() - - -def _MakeXMLSafe(s): - """Escapes <, >, and & from s, and removes XML 1.0-illegal chars.""" - s = cgi.escape(s) # Escape <, >, and & - # Remove characters that cannot appear in an XML 1.0 document - # (http://www.w3.org/TR/REC-xml/#charsets). - # - # NOTE: if there are problems with current solution, one may move to - # XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;). - s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s) - return s - - -def _WriteSimpleXMLElement(outfile, name, value, indent): - """Writes a simple XML element. - - Args: - outfile: File object we write the XML element to. - name: A string, the name of XML element. - value: A Python object, whose string representation will be used - as the value of the XML element. - indent: A string, prepended to each line of generated output. - """ - value_str = str(value) - if isinstance(value, bool): - # Display boolean values as the C++ flag library does: no caps. - value_str = value_str.lower() - outfile.write('%s<%s>%s\n' % - (indent, name, _MakeXMLSafe(value_str), name)) - - -class Flag: - """Information about a command-line flag. 
- - 'Flag' objects define the following fields: - .name - the name for this flag - .default - the default value for this flag - .default_as_str - default value as repr'd string, e.g., "'true'" (or None) - .value - the most recent parsed value of this flag; set by Parse() - .help - a help string or None if no help is available - .short_name - the single letter alias for this flag (or None) - .boolean - if 'true', this flag does not accept arguments - .present - true if this flag was parsed from command line flags. - .parser - an ArgumentParser object - .serializer - an ArgumentSerializer object - .allow_override - the flag may be redefined without raising an error - - The only public method of a 'Flag' object is Parse(), but it is - typically only called by a 'FlagValues' object. The Parse() method is - a thin wrapper around the 'ArgumentParser' Parse() method. The parsed - value is saved in .value, and the .present attribute is updated. If - this flag was already present, a FlagsError is raised. - - Parse() is also called during __init__ to parse the default value and - initialize the .value attribute. This enables other python modules to - safely use flags even if the __main__ module neglects to parse the - command line arguments. The .present attribute is cleared after - __init__ parsing. If the default value is set to None, then the - __init__ parsing step is skipped and the .value attribute is - initialized to None. - - Note: The default value is also presented to the user in the help - string, so it is important that it be a legal value for this flag. - """ - - def __init__(self, parser, serializer, name, default, help_string, - short_name=None, boolean=0, allow_override=0): - self.name = name - - if not help_string: - help_string = '(no help available)' - - self.help = help_string - self.short_name = short_name - self.boolean = boolean - self.present = 0 - self.parser = parser - self.serializer = serializer - self.allow_override = allow_override - self.value = None - - self.SetDefault(default) - - def __GetParsedValueAsString(self, value): - if value is None: - return None - if self.serializer: - return repr(self.serializer.Serialize(value)) - if self.boolean: - if value: - return repr('true') - else: - return repr('false') - return repr(str(value)) - - def Parse(self, argument): - try: - self.value = self.parser.Parse(argument) - except ValueError, e: # recast ValueError as IllegalFlagValue - raise IllegalFlagValue("flag --%s=%s: %s" % (self.name, argument, e)) - self.present += 1 - - def Unparse(self): - if self.default is None: - self.value = None - else: - self.Parse(self.default) - self.present = 0 - - def Serialize(self): - if self.value is None: - return '' - if self.boolean: - if self.value: - return "--%s" % self.name - else: - return "--no%s" % self.name - else: - if not self.serializer: - raise FlagsError("Serializer not present for flag %s" % self.name) - return "--%s=%s" % (self.name, self.serializer.Serialize(self.value)) - - def SetDefault(self, value): - """Changes the default value (and current value too) for this Flag.""" - # We can't allow a None override because it may end up not being - # passed to C++ code when we're overriding C++ flags. So we - # cowardly bail out until someone fixes the semantics of trying to - # pass None to a C++ flag. See swig_flags.Init() for details on - # this behavior. 
- if value is None and self.allow_override: - raise DuplicateFlagCannotPropagateNoneToSwig(self.name) - - self.default = value - self.Unparse() - self.default_as_str = self.__GetParsedValueAsString(self.value) - - def Type(self): - """Returns: a string that describes the type of this Flag.""" - # NOTE: we use strings, and not the types.*Type constants because - # our flags can have more exotic types, e.g., 'comma separated list - # of strings', 'whitespace separated list of strings', etc. - return self.parser.Type() - - def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''): - """Writes common info about this flag, in XML format. - - This is information that is relevant to all flags (e.g., name, - meaning, etc.). If you defined a flag that has some other pieces of - info, then please override _WriteCustomInfoInXMLFormat. - - Please do NOT override this method. - - Args: - outfile: File object we write to. - module_name: A string, the name of the module that defines this flag. - is_key: A boolean, True iff this flag is key for main module. - indent: A string that is prepended to each generated line. - """ - outfile.write(indent + '\n') - inner_indent = indent + ' ' - if is_key: - _WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent) - _WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent) - # Print flag features that are relevant for all flags. - _WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent) - if self.short_name: - _WriteSimpleXMLElement(outfile, 'short_name', self.short_name, - inner_indent) - if self.help: - _WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent) - # The default flag value can either be represented as a string like on the - # command line, or as a Python object. We serialize this value in the - # latter case in order to remain consistent. - if self.serializer and not isinstance(self.default, str): - default_serialized = self.serializer.Serialize(self.default) - else: - default_serialized = self.default - _WriteSimpleXMLElement(outfile, 'default', default_serialized, inner_indent) - _WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent) - _WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent) - # Print extra flag features this flag may have. - self._WriteCustomInfoInXMLFormat(outfile, inner_indent) - outfile.write(indent + '\n') - - def _WriteCustomInfoInXMLFormat(self, outfile, indent): - """Writes extra info about this flag, in XML format. - - "Extra" means "not already printed by WriteInfoInXMLFormat above." - - Args: - outfile: File object we write to. - indent: A string that is prepended to each generated line. - """ - # Usually, the parser knows the extra details about the flag, so - # we just forward the call to it. - self.parser.WriteCustomInfoInXMLFormat(outfile, indent) -# End of Flag definition - - -class ArgumentParser: - """Base class used to parse and convert arguments. - - The Parse() method checks to make sure that the string argument is a - legal value and convert it to a native type. If the value cannot be - converted, it should throw a 'ValueError' exception with a human - readable explanation of why the value is illegal. - - Subclasses should also define a syntactic_help string which may be - presented to the user to describe the form of the legal values. 
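-
-  A minimal subclass might look like this (an illustrative sketch, not a
-  parser shipped with this module):
-
-    class EvenIntegerParser(ArgumentParser):
-      syntactic_help = 'an even integer'
-
-      def Parse(self, argument):
-        value = int(argument)
-        if value % 2:
-          raise ValueError('%s is not an even integer' % argument)
-        return value
-
-      def Type(self):
-        return 'int'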
- """ - syntactic_help = "" - - def Parse(self, argument): - """Default implementation: always returns its argument unmodified.""" - return argument - - def Type(self): - return 'string' - - def WriteCustomInfoInXMLFormat(self, outfile, indent): - pass - - -class ArgumentSerializer: - """Base class for generating string representations of a flag value.""" - - def Serialize(self, value): - return str(value) - - -class ListSerializer(ArgumentSerializer): - - def __init__(self, list_sep): - self.list_sep = list_sep - - def Serialize(self, value): - return self.list_sep.join([str(x) for x in value]) - - -# The DEFINE functions are explained in mode details in the module doc string. - - -def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None, - **args): - """Registers a generic Flag object. - - NOTE: in the docstrings of all DEFINE* functions, "registers" is short - for "creates a new flag and registers it". - - Auxiliary function: clients should use the specialized DEFINE_ - function instead. - - Args: - parser: ArgumentParser that is used to parse the flag arguments. - name: A string, the flag name. - default: The default value of the flag. - help: A help string. - flag_values: FlagValues object the flag will be registered with. - serializer: ArgumentSerializer that serializes the flag value. - args: Dictionary with extra keyword args that are passes to the - Flag __init__. - """ - DEFINE_flag(Flag(parser, serializer, name, default, help, **args), - flag_values) - - -def DEFINE_flag(flag, flag_values=FLAGS): - """Registers a 'Flag' object with a 'FlagValues' object. - - By default, the global FLAGS 'FlagValue' object is used. - - Typical users will use one of the more specialized DEFINE_xxx - functions, such as DEFINE_string or DEFINE_integer. But developers - who need to create Flag objects themselves should use this function - to register their flags. - """ - # copying the reference to flag_values prevents pychecker warnings - fv = flag_values - fv[flag.name] = flag - # Tell flag_values who's defining the flag. - if isinstance(flag_values, FlagValues): - # Regarding the above isinstance test: some users pass funny - # values of flag_values (e.g., {}) in order to avoid the flag - # registration (in the past, there used to be a flag_values == - # FLAGS test here) and redefine flags with the same name (e.g., - # debug). To avoid breaking their code, we perform the - # registration only if flag_values is a real FlagValues object. - flag_values._RegisterFlagByModule(_GetCallingModule(), flag) - - -def _InternalDeclareKeyFlags(flag_names, - flag_values=FLAGS, key_flag_values=None): - """Declares a flag as key for the calling module. - - Internal function. User code should call DECLARE_key_flag or - ADOPT_module_key_flags instead. - - Args: - flag_names: A list of strings that are names of already-registered - Flag objects. - flag_values: A FlagValues object that the flags listed in - flag_names have registered with (the value of the flag_values - argument from the DEFINE_* calls that defined those flags). - This should almost never need to be overridden. - key_flag_values: A FlagValues object that (among possibly many - other things) keeps track of the key flags for each module. - Default None means "same as flag_values". This should almost - never need to be overridden. - - Raises: - UnrecognizedFlagError: when we refer to a flag that was not - defined yet. 
- """ - key_flag_values = key_flag_values or flag_values - - module = _GetCallingModule() - - for flag_name in flag_names: - if flag_name not in flag_values: - raise UnrecognizedFlagError(flag_name) - flag = flag_values.FlagDict()[flag_name] - key_flag_values._RegisterKeyFlagForModule(module, flag) - - -def DECLARE_key_flag(flag_name, flag_values=FLAGS): - """Declares one flag as key to the current module. - - Key flags are flags that are deemed really important for a module. - They are important when listing help messages; e.g., if the - --helpshort command-line flag is used, then only the key flags of the - main module are listed (instead of all flags, as in the case of - --help). - - Sample usage: - - flags.DECLARED_key_flag('flag_1') - - Args: - flag_name: A string, the name of an already declared flag. - (Redeclaring flags as key, including flags implicitly key - because they were declared in this module, is a no-op.) - flag_values: A FlagValues object. This should almost never - need to be overridden. - """ - if flag_name in _SPECIAL_FLAGS: - # Take care of the special flags, e.g., --flagfile, --undefok. - # These flags are defined in _SPECIAL_FLAGS, and are treated - # specially during flag parsing, taking precedence over the - # user-defined flags. - _InternalDeclareKeyFlags([flag_name], - flag_values=_SPECIAL_FLAGS, - key_flag_values=flag_values) - return - _InternalDeclareKeyFlags([flag_name], flag_values=flag_values) - - -def ADOPT_module_key_flags(module, flag_values=FLAGS): - """Declares that all flags key to a module are key to the current module. - - Args: - module: A module object. - flag_values: A FlagValues object. This should almost never need - to be overridden. - - Raises: - FlagsError: When given an argument that is a module name (a - string), instead of a module object. - """ - # NOTE(salcianu): an even better test would be if not - # isinstance(module, types.ModuleType) but I didn't want to import - # types for such a tiny use. - if isinstance(module, str): - raise FlagsError('Received module name %s; expected a module object.' - % module) - _InternalDeclareKeyFlags( - [f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)], - flag_values=flag_values) - # If module is this flag module, take _SPECIAL_FLAGS into account. - if module == _GetThisModuleObjectAndName()[0]: - _InternalDeclareKeyFlags( - # As we associate flags with _GetCallingModule(), the special - # flags defined in this module are incorrectly registered with - # a different module. So, we can't use _GetKeyFlagsForModule. - # Instead, we take all flags from _SPECIAL_FLAGS (a private - # FlagValues, where no other module should register flags). - [f.name for f in _SPECIAL_FLAGS.FlagDict().values()], - flag_values=_SPECIAL_FLAGS, - key_flag_values=flag_values) - - -# -# STRING FLAGS -# - - -def DEFINE_string(name, default, help, flag_values=FLAGS, **args): - """Registers a flag whose value can be any string.""" - parser = ArgumentParser() - serializer = ArgumentSerializer() - DEFINE(parser, name, default, help, flag_values, serializer, **args) - - -# -# BOOLEAN FLAGS -# -# and the special HELP flags. 
- -class BooleanParser(ArgumentParser): - """Parser of boolean values.""" - - def Convert(self, argument): - """Converts the argument to a boolean; raises ValueError on errors.""" - if type(argument) == str: - if argument.lower() in ['true', 't', '1']: - return True - elif argument.lower() in ['false', 'f', '0']: - return False - - bool_argument = bool(argument) - if argument == bool_argument: - # The argument is a valid boolean (True, False, 0, or 1), and not just - # something that always converts to bool (list, string, int, etc.). - return bool_argument - - raise ValueError('Non-boolean argument to boolean flag', argument) - - def Parse(self, argument): - val = self.Convert(argument) - return val - - def Type(self): - return 'bool' - - -class BooleanFlag(Flag): - """Basic boolean flag. - - Boolean flags do not take any arguments, and their value is either - True (1) or False (0). The false value is specified on the command - line by prepending the word 'no' to either the long or the short flag - name. - - For example, if a Boolean flag was created whose long name was - 'update' and whose short name was 'x', then this flag could be - explicitly unset through either --noupdate or --nox. - """ - - def __init__(self, name, default, help, short_name=None, **args): - p = BooleanParser() - Flag.__init__(self, p, None, name, default, help, short_name, 1, **args) - if not self.help: self.help = "a boolean value" - - -def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args): - """Registers a boolean flag. - - Such a boolean flag does not take an argument. If a user wants to - specify a false value explicitly, the long option beginning with 'no' - must be used: i.e. --noflag - - This flag will have a value of None, True or False. None is possible - if default=None and the user does not specify the flag on the command - line. - """ - DEFINE_flag(BooleanFlag(name, default, help, **args), flag_values) - -# Match C++ API to unconfuse C++ people. -DEFINE_bool = DEFINE_boolean - -class HelpFlag(BooleanFlag): - """ - HelpFlag is a special boolean flag that prints usage information and - raises a SystemExit exception if it is ever found in the command - line arguments. Note this is called with allow_override=1, so other - apps can define their own --help flag, replacing this one, if they want. - """ - def __init__(self): - BooleanFlag.__init__(self, "help", 0, "show this help", - short_name="?", allow_override=1) - def Parse(self, arg): - if arg: - doc = sys.modules["__main__"].__doc__ - flags = str(FLAGS) - print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0]) - if flags: - print "flags:" - print flags - sys.exit(1) - - -class HelpXMLFlag(BooleanFlag): - """Similar to HelpFlag, but generates output in XML format.""" - - def __init__(self): - BooleanFlag.__init__(self, 'helpxml', False, - 'like --help, but generates XML output', - allow_override=1) - - def Parse(self, arg): - if arg: - FLAGS.WriteHelpInXMLFormat(sys.stdout) - sys.exit(1) - - -class HelpshortFlag(BooleanFlag): - """ - HelpshortFlag is a special boolean flag that prints usage - information for the "main" module, and raises a SystemExit exception - if it is ever found in the command line arguments. Note this is - called with allow_override=1, so other apps can define their own - --helpshort flag, replacing this one, if they want.
- """ - def __init__(self): - BooleanFlag.__init__(self, "helpshort", 0, - "show usage only for this module", allow_override=1) - def Parse(self, arg): - if arg: - doc = sys.modules["__main__"].__doc__ - flags = FLAGS.MainModuleHelp() - print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0]) - if flags: - print "flags:" - print flags - sys.exit(1) - -# -# Numeric parser - base class for Integer and Float parsers -# - - -class NumericParser(ArgumentParser): - """Parser of numeric values. - - Parsed value may be bounded to a given upper and lower bound. - """ - - def Parse(self, argument): - val = self.Convert(argument) - if ((self.lower_bound is not None and val < self.lower_bound) or - (self.upper_bound is not None and val > self.upper_bound)): - raise ValueError("%s is not %s" % (val, self.syntactic_help)) - return val - - def WriteCustomInfoInXMLFormat(self, outfile, indent): - if self.lower_bound is not None: - _WriteSimpleXMLElement(outfile, 'lower_bound', self.lower_bound, indent) - if self.upper_bound is not None: - _WriteSimpleXMLElement(outfile, 'upper_bound', self.upper_bound, indent) - - def Convert(self, argument): - """Default implementation: always returns its argument unmodified.""" - return argument - -# End of Numeric Parser - -# -# FLOAT FLAGS -# - -class FloatParser(NumericParser): - """Parser of floating point values. - - Parsed value may be bounded to a given upper and lower bound. - """ - number_article = "a" - number_name = "number" - syntactic_help = " ".join((number_article, number_name)) - - def __init__(self, lower_bound=None, upper_bound=None): - self.lower_bound = lower_bound - self.upper_bound = upper_bound - sh = self.syntactic_help - if lower_bound is not None and upper_bound is not None: - sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound)) - elif lower_bound == 0: - sh = "a non-negative %s" % self.number_name - elif upper_bound == 0: - sh = "a non-positive %s" % self.number_name - elif upper_bound is not None: - sh = "%s <= %s" % (self.number_name, upper_bound) - elif lower_bound is not None: - sh = "%s >= %s" % (self.number_name, lower_bound) - self.syntactic_help = sh - - def Convert(self, argument): - """Converts argument to a float; raises ValueError on errors.""" - return float(argument) - - def Type(self): - return 'float' -# End of FloatParser - - -def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None, - flag_values=FLAGS, **args): - """Registers a flag whose value must be a float. - - If lower_bound or upper_bound are set, then this flag must be - within the given range. - """ - parser = FloatParser(lower_bound, upper_bound) - serializer = ArgumentSerializer() - DEFINE(parser, name, default, help, flag_values, serializer, **args) - - -# -# INTEGER FLAGS -# - - -class IntegerParser(NumericParser): - """Parser of an integer value. - - Parsed value may be bounded to a given upper and lower bound. 
- """ - number_article = "an" - number_name = "integer" - syntactic_help = " ".join((number_article, number_name)) - - def __init__(self, lower_bound=None, upper_bound=None): - self.lower_bound = lower_bound - self.upper_bound = upper_bound - sh = self.syntactic_help - if lower_bound is not None and upper_bound is not None: - sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound)) - elif lower_bound == 1: - sh = "a positive %s" % self.number_name - elif upper_bound == -1: - sh = "a negative %s" % self.number_name - elif lower_bound == 0: - sh = "a non-negative %s" % self.number_name - elif upper_bound == 0: - sh = "a non-positive %s" % self.number_name - elif upper_bound is not None: - sh = "%s <= %s" % (self.number_name, upper_bound) - elif lower_bound is not None: - sh = "%s >= %s" % (self.number_name, lower_bound) - self.syntactic_help = sh - - def Convert(self, argument): - __pychecker__ = 'no-returnvalues' - if type(argument) == str: - base = 10 - if len(argument) > 2 and argument[0] == "0" and argument[1] == "x": - base = 16 - try: - return int(argument, base) - # ValueError is thrown when argument is a string, and overflows an int. - except ValueError: - return long(argument, base) - else: - try: - return int(argument) - # OverflowError is thrown when argument is numeric, and overflows an int. - except OverflowError: - return long(argument) - - def Type(self): - return 'int' - - -def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None, - flag_values=FLAGS, **args): - """Registers a flag whose value must be an integer. - - If lower_bound, or upper_bound are set, then this flag must be - within the given range. - """ - parser = IntegerParser(lower_bound, upper_bound) - serializer = ArgumentSerializer() - DEFINE(parser, name, default, help, flag_values, serializer, **args) - - -# -# ENUM FLAGS -# - - -class EnumParser(ArgumentParser): - """Parser of a string enum value (a string value from a given set). - - If enum_values (see below) is not specified, any string is allowed. - """ - - def __init__(self, enum_values=None): - self.enum_values = enum_values - - def Parse(self, argument): - if self.enum_values and argument not in self.enum_values: - raise ValueError("value should be one of <%s>" % - "|".join(self.enum_values)) - return argument - - def Type(self): - return 'string enum' - - -class EnumFlag(Flag): - """Basic enum flag; its value can be any string from list of enum_values.""" - - def __init__(self, name, default, help, enum_values=None, - short_name=None, **args): - enum_values = enum_values or [] - p = EnumParser(enum_values) - g = ArgumentSerializer() - Flag.__init__(self, p, g, name, default, help, short_name, **args) - if not self.help: self.help = "an enum string" - self.help = "<%s>: %s" % ("|".join(enum_values), self.help) - - def _WriteCustomInfoInXMLFormat(self, outfile, indent): - for enum_value in self.parser.enum_values: - _WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent) - - -def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS, - **args): - """Registers a flag whose value can be any string from enum_values.""" - DEFINE_flag(EnumFlag(name, default, help, enum_values, ** args), - flag_values) - - -# -# LIST FLAGS -# - - -class BaseListParser(ArgumentParser): - """Base class for a parser of lists of strings. 
- - To extend, inherit from this class; from the subclass __init__, call - - BaseListParser.__init__(self, token, name) - - where token is a character used to tokenize, and name is a description - of the separator. - """ - - def __init__(self, token=None, name=None): - assert name - self._token = token - self._name = name - self.syntactic_help = "a %s separated list" % self._name - - def Parse(self, argument): - if isinstance(argument, list): - return argument - elif argument == '': - return [] - else: - return [s.strip() for s in argument.split(self._token)] - - def Type(self): - return '%s separated list of strings' % self._name - - -class ListParser(BaseListParser): - """Parser for a comma-separated list of strings.""" - - def __init__(self): - BaseListParser.__init__(self, ',', 'comma') - - def WriteCustomInfoInXMLFormat(self, outfile, indent): - BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent) - _WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent) - - -class WhitespaceSeparatedListParser(BaseListParser): - """Parser for a whitespace-separated list of strings.""" - - def __init__(self): - BaseListParser.__init__(self, None, 'whitespace') - - def WriteCustomInfoInXMLFormat(self, outfile, indent): - BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent) - separators = list(string.whitespace) - separators.sort() - for ws_char in separators: - _WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char), indent) - - -def DEFINE_list(name, default, help, flag_values=FLAGS, **args): - """Registers a flag whose value is a comma-separated list of strings.""" - parser = ListParser() - serializer = ListSerializer(',') - DEFINE(parser, name, default, help, flag_values, serializer, **args) - - -def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args): - """Registers a flag whose value is a whitespace-separated list of strings. - - Any whitespace can be used as a separator. - """ - parser = WhitespaceSeparatedListParser() - serializer = ListSerializer(' ') - DEFINE(parser, name, default, help, flag_values, serializer, **args) - - -# -# MULTI FLAGS -# - - -class MultiFlag(Flag): - """A flag that can appear multiple times on the command-line. - - The value of such a flag is a list that contains the individual values - from all the appearances of that flag on the command-line. - - See the __doc__ for Flag for most behavior of this class. Only - differences in behavior are described here: - - * The default value may be either a single value or a list of values. - A single value is interpreted as the [value] singleton list. - - * The value of the flag is always a list, even if the option was - only supplied once, and even if the default value is a single - value - """ - - def __init__(self, *args, **kwargs): - Flag.__init__(self, *args, **kwargs) - self.help += ';\n repeat this option to specify a list of values' - - def Parse(self, arguments): - """Parses one or more arguments with the installed parser. - - Args: - arguments: a single argument or a list of arguments (typically a - list of default values); a single argument is converted - internally into a list containing one item. - """ - if not isinstance(arguments, list): - # Default value may be a list of values. Most other arguments - # will not be, so convert them into a single-item list to make - # processing simpler below.
- arguments = [arguments] - - if self.present: - # keep a backup reference to list of previously supplied option values - values = self.value - else: - # "erase" the defaults with an empty list - values = [] - - for item in arguments: - # have Flag superclass parse argument, overwriting self.value reference - Flag.Parse(self, item) # also increments self.present - values.append(self.value) - - # put list of option values back in the 'value' attribute - self.value = values - - def Serialize(self): - if not self.serializer: - raise FlagsError("Serializer not present for flag %s" % self.name) - if self.value is None: - return '' - - s = '' - - multi_value = self.value - - for self.value in multi_value: - if s: s += ' ' - s += Flag.Serialize(self) - - self.value = multi_value - - return s - - def Type(self): - return 'multi ' + self.parser.Type() - - -def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS, - **args): - """Registers a generic MultiFlag that parses its args with a given parser. - - Auxiliary function. Normal users should NOT use it directly. - - Developers who need to create their own 'Parser' classes for options - which can appear multiple times can call this module function to - register their flags. - """ - DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args), - flag_values) - - -def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args): - """Registers a flag whose value can be a list of any strings. - - Use the flag on the command line multiple times to place multiple - string values into the list. The 'default' may be a single string - (which will be converted into a single-element list) or a list of - strings. - """ - parser = ArgumentParser() - serializer = ArgumentSerializer() - DEFINE_multi(parser, serializer, name, default, help, flag_values, **args) - - -def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None, - flag_values=FLAGS, **args): - """Registers a flag whose value can be a list of arbitrary integers. - - Use the flag on the command line multiple times to place multiple - integer values into the list. The 'default' may be a single integer - (which will be converted into a single-element list) or a list of - integers. - """ - parser = IntegerParser(lower_bound, upper_bound) - serializer = ArgumentSerializer() - DEFINE_multi(parser, serializer, name, default, help, flag_values, **args) - - -# Now register the flags that we want to exist in all applications. -# These are all defined with allow_override=1, so user-apps can use -# these flagnames for their own purposes, if they want. -DEFINE_flag(HelpFlag()) -DEFINE_flag(HelpshortFlag()) -DEFINE_flag(HelpXMLFlag()) - -# Define special flags here so that help may be generated for them. -# NOTE: Please do NOT use _SPECIAL_FLAGS from outside this module. -_SPECIAL_FLAGS = FlagValues() - - -DEFINE_string( - 'flagfile', "", - "Insert flag definitions from the given file into the command line.", - _SPECIAL_FLAGS) - -DEFINE_string( - 'undefok', "", - "comma-separated list of flag names that it is okay to specify " - "on the command line even if the program does not define a flag " - "with that name. 
IMPORTANT: flags in this list that have " - "arguments MUST use the --flag=value format.", _SPECIAL_FLAGS) diff --git a/tools/closure_linter/setup.cfg b/tools/closure_linter/setup.cfg deleted file mode 100644 index 861a9f5..0000000 --- a/tools/closure_linter/setup.cfg +++ /dev/null @@ -1,5 +0,0 @@ -[egg_info] -tag_build = -tag_date = 0 -tag_svn_revision = 0 - diff --git a/tools/closure_linter/setup.py b/tools/closure_linter/setup.py index 1d1764f..d320b65 100755 --- a/tools/closure_linter/setup.py +++ b/tools/closure_linter/setup.py @@ -20,7 +20,7 @@ except ImportError: from distutils.core import setup setup(name='closure_linter', - version='2.2.6', + version='2.3.17', description='Closure Linter', license='Apache', author='The Closure Linter Authors', -- 2.7.4
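For context on the flags module removed above: it implements a gflags-style API in which DEFINE_string, DEFINE_boolean, DEFINE_integer, DEFINE_list and DEFINE_multistring register flags on a global FLAGS object. The sketch below is an illustration only and is not part of the patch; it assumes the module is importable as `gflags`, that the `FLAGS` object can be called on `sys.argv` to parse the command line (the FlagValues parsing entry point is not in the hunk shown), and that parse failures raise `FlagsError`. The flag names and defaults are invented, and the code targets Python 2 to match the removed module.

    #!/usr/bin/env python2
    # Hypothetical usage of the removed gflags-style module (editor's sketch).
    import sys

    import gflags

    FLAGS = gflags.FLAGS

    # Register flags on the global FLAGS object via the DEFINE_* helpers.
    gflags.DEFINE_string('name', 'world', 'Name to greet.')
    gflags.DEFINE_boolean('verbose', False, 'Print extra output.')
    gflags.DEFINE_integer('retries', 3, 'Number of retries.', lower_bound=0)
    gflags.DEFINE_list('languages', ['en'], 'Comma-separated list of language codes.')
    gflags.DEFINE_multistring('tag', [], 'Tag to apply; repeat the flag for several tags.')


    def main(argv):
      try:
        argv = FLAGS(argv)  # parse flags; the remaining arguments are returned
      except gflags.FlagsError, e:
        sys.stderr.write('%s\nUsage: %s [flags]\n%s\n' % (e, sys.argv[0], FLAGS))
        sys.exit(1)
      if FLAGS.verbose:
        print 'retries=%d languages=%s tags=%s' % (
            FLAGS.retries, FLAGS.languages, FLAGS.tag)
      print 'Hello, %s!' % FLAGS.name


    if __name__ == '__main__':
      main(sys.argv)

Invoked as, for example, `python2 greet.py --name=io.js --retries=5 --languages=en,ja --tag=a --tag=b --noverbose`: boolean flags are negated with the `no` prefix, list flags take a comma-separated value, and multi flags are repeated to build up a list.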
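The ArgumentParser base class and the generic DEFINE()/DEFINE_flag() helpers in the removed code also allow custom value parsers. The following is a minimal, hypothetical sketch under the same `gflags` import assumption; `HexParser` and the `--mask` flag are invented, and it relies on Flag's default handling (not shown in this hunk) running the string default through the custom parser.

    # Hypothetical custom parser registered via DEFINE() (editor's sketch).
    import gflags


    class HexParser(gflags.ArgumentParser):
      """Parses a hexadecimal string such as '0x1f' into an int."""

      syntactic_help = 'a hexadecimal integer, e.g. 0x1f'

      def Parse(self, argument):
        try:
          return int(argument, 16)
        except (TypeError, ValueError):
          raise ValueError('%s is not %s' % (argument, self.syntactic_help))

      def Type(self):
        return 'hex int'


    # DEFINE() wraps the parser in a Flag and registers it on the global FLAGS.
    gflags.DEFINE(HexParser(), 'mask', '0xff', 'Bit mask to apply.',
                  serializer=gflags.ArgumentSerializer())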