# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the tokenutil module."""

# Allow non-Google copyright
# pylint: disable=g-bad-file-header
# Module author, per closure_linter convention.
__author__ = 'nnaze@google.com (Nathan Naze)'
23 import unittest as googletest
25 from closure_linter import ecmametadatapass
26 from closure_linter import javascripttokens
27 from closure_linter import testutil
28 from closure_linter import tokenutil
31 class FakeToken(object):
35 class TokenUtilTest(googletest.TestCase):
37 def testGetTokenRange(self):
49 self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d))
51 # This is an error as e does not come after a in the token chain.
52 self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e))
54 def testTokensToString(self):
75 'aaa\nbbbccc\n\n\n\nddd\neee',
76 tokenutil.TokensToString([a, b, c, d, e]))
79 'ddd\neee\naaa\nbbbccc',
80 tokenutil.TokensToString([d, e, a, b, c]),
81 'Neighboring tokens not in line_number order should have a newline '
84 def testGetPreviousCodeToken(self):
86 tokens = testutil.TokenizeSource("""
92 def _GetTokenStartingWith(token_starts_with):
94 if t.string.startswith(token_starts_with):
99 tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1')))
103 tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string)
105 def testGetNextCodeToken(self):
107 tokens = testutil.TokenizeSource("""
109 /* another comment */
113 def _GetTokenStartingWith(token_starts_with):
115 if t.string.startswith(token_starts_with):
120 tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string)
124 tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1')))
126 def testGetIdentifierStart(self):
128 tokens = testutil.TokenizeSource("""
130 prototype. /* another comment */
133 ['edge'][case].prototype.
137 def _GetTokenStartingWith(token_starts_with):
139 if t.string.startswith(token_starts_with):
144 tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string)
148 tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string)
152 tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2')))
def testInsertTokenBefore(self):
    """Tokens inserted *before* an existing token are linked correctly."""
    # Drive the shared scenario down the InsertTokenBefore path.
    self.AssertInsertTokenAfterBefore(False)
def testInsertTokenAfter(self):
    """Tokens inserted *after* an existing token are linked correctly."""
    # Drive the shared scenario down the InsertTokenAfter path.
    self.AssertInsertTokenAfterBefore(True)
162 def AssertInsertTokenAfterBefore(self, after):
164 new_token = javascripttokens.JavaScriptToken(
165 'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1)
167 existing_token1 = javascripttokens.JavaScriptToken(
168 'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1)
169 existing_token1.start_index = 0
170 existing_token1.metadata = ecmametadatapass.EcmaMetaData()
172 existing_token2 = javascripttokens.JavaScriptToken(
173 ' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1)
174 existing_token2.start_index = 3
175 existing_token2.metadata = ecmametadatapass.EcmaMetaData()
176 existing_token2.metadata.last_code = existing_token1
178 existing_token1.next = existing_token2
179 existing_token2.previous = existing_token1
182 tokenutil.InsertTokenAfter(new_token, existing_token1)
184 tokenutil.InsertTokenBefore(new_token, existing_token2)
186 self.assertEquals(existing_token1, new_token.previous)
187 self.assertEquals(existing_token2, new_token.next)
189 self.assertEquals(new_token, existing_token1.next)
190 self.assertEquals(new_token, existing_token2.previous)
192 self.assertEquals(existing_token1, new_token.metadata.last_code)
193 self.assertEquals(new_token, existing_token2.metadata.last_code)
195 self.assertEquals(0, existing_token1.start_index)
196 self.assertEquals(3, new_token.start_index)
197 self.assertEquals(4, existing_token2.start_index)
199 def testGetIdentifierForToken(self):
201 tokens = testutil.TokenizeSource("""
202 start1.abc.def.prototype.
210 .hij = function() {};
212 // An absurd multi-liner.
217 start5 . aaa . bbb . ccc
218 shouldntBePartOfThePreviousSymbol
220 start6.abc.def ghi.shouldntBePartOfThePreviousSymbol
228 start9.abc. // why is there a comment here?
229 def /* another comment */
232 start10.abc // why is there a comment here?
233 .def /* another comment */
236 start11.abc. middle1.shouldNotBeIdentifier
239 def _GetTokenStartingWith(token_starts_with):
241 if t.string.startswith(token_starts_with):
245 'start1.abc.def.prototype.onContinuedLine',
246 tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1')))
249 'start2.abc.def.hij.klm.nop',
250 tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2')))
253 'start3.abc.def.hij',
254 tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3')))
257 'start4.abc.def.hij.klm',
258 tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4')))
261 'start5.aaa.bbb.ccc',
262 tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5')))
266 tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6')))
270 tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7')))
274 tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8')))
278 tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9')))
282 tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10')))
285 tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1')))
288 if __name__ == '__main__':