author    Trevor Elliott <trevor@galois.com>  2013-05-16 14:28:25 -0700
committer Trevor Elliott <trevor@galois.com>  2013-05-16 14:28:25 -0700
commit d316614847c16569da34a42e808dfb332fc6b6c9 (patch)
tree   1d76a49c647c645d4513ab970b500f3c7b6a0cf1 /tools/closure_linter-2.3.4/closure_linter/common/tokenizer.py
parent b4f01ad9fa584c77fe6b2a6f55a9e5c00a701e58 (diff)
Remove google closure, and start reworking the build system
Squashed commit of the following:

commit 446aae2afd089c28abd1d03a5fd20d4735837e16
Author: Trevor Elliott <trevor@galois.com>
Date:   Thu May 16 11:50:17 2013 -0700

    stage-dir doesn't need to be cleaned

    Since everything goes into a common build tree now, cleaning just
    involves removing the entire build tree.

commit d8f531ddf8ee1406ec915502c28dc0eb3912d0ee
Author: Trevor Elliott <trevor@galois.com>
Date:   Thu May 16 11:47:10 2013 -0700

    Switch to placing build artifacts in a build tree

commit 9eedeec8d6a1012b1b7e466120260276b1e952d4
Author: Trevor Elliott <trevor@galois.com>
Date:   Thu May 16 11:35:01 2013 -0700

    Remove the closure_compiler and closure_linter

commit 5784158cf2cd55f0ffd01147ae014379ecc857fd
Author: Trevor Elliott <trevor@galois.com>
Date:   Thu May 16 11:34:27 2013 -0700

    Move the scripts in build to tools/bin

commit 64a6a53ea0fd5e299e9d17c0e4f8fedf305272dc
Author: Trevor Elliott <trevor@galois.com>
Date:   Thu May 16 11:27:55 2013 -0700

    Build jsdoc

    Also, remove the old Makefiles that were in doc, as they're not
    necessary anymore.

commit 1ef0d9e39cd4a24807ee6ca956fbc627fb851b9d
Author: Trevor Elliott <trevor@galois.com>
Date:   Thu May 16 11:18:59 2013 -0700

    Conditionally build the manual

commit c326c58059e0d5035edecfd6261ee42797c49c2c
Author: Trevor Elliott <trevor@galois.com>
Date:   Thu May 16 11:13:31 2013 -0700

    Get the html manual building again

commit 480fa132ffb0562eb3f61d45d79d3315b1d3cc29
Author: Trevor Elliott <trevor@galois.com>
Date:   Thu May 16 11:13:01 2013 -0700

    Move doc specific .gitignore stuff to doc/.gitignore

commit 8c108d4e0df848839bcd6b4c22d623053f590e95
Author: Trevor Elliott <trevor@galois.com>
Date:   Wed May 15 10:42:41 2013 -0700

    Fix some path inconsistencies in the contexts build.mk

commit ee53404be09cf26983365374da84ade564b92926
Author: Trevor Elliott <trevor@galois.com>
Date:   Wed May 15 10:37:40 2013 -0700

    Preliminary build system changes

    * Chrome extension builds, but there are problems

commit 474c6b88190787aeffd960ffb5855d31770e7141
Author: Trevor Elliott <trevor@galois.com>
Date:   Mon May 13 19:06:31 2013 -0700

    Remove the closure toolkit
Diffstat (limited to 'tools/closure_linter-2.3.4/closure_linter/common/tokenizer.py')
-rwxr-xr-x  tools/closure_linter-2.3.4/closure_linter/common/tokenizer.py | 184
1 file changed, 0 insertions, 184 deletions
diff --git a/tools/closure_linter-2.3.4/closure_linter/common/tokenizer.py b/tools/closure_linter-2.3.4/closure_linter/common/tokenizer.py
deleted file mode 100755
index 0234720..0000000
--- a/tools/closure_linter-2.3.4/closure_linter/common/tokenizer.py
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Regular expression based lexer."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-from closure_linter.common import tokens
-
-# Shorthand
-Type = tokens.TokenType
-
-
-class Tokenizer(object):
- """General purpose tokenizer.
-
- Attributes:
- mode: The latest mode of the tokenizer. This allows patterns to distinguish
- if they are mid-comment, mid-parameter list, etc.
- matchers: Dictionary of modes to sequences of matchers that define the
- patterns to check at any given time.
- default_types: Dictionary of modes to types, defining what type to give
- non-matched text when in the given mode. Defaults to Type.NORMAL.
- """
-
- def __init__(self, starting_mode, matchers, default_types):
- """Initialize the tokenizer.
-
- Args:
- starting_mode: Mode to start in.
- matchers: Dictionary of modes to sequences of matchers that defines the
- patterns to check at any given time.
- default_types: Dictionary of modes to types, defining what type to give
- non-matched text when in the given mode. Defaults to Type.NORMAL.
- """
- self.__starting_mode = starting_mode
- self.matchers = matchers
- self.default_types = default_types
-
- def TokenizeFile(self, file):
- """Tokenizes the given file.
-
- Args:
- file: An iterable that yields one line of the file at a time.
-
- Returns:
- The first token in the file
- """
- # The current mode.
- self.mode = self.__starting_mode
- # The first token in the stream.
- self.__first_token = None
- # The last token added to the token stream.
- self.__last_token = None
- # The current line number.
- self.__line_number = 0
-
- for line in file:
- self.__line_number += 1
- self.__TokenizeLine(line)
-
- return self.__first_token
-
- def _CreateToken(self, string, token_type, line, line_number, values=None):
- """Creates a new Token object (or subclass).
-
- Args:
- string: The string of input the token represents.
- token_type: The type of token.
- line: The text of the line this token is in.
- line_number: The line number of the token.
- values: A dict of named values within the token. For instance, a
- function declaration may have a value called 'name' which captures the
- name of the function.
-
- Returns:
- The newly created Token object.
- """
- return tokens.Token(string, token_type, line, line_number, values)
-
- def __TokenizeLine(self, line):
- """Tokenizes the given line.
-
- Args:
- line: The contents of the line.
- """
- string = line.rstrip('\n\r\f')
- line_number = self.__line_number
- self.__start_index = 0
-
- if not string:
- self.__AddToken(self._CreateToken('', Type.BLANK_LINE, line, line_number))
- return
-
- normal_token = ''
- index = 0
- while index < len(string):
- for matcher in self.matchers[self.mode]:
- if matcher.line_start and index > 0:
- continue
-
- match = matcher.regex.match(string, index)
-
- if match:
- if normal_token:
- self.__AddToken(
- self.__CreateNormalToken(self.mode, normal_token, line,
- line_number))
- normal_token = ''
-
- # Add the match.
- self.__AddToken(self._CreateToken(match.group(), matcher.type, line,
- line_number, match.groupdict()))
-
- # Change the mode to the correct one for after this match.
- self.mode = matcher.result_mode or self.mode
-
- # Shorten the string to be matched.
- index = match.end()
-
- break
-
- else:
- # If the for loop finishes naturally (i.e. no matches) we just add the
- # first character to the string of consecutive non match characters.
- # These will constitute a NORMAL token.
- if string:
- normal_token += string[index:index + 1]
- index += 1
-
- if normal_token:
- self.__AddToken(
- self.__CreateNormalToken(self.mode, normal_token, line, line_number))
-
- def __CreateNormalToken(self, mode, string, line, line_number):
- """Creates a normal token.
-
- Args:
- mode: The current mode.
- string: The string to tokenize.
- line: The line of text.
- line_number: The line number within the file.
-
- Returns:
- A Token object, of the default type for the current mode.
- """
- type = Type.NORMAL
- if mode in self.default_types:
- type = self.default_types[mode]
- return self._CreateToken(string, type, line, line_number)
-
- def __AddToken(self, token):
- """Add the given token to the token stream.
-
- Args:
- token: The token to add.
- """
- # Store the first token, or point the previous token to this one.
- if not self.__first_token:
- self.__first_token = token
- else:
- self.__last_token.next = token
-
- # Establish the doubly linked list
- token.previous = self.__last_token
- self.__last_token = token
-
- # Compute the character indices
- token.start_index = self.__start_index
- self.__start_index += token.length
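
For reference, the sketch below shows how the Tokenizer removed above was driven. It is illustrative only: it assumes the closure_linter.common package is still importable from elsewhere, and SimpleMatcher, TEXT_MODE, COMMENT_MODE, and the string token types are hypothetical stand-ins (the real linter supplies its own matcher objects and modes). The only attributes the sketch relies on are the ones the tokenizer itself reads: regex, type, result_mode, and line_start.

# Minimal, illustrative sketch of driving the removed Tokenizer.
# SimpleMatcher is a hypothetical stand-in for the library's matcher
# objects; TEXT_MODE / COMMENT_MODE and the string token types are made up.
from __future__ import print_function

import collections
import re

from closure_linter.common import tokenizer
from closure_linter.common import tokens

Type = tokens.TokenType

# Provides exactly the four attributes Tokenizer.__TokenizeLine reads.
SimpleMatcher = collections.namedtuple(
    'SimpleMatcher', ['regex', 'type', 'result_mode', 'line_start'])

TEXT_MODE = 'text'
COMMENT_MODE = 'comment'

matchers = {
    # In text mode, '/*' opens a comment and switches to comment mode.
    TEXT_MODE: [
        SimpleMatcher(re.compile(r'/\*'), 'start comment', COMMENT_MODE, False),
    ],
    # In comment mode, '*/' closes the comment and switches back.
    COMMENT_MODE: [
        SimpleMatcher(re.compile(r'\*/'), 'end comment', TEXT_MODE, False),
    ],
}

# Unmatched text inside a comment gets this type; unmatched text in text
# mode falls back to Type.NORMAL, as __CreateNormalToken shows above.
default_types = {COMMENT_MODE: 'comment text'}

t = tokenizer.Tokenizer(TEXT_MODE, matchers, default_types)

# TokenizeFile accepts any iterable of lines and returns the head of a
# doubly linked token list (see __AddToken above).
token = t.TokenizeFile(['var x = 1; /* a comment', 'still a comment */ var y;'])
while token:
    # Attribute names assumed to mirror the Token constructor arguments.
    print(token.line_number, token.type, repr(token.string))
    token = token.next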