diff --git a/openpyxl/formula/tests/test_tokenizer.py b/openpyxl/formula/tests/test_tokenizer.py
index 2ddeebafac5f1918a68aa1f0f2e31ae8b334ff63..e0eed3b4e56e391a60ec6c4ef3fe5db5f9aae489 100644
--- a/openpyxl/formula/tests/test_tokenizer.py
+++ b/openpyxl/formula/tests/test_tokenizer.py
@@ -246,6 +246,8 @@ class TestTokenizer(object):
         result = [(token.value, token.type, token.subtype)
                   for token in tok.items]
         assert result == tokens
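+        # Round trip: rendering the parsed tokens must reproduce the input.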
+        assert tok.render() == formula
 
     @pytest.mark.parametrize('formula, offset, result', [
         ('"spamspamspam"spam', 0, '"spamspamspam"'),
@@ -604,3 +605,36 @@ class TestToken(object):
         assert token.value == ';'
         assert token.type == SEP
         assert token.subtype == ROW
+
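+    # A quoted sheet name may follow the ':' of a range; each formula
+    # must tokenize as listed and render back to its original text.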
+    @pytest.mark.parametrize('formula, tokens', [
+        ("SUM(Inputs!$W$111:'Input 1'!W111)",
+         [("SUM(Inputs!$W$111:'Input 1'!W111)", 'LITERAL', '')]),
+
+        ("=SUM('Inputs 1'!$W$111:'Input 1'!W111)",
+         [('SUM(', 'FUNC', 'OPEN'),
+          ("'Inputs 1'!$W$111:'Input 1'!W111", 'OPERAND', 'RANGE'),
+          (')', 'FUNC', 'CLOSE')]),
+
+        ("=SUM(Inputs!$W$111:'Input 1'!W111)",
+         [('SUM(', 'FUNC', 'OPEN'),
+          ("Inputs!$W$111:'Input 1'!W111", 'OPERAND', 'RANGE'),
+          (')', 'FUNC', 'CLOSE')]),
+
+        ("=SUM(Inputs!$W$111:'Input ''\"1'!W111)",
+         [('SUM(', 'FUNC', 'OPEN'),
+          ("Inputs!$W$111:'Input ''\"1'!W111", 'OPERAND', 'RANGE'),
+          (')', 'FUNC', 'CLOSE')]),
+
+        ("=SUM(Inputs!$W$111:Input1!W111)",
+         [('SUM(', 'FUNC', 'OPEN'),
+          ('Inputs!$W$111:Input1!W111', 'OPERAND', 'RANGE'),
+          (')', 'FUNC', 'CLOSE')]),
+    ])
+    def test_parse_quoted_sheet_name_in_range(self, tokenizer, formula, tokens):
+        tok = tokenizer.Tokenizer(formula)
+        result = [(token.value, token.type, token.subtype)
+                  for token in tok.items]
+        assert result == tokens
+        assert tok.render() == formula
diff --git a/openpyxl/formula/tokenizer.py b/openpyxl/formula/tokenizer.py
index 50b989abb6a48a59c00fa879bec32102b99eace7..ac37ce67d14a9940d21c0b54cdeb2c4b3ca19222 100644
--- a/openpyxl/formula/tokenizer.py
+++ b/openpyxl/formula/tokenizer.py
@@ -10,7 +10,7 @@ import re
 
 
 class TokenizerError(Exception):
-    "Base class for all Tokenizer errors."
+    """Base class for all Tokenizer errors."""
 
 
 class Tokenizer(object):
@@ -172,7 +172,7 @@ class Tokenizer(object):
         """
         Consume the characters constituting an operator.
 
-        Returns the number of charactes consumed. (Does not update
+        Returns the number of characters consumed. (Does not update
         self.offset)
 
         """
@@ -210,7 +210,7 @@ class Tokenizer(object):
         """
         Consumes a ( or { character.
 
-        Returns the number of charactes consumed. (Does not update
+        Returns the number of characters consumed. (Does not update
         self.offset)
 
         """
@@ -232,7 +232,7 @@ class Tokenizer(object):
         """
         Consumes a } or ) character.
 
-        Returns the number of charactes consumed. (Does not update
+        Returns the number of characters consumed. (Does not update
         self.offset)
 
         """
@@ -248,7 +248,7 @@ class Tokenizer(object):
         """
         Consumes a ; or , character.
 
-        Returns the number of charactes consumed. (Does not update
+        Returns the number of characters consumed. (Does not update
         self.offset)
 
         """
@@ -294,7 +294,10 @@
-        token transition. In this case, we raise a TokenizerError
+        token transition. In this case, we raise a TokenizerError,
+        unless the token ends in ':' (an unfinished range operand).
 
         """
-        if self.token:
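+        # A trailing ':' is allowed: the operand continues past the colon
+        # of a range (e.g. with a quoted sheet name after it).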
+        if self.token and self.token[-1] != ':':
             raise TokenizerError(
                 "Unexpected character at position %d in '%s'" %
                 (self.offset, self.formula))
@@ -306,7 +306,7 @@ class Tokenizer(object):
             del self.token[:]
 
     def render(self):
-        "Convert the parsed tokens back to a string."
+        """Convert the parsed tokens back to a string."""
         if not self.items:
             return ""
         elif self.items[0].type == Token.LITERAL:
@@ -364,7 +364,7 @@ class Token(object):
 
     @classmethod
     def make_operand(cls, value):
-        "Create an operand token."
+        """Create an operand token."""
         if value.startswith('"'):
             subtype = cls.TEXT
         elif value.startswith('#'):
@@ -384,7 +384,7 @@ class Token(object):
     #
     # There are 3 types of `Subexpressions`: functions, array literals, and
     # parentheticals. Subexpressions have 'OPEN' and 'CLOSE' tokens. 'OPEN'
-    # is used when parsing the initital expression token (i.e., '(' or '{')
+    # is used when parsing the initial expression token (i.e., '(' or '{')
     # and 'CLOSE' is used when parsing the closing expression token ('}' or
     # ')').
 
@@ -414,7 +414,7 @@ class Token(object):
         return cls(value, type_, subtype)
 
     def get_closer(self):
-        "Return a closing token that matches this token's type."
+        """Return a closing token that matches this token's type."""
         assert self.type in (self.FUNC, self.ARRAY, self.PAREN)
         assert self.subtype == self.OPEN
         value = "}" if self.type == self.ARRAY else ")"
@@ -433,7 +433,7 @@ class Token(object):
 
     @classmethod
     def make_separator(cls, value):
-        "Create a separator token"
+        """Create a separator token"""
         assert value in (',', ';')
         subtype = cls.ARG if value == ',' else cls.ROW
         return cls(value, cls.SEP, subtype)
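
Not part of the patch: a sketch of the behavior it enables, using only the
API exercised by the new tests (Tokenizer parses on construction; items and
render() appear in the diff above).

    from openpyxl.formula.tokenizer import Tokenizer

    # Before this fix, the quote after ':' raised TokenizerError; the
    # cross-sheet range now parses as a single OPERAND/RANGE token.
    tok = Tokenizer("=SUM(Inputs!$W$111:'Input 1'!W111)")
    assert [(t.value, t.type, t.subtype) for t in tok.items] == [
        ('SUM(', 'FUNC', 'OPEN'),
        ("Inputs!$W$111:'Input 1'!W111", 'OPERAND', 'RANGE'),
        (')', 'FUNC', 'CLOSE'),
    ]
    # render() round-trips the original formula text.
    assert tok.render() == "=SUM(Inputs!$W$111:'Input 1'!W111)"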