There's an excellent article on effbot.org about using regular expressions for lexical scanning.
Adapting the tokenizer to the problem at hand:
import re

token_pattern = r"""
(?P<identifier>[a-zA-Z_][a-zA-Z0-9_]*)  # names such as 'general' or 'ip'
|(?P<integer>[0-9]+)
|(?P<dot>\.)
|(?P<open_variable>[$][{])              # start of a ${...} reference
|(?P<open_curly>[{])
|(?P<close_curly>[}])
|(?P<newline>\n)
|(?P<whitespace>\s+)
|(?P<equals>[=])
|(?P<slash>[/])
"""

token_re = re.compile(token_pattern, re.VERBOSE)
class TokenizerException(Exception):
    pass

def tokenize(text):
    pos = 0
    while True:
        # try to match the next token at the current position
        m = token_re.match(text, pos)
        if not m:
            break
        pos = m.end()
        # lastgroup names the alternative that matched, i.e. the token type
        tokname = m.lastgroup
        tokvalue = m.group(tokname)
        yield tokname, tokvalue
    if pos != len(text):
        raise TokenizerException('tokenizer stopped at pos %r of %r' % (
            pos, len(text)))
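Two details are worth calling out: m.lastgroup gives the name of the named group that matched, so the token type falls straight out of the pattern, and any character none of the alternatives can match stops the loop and raises. A quick check (this snippet is my addition, not part of the original answer):

m = token_re.match('${general.ip}')
assert m.lastgroup == 'open_variable'

try:
    list(tokenize('name=@'))   # '@' matches no alternative in the pattern
except TokenizerException as e:
    print(e)                   # tokenizer stopped at pos 5 of 6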
To test it:
stuff = r'property.${general.name}.ip = ${general.ip}'
stuff2 = r'''
general {
 name = myname
 ip = 127.0.0.1
}
'''

print(' stuff '.center(60, '='))
for tok in tokenize(stuff):
    print(tok)
print(' stuff2 '.center(60, '='))
for tok in tokenize(stuff2):
    print(tok)
which prints:
========================== stuff ===========================
('identifier', 'property')
('dot', '.')
('open_variable', '${')
('identifier', 'general')
('dot', '.')
('identifier', 'name')
('close_curly', '}')
('dot', '.')
('identifier', 'ip')
('whitespace', ' ')
('equals', '=')
('whitespace', ' ')
('open_variable', '${')
('identifier', 'general')
('dot', '.')
('identifier', 'ip')
('close_curly', '}')
========================== stuff2 ==========================
('newline', '\n')
('identifier', 'general')
('whitespace', ' ')
('open_curly', '{')
('newline', '\n')
('whitespace', ' ')
('identifier', 'name')
('whitespace', ' ')
('equals', '=')
('whitespace', ' ')
('identifier', 'myname')
('newline', '\n')
('whitespace', ' ')
('identifier', 'ip')
('whitespace', ' ')
('equals', '=')
('whitespace', ' ')
('integer', '127')
('dot', '.')
('integer', '0')
('dot', '.')
('integer', '0')
('dot', '.')
('integer', '1')
('newline', '\n')
('close_curly', '}')
('newline', '\n')
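A parser built on top of this usually doesn't care about whitespace or newline tokens, so a natural next step is a small wrapper generator that drops them. A minimal sketch (the wrapper name is mine, not from the article):

def tokenize_skip_space(text):
    # yield tokens, dropping the ones a parser typically ignores
    for tokname, tokvalue in tokenize(text):
        if tokname not in ('whitespace', 'newline'):
            yield tokname, tokvalue

print(list(tokenize_skip_space('ip = 127.0.0.1')))
# [('identifier', 'ip'), ('equals', '='), ('integer', '127'), ('dot', '.'),
#  ('integer', '0'), ('dot', '.'), ('integer', '0'), ('dot', '.'), ('integer', '1')]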