  1. """Tokenization help for Python programs.
  2. tokenize(readline) is a generator that breaks a stream of bytes into
  3. Python tokens. It decodes the bytes according to PEP-0263 for
  4. determining source file encoding.
  5. It accepts a readline-like method which is called repeatedly to get the
  6. next line of input (or b"" for EOF). It generates 5-tuples with these
  7. members:
  8. the token type (see token.py)
  9. the token (a string)
  10. the starting (row, column) indices of the token (a 2-tuple of ints)
  11. the ending (row, column) indices of the token (a 2-tuple of ints)
  12. the original line (string)
  13. It is designed to match the working of the Python tokenizer exactly, except
  14. that it produces COMMENT tokens for comments and gives type OP for all
  15. operators. Additionally, all token lists start with an ENCODING token
  16. which tells you which encoding was used to decode the bytes stream.
  17. """
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
import itertools as _itertools
import re
import sys
from token import *
from token import EXACT_TOKEN_TYPES

cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)

import token
__all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding",
                           "untokenize", "TokenInfo"]
del token

class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

    @property
    def exact_type(self):
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type
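# Illustrative example (not part of the original file): exact_type refines
# the generic OP type into a specific operator token.
#
#     >>> import io, tokenize
#     >>> plus = [t for t in tokenize.tokenize(io.BytesIO(b"1+2").readline)
#     ...         if t.string == '+'][0]
#     >>> plus.type == tokenize.OP and plus.exact_type == tokenize.PLUS
#     True
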
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'

# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
                   r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
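# Illustrative examples (not part of the original file): the Number pattern
# accepts PEP 515 underscore-grouped literals, e.g. re.fullmatch(Number,
# '1_000'), re.fullmatch(Number, '0x_FF') and re.fullmatch(Number,
# '1_0.5e-3j') all succeed.
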
# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
    # The valid string prefixes. Only contain the lower case versions,
    # and don't contain any permutations (include 'fr', but not
    # 'rf'). The various permutations will be generated.
    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
    # if we add binary f-strings, add: ['fb', 'fbr']
    result = {''}
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # create a list with upper and lower versions of each
            # character
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add(''.join(u))
    return result
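# Illustrative example (not part of the original file): each lower-case
# prefix expands to every case and order variant, so 'fr' contributes
# fr, fR, Fr, FR, rf, rF, Rf and RF to the result set.
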
def _compile(expr):
    return re.compile(expr, re.UNICODE)

# Note that since _all_string_prefixes includes the empty string,
# StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Sorting in reverse order puts the long operators before their prefixes.
# Otherwise if = came before ==, == would get recognized as two instances
# of =.
Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True)))
Funny = group(r'\r?\n', Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
# For a given string prefix plus quotes, endpats maps it to a regex
# to match the remainder of that string. _prefix can be empty, for
# a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3

# A set of all of the single and triple quoted string prefixes,
# including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)
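# Illustrative examples (not part of the original file):
#
#     >>> "rb'" in single_quoted
#     True
#     >>> 'f"""' in triple_quoted
#     True
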
tabsize = 8


class TokenError(Exception): pass


class StopTokenizing(Exception): pass

class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in _itertools.chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)

def untokenize(iterable):
    """Transform tokens back into Python source code.

    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out
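# Illustrative example (not part of the original file): with full 5-tuples,
# the round trip reproduces the source bytes exactly.
#
#     >>> from io import BytesIO
#     >>> src = b"x = 1\n"
#     >>> untokenize(tokenize(BytesIO(src).readline)) == src
#     True
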
def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument,
    readline, in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a UTF-8 BOM or an encoding
    cookie as specified in PEP-0263.  If both a BOM and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a UTF-8 BOM is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'

    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                                                             encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
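# Illustrative example (not part of the original file): an encoding cookie
# on the first line is normalized via _get_normal_name().
#
#     >>> from io import BytesIO
#     >>> detect_encoding(BytesIO(b"# -*- coding: latin-1 -*-\n").readline)
#     ('iso-8859-1', [b'# -*- coding: latin-1 -*-\n'])
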
def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        buffer.close()
        raise

def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    physical line.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    encoding, consumed = detect_encoding(readline)
    empty = _itertools.repeat(b"")
    rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
    return _tokenize(rl_gen.__next__, encoding)
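# Illustrative example (not part of the original file): tokenizing a small
# in-memory program.
#
#     >>> from io import BytesIO
#     >>> [tok_name[t.type] for t in tokenize(BytesIO(b"x = 1\n").readline)]
#     ['ENCODING', 'NAME', 'OP', 'NUMBER', 'NEWLINE', 'ENDMARKER']
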
def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    last_line = b''
    line = b''
    while True:                                # loop over lines in stream
        try:
            # We capture the value of the line variable here because
            # readline uses the empty string '' to signal end of input,
            # hence `line` itself will always be overwritten at the end
            # of this loop.
            last_line = line
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                                strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                                strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    yield TokenInfo(COMMENT, comment_token,
                                    (lnum, pos), (lnum, pos + len(comment_token)), line)
                    pos += len(comment_token)

                yield TokenInfo(NL, line[pos:],
                                (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]

                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                    # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or     # ordinary number
                        (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    if parenlev > 0:
                        yield TokenInfo(NL, token, spos, epos, line)
                    else:
                        yield TokenInfo(NEWLINE, token, spos, epos, line)

                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)

                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:               # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)  # multiple lines
                        contstr = line[start:]
                        contline = line
                        break

                # Check up to the first 3 chars of the token to see if
                # they're in the single_quoted set. If so, they start
                # a string.
                # We're using the first 3, because we're looking for
                # "rb'" (for example) at the start of the token. If
                # we switch to longer prefixes, this needs to be
                # adjusted.
                # Note that initial == token[:1].
                # Also note that single quote checking must come after
                # triple quote checking (above).
                elif (initial in single_quoted or
                      token[:2] in single_quoted or
                      token[:3] in single_quoted):
                    if token[-1] == '\n':      # continued string
                        strstart = (lnum, start)
                        # Again, using the first 3 chars of the
                        # token. This is looking for the matching end
                        # regex for the correct type of quote
                        # character. So it's really looking for
                        # endpats["'"] or endpats['"'], by trying to
                        # skip string prefix characters, if any.
                        endprog = _compile(endpats.get(initial) or
                                           endpats.get(token[1]) or
                                           endpats.get(token[2]))
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                      # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)

                elif initial.isidentifier():   # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':          # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                                (lnum, pos), (lnum, pos+1), line)
                pos += 1

    # Add an implicit NEWLINE if the input doesn't end in one
    if last_line and last_line[-1] not in '\r\n' and not last_line.strip().startswith("#"):
        yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')

def generate_tokens(readline):
    """Tokenize a source reading Python code as unicode strings.

    This has the same API as tokenize(), except that it expects the *readline*
    callable to return str objects instead of bytes.
    """
    return _tokenize(readline, None)
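# Illustrative example (not part of the original file): generate_tokens()
# is the str-based counterpart of tokenize(), so no ENCODING token is emitted.
#
#     >>> from io import StringIO
#     >>> [t.string for t in generate_tokens(StringIO("a + b\n").readline)]
#     ['a', '+', 'b', '\n', '']
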
def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        sys.stderr.write(message)
        sys.stderr.write('\n')

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise

if __name__ == "__main__":
    main()
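# Illustrative command-line usage (not part of the original file):
# `python -m tokenize hello.py` prints one token per line, and the -e flag
# swaps generic OP names for exact ones (e.g. EQUAL instead of OP for '=').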