  1. """
  2. pygments.lexer
  3. ~~~~~~~~~~~~~~
  4. Base lexer classes.
  5. :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
  6. :license: BSD, see LICENSE for details.
  7. """
  8. import re
  9. import sys
  10. import time
  11. from pip._vendor.pygments.filter import apply_filters, Filter
  12. from pip._vendor.pygments.filters import get_filter_by_name
  13. from pip._vendor.pygments.token import Error, Text, Other, Whitespace, _TokenType
  14. from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
  15. make_analysator, Future, guess_decode
  16. from pip._vendor.pygments.regexopt import regex_opt
  17. __all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
  18. 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
  19. 'default', 'words', 'line_re']
  20. line_re = re.compile('.*?\n')
  21. _encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
  22. (b'\xff\xfe\0\0', 'utf-32'),
  23. (b'\0\0\xfe\xff', 'utf-32be'),
  24. (b'\xff\xfe', 'utf-16'),
  25. (b'\xfe\xff', 'utf-16be')]
  26. _default_analyse = staticmethod(lambda x: 0.0)
  27. class LexerMeta(type):
  28. """
  29. This metaclass automagically converts ``analyse_text`` methods into
  30. static methods which always return float values.
  31. """
  32. def __new__(mcs, name, bases, d):
  33. if 'analyse_text' in d:
  34. d['analyse_text'] = make_analysator(d['analyse_text'])
  35. return type.__new__(mcs, name, bases, d)
  36. class Lexer(metaclass=LexerMeta):
  37. """
  38. Lexer for a specific language.
  39. See also :doc:`lexerdevelopment`, a high-level guide to writing
  40. lexers.
  41. Lexer classes have attributes used for choosing the most appropriate
  42. lexer based on various criteria.
  43. .. autoattribute:: name
  44. :no-value:
  45. .. autoattribute:: aliases
  46. :no-value:
  47. .. autoattribute:: filenames
  48. :no-value:
  49. .. autoattribute:: alias_filenames
  50. .. autoattribute:: mimetypes
  51. :no-value:
  52. .. autoattribute:: priority
  53. Lexers included in Pygments should have two additional attributes:
  54. .. autoattribute:: url
  55. :no-value:
  56. .. autoattribute:: version_added
  57. :no-value:
  58. Lexers included in Pygments may have additional attributes:
  59. .. autoattribute:: _example
  60. :no-value:
  61. You can pass options to the constructor. The basic options recognized
  62. by all lexers and processed by the base `Lexer` class are:
  63. ``stripnl``
  64. Strip leading and trailing newlines from the input (default: True).
  65. ``stripall``
  66. Strip all leading and trailing whitespace from the input
  67. (default: False).
  68. ``ensurenl``
  69. Make sure that the input ends with a newline (default: True). This
  70. is required for some lexers that consume input linewise.
  71. .. versionadded:: 1.3
  72. ``tabsize``
  73. If given and greater than 0, expand tabs in the input (default: 0).
  74. ``encoding``
  75. If given, must be an encoding name. This encoding will be used to
  76. convert the input string to Unicode, if it is not already a Unicode
  77. string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
  78. Latin1 detection. Can also be ``'chardet'`` to use the chardet
  79. library, if it is installed.
  80. ``inencoding``
  81. Overrides the ``encoding`` if given.
  82. """
  83. #: Full name of the lexer, in human-readable form
  84. name = None
  85. #: A list of short, unique identifiers that can be used to look
  86. #: up the lexer from a list, e.g., using `get_lexer_by_name()`.
  87. aliases = []
  88. #: A list of `fnmatch` patterns that match filenames which contain
  89. #: content for this lexer. The patterns in this list should be unique among
  90. #: all lexers.
  91. filenames = []
  92. #: A list of `fnmatch` patterns that match filenames which may or may not
  93. #: contain content for this lexer. This list is used by the
  94. #: :func:`.guess_lexer_for_filename()` function, to determine which lexers
  95. #: are then included in guessing the correct one. That means that
  96. #: e.g. every lexer for HTML and a template language should include
  97. #: ``\*.html`` in this list.
  98. alias_filenames = []
  99. #: A list of MIME types for content that can be lexed with this lexer.
  100. mimetypes = []
  101. #: Priority, should multiple lexers match and no content is provided
  102. priority = 0
  103. #: URL of the language specification/definition. Used in the Pygments
  104. #: documentation. Set to an empty string to disable.
  105. url = None
  106. #: Version of Pygments in which the lexer was added.
  107. version_added = None
  108. #: Example file name. Relative to the ``tests/examplefiles`` directory.
  109. #: This is used by the documentation generator to show an example.
  110. _example = None
  111. def __init__(self, **options):
  112. """
  113. This constructor takes arbitrary options as keyword arguments.
  114. Every subclass must first process its own options and then call
  115. the `Lexer` constructor, since it processes the basic
  116. options like `stripnl`.
  117. An example looks like this:
  118. .. sourcecode:: python
  119. def __init__(self, **options):
  120. self.compress = options.get('compress', '')
  121. Lexer.__init__(self, **options)
  122. As these options must all be specifiable as strings (due to the
  123. command line usage), there are various utility functions
  124. available to help with that, see `Utilities`_.
  125. """
  126. self.options = options
  127. self.stripnl = get_bool_opt(options, 'stripnl', True)
  128. self.stripall = get_bool_opt(options, 'stripall', False)
  129. self.ensurenl = get_bool_opt(options, 'ensurenl', True)
  130. self.tabsize = get_int_opt(options, 'tabsize', 0)
  131. self.encoding = options.get('encoding', 'guess')
  132. self.encoding = options.get('inencoding') or self.encoding
  133. self.filters = []
  134. for filter_ in get_list_opt(options, 'filters', ()):
  135. self.add_filter(filter_)
  136. def __repr__(self):
  137. if self.options:
  138. return f'<pygments.lexers.{self.__class__.__name__} with {self.options!r}>'
  139. else:
  140. return f'<pygments.lexers.{self.__class__.__name__}>'
  141. def add_filter(self, filter_, **options):
  142. """
  143. Add a new stream filter to this lexer.
  144. """
  145. if not isinstance(filter_, Filter):
  146. filter_ = get_filter_by_name(filter_, **options)
  147. self.filters.append(filter_)
  148. def analyse_text(text):
  149. """
  150. A static method which is called for lexer guessing.
  151. It should analyse the text and return a float in the range
  152. from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer
  153. will not be selected as the most probable one, if it returns
  154. ``1.0``, it will be selected immediately. This is used by
  155. `guess_lexer`.
  156. The `LexerMeta` metaclass automatically wraps this function so
  157. that it works like a static method (no ``self`` or ``cls``
  158. parameter) and the return value is automatically converted to
  159. `float`. If the return value is an object that is boolean `False`
  160. it's the same as if the return values was ``0.0``.
  161. """

    def _preprocess_lexer_input(self, text):
        """Apply preprocessing such as decoding the input, removing BOM and normalizing newlines."""
        if not isinstance(text, str):
            if self.encoding == 'guess':
                text, _ = guess_decode(text)
            elif self.encoding == 'chardet':
                try:
                    # pip vendoring note: this code is not reachable by pip,
                    # removed import of chardet to make it clear.
                    raise ImportError('chardet is not vendored by pip')
                except ImportError as e:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/') from e
                # check for BOM first
                decoded = None
                for bom, encoding in _encoding_map:
                    if text.startswith(bom):
                        decoded = text[len(bom):].decode(encoding, 'replace')
                        break
                # no BOM found, so use chardet
                if decoded is None:
                    enc = chardet.detect(text[:1024])  # Guess using first 1KB
                    decoded = text.decode(enc.get('encoding') or 'utf-8',
                                          'replace')
                text = decoded
            else:
                text = text.decode(self.encoding)
                if text.startswith('\ufeff'):
                    text = text[len('\ufeff'):]
        else:
            if text.startswith('\ufeff'):
                text = text[len('\ufeff'):]

        # text now *is* a unicode string
        text = text.replace('\r\n', '\n')
        text = text.replace('\r', '\n')

        if self.stripall:
            text = text.strip()
        elif self.stripnl:
            text = text.strip('\n')

        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)

        if self.ensurenl and not text.endswith('\n'):
            text += '\n'

        return text

    def get_tokens(self, text, unfiltered=False):
        """
        This method is the basic interface of a lexer. It is called by
        the `highlight()` function. It must process the text and return an
        iterable of ``(tokentype, value)`` pairs from `text`.

        Normally, you don't need to override this method. The default
        implementation processes the options recognized by all lexers
        (`stripnl`, `stripall` and so on), and then yields all tokens
        from `get_tokens_unprocessed()`, with the ``index`` dropped.

        If `unfiltered` is set to `True`, the filtering mechanism is
        bypassed even if filters are defined.
        """
        text = self._preprocess_lexer_input(text)

        def streamer():
            for _, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream

    def get_tokens_unprocessed(self, text):
        """
        This method should process the text and return an iterable of
        ``(index, tokentype, value)`` tuples where ``index`` is the starting
        position of the token within the input text.

        It must be overridden by subclasses. It is recommended to
        implement it as a generator to maximize effectiveness.
        """
        raise NotImplementedError
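
# Illustrative sketch (editor's note): typical use of the public interface
# defined above -- instantiate a concrete lexer with options, optionally add
# a filter by name, and iterate the (tokentype, value) pairs produced by
# ``get_tokens()``.  ``PythonLexer`` comes from ``pygments.lexers``
# (``pip._vendor.pygments.lexers`` in this vendored copy); the option values
# are just examples.
#
#     from pygments.lexers import PythonLexer
#
#     lexer = PythonLexer(stripall=True, tabsize=4)
#     lexer.add_filter('keywordcase', case='upper')
#     for tokentype, value in lexer.get_tokens("print('hi')\n"):
#         print(tokentype, repr(value))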


class DelegatingLexer(Lexer):
    """
    This lexer takes two lexers as arguments. A root lexer and
    a language lexer. First everything is scanned using the language
    lexer, afterwards all ``Other`` tokens are lexed using the root
    lexer.

    The lexers from the ``template`` lexer package use this base lexer.
    """

    def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
        self.root_lexer = _root_lexer(**options)
        self.language_lexer = _language_lexer(**options)
        self.needle = _needle
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        buffered = ''
        insertions = []
        lng_buffer = []
        for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
            if t is self.needle:
                if lng_buffer:
                    insertions.append((len(buffered), lng_buffer))
                    lng_buffer = []
                buffered += v
            else:
                lng_buffer.append((i, t, v))
        if lng_buffer:
            insertions.append((len(buffered), lng_buffer))
        return do_insertions(insertions,
                             self.root_lexer.get_tokens_unprocessed(buffered))
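
# Illustrative sketch (editor's note): how a template lexer is typically
# wired up with DelegatingLexer.  ``HtmlLexer`` is a real Pygments lexer, but
# ``MyTemplateLexer`` and ``MyTemplateHtmlLexer`` are hypothetical names; a
# real language lexer would emit ``Other`` tokens for the parts that belong
# to the surrounding HTML.
#
#     class MyTemplateHtmlLexer(DelegatingLexer):
#         """Highlight HTML, delegating template markup to MyTemplateLexer."""
#         def __init__(self, **options):
#             super().__init__(HtmlLexer, MyTemplateLexer, **options)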


# ------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#


class include(str):  # pylint: disable=invalid-name
    """
    Indicates that a state should include rules from another state.
    """
    pass


class _inherit:
    """
    Indicates that a state should inherit from its superclass.
    """
    def __repr__(self):
        return 'inherit'

inherit = _inherit()  # pylint: disable=invalid-name
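
# Illustrative sketch (editor's note): ``inherit`` marks the spot where the
# superclass' rules for the same state are spliced in (see
# ``RegexLexerMeta.get_tokendefs`` below).  ``BaseLexer``/``ExtendedLexer``
# and the patterns are hypothetical.
#
#     class ExtendedLexer(BaseLexer):
#         tokens = {
#             'root': [
#                 (r'@\w+', Name.Decorator),   # new rule tried first
#                 inherit,                     # then all of BaseLexer's 'root' rules
#             ],
#         }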


class combined(tuple):  # pylint: disable=invalid-name
    """
    Indicates a state combined from multiple states.
    """

    def __new__(cls, *args):
        return tuple.__new__(cls, args)

    def __init__(self, *args):
        # tuple.__init__ doesn't do anything
        pass


class _PseudoMatch:
    """
    A pseudo match object constructed from a string.
    """

    def __init__(self, start, text):
        self._text = text
        self._start = start

    def start(self, arg=None):
        return self._start

    def end(self, arg=None):
        return self._start + len(self._text)

    def group(self, arg=None):
        if arg:
            raise IndexError('No such group')
        return self._text

    def groups(self):
        return (self._text,)

    def groupdict(self):
        return {}


def bygroups(*args):
    """
    Callback that yields multiple actions for each group in the match.
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                continue
            elif type(action) is _TokenType:
                data = match.group(i + 1)
                if data:
                    yield match.start(i + 1), action, data
            else:
                data = match.group(i + 1)
                if data is not None:
                    if ctx:
                        ctx.pos = match.start(i + 1)
                    for item in action(lexer,
                                       _PseudoMatch(match.start(i + 1), data), ctx):
                        if item:
                            yield item
        if ctx:
            ctx.pos = match.end()
    return callback
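
# Illustrative sketch (editor's note): a rule whose regex has three groups,
# with ``bygroups`` assigning one token type per group.  The pattern and
# token types are only an example of the calling convention.
#
#     tokens = {
#         'root': [
#             (r'(\w+)(\s*)(=)', bygroups(Name.Variable, Whitespace, Operator)),
#         ],
#     }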


class _This:
    """
    Special singleton used for indicating the caller class.
    Used by ``using``.
    """

this = _This()


def using(_other, **kwargs):
    """
    Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a simple
    string which is assumed to be on top of the root state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
    """
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
        def callback(lexer, match, ctx=None):
            # if keyword arguments are given the callback
            # function has to create a new lexer instance
            if kwargs:
                # XXX: cache that somehow
                kwargs.update(lexer.options)
                lx = lexer.__class__(**kwargs)
            else:
                lx = lexer
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    else:
        def callback(lexer, match, ctx=None):
            # XXX: cache that somehow
            kwargs.update(lexer.options)
            lx = _other(**kwargs)

            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    return callback
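
# Illustrative sketch (editor's note): two common uses of ``using`` inside a
# ``tokens`` table -- handing part of a match to another lexer class via
# ``bygroups``, and re-entering the current lexer through the ``this``
# singleton.  The patterns are invented; ``JavascriptLexer`` and ``Name.Tag``
# are real Pygments names.
#
#     tokens = {
#         'root': [
#             # hand the body of a <script> block to another lexer
#             (r'(<script>)(.*?)(</script>)',
#              bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)),
#             # re-lex interpolations with this same lexer, starting in
#             # a different state
#             (r'\{\{.*?\}\}', using(this, state='interpolation')),
#         ],
#     }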


class default:
    """
    Indicates a state or state action (e.g. #pop) to apply.
    For example default('#pop') is equivalent to ('', Token, '#pop').
    Note that state tuples may be used as well.

    .. versionadded:: 2.0
    """
    def __init__(self, state):
        self.state = state


class words(Future):
    """
    Indicates a list of literal words that is transformed into an optimized
    regex that matches any of the words.

    .. versionadded:: 2.0
    """
    def __init__(self, words, prefix='', suffix=''):
        self.words = words
        self.prefix = prefix
        self.suffix = suffix

    def get(self):
        return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
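
# Illustrative sketch (editor's note): ``words`` in a rule, compiled lazily
# into a single optimized alternation by ``RegexLexerMeta._process_regex``.
# The keyword list and the ``\b`` suffix are example values.
#
#     tokens = {
#         'root': [
#             (words(('if', 'elif', 'else', 'while'), suffix=r'\b'), Keyword),
#         ],
#     }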


class RegexLexerMeta(LexerMeta):
    """
    Metaclass for RegexLexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """

    def _process_regex(cls, regex, rflags, state):
        """Preprocess the regular expression component of a token definition."""
        if isinstance(regex, Future):
            regex = regex.get()
        return re.compile(regex, rflags).match

    def _process_token(cls, token):
        """Preprocess the token component of a token definition."""
        assert type(token) is _TokenType or callable(token), \
            f'token type must be simple type or callable, not {token!r}'
        return token

    def _process_new_state(cls, new_state, unprocessed, processed):
        """Preprocess the state transition action of a token definition."""
        if isinstance(new_state, str):
            # an existing state
            if new_state == '#pop':
                return -1
            elif new_state in unprocessed:
                return (new_state,)
            elif new_state == '#push':
                return new_state
            elif new_state[:5] == '#pop:':
                return -int(new_state[5:])
            else:
                assert False, f'unknown new state {new_state!r}'
        elif isinstance(new_state, combined):
            # combine a new state from existing ones
            tmp_state = '_tmp_%d' % cls._tmpname
            cls._tmpname += 1
            itokens = []
            for istate in new_state:
                assert istate != new_state, f'circular state ref {istate!r}'
                itokens.extend(cls._process_state(unprocessed,
                                                  processed, istate))
            processed[tmp_state] = itokens
            return (tmp_state,)
        elif isinstance(new_state, tuple):
            # push more than one state
            for istate in new_state:
                assert (istate in unprocessed or
                        istate in ('#pop', '#push')), \
                    'unknown new state ' + istate
            return new_state
        else:
            assert False, f'unknown new state def {new_state!r}'

    def _process_state(cls, unprocessed, processed, state):
        """Preprocess a single state definition."""
        assert isinstance(state, str), f"wrong state name {state!r}"
        assert state[0] != '#', f"invalid state name {state!r}"
        if state in processed:
            return processed[state]
        tokens = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, f"circular state reference {state!r}"
                tokens.extend(cls._process_state(unprocessed, processed,
                                                 str(tdef)))
                continue
            if isinstance(tdef, _inherit):
                # should be processed already, but may not in the case of:
                # 1. the state has no counterpart in any parent
                # 2. the state includes more than one 'inherit'
                continue
            if isinstance(tdef, default):
                new_state = cls._process_new_state(tdef.state, unprocessed, processed)
                tokens.append((re.compile('').match, None, new_state))
                continue

            assert type(tdef) is tuple, f"wrong rule def {tdef!r}"

            try:
                rex = cls._process_regex(tdef[0], rflags, state)
            except Exception as err:
                raise ValueError(f"uncompilable regex {tdef[0]!r} in state {state!r} of {cls!r}: {err}") from err

            token = cls._process_token(tdef[1])

            if len(tdef) == 2:
                new_state = None
            else:
                new_state = cls._process_new_state(tdef[2],
                                                   unprocessed, processed)

            tokens.append((rex, token, new_state))
        return tokens

    def process_tokendef(cls, name, tokendefs=None):
        """Preprocess a dictionary of token definitions."""
        processed = cls._all_tokens[name] = {}
        tokendefs = tokendefs or cls.tokens[name]
        for state in list(tokendefs):
            cls._process_state(tokendefs, processed, state)
        return processed

    def get_tokendefs(cls):
        """
        Merge tokens from superclasses in MRO order, returning a single tokendef
        dictionary.

        Any state that is not defined by a subclass will be inherited
        automatically. States that *are* defined by subclasses will, by
        default, override that state in the superclass. If a subclass wishes to
        inherit definitions from a superclass, it can use the special value
        "inherit", which will cause the superclass' state definition to be
        included at that point in the state.
        """
        tokens = {}
        inheritable = {}
        for c in cls.__mro__:
            toks = c.__dict__.get('tokens', {})

            for state, items in toks.items():
                curitems = tokens.get(state)
                if curitems is None:
                    # N.b. because this is assigned by reference, sufficiently
                    # deep hierarchies are processed incrementally (e.g. for
                    # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
                    # will not see any inherits in B).
                    tokens[state] = items
                    try:
                        inherit_ndx = items.index(inherit)
                    except ValueError:
                        continue
                    inheritable[state] = inherit_ndx
                    continue

                inherit_ndx = inheritable.pop(state, None)
                if inherit_ndx is None:
                    continue

                # Replace the "inherit" value with the items
                curitems[inherit_ndx:inherit_ndx+1] = items
                try:
                    # N.b. this is the index in items (that is, the superclass
                    # copy), so offset required when storing below.
                    new_inh_ndx = items.index(inherit)
                except ValueError:
                    pass
                else:
                    inheritable[state] = inherit_ndx + new_inh_ndx

        return tokens

    def __call__(cls, *args, **kwds):
        """Instantiate cls after preprocessing its token definitions."""
        if '_tokens' not in cls.__dict__:
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef('', cls.get_tokendefs())

        return type.__call__(cls, *args, **kwds)


class RegexLexer(Lexer, metaclass=RegexLexerMeta):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: At all times there is a stack of states. Initially, the stack contains
    #: a single state 'root'. The top of the stack is called "the current state".
    #:
    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: ``new_state`` can be omitted to signify no state transition.
    #: If ``new_state`` is a string, it is pushed on the stack. This ensures
    #: the new current state is ``new_state``.
    #: If ``new_state`` is a tuple of strings, all of those strings are pushed
    #: on the stack and the current state will be the last element of the list.
    #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again. Note that if you push while in a combined state, the combined
    #: state itself is pushed, and not only the state in which the rule is
    #: defined.
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}
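
    # Illustrative sketch (editor's note): a minimal subclass showing how the
    # ``tokens`` table and the state stack documented above fit together.
    # The lexer name, aliases and patterns are invented for the example.
    #
    #     class SimpleConfLexer(RegexLexer):
    #         name = 'SimpleConf'
    #         aliases = ['simpleconf']
    #
    #         tokens = {
    #             'root': [
    #                 (r'#.*?$', Comment.Single),
    #                 (r'"', String, 'string'),      # push the 'string' state
    #                 (r'\S+', Text),
    #                 (r'\s+', Whitespace),
    #             ],
    #             'string': [
    #                 (r'[^"\\]+', String),
    #                 (r'\\.', String.Escape),
    #                 (r'"', String, '#pop'),        # back to 'root'
    #             ],
    #         }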

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            yield from action(self, m)
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(statestack) > 1:
                                        statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop, but keep at least one state on the stack
                            # (random code leading to unexpected pops should
                            # not allow exceptions)
                            if abs(new_state) >= len(statestack):
                                del statestack[1:]
                            else:
                                del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, f"wrong state def: {new_state!r}"
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # We are here only if all state tokens have been considered
                # and there was not a match on any of them.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Whitespace, '\n'
                        pos += 1
                        continue
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    break


class LexerContext:
    """
    A helper object that holds lexer position data.
    """

    def __init__(self, text, pos, stack=None, end=None):
        self.text = text
        self.pos = pos
        self.end = end or len(text)  # end=0 not supported ;-)
        self.stack = stack or ['root']

    def __repr__(self):
        return f'LexerContext({self.text!r}, {self.pos!r}, {self.stack!r})'


class ExtendedRegexLexer(RegexLexer):
    """
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into (tokentype, text) pairs.
        If ``context`` is given, use this lexer context instead.
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            yield from action(self, m, ctx)
                            if not new_state:
                                # altered the state stack?
                                statetokens = tokendefs[ctx.stack[-1]]
                                # CAUTION: callback must set ctx.pos!
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(ctx.stack) > 1:
                                        ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # see RegexLexer for why this check is made
                            if abs(new_state) >= len(ctx.stack):
                                del ctx.stack[1:]
                            else:
                                del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, f"wrong state def: {new_state!r}"
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, '\n'
                        ctx.pos += 1
                        continue
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
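
# Illustrative sketch (editor's note): callbacks used with ExtendedRegexLexer
# receive the LexerContext and must advance ``ctx.pos`` themselves (see the
# CAUTION comment above).  The scenario and names are invented; only the
# calling convention comes from the code.
#
#     def caps_callback(lexer, match, ctx):
#         # emit one token for the whole match, then advance the context
#         yield match.start(), Name.Constant, match.group()
#         ctx.pos = match.end()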


def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.

    TODO: clean up the code here.
    """
    insertions = iter(insertions)
    try:
        index, itokens = next(insertions)
    except StopIteration:
        # no insertions
        yield from tokens
        return

    realpos = None
    insleft = True

    # iterate over the token stream where we want to insert
    # the tokens from the insertion list.
    for i, t, v in tokens:
        # first iteration. store the position of first item
        if realpos is None:
            realpos = i
        oldi = 0
        while insleft and i + len(v) >= index:
            tmpval = v[oldi:index - i]
            if tmpval:
                yield realpos, t, tmpval
                realpos += len(tmpval)
            for it_index, it_token, it_value in itokens:
                yield realpos, it_token, it_value
                realpos += len(it_value)
            oldi = index - i
            try:
                index, itokens = next(insertions)
            except StopIteration:
                insleft = False
                break  # not strictly necessary
        if oldi < len(v):
            yield realpos, t, v[oldi:]
            realpos += len(v) - oldi

    # leftover tokens
    while insleft:
        # no normal tokens, set realpos to zero
        realpos = realpos or 0
        for p, t, v in itokens:
            yield realpos, t, v
            realpos += len(v)
        try:
            index, itokens = next(insertions)
        except StopIteration:
            insleft = False
            break  # not strictly necessary
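
# Illustrative sketch (editor's note): how DelegatingLexer (above) and the
# console-session lexers use this helper -- buffered tokens from one lexer
# are spliced into another lexer's stream at recorded offsets.  The literal
# values are example data; ``Generic.Prompt`` and ``PythonLexer`` are real
# Pygments names.
#
#     prompts = [(0, [(0, Generic.Prompt, '>>> ')])]
#     merged = do_insertions(prompts,
#                            PythonLexer().get_tokens_unprocessed('print(1)\n'))
#     for index, tokentype, value in merged:
#         print(index, tokentype, repr(value))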


class ProfilingRegexLexerMeta(RegexLexerMeta):
    """Metaclass for ProfilingRegexLexer, collects regex timing info."""

    def _process_regex(cls, regex, rflags, state):
        if isinstance(regex, words):
            rex = regex_opt(regex.words, prefix=regex.prefix,
                            suffix=regex.suffix)
        else:
            rex = regex
        compiled = re.compile(rex, rflags)

        def match_func(text, pos, endpos=sys.maxsize):
            info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
            t0 = time.time()
            res = compiled.match(text, pos, endpos)
            t1 = time.time()
            info[0] += 1
            info[1] += t1 - t0
            return res
        return match_func


class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
    """Drop-in replacement for RegexLexer that does profiling of its regexes."""

    _prof_data = []
    _prof_sort_index = 4  # defaults to time per call

    def get_tokens_unprocessed(self, text, stack=('root',)):
        # this needs to be a stack, since using(this) will produce nested calls
        self.__class__._prof_data.append({})
        yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
        rawdata = self.__class__._prof_data.pop()
        data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
                        n, 1000 * t, 1000 * t / n)
                       for ((s, r), (n, t)) in rawdata.items()),
                      key=lambda x: x[self._prof_sort_index],
                      reverse=True)
        sum_total = sum(x[3] for x in data)

        print()
        print('Profiling result for %s lexing %d chars in %.3f ms' %
              (self.__class__.__name__, len(text), sum_total))
        print('=' * 110)
        print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
        print('-' * 110)
        for d in data:
            print('%-20s %-65s %5d %8.4f %8.4f' % d)
        print('=' * 110)
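
# Illustrative sketch (editor's note): a common way to use the profiler is to
# temporarily derive a lexer from ProfilingRegexLexer instead of RegexLexer;
# timing statistics are printed after each tokenization.  The lexer and its
# rules are invented for the example.
#
#     class MyLexer(ProfilingRegexLexer):   # instead of RegexLexer
#         name = 'My language'
#         tokens = {
#             'root': [
#                 (r'\w+', Text),
#                 (r'\s+', Whitespace),
#             ],
#         }
#
#     list(MyLexer().get_tokens(source_text))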