"""
    pygments.lexers.textfmts
    ~~~~~~~~~~~~~~~~~~~~~~~~

    Lexers for various text formats.

    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.lexer import RegexLexer, bygroups, default, include
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
    Number, Generic, Literal, Punctuation
from pygments.util import ClassNotFound

__all__ = ['IrcLogsLexer', 'TodotxtLexer', 'HttpLexer', 'GettextLexer',
           'NotmuchLexer', 'KernelLogLexer']


class IrcLogsLexer(RegexLexer):
    """
    Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
    """

    name = 'IRC logs'
    aliases = ['irc']
    filenames = ['*.weechatlog']
    mimetypes = ['text/x-irclog']

    flags = re.VERBOSE | re.MULTILINE
    timestamp = r"""
        (
          # irssi / xchat and others
          (?: \[|\()?                  # Opening bracket or paren for the timestamp
            (?:                        # Timestamp
                (?: (?:\d{1,4} [-/])*  # Date as - or /-separated groups of digits
                    (?:\d{1,4})
                 [T ])?                # Date/time separator: T or space
                (?: \d?\d [:.])*       # Time as :/.-separated groups of 1 or 2 digits
                    (?: \d?\d)
            )
          (?: \]|\))?\s+               # Closing bracket or paren for the timestamp
        |
          # weechat
          \d{4}\s\w{3}\s\d{2}\s        # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        |
          # xchat
          \w{3}\s\d{2}\s               # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        )?
    """
    tokens = {
        'root': [
            # log start/end
            (r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
            # hack
            ("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
            # normal msgs
            ("^" + timestamp + r"""
                (\s*<.*?>\s*)          # Nick """,
             bygroups(Comment.Preproc, Name.Tag), 'msg'),
            # /me msgs
            ("^" + timestamp + r"""
                (\s*[*]\s+)            # Star
                (\S+\s+.*?\n)          # Nick + rest of message """,
             bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
            # join/part msgs
            ("^" + timestamp + r"""
                (\s*(?:\*{3}|<?-[!@=P]?->?)\s*)  # Star(s) or symbols
                (\S+\s+)                         # Nick + Space
                (.*?\n)                          # Rest of message """,
             bygroups(Comment.Preproc, Keyword, String, Comment)),
            (r"^.*?\n", Text),
        ],
        'msg': [
            (r"\S+:(?!//)", Name.Attribute),  # Prefix
            (r".*\n", Text, '#pop'),
        ],
    }
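

# A minimal usage sketch: tokenize one synthetic irssi-style log line with
# IrcLogsLexer. The _demo_* helper and the sample line are illustrative only,
# not part of the lexer API.
def _demo_irc_logs_lexer():
    sample = '12:34 <alice> hello world\n'
    for token, value in IrcLogsLexer().get_tokens(sample):
        print(token, repr(value))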


class GettextLexer(RegexLexer):
    """
    Lexer for Gettext catalog files.

    .. versionadded:: 0.9
    """
    name = 'Gettext Catalog'
    aliases = ['pot', 'po']
    filenames = ['*.pot', '*.po']
    mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']

    tokens = {
        'root': [
            (r'^#,\s.*?$', Keyword.Type),
            (r'^#:\s.*?$', Keyword.Declaration),
            # (r'^#$', Comment),
            (r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
            (r'^(")([A-Za-z-]+:)(.*")$',
             bygroups(String, Name.Property, String)),
            (r'^".*"$', String),
            (r'^(msgid|msgid_plural|msgstr|msgctxt)(\s+)(".*")$',
             bygroups(Name.Variable, Text, String)),
            (r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
             bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
        ]
    }
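

# A minimal usage sketch: tokenize a tiny synthetic .po fragment with
# GettextLexer. The helper and sample text are illustrative only.
def _demo_gettext_lexer():
    sample = '#: hello.c:42\nmsgid "Hello"\nmsgstr "Bonjour"\n'
    for token, value in GettextLexer().get_tokens(sample):
        print(token, repr(value))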


class HttpLexer(RegexLexer):
    """
    Lexer for HTTP sessions.

    .. versionadded:: 1.5
    """

    name = 'HTTP'
    aliases = ['http']

    flags = re.DOTALL

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """Reset the content-type state."""
        self.content_type = None
        return RegexLexer.get_tokens_unprocessed(self, text, stack)

    def header_callback(self, match):
        if match.group(1).lower() == 'content-type':
            content_type = match.group(5).strip()
            if ';' in content_type:
                content_type = content_type[:content_type.find(';')].strip()
            self.content_type = content_type
        yield match.start(1), Name.Attribute, match.group(1)
        yield match.start(2), Text, match.group(2)
        yield match.start(3), Operator, match.group(3)
        yield match.start(4), Text, match.group(4)
        yield match.start(5), Literal, match.group(5)
        yield match.start(6), Text, match.group(6)

    def continuous_header_callback(self, match):
        yield match.start(1), Text, match.group(1)
        yield match.start(2), Literal, match.group(2)
        yield match.start(3), Text, match.group(3)

    def content_callback(self, match):
        content_type = getattr(self, 'content_type', None)
        content = match.group()
        offset = match.start()
        if content_type:
            from pygments.lexers import get_lexer_for_mimetype
            possible_lexer_mimetypes = [content_type]
            if '+' in content_type:
                # application/calendar+xml can be treated as application/xml
                # if there's not a better match.
                general_type = re.sub(r'^(.*)/.*\+(.*)$', r'\1/\2',
                                      content_type)
                possible_lexer_mimetypes.append(general_type)

            for i in possible_lexer_mimetypes:
                try:
                    lexer = get_lexer_for_mimetype(i)
                except ClassNotFound:
                    pass
                else:
                    for idx, token, value in lexer.get_tokens_unprocessed(content):
                        yield offset + idx, token, value
                    return

        yield offset, Text, content

    tokens = {
        'root': [
            (r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH)( +)([^ ]+)( +)'
             r'(HTTP)(/)(1\.[01]|2(?:\.0)?|3)(\r?\n|\Z)',
             bygroups(Name.Function, Text, Name.Namespace, Text,
                      Keyword.Reserved, Operator, Number, Text),
             'headers'),
            (r'(HTTP)(/)(1\.[01]|2(?:\.0)?|3)( +)(\d{3})(?:( +)([^\r\n]*))?(\r?\n|\Z)',
             bygroups(Keyword.Reserved, Operator, Number, Text, Number, Text,
                      Name.Exception, Text),
             'headers'),
        ],
        'headers': [
            (r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|\Z)', header_callback),
            (r'([\t ]+)([^\r\n]+)(\r?\n|\Z)', continuous_header_callback),
            (r'\r?\n', Text, 'content')
        ],
        'content': [
            (r'.+', content_callback)
        ]
    }

    def analyse_text(text):
        return text.startswith(('GET /', 'POST /', 'PUT /', 'DELETE /', 'HEAD /',
                                'OPTIONS /', 'TRACE /', 'PATCH /'))
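

# A minimal usage sketch: a synthetic HTTP response whose JSON body is re-lexed
# via the Content-Type header, exercising header_callback and content_callback
# above. The helper and sample exchange are illustrative only.
def _demo_http_lexer():
    sample = ('HTTP/1.1 200 OK\r\n'
              'Content-Type: application/json\r\n'
              '\r\n'
              '{"status": "ok"}')
    for token, value in HttpLexer().get_tokens(sample):
        print(token, repr(value))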


class TodotxtLexer(RegexLexer):
    """
    Lexer for `Todo.txt <http://todotxt.com/>`_ todo list format.

    .. versionadded:: 2.0
    """

    name = 'Todotxt'
    aliases = ['todotxt']
    # *.todotxt is not a standard extension for Todo.txt files; including it
    # makes testing easier, and also makes autodetecting file type easier.
    filenames = ['todo.txt', '*.todotxt']
    mimetypes = ['text/x-todo']

    # Aliases mapping standard token types of Todo.txt format concepts
    CompleteTaskText = Operator  # Chosen to de-emphasize complete tasks
    IncompleteTaskText = Text    # Incomplete tasks should look like plain text

    # Priority should have most emphasis to indicate importance of tasks
    Priority = Generic.Heading
    # Dates should have next most emphasis because time is important
    Date = Generic.Subheading

    # Project and context should have equal weight, and be in different colors
    Project = Generic.Error
    Context = String

    # If tag functionality is added, it should have the same weight as Project
    # and Context, and a different color. Generic.Traceback would work well.

    # Regex patterns for building up rules; dates, priorities, projects, and
    # contexts are all atomic
    # TODO: Make date regex more ISO 8601 compliant
    date_regex = r'\d{4,}-\d{2}-\d{2}'
    priority_regex = r'\([A-Z]\)'
    project_regex = r'\+\S+'
    context_regex = r'@\S+'

    # Compound regex expressions
    complete_one_date_regex = r'(x )(' + date_regex + r')'
    complete_two_date_regex = (complete_one_date_regex + r'( )(' +
                               date_regex + r')')
    priority_date_regex = r'(' + priority_regex + r')( )(' + date_regex + r')'

    tokens = {
        # Should parse starting at beginning of line; each line is a task
        'root': [
            # Complete task entry points: two total:
            # 1. Complete task with two dates
            (complete_two_date_regex, bygroups(CompleteTaskText, Date,
                                               CompleteTaskText, Date),
             'complete'),
            # 2. Complete task with one date
            (complete_one_date_regex, bygroups(CompleteTaskText, Date),
             'complete'),

            # Incomplete task entry points: six total:
            # 1. Priority plus date
            (priority_date_regex, bygroups(Priority, IncompleteTaskText, Date),
             'incomplete'),
            # 2. Priority only
            (priority_regex, Priority, 'incomplete'),
            # 3. Leading date
            (date_regex, Date, 'incomplete'),
            # 4. Leading context
            (context_regex, Context, 'incomplete'),
            # 5. Leading project
            (project_regex, Project, 'incomplete'),
            # 6. Non-whitespace catch-all
            (r'\S+', IncompleteTaskText, 'incomplete'),
        ],

        # Parse a complete task
        'complete': [
            # Newline indicates end of task, should return to root
            (r'\s*\n', CompleteTaskText, '#pop'),
            # Tokenize contexts and projects
            (context_regex, Context),
            (project_regex, Project),
            # Tokenize non-whitespace text
            (r'\S+', CompleteTaskText),
            # Tokenize whitespace not containing a newline
            (r'\s+', CompleteTaskText),
        ],

        # Parse an incomplete task
        'incomplete': [
            # Newline indicates end of task, should return to root
            (r'\s*\n', IncompleteTaskText, '#pop'),
            # Tokenize contexts and projects
            (context_regex, Context),
            (project_regex, Project),
            # Tokenize non-whitespace text
            (r'\S+', IncompleteTaskText),
            # Tokenize whitespace not containing a newline
            (r'\s+', IncompleteTaskText),
        ],
    }
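

# A minimal usage sketch: one completed task (two dates) and one prioritized
# task, showing the Date/Priority/Project/Context token aliases above. The
# helper and sample tasks are illustrative only.
def _demo_todotxt_lexer():
    sample = ('x 2021-03-02 2021-03-01 File taxes +finance @home\n'
              '(A) 2021-03-03 Call the bank @phone\n')
    for token, value in TodotxtLexer().get_tokens(sample):
        print(token, repr(value))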


class NotmuchLexer(RegexLexer):
    """
    For `Notmuch <https://notmuchmail.org/>`_ email text format.

    .. versionadded:: 2.5

    Additional options accepted:

    `body_lexer`
        If given, highlight the contents of the message body with the specified
        lexer, else guess it according to the body content (default: ``None``).
    """

    name = 'Notmuch'
    aliases = ['notmuch']

    def _highlight_code(self, match):
        code = match.group(1)

        try:
            if self.body_lexer:
                lexer = get_lexer_by_name(self.body_lexer)
            else:
                lexer = guess_lexer(code.strip())
        except ClassNotFound:
            lexer = get_lexer_by_name('text')

        yield from lexer.get_tokens_unprocessed(code)

    tokens = {
        'root': [
            (r'\fmessage\{\s*', Keyword, ('message', 'message-attr')),
        ],
        'message-attr': [
            (r'(\s*id:\s*)(\S+)', bygroups(Name.Attribute, String)),
            (r'(\s*(?:depth|match|excluded):\s*)(\d+)',
             bygroups(Name.Attribute, Number.Integer)),
            (r'(\s*filename:\s*)(.+\n)',
             bygroups(Name.Attribute, String)),
            default('#pop'),
        ],
        'message': [
            (r'\fmessage\}\n', Keyword, '#pop'),
            (r'\fheader\{\n', Keyword, 'header'),
            (r'\fbody\{\n', Keyword, 'body'),
        ],
        'header': [
            (r'\fheader\}\n', Keyword, '#pop'),
            (r'((?:Subject|From|To|Cc|Date):\s*)(.*\n)',
             bygroups(Name.Attribute, String)),
            (r'(.*)(\s*\(.*\))(\s*\(.*\)\n)',
             bygroups(Generic.Strong, Literal, Name.Tag)),
        ],
        'body': [
            (r'\fpart\{\n', Keyword, 'part'),
            (r'\f(part|attachment)\{\s*', Keyword, ('part', 'part-attr')),
            (r'\fbody\}\n', Keyword, '#pop'),
        ],
        'part-attr': [
            (r'(ID:\s*)(\d+)', bygroups(Name.Attribute, Number.Integer)),
            (r'(,\s*)((?:Filename|Content-id):\s*)([^,]+)',
             bygroups(Punctuation, Name.Attribute, String)),
            (r'(,\s*)(Content-type:\s*)(.+\n)',
             bygroups(Punctuation, Name.Attribute, String)),
            default('#pop'),
        ],
        'part': [
            (r'\f(?:part|attachment)\}\n', Keyword, '#pop'),
            (r'\f(?:part|attachment)\{\s*', Keyword, ('#push', 'part-attr')),
            (r'^Non-text part: .*\n', Comment),
            (r'(?s)(.*?(?=\f(?:part|attachment)\}\n))', _highlight_code),
        ],
    }

    def analyse_text(text):
        return 1.0 if text.startswith('\fmessage{') else 0.0

    def __init__(self, **options):
        self.body_lexer = options.get('body_lexer', None)
        RegexLexer.__init__(self, **options)
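

# A minimal usage sketch: a synthetic message in notmuch's '\f'-delimited text
# format, with body_lexer forcing the plain-text lexer for the body instead of
# guessing. The helper and sample message are illustrative only.
def _demo_notmuch_lexer():
    sample = ('\fmessage{ id:1 depth:0 filename:/tmp/mail\n'
              '\fheader{\nSubject: hi\n\fheader}\n'
              '\fbody{\n\fpart{ ID: 1, Content-type: text/plain\n'
              'hello\n\fpart}\n\fbody}\n\fmessage}\n')
    for token, value in NotmuchLexer(body_lexer='text').get_tokens(sample):
        print(token, repr(value))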


class KernelLogLexer(RegexLexer):
    """
    For Linux Kernel log ("dmesg") output.

    .. versionadded:: 2.6
    """
    name = 'Kernel log'
    aliases = ['kmsg', 'dmesg']
    filenames = ['*.kmsg', '*.dmesg']

    tokens = {
        'root': [
            # Level names are space-padded so the colon lines up with the
            # facility:level prefix emitted by `dmesg --decode`.
            (r'^[^:]+:debug : (?=\[)', Text, 'debug'),
            (r'^[^:]+:info  : (?=\[)', Text, 'info'),
            (r'^[^:]+:warn  : (?=\[)', Text, 'warn'),
            (r'^[^:]+:notice: (?=\[)', Text, 'warn'),
            (r'^[^:]+:err   : (?=\[)', Text, 'error'),
            (r'^[^:]+:crit  : (?=\[)', Text, 'error'),
            (r'^(?=\[)', Text, 'unknown'),
        ],
        'unknown': [
            (r'^(?=.+(warning|notice|audit|deprecated))', Text, 'warn'),
            (r'^(?=.+(error|critical|fail|Bug))', Text, 'error'),
            default('info'),
        ],
        'base': [
            (r'\[[0-9. ]+\] ', Number),
            (r'(?<=\] ).+?:', Keyword),
            (r'\n', Text, '#pop'),
        ],
        'debug': [
            include('base'),
            (r'.+\n', Comment, '#pop')
        ],
        'info': [
            include('base'),
            (r'.+\n', Text, '#pop')
        ],
        'warn': [
            include('base'),
            (r'.+\n', Generic.Strong, '#pop')
        ],
        'error': [
            include('base'),
            (r'.+\n', Generic.Error, '#pop')
        ]
    }
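

# A minimal usage sketch: a synthetic dmesg line with no syslog level prefix,
# which falls through to the 'unknown' state and is classified by keyword
# ("error" routes it to the 'error' state). The helper and sample line are
# illustrative only.
def _demo_kernel_log_lexer():
    sample = '[    1.234567] usb 1-1: device descriptor read error\n'
    for token, value in KernelLogLexer().get_tokens(sample):
        print(token, repr(value))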