12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817 |
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
#     Jim Fulton
#     Edward Loper

# Provided as-is; use at your own risk; no warranty; no promises; enjoy!

r"""Module doctest -- a framework for running examples in docstrings.

In simplest use, end each module M to be tested with:

    def _test():
        import doctest
        doctest.testmod()

    if __name__ == "__main__":
        _test()

Then running the module as a script will cause the examples in the
docstrings to get executed and verified:

    python M.py

This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".

Run it with the -v switch instead:

    python M.py -v

and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.

You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False".  In either of those cases, sys.argv is not
examined by testmod.

There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests.  There are also many ways to override parts
of doctest's default behaviors.  See the Library Reference Manual for
details.
"""

__docformat__ = 'reStructuredText en'

__all__ = [
    # 0. Option Flags
    'register_optionflag',
    'DONT_ACCEPT_TRUE_FOR_1',
    'DONT_ACCEPT_BLANKLINE',
    'NORMALIZE_WHITESPACE',
    'ELLIPSIS',
    'SKIP',
    'IGNORE_EXCEPTION_DETAIL',
    'COMPARISON_FLAGS',
    'REPORT_UDIFF',
    'REPORT_CDIFF',
    'REPORT_NDIFF',
    'REPORT_ONLY_FIRST_FAILURE',
    'REPORTING_FLAGS',
    'FAIL_FAST',
    # 1. Utility Functions
    # 2. Example & DocTest
    'Example',
    'DocTest',
    # 3. Doctest Parser
    'DocTestParser',
    # 4. Doctest Finder
    'DocTestFinder',
    # 5. Doctest Runner
    'DocTestRunner',
    'OutputChecker',
    'DocTestFailure',
    'UnexpectedException',
    'DebugRunner',
    # 6. Test Functions
    'testmod',
    'testfile',
    'run_docstring_examples',
    # 7. Unittest Support
    'DocTestSuite',
    'DocFileSuite',
    'set_unittest_reportflags',
    # 8. Debugging Support
    'script_from_examples',
    'testsource',
    'debug_src',
    'debug',
]
import __future__
import difflib
import inspect
import linecache
import os
import pdb
import re
import sys
import traceback
import unittest
from collections import namedtuple
from io import IncrementalNewlineDecoder, StringIO

# Outcome of a test run: how many examples failed / how many were attempted.
TestResults = namedtuple('TestResults', ['failed', 'attempted'])
- # There are 4 basic classes:
- # - Example: a <source, want> pair, plus an intra-docstring line number.
- # - DocTest: a collection of examples, parsed from a docstring, plus
- # info about where the docstring came from (name, filename, lineno).
- # - DocTestFinder: extracts DocTests from a given object's docstring and
- # its contained objects' docstrings.
- # - DocTestRunner: runs DocTest cases, and accumulates statistics.
- #
- # So the basic picture is:
- #
- # list of:
- # +------+ +---------+ +-------+
- # |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
- # +------+ +---------+ +-------+
- # | Example |
- # | ... |
- # | Example |
- # +---------+
# Option constants.

OPTIONFLAGS_BY_NAME = {}

def register_optionflag(name):
    """Return the integer flag for `name`, allocating a new bit if unseen."""
    if name not in OPTIONFLAGS_BY_NAME:
        OPTIONFLAGS_BY_NAME[name] = 1 << len(OPTIONFLAGS_BY_NAME)
    return OPTIONFLAGS_BY_NAME[name]

DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')

# Flags that control how actual output is compared to expected output.
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    SKIP |
                    IGNORE_EXCEPTION_DETAIL)

REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
FAIL_FAST = register_optionflag('FAIL_FAST')

# Flags that control how failures are reported.
REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE |
                   FAIL_FAST)

# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
- ######################################################################
- ## Table of Contents
- ######################################################################
- # 1. Utility Functions
- # 2. Example & DocTest -- store test cases
- # 3. DocTest Parser -- extracts examples from strings
- # 4. DocTest Finder -- extracts test cases from objects
- # 5. DocTest Runner -- runs test cases
- # 6. Test Functions -- convenient wrappers for testing
- # 7. Unittest Support
- # 8. Debugging Support
- # 9. Example Usage
- ######################################################################
- ## 1. Utility Functions
- ######################################################################
def _extract_future_flags(globs):
    """
    Return the compiler flags for every __future__ feature that has
    actually been imported into the namespace `globs`.
    """
    flags = 0
    for feature_name in __future__.all_feature_names:
        # Only count it if the name is bound to the genuine feature object.
        if globs.get(feature_name, None) is getattr(__future__, feature_name):
            flags |= getattr(__future__, feature_name).compiler_flag
    return flags
def _normalize_module(module, depth=2):
    """
    Return the module designated by `module`:

      - a module object is returned unchanged;
      - a string is imported, and the resulting module returned;
      - None means "the calling module", i.e. the module owning the
        stack frame `depth` levels up the call stack (may return None
        if that module cannot be determined).

    Raise TypeError for any other argument type.
    """
    if inspect.ismodule(module):
        return module
    if isinstance(module, str):
        return __import__(module, globals(), locals(), ["*"])
    if module is None:
        try:
            try:
                # Fast path available on newer CPython.
                return sys.modules[sys._getframemodulename(depth)]
            except AttributeError:
                return sys.modules[sys._getframe(depth).f_globals['__name__']]
        except KeyError:
            # Caller's module not importable/known: fall through (None).
            pass
    else:
        raise TypeError("Expected a module, string, or None")
def _newline_convert(data):
    """Translate all newline conventions in `data` to plain '\\n'."""
    # The io module's incremental decoder does universal-newline
    # translation for us.
    decoder = IncrementalNewlineDecoder(None, True)
    return decoder.decode(data, final=True)
def _load_testfile(filename, package, module_relative, encoding):
    """
    Read a doctest file and return `(contents, filename)`.

    When `module_relative` is true, `filename` is resolved relative to
    `package` and, when possible, read through the package's loader so
    that files inside zip imports still work.
    """
    if module_relative:
        package = _normalize_module(package, 3)
        filename = _module_relative_path(package, filename)
        loader = getattr(package, '__loader__', None)
        if loader is None:
            try:
                loader = package.__spec__.loader
            except AttributeError:
                pass
        if hasattr(loader, 'get_data'):
            contents = loader.get_data(filename).decode(encoding)
            # get_data() reads in binary mode, so emulate the universal
            # newline translation an ordinary text open would have done.
            return _newline_convert(contents), filename
    with open(filename, encoding=encoding) as f:
        return f.read(), filename
def _indent(s, indent=4):
    """
    Prefix every non-blank line of `s` with `indent` space characters
    and return the result.
    """
    # `^(?!$)` matches the start of each line that is not empty.
    return re.sub(r'^(?!$)', indent * ' ', s, flags=re.MULTILINE)
def _exception_traceback(exc_info):
    """
    Format the `(type, value, traceback)` triple `exc_info` (as
    produced by sys.exc_info()) into a traceback string.
    """
    buf = StringIO()
    etype, evalue, tb = exc_info
    traceback.print_exception(etype, evalue, tb, file=buf)
    return buf.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
    """StringIO variant used to capture an example's stdout."""

    def getvalue(self):
        text = super().getvalue()
        # Expected output has no way to indicate a missing trailing
        # newline, so force one whenever anything at all was written.
        if text and not text.endswith("\n"):
            text += "\n"
        return text

    def truncate(self, size=None):
        self.seek(size)
        super().truncate()
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Report whether `got` matches `want`, treating each ELLIPSIS_MARKER
    in `want` as a wildcard.  Essentially the only subtle case:

    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got

    # The literal fragments between the ellipses.
    pieces = want.split(ELLIPSIS_MARKER)
    assert len(pieces) >= 2

    # The window of `got` still available for matching.
    lo, hi = 0, len(got)

    # A non-empty first piece must match exactly at the start.
    head = pieces[0]
    if head:
        if not got.startswith(head):
            return False
        lo = len(head)
        del pieces[0]

    # A non-empty last piece must match exactly at the end.
    tail = pieces[-1]
    if tail:
        if not got.endswith(tail):
            return False
        hi -= len(tail)
        del pieces[-1]

    if lo > hi:
        # The exact end matches consumed more characters than exist,
        # as in _ellipsis_match('aa...aa', 'aaa').
        return False

    # For the remaining pieces a leftmost non-overlapping match is
    # sufficient: if that fails, no overall match exists.  Empty pieces
    # (consecutive ellipses, or ellipses at either end) are harmless --
    # searching for '' succeeds without moving `lo`.
    for piece in pieces:
        lo = got.find(piece, lo, hi)
        if lo < 0:
            return False
        lo += len(piece)

    return True
def _comment_line(line):
    """Return `line` rewritten as a Python comment."""
    stripped = line.rstrip()
    # A blank line becomes a bare '#' with no trailing space.
    return '# ' + stripped if stripped else '#'
def _strip_exception_details(msg):
    """
    Reduce `msg` to the bare exception class name, for
    IGNORE_EXCEPTION_DETAIL: drop the (possibly dotted) module path
    before it and the message after it.  A colon is assumed never to be
    part of a dotted name or an exception name.

    E.g. "foo.bar.MyError: la di da" -> "MyError",
    and "abc.def" or "abc.def:\\n" -> "def".
    """
    start, end = 0, len(msg)
    # The exception name must appear on the first line.
    newline = msg.find("\n")
    if newline >= 0:
        end = newline
    # Keep only what precedes the first colon, if there is one.
    colon = msg.find(':', 0, end)
    if colon >= 0:
        end = colon
    # Keep only the last dotted component: the exception name itself.
    dot = msg.rfind('.', 0, end)
    if dot >= 0:
        start = dot + 1
    return msg[start:end]
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A pdb.Pdb subclass that sends its prompt and output to a given
    stream while interacting with the user.  Stdout is *not* redirected
    while the traced code itself executes.
    """

    def __init__(self, out):
        self.__out = out
        self.__debugger_used = False
        # Don't play signal games in the pdb.
        super().__init__(stdout=out, nosigint=True)
        # Still read user commands via input().
        self.use_rawinput = 1

    def set_trace(self, frame=None):
        self.__debugger_used = True
        if frame is None:
            frame = sys._getframe().f_back
        super().set_trace(frame)

    def set_continue(self):
        # Bdb.set_continue calls sys.settrace(None); doing that when the
        # debugger was never entered would break coverage tracing.
        if self.__debugger_used:
            super().set_continue()

    def trace_dispatch(self, *args):
        # Route stdout to our stream only for the duration of the dispatch.
        saved_stdout = sys.stdout
        sys.stdout = self.__out
        try:
            return super().trace_dispatch(*args)
        finally:
            sys.stdout = saved_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, test_path):
    """
    Resolve the '/'-separated, module-relative path `test_path` against
    the directory containing `module` and return the resulting OS path.

    Raises TypeError if `module` is not a module, and ValueError if
    `test_path` is absolute or the module's location can't be found.
    """
    if not inspect.ismodule(module):
        raise TypeError('Expected a module: %r' % module)
    if test_path.startswith('/'):
        raise ValueError('Module-relative files may not have absolute paths')

    # Normalize the path; on Windows this turns "/" into "\".
    test_path = os.path.join(*test_path.split('/'))

    # Pick the directory the path is relative to.
    if hasattr(module, '__file__'):
        # A normal module/package.
        basedir = os.path.split(module.__file__)[0]
    elif module.__name__ == '__main__':
        # An interactive session.
        if sys.argv and sys.argv[0] != '':
            basedir = os.path.split(sys.argv[0])[0]
        else:
            basedir = os.curdir
    else:
        # Possibly a namespace package: probe each __path__ entry.
        if hasattr(module, '__path__'):
            for directory in module.__path__:
                candidate = os.path.join(directory, test_path)
                if os.path.exists(candidate):
                    return candidate
        # A module w/o __file__ (this includes builtins).
        raise ValueError("Can't resolve paths relative to the module "
                         "%r (it has no __file__)"
                         % module.__name__)

    # Combine the base directory and the test path.
    return os.path.join(basedir, test_path)
- ######################################################################
- ## 2. Example & DocTest
- ######################################################################
- ## - An "example" is a <source, want> pair, where "source" is a
- ## fragment of source code, and "want" is the expected output for
- ## "source." The Example class also includes information about
- ## where the example was extracted from.
- ##
- ## - A "doctest" is a collection of examples, typically extracted from
- ## a string (such as an object's docstring). The DocTest class also
- ## includes information about where the string was extracted from.
class Example:
    """
    A single doctest example: a piece of source code together with its
    expected output.  `Example` defines the following attributes:

      - source: A single Python statement, always ending with a newline.
        The constructor adds a newline if needed.

      - want: The expected output from running the source code (either
        from stdout, or a traceback in case of exception).  `want` ends
        with a newline unless it's empty, in which case it's an empty
        string.  The constructor adds a newline if needed.

      - exc_msg: The exception message generated by the example, if the
        example is expected to generate an exception; or `None` if it is
        not expected to generate an exception.  This message is compared
        against the return value of `traceback.format_exception_only()`.
        `exc_msg` ends with a newline unless it's `None`; the
        constructor adds a newline if needed.

      - lineno: The 0-based line number, within the DocTest string
        containing this Example, where the Example begins.

      - indent: The example's indentation in the DocTest string, i.e.
        the number of space characters preceding the example's first
        prompt.

      - options: A dictionary mapping option flags to True or False,
        overriding default options for this example only.  Flags absent
        from the dictionary keep their default value (as given by the
        DocTestRunner's optionflags).  By default, no options are set.
    """

    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize: source always, and want/exc_msg when present,
        # must end with a newline.
        if not source.endswith('\n'):
            source += '\n'
        if want and not want.endswith('\n'):
            want += '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg += '\n'
        # Store properties.
        self.source = source
        self.want = want
        self.lineno = lineno
        self.indent = indent
        self.options = {} if options is None else options
        self.exc_msg = exc_msg

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return ((self.source, self.want, self.lineno, self.indent,
                 self.options, self.exc_msg) ==
                (other.source, other.want, other.lineno, other.indent,
                 other.options, other.exc_msg))

    def __hash__(self):
        # `options` is a dict (unhashable) and so is left out of the
        # hash; that only makes hashing coarser, never incorrect.
        return hash((self.source, self.want, self.lineno, self.indent,
                     self.exc_msg))
class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Each `DocTest` defines the following attributes:

      - examples: the list of examples.
      - globs: the namespace (aka globals) the examples should run in.
      - name: a name identifying the DocTest (typically the name of the
        object whose docstring this DocTest was extracted from).
      - filename: the name of the file this DocTest was extracted from,
        or `None` if the filename is unknown.
      - lineno: the 0-based line number within `filename` where this
        DocTest begins, or `None` if unavailable.
      - docstring: the string the examples were extracted from, or
        `None` if the string is unavailable.
    """

    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest holding `examples`.  The DocTest's globals
        are initialized with a copy of `globs`.
        """
        assert not isinstance(examples, str), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno

    def __repr__(self):
        count = len(self.examples)
        if count == 0:
            examples = 'no examples'
        elif count == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % count
        return ('<%s %s from %s:%s (%s)>' %
                (self.__class__.__name__,
                 self.name, self.filename, self.lineno, examples))

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return ((self.examples, self.docstring, self.globs, self.name,
                 self.filename, self.lineno) ==
                (other.examples, other.docstring, other.globs, other.name,
                 other.filename, other.lineno))

    def __hash__(self):
        # examples and globs are mutable; hash only the stable parts.
        return hash((self.docstring, self.name, self.filename, self.lineno))

    def __lt__(self, other):
        # Sort tests by (name, filename, lineno); id() breaks ties so
        # the ordering is total even for otherwise-identical tests.
        if not isinstance(other, DocTest):
            return NotImplemented
        return ((self.name, self.filename, self.lineno, id(self))
                <
                (other.name, other.filename, other.lineno, id(other)))
- ######################################################################
- ## 3. DocTestParser
- ######################################################################
- class DocTestParser:
- """
- A class used to parse strings containing doctest examples.
- """
- # This regular expression is used to find doctest examples in a
- # string. It defines three groups: `source` is the source code
- # (including leading indentation and prompts); `indent` is the
- # indentation of the first (PS1) line of the source code; and
- # `want` is the expected output (including leading indentation).
- _EXAMPLE_RE = re.compile(r'''
- # Source consists of a PS1 line followed by zero or more PS2 lines.
- (?P<source>
- (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
- (?:\n [ ]* \.\.\. .*)*) # PS2 lines
- \n?
- # Want consists of any non-blank lines that do not start with PS1.
- (?P<want> (?:(?![ ]*$) # Not a blank line
- (?![ ]*>>>) # Not a line starting with PS1
- .+$\n? # But any other line
- )*)
- ''', re.MULTILINE | re.VERBOSE)
- # A regular expression for handling `want` strings that contain
- # expected exceptions. It divides `want` into three pieces:
- # - the traceback header line (`hdr`)
- # - the traceback stack (`stack`)
- # - the exception message (`msg`), as generated by
- # traceback.format_exception_only()
- # `msg` may have multiple lines. We assume/require that the
- # exception message is the first non-indented line starting with a word
- # character following the traceback header line.
- _EXCEPTION_RE = re.compile(r"""
- # Grab the traceback header. Different versions of Python have
- # said different things on the first traceback line.
- ^(?P<hdr> Traceback\ \(
- (?: most\ recent\ call\ last
- | innermost\ last
- ) \) :
- )
- \s* $ # toss trailing whitespace on the header.
- (?P<stack> .*?) # don't blink: absorb stuff until...
- ^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
- """, re.VERBOSE | re.MULTILINE | re.DOTALL)
- # A callable returning a true value iff its argument is a blank line
- # or contains a single comment.
- _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
- def parse(self, string, name='<string>'):
- """
- Divide the given string into examples and intervening text,
- and return them as a list of alternating Examples and strings.
- Line numbers for the Examples are 0-based. The optional
- argument `name` is a name identifying this string, and is only
- used for error messages.
- """
- string = string.expandtabs()
- # If all lines begin with the same indentation, then strip it.
- min_indent = self._min_indent(string)
- if min_indent > 0:
- string = '\n'.join([l[min_indent:] for l in string.split('\n')])
- output = []
- charno, lineno = 0, 0
- # Find all doctest examples in the string:
- for m in self._EXAMPLE_RE.finditer(string):
- # Add the pre-example text to `output`.
- output.append(string[charno:m.start()])
- # Update lineno (lines before this example)
- lineno += string.count('\n', charno, m.start())
- # Extract info from the regexp match.
- (source, options, want, exc_msg) = \
- self._parse_example(m, name, lineno)
- # Create an Example, and add it to the list.
- if not self._IS_BLANK_OR_COMMENT(source):
- output.append( Example(source, want, exc_msg,
- lineno=lineno,
- indent=min_indent+len(m.group('indent')),
- options=options) )
- # Update lineno (lines inside this example)
- lineno += string.count('\n', m.start(), m.end())
- # Update charno.
- charno = m.end()
- # Add any remaining post-example text to `output`.
- output.append(string[charno:])
- return output
- def get_doctest(self, string, globs, name, filename, lineno):
- """
- Extract all doctest examples from the given string, and
- collect them into a `DocTest` object.
- `globs`, `name`, `filename`, and `lineno` are attributes for
- the new `DocTest` object. See the documentation for `DocTest`
- for more information.
- """
- return DocTest(self.get_examples(string, name), globs,
- name, filename, lineno, string)
- def get_examples(self, string, name='<string>'):
- """
- Extract all doctest examples from the given string, and return
- them as a list of `Example` objects. Line numbers are
- 0-based, because it's most common in doctests that nothing
- interesting appears on the same line as opening triple-quote,
- and so the first interesting line is called \"line 1\" then.
- The optional argument `name` is a name identifying this
- string, and is only used for error messages.
- """
- return [x for x in self.parse(string, name)
- if isinstance(x, Example)]
    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a tuple `(source, options, want, exc_msg)`, where
        `source` is the matched example's source code (with prompts and
        indentation stripped); `options` is the option-directive dict;
        `want` is the example's expected output (with indentation
        stripped); and `exc_msg` is the expected exception message, or
        None if no traceback is expected.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))

        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        # (indent+4 skips the indentation plus the 4-char '>>> '/'... '
        # prompt on each line.)
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        source = '\n'.join([sl[indent+4:] for sl in source_lines])

        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])

        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None

        # Extract options from the source.
        options = self._find_options(source, name, lineno)

        return source, options, want, exc_msg
    # This regular expression looks for option directives in the
    # source code of an example.  Option directives are comments
    # starting with "doctest:".  Warning: this may give false
    # positives for string-literals that contain the string
    # "#doctest:".  Eliminating these false positives would require
    # actually parsing the string; but we limit them by ignoring any
    # line containing "#doctest:" that is *followed* by a quote mark.
    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                      re.MULTILINE)
    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.

        The dict maps option flags (ints from OPTIONFLAGS_BY_NAME) to
        True for '+FLAG' directives and False for '-FLAG' directives.
        Raises ValueError for a malformed directive, or for a directive
        that appears on a line with no example source.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            # Directives may be comma- or space-separated.
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options
    # This regular expression finds the indentation of every non-blank
    # line in a string.  (The lookahead requires a non-space character,
    # so blank lines never match and contribute no indent.)
    _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)
- def _min_indent(self, s):
- "Return the minimum indentation of any non-blank line in `s`"
- indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
- if len(indents) > 0:
- return min(indents)
- else:
- return 0
- def _check_prompt_blank(self, lines, indent, name, lineno):
- """
- Given the lines of a source string (including prompts and
- leading indentation), check to make sure that every prompt is
- followed by a space character. If any line is not followed by
- a space character, then raise ValueError.
- """
- for i, line in enumerate(lines):
- if len(line) >= indent+4 and line[indent+3] != ' ':
- raise ValueError('line %r of the docstring for %s '
- 'lacks blank after %s: %r' %
- (lineno+i+1, name,
- line[indent:indent+3], line))
- def _check_prefix(self, lines, prefix, name, lineno):
- """
- Check that every line in the given list starts with the given
- prefix; if any line does not, then raise a ValueError.
- """
- for i, line in enumerate(lines):
- if line and not line.startswith(prefix):
- raise ValueError('line %r of the docstring for %s has '
- 'inconsistent leading whitespace: %r' %
- (lineno+i+1, name, line))
- ######################################################################
- ## 4. DocTest Finder
- ######################################################################
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects.  Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """

    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, exclude_empty=True):
        """
        Create a new doctest finder.

        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.

        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.

        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty

    def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.

        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:

            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.

        Contained objects whose module does not match `module` are ignored.

        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests:  if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.

        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))

        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)

        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj)
        except TypeError:
            source_lines = None
        else:
            if not file:
                # Check to see if it's one of our special internal "files"
                # (see __patched_linecache_getlines).
                file = inspect.getfile(obj)
                # Internal filenames look like '<doctest NAME[NUM]>'.
                if not file[0]+file[-2:] == '<]>': file = None
            if file is None:
                source_lines = None
            else:
                if module is not None:
                    # Supply the module globals in case the module was
                    # originally loaded via a PEP 302 loader and
                    # file is not a valid filesystem path
                    source_lines = linecache.getlines(file, module.__dict__)
                else:
                    # No access to a loader, so assume it's a normal
                    # filesystem path
                    source_lines = linecache.getlines(file)
                if not source_lines:
                    source_lines = None

        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)
        if '__name__' not in globs:
            globs['__name__'] = '__main__'  # provide a default module name

        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        # Sort the tests by alpha order of names, for consistency in
        # verbose-mode output.  This was a feature of doctest in Pythons
        # <= 2.3 that got lost by accident in 2.4.  It was repaired in
        # 2.4.4 and 2.5.
        tests.sort()
        return tests

    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif inspect.isfunction(object):
            # A function's __globals__ is its defining module's __dict__.
            return module.__dict__ is object.__globals__
        elif (inspect.ismethoddescriptor(object) or
              inspect.ismethodwrapper(object)):
            if hasattr(object, '__objclass__'):
                obj_mod = object.__objclass__.__module__
            elif hasattr(object, '__module__'):
                obj_mod = object.__module__
            else:
                return True # [XX] no easy way to tell otherwise
            return module.__name__ == obj_mod
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way to be sure.
        else:
            raise ValueError("object must be a class or function")

    def _is_routine(self, obj):
        """
        Safely unwrap objects and determine if they are functions.
        """
        maybe_routine = obj
        try:
            # unwrap() can raise ValueError for a wrapper cycle.
            maybe_routine = inspect.unwrap(maybe_routine)
        except ValueError:
            pass
        return inspect.isroutine(maybe_routine)

    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            print('Finding tests in %s' % name)

        # If we've already processed this object, then ignore it.
        # (`seen` is keyed by id() to handle unhashable objects.)
        if id(obj) in seen:
            return
        seen[id(obj)] = 1

        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)

        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                valname = '%s.%s' % (name, valname)

                # Recurse to functions & classes.
                if ((self._is_routine(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, str):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isroutine(val) or inspect.isclass(val) or
                        inspect.ismodule(val) or isinstance(val, str)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)

        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod.
                if isinstance(val, (staticmethod, classmethod)):
                    val = val.__func__

                # Recurse to methods, properties, and nested classes.
                if ((inspect.isroutine(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                    self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, str):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, str):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''

        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)

        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None

        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            # __file__ can be None for namespace packages.
            filename = getattr(module, '__file__', None) or module.__name__
            if filename[-4:] == ".pyc":
                # Point at the source file, not the compiled one.
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)

    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring.

        Returns `None` if the given object does not have a docstring.
        """
        lineno = None
        docstring = getattr(obj, '__doc__', None)

        # Find the line number for modules.
        if inspect.ismodule(obj) and docstring is not None:
            lineno = 0

        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj) and docstring is not None:
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             re.escape(getattr(obj, '__name__', '-')))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break

        # Find the line number for functions & methods.
        if inspect.ismethod(obj): obj = obj.__func__
        if inspect.isfunction(obj) and getattr(obj, '__doc__', None):
            # We don't use `docstring` var here, because `obj` can be changed.
            obj = obj.__code__
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            lineno = obj.co_firstlineno - 1

        # Find the line number where the docstring starts.  Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile(r'(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno

        # We couldn't find the line number.
        return None
- ######################################################################
- ## 5. DocTest Runner
- ######################################################################
class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case.  It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.

        >>> tests = DocTestFinder().find(_TestClass)
        >>> runner = DocTestRunner(verbose=False)
        >>> tests.sort(key = lambda test: test.name)
        >>> for test in tests:
        ...     print(test.name, '->', runner.run(test))
        _TestClass -> TestResults(failed=0, attempted=2)
        _TestClass.__init__ -> TestResults(failed=0, attempted=2)
        _TestClass.get -> TestResults(failed=0, attempted=2)
        _TestClass.square -> TestResults(failed=0, attempted=1)

    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:

        >>> runner.summarize(verbose=1)
        4 items passed all tests:
           2 tests in _TestClass
           2 tests in _TestClass.__init__
           2 tests in _TestClass.get
           1 tests in _TestClass.square
        7 tests in 4 items.
        7 passed and 0 failed.
        Test passed.
        TestResults(failed=0, attempted=7)

    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:

        >>> runner.tries
        7
        >>> runner.failures
        0

    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`.  This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information.  If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.

    The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed.  It defaults to `sys.stdout.write`.  If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70

    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.

        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.

        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.

        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags

        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        # Maps test name -> (failures, tries) accumulated so far.
        self._name2ft = {}

        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()

    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////

    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example.  (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')

    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully.  (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")

    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))

    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))

    def _failure_header(self, test, example):
        # Build the standard "File ..., line ..., in ..." failure banner.
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                # example.lineno is 0-based relative to the test's start.
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)

    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////

    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`.  Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`.  `compileflags` is the set of compiler
        flags that should be used to execute examples.  Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed.  The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0

        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags

        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state

        check = self._checker.check_output

        # Process each example.
        for examplenum, example in enumerate(test.examples):

            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)

            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag

            # If 'SKIP' is set, then skip this example.
            if self.optionflags & SKIP:
                continue

            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)

            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)

            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink!  This is where the user's code gets run.
                exec(compile(example.source, filename, "single",
                             compileflags, True), test.globs)
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====

            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE   # guilty until proved innocent or insane

            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS

            # The example raised an exception:  check if it was expected.
            else:
                exc_msg = traceback.format_exception_only(*exception[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exception)

                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM

                # We expected an exception:  see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS

                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    if check(_strip_exception_details(example.exc_msg),
                             _strip_exception_details(exc_msg),
                             self.optionflags):
                        outcome = SUCCESS

            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exception)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)

            if failures and self.optionflags & FAIL_FAST:
                break

        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags

        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return TestResults(failures, tries)

    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t

    # Matches the special filenames produced by __run (see above), so
    # linecache lookups for them can be redirected to example source.
    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>.+)'
                                         r'\[(?P<examplenum>\d+)\]>$')

    def __patched_linecache_getlines(self, filename, module_globals=None):
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(keepends=True)
        else:
            return self.save_linecache_getlines(filename, module_globals)

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.

        The examples are run in the namespace `test.globs`.  If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.

        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.

        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test

        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)

        save_stdout = sys.stdout
        if out is None:
            encoding = save_stdout.encoding
            if encoding is None or encoding.lower() == 'utf-8':
                out = save_stdout.write
            else:
                # Use backslashreplace error handling on write
                def out(s):
                    s = str(s.encode(encoding, 'backslashreplace'), encoding)
                    save_stdout.write(s)
        sys.stdout = self._fakeout

        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_trace = sys.gettrace()
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace

        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines

        # Make sure sys.displayhook just prints the value to stdout
        save_displayhook = sys.displayhook
        sys.displayhook = sys.__displayhook__

        try:
            return self.__run(test, compileflags, out)
        finally:
            # Undo every patch made above, even if the test blew up.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            sys.settrace(save_trace)
            linecache.getlines = self.save_linecache_getlines
            sys.displayhook = save_displayhook
            if clear_globs:
                test.globs.clear()
                import builtins
                builtins._ = None

    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.

        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print(len(notests), "items had no tests:")
                notests.sort()
                for thing in notests:
                    print(" ", thing)
            if passed:
                print(len(passed), "items passed all tests:")
                passed.sort()
                for thing, count in passed:
                    print(" %3d tests in %s" % (count, thing))
        if failed:
            print(self.DIVIDER)
            print(len(failed), "items had failures:")
            failed.sort()
            for thing, (f, t) in failed:
                print(" %3d of %3d in %s" % (f, t, thing))
        if verbose:
            print(totalt, "tests in", len(self._name2ft), "items.")
            print(totalt - totalf, "passed and", totalf, "failed.")
        if totalf:
            print("***Test Failed***", totalf, "failures.")
        elif verbose:
            print("Test passed.")
        return TestResults(totalf, totalt)

    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        # Fold `other`'s per-test (failures, tries) counts into ours,
        # summing counts for test names present in both runners.
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                # Don't print here by default, since doing
                # so breaks some of the buildbots
                #print("*** DocTestRunner.merge: '" + name + "' in both" \
                #    " testers; summing outcomes.")
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
- class OutputChecker:
- """
- A class used to check the whether the actual output from a doctest
- example matches the expected output. `OutputChecker` defines two
- methods: `check_output`, which compares a given pair of outputs,
- and returns true if they match; and `output_difference`, which
- returns a string describing the differences between two outputs.
- """
- def _toAscii(self, s):
- """
- Convert string to hex-escaped ASCII string.
- """
- return str(s.encode('ASCII', 'backslashreplace'), "ASCII")
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible.  See the
        documentation for `TestRunner` for more information about
        option flags.
        """

        # If `want` contains hex-escaped character such as "\u1234",
        # then `want` is a string of six characters(e.g. [\,u,1,2,3,4]).
        # On the other hand, `got` could be another sequence of
        # characters such as [\u1234], so `want` and `got` should
        # be folded to hex-escaped ASCII string to compare.
        got = self._toAscii(got)
        want = self._toAscii(want)

        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True

        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True

        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub(r'(?m)^[^\S\n]+$', '', got)
            if got == want:
                return True

        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings.  Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True

        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True

        # We didn't find any match; return false.
        return False
- # Should we do a fancy diff?
- def _do_a_fancy_diff(self, want, got, optionflags):
- # Not unless they asked for a fancy diff.
- if not optionflags & (REPORT_UDIFF |
- REPORT_CDIFF |
- REPORT_NDIFF):
- return False
- # If expected output uses ellipsis, a meaningful fancy diff is
- # too hard ... or maybe not. In two real-life failures Tim saw,
- # a diff was a major help anyway, so this is commented out.
- # [todo] _ellipsis_match() knows which pieces do and don't match,
- # and could be the basis for a kick-ass diff in this case.
- ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
- ## return False
- # ndiff does intraline difference marking, so can be useful even
- # for 1-line differences.
- if optionflags & REPORT_NDIFF:
- return True
- # The other diff types need at least a few lines to be helpful.
- return want.count('\n') > 2 and got.count('\n') > 2
- def output_difference(self, example, got, optionflags):
- """
- Return a string describing the differences between the
- expected output for a given example (`example`) and the actual
- output (`got`). `optionflags` is the set of option flags used
- to compare `want` and `got`.
- """
- want = example.want
- # If <BLANKLINE>s are being used, then replace blank lines
- # with <BLANKLINE> in the actual output string.
- if not (optionflags & DONT_ACCEPT_BLANKLINE):
- got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
- # Check if we should use diff.
- if self._do_a_fancy_diff(want, got, optionflags):
- # Split want & got into lines.
- want_lines = want.splitlines(keepends=True)
- got_lines = got.splitlines(keepends=True)
- # Use difflib to find their differences.
- if optionflags & REPORT_UDIFF:
- diff = difflib.unified_diff(want_lines, got_lines, n=2)
- diff = list(diff)[2:] # strip the diff header
- kind = 'unified diff with -expected +actual'
- elif optionflags & REPORT_CDIFF:
- diff = difflib.context_diff(want_lines, got_lines, n=2)
- diff = list(diff)[2:] # strip the diff header
- kind = 'context diff with expected followed by actual'
- elif optionflags & REPORT_NDIFF:
- engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
- diff = list(engine.compare(want_lines, got_lines))
- kind = 'ndiff with -expected +actual'
- else:
- assert 0, 'Bad diff option'
- return 'Differences (%s):\n' % kind + _indent(''.join(diff))
- # If we're not using diff, then simply list the expected
- # output followed by the actual output.
- if want and got:
- return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
- elif want:
- return 'Expected:\n%sGot nothing\n' % _indent(want)
- elif got:
- return 'Expected nothing\nGot:\n%s' % _indent(got)
- else:
- return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """Raised (in debugging mode) when a doctest example's output is wrong.

    Attributes:
      test:    the DocTest object being run
      example: the Example object that failed
      got:     the actual output
    """

    def __init__(self, test, example, got):
        self.test, self.example, self.got = test, example, got

    def __str__(self):
        return f"{self.test}"
class UnexpectedException(Exception):
    """Raised (in debugging mode) when a doctest example hits an exception.

    Attributes:
      test:     the DocTest object being run
      example:  the Example object that failed
      exc_info: the exception info triple
    """

    def __init__(self, test, example, exc_info):
        self.test, self.example, self.exc_info = test, example, exc_info

    def __str__(self):
        return f"{self.test}"
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.

       If an unexpected exception occurs, an UnexpectedException is raised.
       It contains the test, the example, and the original exception:

         >>> runner = DebugRunner(verbose=False)
         >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
         ...                                    {}, 'foo', 'foo.py', 0)
         >>> try:
         ...     runner.run(test)
         ... except UnexpectedException as f:
         ...     failure = f

         >>> failure.test is test
         True

         >>> failure.example.want
         '42\n'

         >>> exc_info = failure.exc_info
         >>> raise exc_info[1] # Already has the traceback
         Traceback (most recent call last):
         ...
         KeyError

       We wrap the original exception to give the calling application
       access to the test and example information.

       If the output doesn't match, then a DocTestFailure is raised:

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 1
         ...      >>> x
         ...      2
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> try:
         ...    runner.run(test)
         ... except DocTestFailure as f:
         ...    failure = f

       DocTestFailure objects provide access to the test:

         >>> failure.test is test
         True

       As well as to the example:

         >>> failure.example.want
         '2\n'

       and the actual output:

         >>> failure.got
         '1\n'

       If a failure or error occurs, the globals are left intact:

         >>> del test.globs['__builtins__']
         >>> test.globs
         {'x': 1}

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 2
         ...      >>> raise KeyError
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> runner.run(test)
         Traceback (most recent call last):
         ...
         doctest.UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>

         >>> del test.globs['__builtins__']
         >>> test.globs
         {'x': 2}

       But the globals are cleared if there is no error:

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 2
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> runner.run(test)
         TestResults(failed=0, attempted=1)

         >>> test.globs
         {}
       """

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Always pass clear_globs=False to the base class: on a failure or
        # unexpected exception one of the report_* hooks below raises out
        # of the base run(), and (per the class docstring) the globals must
        # survive for post-mortem inspection.  Only clear them here, on a
        # clean finish.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r

    def report_unexpected_exception(self, out, test, example, exc_info):
        # Abort the run immediately, carrying the full failure context.
        raise UnexpectedException(test, example, exc_info)

    def report_failure(self, out, test, example, got):
        # Abort the run immediately, carrying the full failure context.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.

# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, report=True,
       optionflags=0, extraglobs=None, raise_on_error=False,
       exclude_empty=False

    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__.

    Also test examples reachable from dict m.__test__ if it exists and is
    not None.  m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.

    Return (#failures, #tests).

    See help(doctest) for an overview.

    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__.  A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.  This is new in 2.4.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  This is new in 2.3.  Possible values (see the
    docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.

    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    # If no module was given, then use __main__.
    if m is None:
        # DWA - m will still be None if this wasn't invoked from the command
        # line, in which case the following TypeError is about as good an error
        # as we should expect
        m = sys.modules.get('__main__')

    # Check that we were actually given a module.
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))

    # If no name was given, then use the module's name.
    if name is None:
        name = m.__name__

    # Find, parse, and run all tests in the given module.
    finder = DocTestFinder(exclude_empty=exclude_empty)

    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)

    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()

    # Fold results into the backward-compatibility global runner.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return TestResults(runner.failures, runner.tries)
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser(),
             encoding=None):
    """
    Test examples in the given file.  Return (#failures, #tests).

    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:

      - If "module_relative" is True (the default), then "filename"
        specifies a module-relative path.  By default, this path is
        relative to the calling module's directory; but if the
        "package" argument is specified, then it is relative to that
        package.  To ensure os-independence, "filename" should use
        "/" characters to separate path segments, and should not
        be an absolute path (i.e., it may not begin with "/").

      - If "module_relative" is False, then "filename" specifies an
        os-specific path.  The path may be absolute or relative (to
        the current working directory).

    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.

    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename.  If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames.  It is an error to
    specify "package" if "module_relative" is False.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}.  A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  Possible values (see the docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.

    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.

    Optional keyword arg "encoding" specifies an encoding that should
    be used to convert the file to unicode.

    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path
    text, filename = _load_testfile(filename, package, module_relative,
                                    encoding or "utf-8")

    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)

    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if '__name__' not in globs:
        globs['__name__'] = '__main__'

    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)

    # Read the file, convert it to a test, and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    # Fold results into the backward-compatibility global runner.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return TestResults(runner.failures, runner.tries)
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """
    Find and run the doctest examples in object `f`'s docstring, using
    `globs` as the globals for execution.

    Optional argument `name` is used in failure messages.  If the
    optional argument `verbose` is true, output is generated even when
    all examples pass.  `compileflags` gives the set of flags that
    should be used by the Python compiler when running the examples;
    if not specified, it defaults to the set of future-import flags
    that apply to `globs`.  Optional keyword arg `optionflags`
    specifies options for the testing and output; see the
    documentation for `testmod` for more information.
    """
    # recurse=False: search only `f` itself, not objects contained in it.
    doctest_finder = DocTestFinder(verbose=verbose, recurse=False)
    doctest_runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for found_test in doctest_finder.find(f, name, globs=globs):
        doctest_runner.run(found_test, compileflags=compileflags)
######################################################################
## 7. Unittest Support
######################################################################

# Default reporting flags applied by the unittest integration when a
# DocTestCase's own optionflags carry no reporting bits; changed via
# set_unittest_reportflags().
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.

    The old flag is returned so that a runner could restore the old
    value if it wished to:

      >>> import doctest
      >>> old = doctest._unittest_reportflags
      >>> doctest.set_unittest_reportflags(REPORT_NDIFF |
      ...                          REPORT_ONLY_FIRST_FAILURE) == old
      True

      >>> doctest._unittest_reportflags == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True

    Only reporting flags can be set:

      >>> doctest.set_unittest_reportflags(ELLIPSIS)
      Traceback (most recent call last):
      ...
      ValueError: ('Only reporting flags allowed', 8)

      >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True
    """
    global _unittest_reportflags

    # Reject any bits outside the reporting-flag mask.
    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)
    previous = _unittest_reportflags
    _unittest_reportflags = flags
    return previous
class DocTestCase(unittest.TestCase):
    """A unittest.TestCase that runs a single DocTest object."""

    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None):

        unittest.TestCase.__init__(self)
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        # Snapshot of the test's globals, used by tearDown() to restore
        # them after a run.
        self._dt_globs = test.globs.copy()
        self._dt_test = test
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown

    def setUp(self):
        # Invoke the user-supplied set-up hook (if any) with the DocTest.
        test = self._dt_test
        if self._dt_setUp is not None:
            self._dt_setUp(test)

    def tearDown(self):
        # Invoke the user-supplied tear-down hook (if any) with the DocTest.
        test = self._dt_test
        if self._dt_tearDown is not None:
            self._dt_tearDown(test)

        # restore the original globs
        test.globs.clear()
        test.globs.update(self._dt_globs)

    def runTest(self):
        """Run the wrapped DocTest; fail the case if any example fails."""
        test = self._dt_test
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags

        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags

        runner = DocTestRunner(optionflags=optionflags,
                               checker=self._dt_checker, verbose=False)

        try:
            runner.DIVIDER = "-"*70
            # Failure reports go to `new`, not to real stdout; restore
            # sys.stdout afterwards regardless of the outcome.
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            sys.stdout = old

        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))

    def format_failure(self, err):
        """Build the unittest failure message for doctest output `err`."""
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                '  File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )

    def debug(self):
        r"""Run the test case without results and without catching exceptions

           The unit test framework includes a debug method on test cases
           and test suites to support post-mortem debugging.  The test code
           is run in such a way that errors are not caught.  This way a
           caller can catch the errors and initiate post-mortem debugging.

           The DocTestCase provides a debug method that raises
           UnexpectedException errors if there is an unexpected
           exception:

             >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
             ...                {}, 'foo', 'foo.py', 0)
             >>> case = DocTestCase(test)
             >>> try:
             ...     case.debug()
             ... except UnexpectedException as f:
             ...     failure = f

           The UnexpectedException contains the test, the example, and
           the original exception:

             >>> failure.test is test
             True

             >>> failure.example.want
             '42\n'

             >>> exc_info = failure.exc_info
             >>> raise exc_info[1] # Already has the traceback
             Traceback (most recent call last):
             ...
             KeyError

           If the output doesn't match, then a DocTestFailure is raised:

             >>> test = DocTestParser().get_doctest('''
             ...      >>> x = 1
             ...      >>> x
             ...      2
             ...      ''', {}, 'foo', 'foo.py', 0)
             >>> case = DocTestCase(test)

             >>> try:
             ...    case.debug()
             ... except DocTestFailure as f:
             ...    failure = f

           DocTestFailure objects provide access to the test:

             >>> failure.test is test
             True

           As well as to the example:

             >>> failure.example.want
             '2\n'

           and the actual output:

             >>> failure.got
             '1\n'
           """
        self.setUp()
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        # clear_globs=False keeps the globals for post-mortem inspection.
        runner.run(self._dt_test, clear_globs=False)
        self.tearDown()

    def id(self):
        return self._dt_test.name

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented

        return self._dt_test == other._dt_test and \
               self._dt_optionflags == other._dt_optionflags and \
               self._dt_setUp == other._dt_setUp and \
               self._dt_tearDown == other._dt_tearDown and \
               self._dt_checker == other._dt_checker

    def __hash__(self):
        # NOTE(review): _dt_test is deliberately left out of the hash
        # (it participates in __eq__ only) -- presumably because DocTest
        # objects are unhashable or mutable; confirm before relying on it.
        return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown,
                     self._dt_checker))

    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))

    # Keep the default object __str__ rather than unittest.TestCase's.
    __str__ = object.__str__

    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
    """Placeholder case reported when doctests cannot run (e.g. under -O2)."""

    def __init__(self, module):
        self.module = module
        super().__init__(None)

    def setUp(self):
        self.skipTest("DocTestSuite will not work with -O2 and above")

    def test_skip(self):
        pass

    def shortDescription(self):
        return f"Skipping tests from {self.module.__name__}"

    __str__ = shortDescription
class _DocTestSuite(unittest.TestSuite):

    def _removeTestAtIndex(self, index):
        # Deliberate no-op override: keep references to the contained
        # tests instead of letting the base suite discard them.
        # NOTE(review): intent inferred from the empty body -- confirm
        # against unittest.TestSuite._removeTestAtIndex.
        pass
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """
    Convert doctest tests for a module to a unittest test suite.

    This converts each documentation string in a module that
    contains doctest tests to a unittest test case.  If any of the
    tests in a doc string fail, then the test case fails.  An exception
    is raised showing the name of the file containing the test and a
    (sometimes approximate) line number.

    The `module` argument provides the module to be tested.  The argument
    can be either a module or a module name.

    If no argument is given, the calling module is used.

    A number of options may be provided as keyword arguments:

    setUp
      A set-up function.  This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object.  The setUp function can access the test globals as the
      globs attribute of the test passed.

    tearDown
      A tear-down function.  This is called after running the
      tests in each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as the
      globs attribute of the test passed.

    globs
      A dictionary containing initial global variables for the tests.

    optionflags
      A set of doctest option flags expressed as an integer.
    """

    if test_finder is None:
        test_finder = DocTestFinder()

    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)

    if not tests and sys.flags.optimize >= 2:
        # Skip doctests when running with -O2: docstrings are stripped,
        # so nothing was found.  Report one explicit "skipped" case.
        suite = _DocTestSuite()
        suite.addTest(SkipDocTestCase(module))
        return suite

    tests.sort()
    suite = _DocTestSuite()

    for test in tests:
        if not test.examples:
            continue
        if not test.filename:
            filename = module.__file__
            # Point at the source file, not the compiled bytecode.
            if filename.endswith(".pyc"):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(DocTestCase(test, **options))

    return suite
class DocFileCase(DocTestCase):
    """A DocTestCase specialised for doctests loaded from a text file."""

    def id(self):
        return self._dt_test.name.replace('.', '_')

    def __repr__(self):
        return self._dt_test.filename

    def format_failure(self, err):
        test = self._dt_test
        return ('Failed doctest test for %s\n  File "%s", line 0\n\n%s'
                % (test.name, test.filename, err))
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(),
                encoding=None, **options):
    """Create a DocFileCase for the doctests contained in the given file."""
    globs = {} if globs is None else globs.copy()

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Locate the file (relativizing the path if needed) and read it.
    doc, path = _load_testfile(path, package, module_relative,
                               encoding or "utf-8")

    if "__file__" not in globs:
        globs["__file__"] = path

    # Name the test after the file, parse the text into a DocTest, and
    # wrap it in a DocFileCase.
    name = os.path.basename(path)
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.

    The path to each doctest file is given as a string; the
    interpretation of that string depends on the keyword argument
    "module_relative".

    A number of options may be provided as keyword arguments:

    module_relative
      If "module_relative" is True, then the given file paths are
      interpreted as os-independent module-relative paths.  By
      default, these paths are relative to the calling module's
      directory; but if the "package" argument is specified, then
      they are relative to that package.  To ensure os-independence,
      "filename" should use "/" characters to separate path
      segments, and may not be an absolute path (i.e., it may not
      begin with "/").

      If "module_relative" is False, then the given file paths are
      interpreted as os-specific paths.  These paths may be absolute
      or relative (to the current working directory).

    package
      A Python package or the name of a Python package whose directory
      should be used as the base directory for module relative paths.
      If "package" is not specified, then the calling module's
      directory is used as the base directory for module relative
      filenames.  It is an error to specify "package" if
      "module_relative" is False.

    setUp
      A set-up function.  This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object.  The setUp function can access the test globals as the
      globs attribute of the test passed.

    tearDown
      A tear-down function.  This is called after running the
      tests in each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as the
      globs attribute of the test passed.

    globs
      A dictionary containing initial global variables for the tests.

    optionflags
      A set of doctest option flags expressed as an integer.

    parser
      A DocTestParser (or subclass) that should be used to extract
      tests from the files.

    encoding
      An encoding that will be used to convert the files to unicode.
    """
    suite = _DocTestSuite()

    # We do this here so that _normalize_module is called at the right
    # level.  If it were called in DocFileTest, then this function
    # would be the caller and we might guess the package incorrectly.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))

    for path in paths:
        suite.addTest(DocFileTest(path, **kw))

    return suite
- ######################################################################
- ## 8. Debugging Support
- ######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.

       Converts text with examples to a Python script.  Example input is
       converted to regular code.  Example output and all other words
       are converted to comments:

       >>> text = '''
       ...       Here are examples of simple math.
       ...
       ...           Python has super accurate integer addition
       ...
       ...           >>> 2 + 2
       ...           5
       ...
       ...           And very friendly error messages:
       ...
       ...           >>> 1/0
       ...           To Infinity
       ...           And
       ...           Beyond
       ...
       ...           You can use logic if you want:
       ...
       ...           >>> if 0:
       ...           ...    blah
       ...           ...    blah
       ...           ...
       ...
       ...           Ho hum
       ...           '''

       >>> print(script_from_examples(text))
       # Here are examples of simple math.
       #
       #     Python has super accurate integer addition
       #
       2 + 2
       # Expected:
       ## 5
       #
       #     And very friendly error messages:
       #
       1/0
       # Expected:
       ## To Infinity
       ## And
       ## Beyond
       #
       #     You can use logic if you want:
       #
       if 0:
          blah
          blah
       #
       # Ho hum
       <BLANKLINE>
       """
    output = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Add the example's source code (strip trailing NL)
            output.append(piece.source[:-1])
            # Add the expected output:
            want = piece.want
            if want:
                output.append('# Expected:')
                output += ['## '+l for l in want.split('\n')[:-1]]
        else:
            # Add non-example text.
            output += [_comment_line(l)
                       for l in piece.split('\n')[:-1]]

    # Trim junk on both ends.
    while output and output[-1] == '#':
        output.pop()
    while output and output[0] == '#':
        output.pop(0)
    # Combine the output, and return it.
    # Add a courtesy newline to prevent exec from choking (see bug #1172785)
    return '\n'.join(output) + '\n'
def testsource(module, name):
    """Extract the test sources from a doctest docstring as a script.

    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the doc string with tests to be debugged.
    """
    module = _normalize_module(module)
    all_tests = DocTestFinder().find(module)
    # Use the first test whose fully-qualified name matches.
    found = next((t for t in all_tests if t.name == name), None)
    if found is None:
        raise ValueError(name, "not found in tests")
    return script_from_examples(found.docstring)
def debug_src(src, pm=False, globs=None):
    """Debug a single doctest docstring, given as the argument `src`."""
    testsrc = script_from_examples(src)
    debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
    "Debug a test script.  `src` is the script, as a string."
    import pdb

    # Copy the globals so exec does not mutate the caller's dict.
    if globs:
        globs = globs.copy()
    else:
        globs = {}

    if pm:
        # Post-mortem mode: run the script to completion and, if it dies,
        # drop into pdb at the point of the exception.
        try:
            exec(src, globs, globs)
        except:
            # Deliberately bare: post-mortem debugging should trigger for
            # anything the script raises.
            print(sys.exc_info()[1])
            p = pdb.Pdb(nosigint=True)
            p.reset()
            p.interaction(None, sys.exc_info()[2])
    else:
        # Step mode: run the script under the debugger from the start.
        pdb.Pdb(nosigint=True).run("exec(%r)" % src, globs, globs)
def debug(module, name, pm=False):
    """Debug a single doctest docstring.

    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the docstring with tests to be debugged.
    """
    module = _normalize_module(module)
    debug_script(testsource(module, name), pm, module.__dict__)
- ######################################################################
- ## 9. Example Usage
- ######################################################################
- class _TestClass:
- """
- A pointless class, for sanity-checking of docstring testing.
- Methods:
- square()
- get()
- >>> _TestClass(13).get() + _TestClass(-12).get()
- 1
- >>> hex(_TestClass(13).square().get())
- '0xa9'
- """
- def __init__(self, val):
- """val -> _TestClass object with associated value val.
- >>> t = _TestClass(123)
- >>> print(t.get())
- 123
- """
- self.val = val
- def square(self):
- """square() -> square TestClass's associated value
- >>> _TestClass(13).square().get()
- 169
- """
- self.val = self.val ** 2
- return self
- def get(self):
- """get() -> return TestClass's associated value.
- >>> x = _TestClass(-42)
- >>> print(x.get())
- -42
- """
- return self.val
# Self-test corpus: testmod() on this module picks up these entries from
# __test__ in addition to real docstrings.  The string values are doctest
# text and are executed verbatim.
__test__ = {"_TestClass": _TestClass,
            "string": r"""
                      Example of a string object, searched as-is.
                      >>> x = 1; y = 2
                      >>> x + y, x * y
                      (3, 2)
                      """,

            "bool-int equivalence": r"""
                                    In 2.2, boolean expressions displayed
                                    0 or 1.  By default, we still accept
                                    them.  This can be disabled by passing
                                    DONT_ACCEPT_TRUE_FOR_1 to the new
                                    optionflags argument.
                                    >>> 4 == 4
                                    1
                                    >>> 4 == 4
                                    True
                                    >>> 4 > 4
                                    0
                                    >>> 4 > 4
                                    False
                                    """,

            "blank lines": r"""
                Blank lines can be marked with <BLANKLINE>:
                    >>> print('foo\n\nbar\n')
                    foo
                    <BLANKLINE>
                    bar
                    <BLANKLINE>
            """,

            "ellipsis": r"""
                If the ellipsis flag is used, then '...' can be used to
                elide substrings in the desired output:
                    >>> print(list(range(1000))) #doctest: +ELLIPSIS
                    [0, 1, 2, ..., 999]
            """,

            "whitespace normalization": r"""
                If the whitespace normalization flag is used, then
                differences in whitespace are ignored.
                    >>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE
                    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                     15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
                     27, 28, 29]
            """,
           }
def _test():
    """Command-line driver: run the doctests in each file named on argv.

    Returns 1 as soon as any file reports failures, else 0.
    """
    import argparse

    parser = argparse.ArgumentParser(description="doctest runner")
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='print very verbose output for all tests')
    parser.add_argument('-o', '--option', action='append',
                        choices=OPTIONFLAGS_BY_NAME.keys(), default=[],
                        help=('specify a doctest option flag to apply'
                              ' to the test run; may be specified more'
                              ' than once to apply multiple options'))
    parser.add_argument('-f', '--fail-fast', action='store_true',
                        help=('stop running tests after first failure (this'
                              ' is a shorthand for -o FAIL_FAST, and is'
                              ' in addition to any other -o options)'))
    parser.add_argument('file', nargs='+',
                        help='file containing the tests to run')
    args = parser.parse_args()

    # Verbose used to be handled by the "inspect argv" magic in
    # DocTestRunner, but since we are using argparse we pass it manually.
    verbose = args.verbose

    # Fold the named option flags into a single bitmask.
    options = 0
    for flag_name in args.option:
        options |= OPTIONFLAGS_BY_NAME[flag_name]
    if args.fail_fast:
        options |= FAIL_FAST

    for path in args.file:
        if path.endswith(".py"):
            # It is a module -- insert its dir into sys.path and try to
            # import it.  If it is part of a package, that possibly
            # won't work because of package imports.
            dirname, modfile = os.path.split(path)
            sys.path.insert(0, dirname)
            mod = __import__(modfile[:-3])
            del sys.path[0]
            failed, _ = testmod(mod, verbose=verbose, optionflags=options)
        else:
            # A text file: run its examples directly.
            failed, _ = testfile(path, module_relative=False,
                                 verbose=verbose, optionflags=options)
        if failed:
            return 1
    return 0
if __name__ == "__main__":
    # Propagate the driver's result (0 on success, 1 if any file had
    # failing tests) as the process exit status.
    sys.exit(_test())
|