extrapolation.py 72 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115
  1. try:
  2. from itertools import izip
  3. except ImportError:
  4. izip = zip
  5. from ..libmp.backend import xrange
  6. from .calculus import defun
  7. try:
  8. next = next
  9. except NameError:
  10. next = lambda _: _.next()
  11. @defun
  12. def richardson(ctx, seq):
  13. r"""
  14. Given a list ``seq`` of the first `N` elements of a slowly convergent
  15. infinite sequence, :func:`~mpmath.richardson` computes the `N`-term
  16. Richardson extrapolate for the limit.
  17. :func:`~mpmath.richardson` returns `(v, c)` where `v` is the estimated
  18. limit and `c` is the magnitude of the largest weight used during the
  19. computation. The weight provides an estimate of the precision
  20. lost to cancellation. Due to cancellation effects, the sequence must
  21. be typically be computed at a much higher precision than the target
  22. accuracy of the extrapolation.
  23. **Applicability and issues**
  24. The `N`-step Richardson extrapolation algorithm used by
  25. :func:`~mpmath.richardson` is described in [1].
  26. Richardson extrapolation only works for a specific type of sequence,
  27. namely one converging like partial sums of
  28. `P(1)/Q(1) + P(2)/Q(2) + \ldots` where `P` and `Q` are polynomials.
  29. When the sequence does not convergence at such a rate
  30. :func:`~mpmath.richardson` generally produces garbage.
  31. Richardson extrapolation has the advantage of being fast: the `N`-term
  32. extrapolate requires only `O(N)` arithmetic operations, and usually
  33. produces an estimate that is accurate to `O(N)` digits. Contrast with
  34. the Shanks transformation (see :func:`~mpmath.shanks`), which requires
  35. `O(N^2)` operations.
  36. :func:`~mpmath.richardson` is unable to produce an estimate for the
  37. approximation error. One way to estimate the error is to perform
  38. two extrapolations with slightly different `N` and comparing the
  39. results.
  40. Richardson extrapolation does not work for oscillating sequences.
  41. As a simple workaround, :func:`~mpmath.richardson` detects if the last
  42. three elements do not differ monotonically, and in that case
  43. applies extrapolation only to the even-index elements.
  44. **Example**
  45. Applying Richardson extrapolation to the Leibniz series for `\pi`::
  46. >>> from mpmath import *
  47. >>> mp.dps = 30; mp.pretty = True
  48. >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
  49. ... for m in range(1,30)]
  50. >>> v, c = richardson(S[:10])
  51. >>> v
  52. 3.2126984126984126984126984127
  53. >>> nprint([v-pi, c])
  54. [0.0711058, 2.0]
  55. >>> v, c = richardson(S[:30])
  56. >>> v
  57. 3.14159265468624052829954206226
  58. >>> nprint([v-pi, c])
  59. [1.09645e-9, 20833.3]
  60. **References**
  61. 1. [BenderOrszag]_ pp. 375-376
  62. """
  63. if len(seq) < 3:
  64. raise ValueError("seq should be of minimum length 3")
  65. if ctx.sign(seq[-1]-seq[-2]) != ctx.sign(seq[-2]-seq[-3]):
  66. seq = seq[::2]
  67. N = len(seq)//2-1
  68. s = ctx.zero
  69. # The general weight is c[k] = (N+k)**N * (-1)**(k+N) / k! / (N-k)!
  70. # To avoid repeated factorials, we simplify the quotient
  71. # of successive weights to obtain a recurrence relation
  72. c = (-1)**N * N**N / ctx.mpf(ctx._ifac(N))
  73. maxc = 1
  74. for k in xrange(N+1):
  75. s += c * seq[N+k]
  76. maxc = max(abs(c), maxc)
  77. c *= (k-N)*ctx.mpf(k+N+1)**N
  78. c /= ((1+k)*ctx.mpf(k+N)**N)
  79. return s, maxc
  80. @defun
  81. def shanks(ctx, seq, table=None, randomized=False):
  82. r"""
  83. Given a list ``seq`` of the first `N` elements of a slowly
  84. convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated
  85. Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks
  86. transformation often provides strong convergence acceleration,
  87. especially if the sequence is oscillating.
  88. The iterated Shanks transformation is computed using the Wynn
  89. epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full
  90. epsilon table generated by Wynn's algorithm, which can be read
  91. off as follows:
  92. * The table is a list of lists forming a lower triangular matrix,
  93. where higher row and column indices correspond to more accurate
  94. values.
  95. * The columns with even index hold dummy entries (required for the
  96. computation) and the columns with odd index hold the actual
  97. extrapolates.
  98. * The last element in the last row is typically the most
  99. accurate estimate of the limit.
  100. * The difference to the third last element in the last row
  101. provides an estimate of the approximation error.
  102. * The magnitude of the second last element provides an estimate
  103. of the numerical accuracy lost to cancellation.
  104. For convenience, so the extrapolation is stopped at an odd index
  105. so that ``shanks(seq)[-1][-1]`` always gives an estimate of the
  106. limit.
  107. Optionally, an existing table can be passed to :func:`~mpmath.shanks`.
  108. This can be used to efficiently extend a previous computation after
  109. new elements have been appended to the sequence. The table will
  110. then be updated in-place.
  111. **The Shanks transformation**
  112. The Shanks transformation is defined as follows (see [2]): given
  113. the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is
  114. given by
  115. .. math ::
  116. S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k}
  117. The Shanks transformation gives the exact limit `A_{\infty}` in a
  118. single step if `A_k = A + a q^k`. Note in particular that it
  119. extrapolates the exact sum of a geometric series in a single step.
  120. Applying the Shanks transformation once often improves convergence
  121. substantially for an arbitrary sequence, but the optimal effect is
  122. obtained by applying it iteratively:
  123. `S(S(A_k)), S(S(S(A_k))), \ldots`.
  124. Wynn's epsilon algorithm provides an efficient way to generate
  125. the table of iterated Shanks transformations. It reduces the
  126. computation of each element to essentially a single division, at
  127. the cost of requiring dummy elements in the table. See [1] for
  128. details.
  129. **Precision issues**
  130. Due to cancellation effects, the sequence must be typically be
  131. computed at a much higher precision than the target accuracy
  132. of the extrapolation.
  133. If the Shanks transformation converges to the exact limit (such
  134. as if the sequence is a geometric series), then a division by
  135. zero occurs. By default, :func:`~mpmath.shanks` handles this case by
  136. terminating the iteration and returning the table it has
  137. generated so far. With *randomized=True*, it will instead
  138. replace the zero by a pseudorandom number close to zero.
  139. (TODO: find a better solution to this problem.)
  140. **Examples**
  141. We illustrate by applying Shanks transformation to the Leibniz
  142. series for `\pi`::
  143. >>> from mpmath import *
  144. >>> mp.dps = 50
  145. >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
  146. ... for m in range(1,30)]
  147. >>>
  148. >>> T = shanks(S[:7])
  149. >>> for row in T:
  150. ... nprint(row)
  151. ...
  152. [-0.75]
  153. [1.25, 3.16667]
  154. [-1.75, 3.13333, -28.75]
  155. [2.25, 3.14524, 82.25, 3.14234]
  156. [-2.75, 3.13968, -177.75, 3.14139, -969.937]
  157. [3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161]
  158. The extrapolated accuracy is about 4 digits, and about 4 digits
  159. may have been lost due to cancellation::
  160. >>> L = T[-1]
  161. >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
  162. [2.22532e-5, 4.78309e-5, 3515.06]
  163. Now we extend the computation::
  164. >>> T = shanks(S[:25], T)
  165. >>> L = T[-1]
  166. >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
  167. [3.75527e-19, 1.48478e-19, 2.96014e+17]
  168. The value for pi is now accurate to 18 digits. About 18 digits may
  169. also have been lost to cancellation.
  170. Here is an example with a geometric series, where the convergence
  171. is immediate (the sum is exactly 1)::
  172. >>> mp.dps = 15
  173. >>> for row in shanks([0.5, 0.75, 0.875, 0.9375, 0.96875]):
  174. ... nprint(row)
  175. [4.0]
  176. [8.0, 1.0]
  177. **References**
  178. 1. [GravesMorris]_
  179. 2. [BenderOrszag]_ pp. 368-375
  180. """
  181. if len(seq) < 2:
  182. raise ValueError("seq should be of minimum length 2")
  183. if table:
  184. START = len(table)
  185. else:
  186. START = 0
  187. table = []
  188. STOP = len(seq) - 1
  189. if STOP & 1:
  190. STOP -= 1
  191. one = ctx.one
  192. eps = +ctx.eps
  193. if randomized:
  194. from random import Random
  195. rnd = Random()
  196. rnd.seed(START)
  197. for i in xrange(START, STOP):
  198. row = []
  199. for j in xrange(i+1):
  200. if j == 0:
  201. a, b = 0, seq[i+1]-seq[i]
  202. else:
  203. if j == 1:
  204. a = seq[i]
  205. else:
  206. a = table[i-1][j-2]
  207. b = row[j-1] - table[i-1][j-1]
  208. if not b:
  209. if randomized:
  210. b = (1 + rnd.getrandbits(10))*eps
  211. elif i & 1:
  212. return table[:-1]
  213. else:
  214. return table
  215. row.append(a + one/b)
  216. table.append(row)
  217. return table
  218. class levin_class:
  219. # levin: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
  220. r"""
  221. This interface implements Levin's (nonlinear) sequence transformation for
  222. convergence acceleration and summation of divergent series. It performs
  223. better than the Shanks/Wynn-epsilon algorithm for logarithmic convergent
  224. or alternating divergent series.
  225. Let *A* be the series we want to sum:
  226. .. math ::
  227. A = \sum_{k=0}^{\infty} a_k
  228. Attention: all `a_k` must be non-zero!
  229. Let `s_n` be the partial sums of this series:
  230. .. math ::
  231. s_n = \sum_{k=0}^n a_k.
  232. **Methods**
  233. Calling ``levin`` returns an object with the following methods.
  234. ``update(...)`` works with the list of individual terms `a_k` of *A*, and
  235. ``update_step(...)`` works with the list of partial sums `s_k` of *A*:
  236. .. code ::
  237. v, e = ...update([a_0, a_1,..., a_k])
  238. v, e = ...update_psum([s_0, s_1,..., s_k])
  239. ``step(...)`` works with the individual terms `a_k` and ``step_psum(...)``
  240. works with the partial sums `s_k`:
  241. .. code ::
  242. v, e = ...step(a_k)
  243. v, e = ...step_psum(s_k)
  244. *v* is the current estimate for *A*, and *e* is an error estimate which is
  245. simply the difference between the current estimate and the last estimate.
  246. One should not mix ``update``, ``update_psum``, ``step`` and ``step_psum``.
  247. **A word of caution**
  248. One can only hope for good results (i.e. convergence acceleration or
  249. resummation) if the `s_n` have some well defind asymptotic behavior for
  250. large `n` and are not erratic or random. Furthermore one usually needs very
  251. high working precision because of the numerical cancellation. If the working
  252. precision is insufficient, levin may produce silently numerical garbage.
  253. Furthermore even if the Levin-transformation converges, in the general case
  254. there is no proof that the result is mathematically sound. Only for very
  255. special classes of problems one can prove that the Levin-transformation
  256. converges to the expected result (for example Stieltjes-type integrals).
  257. Furthermore the Levin-transform is quite expensive (i.e. slow) in comparison
  258. to Shanks/Wynn-epsilon, Richardson & co.
  259. In summary one can say that the Levin-transformation is powerful but
  260. unreliable and that it may need a copious amount of working precision.
  261. The Levin transform has several variants differing in the choice of weights.
  262. Some variants are better suited for the possible flavours of convergence
  263. behaviour of *A* than other variants:
  264. .. code ::
  265. convergence behaviour levin-u levin-t levin-v shanks/wynn-epsilon
  266. logarithmic + - + -
  267. linear + + + +
  268. alternating divergent + + + +
  269. "+" means the variant is suitable,"-" means the variant is not suitable;
  270. for comparison the Shanks/Wynn-epsilon transform is listed, too.
  271. The variant is controlled though the variant keyword (i.e. ``variant="u"``,
  272. ``variant="t"`` or ``variant="v"``). Overall "u" is probably the best choice.
  273. Finally it is possible to use the Sidi-S transform instead of the Levin transform
  274. by using the keyword ``method='sidi'``. The Sidi-S transform works better than the
  275. Levin transformation for some divergent series (see the examples).
  276. Parameters:
  277. .. code ::
  278. method "levin" or "sidi" chooses either the Levin or the Sidi-S transformation
  279. variant "u","t" or "v" chooses the weight variant.
  280. The Levin transform is also accessible through the nsum interface.
  281. ``method="l"`` or ``method="levin"`` select the normal Levin transform while
  282. ``method="sidi"``
  283. selects the Sidi-S transform. The variant is in both cases selected through the
  284. levin_variant keyword. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise
  285. it will miss the point where the Levin transform converges resulting in numerical
  286. overflow/garbage. For highly divergent series a copious amount of working precision
  287. must be chosen.
  288. **Examples**
  289. First we sum the zeta function::
  290. >>> from mpmath import mp
  291. >>> mp.prec = 53
  292. >>> eps = mp.mpf(mp.eps)
  293. >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
  294. ... L = mp.levin(method = "levin", variant = "u")
  295. ... S, s, n = [], 0, 1
  296. ... while 1:
  297. ... s += mp.one / (n * n)
  298. ... n += 1
  299. ... S.append(s)
  300. ... v, e = L.update_psum(S)
  301. ... if e < eps:
  302. ... break
  303. ... if n > 1000: raise RuntimeError("iteration limit exceeded")
  304. >>> print(mp.chop(v - mp.pi ** 2 / 6))
  305. 0.0
  306. >>> w = mp.nsum(lambda n: 1 / (n*n), [1, mp.inf], method = "levin", levin_variant = "u")
  307. >>> print(mp.chop(v - w))
  308. 0.0
  309. Now we sum the zeta function outside its range of convergence
  310. (attention: This does not work at the negative integers!)::
  311. >>> eps = mp.mpf(mp.eps)
  312. >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
  313. ... L = mp.levin(method = "levin", variant = "v")
  314. ... A, n = [], 1
  315. ... while 1:
  316. ... s = mp.mpf(n) ** (2 + 3j)
  317. ... n += 1
  318. ... A.append(s)
  319. ... v, e = L.update(A)
  320. ... if e < eps:
  321. ... break
  322. ... if n > 1000: raise RuntimeError("iteration limit exceeded")
  323. >>> print(mp.chop(v - mp.zeta(-2-3j)))
  324. 0.0
  325. >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
  326. >>> print(mp.chop(v - w))
  327. 0.0
  328. Now we sum the divergent asymptotic expansion of an integral related to the
  329. exponential integral (see also [2] p.373). The Sidi-S transform works best here::
  330. >>> z = mp.mpf(10)
  331. >>> exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
  332. >>> # exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
  333. >>> eps = mp.mpf(mp.eps)
  334. >>> with mp.extraprec(2 * mp.prec): # high working precisions are mandatory for divergent resummation
  335. ... L = mp.levin(method = "sidi", variant = "t")
  336. ... n = 0
  337. ... while 1:
  338. ... s = (-1)**n * mp.fac(n) * z ** (-n)
  339. ... v, e = L.step(s)
  340. ... n += 1
  341. ... if e < eps:
  342. ... break
  343. ... if n > 1000: raise RuntimeError("iteration limit exceeded")
  344. >>> print(mp.chop(v - exact))
  345. 0.0
  346. >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
  347. >>> print(mp.chop(v - w))
  348. 0.0
  349. Another highly divergent integral is also summable::
  350. >>> z = mp.mpf(2)
  351. >>> eps = mp.mpf(mp.eps)
  352. >>> exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
  353. >>> # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
  354. >>> with mp.extraprec(7 * mp.prec): # we need copious amount of precision to sum this highly divergent series
  355. ... L = mp.levin(method = "levin", variant = "t")
  356. ... n, s = 0, 0
  357. ... while 1:
  358. ... s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n))
  359. ... n += 1
  360. ... v, e = L.step_psum(s)
  361. ... if e < eps:
  362. ... break
  363. ... if n > 1000: raise RuntimeError("iteration limit exceeded")
  364. >>> print(mp.chop(v - exact))
  365. 0.0
  366. >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
  367. ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
  368. >>> print(mp.chop(v - w))
  369. 0.0
  370. These examples run with 15-20 decimal digits precision. For higher precision the
  371. working precision must be raised.
  372. **Examples for nsum**
  373. Here we calculate Euler's constant as the constant term in the Laurent
  374. expansion of `\zeta(s)` at `s=1`. This sum converges extremly slowly because of
  375. the logarithmic convergence behaviour of the Dirichlet series for zeta::
  376. >>> mp.dps = 30
  377. >>> z = mp.mpf(10) ** (-10)
  378. >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z
  379. >>> print(mp.chop(a - mp.euler, tol = 1e-10))
  380. 0.0
  381. The Sidi-S transform performs excellently for the alternating series of `\log(2)`::
  382. >>> a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi")
  383. >>> print(mp.chop(a - mp.log(2)))
  384. 0.0
  385. Hypergeometric series can also be summed outside their range of convergence.
  386. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise it will miss the
  387. point where the Levin transform converges resulting in numerical overflow/garbage::
  388. >>> z = 2 + 1j
  389. >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
  390. >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
  391. >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
  392. >>> print(mp.chop(exact-v))
  393. 0.0
  394. References:
  395. [1] E.J. Weniger - "Nonlinear Sequence Transformations for the Acceleration of
  396. Convergence and the Summation of Divergent Series" arXiv:math/0306302
  397. [2] A. Sidi - "Pratical Extrapolation Methods"
  398. [3] H.H.H. Homeier - "Scalar Levin-Type Sequence Transformations" arXiv:math/0005209
  399. """
  400. def __init__(self, method = "levin", variant = "u"):
  401. self.variant = variant
  402. self.n = 0
  403. self.a0 = 0
  404. self.theta = 1
  405. self.A = []
  406. self.B = []
  407. self.last = 0
  408. self.last_s = False
  409. if method == "levin":
  410. self.factor = self.factor_levin
  411. elif method == "sidi":
  412. self.factor = self.factor_sidi
  413. else:
  414. raise ValueError("levin: unknown method \"%s\"" % method)
  415. def factor_levin(self, i):
  416. # original levin
  417. # [1] p.50,e.7.5-7 (with n-j replaced by i)
  418. return (self.theta + i) * (self.theta + self.n - 1) ** (self.n - i - 2) / self.ctx.mpf(self.theta + self.n) ** (self.n - i - 1)
  419. def factor_sidi(self, i):
  420. # sidi analogon to levin (factorial series)
  421. # [1] p.59,e.8.3-16 (with n-j replaced by i)
  422. return (self.theta + self.n - 1) * (self.theta + self.n - 2) / self.ctx.mpf((self.theta + 2 * self.n - i - 2) * (self.theta + 2 * self.n - i - 3))
  423. def run(self, s, a0, a1 = 0):
  424. if self.variant=="t":
  425. # levin t
  426. w=a0
  427. elif self.variant=="u":
  428. # levin u
  429. w=a0*(self.theta+self.n)
  430. elif self.variant=="v":
  431. # levin v
  432. w=a0*a1/(a0-a1)
  433. else:
  434. assert False, "unknown variant"
  435. if w==0:
  436. raise ValueError("levin: zero weight")
  437. self.A.append(s/w)
  438. self.B.append(1/w)
  439. for i in range(self.n-1,-1,-1):
  440. if i==self.n-1:
  441. f=1
  442. else:
  443. f=self.factor(i)
  444. self.A[i]=self.A[i+1]-f*self.A[i]
  445. self.B[i]=self.B[i+1]-f*self.B[i]
  446. self.n+=1
  447. ###########################################################################
  448. def update_psum(self,S):
  449. """
  450. This routine applies the convergence acceleration to the list of partial sums.
  451. A = sum(a_k, k = 0..infinity)
  452. s_n = sum(a_k, k = 0..n)
  453. v, e = ...update_psum([s_0, s_1,..., s_k])
  454. output:
  455. v current estimate of the series A
  456. e an error estimate which is simply the difference between the current
  457. estimate and the last estimate.
  458. """
  459. if self.variant!="v":
  460. if self.n==0:
  461. self.run(S[0],S[0])
  462. while self.n<len(S):
  463. self.run(S[self.n],S[self.n]-S[self.n-1])
  464. else:
  465. if len(S)==1:
  466. self.last=0
  467. return S[0],abs(S[0])
  468. if self.n==0:
  469. self.a1=S[1]-S[0]
  470. self.run(S[0],S[0],self.a1)
  471. while self.n<len(S)-1:
  472. na1=S[self.n+1]-S[self.n]
  473. self.run(S[self.n],self.a1,na1)
  474. self.a1=na1
  475. value=self.A[0]/self.B[0]
  476. err=abs(value-self.last)
  477. self.last=value
  478. return value,err
  479. def update(self,X):
  480. """
  481. This routine applies the convergence acceleration to the list of individual terms.
  482. A = sum(a_k, k = 0..infinity)
  483. v, e = ...update([a_0, a_1,..., a_k])
  484. output:
  485. v current estimate of the series A
  486. e an error estimate which is simply the difference between the current
  487. estimate and the last estimate.
  488. """
  489. if self.variant!="v":
  490. if self.n==0:
  491. self.s=X[0]
  492. self.run(self.s,X[0])
  493. while self.n<len(X):
  494. self.s+=X[self.n]
  495. self.run(self.s,X[self.n])
  496. else:
  497. if len(X)==1:
  498. self.last=0
  499. return X[0],abs(X[0])
  500. if self.n==0:
  501. self.s=X[0]
  502. self.run(self.s,X[0],X[1])
  503. while self.n<len(X)-1:
  504. self.s+=X[self.n]
  505. self.run(self.s,X[self.n],X[self.n+1])
  506. value=self.A[0]/self.B[0]
  507. err=abs(value-self.last)
  508. self.last=value
  509. return value,err
  510. ###########################################################################
  511. def step_psum(self,s):
  512. """
  513. This routine applies the convergence acceleration to the partial sums.
  514. A = sum(a_k, k = 0..infinity)
  515. s_n = sum(a_k, k = 0..n)
  516. v, e = ...step_psum(s_k)
  517. output:
  518. v current estimate of the series A
  519. e an error estimate which is simply the difference between the current
  520. estimate and the last estimate.
  521. """
  522. if self.variant!="v":
  523. if self.n==0:
  524. self.last_s=s
  525. self.run(s,s)
  526. else:
  527. self.run(s,s-self.last_s)
  528. self.last_s=s
  529. else:
  530. if isinstance(self.last_s,bool):
  531. self.last_s=s
  532. self.last_w=s
  533. self.last=0
  534. return s,abs(s)
  535. na1=s-self.last_s
  536. self.run(self.last_s,self.last_w,na1)
  537. self.last_w=na1
  538. self.last_s=s
  539. value=self.A[0]/self.B[0]
  540. err=abs(value-self.last)
  541. self.last=value
  542. return value,err
  543. def step(self,x):
  544. """
  545. This routine applies the convergence acceleration to the individual terms.
  546. A = sum(a_k, k = 0..infinity)
  547. v, e = ...step(a_k)
  548. output:
  549. v current estimate of the series A
  550. e an error estimate which is simply the difference between the current
  551. estimate and the last estimate.
  552. """
  553. if self.variant!="v":
  554. if self.n==0:
  555. self.s=x
  556. self.run(self.s,x)
  557. else:
  558. self.s+=x
  559. self.run(self.s,x)
  560. else:
  561. if isinstance(self.last_s,bool):
  562. self.last_s=x
  563. self.s=0
  564. self.last=0
  565. return x,abs(x)
  566. self.s+=self.last_s
  567. self.run(self.s,self.last_s,x)
  568. self.last_s=x
  569. value=self.A[0]/self.B[0]
  570. err=abs(value-self.last)
  571. self.last=value
  572. return value,err
  573. def levin(ctx, method = "levin", variant = "u"):
  574. L = levin_class(method = method, variant = variant)
  575. L.ctx = ctx
  576. return L
  577. levin.__doc__ = levin_class.__doc__
  578. defun(levin)
  579. class cohen_alt_class:
  580. # cohen_alt: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
  581. r"""
  582. This interface implements the convergence acceleration of alternating series
  583. as described in H. Cohen, F.R. Villegas, D. Zagier - "Convergence Acceleration
  584. of Alternating Series". This series transformation works only well if the
  585. individual terms of the series have an alternating sign. It belongs to the
  586. class of linear series transformations (in contrast to the Shanks/Wynn-epsilon
  587. or Levin transform). This series transformation is also able to sum some types
  588. of divergent series. See the paper under which conditions this resummation is
  589. mathematical sound.
  590. Let *A* be the series we want to sum:
  591. .. math ::
  592. A = \sum_{k=0}^{\infty} a_k
  593. Let `s_n` be the partial sums of this series:
  594. .. math ::
  595. s_n = \sum_{k=0}^n a_k.
  596. **Interface**
  597. Calling ``cohen_alt`` returns an object with the following methods.
  598. Then ``update(...)`` works with the list of individual terms `a_k` and
  599. ``update_psum(...)`` works with the list of partial sums `s_k`:
  600. .. code ::
  601. v, e = ...update([a_0, a_1,..., a_k])
  602. v, e = ...update_psum([s_0, s_1,..., s_k])
  603. *v* is the current estimate for *A*, and *e* is an error estimate which is
  604. simply the difference between the current estimate and the last estimate.
  605. **Examples**
  606. Here we compute the alternating zeta function using ``update_psum``::
  607. >>> from mpmath import mp
  608. >>> AC = mp.cohen_alt()
  609. >>> S, s, n = [], 0, 1
  610. >>> while 1:
  611. ... s += -((-1) ** n) * mp.one / (n * n)
  612. ... n += 1
  613. ... S.append(s)
  614. ... v, e = AC.update_psum(S)
  615. ... if e < mp.eps:
  616. ... break
  617. ... if n > 1000: raise RuntimeError("iteration limit exceeded")
  618. >>> print(mp.chop(v - mp.pi ** 2 / 12))
  619. 0.0
  620. Here we compute the product `\prod_{n=1}^{\infty} \Gamma(1+1/(2n-1)) / \Gamma(1+1/(2n))`::
  621. >>> A = []
  622. >>> AC = mp.cohen_alt()
  623. >>> n = 1
  624. >>> while 1:
  625. ... A.append( mp.loggamma(1 + mp.one / (2 * n - 1)))
  626. ... A.append(-mp.loggamma(1 + mp.one / (2 * n)))
  627. ... n += 1
  628. ... v, e = AC.update(A)
  629. ... if e < mp.eps:
  630. ... break
  631. ... if n > 1000: raise RuntimeError("iteration limit exceeded")
  632. >>> v = mp.exp(v)
  633. >>> print(mp.chop(v - 1.06215090557106, tol = 1e-12))
  634. 0.0
  635. ``cohen_alt`` is also accessible through the :func:`~mpmath.nsum` interface::
  636. >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
  637. >>> print(mp.chop(v - mp.log(2)))
  638. 0.0
  639. >>> v = mp.nsum(lambda n: (-1)**n / (2 * n + 1), [0, mp.inf], method = "a")
  640. >>> print(mp.chop(v - mp.pi / 4))
  641. 0.0
  642. >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
  643. >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
  644. 0.0
  645. """
  646. def __init__(self):
  647. self.last=0
  648. def update(self, A):
  649. """
  650. This routine applies the convergence acceleration to the list of individual terms.
  651. A = sum(a_k, k = 0..infinity)
  652. v, e = ...update([a_0, a_1,..., a_k])
  653. output:
  654. v current estimate of the series A
  655. e an error estimate which is simply the difference between the current
  656. estimate and the last estimate.
  657. """
  658. n = len(A)
  659. d = (3 + self.ctx.sqrt(8)) ** n
  660. d = (d + 1 / d) / 2
  661. b = -self.ctx.one
  662. c = -d
  663. s = 0
  664. for k in xrange(n):
  665. c = b - c
  666. if k % 2 == 0:
  667. s = s + c * A[k]
  668. else:
  669. s = s - c * A[k]
  670. b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))
  671. value = s / d
  672. err = abs(value - self.last)
  673. self.last = value
  674. return value, err
  675. def update_psum(self, S):
  676. """
  677. This routine applies the convergence acceleration to the list of partial sums.
  678. A = sum(a_k, k = 0..infinity)
  679. s_n = sum(a_k ,k = 0..n)
  680. v, e = ...update_psum([s_0, s_1,..., s_k])
  681. output:
  682. v current estimate of the series A
  683. e an error estimate which is simply the difference between the current
  684. estimate and the last estimate.
  685. """
  686. n = len(S)
  687. d = (3 + self.ctx.sqrt(8)) ** n
  688. d = (d + 1 / d) / 2
  689. b = self.ctx.one
  690. s = 0
  691. for k in xrange(n):
  692. b = 2 * (n + k) * (n - k) * b / ((2 * k + 1) * (k + self.ctx.one))
  693. s += b * S[k]
  694. value = s / d
  695. err = abs(value - self.last)
  696. self.last = value
  697. return value, err
  698. def cohen_alt(ctx):
  699. L = cohen_alt_class()
  700. L.ctx = ctx
  701. return L
  702. cohen_alt.__doc__ = cohen_alt_class.__doc__
  703. defun(cohen_alt)
  704. @defun
  705. def sumap(ctx, f, interval, integral=None, error=False):
  706. r"""
  707. Evaluates an infinite series of an analytic summand *f* using the
  708. Abel-Plana formula
  709. .. math ::
  710. \sum_{k=0}^{\infty} f(k) = \int_0^{\infty} f(t) dt + \frac{1}{2} f(0) +
  711. i \int_0^{\infty} \frac{f(it)-f(-it)}{e^{2\pi t}-1} dt.
  712. Unlike the Euler-Maclaurin formula (see :func:`~mpmath.sumem`),
  713. the Abel-Plana formula does not require derivatives. However,
  714. it only works when `|f(it)-f(-it)|` does not
  715. increase too rapidly with `t`.
  716. **Examples**
  717. The Abel-Plana formula is particularly useful when the summand
  718. decreases like a power of `k`; for example when the sum is a pure
  719. zeta function::
  720. >>> from mpmath import *
  721. >>> mp.dps = 25; mp.pretty = True
  722. >>> sumap(lambda k: 1/k**2.5, [1,inf])
  723. 1.34148725725091717975677
  724. >>> zeta(2.5)
  725. 1.34148725725091717975677
  726. >>> sumap(lambda k: 1/(k+1j)**(2.5+2.5j), [1,inf])
  727. (-3.385361068546473342286084 - 0.7432082105196321803869551j)
  728. >>> zeta(2.5+2.5j, 1+1j)
  729. (-3.385361068546473342286084 - 0.7432082105196321803869551j)
  730. If the series is alternating, numerical quadrature along the real
  731. line is likely to give poor results, so it is better to evaluate
  732. the first term symbolically whenever possible:
  733. >>> n=3; z=-0.75
  734. >>> I = expint(n,-log(z))
  735. >>> chop(sumap(lambda k: z**k / k**n, [1,inf], integral=I))
  736. -0.6917036036904594510141448
  737. >>> polylog(n,z)
  738. -0.6917036036904594510141448
  739. """
  740. prec = ctx.prec
  741. try:
  742. ctx.prec += 10
  743. a, b = interval
  744. if b != ctx.inf:
  745. raise ValueError("b should be equal to ctx.inf")
  746. g = lambda x: f(x+a)
  747. if integral is None:
  748. i1, err1 = ctx.quad(g, [0,ctx.inf], error=True)
  749. else:
  750. i1, err1 = integral, 0
  751. j = ctx.j
  752. p = ctx.pi * 2
  753. if ctx._is_real_type(i1):
  754. h = lambda t: -2 * ctx.im(g(j*t)) / ctx.expm1(p*t)
  755. else:
  756. h = lambda t: j*(g(j*t)-g(-j*t)) / ctx.expm1(p*t)
  757. i2, err2 = ctx.quad(h, [0,ctx.inf], error=True)
  758. err = err1+err2
  759. v = i1+i2+0.5*g(ctx.mpf(0))
  760. finally:
  761. ctx.prec = prec
  762. if error:
  763. return +v, err
  764. return +v
  765. @defun
  766. def sumem(ctx, f, interval, tol=None, reject=10, integral=None,
  767. adiffs=None, bdiffs=None, verbose=False, error=False,
  768. _fast_abort=False):
  769. r"""
  770. Uses the Euler-Maclaurin formula to compute an approximation accurate
  771. to within ``tol`` (which defaults to the present epsilon) of the sum
  772. .. math ::
  773. S = \sum_{k=a}^b f(k)
  774. where `(a,b)` are given by ``interval`` and `a` or `b` may be
  775. infinite. The approximation is
  776. .. math ::
  777. S \sim \int_a^b f(x) \,dx + \frac{f(a)+f(b)}{2} +
  778. \sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!}
  779. \left(f^{(2k-1)}(b)-f^{(2k-1)}(a)\right).
  780. The last sum in the Euler-Maclaurin formula is not generally
  781. convergent (a notable exception is if `f` is a polynomial, in
  782. which case Euler-Maclaurin actually gives an exact result).
  783. The summation is stopped as soon as the quotient between two
  784. consecutive terms falls below *reject*. That is, by default
  785. (*reject* = 10), the summation is continued as long as each
  786. term adds at least one decimal.
  787. Although not convergent, convergence to a given tolerance can
  788. often be "forced" if `b = \infty` by summing up to `a+N` and then
  789. applying the Euler-Maclaurin formula to the sum over the range
  790. `(a+N+1, \ldots, \infty)`. This procedure is implemented by
  791. :func:`~mpmath.nsum`.
  792. By default numerical quadrature and differentiation is used.
  793. If the symbolic values of the integral and endpoint derivatives
  794. are known, it is more efficient to pass the value of the
  795. integral explicitly as ``integral`` and the derivatives
  796. explicitly as ``adiffs`` and ``bdiffs``. The derivatives
  797. should be given as iterables that yield
  798. `f(a), f'(a), f''(a), \ldots` (and the equivalent for `b`).
  799. **Examples**
  800. Summation of an infinite series, with automatic and symbolic
  801. integral and derivative values (the second should be much faster)::
  802. >>> from mpmath import *
  803. >>> mp.dps = 50; mp.pretty = True
  804. >>> sumem(lambda n: 1/n**2, [32, inf])
  805. 0.03174336652030209012658168043874142714132886413417
  806. >>> I = mpf(1)/32
  807. >>> D = adiffs=((-1)**n*fac(n+1)*32**(-2-n) for n in range(999))
  808. >>> sumem(lambda n: 1/n**2, [32, inf], integral=I, adiffs=D)
  809. 0.03174336652030209012658168043874142714132886413417
  810. An exact evaluation of a finite polynomial sum::
  811. >>> sumem(lambda n: n**5-12*n**2+3*n, [-100000, 200000])
  812. 10500155000624963999742499550000.0
  813. >>> print(sum(n**5-12*n**2+3*n for n in range(-100000, 200001)))
  814. 10500155000624963999742499550000
  815. """
  816. tol = tol or +ctx.eps
  817. interval = ctx._as_points(interval)
  818. a = ctx.convert(interval[0])
  819. b = ctx.convert(interval[-1])
  820. err = ctx.zero
  821. prev = 0
  822. M = 10000
  823. if a == ctx.ninf: adiffs = (0 for n in xrange(M))
  824. else: adiffs = adiffs or ctx.diffs(f, a)
  825. if b == ctx.inf: bdiffs = (0 for n in xrange(M))
  826. else: bdiffs = bdiffs or ctx.diffs(f, b)
  827. orig = ctx.prec
  828. #verbose = 1
  829. try:
  830. ctx.prec += 10
  831. s = ctx.zero
  832. for k, (da, db) in enumerate(izip(adiffs, bdiffs)):
  833. if k & 1:
  834. term = (db-da) * ctx.bernoulli(k+1) / ctx.factorial(k+1)
  835. mag = abs(term)
  836. if verbose:
  837. print("term", k, "magnitude =", ctx.nstr(mag))
  838. if k > 4 and mag < tol:
  839. s += term
  840. break
  841. elif k > 4 and abs(prev) / mag < reject:
  842. err += mag
  843. if _fast_abort:
  844. return [s, (s, err)][error]
  845. if verbose:
  846. print("Failed to converge")
  847. break
  848. else:
  849. s += term
  850. prev = term
  851. # Endpoint correction
  852. if a != ctx.ninf: s += f(a)/2
  853. if b != ctx.inf: s += f(b)/2
  854. # Tail integral
  855. if verbose:
  856. print("Integrating f(x) from x = %s to %s" % (ctx.nstr(a), ctx.nstr(b)))
  857. if integral:
  858. s += integral
  859. else:
  860. integral, ierr = ctx.quad(f, interval, error=True)
  861. if verbose:
  862. print("Integration error:", ierr)
  863. s += integral
  864. err += ierr
  865. finally:
  866. ctx.prec = orig
  867. if error:
  868. return s, err
  869. else:
  870. return s
  871. @defun
  872. def adaptive_extrapolation(ctx, update, emfun, kwargs):
  873. option = kwargs.get
  874. if ctx._fixed_precision:
  875. tol = option('tol', ctx.eps*2**10)
  876. else:
  877. tol = option('tol', ctx.eps/2**10)
  878. verbose = option('verbose', False)
  879. maxterms = option('maxterms', ctx.dps*10)
  880. method = set(option('method', 'r+s').split('+'))
  881. skip = option('skip', 0)
  882. steps = iter(option('steps', xrange(10, 10**9, 10)))
  883. strict = option('strict')
  884. #steps = (10 for i in xrange(1000))
  885. summer=[]
  886. if 'd' in method or 'direct' in method:
  887. TRY_RICHARDSON = TRY_SHANKS = TRY_EULER_MACLAURIN = False
  888. else:
  889. TRY_RICHARDSON = ('r' in method) or ('richardson' in method)
  890. TRY_SHANKS = ('s' in method) or ('shanks' in method)
  891. TRY_EULER_MACLAURIN = ('e' in method) or \
  892. ('euler-maclaurin' in method)
  893. def init_levin(m):
  894. variant = kwargs.get("levin_variant", "u")
  895. if isinstance(variant, str):
  896. if variant == "all":
  897. variant = ["u", "v", "t"]
  898. else:
  899. variant = [variant]
  900. for s in variant:
  901. L = levin_class(method = m, variant = s)
  902. L.ctx = ctx
  903. L.name = m + "(" + s + ")"
  904. summer.append(L)
  905. if ('l' in method) or ('levin' in method):
  906. init_levin("levin")
  907. if ('sidi' in method):
  908. init_levin("sidi")
  909. if ('a' in method) or ('alternating' in method):
  910. L = cohen_alt_class()
  911. L.ctx = ctx
  912. L.name = "alternating"
  913. summer.append(L)
  914. last_richardson_value = 0
  915. shanks_table = []
  916. index = 0
  917. step = 10
  918. partial = []
  919. best = ctx.zero
  920. orig = ctx.prec
  921. try:
  922. if 'workprec' in kwargs:
  923. ctx.prec = kwargs['workprec']
  924. elif TRY_RICHARDSON or TRY_SHANKS or len(summer)!=0:
  925. ctx.prec = (ctx.prec+10) * 4
  926. else:
  927. ctx.prec += 30
  928. while 1:
  929. if index >= maxterms:
  930. break
  931. # Get new batch of terms
  932. try:
  933. step = next(steps)
  934. except StopIteration:
  935. pass
  936. if verbose:
  937. print("-"*70)
  938. print("Adding terms #%i-#%i" % (index, index+step))
  939. update(partial, xrange(index, index+step))
  940. index += step
  941. # Check direct error
  942. best = partial[-1]
  943. error = abs(best - partial[-2])
  944. if verbose:
  945. print("Direct error: %s" % ctx.nstr(error))
  946. if error <= tol:
  947. return best
  948. # Check each extrapolation method
  949. if TRY_RICHARDSON:
  950. value, maxc = ctx.richardson(partial)
  951. # Convergence
  952. richardson_error = abs(value - last_richardson_value)
  953. if verbose:
  954. print("Richardson error: %s" % ctx.nstr(richardson_error))
  955. # Convergence
  956. if richardson_error <= tol:
  957. return value
  958. last_richardson_value = value
  959. # Unreliable due to cancellation
  960. if ctx.eps*maxc > tol:
  961. if verbose:
  962. print("Ran out of precision for Richardson")
  963. TRY_RICHARDSON = False
  964. if richardson_error < error:
  965. error = richardson_error
  966. best = value
  967. if TRY_SHANKS:
  968. shanks_table = ctx.shanks(partial, shanks_table, randomized=True)
  969. row = shanks_table[-1]
  970. if len(row) == 2:
  971. est1 = row[-1]
  972. shanks_error = 0
  973. else:
  974. est1, maxc, est2 = row[-1], abs(row[-2]), row[-3]
  975. shanks_error = abs(est1-est2)
  976. if verbose:
  977. print("Shanks error: %s" % ctx.nstr(shanks_error))
  978. if shanks_error <= tol:
  979. return est1
  980. if ctx.eps*maxc > tol:
  981. if verbose:
  982. print("Ran out of precision for Shanks")
  983. TRY_SHANKS = False
  984. if shanks_error < error:
  985. error = shanks_error
  986. best = est1
  987. for L in summer:
  988. est, lerror = L.update_psum(partial)
  989. if verbose:
  990. print("%s error: %s" % (L.name, ctx.nstr(lerror)))
  991. if lerror <= tol:
  992. return est
  993. if lerror < error:
  994. error = lerror
  995. best = est
  996. if TRY_EULER_MACLAURIN:
  997. if ctx.mpc(ctx.sign(partial[-1]) / ctx.sign(partial[-2])).ae(-1):
  998. if verbose:
  999. print ("NOT using Euler-Maclaurin: the series appears"
  1000. " to be alternating, so numerical\n quadrature"
  1001. " will most likely fail")
  1002. TRY_EULER_MACLAURIN = False
  1003. else:
  1004. value, em_error = emfun(index, tol)
  1005. value += partial[-1]
  1006. if verbose:
  1007. print("Euler-Maclaurin error: %s" % ctx.nstr(em_error))
  1008. if em_error <= tol:
  1009. return value
  1010. if em_error < error:
  1011. best = value
  1012. finally:
  1013. ctx.prec = orig
  1014. if strict:
  1015. raise ctx.NoConvergence
  1016. if verbose:
  1017. print("Warning: failed to converge to target accuracy")
  1018. return best
  1019. @defun
  1020. def nsum(ctx, f, *intervals, **options):
  1021. r"""
  1022. Computes the sum
  1023. .. math :: S = \sum_{k=a}^b f(k)
  1024. where `(a, b)` = *interval*, and where `a = -\infty` and/or
  1025. `b = \infty` are allowed, or more generally
  1026. .. math :: S = \sum_{k_1=a_1}^{b_1} \cdots
  1027. \sum_{k_n=a_n}^{b_n} f(k_1,\ldots,k_n)
  1028. if multiple intervals are given.
  1029. Two examples of infinite series that can be summed by :func:`~mpmath.nsum`,
  1030. where the first converges rapidly and the second converges slowly,
  1031. are::
  1032. >>> from mpmath import *
  1033. >>> mp.dps = 15; mp.pretty = True
  1034. >>> nsum(lambda n: 1/fac(n), [0, inf])
  1035. 2.71828182845905
  1036. >>> nsum(lambda n: 1/n**2, [1, inf])
  1037. 1.64493406684823
  1038. When appropriate, :func:`~mpmath.nsum` applies convergence acceleration to
  1039. accurately estimate the sums of slowly convergent series. If the series is
  1040. finite, :func:`~mpmath.nsum` currently does not attempt to perform any
  1041. extrapolation, and simply calls :func:`~mpmath.fsum`.
  1042. Multidimensional infinite series are reduced to a single-dimensional
  1043. series over expanding hypercubes; if both infinite and finite dimensions
  1044. are present, the finite ranges are moved innermost. For more advanced
  1045. control over the summation order, use nested calls to :func:`~mpmath.nsum`,
  1046. or manually rewrite the sum as a single-dimensional series.
  1047. **Options**
  1048. *tol*
  1049. Desired maximum final error. Defaults roughly to the
  1050. epsilon of the working precision.
  1051. *method*
  1052. Which summation algorithm to use (described below).
  1053. Default: ``'richardson+shanks'``.
  1054. *maxterms*
  1055. Cancel after at most this many terms. Default: 10*dps.
  1056. *steps*
  1057. An iterable giving the number of terms to add between
  1058. each extrapolation attempt. The default sequence is
  1059. [10, 20, 30, 40, ...]. For example, if you know that
  1060. approximately 100 terms will be required, efficiency might be
  1061. improved by setting this to [100, 10]. Then the first
  1062. extrapolation will be performed after 100 terms, the second
  1063. after 110, etc.
  1064. *verbose*
  1065. Print details about progress.
  1066. *ignore*
  1067. If enabled, any term that raises ``ArithmeticError``
  1068. or ``ValueError`` (e.g. through division by zero) is replaced
  1069. by a zero. This is convenient for lattice sums with
  1070. a singular term near the origin.
  1071. **Methods**
  1072. Unfortunately, an algorithm that can efficiently sum any infinite
  1073. series does not exist. :func:`~mpmath.nsum` implements several different
  1074. algorithms that each work well in different cases. The *method*
  1075. keyword argument selects a method.
  1076. The default method is ``'r+s'``, i.e. both Richardson extrapolation
  1077. and Shanks transformation is attempted. A slower method that
  1078. handles more cases is ``'r+s+e'``. For very high precision
  1079. summation, or if the summation needs to be fast (for example if
  1080. multiple sums need to be evaluated), it is a good idea to
  1081. investigate which one method works best and only use that.
  1082. ``'richardson'`` / ``'r'``:
  1083. Uses Richardson extrapolation. Provides useful extrapolation
  1084. when `f(k) \sim P(k)/Q(k)` or when `f(k) \sim (-1)^k P(k)/Q(k)`
  1085. for polynomials `P` and `Q`. See :func:`~mpmath.richardson` for
  1086. additional information.
  1087. ``'shanks'`` / ``'s'``:
  1088. Uses Shanks transformation. Typically provides useful
  1089. extrapolation when `f(k) \sim c^k` or when successive terms
  1090. alternate signs. Is able to sum some divergent series.
  1091. See :func:`~mpmath.shanks` for additional information.
  1092. ``'levin'`` / ``'l'``:
  1093. Uses the Levin transformation. It performs better than the Shanks
  1094. transformation for logarithmic convergent or alternating divergent
  1095. series. The ``'levin_variant'``-keyword selects the variant. Valid
  1096. choices are "u", "t", "v" and "all" whereby "all" uses all three
  1097. u,t and v simultanously (This is good for performance comparison in
  1098. conjunction with "verbose=True"). Instead of the Levin transform one can
  1099. also use the Sidi-S transform by selecting the method ``'sidi'``.
  1100. See :func:`~mpmath.levin` for additional details.
  1101. ``'alternating'`` / ``'a'``:
  1102. This is the convergence acceleration of alternating series developped
  1103. by Cohen, Villegras and Zagier.
  1104. See :func:`~mpmath.cohen_alt` for additional details.
  1105. ``'euler-maclaurin'`` / ``'e'``:
  1106. Uses the Euler-Maclaurin summation formula to approximate
  1107. the remainder sum by an integral. This requires high-order
  1108. numerical derivatives and numerical integration. The advantage
  1109. of this algorithm is that it works regardless of the
  1110. decay rate of `f`, as long as `f` is sufficiently smooth.
  1111. See :func:`~mpmath.sumem` for additional information.
  1112. ``'direct'`` / ``'d'``:
  1113. Does not perform any extrapolation. This can be used
  1114. (and should only be used for) rapidly convergent series.
  1115. The summation automatically stops when the terms
  1116. decrease below the target tolerance.
  1117. **Basic examples**
  1118. A finite sum::
  1119. >>> nsum(lambda k: 1/k, [1, 6])
  1120. 2.45
  1121. Summation of a series going to negative infinity and a doubly
  1122. infinite series::
  1123. >>> nsum(lambda k: 1/k**2, [-inf, -1])
  1124. 1.64493406684823
  1125. >>> nsum(lambda k: 1/(1+k**2), [-inf, inf])
  1126. 3.15334809493716
  1127. :func:`~mpmath.nsum` handles sums of complex numbers::
  1128. >>> nsum(lambda k: (0.5+0.25j)**k, [0, inf])
  1129. (1.6 + 0.8j)
  1130. The following sum converges very rapidly, so it is most
  1131. efficient to sum it by disabling convergence acceleration::
  1132. >>> mp.dps = 1000
  1133. >>> a = nsum(lambda k: -(-1)**k * k**2 / fac(2*k), [1, inf],
  1134. ... method='direct')
  1135. >>> b = (cos(1)+sin(1))/4
  1136. >>> abs(a-b) < mpf('1e-998')
  1137. True
  1138. **Examples with Richardson extrapolation**
  1139. Richardson extrapolation works well for sums over rational
  1140. functions, as well as their alternating counterparts::
  1141. >>> mp.dps = 50
  1142. >>> nsum(lambda k: 1 / k**3, [1, inf],
  1143. ... method='richardson')
  1144. 1.2020569031595942853997381615114499907649862923405
  1145. >>> zeta(3)
  1146. 1.2020569031595942853997381615114499907649862923405
  1147. >>> nsum(lambda n: (n + 3)/(n**3 + n**2), [1, inf],
  1148. ... method='richardson')
  1149. 2.9348022005446793094172454999380755676568497036204
  1150. >>> pi**2/2-2
  1151. 2.9348022005446793094172454999380755676568497036204
  1152. >>> nsum(lambda k: (-1)**k / k**3, [1, inf],
  1153. ... method='richardson')
  1154. -0.90154267736969571404980362113358749307373971925537
  1155. >>> -3*zeta(3)/4
  1156. -0.90154267736969571404980362113358749307373971925538
  1157. **Examples with Shanks transformation**
  1158. The Shanks transformation works well for geometric series
  1159. and typically provides excellent acceleration for Taylor
  1160. series near the border of their disk of convergence.
  1161. Here we apply it to a series for `\log(2)`, which can be
  1162. seen as the Taylor series for `\log(1+x)` with `x = 1`::
  1163. >>> nsum(lambda k: -(-1)**k/k, [1, inf],
  1164. ... method='shanks')
  1165. 0.69314718055994530941723212145817656807550013436025
  1166. >>> log(2)
  1167. 0.69314718055994530941723212145817656807550013436025
  1168. Here we apply it to a slowly convergent geometric series::
  1169. >>> nsum(lambda k: mpf('0.995')**k, [0, inf],
  1170. ... method='shanks')
  1171. 200.0
  1172. Finally, Shanks' method works very well for alternating series
  1173. where `f(k) = (-1)^k g(k)`, and often does so regardless of
  1174. the exact decay rate of `g(k)`::
  1175. >>> mp.dps = 15
  1176. >>> nsum(lambda k: (-1)**(k+1) / k**1.5, [1, inf],
  1177. ... method='shanks')
  1178. 0.765147024625408
  1179. >>> (2-sqrt(2))*zeta(1.5)/2
  1180. 0.765147024625408
  1181. The following slowly convergent alternating series has no known
  1182. closed-form value. Evaluating the sum a second time at higher
  1183. precision indicates that the value is probably correct::
  1184. >>> nsum(lambda k: (-1)**k / log(k), [2, inf],
  1185. ... method='shanks')
  1186. 0.924299897222939
  1187. >>> mp.dps = 30
  1188. >>> nsum(lambda k: (-1)**k / log(k), [2, inf],
  1189. ... method='shanks')
  1190. 0.92429989722293885595957018136
  1191. **Examples with Levin transformation**
  1192. The following example calculates Euler's constant as the constant term in
  1193. the Laurent expansion of zeta(s) at s=1. This sum converges extremly slow
  1194. because of the logarithmic convergence behaviour of the Dirichlet series
  1195. for zeta.
  1196. >>> mp.dps = 30
  1197. >>> z = mp.mpf(10) ** (-10)
  1198. >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "levin") - 1 / z
  1199. >>> print(mp.chop(a - mp.euler, tol = 1e-10))
  1200. 0.0
  1201. Now we sum the zeta function outside its range of convergence
  1202. (attention: This does not work at the negative integers!):
  1203. >>> mp.dps = 15
  1204. >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
  1205. >>> print(mp.chop(w - mp.zeta(-2-3j)))
  1206. 0.0
  1207. The next example resummates an asymptotic series expansion of an integral
  1208. related to the exponential integral.
  1209. >>> mp.dps = 15
  1210. >>> z = mp.mpf(10)
  1211. >>> # exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
  1212. >>> exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
  1213. >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
  1214. >>> print(mp.chop(w - exact))
  1215. 0.0
  1216. Following highly divergent asymptotic expansion needs some care. Firstly we
  1217. need copious amount of working precision. Secondly the stepsize must not be
  1218. chosen to large, otherwise nsum may miss the point where the Levin transform
  1219. converges and reach the point where only numerical garbage is produced due to
  1220. numerical cancellation.
  1221. >>> mp.dps = 15
  1222. >>> z = mp.mpf(2)
  1223. >>> # exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
  1224. >>> exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
  1225. >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
  1226. ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
  1227. >>> print(mp.chop(w - exact))
  1228. 0.0
  1229. The hypergeoemtric function can also be summed outside its range of convergence:
  1230. >>> mp.dps = 15
  1231. >>> z = 2 + 1j
  1232. >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
  1233. >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
  1234. >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
  1235. >>> print(mp.chop(exact-v))
  1236. 0.0
  1237. **Examples with Cohen's alternating series resummation**
  1238. The next example sums the alternating zeta function:
  1239. >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
  1240. >>> print(mp.chop(v - mp.log(2)))
  1241. 0.0
  1242. The derivate of the alternating zeta function outside its range of
  1243. convergence:
  1244. >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
  1245. >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
  1246. 0.0
  1247. **Examples with Euler-Maclaurin summation**
  1248. The sum in the following example has the wrong rate of convergence
  1249. for either Richardson or Shanks to be effective.
  1250. >>> f = lambda k: log(k)/k**2.5
  1251. >>> mp.dps = 15
  1252. >>> nsum(f, [1, inf], method='euler-maclaurin')
  1253. 0.38734195032621
  1254. >>> -diff(zeta, 2.5)
  1255. 0.38734195032621
  1256. Increasing ``steps`` improves speed at higher precision::
  1257. >>> mp.dps = 50
  1258. >>> nsum(f, [1, inf], method='euler-maclaurin', steps=[250])
  1259. 0.38734195032620997271199237593105101319948228874688
  1260. >>> -diff(zeta, 2.5)
  1261. 0.38734195032620997271199237593105101319948228874688
  1262. **Divergent series**
  1263. The Shanks transformation is able to sum some *divergent*
  1264. series. In particular, it is often able to sum Taylor series
  1265. beyond their radius of convergence (this is due to a relation
  1266. between the Shanks transformation and Pade approximations;
  1267. see :func:`~mpmath.pade` for an alternative way to evaluate divergent
  1268. Taylor series). Furthermore the Levin-transform examples above
  1269. contain some divergent series resummation.
  1270. Here we apply it to `\log(1+x)` far outside the region of
  1271. convergence::
  1272. >>> mp.dps = 50
  1273. >>> nsum(lambda k: -(-9)**k/k, [1, inf],
  1274. ... method='shanks')
  1275. 2.3025850929940456840179914546843642076011014886288
  1276. >>> log(10)
  1277. 2.3025850929940456840179914546843642076011014886288
  1278. A particular type of divergent series that can be summed
  1279. using the Shanks transformation is geometric series.
  1280. The result is the same as using the closed-form formula
  1281. for an infinite geometric series::
  1282. >>> mp.dps = 15
  1283. >>> for n in range(-8, 8):
  1284. ... if n == 1:
  1285. ... continue
  1286. ... print("%s %s %s" % (mpf(n), mpf(1)/(1-n),
  1287. ... nsum(lambda k: n**k, [0, inf], method='shanks')))
  1288. ...
  1289. -8.0 0.111111111111111 0.111111111111111
  1290. -7.0 0.125 0.125
  1291. -6.0 0.142857142857143 0.142857142857143
  1292. -5.0 0.166666666666667 0.166666666666667
  1293. -4.0 0.2 0.2
  1294. -3.0 0.25 0.25
  1295. -2.0 0.333333333333333 0.333333333333333
  1296. -1.0 0.5 0.5
  1297. 0.0 1.0 1.0
  1298. 2.0 -1.0 -1.0
  1299. 3.0 -0.5 -0.5
  1300. 4.0 -0.333333333333333 -0.333333333333333
  1301. 5.0 -0.25 -0.25
  1302. 6.0 -0.2 -0.2
  1303. 7.0 -0.166666666666667 -0.166666666666667
  1304. **Multidimensional sums**
  1305. Any combination of finite and infinite ranges is allowed for the
  1306. summation indices::
  1307. >>> mp.dps = 15
  1308. >>> nsum(lambda x,y: x+y, [2,3], [4,5])
  1309. 28.0
  1310. >>> nsum(lambda x,y: x/2**y, [1,3], [1,inf])
  1311. 6.0
  1312. >>> nsum(lambda x,y: y/2**x, [1,inf], [1,3])
  1313. 6.0
  1314. >>> nsum(lambda x,y,z: z/(2**x*2**y), [1,inf], [1,inf], [3,4])
  1315. 7.0
  1316. >>> nsum(lambda x,y,z: y/(2**x*2**z), [1,inf], [3,4], [1,inf])
  1317. 7.0
  1318. >>> nsum(lambda x,y,z: x/(2**z*2**y), [3,4], [1,inf], [1,inf])
  1319. 7.0
  1320. Some nice examples of double series with analytic solutions or
  1321. reductions to single-dimensional series (see [1])::
  1322. >>> nsum(lambda m, n: 1/2**(m*n), [1,inf], [1,inf])
  1323. 1.60669515241529
  1324. >>> nsum(lambda n: 1/(2**n-1), [1,inf])
  1325. 1.60669515241529
  1326. >>> nsum(lambda i,j: (-1)**(i+j)/(i**2+j**2), [1,inf], [1,inf])
  1327. 0.278070510848213
  1328. >>> pi*(pi-3*ln2)/12
  1329. 0.278070510848213
  1330. >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**2, [1,inf], [1,inf])
  1331. 0.129319852864168
  1332. >>> altzeta(2) - altzeta(1)
  1333. 0.129319852864168
  1334. >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**3, [1,inf], [1,inf])
  1335. 0.0790756439455825
  1336. >>> altzeta(3) - altzeta(2)
  1337. 0.0790756439455825
  1338. >>> nsum(lambda m,n: m**2*n/(3**m*(n*3**m+m*3**n)),
  1339. ... [1,inf], [1,inf])
  1340. 0.28125
  1341. >>> mpf(9)/32
  1342. 0.28125
  1343. >>> nsum(lambda i,j: fac(i-1)*fac(j-1)/fac(i+j),
  1344. ... [1,inf], [1,inf], workprec=400)
  1345. 1.64493406684823
  1346. >>> zeta(2)
  1347. 1.64493406684823
  1348. A hard example of a multidimensional sum is the Madelung constant
  1349. in three dimensions (see [2]). The defining sum converges very
  1350. slowly and only conditionally, so :func:`~mpmath.nsum` is lucky to
  1351. obtain an accurate value through convergence acceleration. The
  1352. second evaluation below uses a much more efficient, rapidly
  1353. convergent 2D sum::
  1354. >>> nsum(lambda x,y,z: (-1)**(x+y+z)/(x*x+y*y+z*z)**0.5,
  1355. ... [-inf,inf], [-inf,inf], [-inf,inf], ignore=True)
  1356. -1.74756459463318
  1357. >>> nsum(lambda x,y: -12*pi*sech(0.5*pi * \
  1358. ... sqrt((2*x+1)**2+(2*y+1)**2))**2, [0,inf], [0,inf])
  1359. -1.74756459463318
  1360. Another example of a lattice sum in 2D::
  1361. >>> nsum(lambda x,y: (-1)**(x+y) / (x**2+y**2), [-inf,inf],
  1362. ... [-inf,inf], ignore=True)
  1363. -2.1775860903036
  1364. >>> -pi*ln2
  1365. -2.1775860903036
  1366. An example of an Eisenstein series::
  1367. >>> nsum(lambda m,n: (m+n*1j)**(-4), [-inf,inf], [-inf,inf],
  1368. ... ignore=True)
  1369. (3.1512120021539 + 0.0j)
  1370. **References**
  1371. 1. [Weisstein]_ http://mathworld.wolfram.com/DoubleSeries.html,
  1372. 2. [Weisstein]_ http://mathworld.wolfram.com/MadelungConstants.html
  1373. """
  1374. infinite, g = standardize(ctx, f, intervals, options)
  1375. if not infinite:
  1376. return +g()
  1377. def update(partial_sums, indices):
  1378. if partial_sums:
  1379. psum = partial_sums[-1]
  1380. else:
  1381. psum = ctx.zero
  1382. for k in indices:
  1383. psum = psum + g(ctx.mpf(k))
  1384. partial_sums.append(psum)
  1385. prec = ctx.prec
  1386. def emfun(point, tol):
  1387. workprec = ctx.prec
  1388. ctx.prec = prec + 10
  1389. v = ctx.sumem(g, [point, ctx.inf], tol, error=1)
  1390. ctx.prec = workprec
  1391. return v
  1392. return +ctx.adaptive_extrapolation(update, emfun, options)
  1393. def wrapsafe(f):
  1394. def g(*args):
  1395. try:
  1396. return f(*args)
  1397. except (ArithmeticError, ValueError):
  1398. return 0
  1399. return g
  1400. def standardize(ctx, f, intervals, options):
  1401. if options.get("ignore"):
  1402. f = wrapsafe(f)
  1403. finite = []
  1404. infinite = []
  1405. for k, points in enumerate(intervals):
  1406. a, b = ctx._as_points(points)
  1407. if b < a:
  1408. return False, (lambda: ctx.zero)
  1409. if a == ctx.ninf or b == ctx.inf:
  1410. infinite.append((k, (a,b)))
  1411. else:
  1412. finite.append((k, (int(a), int(b))))
  1413. if finite:
  1414. f = fold_finite(ctx, f, finite)
  1415. if not infinite:
  1416. return False, lambda: f(*([0]*len(intervals)))
  1417. if infinite:
  1418. f = standardize_infinite(ctx, f, infinite)
  1419. f = fold_infinite(ctx, f, infinite)
  1420. args = [0] * len(intervals)
  1421. d = infinite[0][0]
  1422. def g(k):
  1423. args[d] = k
  1424. return f(*args)
  1425. return True, g
  1426. # backwards compatible itertools.product
  1427. def cartesian_product(args):
  1428. pools = map(tuple, args)
  1429. result = [[]]
  1430. for pool in pools:
  1431. result = [x+[y] for x in result for y in pool]
  1432. for prod in result:
  1433. yield tuple(prod)
  1434. def fold_finite(ctx, f, intervals):
  1435. if not intervals:
  1436. return f
  1437. indices = [v[0] for v in intervals]
  1438. points = [v[1] for v in intervals]
  1439. ranges = [xrange(a, b+1) for (a,b) in points]
  1440. def g(*args):
  1441. args = list(args)
  1442. s = ctx.zero
  1443. for xs in cartesian_product(ranges):
  1444. for dim, x in zip(indices, xs):
  1445. args[dim] = ctx.mpf(x)
  1446. s += f(*args)
  1447. return s
  1448. #print "Folded finite", indices
  1449. return g
  1450. # Standardize each interval to [0,inf]
  1451. def standardize_infinite(ctx, f, intervals):
  1452. if not intervals:
  1453. return f
  1454. dim, [a,b] = intervals[-1]
  1455. if a == ctx.ninf:
  1456. if b == ctx.inf:
  1457. def g(*args):
  1458. args = list(args)
  1459. k = args[dim]
  1460. if k:
  1461. s = f(*args)
  1462. args[dim] = -k
  1463. s += f(*args)
  1464. return s
  1465. else:
  1466. return f(*args)
  1467. else:
  1468. def g(*args):
  1469. args = list(args)
  1470. args[dim] = b - args[dim]
  1471. return f(*args)
  1472. else:
  1473. def g(*args):
  1474. args = list(args)
  1475. args[dim] += a
  1476. return f(*args)
  1477. #print "Standardized infinity along dimension", dim, a, b
  1478. return standardize_infinite(ctx, g, intervals[:-1])
  1479. def fold_infinite(ctx, f, intervals):
  1480. if len(intervals) < 2:
  1481. return f
  1482. dim1 = intervals[-2][0]
  1483. dim2 = intervals[-1][0]
  1484. # Assume intervals are [0,inf] x [0,inf] x ...
  1485. def g(*args):
  1486. args = list(args)
  1487. #args.insert(dim2, None)
  1488. n = int(args[dim1])
  1489. s = ctx.zero
  1490. #y = ctx.mpf(n)
  1491. args[dim2] = ctx.mpf(n) #y
  1492. for x in xrange(n+1):
  1493. args[dim1] = ctx.mpf(x)
  1494. s += f(*args)
  1495. args[dim1] = ctx.mpf(n) #ctx.mpf(n)
  1496. for y in xrange(n):
  1497. args[dim2] = ctx.mpf(y)
  1498. s += f(*args)
  1499. return s
  1500. #print "Folded infinite from", len(intervals), "to", (len(intervals)-1)
  1501. return fold_infinite(ctx, g, intervals[:-1])
  1502. @defun
  1503. def nprod(ctx, f, interval, nsum=False, **kwargs):
  1504. r"""
  1505. Computes the product
  1506. .. math ::
  1507. P = \prod_{k=a}^b f(k)
  1508. where `(a, b)` = *interval*, and where `a = -\infty` and/or
  1509. `b = \infty` are allowed.
  1510. By default, :func:`~mpmath.nprod` uses the same extrapolation methods as
  1511. :func:`~mpmath.nsum`, except applied to the partial products rather than
  1512. partial sums, and the same keyword options as for :func:`~mpmath.nsum` are
  1513. supported. If ``nsum=True``, the product is instead computed via
  1514. :func:`~mpmath.nsum` as
  1515. .. math ::
  1516. P = \exp\left( \sum_{k=a}^b \log(f(k)) \right).
  1517. This is slower, but can sometimes yield better results. It is
  1518. also required (and used automatically) when Euler-Maclaurin
  1519. summation is requested.
  1520. **Examples**
  1521. A simple finite product::
  1522. >>> from mpmath import *
  1523. >>> mp.dps = 25; mp.pretty = True
  1524. >>> nprod(lambda k: k, [1, 4])
  1525. 24.0
  1526. A large number of infinite products have known exact values,
  1527. and can therefore be used as a reference. Most of the following
  1528. examples are taken from MathWorld [1].
  1529. A few infinite products with simple values are::
  1530. >>> 2*nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf])
  1531. 3.141592653589793238462643
  1532. >>> nprod(lambda k: (1+1/k)**2/(1+2/k), [1, inf])
  1533. 2.0
  1534. >>> nprod(lambda k: (k**3-1)/(k**3+1), [2, inf])
  1535. 0.6666666666666666666666667
  1536. >>> nprod(lambda k: (1-1/k**2), [2, inf])
  1537. 0.5
  1538. Next, several more infinite products with more complicated
  1539. values::
  1540. >>> nprod(lambda k: exp(1/k**2), [1, inf]); exp(pi**2/6)
  1541. 5.180668317897115748416626
  1542. 5.180668317897115748416626
  1543. >>> nprod(lambda k: (k**2-1)/(k**2+1), [2, inf]); pi*csch(pi)
  1544. 0.2720290549821331629502366
  1545. 0.2720290549821331629502366
  1546. >>> nprod(lambda k: (k**4-1)/(k**4+1), [2, inf])
  1547. 0.8480540493529003921296502
  1548. >>> pi*sinh(pi)/(cosh(sqrt(2)*pi)-cos(sqrt(2)*pi))
  1549. 0.8480540493529003921296502
  1550. >>> nprod(lambda k: (1+1/k+1/k**2)**2/(1+2/k+3/k**2), [1, inf])
  1551. 1.848936182858244485224927
  1552. >>> 3*sqrt(2)*cosh(pi*sqrt(3)/2)**2*csch(pi*sqrt(2))/pi
  1553. 1.848936182858244485224927
  1554. >>> nprod(lambda k: (1-1/k**4), [2, inf]); sinh(pi)/(4*pi)
  1555. 0.9190194775937444301739244
  1556. 0.9190194775937444301739244
  1557. >>> nprod(lambda k: (1-1/k**6), [2, inf])
  1558. 0.9826842777421925183244759
  1559. >>> (1+cosh(pi*sqrt(3)))/(12*pi**2)
  1560. 0.9826842777421925183244759
  1561. >>> nprod(lambda k: (1+1/k**2), [2, inf]); sinh(pi)/(2*pi)
  1562. 1.838038955187488860347849
  1563. 1.838038955187488860347849
  1564. >>> nprod(lambda n: (1+1/n)**n * exp(1/(2*n)-1), [1, inf])
  1565. 1.447255926890365298959138
  1566. >>> exp(1+euler/2)/sqrt(2*pi)
  1567. 1.447255926890365298959138
  1568. The following two products are equivalent and can be evaluated in
  1569. terms of a Jacobi theta function. Pi can be replaced by any value
  1570. (as long as convergence is preserved)::
  1571. >>> nprod(lambda k: (1-pi**-k)/(1+pi**-k), [1, inf])
  1572. 0.3838451207481672404778686
  1573. >>> nprod(lambda k: tanh(k*log(pi)/2), [1, inf])
  1574. 0.3838451207481672404778686
  1575. >>> jtheta(4,0,1/pi)
  1576. 0.3838451207481672404778686
  1577. This product does not have a known closed form value::
  1578. >>> nprod(lambda k: (1-1/2**k), [1, inf])
  1579. 0.2887880950866024212788997
  1580. A product taken from `-\infty`::
  1581. >>> nprod(lambda k: 1-k**(-3), [-inf,-2])
  1582. 0.8093965973662901095786805
  1583. >>> cosh(pi*sqrt(3)/2)/(3*pi)
  1584. 0.8093965973662901095786805
  1585. A doubly infinite product::
  1586. >>> nprod(lambda k: exp(1/(1+k**2)), [-inf, inf])
  1587. 23.41432688231864337420035
  1588. >>> exp(pi/tanh(pi))
  1589. 23.41432688231864337420035
  1590. A product requiring the use of Euler-Maclaurin summation to compute
  1591. an accurate value::
  1592. >>> nprod(lambda k: (1-1/k**2.5), [2, inf], method='e')
  1593. 0.696155111336231052898125
  1594. **References**
  1595. 1. [Weisstein]_ http://mathworld.wolfram.com/InfiniteProduct.html
  1596. """
  1597. if nsum or ('e' in kwargs.get('method', '')):
  1598. orig = ctx.prec
  1599. try:
  1600. # TODO: we are evaluating log(1+eps) -> eps, which is
  1601. # inaccurate. This currently works because nsum greatly
  1602. # increases the working precision. But we should be
  1603. # more intelligent and handle the precision here.
  1604. ctx.prec += 10
  1605. v = ctx.nsum(lambda n: ctx.ln(f(n)), interval, **kwargs)
  1606. finally:
  1607. ctx.prec = orig
  1608. return +ctx.exp(v)
  1609. a, b = ctx._as_points(interval)
  1610. if a == ctx.ninf:
  1611. if b == ctx.inf:
  1612. return f(0) * ctx.nprod(lambda k: f(-k) * f(k), [1, ctx.inf], **kwargs)
  1613. return ctx.nprod(f, [-b, ctx.inf], **kwargs)
  1614. elif b != ctx.inf:
  1615. return ctx.fprod(f(ctx.mpf(k)) for k in xrange(int(a), int(b)+1))
  1616. a = int(a)
  1617. def update(partial_products, indices):
  1618. if partial_products:
  1619. pprod = partial_products[-1]
  1620. else:
  1621. pprod = ctx.one
  1622. for k in indices:
  1623. pprod = pprod * f(a + ctx.mpf(k))
  1624. partial_products.append(pprod)
  1625. return +ctx.adaptive_extrapolation(update, None, kwargs)
  1626. @defun
  1627. def limit(ctx, f, x, direction=1, exp=False, **kwargs):
  1628. r"""
  1629. Computes an estimate of the limit
  1630. .. math ::
  1631. \lim_{t \to x} f(t)
  1632. where `x` may be finite or infinite.
  1633. For finite `x`, :func:`~mpmath.limit` evaluates `f(x + d/n)` for
  1634. consecutive integer values of `n`, where the approach direction
  1635. `d` may be specified using the *direction* keyword argument.
  1636. For infinite `x`, :func:`~mpmath.limit` evaluates values of
  1637. `f(\mathrm{sign}(x) \cdot n)`.
  1638. If the approach to the limit is not sufficiently fast to give
  1639. an accurate estimate directly, :func:`~mpmath.limit` attempts to find
  1640. the limit using Richardson extrapolation or the Shanks
  1641. transformation. You can select between these methods using
  1642. the *method* keyword (see documentation of :func:`~mpmath.nsum` for
  1643. more information).
  1644. **Options**
  1645. The following options are available with essentially the
  1646. same meaning as for :func:`~mpmath.nsum`: *tol*, *method*, *maxterms*,
  1647. *steps*, *verbose*.
  1648. If the option *exp=True* is set, `f` will be
  1649. sampled at exponentially spaced points `n = 2^1, 2^2, 2^3, \ldots`
  1650. instead of the linearly spaced points `n = 1, 2, 3, \ldots`.
  1651. This can sometimes improve the rate of convergence so that
  1652. :func:`~mpmath.limit` may return a more accurate answer (and faster).
  1653. However, do note that this can only be used if `f`
  1654. supports fast and accurate evaluation for arguments that
  1655. are extremely close to the limit point (or if infinite,
  1656. very large arguments).
  1657. **Examples**
  1658. A basic evaluation of a removable singularity::
  1659. >>> from mpmath import *
  1660. >>> mp.dps = 30; mp.pretty = True
  1661. >>> limit(lambda x: (x-sin(x))/x**3, 0)
  1662. 0.166666666666666666666666666667
  1663. Computing the exponential function using its limit definition::
  1664. >>> limit(lambda n: (1+3/n)**n, inf)
  1665. 20.0855369231876677409285296546
  1666. >>> exp(3)
  1667. 20.0855369231876677409285296546
  1668. A limit for `\pi`::
  1669. >>> f = lambda n: 2**(4*n+1)*fac(n)**4/(2*n+1)/fac(2*n)**2
  1670. >>> limit(f, inf)
  1671. 3.14159265358979323846264338328
  1672. Calculating the coefficient in Stirling's formula::
  1673. >>> limit(lambda n: fac(n) / (sqrt(n)*(n/e)**n), inf)
  1674. 2.50662827463100050241576528481
  1675. >>> sqrt(2*pi)
  1676. 2.50662827463100050241576528481
  1677. Evaluating Euler's constant `\gamma` using the limit representation
  1678. .. math ::
  1679. \gamma = \lim_{n \rightarrow \infty } \left[ \left(
  1680. \sum_{k=1}^n \frac{1}{k} \right) - \log(n) \right]
  1681. (which converges notoriously slowly)::
  1682. >>> f = lambda n: sum([mpf(1)/k for k in range(1,int(n)+1)]) - log(n)
  1683. >>> limit(f, inf)
  1684. 0.577215664901532860606512090082
  1685. >>> +euler
  1686. 0.577215664901532860606512090082
  1687. With default settings, the following limit converges too slowly
  1688. to be evaluated accurately. Changing to exponential sampling
  1689. however gives a perfect result::
  1690. >>> f = lambda x: sqrt(x**3+x**2)/(sqrt(x**3)+x)
  1691. >>> limit(f, inf)
  1692. 0.992831158558330281129249686491
  1693. >>> limit(f, inf, exp=True)
  1694. 1.0
  1695. """
  1696. if ctx.isinf(x):
  1697. direction = ctx.sign(x)
  1698. g = lambda k: f(ctx.mpf(k+1)*direction)
  1699. else:
  1700. direction *= ctx.one
  1701. g = lambda k: f(x + direction/(k+1))
  1702. if exp:
  1703. h = g
  1704. g = lambda k: h(2**k)
  1705. def update(values, indices):
  1706. for k in indices:
  1707. values.append(g(k+1))
  1708. # XXX: steps used by nsum don't work well
  1709. if not 'steps' in kwargs:
  1710. kwargs['steps'] = [10]
  1711. return +ctx.adaptive_extrapolation(update, None, kwargs)