# differentiation.py
  1. from ..libmp.backend import xrange
  2. from .calculus import defun
# Python 2/3 compatibility: dict.iteritems exists only on Python 2;
# on Python 3 the attribute lookup raises AttributeError and we fall
# back to dict.items.
try:
    iteritems = dict.iteritems
except AttributeError:
    iteritems = dict.items
  7. #----------------------------------------------------------------------------#
  8. # Differentiation #
  9. #----------------------------------------------------------------------------#
  10. @defun
  11. def difference(ctx, s, n):
  12. r"""
  13. Given a sequence `(s_k)` containing at least `n+1` items, returns the
  14. `n`-th forward difference,
  15. .. math ::
  16. \Delta^n = \sum_{k=0}^{\infty} (-1)^{k+n} {n \choose k} s_k.
  17. """
  18. n = int(n)
  19. d = ctx.zero
  20. b = (-1) ** (n & 1)
  21. for k in xrange(n+1):
  22. d += b * s[k]
  23. b = (b * (k-n)) // (k+1)
  24. return d
  25. def hsteps(ctx, f, x, n, prec, **options):
  26. singular = options.get('singular')
  27. addprec = options.get('addprec', 10)
  28. direction = options.get('direction', 0)
  29. workprec = (prec+2*addprec) * (n+1)
  30. orig = ctx.prec
  31. try:
  32. ctx.prec = workprec
  33. h = options.get('h')
  34. if h is None:
  35. if options.get('relative'):
  36. hextramag = int(ctx.mag(x))
  37. else:
  38. hextramag = 0
  39. h = ctx.ldexp(1, -prec-addprec-hextramag)
  40. else:
  41. h = ctx.convert(h)
  42. # Directed: steps x, x+h, ... x+n*h
  43. direction = options.get('direction', 0)
  44. if direction:
  45. h *= ctx.sign(direction)
  46. steps = xrange(n+1)
  47. norm = h
  48. # Central: steps x-n*h, x-(n-2)*h ..., x, ..., x+(n-2)*h, x+n*h
  49. else:
  50. steps = xrange(-n, n+1, 2)
  51. norm = (2*h)
  52. # Perturb
  53. if singular:
  54. x += 0.5*h
  55. values = [f(x+k*h) for k in steps]
  56. return values, norm, workprec
  57. finally:
  58. ctx.prec = orig
@defun
def diff(ctx, f, x, n=1, **options):
    r"""
    Numerically computes the derivative of `f`, `f'(x)`, or generally for
    an integer `n \ge 0`, the `n`-th derivative `f^{(n)}(x)`.
    A few basic examples are::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> diff(lambda x: x**2 + x, 1.0)
        3.0
        >>> diff(lambda x: x**2 + x, 1.0, 2)
        2.0
        >>> diff(lambda x: x**2 + x, 1.0, 3)
        0.0
        >>> nprint([diff(exp, 3, n) for n in range(5)])   # exp'(x) = exp(x)
        [20.0855, 20.0855, 20.0855, 20.0855, 20.0855]

    Even more generally, given a tuple of arguments `(x_1, \ldots, x_k)`
    and order `(n_1, \ldots, n_k)`, the partial derivative
    `f^{(n_1,\ldots,n_k)}(x_1,\ldots,x_k)` is evaluated. For example::

        >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (0,1))
        2.75
        >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (1,1))
        3.0

    **Options**

    The following optional keyword arguments are recognized:

    ``method``
        Supported methods are ``'step'`` or ``'quad'``: derivatives may be
        computed using either a finite difference with a small step
        size `h` (default), or numerical quadrature.
    ``direction``
        Direction of finite difference: can be -1 for a left
        difference, 0 for a central difference (default), or +1
        for a right difference; more generally can be any complex number.
    ``addprec``
        Extra precision for `h` used to account for the function's
        sensitivity to perturbations (default = 10).
    ``relative``
        Choose `h` relative to the magnitude of `x`, rather than an
        absolute value; useful for large or tiny `x` (default = False).
    ``h``
        As an alternative to ``addprec`` and ``relative``, manually
        select the step size `h`.
    ``singular``
        If True, evaluation exactly at the point `x` is avoided; this is
        useful for differentiating functions with removable singularities.
        Default = False.
    ``radius``
        Radius of integration contour (with ``method = 'quad'``).
        Default = 0.25. A larger radius typically is faster and more
        accurate, but it must be chosen so that `f` has no
        singularities within the radius from the evaluation point.

    A finite difference requires `n+1` function evaluations and must be
    performed at `(n+1)` times the target precision. Accordingly, `f` must
    support fast evaluation at high precision.

    With integration, a larger number of function evaluations is
    required, but not much extra precision is required. For high order
    derivatives, this method may thus be faster if f is very expensive to
    evaluate at high precision.

    **Further examples**

    The direction option is useful for computing left- or right-sided
    derivatives of nonsmooth functions::

        >>> diff(abs, 0, direction=0)
        0.0
        >>> diff(abs, 0, direction=1)
        1.0
        >>> diff(abs, 0, direction=-1)
        -1.0

    More generally, if the direction is nonzero, a right difference
    is computed where the step size is multiplied by sign(direction).
    For example, with direction=+j, the derivative from the positive
    imaginary direction will be computed::

        >>> diff(abs, 0, direction=j)
        (0.0 - 1.0j)

    With integration, the result may have a small imaginary part
    even if the result is purely real::

        >>> diff(sqrt, 1, method='quad')    # doctest:+ELLIPSIS
        (0.5 - 4.59...e-26j)
        >>> chop(_)
        0.5

    Adding precision to obtain an accurate value::

        >>> diff(cos, 1e-30)
        0.0
        >>> diff(cos, 1e-30, h=0.0001)
        -9.99999998328279e-31
        >>> diff(cos, 1e-30, addprec=100)
        -1.0e-30

    """
    partial = False
    # If n and x are iterable, this is a partial-derivative request;
    # scalar inputs raise TypeError here and fall through unchanged.
    try:
        orders = list(n)
        x = list(x)
        partial = True
    except TypeError:
        pass
    if partial:
        x = [ctx.convert(_) for _ in x]
        return _partial_diff(ctx, f, x, orders, options)
    method = options.get('method', 'step')
    # 0th derivative: just evaluate f directly, unless quadrature was
    # requested or x itself must be avoided ('singular').
    if n == 0 and method != 'quad' and not options.get('singular'):
        return f(ctx.convert(x))
    prec = ctx.prec
    try:
        if method == 'step':
            # Finite difference: sample f on a stencil at raised working
            # precision, then divide the n-th difference by norm**n.
            values, norm, workprec = hsteps(ctx, f, x, n, prec, **options)
            ctx.prec = workprec
            v = ctx.difference(values, n) / norm**n
        elif method == 'quad':
            # Cauchy derivative formula: integrate f(z)/(z-x)**n over a
            # circular contour of the given radius centered at x, then
            # scale by n!/(2*pi).
            ctx.prec += 10
            radius = ctx.convert(options.get('radius', 0.25))
            def g(t):
                rei = radius*ctx.expj(t)
                z = x + rei
                return f(z) / rei**n
            d = ctx.quadts(g, [0, 2*ctx.pi])
            v = d * ctx.factorial(n) / (2*ctx.pi)
        else:
            raise ValueError("unknown method: %r" % method)
    finally:
        # Always restore the caller's precision.
        ctx.prec = prec
    return +v
  179. def _partial_diff(ctx, f, xs, orders, options):
  180. if not orders:
  181. return f()
  182. if not sum(orders):
  183. return f(*xs)
  184. i = 0
  185. for i in range(len(orders)):
  186. if orders[i]:
  187. break
  188. order = orders[i]
  189. def fdiff_inner(*f_args):
  190. def inner(t):
  191. return f(*(f_args[:i] + (t,) + f_args[i+1:]))
  192. return ctx.diff(inner, f_args[i], order, **options)
  193. orders[i] = 0
  194. return _partial_diff(ctx, fdiff_inner, xs, orders, options)
@defun
def diffs(ctx, f, x, n=None, **options):
    r"""
    Returns a generator that yields the sequence of derivatives

    .. math ::

        f(x), f'(x), f''(x), \ldots, f^{(k)}(x), \ldots

    With ``method='step'``, :func:`~mpmath.diffs` uses only `O(k)`
    function evaluations to generate the first `k` derivatives,
    rather than the roughly `O(k^2)` evaluations
    required if one calls :func:`~mpmath.diff` `k` separate times.

    With `n < \infty`, the generator stops as soon as the
    `n`-th derivative has been generated. If the exact number of
    needed derivatives is known in advance, this is further
    slightly more efficient.

    Options are the same as for :func:`~mpmath.diff`.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> nprint(list(diffs(cos, 1, 5)))
        [0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471]
        >>> for i, d in zip(range(6), diffs(cos, 1)):
        ...     print("%s %s" % (i, d))
        ...
        0 0.54030230586814
        1 -0.841470984807897
        2 -0.54030230586814
        3 0.841470984807897
        4 0.54030230586814
        5 -0.841470984807897

    """
    if n is None:
        # No limit: generate derivatives indefinitely.
        n = ctx.inf
    else:
        n = int(n)
    if options.get('method', 'step') != 'step':
        # Non-step methods share no work between orders; just call
        # diff once per derivative.
        k = 0
        while k < n + 1:
            yield ctx.diff(f, x, k, **options)
            k += 1
        return
    singular = options.get('singular')
    # 0th derivative: evaluate f itself (via diff when x must be avoided).
    if singular:
        yield ctx.diff(f, x, 0, singular=True)
    else:
        yield f(ctx.convert(x))
    if n < 1:
        return
    # Orders are produced in batches [A, B): one hsteps stencil of size
    # B serves every difference order below B.
    if n == ctx.inf:
        A, B = 1, 2
    else:
        A, B = 1, n+1
    while 1:
        callprec = ctx.prec
        y, norm, workprec = hsteps(ctx, f, x, B, callprec, **options)
        for k in xrange(A, B):
            try:
                # Combine samples at the raised working precision, then
                # restore the caller's precision before yielding.
                ctx.prec = workprec
                d = ctx.difference(y, k) / norm**k
            finally:
                ctx.prec = callprec
            yield +d
        if k >= n:
            return
        # Grow the batch size geometrically for the unbounded case.
        A, B = B, int(A*1.4+1)
        B = min(B, n)
  260. def iterable_to_function(gen):
  261. gen = iter(gen)
  262. data = []
  263. def f(k):
  264. for i in xrange(len(data), k+1):
  265. data.append(next(gen))
  266. return data[k]
  267. return f
  268. @defun
  269. def diffs_prod(ctx, factors):
  270. r"""
  271. Given a list of `N` iterables or generators yielding
  272. `f_k(x), f'_k(x), f''_k(x), \ldots` for `k = 1, \ldots, N`,
  273. generate `g(x), g'(x), g''(x), \ldots` where
  274. `g(x) = f_1(x) f_2(x) \cdots f_N(x)`.
  275. At high precision and for large orders, this is typically more efficient
  276. than numerical differentiation if the derivatives of each `f_k(x)`
  277. admit direct computation.
  278. Note: This function does not increase the working precision internally,
  279. so guard digits may have to be added externally for full accuracy.
  280. **Examples**
  281. >>> from mpmath import *
  282. >>> mp.dps = 15; mp.pretty = True
  283. >>> f = lambda x: exp(x)*cos(x)*sin(x)
  284. >>> u = diffs(f, 1)
  285. >>> v = mp.diffs_prod([diffs(exp,1), diffs(cos,1), diffs(sin,1)])
  286. >>> next(u); next(v)
  287. 1.23586333600241
  288. 1.23586333600241
  289. >>> next(u); next(v)
  290. 0.104658952245596
  291. 0.104658952245596
  292. >>> next(u); next(v)
  293. -5.96999877552086
  294. -5.96999877552086
  295. >>> next(u); next(v)
  296. -12.4632923122697
  297. -12.4632923122697
  298. """
  299. N = len(factors)
  300. if N == 1:
  301. for c in factors[0]:
  302. yield c
  303. else:
  304. u = iterable_to_function(ctx.diffs_prod(factors[:N//2]))
  305. v = iterable_to_function(ctx.diffs_prod(factors[N//2:]))
  306. n = 0
  307. while 1:
  308. #yield sum(binomial(n,k)*u(n-k)*v(k) for k in xrange(n+1))
  309. s = u(n) * v(0)
  310. a = 1
  311. for k in xrange(1,n+1):
  312. a = a * (n-k+1) // k
  313. s += a * u(n-k) * v(k)
  314. yield s
  315. n += 1
def dpoly(n, _cache={}):
    """
    nth differentiation polynomial for exp (Faa di Bruno's formula).

    Each polynomial is represented as a dict mapping a tuple of
    exponents ``(p1, p2, p3, ...)`` to an integer coefficient, where
    the tuple stands for the product f'(x)**p1 * f''(x)**p2 * ...
    (see the use in diffs_exp below).

    TODO: most exponents are zero, so maybe a sparse representation
    would be better.
    """
    # NOTE: the mutable default argument is intentional -- it acts as a
    # persistent memoization cache shared across all calls.
    if n in _cache:
        return _cache[n]
    if not _cache:
        # Base case: the 0th polynomial is the constant 1
        # (exponent tuple (0,), coefficient 1).
        _cache[0] = {(0,):1}
    R = dpoly(n-1)
    # Pad each exponent tuple with a trailing zero slot for the new
    # highest-order derivative that can appear at order n.
    R = dict((c+(0,),v) for (c,v) in iteritems(R))
    Ra = {}
    # Product-rule part 1: differentiating the outer exp multiplies each
    # term by f', i.e. increments the first exponent.
    for powers, count in iteritems(R):
        powers1 = (powers[0]+1,) + powers[1:]
        if powers1 in Ra:
            Ra[powers1] += count
        else:
            Ra[powers1] = count
    # Product-rule part 2: differentiate each inner factor in turn,
    # turning one f^(k) into an f^(k+1) and scaling the coefficient by
    # the old exponent p.
    for powers, count in iteritems(R):
        if not sum(powers):
            continue
        for k,p in enumerate(powers):
            if p:
                powers2 = powers[:k] + (p-1,powers[k+1]+1) + powers[k+2:]
                if powers2 in Ra:
                    Ra[powers2] += p*count
                else:
                    Ra[powers2] = p*count
    _cache[n] = Ra
    return _cache[n]
  347. @defun
  348. def diffs_exp(ctx, fdiffs):
  349. r"""
  350. Given an iterable or generator yielding `f(x), f'(x), f''(x), \ldots`
  351. generate `g(x), g'(x), g''(x), \ldots` where `g(x) = \exp(f(x))`.
  352. At high precision and for large orders, this is typically more efficient
  353. than numerical differentiation if the derivatives of `f(x)`
  354. admit direct computation.
  355. Note: This function does not increase the working precision internally,
  356. so guard digits may have to be added externally for full accuracy.
  357. **Examples**
  358. The derivatives of the gamma function can be computed using
  359. logarithmic differentiation::
  360. >>> from mpmath import *
  361. >>> mp.dps = 15; mp.pretty = True
  362. >>>
  363. >>> def diffs_loggamma(x):
  364. ... yield loggamma(x)
  365. ... i = 0
  366. ... while 1:
  367. ... yield psi(i,x)
  368. ... i += 1
  369. ...
  370. >>> u = diffs_exp(diffs_loggamma(3))
  371. >>> v = diffs(gamma, 3)
  372. >>> next(u); next(v)
  373. 2.0
  374. 2.0
  375. >>> next(u); next(v)
  376. 1.84556867019693
  377. 1.84556867019693
  378. >>> next(u); next(v)
  379. 2.49292999190269
  380. 2.49292999190269
  381. >>> next(u); next(v)
  382. 3.44996501352367
  383. 3.44996501352367
  384. """
  385. fn = iterable_to_function(fdiffs)
  386. f0 = ctx.exp(fn(0))
  387. yield f0
  388. i = 1
  389. while 1:
  390. s = ctx.mpf(0)
  391. for powers, c in iteritems(dpoly(i)):
  392. s += c*ctx.fprod(fn(k+1)**p for (k,p) in enumerate(powers) if p)
  393. yield s * f0
  394. i += 1
  395. @defun
  396. def differint(ctx, f, x, n=1, x0=0):
  397. r"""
  398. Calculates the Riemann-Liouville differintegral, or fractional
  399. derivative, defined by
  400. .. math ::
  401. \,_{x_0}{\mathbb{D}}^n_xf(x) = \frac{1}{\Gamma(m-n)} \frac{d^m}{dx^m}
  402. \int_{x_0}^{x}(x-t)^{m-n-1}f(t)dt
  403. where `f` is a given (presumably well-behaved) function,
  404. `x` is the evaluation point, `n` is the order, and `x_0` is
  405. the reference point of integration (`m` is an arbitrary
  406. parameter selected automatically).
  407. With `n = 1`, this is just the standard derivative `f'(x)`; with `n = 2`,
  408. the second derivative `f''(x)`, etc. With `n = -1`, it gives
  409. `\int_{x_0}^x f(t) dt`, with `n = -2`
  410. it gives `\int_{x_0}^x \left( \int_{x_0}^t f(u) du \right) dt`, etc.
  411. As `n` is permitted to be any number, this operator generalizes
  412. iterated differentiation and iterated integration to a single
  413. operator with a continuous order parameter.
  414. **Examples**
  415. There is an exact formula for the fractional derivative of a
  416. monomial `x^p`, which may be used as a reference. For example,
  417. the following gives a half-derivative (order 0.5)::
  418. >>> from mpmath import *
  419. >>> mp.dps = 15; mp.pretty = True
  420. >>> x = mpf(3); p = 2; n = 0.5
  421. >>> differint(lambda t: t**p, x, n)
  422. 7.81764019044672
  423. >>> gamma(p+1)/gamma(p-n+1) * x**(p-n)
  424. 7.81764019044672
  425. Another useful test function is the exponential function, whose
  426. integration / differentiation formula easy generalizes
  427. to arbitrary order. Here we first compute a third derivative,
  428. and then a triply nested integral. (The reference point `x_0`
  429. is set to `-\infty` to avoid nonzero endpoint terms.)::
  430. >>> differint(lambda x: exp(pi*x), -1.5, 3)
  431. 0.278538406900792
  432. >>> exp(pi*-1.5) * pi**3
  433. 0.278538406900792
  434. >>> differint(lambda x: exp(pi*x), 3.5, -3, -inf)
  435. 1922.50563031149
  436. >>> exp(pi*3.5) / pi**3
  437. 1922.50563031149
  438. However, for noninteger `n`, the differentiation formula for the
  439. exponential function must be modified to give the same result as the
  440. Riemann-Liouville differintegral::
  441. >>> x = mpf(3.5)
  442. >>> c = pi
  443. >>> n = 1+2*j
  444. >>> differint(lambda x: exp(c*x), x, n)
  445. (-123295.005390743 + 140955.117867654j)
  446. >>> x**(-n) * exp(c)**x * (x*c)**n * gammainc(-n, 0, x*c) / gamma(-n)
  447. (-123295.005390743 + 140955.117867654j)
  448. """
  449. m = max(int(ctx.ceil(ctx.re(n)))+1, 1)
  450. r = m-n-1
  451. g = lambda x: ctx.quad(lambda t: (x-t)**r * f(t), [x0, x])
  452. return ctx.diff(g, x, m) / ctx.gamma(m-n)
  453. @defun
  454. def diffun(ctx, f, n=1, **options):
  455. r"""
  456. Given a function `f`, returns a function `g(x)` that evaluates the nth
  457. derivative `f^{(n)}(x)`::
  458. >>> from mpmath import *
  459. >>> mp.dps = 15; mp.pretty = True
  460. >>> cos2 = diffun(sin)
  461. >>> sin2 = diffun(sin, 4)
  462. >>> cos(1.3), cos2(1.3)
  463. (0.267498828624587, 0.267498828624587)
  464. >>> sin(1.3), sin2(1.3)
  465. (0.963558185417193, 0.963558185417193)
  466. The function `f` must support arbitrary precision evaluation.
  467. See :func:`~mpmath.diff` for additional details and supported
  468. keyword options.
  469. """
  470. if n == 0:
  471. return f
  472. def g(x):
  473. return ctx.diff(f, x, n, **options)
  474. return g
  475. @defun
  476. def taylor(ctx, f, x, n, **options):
  477. r"""
  478. Produces a degree-`n` Taylor polynomial around the point `x` of the
  479. given function `f`. The coefficients are returned as a list.
  480. >>> from mpmath import *
  481. >>> mp.dps = 15; mp.pretty = True
  482. >>> nprint(chop(taylor(sin, 0, 5)))
  483. [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333]
  484. The coefficients are computed using high-order numerical
  485. differentiation. The function must be possible to evaluate
  486. to arbitrary precision. See :func:`~mpmath.diff` for additional details
  487. and supported keyword options.
  488. Note that to evaluate the Taylor polynomial as an approximation
  489. of `f`, e.g. with :func:`~mpmath.polyval`, the coefficients must be reversed,
  490. and the point of the Taylor expansion must be subtracted from
  491. the argument:
  492. >>> p = taylor(exp, 2.0, 10)
  493. >>> polyval(p[::-1], 2.5 - 2.0)
  494. 12.1824939606092
  495. >>> exp(2.5)
  496. 12.1824939607035
  497. """
  498. gen = enumerate(ctx.diffs(f, x, n, **options))
  499. if options.get("chop", True):
  500. return [ctx.chop(d)/ctx.factorial(i) for i, d in gen]
  501. else:
  502. return [d/ctx.factorial(i) for i, d in gen]
@defun
def pade(ctx, a, L, M):
    r"""
    Computes a Pade approximation of degree `(L, M)` to a function.
    Given at least `L+M+1` Taylor coefficients `a` approximating
    a function `A(x)`, :func:`~mpmath.pade` returns coefficients of
    polynomials `P, Q` satisfying

    .. math ::

        P = \sum_{k=0}^L p_k x^k

        Q = \sum_{k=0}^M q_k x^k

        Q_0 = 1

        A(x) Q(x) = P(x) + O(x^{L+M+1})

    `P(x)/Q(x)` can provide a good approximation to an analytic function
    beyond the radius of convergence of its Taylor series (example
    from G.A. Baker 'Essentials of Pade Approximants' Academic Press,
    Ch.1A)::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> one = mpf(1)
        >>> def f(x):
        ...     return sqrt((one + 2*x)/(one + x))
        ...
        >>> a = taylor(f, 0, 6)
        >>> p, q = pade(a, 3, 3)
        >>> x = 10
        >>> polyval(p[::-1], x)/polyval(q[::-1], x)
        1.38169105566806
        >>> f(x)
        1.38169855941551

    """
    # To determine L+1 coefficients of P and M coefficients of Q
    # L+M+1 coefficients of A must be provided
    if len(a) < L+M+1:
        raise ValueError("L+M+1 Coefficients should be provided")
    # Degenerate denominator: P is simply the truncated Taylor series.
    if M == 0:
        if L == 0:
            return [ctx.one], [ctx.one]
        else:
            return a[:L+1], [ctx.one]
    # Solve first
    # a[L]*q[1] + ... + a[L-M+1]*q[M] = -a[L+1]
    # ...
    # a[L+M-1]*q[1] + ... + a[L]*q[M] = -a[L+M]
    A = ctx.matrix(M)
    for j in range(M):
        # Row j, column i holds a[L+j-i]; the bound min(M, L+j+1) keeps
        # the index L+j-i nonnegative (entries that would need a negative
        # index are simply not assigned).
        for i in range(min(M, L+j+1)):
            A[j, i] = a[L+j-i]
    v = -ctx.matrix(a[(L+1):(L+M+1)])
    x = ctx.lu_solve(A, v)
    # Q is normalized by q[0] = 1; the solved vector gives q[1..M].
    q = [ctx.one] + list(x)
    # compute p: p[i] = a[i] + sum_{j=1}^{min(M,i)} q[j]*a[i-j]
    p = [0]*(L+1)
    for i in range(L+1):
        s = a[i]
        for j in range(1, min(M,i) + 1):
            s += q[j]*a[i-j]
        p[i] = s
    return p, q