connector.py

import asyncio
import functools
import random
import sys
import traceback
import warnings
from collections import defaultdict, deque
from contextlib import suppress
from http.cookies import SimpleCookie
from itertools import cycle, islice
from time import monotonic
from types import TracebackType
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    DefaultDict,
    Dict,
    Iterator,
    List,
    Optional,
    Set,
    Tuple,
    Type,
    Union,
    cast,
)

import attr

from . import hdrs, helpers
from .abc import AbstractResolver
from .client_exceptions import (
    ClientConnectionError,
    ClientConnectorCertificateError,
    ClientConnectorError,
    ClientConnectorSSLError,
    ClientHttpProxyError,
    ClientProxyConnectionError,
    ServerFingerprintMismatch,
    cert_errors,
    ssl_errors,
)
from .client_proto import ResponseHandler
from .client_reqrep import ClientRequest, Fingerprint, _merge_ssl_params
from .helpers import PY_36, CeilTimeout, get_running_loop, is_ip_address, noop, sentinel
from .http import RESPONSES
from .locks import EventResultOrError
from .resolver import DefaultResolver

try:
    import ssl

    SSLContext = ssl.SSLContext
except ImportError:  # pragma: no cover
    ssl = None  # type: ignore
    SSLContext = object  # type: ignore


__all__ = ("BaseConnector", "TCPConnector", "UnixConnector", "NamedPipeConnector")


if TYPE_CHECKING:  # pragma: no cover
    from .client import ClientTimeout
    from .client_reqrep import ConnectionKey
    from .tracing import Trace

class _DeprecationWaiter:
    __slots__ = ("_awaitable", "_awaited")

    def __init__(self, awaitable: Awaitable[Any]) -> None:
        self._awaitable = awaitable
        self._awaited = False

    def __await__(self) -> Any:
        self._awaited = True
        return self._awaitable.__await__()

    def __del__(self) -> None:
        if not self._awaited:
            warnings.warn(
                "Connector.close() is a coroutine, "
                "please use await connector.close()",
                DeprecationWarning,
            )

class Connection:

    _source_traceback = None
    _transport = None

    def __init__(
        self,
        connector: "BaseConnector",
        key: "ConnectionKey",
        protocol: ResponseHandler,
        loop: asyncio.AbstractEventLoop,
    ) -> None:
        self._key = key
        self._connector = connector
        self._loop = loop
        self._protocol = protocol  # type: Optional[ResponseHandler]
        self._callbacks = []  # type: List[Callable[[], None]]

        if loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))

    def __repr__(self) -> str:
        return f"Connection<{self._key}>"

    def __del__(self, _warnings: Any = warnings) -> None:
        if self._protocol is not None:
            if PY_36:
                kwargs = {"source": self}
            else:
                kwargs = {}
            _warnings.warn(f"Unclosed connection {self!r}", ResourceWarning, **kwargs)
            if self._loop.is_closed():
                return

            self._connector._release(self._key, self._protocol, should_close=True)

            context = {"client_connection": self, "message": "Unclosed connection"}
            if self._source_traceback is not None:
                context["source_traceback"] = self._source_traceback
            self._loop.call_exception_handler(context)

    @property
    def loop(self) -> asyncio.AbstractEventLoop:
        warnings.warn(
            "connector.loop property is deprecated", DeprecationWarning, stacklevel=2
        )
        return self._loop

    @property
    def transport(self) -> Optional[asyncio.Transport]:
        if self._protocol is None:
            return None
        return self._protocol.transport

    @property
    def protocol(self) -> Optional[ResponseHandler]:
        return self._protocol

    def add_callback(self, callback: Callable[[], None]) -> None:
        if callback is not None:
            self._callbacks.append(callback)

    def _notify_release(self) -> None:
        callbacks, self._callbacks = self._callbacks[:], []

        for cb in callbacks:
            with suppress(Exception):
                cb()

    def close(self) -> None:
        self._notify_release()

        if self._protocol is not None:
            self._connector._release(self._key, self._protocol, should_close=True)
            self._protocol = None

    def release(self) -> None:
        self._notify_release()

        if self._protocol is not None:
            self._connector._release(
                self._key, self._protocol, should_close=self._protocol.should_close
            )
            self._protocol = None

    @property
    def closed(self) -> bool:
        return self._protocol is None or not self._protocol.is_connected()

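
# Illustrative sketch (an assumption, not part of this module): the typical
# lifecycle of a Connection as driven by ClientSession internals. `req`,
# `traces` and `timeout` stand in for objects the session would provide.
async def _example_connection_lifecycle(
    connector: "BaseConnector",
    req: "ClientRequest",
    traces: List["Trace"],
    timeout: "ClientTimeout",
) -> None:
    conn = await connector.connect(req, traces, timeout)
    try:
        assert conn.protocol is not None  # live transport/protocol pair
    finally:
        # release() hands the protocol back to the pool unless it must close;
        # close() always discards it.
        conn.release()
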
class _TransportPlaceholder:
    """placeholder for BaseConnector.connect function"""

    def close(self) -> None:
        pass

class BaseConnector:
    """Base connector class.

    keepalive_timeout - (optional) Keep-alive timeout.
    force_close - Set to True to force close and do reconnect
        after each request (and between redirects).
    limit - The total number of simultaneous connections.
    limit_per_host - Number of simultaneous connections to one host.
    enable_cleanup_closed - Enables clean-up closed ssl transports.
                            Disabled by default.
    loop - Optional event loop.
    """

    _closed = True  # prevent AttributeError in __del__ if ctor was failed
    _source_traceback = None

    # abort transport after 2 seconds (cleanup broken connections)
    _cleanup_closed_period = 2.0

    def __init__(
        self,
        *,
        keepalive_timeout: Union[object, None, float] = sentinel,
        force_close: bool = False,
        limit: int = 100,
        limit_per_host: int = 0,
        enable_cleanup_closed: bool = False,
        loop: Optional[asyncio.AbstractEventLoop] = None,
    ) -> None:

        if force_close:
            if keepalive_timeout is not None and keepalive_timeout is not sentinel:
                raise ValueError(
                    "keepalive_timeout cannot be set if force_close is True"
                )
        else:
            if keepalive_timeout is sentinel:
                keepalive_timeout = 15.0

        loop = get_running_loop(loop)

        self._closed = False
        if loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))

        self._conns = (
            {}
        )  # type: Dict[ConnectionKey, List[Tuple[ResponseHandler, float]]]
        self._limit = limit
        self._limit_per_host = limit_per_host
        self._acquired = set()  # type: Set[ResponseHandler]
        self._acquired_per_host = defaultdict(
            set
        )  # type: DefaultDict[ConnectionKey, Set[ResponseHandler]]
        self._keepalive_timeout = cast(float, keepalive_timeout)
        self._force_close = force_close

        # {host_key: FIFO list of waiters}
        self._waiters = defaultdict(deque)  # type: ignore

        self._loop = loop
        self._factory = functools.partial(ResponseHandler, loop=loop)

        self.cookies = SimpleCookie()  # type: SimpleCookie[str]

        # start keep-alive connection cleanup task
        self._cleanup_handle = None

        # start cleanup closed transports task
        self._cleanup_closed_handle = None
        self._cleanup_closed_disabled = not enable_cleanup_closed
        self._cleanup_closed_transports = []  # type: List[Optional[asyncio.Transport]]
        self._cleanup_closed()

    def __del__(self, _warnings: Any = warnings) -> None:
        if self._closed:
            return
        if not self._conns:
            return

        conns = [repr(c) for c in self._conns.values()]

        self._close()

        if PY_36:
            kwargs = {"source": self}
        else:
            kwargs = {}
        _warnings.warn(f"Unclosed connector {self!r}", ResourceWarning, **kwargs)
        context = {
            "connector": self,
            "connections": conns,
            "message": "Unclosed connector",
        }
        if self._source_traceback is not None:
            context["source_traceback"] = self._source_traceback
        self._loop.call_exception_handler(context)

    def __enter__(self) -> "BaseConnector":
        warnings.warn(
            '"with Connector():" is deprecated, '
            'use "async with Connector():" instead',
            DeprecationWarning,
        )
        return self

    def __exit__(self, *exc: Any) -> None:
        self.close()

    async def __aenter__(self) -> "BaseConnector":
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]] = None,
        exc_value: Optional[BaseException] = None,
        exc_traceback: Optional[TracebackType] = None,
    ) -> None:
        await self.close()

    @property
    def force_close(self) -> bool:
        """Ultimately close connection on releasing if True."""
        return self._force_close

    @property
    def limit(self) -> int:
        """The total number of simultaneous connections.

        If limit is 0 the connector has no limit.
        The default limit size is 100.
        """
        return self._limit

    @property
    def limit_per_host(self) -> int:
        """The limit_per_host for simultaneous connections
        to the same endpoint.

        Endpoints are the same if they have an equal
        (host, port, is_ssl) triple.
        """
        return self._limit_per_host

    def _cleanup(self) -> None:
        """Cleanup unused transports."""
        if self._cleanup_handle:
            self._cleanup_handle.cancel()
            # _cleanup_handle should be unset, otherwise _release() will not
            # recreate it ever!
            self._cleanup_handle = None

        now = self._loop.time()
        timeout = self._keepalive_timeout

        if self._conns:
            connections = {}
            deadline = now - timeout
            for key, conns in self._conns.items():
                alive = []
                for proto, use_time in conns:
                    if proto.is_connected():
                        if use_time - deadline < 0:
                            transport = proto.transport
                            proto.close()
                            if key.is_ssl and not self._cleanup_closed_disabled:
                                self._cleanup_closed_transports.append(transport)
                        else:
                            alive.append((proto, use_time))
                    else:
                        transport = proto.transport
                        proto.close()
                        if key.is_ssl and not self._cleanup_closed_disabled:
                            self._cleanup_closed_transports.append(transport)

                if alive:
                    connections[key] = alive

            self._conns = connections

        if self._conns:
            self._cleanup_handle = helpers.weakref_handle(
                self, "_cleanup", timeout, self._loop
            )

    def _drop_acquired_per_host(
        self, key: "ConnectionKey", val: ResponseHandler
    ) -> None:
        acquired_per_host = self._acquired_per_host
        if key not in acquired_per_host:
            return
        conns = acquired_per_host[key]
        conns.remove(val)
        if not conns:
            del self._acquired_per_host[key]

    def _cleanup_closed(self) -> None:
        """Double confirmation for transport close.

        Some broken ssl servers may leave socket open without proper close.
        """
        if self._cleanup_closed_handle:
            self._cleanup_closed_handle.cancel()

        for transport in self._cleanup_closed_transports:
            if transport is not None:
                transport.abort()

        self._cleanup_closed_transports = []

        if not self._cleanup_closed_disabled:
            self._cleanup_closed_handle = helpers.weakref_handle(
                self, "_cleanup_closed", self._cleanup_closed_period, self._loop
            )

    def close(self) -> Awaitable[None]:
        """Close all opened transports."""
        self._close()
        return _DeprecationWaiter(noop())

    def _close(self) -> None:
        if self._closed:
            return

        self._closed = True

        try:
            if self._loop.is_closed():
                return

            # cancel cleanup task
            if self._cleanup_handle:
                self._cleanup_handle.cancel()

            # cancel cleanup close task
            if self._cleanup_closed_handle:
                self._cleanup_closed_handle.cancel()

            for data in self._conns.values():
                for proto, t0 in data:
                    proto.close()

            for proto in self._acquired:
                proto.close()

            for transport in self._cleanup_closed_transports:
                if transport is not None:
                    transport.abort()

        finally:
            self._conns.clear()
            self._acquired.clear()
            self._waiters.clear()
            self._cleanup_handle = None
            self._cleanup_closed_transports.clear()
            self._cleanup_closed_handle = None

    @property
    def closed(self) -> bool:
        """Is connector closed.

        A readonly property.
        """
        return self._closed

    def _available_connections(self, key: "ConnectionKey") -> int:
        """
        Return the number of available connections, taking into account
        the limit, limit_per_host and the connection key.

        A return value of less than 1 means no connection is available.
        """
        if self._limit:
            # total calc available connections
            available = self._limit - len(self._acquired)

            # check limit per host
            if (
                self._limit_per_host
                and available > 0
                and key in self._acquired_per_host
            ):
                acquired = self._acquired_per_host.get(key)
                assert acquired is not None
                available = self._limit_per_host - len(acquired)

        elif self._limit_per_host and key in self._acquired_per_host:
            # check limit per host
            acquired = self._acquired_per_host.get(key)
            assert acquired is not None
            available = self._limit_per_host - len(acquired)

        else:
            available = 1

        return available

    async def connect(
        self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
    ) -> Connection:
        """Get from pool or create new connection."""
        key = req.connection_key
        available = self._available_connections(key)

        # Wait if there are no available connections or if there are/were
        # waiters (i.e. don't steal connection from a waiter about to wake up)
        if available <= 0 or key in self._waiters:
            fut = self._loop.create_future()

            # This connection will now count towards the limit.
            self._waiters[key].append(fut)

            if traces:
                for trace in traces:
                    await trace.send_connection_queued_start()

            try:
                await fut
            except BaseException as e:
                if key in self._waiters:
                    # remove a waiter even if it was cancelled, normally it's
                    # removed when it's notified
                    try:
                        self._waiters[key].remove(fut)
                    except ValueError:  # fut may no longer be in list
                        pass

                raise e
            finally:
                if key in self._waiters and not self._waiters[key]:
                    del self._waiters[key]

            if traces:
                for trace in traces:
                    await trace.send_connection_queued_end()

        proto = self._get(key)
        if proto is None:
            placeholder = cast(ResponseHandler, _TransportPlaceholder())
            self._acquired.add(placeholder)
            self._acquired_per_host[key].add(placeholder)

            if traces:
                for trace in traces:
                    await trace.send_connection_create_start()

            try:
                proto = await self._create_connection(req, traces, timeout)
                if self._closed:
                    proto.close()
                    raise ClientConnectionError("Connector is closed.")
            except BaseException:
                if not self._closed:
                    self._acquired.remove(placeholder)
                    self._drop_acquired_per_host(key, placeholder)
                    self._release_waiter()
                raise
            else:
                if not self._closed:
                    self._acquired.remove(placeholder)
                    self._drop_acquired_per_host(key, placeholder)

            if traces:
                for trace in traces:
                    await trace.send_connection_create_end()
        else:
            if traces:
                for trace in traces:
                    await trace.send_connection_reuseconn()

        self._acquired.add(proto)
        self._acquired_per_host[key].add(proto)
        return Connection(self, key, proto, self._loop)

    def _get(self, key: "ConnectionKey") -> Optional[ResponseHandler]:
        try:
            conns = self._conns[key]
        except KeyError:
            return None

        t1 = self._loop.time()
        while conns:
            proto, t0 = conns.pop()
            if proto.is_connected():
                if t1 - t0 > self._keepalive_timeout:
                    transport = proto.transport
                    proto.close()
                    # only for SSL transports
                    if key.is_ssl and not self._cleanup_closed_disabled:
                        self._cleanup_closed_transports.append(transport)
                else:
                    if not conns:
                        # The very last connection was reclaimed: drop the key
                        del self._conns[key]
                    return proto
            else:
                transport = proto.transport
                proto.close()
                if key.is_ssl and not self._cleanup_closed_disabled:
                    self._cleanup_closed_transports.append(transport)

        # No more connections: drop the key
        del self._conns[key]
        return None

    def _release_waiter(self) -> None:
        """
        Iterates over all waiters until it finds one that is not finished and
        belongs to a host that has available connections.
        """
        if not self._waiters:
            return

        # Shuffle the keys so we do not iterate in the same order
        # on every call.
        queues = list(self._waiters.keys())
        random.shuffle(queues)

        for key in queues:
            if self._available_connections(key) < 1:
                continue

            waiters = self._waiters[key]
            while waiters:
                waiter = waiters.popleft()
                if not waiter.done():
                    waiter.set_result(None)
                    return

    def _release_acquired(self, key: "ConnectionKey", proto: ResponseHandler) -> None:
        if self._closed:
            # acquired connection is already released on connector closing
            return

        try:
            self._acquired.remove(proto)
            self._drop_acquired_per_host(key, proto)
        except KeyError:  # pragma: no cover
            # this may be a result of a non-deterministic order of object
            # finalization due to garbage collection.
            pass
        else:
            self._release_waiter()

    def _release(
        self,
        key: "ConnectionKey",
        protocol: ResponseHandler,
        *,
        should_close: bool = False,
    ) -> None:
        if self._closed:
            # acquired connection is already released on connector closing
            return

        self._release_acquired(key, protocol)

        if self._force_close:
            should_close = True

        if should_close or protocol.should_close:
            transport = protocol.transport
            protocol.close()

            if key.is_ssl and not self._cleanup_closed_disabled:
                self._cleanup_closed_transports.append(transport)
        else:
            conns = self._conns.get(key)
            if conns is None:
                conns = self._conns[key] = []
            conns.append((protocol, self._loop.time()))

            if self._cleanup_handle is None:
                self._cleanup_handle = helpers.weakref_handle(
                    self, "_cleanup", self._keepalive_timeout, self._loop
                )

    async def _create_connection(
        self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
    ) -> ResponseHandler:
        raise NotImplementedError()

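
# Illustrative sketch (an assumption, not part of this module): a minimal
# BaseConnector subclass only needs to implement _create_connection() and
# return a connected ResponseHandler, typically by driving the event loop's
# create_connection() with the connector's protocol factory. The host and
# port below are placeholders.
class _ExampleLoopbackConnector(BaseConnector):
    async def _create_connection(
        self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
    ) -> ResponseHandler:
        _, proto = await self._loop.create_connection(self._factory, "127.0.0.1", 8080)
        return cast(ResponseHandler, proto)
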
class _DNSCacheTable:
    def __init__(self, ttl: Optional[float] = None) -> None:
        self._addrs_rr = (
            {}
        )  # type: Dict[Tuple[str, int], Tuple[Iterator[Dict[str, Any]], int]]
        self._timestamps = {}  # type: Dict[Tuple[str, int], float]
        self._ttl = ttl

    def __contains__(self, host: object) -> bool:
        return host in self._addrs_rr

    def add(self, key: Tuple[str, int], addrs: List[Dict[str, Any]]) -> None:
        self._addrs_rr[key] = (cycle(addrs), len(addrs))

        if self._ttl:
            self._timestamps[key] = monotonic()

    def remove(self, key: Tuple[str, int]) -> None:
        self._addrs_rr.pop(key, None)

        if self._ttl:
            self._timestamps.pop(key, None)

    def clear(self) -> None:
        self._addrs_rr.clear()
        self._timestamps.clear()

    def next_addrs(self, key: Tuple[str, int]) -> List[Dict[str, Any]]:
        loop, length = self._addrs_rr[key]
        addrs = list(islice(loop, length))
        # Consume one more element to shift internal state of `cycle`
        next(loop)
        return addrs

    def expired(self, key: Tuple[str, int]) -> bool:
        if self._ttl is None:
            return False

        return self._timestamps[key] + self._ttl < monotonic()

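
# Illustrative sketch (an assumption, not part of this module): how the
# round-robin in _DNSCacheTable.next_addrs() behaves. Each call returns the
# whole address list rotated by one entry, because one extra element of the
# underlying `cycle` iterator is consumed per call. The records are
# placeholder values.
def _example_dns_round_robin() -> None:
    table = _DNSCacheTable(ttl=10)
    addrs = [{"host": "10.0.0.1"}, {"host": "10.0.0.2"}]  # hypothetical records
    table.add(("example.com", 80), addrs)
    first = table.next_addrs(("example.com", 80))   # starts with 10.0.0.1
    second = table.next_addrs(("example.com", 80))  # starts with 10.0.0.2
    assert first[0] != second[0]
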
class TCPConnector(BaseConnector):
    """TCP connector.

    verify_ssl - Set to True to check ssl certificates.
    fingerprint - Pass the binary sha256
        digest of the expected certificate in DER format to verify
        that the certificate the server presents matches. See also
        https://en.wikipedia.org/wiki/Transport_Layer_Security#Certificate_pinning
    resolver - Enable DNS lookups and use this
        resolver
    use_dns_cache - Use memory cache for DNS lookups.
    ttl_dns_cache - Max seconds to keep a cached DNS entry; None means forever.
    family - socket address family
    local_addr - local tuple of (host, port) to bind socket to

    keepalive_timeout - (optional) Keep-alive timeout.
    force_close - Set to True to force close and do reconnect
        after each request (and between redirects).
    limit - The total number of simultaneous connections.
    limit_per_host - Number of simultaneous connections to one host.
    enable_cleanup_closed - Enables clean-up closed ssl transports.
                            Disabled by default.
    loop - Optional event loop.
    """
    def __init__(
        self,
        *,
        verify_ssl: bool = True,
        fingerprint: Optional[bytes] = None,
        use_dns_cache: bool = True,
        ttl_dns_cache: Optional[int] = 10,
        family: int = 0,
        ssl_context: Optional[SSLContext] = None,
        ssl: Union[None, bool, Fingerprint, SSLContext] = None,
        local_addr: Optional[Tuple[str, int]] = None,
        resolver: Optional[AbstractResolver] = None,
        keepalive_timeout: Union[None, float, object] = sentinel,
        force_close: bool = False,
        limit: int = 100,
        limit_per_host: int = 0,
        enable_cleanup_closed: bool = False,
        loop: Optional[asyncio.AbstractEventLoop] = None,
    ):
        super().__init__(
            keepalive_timeout=keepalive_timeout,
            force_close=force_close,
            limit=limit,
            limit_per_host=limit_per_host,
            enable_cleanup_closed=enable_cleanup_closed,
            loop=loop,
        )

        self._ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint)
        if resolver is None:
            resolver = DefaultResolver(loop=self._loop)
        self._resolver = resolver

        self._use_dns_cache = use_dns_cache
        self._cached_hosts = _DNSCacheTable(ttl=ttl_dns_cache)
        self._throttle_dns_events = (
            {}
        )  # type: Dict[Tuple[str, int], EventResultOrError]
        self._family = family
        self._local_addr = local_addr

    def close(self) -> Awaitable[None]:
        """Close all ongoing DNS calls."""
        for ev in self._throttle_dns_events.values():
            ev.cancel()

        return super().close()

    @property
    def family(self) -> int:
        """Socket family like AF_INET."""
        return self._family

    @property
    def use_dns_cache(self) -> bool:
        """True if local DNS caching is enabled."""
        return self._use_dns_cache

    def clear_dns_cache(
        self, host: Optional[str] = None, port: Optional[int] = None
    ) -> None:
        """Remove specified host/port or clear all dns local cache."""
        if host is not None and port is not None:
            self._cached_hosts.remove((host, port))
        elif host is not None or port is not None:
            raise ValueError("either both host and port or none of them are allowed")
        else:
            self._cached_hosts.clear()

    async def _resolve_host(
        self, host: str, port: int, traces: Optional[List["Trace"]] = None
    ) -> List[Dict[str, Any]]:
        if is_ip_address(host):
            return [
                {
                    "hostname": host,
                    "host": host,
                    "port": port,
                    "family": self._family,
                    "proto": 0,
                    "flags": 0,
                }
            ]

        if not self._use_dns_cache:

            if traces:
                for trace in traces:
                    await trace.send_dns_resolvehost_start(host)

            res = await self._resolver.resolve(host, port, family=self._family)

            if traces:
                for trace in traces:
                    await trace.send_dns_resolvehost_end(host)

            return res

        key = (host, port)

        if (key in self._cached_hosts) and (not self._cached_hosts.expired(key)):
            # get result early, before any await (#4014)
            result = self._cached_hosts.next_addrs(key)

            if traces:
                for trace in traces:
                    await trace.send_dns_cache_hit(host)
            return result

        if key in self._throttle_dns_events:
            # get event early, before any await (#4014)
            event = self._throttle_dns_events[key]
            if traces:
                for trace in traces:
                    await trace.send_dns_cache_hit(host)
            await event.wait()
        else:
            # update dict early, before any await (#4014)
            self._throttle_dns_events[key] = EventResultOrError(self._loop)
            if traces:
                for trace in traces:
                    await trace.send_dns_cache_miss(host)
            try:

                if traces:
                    for trace in traces:
                        await trace.send_dns_resolvehost_start(host)

                addrs = await self._resolver.resolve(host, port, family=self._family)
                if traces:
                    for trace in traces:
                        await trace.send_dns_resolvehost_end(host)

                self._cached_hosts.add(key, addrs)
                self._throttle_dns_events[key].set()
            except BaseException as e:
                # any DNS exception, independently of the implementation
                # is set for the waiters to raise the same exception.
                self._throttle_dns_events[key].set(exc=e)
                raise
            finally:
                self._throttle_dns_events.pop(key)

        return self._cached_hosts.next_addrs(key)

    async def _create_connection(
        self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
    ) -> ResponseHandler:
        """Create connection.

        Has same keyword arguments as BaseEventLoop.create_connection.
        """
        if req.proxy:
            _, proto = await self._create_proxy_connection(req, traces, timeout)
        else:
            _, proto = await self._create_direct_connection(req, traces, timeout)

        return proto

    @staticmethod
    @functools.lru_cache(None)
    def _make_ssl_context(verified: bool) -> SSLContext:
        if verified:
            return ssl.create_default_context()
        else:
            sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            sslcontext.options |= ssl.OP_NO_SSLv2
            sslcontext.options |= ssl.OP_NO_SSLv3
            try:
                sslcontext.options |= ssl.OP_NO_COMPRESSION
            except AttributeError as attr_err:
                warnings.warn(
                    "{!s}: The Python interpreter is compiled "
                    "against OpenSSL < 1.0.0. Ref: "
                    "https://docs.python.org/3/library/ssl.html"
                    "#ssl.OP_NO_COMPRESSION".format(attr_err),
                )
            sslcontext.set_default_verify_paths()
            return sslcontext

    def _get_ssl_context(self, req: "ClientRequest") -> Optional[SSLContext]:
        """Logic to get the correct SSL context

        0. if req.ssl is false, return None

        1. if ssl_context is specified in req, use it
        2. if _ssl_context is specified in self, use it
        3. otherwise:
            1. if verify_ssl is not specified in req, use self.ssl_context
               (will generate a default context according to self.verify_ssl)
            2. if verify_ssl is True in req, generate a default SSL context
            3. if verify_ssl is False in req, generate a SSL context that
               won't verify
        """
        if req.is_ssl():
            if ssl is None:  # pragma: no cover
                raise RuntimeError("SSL is not supported.")
            sslcontext = req.ssl
            if isinstance(sslcontext, ssl.SSLContext):
                return sslcontext
            if sslcontext is not None:
                # not verified or fingerprinted
                return self._make_ssl_context(False)
            sslcontext = self._ssl
            if isinstance(sslcontext, ssl.SSLContext):
                return sslcontext
            if sslcontext is not None:
                # not verified or fingerprinted
                return self._make_ssl_context(False)
            return self._make_ssl_context(True)
        else:
            return None

    def _get_fingerprint(self, req: "ClientRequest") -> Optional["Fingerprint"]:
        ret = req.ssl
        if isinstance(ret, Fingerprint):
            return ret
        ret = self._ssl
        if isinstance(ret, Fingerprint):
            return ret
        return None

    async def _wrap_create_connection(
        self,
        *args: Any,
        req: "ClientRequest",
        timeout: "ClientTimeout",
        client_error: Type[Exception] = ClientConnectorError,
        **kwargs: Any,
    ) -> Tuple[asyncio.Transport, ResponseHandler]:
        try:
            with CeilTimeout(timeout.sock_connect):
                return await self._loop.create_connection(*args, **kwargs)  # type: ignore  # noqa
        except cert_errors as exc:
            raise ClientConnectorCertificateError(req.connection_key, exc) from exc
        except ssl_errors as exc:
            raise ClientConnectorSSLError(req.connection_key, exc) from exc
        except OSError as exc:
            raise client_error(req.connection_key, exc) from exc

    async def _create_direct_connection(
        self,
        req: "ClientRequest",
        traces: List["Trace"],
        timeout: "ClientTimeout",
        *,
        client_error: Type[Exception] = ClientConnectorError,
    ) -> Tuple[asyncio.Transport, ResponseHandler]:
        sslcontext = self._get_ssl_context(req)
        fingerprint = self._get_fingerprint(req)

        host = req.url.raw_host
        assert host is not None
        port = req.port
        assert port is not None
        host_resolved = asyncio.ensure_future(
            self._resolve_host(host, port, traces=traces), loop=self._loop
        )
        try:
            # Cancelling this lookup should not cancel the underlying lookup
            # or else the cancel event will get broadcast to all the waiters
            # across all connections.
            hosts = await asyncio.shield(host_resolved)
        except asyncio.CancelledError:

            def drop_exception(fut: "asyncio.Future[List[Dict[str, Any]]]") -> None:
                with suppress(Exception, asyncio.CancelledError):
                    fut.result()

            host_resolved.add_done_callback(drop_exception)
            raise
        except OSError as exc:
            # in case of proxy it is not ClientProxyConnectionError
            # it is problem of resolving proxy ip itself
            raise ClientConnectorError(req.connection_key, exc) from exc

        last_exc = None  # type: Optional[Exception]

        for hinfo in hosts:
            host = hinfo["host"]
            port = hinfo["port"]

            try:
                transp, proto = await self._wrap_create_connection(
                    self._factory,
                    host,
                    port,
                    timeout=timeout,
                    ssl=sslcontext,
                    family=hinfo["family"],
                    proto=hinfo["proto"],
                    flags=hinfo["flags"],
                    server_hostname=hinfo["hostname"] if sslcontext else None,
                    local_addr=self._local_addr,
                    req=req,
                    client_error=client_error,
                )
            except ClientConnectorError as exc:
                last_exc = exc
                continue

            if req.is_ssl() and fingerprint:
                try:
                    fingerprint.check(transp)
                except ServerFingerprintMismatch as exc:
                    transp.close()
                    if not self._cleanup_closed_disabled:
                        self._cleanup_closed_transports.append(transp)
                    last_exc = exc
                    continue

            return transp, proto
        else:
            assert last_exc is not None
            raise last_exc

    async def _create_proxy_connection(
        self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
    ) -> Tuple[asyncio.Transport, ResponseHandler]:
        headers = {}  # type: Dict[str, str]
        if req.proxy_headers is not None:
            headers = req.proxy_headers  # type: ignore
        headers[hdrs.HOST] = req.headers[hdrs.HOST]

        url = req.proxy
        assert url is not None
        proxy_req = ClientRequest(
            hdrs.METH_GET,
            url,
            headers=headers,
            auth=req.proxy_auth,
            loop=self._loop,
            ssl=req.ssl,
        )

        # create connection to proxy server
        transport, proto = await self._create_direct_connection(
            proxy_req, [], timeout, client_error=ClientProxyConnectionError
        )

        # Many HTTP proxies have buggy keepalive support. Let's not
        # reuse connection but close it after processing every
        # response.
        proto.force_close()

        auth = proxy_req.headers.pop(hdrs.AUTHORIZATION, None)
        if auth is not None:
            if not req.is_ssl():
                req.headers[hdrs.PROXY_AUTHORIZATION] = auth
            else:
                proxy_req.headers[hdrs.PROXY_AUTHORIZATION] = auth

        if req.is_ssl():
            sslcontext = self._get_ssl_context(req)
            # For HTTPS requests over HTTP proxy
            # we must notify proxy to tunnel connection
            # so we send CONNECT command:
            #   CONNECT www.python.org:443 HTTP/1.1
            #   Host: www.python.org
            #
            # next we must do TLS handshake and so on
            # to do this we must wrap raw socket into secure one
            # asyncio handles this perfectly
            proxy_req.method = hdrs.METH_CONNECT
            proxy_req.url = req.url
            key = attr.evolve(
                req.connection_key, proxy=None, proxy_auth=None, proxy_headers_hash=None
            )
            conn = Connection(self, key, proto, self._loop)
            proxy_resp = await proxy_req.send(conn)
            try:
                protocol = conn._protocol
                assert protocol is not None
                protocol.set_response_params()
                resp = await proxy_resp.start(conn)
            except BaseException:
                proxy_resp.close()
                conn.close()
                raise
            else:
                conn._protocol = None
                conn._transport = None
                try:
                    if resp.status != 200:
                        message = resp.reason
                        if message is None:
                            message = RESPONSES[resp.status][0]
                        raise ClientHttpProxyError(
                            proxy_resp.request_info,
                            resp.history,
                            status=resp.status,
                            message=message,
                            headers=resp.headers,
                        )
                    rawsock = transport.get_extra_info("socket", default=None)
                    if rawsock is None:
                        raise RuntimeError("Transport does not expose socket instance")
                    # Duplicate the socket, so now we can close proxy transport
                    rawsock = rawsock.dup()
                finally:
                    transport.close()

                transport, proto = await self._wrap_create_connection(
                    self._factory,
                    timeout=timeout,
                    ssl=sslcontext,
                    sock=rawsock,
                    server_hostname=req.host,
                    req=req,
                )
            finally:
                proxy_resp.close()

        return transport, proto

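
# Illustrative usage sketch (an assumption: this is application-level code,
# not exercised by this module): one TCPConnector shared through a
# ClientSession, with a global limit, a per-host limit and a DNS cache TTL.
# The limits and URL handling are placeholder choices.
async def _example_tcp_connector_usage(url: str) -> str:
    import aiohttp  # local import to avoid a circular import at module load

    conn = aiohttp.TCPConnector(limit=100, limit_per_host=10, ttl_dns_cache=300)
    async with aiohttp.ClientSession(connector=conn) as session:
        async with session.get(url) as resp:
            return await resp.text()
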
class UnixConnector(BaseConnector):
    """Unix socket connector.

    path - Unix socket path.
    keepalive_timeout - (optional) Keep-alive timeout.
    force_close - Set to True to force close and do reconnect
        after each request (and between redirects).
    limit - The total number of simultaneous connections.
    limit_per_host - Number of simultaneous connections to one host.
    loop - Optional event loop.
    """

    def __init__(
        self,
        path: str,
        force_close: bool = False,
        keepalive_timeout: Union[object, float, None] = sentinel,
        limit: int = 100,
        limit_per_host: int = 0,
        loop: Optional[asyncio.AbstractEventLoop] = None,
    ) -> None:
        super().__init__(
            force_close=force_close,
            keepalive_timeout=keepalive_timeout,
            limit=limit,
            limit_per_host=limit_per_host,
            loop=loop,
        )
        self._path = path

    @property
    def path(self) -> str:
        """Path to unix socket."""
        return self._path

    async def _create_connection(
        self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
    ) -> ResponseHandler:
        try:
            with CeilTimeout(timeout.sock_connect):
                _, proto = await self._loop.create_unix_connection(
                    self._factory, self._path
                )
        except OSError as exc:
            raise ClientConnectorError(req.connection_key, exc) from exc

        return cast(ResponseHandler, proto)

class NamedPipeConnector(BaseConnector):
    """Named pipe connector.

    Only supported by the proactor event loop.
    See also: https://docs.python.org/3.7/library/asyncio-eventloop.html

    path - Windows named pipe path.
    keepalive_timeout - (optional) Keep-alive timeout.
    force_close - Set to True to force close and do reconnect
        after each request (and between redirects).
    limit - The total number of simultaneous connections.
    limit_per_host - Number of simultaneous connections to one host.
    loop - Optional event loop.
    """

    def __init__(
        self,
        path: str,
        force_close: bool = False,
        keepalive_timeout: Union[object, float, None] = sentinel,
        limit: int = 100,
        limit_per_host: int = 0,
        loop: Optional[asyncio.AbstractEventLoop] = None,
    ) -> None:
        super().__init__(
            force_close=force_close,
            keepalive_timeout=keepalive_timeout,
            limit=limit,
            limit_per_host=limit_per_host,
            loop=loop,
        )
        if not isinstance(self._loop, asyncio.ProactorEventLoop):  # type: ignore
            raise RuntimeError(
                "Named Pipes only available in proactor loop under windows"
            )
        self._path = path

    @property
    def path(self) -> str:
        """Path to the named pipe."""
        return self._path

    async def _create_connection(
        self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
    ) -> ResponseHandler:
        try:
            with CeilTimeout(timeout.sock_connect):
                _, proto = await self._loop.create_pipe_connection(  # type: ignore
                    self._factory, self._path
                )
                # the drain is required so that the connection_made is called
                # and transport is set otherwise it is not set before the
                # `assert conn.transport is not None`
                # in client.py's _request method
                await asyncio.sleep(0)
                # other option is to manually set transport like
                # `proto.transport = trans`
        except OSError as exc:
            raise ClientConnectorError(req.connection_key, exc) from exc

        return cast(ResponseHandler, proto)
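

# Illustrative usage sketch (an assumption, not exercised by this module):
# using UnixConnector to talk to an HTTP service exposed on a Unix domain
# socket, e.g. a local daemon. The socket path and URL are placeholders.
async def _example_unix_connector_usage() -> str:
    import aiohttp  # local import to avoid a circular import at module load

    conn = aiohttp.UnixConnector(path="/var/run/example.sock")
    async with aiohttp.ClientSession(connector=conn) as session:
        async with session.get("http://localhost/status") as resp:
            return await resp.text()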