# urllib3/connectionpool.py @ 7:5eb2d5e3bf22
# planemo upload for repository https://toolrepo.galaxytrakr.org/view/jpayne/bioproject_to_srr_2/556cac4fb538
# author: jpayne
# date: Sun, 05 May 2024 23:32:17 -0400
from __future__ import annotations

import errno
import logging
import queue
import sys
import typing
import warnings
import weakref
from socket import timeout as SocketTimeout
from types import TracebackType

from ._base_connection import _TYPE_BODY
from ._collections import HTTPHeaderDict
from ._request_methods import RequestMethods
from .connection import (
    BaseSSLError,
    BrokenPipeError,
    DummyConnection,
    HTTPConnection,
    HTTPException,
    HTTPSConnection,
    ProxyConfig,
    _wrap_proxy_error,
)
from .connection import port_by_scheme as port_by_scheme
from .exceptions import (
    ClosedPoolError,
    EmptyPoolError,
    FullPoolError,
    HostChangedError,
    InsecureRequestWarning,
    LocationValueError,
    MaxRetryError,
    NewConnectionError,
    ProtocolError,
    ProxyError,
    ReadTimeoutError,
    SSLError,
    TimeoutError,
)
from .response import BaseHTTPResponse
from .util.connection import is_connection_dropped
from .util.proxy import connection_requires_http_tunnel
from .util.request import _TYPE_BODY_POSITION, set_file_position
from .util.retry import Retry
from .util.ssl_match_hostname import CertificateError
from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout
from .util.url import Url, _encode_target
from .util.url import _normalize_host as normalize_host
from .util.url import parse_url
from .util.util import to_str

if typing.TYPE_CHECKING:
    import ssl
    from typing import Literal

    from ._base_connection import BaseHTTPConnection, BaseHTTPSConnection

log = logging.getLogger(__name__)

_TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None]

_SelfT = typing.TypeVar("_SelfT")


# Pool objects
class ConnectionPool:
    """
    Base class for all connection pools, such as
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.

    .. note::
       ConnectionPool.urlopen() does not normalize or percent-encode target URIs,
       which is useful if your target server doesn't support percent-encoded
       target URIs.
    """

    scheme: str | None = None
    QueueCls = queue.LifoQueue

    def __init__(self, host: str, port: int | None = None) -> None:
        if not host:
            raise LocationValueError("No host specified.")

        self.host = _normalize_host(host, scheme=self.scheme)
        self.port = port

        # This property uses 'normalize_host()' (not '_normalize_host()')
        # to avoid removing square braces around IPv6 addresses.
        # This value is sent to `HTTPConnection.set_tunnel()` if called
        # because square braces are required for HTTP CONNECT tunneling.
        self._tunnel_host = normalize_host(host, scheme=self.scheme).lower()

    def __str__(self) -> str:
        return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})"

    def __enter__(self: _SelfT) -> _SelfT:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> Literal[False]:
        self.close()
        # Return False to re-raise any potential exceptions
        return False

    def close(self) -> None:
        """
        Close all pooled connections and disable the pool.
        """

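# Illustrative usage sketch (added commentary, not part of urllib3): concrete
# pools defined below, such as HTTPConnectionPool, inherit the context-manager
# protocol from ConnectionPool above, so every pooled connection is closed when
# the ``with`` block exits. The host name and path here are placeholders.
#
#     with HTTPConnectionPool("example.com", maxsize=2) as pool:
#         response = pool.request("GET", "/")
#         print(response.status)
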
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}


class HTTPConnectionPool(ConnectionPool, RequestMethods):
    """
    Thread-safe connection pool for one host.

    :param host:
        Host used for this HTTP Connection (e.g. "localhost"), passed into
        :class:`http.client.HTTPConnection`.

    :param port:
        Port used for this HTTP Connection (None is equivalent to 80), passed
        into :class:`http.client.HTTPConnection`.

    :param timeout:
        Socket timeout in seconds for each individual connection. This can
        be a float or integer, which sets the timeout for the HTTP request,
        or an instance of :class:`urllib3.util.Timeout` which gives you more
        fine-grained control over request timeouts. After the constructor has
        run, this is always a :class:`urllib3.util.Timeout` object.

    :param maxsize:
        Number of connections to save that can be reused. More than 1 is useful
        in multithreaded situations. If ``block`` is set to False, more
        connections will be created but they will not be saved once they've
        been used.

    :param block:
        If set to True, no more than ``maxsize`` connections will be used at
        a time. When no free connections are available, the call will block
        until a connection has been released. This is a useful side effect for
        particular multithreaded situations where one does not want to use more
        than maxsize connections per host to prevent flooding.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param retries:
        Retry configuration to use by default with requests in this pool.

    :param _proxy:
        Parsed proxy URL, should not be used directly, instead, see
        :class:`urllib3.ProxyManager`

    :param _proxy_headers:
        A dictionary with proxy headers, should not be used directly,
        instead, see :class:`urllib3.ProxyManager`

    :param \\**conn_kw:
        Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
        :class:`urllib3.connection.HTTPSConnection` instances.
    """

    scheme = "http"
    ConnectionCls: (
        type[BaseHTTPConnection] | type[BaseHTTPSConnection]
    ) = HTTPConnection

    def __init__(
        self,
        host: str,
        port: int | None = None,
        timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,
        maxsize: int = 1,
        block: bool = False,
        headers: typing.Mapping[str, str] | None = None,
        retries: Retry | bool | int | None = None,
        _proxy: Url | None = None,
        _proxy_headers: typing.Mapping[str, str] | None = None,
        _proxy_config: ProxyConfig | None = None,
        **conn_kw: typing.Any,
    ):
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)

        if not isinstance(timeout, Timeout):
            timeout = Timeout.from_float(timeout)

        if retries is None:
            retries = Retry.DEFAULT

        self.timeout = timeout
        self.retries = retries

        self.pool: queue.LifoQueue[typing.Any] | None = self.QueueCls(maxsize)
        self.block = block

        self.proxy = _proxy
        self.proxy_headers = _proxy_headers or {}
        self.proxy_config = _proxy_config

        # Fill the queue up so that doing get() on it will block properly
        for _ in range(maxsize):
            self.pool.put(None)

        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
        self.conn_kw = conn_kw

        if self.proxy:
            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
            # We cannot know if the user has added default socket options, so we cannot replace the
            # list.
            self.conn_kw.setdefault("socket_options", [])

            self.conn_kw["proxy"] = self.proxy
            self.conn_kw["proxy_config"] = self.proxy_config

        # Do not pass 'self' as a callback to 'finalize'; doing so would keep
        # a reference to self alive forever (a leak). Passing only a reference
        # to the pool lets the garbage collector free self once nobody else
        # holds a reference to it.
        pool = self.pool

        # Close all the HTTPConnections in the pool before the
        # HTTPConnectionPool object is garbage collected.
        weakref.finalize(self, _close_pool_connections, pool)

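    # Added commentary (illustrative values): the queue is pre-filled with
    # ``maxsize`` ``None`` sentinels above, so ``_get_conn()`` can always ``get()``
    # an item; a ``None`` sentinel simply means "no live connection yet, create
    # one". For example, ``HTTPConnectionPool("example.com", maxsize=3)`` starts
    # with a queue of three ``None`` entries and lazily replaces them with real
    # connections as requests are made.
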
    def _new_conn(self) -> BaseHTTPConnection:
        """
        Return a fresh :class:`HTTPConnection`.
        """
        self.num_connections += 1
        log.debug(
            "Starting new HTTP connection (%d): %s:%s",
            self.num_connections,
            self.host,
            self.port or "80",
        )

        conn = self.ConnectionCls(
            host=self.host,
            port=self.port,
            timeout=self.timeout.connect_timeout,
            **self.conn_kw,
        )
        return conn

    def _get_conn(self, timeout: float | None = None) -> BaseHTTPConnection:
        """
        Get a connection. Will return a pooled connection if one is available.

        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.

        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None

        if self.pool is None:
            raise ClosedPoolError(self, "Pool is closed.")

        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError:  # self.pool is None
            raise ClosedPoolError(self, "Pool is closed.") from None  # Defensive:

        except queue.Empty:
            if self.block:
                raise EmptyPoolError(
                    self,
                    "Pool is empty and a new connection can't be opened due to blocking mode.",
                ) from None
            pass  # Oh well, we'll create a new connection then

        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.debug("Resetting dropped connection: %s", self.host)
            conn.close()

        return conn or self._new_conn()

    def _put_conn(self, conn: BaseHTTPConnection | None) -> None:
        """
        Put a connection back into the pool.

        :param conn:
            Connection object for the current host and port as returned by
            :meth:`._new_conn` or :meth:`._get_conn`.

        If the pool is already full, the connection is closed and discarded
        because we exceeded maxsize. If connections are discarded frequently,
        then maxsize should be increased.

        If the pool is closed, then the connection will be closed and discarded.
        """
        if self.pool is not None:
            try:
                self.pool.put(conn, block=False)
                return  # Everything is dandy, done.
            except AttributeError:
                # self.pool is None.
                pass
            except queue.Full:
                # Connection never got put back into the pool, close it.
                if conn:
                    conn.close()

                if self.block:
                    # This should never happen if you got the conn from self._get_conn
                    raise FullPoolError(
                        self,
                        "Pool reached maximum size and no more connections are allowed.",
                    ) from None

                log.warning(
                    "Connection pool is full, discarding connection: %s. Connection pool size: %s",
                    self.host,
                    self.pool.qsize(),
                )

        # Connection never got put back into the pool, close it.
        if conn:
            conn.close()

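    # Added commentary (illustrative, hypothetical snippet): ``_get_conn()`` and
    # ``_put_conn()`` are meant to be used as a pair around each request, e.g.::
    #
    #     conn = pool._get_conn(timeout=pool_timeout)
    #     try:
    #         ...  # make the request on ``conn``
    #     finally:
    #         pool._put_conn(conn)  # returns it, or closes it if the pool is full
    #
    # ``urlopen()`` below follows exactly this pattern via ``release_this_conn``.
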
    def _validate_conn(self, conn: BaseHTTPConnection) -> None:
        """
        Called right before a request is made, after the socket is created.
        """

    def _prepare_proxy(self, conn: BaseHTTPConnection) -> None:
        # Nothing to do for HTTP connections.
        pass

    def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout:
        """Helper that always returns a :class:`urllib3.util.Timeout`"""
        if timeout is _DEFAULT_TIMEOUT:
            return self.timeout.clone()

        if isinstance(timeout, Timeout):
            return timeout.clone()
        else:
            # User passed us an int/float. This is for backwards compatibility,
            # can be removed later
            return Timeout.from_float(timeout)

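    # Added commentary (illustrative values): the three accepted forms resolve
    # as follows, assuming the pool was built with ``timeout=Timeout(connect=2.0)``:
    #
    #     self._get_timeout(_DEFAULT_TIMEOUT)            # clone of the pool default
    #     self._get_timeout(Timeout(connect=1, read=5))  # clone of the given object
    #     self._get_timeout(3.0)                         # Timeout(connect=3.0, read=3.0)
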
    def _raise_timeout(
        self,
        err: BaseSSLError | OSError | SocketTimeout,
        url: str,
        timeout_value: _TYPE_TIMEOUT | None,
    ) -> None:
        """Is the error actually a timeout? Will raise a ReadTimeout or pass"""

        if isinstance(err, SocketTimeout):
            raise ReadTimeoutError(
                self, url, f"Read timed out. (read timeout={timeout_value})"
            ) from err

        # See the above comment about EAGAIN in Python 3.
        if hasattr(err, "errno") and err.errno in _blocking_errnos:
            raise ReadTimeoutError(
                self, url, f"Read timed out. (read timeout={timeout_value})"
            ) from err

    def _make_request(
        self,
        conn: BaseHTTPConnection,
        method: str,
        url: str,
        body: _TYPE_BODY | None = None,
        headers: typing.Mapping[str, str] | None = None,
        retries: Retry | None = None,
        timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
        chunked: bool = False,
        response_conn: BaseHTTPConnection | None = None,
        preload_content: bool = True,
        decode_content: bool = True,
        enforce_content_length: bool = True,
    ) -> BaseHTTPResponse:
        """
        Perform a request on a given urllib connection object taken from our
        pool.

        :param conn:
            a connection from one of our connection pools

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param url:
            The URL to perform the request on.

        :param body:
            Data to send in the request body, either :class:`str`, :class:`bytes`,
            an iterable of :class:`str`/:class:`bytes`, or a file-like object.

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param chunked:
            If True, urllib3 will send the body using chunked transfer
            encoding. Otherwise, urllib3 will send the body using the standard
            content-length form. Defaults to False.

        :param response_conn:
            Set this to ``None`` if you will handle releasing the connection or
            set the connection to have the response release it.

        :param preload_content:
            If True, the response's body will be preloaded during construction.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param enforce_content_length:
            Enforce content length checking. Body returned by server must match
            value of Content-Length header, if present. Otherwise, raise error.
        """
        self.num_requests += 1

        timeout_obj = self._get_timeout(timeout)
        timeout_obj.start_connect()
        conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)

        try:
            # Trigger any extra validation we need to do.
            try:
                self._validate_conn(conn)
            except (SocketTimeout, BaseSSLError) as e:
                self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
                raise

        # _validate_conn() starts the connection to an HTTPS proxy
        # so we need to wrap errors with 'ProxyError' here too.
        except (
            OSError,
            NewConnectionError,
            TimeoutError,
            BaseSSLError,
            CertificateError,
            SSLError,
        ) as e:
            new_e: Exception = e
            if isinstance(e, (BaseSSLError, CertificateError)):
                new_e = SSLError(e)
            # If the connection didn't successfully connect to its proxy, then
            # the error came from the proxy connection attempt, so report it as
            # a proxy error.
            if isinstance(
                new_e, (OSError, NewConnectionError, TimeoutError, SSLError)
            ) and (conn and conn.proxy and not conn.has_connected_to_proxy):
                new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)
            raise new_e

        # conn.request() calls http.client.*.request, not the method in
        # urllib3.request. It also calls makefile (recv) on the socket.
        try:
            conn.request(
                method,
                url,
                body=body,
                headers=headers,
                chunked=chunked,
                preload_content=preload_content,
                decode_content=decode_content,
                enforce_content_length=enforce_content_length,
            )

        # We are swallowing BrokenPipeError (errno.EPIPE) since the server is
        # legitimately able to close the connection after sending a valid response.
        # With this behaviour, the received response is still readable.
        except BrokenPipeError:
            pass
        except OSError as e:
            # MacOS/Linux
            # EPROTOTYPE and ECONNRESET are needed on macOS
            # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
            # Condition changed later to emit ECONNRESET instead of only EPROTOTYPE.
            if e.errno != errno.EPROTOTYPE and e.errno != errno.ECONNRESET:
                raise

        # Reset the timeout for the recv() on the socket
        read_timeout = timeout_obj.read_timeout

        if not conn.is_closed:
            # In Python 3 socket.py will catch EAGAIN and return None when you
            # try and read into the file pointer created by http.client, which
            # instead raises a BadStatusLine exception. Instead of catching
            # the exception and assuming all BadStatusLine exceptions are read
            # timeouts, check for a zero timeout before making the request.
            if read_timeout == 0:
                raise ReadTimeoutError(
                    self, url, f"Read timed out. (read timeout={read_timeout})"
                )
            conn.timeout = read_timeout

        # Receive the response from the server
        try:
            response = conn.getresponse()
        except (BaseSSLError, OSError) as e:
            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
            raise

        # Set properties that are used by the pooling layer.
        response.retries = retries
        response._connection = response_conn  # type: ignore[attr-defined]
        response._pool = self  # type: ignore[attr-defined]

        # emscripten connection doesn't have _http_vsn_str
        http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
        log.debug(
            '%s://%s:%s "%s %s %s" %s %s',
            self.scheme,
            self.host,
            self.port,
            method,
            url,
            # HTTP version
            http_version,
            response.status,
            response.length_remaining,
        )

        return response

    def close(self) -> None:
        """
        Close all pooled connections and disable the pool.
        """
        if self.pool is None:
            return
        # Disable access to the pool
        old_pool, self.pool = self.pool, None

        # Close all the HTTPConnections in the pool.
        _close_pool_connections(old_pool)

    def is_same_host(self, url: str) -> bool:
        """
        Check if the given ``url`` is a member of the same host as this
        connection pool.
        """
        if url.startswith("/"):
            return True

        # TODO: Add optional support for socket.gethostbyname checking.
        scheme, _, host, port, *_ = parse_url(url)
        scheme = scheme or "http"
        if host is not None:
            host = _normalize_host(host, scheme=scheme)

        # Use explicit default port for comparison when none is given
        if self.port and not port:
            port = port_by_scheme.get(scheme)
        elif not self.port and port == port_by_scheme.get(scheme):
            port = None

        return (scheme, host, port) == (self.scheme, self.host, self.port)

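    # Added commentary (hypothetical pool, for illustration): for a pool created
    # as ``HTTPConnectionPool("example.com")`` the following hold, since 80 is
    # the implicit default port for "http":
    #
    #     pool.is_same_host("/index")                    -> True  (relative path)
    #     pool.is_same_host("http://example.com/index")  -> True
    #     pool.is_same_host("http://example.com:80/")    -> True
    #     pool.is_same_host("https://example.com/")      -> False (different scheme)
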
    def urlopen(  # type: ignore[override]
        self,
        method: str,
        url: str,
        body: _TYPE_BODY | None = None,
        headers: typing.Mapping[str, str] | None = None,
        retries: Retry | bool | int | None = None,
        redirect: bool = True,
        assert_same_host: bool = True,
        timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
        pool_timeout: int | None = None,
        release_conn: bool | None = None,
        chunked: bool = False,
        body_pos: _TYPE_BODY_POSITION | None = None,
        preload_content: bool = True,
        decode_content: bool = True,
        **response_kw: typing.Any,
    ) -> BaseHTTPResponse:
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method
           such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param url:
            The URL to perform the request on.

        :param body:
            Data to send in the request body, either :class:`str`, :class:`bytes`,
            an iterable of :class:`str`/:class:`bytes`, or a file-like object.

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            If ``None`` (default) will retry 3 times, see ``Retry.DEFAULT``. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When ``False``, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param bool preload_content:
            If True, the response's body will be preloaded into memory.

        :param bool decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of ``preload_content``
            which defaults to ``True``.

        :param bool chunked:
            If True, urllib3 will send the body using chunked transfer
            encoding. Otherwise, urllib3 will send the body using the standard
            content-length form. Defaults to False.

        :param int body_pos:
            Position to seek to in file-like body in the event of a retry or
            redirect. Typically this won't need to be set because urllib3 will
            auto-populate the value when needed.
        """
        parsed_url = parse_url(url)
        destination_scheme = parsed_url.scheme

        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = preload_content

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        # Ensure that the URL we're connecting to is properly encoded
        if url.startswith("/"):
            url = to_str(_encode_target(url))
        else:
            url = to_str(parsed_url.url)

        conn = None

        # Track whether `conn` needs to be released before
        # returning/raising/recursing. Update this variable if necessary, and
        # leave `release_conn` constant throughout the function. That way, if
        # the function recurses, the original value of `release_conn` will be
        # passed down into the recursive call, and its value will be respected.
        #
        # See issue #651 [1] for details.
        #
        # [1] <https://github.com/urllib3/urllib3/issues/651>
        release_this_conn = release_conn

        http_tunnel_required = connection_requires_http_tunnel(
            self.proxy, self.proxy_config, destination_scheme
        )

        # Merge the proxy headers. Only done when not using HTTP CONNECT. We
        # have to copy the headers dict so we can safely change it without those
        # changes being reflected in anyone else's copy.
        if not http_tunnel_required:
            headers = headers.copy()  # type: ignore[attr-defined]
            headers.update(self.proxy_headers)  # type: ignore[union-attr]

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        # Keep track of whether we cleanly exited the except block. This
        # ensures we do proper cleanup in finally.
        clean_exit = False

        # Rewind body position, if needed. Record current position
        # for future rewinds in the event of a redirect/retry.
        body_pos = set_file_position(body, body_pos)

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout  # type: ignore[assignment]

            # Is this a closed/new connection that requires CONNECT tunnelling?
            if self.proxy is not None and http_tunnel_required and conn.is_closed:
                try:
                    self._prepare_proxy(conn)
                except (BaseSSLError, OSError, SocketTimeout) as e:
                    self._raise_timeout(
                        err=e, url=self.proxy.url, timeout_value=conn.timeout
                    )
                    raise

            # If we're going to release the connection in ``finally:``, then
            # the response doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = conn if not release_conn else None

            # Make the request on the HTTPConnection object
            response = self._make_request(
                conn,
                method,
                url,
                timeout=timeout_obj,
                body=body,
                headers=headers,
                chunked=chunked,
                retries=retries,
                response_conn=response_conn,
                preload_content=preload_content,
                decode_content=decode_content,
                **response_kw,
            )

            # Everything went great!
            clean_exit = True

        except EmptyPoolError:
            # Didn't get a connection from the pool, no need to clean up
            clean_exit = True
            release_this_conn = False
            raise

        except (
            TimeoutError,
            HTTPException,
            OSError,
            ProtocolError,
            BaseSSLError,
            SSLError,
            CertificateError,
            ProxyError,
        ) as e:
            # Discard the connection for these exceptions. It will be
            # replaced during the next _get_conn() call.
            clean_exit = False
            new_e: Exception = e
            if isinstance(e, (BaseSSLError, CertificateError)):
                new_e = SSLError(e)
            if isinstance(
                new_e,
                (
                    OSError,
                    NewConnectionError,
                    TimeoutError,
                    SSLError,
                    HTTPException,
                ),
            ) and (conn and conn.proxy and not conn.has_connected_to_proxy):
                new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)
            elif isinstance(new_e, (OSError, HTTPException)):
                new_e = ProtocolError("Connection aborted.", new_e)

            retries = retries.increment(
                method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]
            )
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if not clean_exit:
                # We hit some kind of exception, handled or otherwise. We need
                # to throw the connection away unless explicitly told not to.
                # Close the connection, set the variable to None, and make sure
                # we put the None back in the pool to avoid leaking it.
                if conn:
                    conn.close()
                    conn = None
                release_this_conn = True

            if release_this_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning(
                "Retrying (%r) after connection broken by '%r': %s", retries, err, url
            )
            return self.urlopen(
                method,
                url,
                body,
                headers,
                retries,
                redirect,
                assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                preload_content=preload_content,
                decode_content=decode_content,
                **response_kw,
            )

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                # Change the method according to RFC 9110, Section 15.4.4.
                method = "GET"
                # And lose the body not to transfer anything sensitive.
                body = None
                headers = HTTPHeaderDict(headers)._prepare_for_method_change()

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    response.drain_conn()
                    raise
                return response

            response.drain_conn()
            retries.sleep_for_retry(response)
            log.debug("Redirecting %s -> %s", url, redirect_location)
            return self.urlopen(
                method,
                redirect_location,
                body,
                headers,
                retries=retries,
                redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                preload_content=preload_content,
                decode_content=decode_content,
                **response_kw,
            )

        # Check if we should retry the HTTP response.
        has_retry_after = bool(response.headers.get("Retry-After"))
        if retries.is_retry(method, response.status, has_retry_after):
            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_status:
                    response.drain_conn()
                    raise
                return response

            response.drain_conn()
            retries.sleep(response)
            log.debug("Retry: %s", url)
            return self.urlopen(
                method,
                url,
                body,
                headers,
                retries=retries,
                redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                preload_content=preload_content,
                decode_content=decode_content,
                **response_kw,
            )

        return response

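# The sketch below is added commentary, not part of urllib3. It shows one way
# the urlopen() machinery above is typically driven: an explicit Retry policy,
# a per-request Timeout, and manual streaming with preload_content=False. The
# host name and path are placeholders.
def _example_urlopen_usage() -> None:  # pragma: no cover - illustrative only
    pool = HTTPConnectionPool("example.com", maxsize=4, block=True)
    try:
        response = pool.urlopen(
            "GET",
            "/data",
            retries=Retry(total=3, backoff_factor=0.5),
            timeout=Timeout(connect=2.0, read=10.0),
            preload_content=False,
        )
        for chunk in response.stream(1024):
            ...  # process each chunk as it arrives
        response.release_conn()  # hand the connection back to the pool
    finally:
        pool.close()

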
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.

    :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.

    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
    ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
    is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
    the connection socket into an SSL socket.
    """

    scheme = "https"
    ConnectionCls: type[BaseHTTPSConnection] = HTTPSConnection

    def __init__(
        self,
        host: str,
        port: int | None = None,
        timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,
        maxsize: int = 1,
        block: bool = False,
        headers: typing.Mapping[str, str] | None = None,
        retries: Retry | bool | int | None = None,
        _proxy: Url | None = None,
        _proxy_headers: typing.Mapping[str, str] | None = None,
        key_file: str | None = None,
        cert_file: str | None = None,
        cert_reqs: int | str | None = None,
        key_password: str | None = None,
        ca_certs: str | None = None,
        ssl_version: int | str | None = None,
        ssl_minimum_version: ssl.TLSVersion | None = None,
        ssl_maximum_version: ssl.TLSVersion | None = None,
        assert_hostname: str | Literal[False] | None = None,
        assert_fingerprint: str | None = None,
        ca_cert_dir: str | None = None,
        **conn_kw: typing.Any,
    ) -> None:
        super().__init__(
            host,
            port,
            timeout,
            maxsize,
            block,
            headers,
            retries,
            _proxy,
            _proxy_headers,
            **conn_kw,
        )

        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.key_password = key_password
        self.ca_certs = ca_certs
        self.ca_cert_dir = ca_cert_dir
        self.ssl_version = ssl_version
        self.ssl_minimum_version = ssl_minimum_version
        self.ssl_maximum_version = ssl_maximum_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint

    def _prepare_proxy(self, conn: HTTPSConnection) -> None:  # type: ignore[override]
        """Establishes a tunnel connection through HTTP CONNECT."""
        if self.proxy and self.proxy.scheme == "https":
            tunnel_scheme = "https"
        else:
            tunnel_scheme = "http"

        conn.set_tunnel(
            scheme=tunnel_scheme,
            host=self._tunnel_host,
            port=self.port,
            headers=self.proxy_headers,
        )
        conn.connect()

    def _new_conn(self) -> BaseHTTPSConnection:
        """
        Return a fresh :class:`urllib3.connection.HTTPSConnection`.
        """
        self.num_connections += 1
        log.debug(
            "Starting new HTTPS connection (%d): %s:%s",
            self.num_connections,
            self.host,
            self.port or "443",
        )

        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:  # type: ignore[comparison-overlap]
            raise ImportError(
                "Can't connect to HTTPS URL because the SSL module is not available."
            )

        actual_host: str = self.host
        actual_port = self.port
        if self.proxy is not None and self.proxy.host is not None:
            actual_host = self.proxy.host
            actual_port = self.proxy.port

        return self.ConnectionCls(
            host=actual_host,
            port=actual_port,
            timeout=self.timeout.connect_timeout,
            cert_file=self.cert_file,
            key_file=self.key_file,
            key_password=self.key_password,
            cert_reqs=self.cert_reqs,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            assert_hostname=self.assert_hostname,
            assert_fingerprint=self.assert_fingerprint,
            ssl_version=self.ssl_version,
            ssl_minimum_version=self.ssl_minimum_version,
            ssl_maximum_version=self.ssl_maximum_version,
            **self.conn_kw,
        )

    def _validate_conn(self, conn: BaseHTTPConnection) -> None:
        """
        Called right before a request is made, after the socket is created.
        """
        super()._validate_conn(conn)

        # Force connect early to allow us to validate the connection.
        if conn.is_closed:
            conn.connect()

        # TODO revise this, see https://github.com/urllib3/urllib3/issues/2791
        if not conn.is_verified and not conn.proxy_is_verified:
            warnings.warn(
                (
                    f"Unverified HTTPS request is being made to host '{conn.host}'. "
                    "Adding certificate verification is strongly advised. See: "
                    "https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
                    "#tls-warnings"
                ),
                InsecureRequestWarning,
            )

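# The sketch below is added commentary, not part of urllib3. It illustrates the
# verification-related knobs documented above; the host, CA bundle path, and
# request path are placeholders.
def _example_https_pool_usage() -> None:  # pragma: no cover - illustrative only
    pool = HTTPSConnectionPool(
        "example.com",
        port=443,
        cert_reqs="CERT_REQUIRED",
        ca_certs="/path/to/ca-bundle.pem",
    )
    try:
        response = pool.request("GET", "/")
        print(response.status)
    finally:
        pool.close()

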
def connection_from_url(url: str, **kw: typing.Any) -> HTTPConnectionPool:
    """
    Given a url, return a :class:`.ConnectionPool` instance of its host.

    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating a :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param \\**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.

    Example::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, _, host, port, *_ = parse_url(url)
    scheme = scheme or "http"
    port = port or port_by_scheme.get(scheme, 80)
    if scheme == "https":
        return HTTPSConnectionPool(host, port=port, **kw)  # type: ignore[arg-type]
    else:
        return HTTPConnectionPool(host, port=port, **kw)  # type: ignore[arg-type]

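# Added commentary (hypothetical URLs): the scheme decides which pool class is
# returned, e.g. connection_from_url("https://example.com:8443/") yields an
# HTTPSConnectionPool bound to port 8443, while connection_from_url("http://example.com")
# yields an HTTPConnectionPool on the default port 80.
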
@typing.overload
def _normalize_host(host: None, scheme: str | None) -> None:
    ...


@typing.overload
def _normalize_host(host: str, scheme: str | None) -> str:
    ...


def _normalize_host(host: str | None, scheme: str | None) -> str | None:
    """
    Normalize hosts for comparisons and use with sockets.
    """

    host = normalize_host(host, scheme)

    # httplib doesn't like it when we include brackets in IPv6 addresses
    # Specifically, if we include brackets but also pass the port then
    # httplib crazily doubles up the square brackets on the Host header.
    # Instead, we need to make sure we never pass ``None`` as the port.
    # However, for backward compatibility reasons we can't actually
    # *assert* that. See http://bugs.python.org/issue28539
    if host and host.startswith("[") and host.endswith("]"):
        host = host[1:-1]
    return host

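# Added commentary (illustrative values): _normalize_host("[::1]", "http")
# returns "::1" (brackets stripped for the socket layer), while the pool's
# _tunnel_host keeps the bracketed form because HTTP CONNECT requires it.
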
def _url_from_pool(
    pool: HTTPConnectionPool | HTTPSConnectionPool, path: str | None = None
) -> str:
    """Returns the URL from a given connection pool. This is mainly used for testing and logging."""
    return Url(scheme=pool.scheme, host=pool.host, port=pool.port, path=path).url


def _close_pool_connections(pool: queue.LifoQueue[typing.Any]) -> None:
    """Drains a queue of connections and closes each one."""
    try:
        while True:
            conn = pool.get(block=False)
            if conn:
                conn.close()
    except queue.Empty:
        pass  # Done.
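

# Added commentary (illustrative sketch, not part of urllib3): a quick check of
# the two helpers above using a throwaway pool; no network traffic is involved
# because no request is ever made, and the host name is a placeholder.
def _example_helpers() -> None:  # pragma: no cover - illustrative only
    pool = HTTPConnectionPool("example.com", port=8080, maxsize=2)
    assert _url_from_pool(pool, "/status") == "http://example.com:8080/status"
    # Draining the internal queue closes any live connections; here the queue
    # only holds None sentinels, so nothing needs closing.
    _close_pool_connections(pool.pool)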