summaryrefslogtreecommitdiff
path: root/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util
diff options
context:
space:
mode:
authorShubham Saini <shubham6405@gmail.com>2018-12-11 10:01:23 +0000
committerShubham Saini <shubham6405@gmail.com>2018-12-11 10:01:23 +0000
commit68df54d6629ec019142eb149dd037774f2d11e7c (patch)
tree345bc22d46b4e01a4ba8303b94278952a4ed2b9e /venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util
First commit
Diffstat (limited to 'venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util')
-rw-r--r--venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/__init__.py54
-rw-r--r--venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/connection.py130
-rw-r--r--venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/request.py118
-rw-r--r--venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/response.py81
-rw-r--r--venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/retry.py401
-rw-r--r--venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/selectors.py581
-rw-r--r--venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/ssl_.py341
-rw-r--r--venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/timeout.py242
-rw-r--r--venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/url.py230
-rw-r--r--venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/wait.py40
10 files changed, 2218 insertions, 0 deletions
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/__init__.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/__init__.py
new file mode 100644
index 0000000..a84b005
--- /dev/null
+++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/__init__.py
@@ -0,0 +1,54 @@
1from __future__ import absolute_import
2# For backwards compatibility, provide imports that used to be here.
3from .connection import is_connection_dropped
4from .request import make_headers
5from .response import is_fp_closed
6from .ssl_ import (
7 SSLContext,
8 HAS_SNI,
9 IS_PYOPENSSL,
10 IS_SECURETRANSPORT,
11 assert_fingerprint,
12 resolve_cert_reqs,
13 resolve_ssl_version,
14 ssl_wrap_socket,
15)
16from .timeout import (
17 current_time,
18 Timeout,
19)
20
21from .retry import Retry
22from .url import (
23 get_host,
24 parse_url,
25 split_first,
26 Url,
27)
28from .wait import (
29 wait_for_read,
30 wait_for_write
31)
32
# Explicit public API of ``urllib3.util``; mirrors the names re-exported
# above so ``from urllib3.util import *`` stays backwards compatible.
__all__ = (
    'HAS_SNI',
    'IS_PYOPENSSL',
    'IS_SECURETRANSPORT',
    'SSLContext',
    'Retry',
    'Timeout',
    'Url',
    'assert_fingerprint',
    'current_time',
    'is_connection_dropped',
    'is_fp_closed',
    'get_host',
    'parse_url',
    'make_headers',
    'resolve_cert_reqs',
    'resolve_ssl_version',
    'split_first',
    'ssl_wrap_socket',
    'wait_for_read',
    'wait_for_write'
)
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/connection.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/connection.py
new file mode 100644
index 0000000..31ecd83
--- /dev/null
+++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/connection.py
@@ -0,0 +1,130 @@
1from __future__ import absolute_import
2import socket
3from .wait import wait_for_read
4from .selectors import HAS_SELECT, SelectorError
5
6
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    sock = getattr(conn, 'sock', False)
    if sock is False:  # Platform-specific: AppEngine exposes no socket.
        return False
    if sock is None:  # httplib has already torn the connection down.
        return True

    if not HAS_SELECT:
        # No selector backend available, so we cannot probe the socket.
        return False

    try:
        # Zero-timeout probe: a "readable" idle socket means either the
        # peer closed it or unexpected data is pending — drop it either way.
        readable = wait_for_read(sock, timeout=0.0)
    except SelectorError:
        return True
    return bool(readable)
30
31
32# This function is copied from socket.py in the Python 2.7 standard
33# library test suite. Added to its signature is only `socket_options`.
34# One additional modification is that we avoid binding to IPv6 servers
35# discovered in DNS if the system doesn't have IPv6 functionality.
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
# One additional modification is that we avoid binding to IPv6 servers
# discovered in DNS if the system doesn't have IPv6 functionality.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None, socket_options=None):
    """Connect to *address* and return the socket object.

    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used. If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """
    host, port = address
    if host.startswith('['):
        # Strip brackets from an IPv6 literal such as "[::1]".
        host = host.strip('[]')

    # allowed_gai_family() decides whether getaddrinfo may return IPv6
    # records; the stdlib version always returns every record.
    family = allowed_gai_family()

    last_error = None
    for af, socktype, proto, canonname, sa in socket.getaddrinfo(
            host, port, family, socket.SOCK_STREAM):
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)

            # Apply caller-supplied socket options before connecting.
            _set_socket_options(sock, socket_options)

            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock

        except socket.error as e:
            # Remember the failure and try the next resolved address.
            last_error = e
            if sock is not None:
                sock.close()
                sock = None

    if last_error is not None:
        raise last_error

    raise socket.error("getaddrinfo returns an empty list")
86
87
def _set_socket_options(sock, options):
    """Apply *options* — an iterable of ``setsockopt`` argument tuples —
    to *sock*.  ``None`` means "no options" and is a no-op."""
    if options is not None:
        for opt in options:
            sock.setsockopt(*opt)
94
95
def allowed_gai_family():
    """This function is designed to work in the context of
    getaddrinfo, where family=socket.AF_UNSPEC is the default and
    will perform a DNS search for both IPv6 and IPv4 records."""
    # Only ask for IPv6 records when the system can actually use them.
    return socket.AF_UNSPEC if HAS_IPV6 else socket.AF_INET
105
106
def _has_ipv6(host):
    """ Returns True if the system can bind an IPv6 address. """
    # ``socket.has_ipv6`` only reports that CPython was *compiled* with
    # IPv6 support; the running system may still have it disabled.  The
    # only reliable check is to actually bind an IPv6 socket.
    # https://github.com/shazow/urllib3/pull/611
    # https://bugs.python.org/issue658327
    sock = None
    bound = False

    if socket.has_ipv6:
        try:
            sock = socket.socket(socket.AF_INET6)
            sock.bind((host, 0))
            bound = True
        except Exception:
            pass

    if sock:
        sock.close()
    return bound


#: True when this system can bind an IPv6 loopback address.
HAS_IPV6 = _has_ipv6('::1')
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/request.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/request.py
new file mode 100644
index 0000000..22882b8
--- /dev/null
+++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/request.py
@@ -0,0 +1,118 @@
1from __future__ import absolute_import
2from base64 import b64encode
3
4from ..packages.six import b, integer_types
5from ..exceptions import UnrewindableBodyError
6
ACCEPT_ENCODING = 'gzip,deflate'

# Sentinel distinguishing "tell() failed" from "no position recorded".
_FAILEDTELL = object()


def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
                 basic_auth=None, proxy_basic_auth=None, disable_cache=None):
    """
    Shortcuts for generating request headers.

    :param keep_alive:
        If ``True``, adds 'connection: keep-alive' header.

    :param accept_encoding:
        Can be a boolean, list, or string.
        ``True`` translates to 'gzip,deflate'.
        List will get joined by comma.
        String will be used as provided.

    :param user_agent:
        String representing the user-agent you want, such as
        "python-urllib3/0.6"

    :param basic_auth:
        Colon-separated username:password string for 'authorization: basic ...'
        auth header.

    :param proxy_basic_auth:
        Colon-separated username:password string for 'proxy-authorization: basic ...'
        auth header.

    :param disable_cache:
        If ``True``, adds 'cache-control: no-cache' header.

    Example::

        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
        >>> make_headers(accept_encoding=True)
        {'accept-encoding': 'gzip,deflate'}
    """
    headers = {}

    if accept_encoding:
        # Normalize: list -> comma-joined, non-string truthy -> default,
        # string -> used verbatim.
        if isinstance(accept_encoding, list):
            accept_encoding = ','.join(accept_encoding)
        elif not isinstance(accept_encoding, str):
            accept_encoding = ACCEPT_ENCODING
        headers['accept-encoding'] = accept_encoding

    if user_agent:
        headers['user-agent'] = user_agent

    if keep_alive:
        headers['connection'] = 'keep-alive'

    if basic_auth:
        headers['authorization'] = ('Basic ' +
                                    b64encode(b(basic_auth)).decode('utf-8'))

    if proxy_basic_auth:
        headers['proxy-authorization'] = ('Basic ' +
                                          b64encode(b(proxy_basic_auth)).decode('utf-8'))

    if disable_cache:
        headers['cache-control'] = 'no-cache'

    return headers
75
76
def set_file_position(body, pos):
    """
    If a position is provided, move file to that point.
    Otherwise, we'll attempt to record a position for future use.
    """
    if pos is not None:
        rewind_body(body, pos)
        return pos

    tell = getattr(body, 'tell', None)
    if tell is None:
        # Body exposes no position at all.
        return None
    try:
        return tell()
    except (IOError, OSError):
        # Differentiates from None, allowing us to catch
        # a failed `tell()` later when trying to rewind the body.
        return _FAILEDTELL
93
94
def rewind_body(body, body_pos):
    """
    Attempt to rewind body to a certain position.
    Primarily used for request redirects and retries.

    :param body:
        File-like object that supports seek.

    :param int pos:
        Position to seek to in file.
    """
    seek = getattr(body, 'seek', None)

    if seek is None or not isinstance(body_pos, integer_types):
        # We cannot seek: either no seek() or no usable recorded position.
        if body_pos is _FAILEDTELL:
            raise UnrewindableBodyError("Unable to record file position for rewinding "
                                        "request body during a redirect/retry.")
        raise ValueError("body_pos must be of type integer, "
                         "instead it was %s." % type(body_pos))

    try:
        seek(body_pos)
    except (IOError, OSError):
        raise UnrewindableBodyError("An error occurred when rewinding request "
                                    "body for redirect/retry.")
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/response.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/response.py
new file mode 100644
index 0000000..c2eb49c
--- /dev/null
+++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/response.py
@@ -0,0 +1,81 @@
1from __future__ import absolute_import
2from ..packages.six.moves import http_client as httplib
3
4from ..exceptions import HeaderParsingError
5
6
def is_fp_closed(obj):
    """
    Checks whether a given file-like object is closed.

    :param obj:
        The file-like object to check.
    """
    # Probes are ordered most-specific first; each except clause also
    # deliberately swallows AttributeError raised *inside* the probe.
    try:
        # Prefer ``isclosed()``: Python 3's httplib response may not set
        # ``closed``.  GH Issue #928.
        return obj.isclosed()
    except AttributeError:
        pass

    try:
        # The standard file-like-object protocol attribute.
        return obj.closed
    except AttributeError:
        pass

    try:
        # Containers (e.g. HTTPResponse) release their inner ``fp`` on
        # exhaustion; a missing fp means the stream is done.
        return obj.fp is None
    except AttributeError:
        pass

    raise ValueError("Unable to determine whether fp is closed.")
36
37
def assert_header_parsing(headers):
    """
    Asserts whether all headers have been successfully parsed.
    Extracts encountered errors from the result of parsing headers.

    Only works on Python 3.

    :param headers: Headers to verify.
    :type headers: `httplib.HTTPMessage`.

    :raises urllib3.exceptions.HeaderParsingError:
        If parsing errors are found.
    """
    # Passing the wrong type would otherwise fail silently, so check
    # explicitly to make debugging easier.
    if not isinstance(headers, httplib.HTTPMessage):
        raise TypeError('expected httplib.Message, got {0}.'.format(
            type(headers)))

    defects = getattr(headers, 'defects', None)
    get_payload = getattr(headers, 'get_payload', None)

    # get_payload only exists on Python 3's message objects; any payload
    # left over means the header block did not parse cleanly.
    unparsed_data = get_payload() if get_payload else None

    if defects or unparsed_data:
        raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
67
68
def is_response_to_head(response):
    """
    Checks whether the request of a response has been a HEAD-request.
    Handles the quirks of AppEngine.

    :param conn:
    :type conn: :class:`httplib.HTTPResponse`
    """
    # FIXME: Can we do this somehow without accessing private httplib _method?
    method = response._method
    if isinstance(method, int):
        # Platform-specific: AppEngine encodes the verb as an int; 3 is HEAD.
        return method == 3
    return method.upper() == 'HEAD'
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/retry.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/retry.py
new file mode 100644
index 0000000..2a7e8c1
--- /dev/null
+++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/retry.py
@@ -0,0 +1,401 @@
1from __future__ import absolute_import
2import time
3import logging
4from collections import namedtuple
5from itertools import takewhile
6import email
7import re
8
9from ..exceptions import (
10 ConnectTimeoutError,
11 MaxRetryError,
12 ProtocolError,
13 ReadTimeoutError,
14 ResponseError,
15 InvalidHeader,
16)
17from ..packages import six
18
19
log = logging.getLogger(__name__)

# Data structure for representing the metadata of requests that result in a retry.
RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
                                               "status", "redirect_location"])


class Retry(object):
    """ Retry configuration.

    Each retry attempt will create a new Retry object with updated values, so
    they can be safely reused.

    Retries can be defined as a default for a pool::

        retries = Retry(connect=5, read=2, redirect=5)
        http = PoolManager(retries=retries)
        response = http.request('GET', 'http://example.com/')

    Or per-request (which overrides the default for the pool)::

        response = http.request('GET', 'http://example.com/', retries=Retry(10))

    Retries can be disabled by passing ``False``::

        response = http.request('GET', 'http://example.com/', retries=False)

    Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
    retries are disabled, in which case the causing exception will be raised.

    :param int total:
        Total number of retries to allow. Takes precedence over other counts.

        Set to ``None`` to remove this constraint and fall back on other
        counts. It's a good idea to set this to some sensibly-high value to
        account for unexpected edge cases and avoid infinite retry loops.

        Set to ``0`` to fail on the first retry.

        Set to ``False`` to disable and imply ``raise_on_redirect=False``.

    :param int connect:
        How many connection-related errors to retry on.

        These are errors raised before the request is sent to the remote server,
        which we assume has not triggered the server to process the request.

        Set to ``0`` to fail on the first retry of this type.

    :param int read:
        How many times to retry on read errors.

        These errors are raised after the request was sent to the server, so the
        request may have side-effects.

        Set to ``0`` to fail on the first retry of this type.

    :param int redirect:
        How many redirects to perform. Limit this to avoid infinite redirect
        loops.

        A redirect is a HTTP response with a status code 301, 302, 303, 307 or
        308.

        Set to ``0`` to fail on the first retry of this type.

        Set to ``False`` to disable and imply ``raise_on_redirect=False``.

    :param int status:
        How many times to retry on bad status codes.

        These are retries made on responses, where status code matches
        ``status_forcelist``.

        Set to ``0`` to fail on the first retry of this type.

    :param iterable method_whitelist:
        Set of uppercased HTTP method verbs that we should retry on.

        By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
        same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.

        Set to a ``False`` value to retry on any verb.

    :param iterable status_forcelist:
        A set of integer HTTP status codes that we should force a retry on.
        A retry is initiated if the request method is in ``method_whitelist``
        and the response status code is in ``status_forcelist``.

        By default, this is disabled with ``None``.

    :param float backoff_factor:
        A backoff factor to apply between attempts after the second try
        (most errors are resolved immediately by a second try without a
        delay). urllib3 will sleep for::

            {backoff factor} * (2 ^ ({number of total retries} - 1))

        seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
        for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
        than :attr:`Retry.BACKOFF_MAX`.

        By default, backoff is disabled (set to 0).

    :param bool raise_on_redirect: Whether, if the number of redirects is
        exhausted, to raise a MaxRetryError, or to return a response with a
        response code in the 3xx range.

    :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
        whether we should raise an exception, or return a response,
        if status falls in ``status_forcelist`` range and retries have
        been exhausted.

    :param tuple history: The history of the request encountered during
        each call to :meth:`~Retry.increment`. The list is in the order
        the requests occurred. Each list item is of class :class:`RequestHistory`.

    :param bool respect_retry_after_header:
        Whether to respect Retry-After header on status codes defined as
        :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.

    """

    DEFAULT_METHOD_WHITELIST = frozenset([
        'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])

    RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])

    #: Maximum backoff time.
    BACKOFF_MAX = 120

    def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
                 method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
                 backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
                 history=None, respect_retry_after_header=True):

        self.total = total
        self.connect = connect
        self.read = read
        self.status = status

        # ``False`` disables redirect handling entirely and implies that
        # exhausting redirects should not raise.
        if redirect is False or total is False:
            redirect = 0
            raise_on_redirect = False

        self.redirect = redirect
        self.status_forcelist = status_forcelist or set()
        self.method_whitelist = method_whitelist
        self.backoff_factor = backoff_factor
        self.raise_on_redirect = raise_on_redirect
        self.raise_on_status = raise_on_status
        self.history = history or tuple()
        self.respect_retry_after_header = respect_retry_after_header

    def new(self, **kw):
        """ Return a copy of this Retry with fields overridden by ``kw``. """
        params = dict(
            total=self.total,
            connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
            method_whitelist=self.method_whitelist,
            status_forcelist=self.status_forcelist,
            backoff_factor=self.backoff_factor,
            raise_on_redirect=self.raise_on_redirect,
            raise_on_status=self.raise_on_status,
            history=self.history,
        )
        params.update(kw)
        return type(self)(**params)

    @classmethod
    def from_int(cls, retries, redirect=True, default=None):
        """ Backwards-compatibility for the old retries format."""
        if retries is None:
            retries = default if default is not None else cls.DEFAULT

        if isinstance(retries, Retry):
            return retries

        redirect = bool(redirect) and None
        new_retries = cls(retries, redirect=redirect)
        log.debug("Converted retries value: %r -> %r", retries, new_retries)
        return new_retries

    def get_backoff_time(self):
        """ Formula for computing the current backoff

        :rtype: float
        """
        # We want to consider only the last consecutive errors sequence (Ignore redirects).
        consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
                                                    reversed(self.history))))
        if consecutive_errors_len <= 1:
            return 0

        backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
        return min(self.BACKOFF_MAX, backoff_value)

    def parse_retry_after(self, retry_after):
        """ Parse a ``Retry-After`` header value (either delta-seconds or
        an HTTP-date) into a non-negative number of seconds.

        :raises urllib3.exceptions.InvalidHeader: if the value is neither.
        """
        # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
        if re.match(r"^\s*[0-9]+\s*$", retry_after):
            seconds = int(retry_after)
        else:
            # BUGFIX: ``import email`` alone does not guarantee that the
            # ``email.utils`` submodule is loaded, so attribute access
            # could fail with AttributeError; import it explicitly.
            import email.utils
            retry_date_tuple = email.utils.parsedate(retry_after)
            if retry_date_tuple is None:
                raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
            retry_date = time.mktime(retry_date_tuple)
            seconds = retry_date - time.time()

        # A date in the past means "retry immediately".
        if seconds < 0:
            seconds = 0

        return seconds

    def get_retry_after(self, response):
        """ Get the value of Retry-After in seconds. """

        retry_after = response.getheader("Retry-After")

        if retry_after is None:
            return None

        return self.parse_retry_after(retry_after)

    def sleep_for_retry(self, response=None):
        """ Sleep the duration the server requested via ``Retry-After``.

        :return: True if a sleep actually happened, False otherwise.
        """
        retry_after = self.get_retry_after(response)
        if retry_after:
            time.sleep(retry_after)
            return True

        return False

    def _sleep_backoff(self):
        # Exponential backoff based on the trailing error history.
        backoff = self.get_backoff_time()
        if backoff <= 0:
            return
        time.sleep(backoff)

    def sleep(self, response=None):
        """ Sleep between retry attempts.

        This method will respect a server's ``Retry-After`` response header
        and sleep the duration of the time requested. If that is not present, it
        will use an exponential backoff. By default, the backoff factor is 0 and
        this method will return immediately.
        """

        if response:
            slept = self.sleep_for_retry(response)
            if slept:
                return

        self._sleep_backoff()

    def _is_connection_error(self, err):
        """ Errors when we're fairly sure that the server did not receive the
        request, so it should be safe to retry.
        """
        return isinstance(err, ConnectTimeoutError)

    def _is_read_error(self, err):
        """ Errors that occur after the request has been started, so we should
        assume that the server began processing it.
        """
        return isinstance(err, (ReadTimeoutError, ProtocolError))

    def _is_method_retryable(self, method):
        """ Checks if a given HTTP method should be retried upon, depending if
        it is included on the method whitelist.
        """
        if self.method_whitelist and method.upper() not in self.method_whitelist:
            return False

        return True

    def is_retry(self, method, status_code, has_retry_after=False):
        """ Is this method/status code retryable? (Based on whitelists and control
        variables such as the number of total retries to allow, whether to
        respect the Retry-After header, whether this header is present, and
        whether the returned status code is on the list of status codes to
        be retried upon on the presence of the aforementioned header)
        """
        if not self._is_method_retryable(method):
            return False

        if self.status_forcelist and status_code in self.status_forcelist:
            return True

        # BUGFIX: normalize to a real boolean — ``self.total`` may be an
        # int (or None), which previously leaked out as the return value.
        return bool(self.total and self.respect_retry_after_header and
                    has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))

    def is_exhausted(self):
        """ Are we out of retries? """
        retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
        # filter(None, ...) drops both ``None`` (no constraint) and ``0``;
        # a count of 0 only exhausts once increment() takes it below zero.
        retry_counts = list(filter(None, retry_counts))
        if not retry_counts:
            return False

        return min(retry_counts) < 0

    def increment(self, method=None, url=None, response=None, error=None,
                  _pool=None, _stacktrace=None):
        """ Return a new Retry object with incremented retry counters.

        :param response: A response object, or None, if the server did not
            return a response.
        :type response: :class:`~urllib3.response.HTTPResponse`
        :param Exception error: An error encountered during the request, or
            None if the response was received successfully.

        :return: A new ``Retry`` object.
        """
        if self.total is False and error:
            # Disabled, indicate to re-raise the error.
            raise six.reraise(type(error), error, _stacktrace)

        total = self.total
        if total is not None:
            total -= 1

        connect = self.connect
        read = self.read
        redirect = self.redirect
        status_count = self.status
        cause = 'unknown'
        status = None
        redirect_location = None

        if error and self._is_connection_error(error):
            # Connect retry?
            if connect is False:
                raise six.reraise(type(error), error, _stacktrace)
            elif connect is not None:
                connect -= 1

        elif error and self._is_read_error(error):
            # Read retry?
            if read is False or not self._is_method_retryable(method):
                raise six.reraise(type(error), error, _stacktrace)
            elif read is not None:
                read -= 1

        elif response and response.get_redirect_location():
            # Redirect retry?
            if redirect is not None:
                redirect -= 1
            cause = 'too many redirects'
            redirect_location = response.get_redirect_location()
            status = response.status

        else:
            # Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
            cause = ResponseError.GENERIC_ERROR
            if response and response.status:
                if status_count is not None:
                    status_count -= 1
                cause = ResponseError.SPECIFIC_ERROR.format(
                    status_code=response.status)
                status = response.status

        history = self.history + (RequestHistory(method, url, error, status, redirect_location),)

        new_retry = self.new(
            total=total,
            connect=connect, read=read, redirect=redirect, status=status_count,
            history=history)

        if new_retry.is_exhausted():
            raise MaxRetryError(_pool, url, error or ResponseError(cause))

        log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)

        return new_retry

    def __repr__(self):
        return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
                'read={self.read}, redirect={self.redirect}, status={self.status})').format(
            cls=type(self), self=self)


# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/selectors.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/selectors.py
new file mode 100644
index 0000000..9f16c66
--- /dev/null
+++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/selectors.py
@@ -0,0 +1,581 @@
1# Backport of selectors.py from Python 3.5+ to support Python < 3.4
2# Also has the behavior specified in PEP 475 which is to retry syscalls
3# in the case of an EINTR error. This module is required because selectors34
4# does not follow this behavior and instead returns that no file descriptor
5# events have occurred rather than retry the syscall. The decision to drop
6# support for select.devpoll is made to maintain 100% test coverage.
7
8import errno
9import math
10import select
11import socket
12import sys
13import time
14from collections import namedtuple, Mapping
15
try:
    monotonic = time.monotonic
except (AttributeError, ImportError):  # Python < 3.3 has no time.monotonic.
    monotonic = time.time

# Bitmask flags callers OR together to express read/write interest.
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)

HAS_SELECT = True  # Variable that shows whether the platform has a selector.
_SYSCALL_SENTINEL = object()  # Sentinel in case a system call returns None.
_DEFAULT_SELECTOR = None  # Selector class cached by DefaultSelector() below.
27
28
class SelectorError(Exception):
    """Raised when a select()-family system call fails.

    The failing call's errno is preserved on the instance so callers
    can inspect the underlying error code.
    """

    def __init__(self, errcode):
        super(SelectorError, self).__init__()
        self.errno = errcode

    def __str__(self):
        return "<SelectorError errno={0}>".format(self.errno)

    def __repr__(self):
        return self.__str__()
39
40
41def _fileobj_to_fd(fileobj):
42 """ Return a file descriptor from a file object. If
43 given an integer will simply return that integer back. """
44 if isinstance(fileobj, int):
45 fd = fileobj
46 else:
47 try:
48 fd = int(fileobj.fileno())
49 except (AttributeError, TypeError, ValueError):
50 raise ValueError("Invalid file object: {0!r}".format(fileobj))
51 if fd < 0:
52 raise ValueError("Invalid file descriptor: {0}".format(fd))
53 return fd
54
55
56# Determine which function to use to wrap system calls because Python 3.5+
57# already handles the case when system calls are interrupted.
58if sys.version_info >= (3, 5):
59 def _syscall_wrapper(func, _, *args, **kwargs):
60 """ This is the short-circuit version of the below logic
61 because in Python 3.5+ all system calls automatically restart
62 and recalculate their timeouts. """
63 try:
64 return func(*args, **kwargs)
65 except (OSError, IOError, select.error) as e:
66 errcode = None
67 if hasattr(e, "errno"):
68 errcode = e.errno
69 raise SelectorError(errcode)
70else:
71 def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
72 """ Wrapper function for syscalls that could fail due to EINTR.
73 All functions should be retried if there is time left in the timeout
74 in accordance with PEP 475. """
75 timeout = kwargs.get("timeout", None)
76 if timeout is None:
77 expires = None
78 recalc_timeout = False
79 else:
80 timeout = float(timeout)
81 if timeout < 0.0: # Timeout less than 0 treated as no timeout.
82 expires = None
83 else:
84 expires = monotonic() + timeout
85
86 args = list(args)
87 if recalc_timeout and "timeout" not in kwargs:
88 raise ValueError(
89 "Timeout must be in args or kwargs to be recalculated")
90
91 result = _SYSCALL_SENTINEL
92 while result is _SYSCALL_SENTINEL:
93 try:
94 result = func(*args, **kwargs)
95 # OSError is thrown by select.select
96 # IOError is thrown by select.epoll.poll
97 # select.error is thrown by select.poll.poll
98 # Aren't we thankful for Python 3.x rework for exceptions?
99 except (OSError, IOError, select.error) as e:
100 # select.error wasn't a subclass of OSError in the past.
101 errcode = None
102 if hasattr(e, "errno"):
103 errcode = e.errno
104 elif hasattr(e, "args"):
105 errcode = e.args[0]
106
107 # Also test for the Windows equivalent of EINTR.
108 is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
109 errcode == errno.WSAEINTR))
110
111 if is_interrupt:
112 if expires is not None:
113 current_time = monotonic()
114 if current_time > expires:
115 raise OSError(errno=errno.ETIMEDOUT)
116 if recalc_timeout:
117 if "timeout" in kwargs:
118 kwargs["timeout"] = expires - current_time
119 continue
120 if errcode:
121 raise SelectorError(errcode)
122 else:
123 raise
124 return result
125
126
# Immutable record for one registration: the caller's file object, its
# resolved fd, the event mask of interest, and opaque attached data.
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
128
129
130class _SelectorMapping(Mapping):
131 """ Mapping of file objects to selector keys """
132
133 def __init__(self, selector):
134 self._selector = selector
135
136 def __len__(self):
137 return len(self._selector._fd_to_key)
138
139 def __getitem__(self, fileobj):
140 try:
141 fd = self._selector._fileobj_lookup(fileobj)
142 return self._selector._fd_to_key[fd]
143 except KeyError:
144 raise KeyError("{0!r} is not registered.".format(fileobj))
145
146 def __iter__(self):
147 return iter(self._selector._fd_to_key)
148
149
class BaseSelector(object):
    """ Abstract Selector class

    A selector supports registering file objects to be monitored
    for specific I/O events.

    A file object is a file descriptor or any object with a
    `fileno()` method. An arbitrary object can be attached to the
    file object which can be used for example to store context info,
    a callback, etc.

    A selector can use various implementations (select(), poll(), epoll(),
    and kqueue()) depending on the platform. The 'DefaultSelector' class uses
    the most efficient implementation for the current platform.
    """
    def __init__(self):
        # Maps file descriptors to keys.
        self._fd_to_key = {}

        # Read-only mapping returned by get_map()
        self._map = _SelectorMapping(self)

    def _fileobj_lookup(self, fileobj):
        """ Return a file descriptor from a file object.
        This wraps _fileobj_to_fd() to do an exhaustive
        search in case the object is invalid but we still
        have it in our map. Used by unregister() so we can
        unregister an object that was previously registered
        even if it is closed. It is also used by _SelectorMapping
        """
        try:
            return _fileobj_to_fd(fileobj)
        except ValueError:
            # The object no longer resolves to a valid fd (e.g. it was
            # closed); fall back to an identity scan of every mapped key.

            # Search through all our mapped keys.
            for key in self._fd_to_key.values():
                if key.fileobj is fileobj:
                    return key.fd

            # Raise ValueError after all.
            raise

    def register(self, fileobj, events, data=None):
        """ Register a file object for a set of events to monitor.

        Raises ValueError for an empty/unknown event mask and KeyError
        if the fd is already registered. Returns the new SelectorKey.
        """
        if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
            raise ValueError("Invalid events: {0!r}".format(events))

        key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)

        if key.fd in self._fd_to_key:
            raise KeyError("{0!r} (FD {1}) is already registered"
                           .format(fileobj, key.fd))

        self._fd_to_key[key.fd] = key
        return key

    def unregister(self, fileobj):
        """ Unregister a file object from being monitored.

        Returns the SelectorKey that was removed; raises KeyError if the
        object was never registered.
        """
        try:
            key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
        except KeyError:
            raise KeyError("{0!r} is not registered".format(fileobj))

        # Getting the fileno of a closed socket on Windows errors with EBADF.
        except socket.error as e:  # Platform-specific: Windows.
            if e.errno != errno.EBADF:
                raise
            else:
                # fileno() failed, so fall back to identity comparison to
                # find and drop the stale entry.
                for key in self._fd_to_key.values():
                    if key.fileobj is fileobj:
                        self._fd_to_key.pop(key.fd)
                        break
                else:
                    raise KeyError("{0!r} is not registered".format(fileobj))
        return key

    def modify(self, fileobj, events, data=None):
        """ Change a registered file object monitored events and data. """
        # NOTE: Some subclasses optimize this operation even further.
        try:
            key = self._fd_to_key[self._fileobj_lookup(fileobj)]
        except KeyError:
            raise KeyError("{0!r} is not registered".format(fileobj))

        if events != key.events:
            # Event mask changed: re-register from scratch so subclasses
            # update their platform-specific structures too.
            self.unregister(fileobj)
            key = self.register(fileobj, events, data)

        elif data != key.data:
            # Use a shortcut to update the data.
            key = key._replace(data=data)
            self._fd_to_key[key.fd] = key

        return key

    def select(self, timeout=None):
        """ Perform the actual selection until some monitored file objects
        are ready or the timeout expires. """
        raise NotImplementedError()

    def close(self):
        """ Close the selector. This must be called to ensure that all
        underlying resources are freed. """
        self._fd_to_key.clear()
        self._map = None

    def get_key(self, fileobj):
        """ Return the key associated with a registered file object.

        Raises RuntimeError if the selector has been closed.
        """
        mapping = self.get_map()
        if mapping is None:
            raise RuntimeError("Selector is closed")
        try:
            return mapping[fileobj]
        except KeyError:
            raise KeyError("{0!r} is not registered".format(fileobj))

    def get_map(self):
        """ Return a mapping of file objects to selector keys """
        return self._map

    def _key_from_fd(self, fd):
        """ Return the key associated to a given file descriptor
        Return None if it is not found. """
        try:
            return self._fd_to_key[fd]
        except KeyError:
            return None

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
283
284
# Almost all platforms have select.select()
if hasattr(select, "select"):
    class SelectSelector(BaseSelector):
        """ Select-based selector. """
        def __init__(self):
            super(SelectSelector, self).__init__()
            # fds currently monitored for readability / writability.
            self._readers = set()
            self._writers = set()

        def register(self, fileobj, events, data=None):
            """ Register and add the fd to the relevant interest set(s). """
            key = super(SelectSelector, self).register(fileobj, events, data)
            if events & EVENT_READ:
                self._readers.add(key.fd)
            if events & EVENT_WRITE:
                self._writers.add(key.fd)
            return key

        def unregister(self, fileobj):
            """ Unregister and drop the fd from both interest sets. """
            key = super(SelectSelector, self).unregister(fileobj)
            self._readers.discard(key.fd)
            self._writers.discard(key.fd)
            return key

        def _select(self, r, w, timeout=None):
            """ Wrapper for select.select because timeout is a positional arg """
            return select.select(r, w, [], timeout)

        def select(self, timeout=None):
            """ Wait via select.select(); returns [(key, events), ...]. """
            # Selecting on empty lists on Windows errors out.
            if not len(self._readers) and not len(self._writers):
                return []

            # Negative timeouts are treated as "poll and return immediately".
            timeout = None if timeout is None else max(timeout, 0.0)
            ready = []
            r, w, _ = _syscall_wrapper(self._select, True, self._readers,
                                       self._writers, timeout)
            r = set(r)
            w = set(w)
            for fd in r | w:
                events = 0
                if fd in r:
                    events |= EVENT_READ
                if fd in w:
                    events |= EVENT_WRITE

                key = self._key_from_fd(fd)
                if key:
                    # Only report events the caller registered interest in.
                    ready.append((key, events & key.events))
            return ready
334
335
if hasattr(select, "poll"):
    class PollSelector(BaseSelector):
        """ Poll-based selector """
        def __init__(self):
            super(PollSelector, self).__init__()
            self._poll = select.poll()

        def register(self, fileobj, events, data=None):
            """ Register the fd with the poll object as POLLIN/POLLOUT. """
            key = super(PollSelector, self).register(fileobj, events, data)
            event_mask = 0
            if events & EVENT_READ:
                event_mask |= select.POLLIN
            if events & EVENT_WRITE:
                event_mask |= select.POLLOUT
            self._poll.register(key.fd, event_mask)
            return key

        def unregister(self, fileobj):
            """ Remove the fd from our map and from the poll object. """
            key = super(PollSelector, self).unregister(fileobj)
            self._poll.unregister(key.fd)
            return key

        def _wrap_poll(self, timeout=None):
            """ Wrapper function for select.poll.poll() so that
            _syscall_wrapper can work with only seconds. """
            if timeout is not None:
                if timeout <= 0:
                    timeout = 0
                else:
                    # select.poll.poll() has a resolution of 1 millisecond,
                    # round away from zero to wait *at least* timeout seconds.
                    timeout = math.ceil(timeout * 1e3)

            result = self._poll.poll(timeout)
            return result

        def select(self, timeout=None):
            """ Wait for events and translate poll() flags back into
            EVENT_READ/EVENT_WRITE pairs. """
            ready = []
            fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
            for fd, event_mask in fd_events:
                events = 0
                # NOTE: the inverted masks below are deliberate: any flag
                # besides plain POLLIN (POLLOUT/POLLERR/POLLHUP/...) marks
                # the fd writable, and anything besides plain POLLOUT marks
                # it readable, so error states surface as both.
                if event_mask & ~select.POLLIN:
                    events |= EVENT_WRITE
                if event_mask & ~select.POLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))

            return ready
387
388
if hasattr(select, "epoll"):
    class EpollSelector(BaseSelector):
        """ Epoll-based selector """
        def __init__(self):
            super(EpollSelector, self).__init__()
            self._epoll = select.epoll()

        def fileno(self):
            # The epoll object's own fd (allows nesting inside another
            # selector).
            return self._epoll.fileno()

        def register(self, fileobj, events, data=None):
            """ Register the fd with epoll using EPOLLIN/EPOLLOUT. """
            key = super(EpollSelector, self).register(fileobj, events, data)
            events_mask = 0
            if events & EVENT_READ:
                events_mask |= select.EPOLLIN
            if events & EVENT_WRITE:
                events_mask |= select.EPOLLOUT
            _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
            return key

        def unregister(self, fileobj):
            """ Unregister from epoll, tolerating already-closed fds. """
            key = super(EpollSelector, self).unregister(fileobj)
            try:
                _syscall_wrapper(self._epoll.unregister, False, key.fd)
            except SelectorError:
                # This can occur when the fd was closed since registry.
                pass
            return key

        def select(self, timeout=None):
            """ Wait for events; translate EPOLL* flags to EVENT_READ/WRITE. """
            if timeout is not None:
                if timeout <= 0:
                    timeout = 0.0
                else:
                    # select.epoll.poll() has a resolution of 1 millisecond
                    # but luckily takes seconds so we don't need a wrapper
                    # like PollSelector. Just for better rounding.
                    timeout = math.ceil(timeout * 1e3) * 1e-3
                timeout = float(timeout)
            else:
                timeout = -1.0  # epoll.poll() must have a float.

            # We always want at least 1 to ensure that select can be called
            # with no file descriptors registered. Otherwise will fail.
            max_events = max(len(self._fd_to_key), 1)

            ready = []
            fd_events = _syscall_wrapper(self._epoll.poll, True,
                                         timeout=timeout,
                                         maxevents=max_events)
            for fd, event_mask in fd_events:
                events = 0
                # Inverted masks are deliberate (same scheme as
                # PollSelector): error conditions are reported as both
                # readable and writable.
                if event_mask & ~select.EPOLLIN:
                    events |= EVENT_WRITE
                if event_mask & ~select.EPOLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready

        def close(self):
            # Release the epoll fd before clearing base bookkeeping.
            self._epoll.close()
            super(EpollSelector, self).close()
454
455
if hasattr(select, "kqueue"):
    class KqueueSelector(BaseSelector):
        """ Kqueue / Kevent-based selector """
        def __init__(self):
            super(KqueueSelector, self).__init__()
            self._kqueue = select.kqueue()

        def fileno(self):
            # The kqueue object's own fd.
            return self._kqueue.fileno()

        def register(self, fileobj, events, data=None):
            """ Register one kevent per requested filter (read and/or
            write). """
            key = super(KqueueSelector, self).register(fileobj, events, data)
            if events & EVENT_READ:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_READ,
                                       select.KQ_EV_ADD)

                _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)

            if events & EVENT_WRITE:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_WRITE,
                                       select.KQ_EV_ADD)

                _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)

            return key

        def unregister(self, fileobj):
            """ Delete any kevents we added; errors from fds already closed
            since registration are deliberately ignored. """
            key = super(KqueueSelector, self).unregister(fileobj)
            if key.events & EVENT_READ:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_READ,
                                       select.KQ_EV_DELETE)
                try:
                    _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
                except SelectorError:
                    pass
            if key.events & EVENT_WRITE:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_WRITE,
                                       select.KQ_EV_DELETE)
                try:
                    _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
                except SelectorError:
                    pass

            return key

        def select(self, timeout=None):
            """ Collect kevents and merge per-filter results so each fd
            appears at most once in the returned list. """
            if timeout is not None:
                timeout = max(timeout, 0)

            # Each fd may have registered up to two kevents (read + write).
            max_events = len(self._fd_to_key) * 2
            ready_fds = {}

            kevent_list = _syscall_wrapper(self._kqueue.control, True,
                                           None, max_events, timeout)

            for kevent in kevent_list:
                fd = kevent.ident
                event_mask = kevent.filter
                events = 0
                if event_mask == select.KQ_FILTER_READ:
                    events |= EVENT_READ
                if event_mask == select.KQ_FILTER_WRITE:
                    events |= EVENT_WRITE

                key = self._key_from_fd(fd)
                if key:
                    if key.fd not in ready_fds:
                        ready_fds[key.fd] = (key, events & key.events)
                    else:
                        # Same fd reported by both filters: OR the events
                        # together before masking with the registration.
                        old_events = ready_fds[key.fd][1]
                        ready_fds[key.fd] = (key, (events | old_events) & key.events)

            return list(ready_fds.values())

        def close(self):
            # Release the kqueue fd before clearing base bookkeeping.
            self._kqueue.close()
            super(KqueueSelector, self).close()
537
538
# Some sandboxed platforms strip the select primitives entirely; flag it so
# callers can choose a non-selector waiting strategy.
if not hasattr(select, 'select'):  # Platform-specific: AppEngine
    HAS_SELECT = False
541
542
543def _can_allocate(struct):
544 """ Checks that select structs can be allocated by the underlying
545 operating system, not just advertised by the select module. We don't
546 check select() because we'll be hopeful that most platforms that
547 don't have it available will not advertise it. (ie: GAE) """
548 try:
549 # select.poll() objects won't fail until used.
550 if struct == 'poll':
551 p = select.poll()
552 p.poll(0)
553
554 # All others will fail on allocation.
555 else:
556 getattr(select, struct)().close()
557 return True
558 except (OSError, AttributeError) as e:
559 return False
560
561
# Choose the best implementation, roughly:
# kqueue == epoll > poll > select. Devpoll not supported. (See above)
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
def DefaultSelector():
    """ This function serves as a first call for DefaultSelector to
    detect if the select module is being monkey-patched incorrectly
    by eventlet, greenlet, and preserve proper behavior. """
    global _DEFAULT_SELECTOR
    if _DEFAULT_SELECTOR is None:
        # Probe once and cache the winning class in the module global;
        # subsequent calls only instantiate the cached choice.
        if _can_allocate('kqueue'):
            _DEFAULT_SELECTOR = KqueueSelector
        elif _can_allocate('epoll'):
            _DEFAULT_SELECTOR = EpollSelector
        elif _can_allocate('poll'):
            _DEFAULT_SELECTOR = PollSelector
        elif hasattr(select, 'select'):
            _DEFAULT_SELECTOR = SelectSelector
        else:  # Platform-specific: AppEngine
            raise ValueError('Platform does not have a selector')
    return _DEFAULT_SELECTOR()
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/ssl_.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/ssl_.py
new file mode 100644
index 0000000..c11dff2
--- /dev/null
+++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/ssl_.py
@@ -0,0 +1,341 @@
1from __future__ import absolute_import
2import errno
3import warnings
4import hmac
5
6from binascii import hexlify, unhexlify
7from hashlib import md5, sha1, sha256
8
9from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
10
11
# Placeholder replaced below by the real ssl.SSLContext or the Python 2
# shim. NOTE(review): the IS_PYOPENSSL / IS_SECURETRANSPORT flags appear
# intended to be flipped by contrib backends — confirm against the rest of
# the package.
SSLContext = None
HAS_SNI = False
IS_PYOPENSSL = False
IS_SECURETRANSPORT = False

# Maps the length of a digest to a possible hash function producing this digest
# (lengths are counted in hex digits: 32 -> MD5, 40 -> SHA-1, 64 -> SHA-256).
HASHFUNC_MAP = {
    32: md5,
    40: sha1,
    64: sha256,
}
23
24
25def _const_compare_digest_backport(a, b):
26 """
27 Compare two digests of equal length in constant time.
28
29 The digests must be of type str/bytes.
30 Returns True if the digests match, and False otherwise.
31 """
32 result = abs(len(a) - len(b))
33 for l, r in zip(bytearray(a), bytearray(b)):
34 result |= l ^ r
35 return result == 0
36
37
# Prefer the stdlib's timing-safe comparison when available (Python 2.7.7+
# and 3.3+); otherwise fall back to the pure-Python backport.
_const_compare_digest = getattr(hmac, 'compare_digest',
                                _const_compare_digest_backport)
40
41
42try: # Test for SSL features
43 import ssl
44 from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
45 from ssl import HAS_SNI # Has SNI?
46except ImportError:
47 pass
48
49
50try:
51 from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
52except ImportError:
53 OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
54 OP_NO_COMPRESSION = 0x20000
55
56# A secure default.
57# Sources for more information on TLS ciphers:
58#
59# - https://wiki.mozilla.org/Security/Server_Side_TLS
60# - https://www.ssllabs.com/projects/best-practices/index.html
61# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
62#
63# The general intent is:
64# - Prefer TLS 1.3 cipher suites
65# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
66# - prefer ECDHE over DHE for better performance,
67# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
68# security,
69# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
70# - disable NULL authentication, MD5 MACs and DSS for security reasons.
# OpenSSL cipher-list string: candidates in preference order, with
# '!'-prefixed entries excluded outright. Rationale is in the comment
# block preceding this constant.
DEFAULT_CIPHERS = ':'.join([
    'TLS13-AES-256-GCM-SHA384',
    'TLS13-CHACHA20-POLY1305-SHA256',
    'TLS13-AES-128-GCM-SHA256',
    'ECDH+AESGCM',
    'ECDH+CHACHA20',
    'DH+AESGCM',
    'DH+CHACHA20',
    'ECDH+AES256',
    'DH+AES256',
    'ECDH+AES128',
    'DH+AES',
    'RSA+AESGCM',
    'RSA+AES',
    '!aNULL',
    '!eNULL',
    '!MD5',
])
89
try:
    from ssl import SSLContext  # Modern SSL?
except ImportError:
    import sys

    class SSLContext(object):  # Platform-specific: Python 2 & 3.1
        """ Minimal stand-in for ssl.SSLContext on very old Pythons.

        Collects the configuration that a real context would hold and
        replays it through ssl.wrap_socket() when wrap_socket() is called.
        """
        # wrap_socket() only grew a 'ciphers' argument in 2.7 / 3.2.
        supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
                                (3, 2) <= sys.version_info)

        def __init__(self, protocol_version):
            self.protocol = protocol_version
            # Use default values from a real SSLContext
            self.check_hostname = False
            self.verify_mode = ssl.CERT_NONE
            self.ca_certs = None
            self.options = 0
            self.certfile = None
            self.keyfile = None
            self.ciphers = None

        def load_cert_chain(self, certfile, keyfile):
            # Stored and forwarded to wrap_socket() later.
            self.certfile = certfile
            self.keyfile = keyfile

        def load_verify_locations(self, cafile=None, capath=None):
            self.ca_certs = cafile

            if capath is not None:
                raise SSLError("CA directories not supported in older Pythons")

        def set_ciphers(self, cipher_suite):
            if not self.supports_set_ciphers:
                raise TypeError(
                    'Your version of Python does not support setting '
                    'a custom cipher suite. Please upgrade to Python '
                    '2.7, 3.2, or later if you need this functionality.'
                )
            self.ciphers = cipher_suite

        def wrap_socket(self, socket, server_hostname=None, server_side=False):
            # server_hostname is accepted for interface parity but cannot
            # be honored without real SNI support; warn the caller.
            warnings.warn(
                'A true SSLContext object is not available. This prevents '
                'urllib3 from configuring SSL appropriately and may cause '
                'certain SSL connections to fail. You can upgrade to a newer '
                'version of Python to solve this. For more information, see '
                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
                '#ssl-warnings',
                InsecurePlatformWarning
            )
            kwargs = {
                'keyfile': self.keyfile,
                'certfile': self.certfile,
                'ca_certs': self.ca_certs,
                'cert_reqs': self.verify_mode,
                'ssl_version': self.protocol,
                'server_side': server_side,
            }
            if self.supports_set_ciphers:  # Platform-specific: Python 2.7+
                return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
            else:  # Platform-specific: Python 2.6
                return wrap_socket(socket, **kwargs)
151
152
def assert_fingerprint(cert, fingerprint):
    """
    Checks if given fingerprint matches the supplied certificate.

    :param cert:
        Certificate as bytes object.
    :param fingerprint:
        Fingerprint as string of hexdigits, can be interspersed by colons.
    :raises SSLError:
        If the fingerprint length matches no known hash, or the digests
        differ.
    """
    fingerprint = fingerprint.replace(':', '').lower()

    # Pick the hash by hex-digit count (32=MD5, 40=SHA-1, 64=SHA-256).
    hashfunc = HASHFUNC_MAP.get(len(fingerprint))
    if hashfunc is None:
        raise SSLError(
            'Fingerprint of invalid length: {0}'.format(fingerprint))

    # We need encode() here for py32; works on py2 and py33.
    fingerprint_bytes = unhexlify(fingerprint.encode())
    cert_digest = hashfunc(cert).digest()

    # Constant-time comparison to avoid leaking match position.
    if not _const_compare_digest(cert_digest, fingerprint_bytes):
        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
                       .format(fingerprint, hexlify(cert_digest)))
178
179
def resolve_cert_reqs(candidate):
    """
    Resolves the argument to a numeric constant, which can be passed to
    the wrap_socket function/method from the ssl module.
    Defaults to :data:`ssl.CERT_NONE`.
    If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
    If it's neither `None` nor a string we assume it is already the numeric
    constant which can directly be passed to wrap_socket.
    """
    if candidate is None:
        return CERT_NONE
    if not isinstance(candidate, str):
        # Already a numeric constant; hand it back unchanged.
        return candidate
    # Try the exact attribute name first, then the 'CERT_'-prefixed form.
    resolved = getattr(ssl, candidate, None)
    if resolved is not None:
        return resolved
    return getattr(ssl, 'CERT_' + candidate)
201
202
def resolve_ssl_version(candidate):
    """
    Like :func:`resolve_cert_reqs`, but for ``ssl.PROTOCOL_*`` constants;
    defaults to :data:`ssl.PROTOCOL_SSLv23`.
    """
    if candidate is None:
        return PROTOCOL_SSLv23
    if not isinstance(candidate, str):
        # Already a numeric constant; hand it back unchanged.
        return candidate
    # Try the exact attribute name first, then the 'PROTOCOL_'-prefixed form.
    resolved = getattr(ssl, candidate, None)
    if resolved is not None:
        return resolved
    return getattr(ssl, 'PROTOCOL_' + candidate)
217
218
def create_urllib3_context(ssl_version=None, cert_reqs=None,
                           options=None, ciphers=None):
    """All arguments have the same meaning as ``ssl_wrap_socket``.

    By default, this function does a lot of the same work that
    ``ssl.create_default_context`` does on Python 3.4+. It:

    - Disables SSLv2, SSLv3, and compression
    - Sets a restricted set of server ciphers

    If you wish to enable SSLv3, you can do::

        from pip._vendor.urllib3.util import ssl_
        context = ssl_.create_urllib3_context()
        context.options &= ~ssl_.OP_NO_SSLv3

    You can do the same to enable compression (substituting ``COMPRESSION``
    for ``SSLv3`` in the last line above).

    :param ssl_version:
        The desired protocol version to use. This will default to
        PROTOCOL_SSLv23 which will negotiate the highest protocol that both
        the server and your installation of OpenSSL support.
    :param cert_reqs:
        Whether to require the certificate verification. This defaults to
        ``ssl.CERT_REQUIRED``.
    :param options:
        Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
        ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
    :param ciphers:
        Which cipher suites to allow the server to select.
    :returns:
        Constructed SSLContext object with specified options
    :rtype: SSLContext
    """
    context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)

    if options is None:
        # Default hardening: SSLv2 is trivially broken, SSLv3 is dangerous
        # (POODLE), and TLS compression enables the CRIME attack on
        # OpenSSL 1.0+ (issue #309).
        options = OP_NO_SSLv2 | OP_NO_SSLv3 | OP_NO_COMPRESSION
    context.options |= options

    # Python 2.6's wrap_socket cannot take a cipher list.
    if getattr(context, 'supports_set_ciphers', True):  # Platform-specific: Python 2.6
        context.set_ciphers(ciphers or DEFAULT_CIPHERS)

    # Verification defaults to CERT_REQUIRED; resolved here rather than in
    # the signature because the ssl module may be absent at import time.
    context.verify_mode = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
    if getattr(context, 'check_hostname', None) is not None:  # Platform-specific: Python 3.2
        # urllib3 performs its own hostname/fingerprint verification, so
        # the context-level check stays off.
        context.check_hostname = False
    return context
280
281
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None, ciphers=None, ssl_context=None,
                    ca_cert_dir=None):
    """
    All arguments except for server_hostname, ssl_context, and ca_cert_dir have
    the same meaning as they do when using :func:`ssl.wrap_socket`.

    :param server_hostname:
        When SNI is supported, the expected hostname of the certificate
    :param ssl_context:
        A pre-made :class:`SSLContext` object. If none is provided, one will
        be created using :func:`create_urllib3_context`.
    :param ciphers:
        A string of ciphers we wish the client to support. This is not
        supported on Python 2.6 as the ssl module does not support it.
    :param ca_cert_dir:
        A directory containing CA certificates in multiple separate files, as
        supported by OpenSSL's -CApath flag or the capath argument to
        SSLContext.load_verify_locations().
    :returns: The TLS-wrapped socket.
    :raises SSLError: If the CA bundle or directory cannot be loaded.
    """
    context = ssl_context
    if context is None:
        # Note: This branch of code and all the variables in it are no longer
        # used by urllib3 itself. We should consider deprecating and removing
        # this code.
        context = create_urllib3_context(ssl_version, cert_reqs,
                                         ciphers=ciphers)

    if ca_certs or ca_cert_dir:
        try:
            context.load_verify_locations(ca_certs, ca_cert_dir)
        except IOError as e:  # Platform-specific: Python 2.6, 2.7, 3.2
            raise SSLError(e)
        # Py33 raises FileNotFoundError which subclasses OSError
        # These are not equivalent unless we check the errno attribute
        except OSError as e:  # Platform-specific: Python 3.3 and beyond
            if e.errno == errno.ENOENT:
                raise SSLError(e)
            raise
    elif getattr(context, 'load_default_certs', None) is not None:
        # try to load OS default certs; works well on Windows (require Python3.4+)
        context.load_default_certs()

    if certfile:
        context.load_cert_chain(certfile, keyfile)
    if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
        return context.wrap_socket(sock, server_hostname=server_hostname)

    # Without SNI the server may present the wrong certificate for
    # name-based virtual hosts; warn before proceeding anyway.
    warnings.warn(
        'An HTTPS request has been made, but the SNI (Subject Name '
        'Indication) extension to TLS is not available on this platform. '
        'This may cause the server to present an incorrect TLS '
        'certificate, which can cause validation failures. You can upgrade to '
        'a newer version of Python to solve this. For more information, see '
        'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
        '#ssl-warnings',
        SNIMissingWarning
    )
    return context.wrap_socket(sock)
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/timeout.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/timeout.py
new file mode 100644
index 0000000..9c2e6ef
--- /dev/null
+++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/timeout.py
@@ -0,0 +1,242 @@
1from __future__ import absolute_import
2# The default socket timeout, used by httplib to indicate that no timeout was
3# specified by the user
4from socket import _GLOBAL_DEFAULT_TIMEOUT
5import time
6
7from ..exceptions import TimeoutStateError
8
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()


# Use time.monotonic if available.
# A monotonic clock is immune to system clock adjustments (NTP, DST), so
# elapsed-time measurements stay correct; time.time is the Python 2 fallback.
current_time = getattr(time, "monotonic", time.time)
16
17
class Timeout(object):
    """ Timeout configuration.

    Timeouts can be defined as a default for a pool::

        timeout = Timeout(connect=2.0, read=7.0)
        http = PoolManager(timeout=timeout)
        response = http.request('GET', 'http://example.com/')

    Or per-request (which overrides the default for the pool)::

        response = http.request('GET', 'http://example.com/', timeout=Timeout(10))

    Timeouts can be disabled by setting all the parameters to ``None``::

        no_timeout = Timeout(connect=None, read=None)
        response = http.request('GET', 'http://example.com/', timeout=no_timeout)

    :param total:
        Combines the connect and read timeouts into one budget; the read
        timeout gets whatever time is left over from the connect attempt.
        When both a total and a connect (or read) timeout are given, the
        shorter one wins. Defaults to None.

    :type total: integer, float, or None

    :param connect:
        The maximum amount of time to wait for a connection attempt to a
        server to succeed. Omitting the parameter falls back to the system
        default (the global default timeout in :mod:`socket`). ``None``
        means connection attempts never time out.

    :type connect: integer, float, or None

    :param read:
        The maximum amount of time to wait between consecutive read
        operations for a response from the server. Omitting the parameter
        falls back to the system default (the global default timeout in
        :mod:`socket`). ``None`` means reads never time out.

    :type read: integer, float, or None

    .. note::

        These timeouts bound individual socket operations, not wall-clock
        request time. Python's DNS resolver does not obey the socket
        timeout, and high CPU load, swapping, or process priority can all
        stretch a request.

        Furthermore, a server that trickles one byte every fifteen seconds
        will never trip a 20-second read timeout even though the request
        takes minutes to finish: only the gap between reads is measured,
        not the total response time. If you need a hard wall-clock cutoff,
        run a separate "watcher" thread that cancels the slow request.
    """

    #: Sentinel meaning "use the system default socket timeout".
    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT

    def __init__(self, total=None, connect=_Default, read=_Default):
        # Validate connect, then read, then total -- preserves which error
        # is reported first when several arguments are invalid.
        self._connect = self._validate_timeout(connect, 'connect')
        self._read = self._validate_timeout(read, 'read')
        self.total = self._validate_timeout(total, 'total')
        self._start_connect = None

    def __str__(self):
        return '{0}(connect={1!r}, read={2!r}, total={3!r})'.format(
            type(self).__name__, self._connect, self._read, self.total)

    @classmethod
    def _validate_timeout(cls, value, name):
        """ Check that a timeout attribute is valid.

        :param value: The timeout value to validate
        :param name: The name of the timeout attribute to validate. This
            is used in error messages.
        :return: The validated and casted version of the given value.
        :raises ValueError: If it is a numeric value less than or equal to
            zero, or the type is not an integer, float, or None.
        """
        # The sentinel means "argument not supplied": use the system default.
        if value is _Default:
            return cls.DEFAULT_TIMEOUT

        # None (timeout disabled) and the default sentinel pass through.
        if value is None or value is cls.DEFAULT_TIMEOUT:
            return value

        # bool is a subclass of int and would slip through the float() check.
        if isinstance(value, bool):
            raise ValueError("Timeout cannot be a boolean value. It must "
                             "be an int, float or None.")

        try:
            float(value)
        except (TypeError, ValueError):
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int, float or None." % (name, value))

        try:
            non_positive = value <= 0
        except TypeError:
            # Python 3: e.g. a numeric string survives float() above but
            # cannot be ordered against an int.
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int, float or None." % (name, value))
        if non_positive:
            raise ValueError("Attempted to set %s timeout to %s, but the "
                             "timeout cannot be set to a value less "
                             "than or equal to 0." % (name, value))

        return value

    @classmethod
    def from_float(cls, timeout):
        """ Create a new Timeout from a legacy timeout value.

        httplib.py applies one timeout value to both the connect() and
        recv() socket calls; this builds a :class:`Timeout` that applies
        ``timeout`` to both the connect and the read timeouts the same way.

        :param timeout: The legacy timeout value.
        :type timeout: integer, float, sentinel default object, or None
        :return: Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(connect=timeout, read=timeout)

    def clone(self):
        """ Create a copy of the timeout object

        Timeout properties are stored per-pool but each request needs a
        fresh Timeout object to ensure each one has its own start/stop
        configured.

        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # copy.deepcopy would duplicate _GLOBAL_DEFAULT_TIMEOUT, breaking
        # the identity check socket.py uses to detect the user default --
        # so rebuild the object by hand instead.
        return Timeout(connect=self._connect, read=self._read,
                       total=self.total)

    def start_connect(self):
        """ Start the timeout clock, used during a connect() attempt

        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        started_at = current_time()
        self._start_connect = started_at
        return started_at

    def get_connect_duration(self):
        """ Gets the time elapsed since the call to :meth:`start_connect`.

        :return: Elapsed time.
        :rtype: float
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to get duration for a timer that hasn't been started.
        """
        started_at = self._start_connect
        if started_at is None:
            raise TimeoutStateError("Can't get connect duration for timer "
                                    "that has not started.")
        return current_time() - started_at

    @property
    def connect_timeout(self):
        """ Get the value to use when setting a connection timeout.

        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.

        :return: Connect timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        total = self.total
        if total is None:
            return self._connect

        connect = self._connect
        if connect is None or connect is self.DEFAULT_TIMEOUT:
            return total

        return min(connect, total)

    @property
    def read_timeout(self):
        """ Get the value for the read timeout.

        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.

        If self.total is set, the read timeout is dependent on the amount
        of time taken by the connect timeout. If the connection time has
        not been established, a
        :exc:`~urllib3.exceptions.TimeoutStateError` will be raised.

        :return: Value to use for the read timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
            has not yet been called on this object.
        """
        has_total = (self.total is not None and
                     self.total is not self.DEFAULT_TIMEOUT)
        has_read = (self._read is not None and
                    self._read is not self.DEFAULT_TIMEOUT)

        if not has_total:
            return self._read
        if not has_read:
            # Only the total budget is set: return whatever remains of it.
            return max(0, self.total - self.get_connect_duration())
        if self._start_connect is None:
            # Connect hasn't begun, so total imposes no reduction yet.
            return self._read
        return max(0, min(self.total - self.get_connect_duration(),
                          self._read))
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/url.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/url.py
new file mode 100644
index 0000000..60f826a
--- /dev/null
+++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/url.py
@@ -0,0 +1,230 @@
1from __future__ import absolute_import
2from collections import namedtuple
3
4from ..exceptions import LocationParseError
5
6
# Field order for the Url namedtuple below; mirrors the component order of
# an RFC 3986 URI reference (scheme://auth@host:port/path?query#fragment).
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']

# We only want to normalize urls with an HTTP(S) scheme.
# urllib3 infers URLs without a scheme (None) to be http.
NORMALIZABLE_SCHEMES = ('http', 'https', None)
12
13
class Url(namedtuple('Url', url_attrs)):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`. The scheme and host are normalized to lowercase,
    as both are case-insensitive according to RFC 3986.
    """
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # A non-empty relative path is made absolute.
        if path and not path.startswith('/'):
            path = '/' + path
        # Scheme is case-insensitive (RFC 3986 section 3.1).
        if scheme:
            scheme = scheme.lower()
        # Only lowercase the host for schemes urllib3 treats as HTTP(S).
        if host and scheme in NORMALIZABLE_SCHEMES:
            host = host.lower()
        return super(Url, cls).__new__(
            cls, scheme, auth, host, port, path, query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        base = self.path or '/'
        if self.query is None:
            return base
        return base + '?' + self.query

    @property
    def netloc(self):
        """Network location including host and port"""
        if not self.port:
            return self.host
        return '%s:%d' % (self.host, self.port)

    @property
    def url(self):
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`.
        The returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g.,
        urls with a blank port will have : removed).

        Example: ::

            >>> U = parse_url('http://google.com/mail/')
            >>> U.url
            'http://google.com/mail/'
            >>> Url('http', 'username:password', 'host.com', 80,
            ... '/path', 'query', 'fragment').url
            'http://username:password@host.com:80/path?query#fragment'
        """
        scheme, auth, host, port, path, query, fragment = self
        # "is not None" checks deliberately let empty strings (and port 0)
        # through, so every supplied component appears in the output.
        pieces = []
        if scheme is not None:
            pieces.append(scheme + '://')
        if auth is not None:
            pieces.append(auth + '@')
        if host is not None:
            pieces.append(host)
        if port is not None:
            pieces.append(':' + str(port))
        if path is not None:
            pieces.append(path)
        if query is not None:
            pieces.append('?' + query)
        if fragment is not None:
            pieces.append('#' + fragment)
        return ''.join(pieces)

    def __str__(self):
        return self.url
97
98
def split_first(s, delims):
    """
    Given a string and an iterable of delimiters, split on the first found
    delimiter. Return two split parts and the matched delimiter.

    If not found, then the first part is the full input string.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of
    delims.

    :param s: The string to split.
    :param delims: Iterable of single-character delimiters to try.
    :return: ``(head, tail, delim)`` where ``delim`` is the earliest
        delimiter found (or None), and ``head``/``tail`` are the parts
        before/after it.
    """
    best_idx = None
    best_delim = None
    for d in delims:
        idx = s.find(d)
        if idx < 0:
            # Delimiter absent; negative indices never reach best_idx.
            continue

        if best_idx is None or idx < best_idx:
            best_idx = idx
            best_delim = d

    # Fix: the original also tested "min_idx < 0" here, which was
    # unreachable dead code -- negative find() results are skipped above.
    if best_idx is None:
        return s, '', None

    return s[:best_idx], s[best_idx + 1:], best_delim
130
131
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    :param url: URL string to parse; may be relative or missing components.
    :return: :class:`.Url` namedtuple with any unparsed fields set to None.
    :raises LocationParseError: if a non-blank port is not a valid integer.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """

    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementations does silly things to be optimal
    # on CPython.

    if not url:
        # Empty
        return Url()

    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    # After this split, `url` holds only the authority component.
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path
        # The delimiter is kept so '?'/'#' handling below still works.
        path = delim + path_

    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)

    # IPv6
    # Bracketed literals keep their brackets so ':' below means "port".
    if url and url[0] == '[':
        host, url = url.split(']', 1)
        host += ']'

    # Port
    if ':' in url:
        _host, port = url.split(':', 1)

        if not host:
            host = _host

        if port:
            # If given, ports must be integers. No whitespace, no plus or
            # minus prefixes, no non-integer digits such as ^2 (superscript).
            if not port.isdigit():
                raise LocationParseError(url)
            # int() can still fail even after isdigit(): isdigit() accepts
            # characters like superscripts that int() rejects.
            try:
                port = int(port)
            except ValueError:
                raise LocationParseError(url)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None

    elif not host and url:
        host = url

    if not path:
        # No path means no query/fragment either; return early.
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)
223
224
def get_host(url):
    """
    Deprecated. Use :func:`parse_url` instead.

    Returns a ``(scheme, host, port)`` tuple for ``url``; the scheme
    defaults to ``'http'`` when the url does not specify one.
    """
    parsed = parse_url(url)
    scheme = parsed.scheme or 'http'
    return scheme, parsed.hostname, parsed.port
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/wait.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/wait.py
new file mode 100644
index 0000000..46392f2
--- /dev/null
+++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/wait.py
@@ -0,0 +1,40 @@
1from .selectors import (
2 HAS_SELECT,
3 DefaultSelector,
4 EVENT_READ,
5 EVENT_WRITE
6)
7
8
def _wait_for_io_events(socks, events, timeout=None):
    """ Waits until the requested IO events are available on the given
    socket(s). Accepts either a single socket or an iterable of sockets,
    and returns the list of sockets ready for immediate interaction. """
    if not HAS_SELECT:
        raise ValueError('Platform does not have a selector')
    if not isinstance(socks, list):
        # A single socket-like object (anything with fileno()) is wrapped
        # in a list; any other non-list iterable is materialized.
        socks = [socks] if hasattr(socks, "fileno") else list(socks)
    with DefaultSelector() as selector:
        for sock in socks:
            selector.register(sock, events)
        ready = selector.select(timeout)
        # select() yields (SelectorKey, event_mask) pairs; keep only the
        # file objects whose mask includes the events we asked for.
        return [key.fileobj for key, mask in ready if mask & events]
27
28
def wait_for_read(socks, timeout=None):
    """ Waits until reading is possible on the given socket, or on any of
    the sockets when a list is passed in. Returns the sockets that can be
    read from immediately. """
    return _wait_for_io_events(socks, EVENT_READ, timeout=timeout)
34
35
def wait_for_write(socks, timeout=None):
    """ Waits until writing is possible on the given socket, or on any of
    the sockets when a list is passed in. Returns the sockets that can be
    written to immediately. """
    return _wait_for_io_events(socks, EVENT_WRITE, timeout=timeout)