diff options
author | Shubham Saini <shubham6405@gmail.com> | 2018-12-11 10:01:23 +0000 |
---|---|---|
committer | Shubham Saini <shubham6405@gmail.com> | 2018-12-11 10:01:23 +0000 |
commit | 68df54d6629ec019142eb149dd037774f2d11e7c (patch) | |
tree | 345bc22d46b4e01a4ba8303b94278952a4ed2b9e /venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3 |
First commit
Diffstat (limited to 'venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3')
36 files changed, 9802 insertions, 0 deletions
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/__init__.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/__init__.py new file mode 100644 index 0000000..1bffade --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/__init__.py | |||
@@ -0,0 +1,97 @@ | |||
1 | """ | ||
2 | urllib3 - Thread-safe connection pooling and re-using. | ||
3 | """ | ||
4 | |||
5 | from __future__ import absolute_import | ||
6 | import warnings | ||
7 | |||
8 | from .connectionpool import ( | ||
9 | HTTPConnectionPool, | ||
10 | HTTPSConnectionPool, | ||
11 | connection_from_url | ||
12 | ) | ||
13 | |||
14 | from . import exceptions | ||
15 | from .filepost import encode_multipart_formdata | ||
16 | from .poolmanager import PoolManager, ProxyManager, proxy_from_url | ||
17 | from .response import HTTPResponse | ||
18 | from .util.request import make_headers | ||
19 | from .util.url import get_host | ||
20 | from .util.timeout import Timeout | ||
21 | from .util.retry import Retry | ||
22 | |||
23 | |||
24 | # Set default logging handler to avoid "No handler found" warnings. | ||
25 | import logging | ||
# Prefer the stdlib NullHandler (present since Python 2.7); fall back to a
# minimal local implementation on ancient interpreters.
try:
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        """No-op logging handler used when the stdlib one is unavailable."""

        def emit(self, record):
            # Intentionally discard every record.
            pass

__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.22'
36 | |||
# Names exported by ``from urllib3 import *``.
__all__ = (
    'HTTPConnectionPool',
    'HTTPSConnectionPool',
    'PoolManager',
    'ProxyManager',
    'HTTPResponse',
    'Retry',
    'Timeout',
    'add_stderr_logger',
    'connection_from_url',
    'disable_warnings',
    'encode_multipart_formdata',
    'get_host',
    'make_headers',
    'proxy_from_url',
)

# Attach a do-nothing handler so applications that import urllib3 without
# configuring logging do not see "No handler found" warnings — the standard
# practice for libraries.
logging.getLogger(__name__).addHandler(NullHandler())
55 | |||
56 | |||
def add_stderr_logger(level=logging.DEBUG):
    """
    Attach a :class:`logging.StreamHandler` (stderr) to urllib3's logger.

    Handy for quickly enabling debug output without configuring the whole
    logging tree.

    :param level: Logging level to set on the urllib3 logger.
    :return: The handler that was attached.
    """
    # This function must live in __init__.py so that __name__ resolves to the
    # package root, even when urllib3 is vendored inside another package.
    pkg_logger = logging.getLogger(__name__)
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    pkg_logger.addHandler(stderr_handler)
    pkg_logger.setLevel(level)
    pkg_logger.debug('Added a stderr logging handler to logger: %s', __name__)
    return stderr_handler
73 | |||
74 | |||
# NullHandler was only needed at import time; keep the namespace tidy.
del NullHandler


# All warning filters *must* be appended unless you're really certain that
# they shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning, append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
91 | |||
92 | |||
def disable_warnings(category=exceptions.HTTPWarning):
    """
    Quickly silence urllib3 warnings.

    :param category: Warning class to ignore; defaults to every
        :class:`~exceptions.HTTPWarning`.
    """
    warnings.simplefilter('ignore', category)
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/_collections.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/_collections.py new file mode 100644 index 0000000..ecbf6b0 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/_collections.py | |||
@@ -0,0 +1,319 @@ | |||
from __future__ import absolute_import

# Mapping/MutableMapping moved to collections.abc in Python 3.3; importing
# them from `collections` is deprecated and fails outright on Python 3.10+.
try:  # Python 3.3+
    from collections.abc import Mapping, MutableMapping
except ImportError:  # Python 2.7
    from collections import Mapping, MutableMapping
try:
    from threading import RLock
except ImportError:  # Platform-specific: No threads available
    class RLock:
        """No-op context-manager lock for platforms built without threading."""

        def __enter__(self):
            pass

        def __exit__(self, exc_type, exc_value, traceback):
            pass


try:  # Python 2.7+
    from collections import OrderedDict
except ImportError:
    from .packages.ordered_dict import OrderedDict
from .packages.six import iterkeys, itervalues, PY3


__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']


# Sentinel distinguishing "no value evicted" from an evicted None.
_Null = object()
25 | |||
26 | |||
class RecentlyUsedContainer(MutableMapping):
    """
    Thread-safe dict-like container that retains at most ``maxsize`` keys,
    discarding the least-recently-used entry once the limit is exceeded.

    :param maxsize:
        Maximum number of recent elements to retain.

    :param dispose_func:
        Optional callback invoked as ``dispose_func(value)`` every time a
        value is evicted or removed from the container.
    """

    ContainerCls = OrderedDict

    def __init__(self, maxsize=10, dispose_func=None):
        self._maxsize = maxsize
        self.dispose_func = dispose_func
        self._container = self.ContainerCls()
        self.lock = RLock()

    def __getitem__(self, key):
        # Pop and re-insert so the entry moves to the most-recently-used end.
        with self.lock:
            entry = self._container.pop(key)
            self._container[key] = entry
            return entry

    def __setitem__(self, key, value):
        displaced = _Null
        with self.lock:
            # Overwriting an existing key displaces its previous value.
            displaced = self._container.get(key, _Null)
            self._container[key] = value

            if len(self._container) > self._maxsize:
                # Nothing was replaced and we are over capacity: drop the
                # least-recently-used entry from the front instead.
                _lru_key, displaced = self._container.popitem(last=False)

        # Dispose outside the lock so callbacks cannot deadlock us.
        if self.dispose_func and displaced is not _Null:
            self.dispose_func(displaced)

    def __delitem__(self, key):
        with self.lock:
            removed = self._container.pop(key)

        if self.dispose_func:
            self.dispose_func(removed)

    def __len__(self):
        with self.lock:
            return len(self._container)

    def __iter__(self):
        raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')

    def clear(self):
        with self.lock:
            # Snapshot the values, then wipe the mapping before disposing.
            doomed = list(itervalues(self._container))
            self._container.clear()

        if self.dispose_func:
            for value in doomed:
                self.dispose_func(value)

    def keys(self):
        with self.lock:
            return list(iterkeys(self._container))
99 | |||
100 | |||
class HTTPHeaderDict(MutableMapping):
    """
    A ``dict``-like container for storing HTTP Headers.

    :param headers:
        An iterable of field-value pairs. Must not contain multiple field names
        when compared case-insensitively.

    :param kwargs:
        Additional field-value pairs to pass in to ``dict.update``.

    Field names are stored and compared case-insensitively in compliance with
    RFC 7230. Iteration provides the first case-sensitive key seen for each
    case-insensitive pair.

    Using ``__setitem__`` syntax overwrites fields that compare equal
    case-insensitively in order to maintain ``dict``'s api. For fields that
    compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
    in a loop.

    If multiple fields that are equal case-insensitively are passed to the
    constructor or ``.update``, the behavior is undefined and some will be
    lost.

    >>> headers = HTTPHeaderDict()
    >>> headers.add('Set-Cookie', 'foo=bar')
    >>> headers.add('set-cookie', 'baz=quxx')
    >>> headers['content-length'] = '7'
    >>> headers['SET-cookie']
    'foo=bar, baz=quxx'
    >>> headers['Content-Length']
    '7'
    """

    def __init__(self, headers=None, **kwargs):
        super(HTTPHeaderDict, self).__init__()
        # Maps lowercased name -> [original-cased name, value, value, ...]
        self._container = OrderedDict()
        if headers is not None:
            if isinstance(headers, HTTPHeaderDict):
                self._copy_from(headers)
            else:
                self.extend(headers)
        if kwargs:
            self.extend(kwargs)

    def __setitem__(self, key, val):
        lowered = key.lower()
        self._container[lowered] = [key, val]
        return self._container[lowered]

    def __getitem__(self, key):
        entry = self._container[key.lower()]
        # Multiple stored values are merged per RFC 7230 comma folding.
        return ', '.join(entry[1:])

    def __delitem__(self, key):
        del self._container[key.lower()]

    def __contains__(self, key):
        return key.lower() in self._container

    def __eq__(self, other):
        if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
            return False
        if not isinstance(other, type(self)):
            other = type(self)(other)
        mine = {k.lower(): v for k, v in self.itermerged()}
        theirs = {k.lower(): v for k, v in other.itermerged()}
        return mine == theirs

    def __ne__(self, other):
        return not self.__eq__(other)

    if not PY3:  # Python 2
        iterkeys = MutableMapping.iterkeys
        itervalues = MutableMapping.itervalues

    __marker = object()

    def __len__(self):
        return len(self._container)

    def __iter__(self):
        # Yield only the originally-cased field names.
        for entry in self._container.values():
            yield entry[0]

    def pop(self, key, default=__marker):
        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        # MutableMapping.pop cannot be reused because of the private marker,
        # and dict.pop would leak the internal [name, value, ...] lists.
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value

    def discard(self, key):
        # Like __delitem__ but silent when the key is absent.
        try:
            del self[key]
        except KeyError:
            pass

    def add(self, key, val):
        """Adds a (name, value) pair, doesn't overwrite the value if it already
        exists.

        >>> headers = HTTPHeaderDict(foo='bar')
        >>> headers.add('Foo', 'baz')
        >>> headers['foo']
        'bar, baz'
        """
        lowered = key.lower()
        fresh = [key, val]
        # setdefault keeps the common no-entry case to a single dict hit:
        # either our fresh list is stored, or the existing one comes back.
        stored = self._container.setdefault(lowered, fresh)
        if stored is not fresh:
            stored.append(val)

    def extend(self, *args, **kwargs):
        """Generic import function for any type of header-like object.
        Adapted version of MutableMapping.update in order to insert items
        with self.add instead of self.__setitem__
        """
        if len(args) > 1:
            raise TypeError("extend() takes at most 1 positional "
                            "arguments ({0} given)".format(len(args)))
        other = args[0] if len(args) >= 1 else ()

        if isinstance(other, HTTPHeaderDict):
            for key, val in other.iteritems():
                self.add(key, val)
        elif isinstance(other, Mapping):
            for key in other:
                self.add(key, other[key])
        elif hasattr(other, "keys"):
            for key in other.keys():
                self.add(key, other[key])
        else:
            for key, value in other:
                self.add(key, value)

        for key, value in kwargs.items():
            self.add(key, value)

    def getlist(self, key, default=__marker):
        """Returns a list of all the values for the named field. Returns an
        empty list if the key doesn't exist."""
        try:
            entry = self._container[key.lower()]
        except KeyError:
            return [] if default is self.__marker else default
        else:
            # Slice copies, so callers cannot mutate internal state.
            return entry[1:]

    # Backwards compatibility for httplib
    getheaders = getlist
    getallmatchingheaders = getlist
    iget = getlist

    # Backwards compatibility for http.cookiejar
    get_all = getlist

    def __repr__(self):
        return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))

    def _copy_from(self, other):
        for key in other:
            val = other.getlist(key)
            if isinstance(val, list):
                # Defensive copy so two dicts never share a value list.
                val = list(val)
            self._container[key.lower()] = [key] + val

    def copy(self):
        clone = type(self)()
        clone._copy_from(self)
        return clone

    def iteritems(self):
        """Iterate over all header lines, including duplicate ones."""
        for key in self:
            entry = self._container[key.lower()]
            for val in entry[1:]:
                yield entry[0], val

    def itermerged(self):
        """Iterate over all headers, merging duplicate ones together."""
        for key in self:
            entry = self._container[key.lower()]
            yield entry[0], ', '.join(entry[1:])

    def items(self):
        return list(self.iteritems())

    @classmethod
    def from_httplib(cls, message):  # Python 2
        """Read headers from a Python 2 httplib message object."""
        # httplib does not expose multi-headers cleanly; re-parse the raw
        # header lines, folding continuation lines into the previous header.
        headers = []

        for line in message.headers:
            if line.startswith((' ', '\t')):
                # Continuation of the previous header's value.
                key, value = headers[-1]
                headers[-1] = (key, value + '\r\n' + line.rstrip())
                continue

            key, value = line.split(':', 1)
            headers.append((key, value.strip()))

        return cls(headers)
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/connection.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/connection.py new file mode 100644 index 0000000..67090e3 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/connection.py | |||
@@ -0,0 +1,373 @@ | |||
1 | from __future__ import absolute_import | ||
2 | import datetime | ||
3 | import logging | ||
4 | import os | ||
5 | import sys | ||
6 | import socket | ||
7 | from socket import error as SocketError, timeout as SocketTimeout | ||
8 | import warnings | ||
9 | from .packages import six | ||
10 | from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection | ||
11 | from .packages.six.moves.http_client import HTTPException # noqa: F401 | ||
12 | |||
try:
    # Probe for SSL support at import time.
    import ssl
    BaseSSLError = ssl.SSLError
except (ImportError, AttributeError):  # Platform-specific: No SSL.
    ssl = None

    class BaseSSLError(BaseException):
        """Stand-in error type for interpreters compiled without ssl."""
        pass


try:  # Python 3:
    # Not a no-op, we're adding this to the namespace so it can be imported.
    ConnectionError = ConnectionError
except NameError:  # Python 2:
    class ConnectionError(Exception):
        """Python 2 substitute for the builtin ConnectionError."""
        pass
29 | |||
30 | |||
31 | from .exceptions import ( | ||
32 | NewConnectionError, | ||
33 | ConnectTimeoutError, | ||
34 | SubjectAltNameWarning, | ||
35 | SystemTimeWarning, | ||
36 | ) | ||
37 | from .packages.ssl_match_hostname import match_hostname, CertificateError | ||
38 | |||
39 | from .util.ssl_ import ( | ||
40 | resolve_cert_reqs, | ||
41 | resolve_ssl_version, | ||
42 | assert_fingerprint, | ||
43 | create_urllib3_context, | ||
44 | ssl_wrap_socket | ||
45 | ) | ||
46 | |||
47 | |||
48 | from .util import connection | ||
49 | |||
50 | from ._collections import HTTPHeaderDict | ||
51 | |||
log = logging.getLogger(__name__)

# Default ports, keyed by URL scheme.
port_by_scheme = {
    'http': 80,
    'https': 443,
}

# When updating RECENT_DATE, move it to within two years of the current date,
# and no earlier than 6 months ago.
RECENT_DATE = datetime.date(2016, 1, 1)
63 | |||
64 | |||
class DummyConnection(object):
    """Sentinel class; its presence signals a failed ConnectionCls import."""
    pass
68 | |||
69 | |||
class HTTPConnection(_HTTPConnection, object):
    """
    Based on httplib.HTTPConnection but provides an extra constructor
    backwards-compatibility layer between older and newer Pythons.

    Additional keyword parameters are used to configure attributes of the connection.
    Accepted parameters include:

    - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
    - ``source_address``: Set the source address for the current connection.

      .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x

    - ``socket_options``: Set specific options on the underlying socket. If not specified, then
      defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
      Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.

      For example, if you wish to enable TCP Keep Alive in addition to the defaults,
      you might pass::

          HTTPConnection.default_socket_options + [
              (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
          ]

      Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
    """

    default_port = port_by_scheme['http']

    #: Disable Nagle's algorithm by default.
    #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
    default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]

    #: Whether this connection verifies the host's certificate.
    is_verified = False

    def __init__(self, *args, **kw):
        if six.PY3:  # Python 3
            # Python 3 httplib rejects the ``strict`` kwarg; strictness is
            # always on there, so the flag is simply dropped.
            kw.pop('strict', None)

        # Pre-set source_address in case we have an older Python like 2.6.
        self.source_address = kw.get('source_address')

        if sys.version_info < (2, 7):  # Python 2.6
            # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
            # not newer versions. We can still use it when creating a
            # connection though, so we pop it *after* we have saved it as
            # self.source_address.
            kw.pop('source_address', None)

        #: The socket options provided by the user. If no options are
        #: provided, we use the default options.
        self.socket_options = kw.pop('socket_options', self.default_socket_options)

        # Superclass also sets self.source_address in Python 2.7+.
        _HTTPConnection.__init__(self, *args, **kw)

    def _new_conn(self):
        """ Establish a socket connection and set nodelay settings on it.

        :return: New socket connection.
        :raises ConnectTimeoutError: if the connect attempt timed out.
        :raises NewConnectionError: for any other socket-level failure.
        """
        extra_kw = {}
        if self.source_address:
            extra_kw['source_address'] = self.source_address

        if self.socket_options:
            extra_kw['socket_options'] = self.socket_options

        try:
            conn = connection.create_connection(
                (self.host, self.port), self.timeout, **extra_kw)

        except SocketTimeout:
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, self.timeout))

        except SocketError as e:
            raise NewConnectionError(
                self, "Failed to establish a new connection: %s" % e)

        return conn

    def _prepare_conn(self, conn):
        """Adopt *conn* as this connection's socket and set up tunneling."""
        self.sock = conn
        # the _tunnel_host attribute was added in python 2.6.3 (via
        # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
        # not have them.
        if getattr(self, '_tunnel_host', None):
            # TODO: Fix tunnel so it doesn't depend on self.sock state.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0

    def connect(self):
        """Open a TCP connection to the target host."""
        conn = self._new_conn()
        self._prepare_conn(conn)

    def request_chunked(self, method, url, body=None, headers=None):
        """
        Alternative to the common request method, which sends the
        body with chunked encoding and not as one block
        """
        headers = HTTPHeaderDict(headers if headers is not None else {})
        # HTTPHeaderDict membership is case-insensitive, so these catch any
        # casing the caller used.
        skip_accept_encoding = 'accept-encoding' in headers
        skip_host = 'host' in headers
        self.putrequest(
            method,
            url,
            skip_accept_encoding=skip_accept_encoding,
            skip_host=skip_host
        )
        for header, value in headers.items():
            self.putheader(header, value)
        if 'transfer-encoding' not in headers:
            self.putheader('Transfer-Encoding', 'chunked')
        self.endheaders()

        if body is not None:
            stringish_types = six.string_types + (six.binary_type,)
            if isinstance(body, stringish_types):
                # A single string/bytes body is sent as one chunk.
                body = (body,)
            for chunk in body:
                if not chunk:
                    continue
                if not isinstance(chunk, six.binary_type):
                    chunk = chunk.encode('utf8')
                # Wire format per chunk: hex length, CRLF, payload, CRLF.
                len_str = hex(len(chunk))[2:]
                self.send(len_str.encode('utf-8'))
                self.send(b'\r\n')
                self.send(chunk)
                self.send(b'\r\n')

        # After the if clause, to always have a closed body
        self.send(b'0\r\n\r\n')
206 | |||
207 | |||
class HTTPSConnection(HTTPConnection):
    """HTTP connection whose socket is wrapped with TLS (no verification)."""

    default_port = port_by_scheme['https']

    ssl_version = None

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                 ssl_context=None, **kw):

        HTTPConnection.__init__(self, host, port, strict=strict,
                                timeout=timeout, **kw)

        self.key_file = key_file
        self.cert_file = cert_file
        self.ssl_context = ssl_context

        # Required property for Google AppEngine 1.9.0 which otherwise causes
        # HTTPS requests to go out as HTTP. (See Issue #356)
        self._protocol = 'https'

    def connect(self):
        conn = self._new_conn()
        self._prepare_conn(conn)

        # Build a default SSLContext lazily when the caller supplied none.
        if self.ssl_context is None:
            self.ssl_context = create_urllib3_context(
                ssl_version=resolve_ssl_version(None),
                cert_reqs=resolve_cert_reqs(None),
            )

        self.sock = ssl_wrap_socket(
            sock=conn,
            keyfile=self.key_file,
            certfile=self.cert_file,
            ssl_context=self.ssl_context,
        )
244 | |||
245 | |||
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Based on httplib.HTTPSConnection but wraps the socket with
    SSL certification.

    Unlike the plain :class:`HTTPSConnection`, this class verifies the peer
    certificate (per ``cert_reqs``) and can additionally assert a hostname
    or pin a certificate fingerprint.
    """
    #: Verification mode; resolved via resolve_cert_reqs() in connect().
    cert_reqs = None
    #: Path to a CA bundle file.
    ca_certs = None
    #: Path to a directory of CA certificates.
    ca_cert_dir = None
    #: TLS version hint passed to resolve_ssl_version().
    ssl_version = None
    #: Expected peer-certificate fingerprint, when pinning is requested.
    assert_fingerprint = None

    def set_cert(self, key_file=None, cert_file=None,
                 cert_reqs=None, ca_certs=None,
                 assert_hostname=None, assert_fingerprint=None,
                 ca_cert_dir=None):
        """
        Configure client-certificate and server-verification settings.

        This method should only be called once, before the connection is used.

        :param key_file: Path to the client's private key file.
        :param cert_file: Path to the client's certificate file.
        :param cert_reqs: Verification mode; guessed when omitted (see below).
        :param ca_certs: CA bundle file path (``~`` is expanded).
        :param assert_hostname: Hostname to match the peer cert against, or
            ``False`` to disable hostname checking in connect().
        :param assert_fingerprint: Digest to pin the peer certificate to.
        :param ca_cert_dir: Directory of CA certificates (``~`` is expanded).
        """
        # If cert_reqs is not provided, we can try to guess. If the user gave
        # us a cert database, we assume they want to use it: otherwise, if
        # they gave us an SSL Context object we should use whatever is set for
        # it.
        if cert_reqs is None:
            if ca_certs or ca_cert_dir:
                cert_reqs = 'CERT_REQUIRED'
            elif self.ssl_context is not None:
                cert_reqs = self.ssl_context.verify_mode

        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
        # `x and f(x)` keeps None/'' untouched while expanding real paths.
        self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
        self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)

    def connect(self):
        # Add certificate verification
        conn = self._new_conn()

        hostname = self.host
        if getattr(self, '_tunnel_host', None):
            # _tunnel_host was added in Python 2.6.3
            # (See: http://hg.python.org/cpython/rev/0f57b30a152f)

            self.sock = conn
            # Calls self._set_hostport(), so self.host is
            # self._tunnel_host below.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0

            # Override the host with the one we're requesting data from.
            hostname = self._tunnel_host

        # A clock set before RECENT_DATE almost certainly breaks certificate
        # validity-window checks, so warn up front rather than fail cryptically.
        is_time_off = datetime.date.today() < RECENT_DATE
        if is_time_off:
            warnings.warn((
                'System time is way off (before {0}). This will probably '
                'lead to SSL verification errors').format(RECENT_DATE),
                SystemTimeWarning
            )

        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        if self.ssl_context is None:
            self.ssl_context = create_urllib3_context(
                ssl_version=resolve_ssl_version(self.ssl_version),
                cert_reqs=resolve_cert_reqs(self.cert_reqs),
            )

        context = self.ssl_context
        context.verify_mode = resolve_cert_reqs(self.cert_reqs)
        self.sock = ssl_wrap_socket(
            sock=conn,
            keyfile=self.key_file,
            certfile=self.cert_file,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            server_hostname=hostname,
            ssl_context=context)

        if self.assert_fingerprint:
            # Fingerprint pinning takes the place of hostname verification.
            assert_fingerprint(self.sock.getpeercert(binary_form=True),
                               self.assert_fingerprint)
        elif context.verify_mode != ssl.CERT_NONE \
                and not getattr(context, 'check_hostname', False) \
                and self.assert_hostname is not False:
            # While urllib3 attempts to always turn off hostname matching from
            # the TLS library, this cannot always be done. So we check whether
            # the TLS Library still thinks it's matching hostnames.
            cert = self.sock.getpeercert()
            if not cert.get('subjectAltName', ()):
                warnings.warn((
                    'Certificate for {0} has no `subjectAltName`, falling back to check for a '
                    '`commonName` for now. This feature is being removed by major browsers and '
                    'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
                    'for details.)'.format(hostname)),
                    SubjectAltNameWarning
                )
            _match_hostname(cert, self.assert_hostname or hostname)

        self.is_verified = (
            context.verify_mode == ssl.CERT_REQUIRED or
            self.assert_fingerprint is not None
        )
352 | |||
353 | |||
def _match_hostname(cert, asserted_hostname):
    """Run ``match_hostname``; on mismatch, log and attach the cert to the error."""
    try:
        match_hostname(cert, asserted_hostname)
    except CertificateError as e:
        log.error(
            'Certificate did not match expected hostname: %s. '
            'Certificate: %s', asserted_hostname, cert
        )
        # Add cert to exception and reraise so client code can inspect
        # the cert when catching the exception, if they want to
        e._peer_cert = cert
        raise
366 | |||
367 | |||
# When SSL support is present, export the verifying class as the default
# HTTPSConnection; otherwise export DummyConnection so pool code can detect
# the missing TLS support.
if ssl:
    # Make a copy for testing.
    UnverifiedHTTPSConnection = HTTPSConnection
    HTTPSConnection = VerifiedHTTPSConnection
else:
    HTTPSConnection = DummyConnection
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/connectionpool.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/connectionpool.py new file mode 100644 index 0000000..b099ca8 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/connectionpool.py | |||
@@ -0,0 +1,905 @@ | |||
1 | from __future__ import absolute_import | ||
2 | import errno | ||
3 | import logging | ||
4 | import sys | ||
5 | import warnings | ||
6 | |||
7 | from socket import error as SocketError, timeout as SocketTimeout | ||
8 | import socket | ||
9 | |||
10 | |||
11 | from .exceptions import ( | ||
12 | ClosedPoolError, | ||
13 | ProtocolError, | ||
14 | EmptyPoolError, | ||
15 | HeaderParsingError, | ||
16 | HostChangedError, | ||
17 | LocationValueError, | ||
18 | MaxRetryError, | ||
19 | ProxyError, | ||
20 | ReadTimeoutError, | ||
21 | SSLError, | ||
22 | TimeoutError, | ||
23 | InsecureRequestWarning, | ||
24 | NewConnectionError, | ||
25 | ) | ||
26 | from .packages.ssl_match_hostname import CertificateError | ||
27 | from .packages import six | ||
28 | from .packages.six.moves import queue | ||
29 | from .connection import ( | ||
30 | port_by_scheme, | ||
31 | DummyConnection, | ||
32 | HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, | ||
33 | HTTPException, BaseSSLError, | ||
34 | ) | ||
35 | from .request import RequestMethods | ||
36 | from .response import HTTPResponse | ||
37 | |||
38 | from .util.connection import is_connection_dropped | ||
39 | from .util.request import set_file_position | ||
40 | from .util.response import assert_header_parsing | ||
41 | from .util.retry import Retry | ||
42 | from .util.timeout import Timeout | ||
43 | from .util.url import get_host, Url | ||
44 | |||
45 | |||
if six.PY2:
    # Queue is imported for side effects on MS Windows
    import Queue as _unused_module_Queue  # noqa: F401

# six.moves.xrange is Python 2's xrange, and the builtin range on Python 3.
xrange = six.moves.xrange

# Module-level logger for connection lifecycle / retry diagnostics.
log = logging.getLogger(__name__)

# Sentinel used to distinguish "argument not supplied" from an explicit
# ``None`` (e.g. the ``timeout`` parameter of ``urlopen``).
_Default = object()
55 | |||
56 | |||
57 | # Pool objects | ||
class ConnectionPool(object):
    """
    Abstract base for connection pools, e.g. :class:`.HTTPConnectionPool`
    and :class:`.HTTPSConnectionPool`.

    Instances are usable as context managers; leaving the ``with`` block
    closes the pool.
    """

    # URL scheme handled by the pool; subclasses override ('http'/'https').
    scheme = None
    # Idle connections are kept in a LIFO queue.
    QueueCls = queue.LifoQueue

    def __init__(self, host, port=None):
        if not host:
            raise LocationValueError("No host specified.")

        # Host is lower-cased so later same-host comparisons are
        # case-insensitive.
        self.host = _ipv6_host(host).lower()
        # Separate copy of the host used when tunnelling through a proxy.
        self._proxy_host = host.lower()
        self.port = port

    def __str__(self):
        cls_name = type(self).__name__
        return '%s(host=%r, port=%r)' % (cls_name, self.host, self.port)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # Returning False lets any in-flight exception propagate.
        return False

    def close(self):
        """
        Close all pooled connections and disable the pool.
        """
        pass
92 | |||
93 | |||
# Errnos raised by a non-blocking socket when no data is ready yet; treated
# as read timeouts by ``_raise_timeout`` below.
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
96 | |||
97 | |||
class HTTPConnectionPool(ConnectionPool, RequestMethods):
    """
    Thread-safe connection pool for one host.

    :param host:
        Host used for this HTTP Connection (e.g. "localhost"), passed into
        :class:`httplib.HTTPConnection`.

    :param port:
        Port used for this HTTP Connection (None is equivalent to 80), passed
        into :class:`httplib.HTTPConnection`.

    :param strict:
        Causes BadStatusLine to be raised if the status line can't be parsed
        as a valid HTTP/1.0 or 1.1 status line, passed into
        :class:`httplib.HTTPConnection`.

        .. note::
           Only works in Python 2. This parameter is ignored in Python 3.

    :param timeout:
        Socket timeout in seconds for each individual connection. This can
        be a float or integer, which sets the timeout for the HTTP request,
        or an instance of :class:`urllib3.util.Timeout` which gives you more
        fine-grained control over request timeouts. After the constructor has
        been parsed, this is always a `urllib3.util.Timeout` object.

    :param maxsize:
        Number of connections to save that can be reused. More than 1 is useful
        in multithreaded situations. If ``block`` is set to False, more
        connections will be created but they will not be saved once they've
        been used.

    :param block:
        If set to True, no more than ``maxsize`` connections will be used at
        a time. When no free connections are available, the call will block
        until a connection has been released. This is a useful side effect for
        particular multithreaded situations where one does not want to use more
        than maxsize connections per host to prevent flooding.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param retries:
        Retry configuration to use by default with requests in this pool.

    :param _proxy:
        Parsed proxy URL, should not be used directly, instead, see
        :class:`urllib3.connectionpool.ProxyManager`"

    :param _proxy_headers:
        A dictionary with proxy headers, should not be used directly,
        instead, see :class:`urllib3.connectionpool.ProxyManager`"

    :param \\**conn_kw:
        Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
        :class:`urllib3.connection.HTTPSConnection` instances.
    """

    scheme = 'http'
    ConnectionCls = HTTPConnection
    ResponseCls = HTTPResponse

    def __init__(self, host, port=None, strict=False,
                 timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
                 headers=None, retries=None,
                 _proxy=None, _proxy_headers=None,
                 **conn_kw):
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)

        self.strict = strict

        # Accept a bare int/float and normalize it into a Timeout object.
        if not isinstance(timeout, Timeout):
            timeout = Timeout.from_float(timeout)

        # Fall back to the library-wide default retry policy when none given.
        if retries is None:
            retries = Retry.DEFAULT

        self.timeout = timeout
        self.retries = retries

        self.pool = self.QueueCls(maxsize)
        self.block = block

        self.proxy = _proxy
        self.proxy_headers = _proxy_headers or {}

        # Fill the queue up so that doing get() on it will block properly
        for _ in xrange(maxsize):
            self.pool.put(None)

        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
        self.conn_kw = conn_kw

        if self.proxy:
            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
            # We cannot know if the user has added default socket options, so we cannot replace the
            # list.
            self.conn_kw.setdefault('socket_options', [])

    def _new_conn(self):
        """
        Return a fresh :class:`HTTPConnection`.
        """
        self.num_connections += 1
        log.debug("Starting new HTTP connection (%d): %s",
                  self.num_connections, self.host)

        conn = self.ConnectionCls(host=self.host, port=self.port,
                                  timeout=self.timeout.connect_timeout,
                                  strict=self.strict, **self.conn_kw)
        return conn

    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.

        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.

        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError:  # self.pool is None
            raise ClosedPoolError(self, "Pool is closed.")

        except queue.Empty:
            if self.block:
                raise EmptyPoolError(self,
                                     "Pool reached maximum size and no more "
                                     "connections are allowed.")
            pass  # Oh well, we'll create a new connection then

        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.debug("Resetting dropped connection: %s", self.host)
            conn.close()
            if getattr(conn, 'auto_open', 1) == 0:
                # This is a proxied connection that has been mutated by
                # httplib._tunnel() and cannot be reused (since it would
                # attempt to bypass the proxy)
                conn = None

        return conn or self._new_conn()

    def _put_conn(self, conn):
        """
        Put a connection back into the pool.

        :param conn:
            Connection object for the current host and port as returned by
            :meth:`._new_conn` or :meth:`._get_conn`.

        If the pool is already full, the connection is closed and discarded
        because we exceeded maxsize. If connections are discarded frequently,
        then maxsize should be increased.

        If the pool is closed, then the connection will be closed and discarded.
        """
        try:
            self.pool.put(conn, block=False)
            return  # Everything is dandy, done.
        except AttributeError:
            # self.pool is None.
            pass
        except queue.Full:
            # This should never happen if self.block == True
            log.warning(
                "Connection pool is full, discarding connection: %s",
                self.host)

        # Connection never got put back into the pool, close it.
        if conn:
            conn.close()

    def _validate_conn(self, conn):
        """
        Called right before a request is made, after the socket is created.
        """
        pass

    def _prepare_proxy(self, conn):
        """
        Subclass hook invoked on a fresh connection when a proxy is
        configured (HTTPS pools establish the CONNECT tunnel here).
        """
        # Nothing to do for HTTP connections.
        pass

    def _get_timeout(self, timeout):
        """ Helper that always returns a :class:`urllib3.util.Timeout` """
        if timeout is _Default:
            return self.timeout.clone()

        if isinstance(timeout, Timeout):
            return timeout.clone()
        else:
            # User passed us an int/float. This is for backwards compatibility,
            # can be removed later
            return Timeout.from_float(timeout)

    def _raise_timeout(self, err, url, timeout_value):
        """Is the error actually a timeout? Will raise a ReadTimeout or pass"""

        if isinstance(err, SocketTimeout):
            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)

        # See the above comment about EAGAIN in Python 3. In Python 2 we have
        # to specifically catch it and throw the timeout error
        if hasattr(err, 'errno') and err.errno in _blocking_errnos:
            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)

        # Catch possible read timeouts thrown as SSL errors. If not the
        # case, rethrow the original. We need to do this because of:
        # http://bugs.python.org/issue10272
        if 'timed out' in str(err) or 'did not complete (read)' in str(err):  # Python 2.6
            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)

    def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
                      **httplib_request_kw):
        """
        Perform a request on a given urllib connection object taken from our
        pool.

        :param conn:
            a connection from one of our connection pools

        :param timeout:
            Socket timeout in seconds for the request. This can be a
            float or integer, which will set the same timeout value for
            the socket connect and the socket read, or an instance of
            :class:`urllib3.util.Timeout`, which gives you more fine-grained
            control over your timeouts.
        """
        self.num_requests += 1

        timeout_obj = self._get_timeout(timeout)
        timeout_obj.start_connect()
        conn.timeout = timeout_obj.connect_timeout

        # Trigger any extra validation we need to do.
        try:
            self._validate_conn(conn)
        except (SocketTimeout, BaseSSLError) as e:
            # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
            self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
            raise

        # conn.request() calls httplib.*.request, not the method in
        # urllib3.request. It also calls makefile (recv) on the socket.
        if chunked:
            conn.request_chunked(method, url, **httplib_request_kw)
        else:
            conn.request(method, url, **httplib_request_kw)

        # Reset the timeout for the recv() on the socket
        read_timeout = timeout_obj.read_timeout

        # App Engine doesn't have a sock attr
        if getattr(conn, 'sock', None):
            # In Python 3 socket.py will catch EAGAIN and return None when you
            # try and read into the file pointer created by http.client, which
            # instead raises a BadStatusLine exception. Instead of catching
            # the exception and assuming all BadStatusLine exceptions are read
            # timeouts, check for a zero timeout before making the request.
            if read_timeout == 0:
                raise ReadTimeoutError(
                    self, url, "Read timed out. (read timeout=%s)" % read_timeout)
            if read_timeout is Timeout.DEFAULT_TIMEOUT:
                conn.sock.settimeout(socket.getdefaulttimeout())
            else:  # None or a value
                conn.sock.settimeout(read_timeout)

        # Receive the response from the server
        try:
            try:  # Python 2.7, use buffering of HTTP responses
                httplib_response = conn.getresponse(buffering=True)
            except TypeError:  # Python 2.6 and older, Python 3
                try:
                    httplib_response = conn.getresponse()
                except Exception as e:
                    # Remove the TypeError from the exception chain in Python 3;
                    # otherwise it looks like a programming error was the cause.
                    six.raise_from(e, None)
        except (SocketTimeout, BaseSSLError, SocketError) as e:
            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
            raise

        # AppEngine doesn't have a version attr.
        http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
        log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
                  method, url, http_version, httplib_response.status,
                  httplib_response.length)

        # Malformed headers are logged but do not abort the response.
        try:
            assert_header_parsing(httplib_response.msg)
        except (HeaderParsingError, TypeError) as hpe:  # Platform-specific: Python 3
            log.warning(
                'Failed to parse headers (url=%s): %s',
                self._absolute_url(url), hpe, exc_info=True)

        return httplib_response

    def _absolute_url(self, path):
        # Rebuild a full URL (scheme://host:port/path) for log messages.
        return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url

    def close(self):
        """
        Close all pooled connections and disable the pool.
        """
        # Disable access to the pool
        old_pool, self.pool = self.pool, None

        try:
            while True:
                conn = old_pool.get(block=False)
                if conn:
                    conn.close()

        except queue.Empty:
            pass  # Done.

    def is_same_host(self, url):
        """
        Check if the given ``url`` is a member of the same host as this
        connection pool.
        """
        if url.startswith('/'):
            return True

        # TODO: Add optional support for socket.gethostbyname checking.
        scheme, host, port = get_host(url)

        host = _ipv6_host(host).lower()

        # Use explicit default port for comparison when none is given
        if self.port and not port:
            port = port_by_scheme.get(scheme)
        elif not self.port and port == port_by_scheme.get(scheme):
            port = None

        return (scheme, host, port) == (self.scheme, self.host, self.port)

    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, chunked=False,
                body_pos=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param chunked:
            If True, urllib3 will send the body using chunked transfer
            encoding. Otherwise, urllib3 will send the body using the standard
            content-length form. Defaults to False.

        :param int body_pos:
            Position to seek to in file-like body in the event of a retry or
            redirect. Typically this won't need to be set because urllib3 will
            auto-populate the value when needed.

        :param \\**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Track whether `conn` needs to be released before
        # returning/raising/recursing. Update this variable if necessary, and
        # leave `release_conn` constant throughout the function. That way, if
        # the function recurses, the original value of `release_conn` will be
        # passed down into the recursive call, and its value will be respected.
        #
        # See issue #651 [1] for details.
        #
        # [1] <https://github.com/shazow/urllib3/issues/651>
        release_this_conn = release_conn

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        # Keep track of whether we cleanly exited the except block. This
        # ensures we do proper cleanup in finally.
        clean_exit = False

        # Rewind body position, if needed. Record current position
        # for future rewinds in the event of a redirect/retry.
        body_pos = set_file_position(body, body_pos)

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers,
                                                  chunked=chunked)

            # If we're going to release the connection in ``finally:``, then
            # the response doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = conn if not release_conn else None

            # Pass method to Response for length checking
            response_kw['request_method'] = method

            # Import httplib's response into our own wrapper object
            response = self.ResponseCls.from_httplib(httplib_response,
                                                     pool=self,
                                                     connection=response_conn,
                                                     retries=retries,
                                                     **response_kw)

            # Everything went great!
            clean_exit = True

        except queue.Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (TimeoutError, HTTPException, SocketError, ProtocolError,
                BaseSSLError, SSLError, CertificateError) as e:
            # Discard the connection for these exceptions. It will be
            # replaced during the next _get_conn() call.
            clean_exit = False
            if isinstance(e, (BaseSSLError, CertificateError)):
                e = SSLError(e)
            elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if not clean_exit:
                # We hit some kind of exception, handled or otherwise. We need
                # to throw the connection away unless explicitly told not to.
                # Close the connection, set the variable to None, and make sure
                # we put the None back in the pool to avoid leaking it.
                conn = conn and conn.close()
                release_this_conn = True

            if release_this_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s", retries, err, url)
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, body_pos=body_pos,
                                **response_kw)

        def drain_and_release_conn(response):
            # Best-effort drain: reading the full body hands the connection
            # back to the pool; a failure here just means the connection is
            # discarded rather than reused.
            try:
                # discard any remaining response body, the connection will be
                # released back to the pool once the entire response is read
                response.read()
            except (TimeoutError, HTTPException, SocketError, ProtocolError,
                    BaseSSLError, SSLError) as e:
                pass

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                # 303 See Other: the follow-up request must be a GET.
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    # Drain and release the connection for this response, since
                    # we're not returning it to be released manually.
                    drain_and_release_conn(response)
                    raise
                return response

            # drain and return the connection to the pool before recursing
            drain_and_release_conn(response)

            retries.sleep_for_retry(response)
            log.debug("Redirecting %s -> %s", url, redirect_location)
            return self.urlopen(
                method, redirect_location, body, headers,
                retries=retries, redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout, pool_timeout=pool_timeout,
                release_conn=release_conn, body_pos=body_pos,
                **response_kw)

        # Check if we should retry the HTTP response.
        has_retry_after = bool(response.getheader('Retry-After'))
        if retries.is_retry(method, response.status, has_retry_after):
            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_status:
                    # Drain and release the connection for this response, since
                    # we're not returning it to be released manually.
                    drain_and_release_conn(response)
                    raise
                return response

            # drain and return the connection to the pool before recursing
            drain_and_release_conn(response)

            retries.sleep(response)
            log.debug("Retry: %s", url)
            return self.urlopen(
                method, url, body, headers,
                retries=retries, redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout, pool_timeout=pool_timeout,
                release_conn=release_conn,
                body_pos=body_pos, **response_kw)

        return response
735 | |||
736 | |||
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Connection pool for HTTPS endpoints; otherwise identical to
    :class:`.HTTPConnectionPool`.

    When Python was compiled with the :mod:`ssl` module, connections are
    made with :class:`.VerifiedHTTPSConnection`, which *can* verify
    certificates; otherwise plain :class:`.HTTPSConnection` is used.

    :class:`.VerifiedHTTPSConnection` consults ``assert_fingerprint``,
    ``assert_hostname`` and ``host``, in that order, when verifying a
    connection.  Setting ``assert_hostname`` to ``False`` disables
    verification.

    ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
    ``ca_cert_dir`` and ``ssl_version`` only take effect when :mod:`ssl`
    is available; they are forwarded to
    :meth:`urllib3.util.ssl_wrap_socket` when the plain socket is upgraded
    to an SSL socket.
    """

    scheme = 'https'
    ConnectionCls = HTTPSConnection

    def __init__(self, host, port=None,
                 strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
                 block=False, headers=None, retries=None,
                 _proxy=None, _proxy_headers=None,
                 key_file=None, cert_file=None, cert_reqs=None,
                 ca_certs=None, ssl_version=None,
                 assert_hostname=None, assert_fingerprint=None,
                 ca_cert_dir=None, **conn_kw):

        HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
                                    block, headers, retries, _proxy, _proxy_headers,
                                    **conn_kw)

        # Supplying a CA bundle implies the caller wants verification,
        # unless they said otherwise explicitly.
        if ca_certs and cert_reqs is None:
            cert_reqs = 'CERT_REQUIRED'

        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.ca_cert_dir = ca_cert_dir
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint

    def _prepare_conn(self, conn):
        """
        Configure ``conn`` for :meth:`urllib3.util.ssl_wrap_socket` and
        establish the tunnel if a proxy is used.

        Certificate options only apply to connections that can actually
        verify, i.e. :class:`.VerifiedHTTPSConnection` instances.
        """
        if isinstance(conn, VerifiedHTTPSConnection):
            conn.set_cert(key_file=self.key_file,
                          cert_file=self.cert_file,
                          cert_reqs=self.cert_reqs,
                          ca_certs=self.ca_certs,
                          ca_cert_dir=self.ca_cert_dir,
                          assert_hostname=self.assert_hostname,
                          assert_fingerprint=self.assert_fingerprint)
            conn.ssl_version = self.ssl_version
        return conn

    def _prepare_proxy(self, conn):
        """
        Establish the CONNECT tunnel before the request goes out; otherwise
        httplib would improperly set the Host: header to the proxy's
        IP:port.
        """
        # The tunnel helper was private (``_set_tunnel``) before Python 2.7.
        tunnel = getattr(conn, 'set_tunnel', None)
        if tunnel is None:  # Platform-specific: Python 2.6
            tunnel = conn._set_tunnel

        # Python 2.6.4 and older cannot accept tunnel headers.
        if sys.version_info > (2, 6, 4) or self.proxy_headers:
            tunnel(self._proxy_host, self.port, self.proxy_headers)
        else:
            tunnel(self._proxy_host, self.port)

        conn.connect()

    def _new_conn(self):
        """
        Return a fresh :class:`httplib.HTTPSConnection`.
        """
        self.num_connections += 1
        log.debug("Starting new HTTPS connection (%d): %s",
                  self.num_connections, self.host)

        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
            raise SSLError("Can't connect to HTTPS URL because the SSL "
                           "module is not available.")

        # When tunnelling through a proxy the TCP connection goes to the
        # proxy itself; the tunnel to the real host comes later.
        if self.proxy is None:
            conn_host, conn_port = self.host, self.port
        else:
            conn_host, conn_port = self.proxy.host, self.proxy.port

        conn = self.ConnectionCls(host=conn_host, port=conn_port,
                                  timeout=self.timeout.connect_timeout,
                                  strict=self.strict, **self.conn_kw)

        return self._prepare_conn(conn)

    def _validate_conn(self, conn):
        """
        Hook invoked right before a request is made, after the socket is
        created.
        """
        super(HTTPSConnectionPool, self)._validate_conn(conn)

        # Connect eagerly so certificate problems surface here rather than
        # mid-request.  (AppEngine connections might not have a ``.sock``.)
        if not getattr(conn, 'sock', None):
            conn.connect()

        if not conn.is_verified:
            warnings.warn((
                'Unverified HTTPS request is being made. '
                'Adding certificate verification is strongly advised. See: '
                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
                '#ssl-warnings'),
                InsecureRequestWarning)
859 | |||
860 | |||
def connection_from_url(url, **kw):
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.

    Saves the caller from having to parse out the scheme, host and port of
    the url before creating an :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param \\**kw:
        Additional keyword arguments forwarded to the constructor of the
        chosen :class:`.ConnectionPool` (timeout, maxsize, headers, ...).

    Example::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)
    port = port or port_by_scheme.get(scheme, 80)
    pool_cls = HTTPSConnectionPool if scheme == 'https' else HTTPConnectionPool
    return pool_cls(host, port=port, **kw)
887 | |||
888 | |||
889 | def _ipv6_host(host): | ||
890 | """ | ||
891 | Process IPv6 address literals | ||
892 | """ | ||
893 | |||
894 | # httplib doesn't like it when we include brackets in IPv6 addresses | ||
895 | # Specifically, if we include brackets but also pass the port then | ||
896 | # httplib crazily doubles up the square brackets on the Host header. | ||
897 | # Instead, we need to make sure we never pass ``None`` as the port. | ||
898 | # However, for backward compatibility reasons we can't actually | ||
899 | # *assert* that. See http://bugs.python.org/issue28539 | ||
900 | # | ||
901 | # Also if an IPv6 address literal has a zone identifier, the | ||
902 | # percent sign might be URIencoded, convert it back into ASCII | ||
903 | if host.startswith('[') and host.endswith(']'): | ||
904 | host = host.replace('%25', '%').strip('[]') | ||
905 | return host | ||
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/__init__.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/__init__.py | |||
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/__init__.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/__init__.py | |||
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/bindings.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/bindings.py new file mode 100644 index 0000000..9787b02 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/bindings.py | |||
@@ -0,0 +1,593 @@ | |||
1 | """ | ||
2 | This module uses ctypes to bind a whole bunch of functions and constants from | ||
3 | SecureTransport. The goal here is to provide the low-level API to | ||
4 | SecureTransport. These are essentially the C-level functions and constants, and | ||
5 | they're pretty gross to work with. | ||
6 | |||
7 | This code is a bastardised version of the code found in Will Bond's oscrypto | ||
8 | library. An enormous debt is owed to him for blazing this trail for us. For | ||
9 | that reason, this code should be considered to be covered both by urllib3's | ||
10 | license and by oscrypto's: | ||
11 | |||
12 | Copyright (c) 2015-2016 Will Bond <will@wbond.net> | ||
13 | |||
14 | Permission is hereby granted, free of charge, to any person obtaining a | ||
15 | copy of this software and associated documentation files (the "Software"), | ||
16 | to deal in the Software without restriction, including without limitation | ||
17 | the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
18 | and/or sell copies of the Software, and to permit persons to whom the | ||
19 | Software is furnished to do so, subject to the following conditions: | ||
20 | |||
21 | The above copyright notice and this permission notice shall be included in | ||
22 | all copies or substantial portions of the Software. | ||
23 | |||
24 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
30 | DEALINGS IN THE SOFTWARE. | ||
31 | """ | ||
32 | from __future__ import absolute_import | ||
33 | |||
34 | import platform | ||
35 | from ctypes.util import find_library | ||
36 | from ctypes import ( | ||
37 | c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long, | ||
38 | c_bool | ||
39 | ) | ||
40 | from ctypes import CDLL, POINTER, CFUNCTYPE | ||
41 | |||
42 | |||
# Locate the two macOS system libraries we bind against.  Failure is raised
# as ImportError so callers can treat "SecureTransport unavailable" exactly
# like a missing module.
security_path = find_library('Security')
if not security_path:
    raise ImportError('The library Security could not be found')


core_foundation_path = find_library('CoreFoundation')
if not core_foundation_path:
    raise ImportError('The library CoreFoundation could not be found')


# The SecureTransport API used by this backend needs OS X 10.8 or newer.
version = platform.mac_ver()[0]
version_info = tuple(map(int, version.split('.')))
if version_info < (10, 8):
    raise OSError(
        'Only OS X 10.8 and newer are supported, not %s.%s' % (
            version_info[0], version_info[1]
        )
    )

Security = CDLL(security_path, use_errno=True)
CoreFoundation = CDLL(core_foundation_path, use_errno=True)

# ctypes aliases mirroring the CoreFoundation/Security C typedefs, so the
# function prototypes below read like the Apple headers.
Boolean = c_bool
CFIndex = c_long
CFStringEncoding = c_uint32
CFData = c_void_p
CFString = c_void_p
CFArray = c_void_p
CFMutableArray = c_void_p
CFDictionary = c_void_p
CFError = c_void_p
CFType = c_void_p
CFTypeID = c_ulong

CFTypeRef = POINTER(CFType)
CFAllocatorRef = c_void_p

# All Security framework calls report success/failure as an OSStatus.
OSStatus = c_int32

CFDataRef = POINTER(CFData)
CFStringRef = POINTER(CFString)
CFArrayRef = POINTER(CFArray)
CFMutableArrayRef = POINTER(CFMutableArray)
CFDictionaryRef = POINTER(CFDictionary)
CFArrayCallBacks = c_void_p
CFDictionaryKeyCallBacks = c_void_p
CFDictionaryValueCallBacks = c_void_p

# Security framework handle and enum types.
SecCertificateRef = POINTER(c_void_p)
SecExternalFormat = c_uint32
SecExternalItemType = c_uint32
SecIdentityRef = POINTER(c_void_p)
SecItemImportExportFlags = c_uint32
SecItemImportExportKeyParameters = c_void_p
SecKeychainRef = POINTER(c_void_p)
SSLProtocol = c_uint32
SSLCipherSuite = c_uint32
SSLContextRef = POINTER(c_void_p)
SecTrustRef = POINTER(c_void_p)
SSLConnectionRef = c_uint32
SecTrustResultType = c_uint32
SecTrustOptionFlags = c_uint32
SSLProtocolSide = c_uint32
SSLConnectionType = c_uint32
SSLSessionOption = c_uint32
108 | |||
109 | |||
# Declare argtypes/restype prototypes for every Security and CoreFoundation
# function we call, so ctypes marshals arguments and return values
# correctly.  Any missing symbol surfaces as AttributeError, which we remap
# to ImportError below.
#
# Fixes over the original: ``SSLGetNumberEnabledCiphers.argtype`` and
# ``SecTrustSetAnchorCertificatesOnly.argstypes`` were typos (``argtypes``
# is the attribute ctypes actually reads), so those two prototypes were
# silently never registered; a duplicate SecCopyErrorMessageString
# prototype was also dropped.
try:
    Security.SecItemImport.argtypes = [
        CFDataRef,
        CFStringRef,
        POINTER(SecExternalFormat),
        POINTER(SecExternalItemType),
        SecItemImportExportFlags,
        POINTER(SecItemImportExportKeyParameters),
        SecKeychainRef,
        POINTER(CFArrayRef),
    ]
    Security.SecItemImport.restype = OSStatus

    Security.SecCertificateGetTypeID.argtypes = []
    Security.SecCertificateGetTypeID.restype = CFTypeID

    Security.SecIdentityGetTypeID.argtypes = []
    Security.SecIdentityGetTypeID.restype = CFTypeID

    Security.SecKeyGetTypeID.argtypes = []
    Security.SecKeyGetTypeID.restype = CFTypeID

    Security.SecCertificateCreateWithData.argtypes = [
        CFAllocatorRef,
        CFDataRef
    ]
    Security.SecCertificateCreateWithData.restype = SecCertificateRef

    Security.SecCertificateCopyData.argtypes = [
        SecCertificateRef
    ]
    Security.SecCertificateCopyData.restype = CFDataRef

    Security.SecCopyErrorMessageString.argtypes = [
        OSStatus,
        c_void_p
    ]
    Security.SecCopyErrorMessageString.restype = CFStringRef

    Security.SecIdentityCreateWithCertificate.argtypes = [
        CFTypeRef,
        SecCertificateRef,
        POINTER(SecIdentityRef)
    ]
    Security.SecIdentityCreateWithCertificate.restype = OSStatus

    Security.SecKeychainCreate.argtypes = [
        c_char_p,
        c_uint32,
        c_void_p,
        Boolean,
        c_void_p,
        POINTER(SecKeychainRef)
    ]
    Security.SecKeychainCreate.restype = OSStatus

    Security.SecKeychainDelete.argtypes = [
        SecKeychainRef
    ]
    Security.SecKeychainDelete.restype = OSStatus

    Security.SecPKCS12Import.argtypes = [
        CFDataRef,
        CFDictionaryRef,
        POINTER(CFArrayRef)
    ]
    Security.SecPKCS12Import.restype = OSStatus

    # Callback signatures SecureTransport invokes to do the actual socket
    # I/O on our behalf.
    SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
    SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t))

    Security.SSLSetIOFuncs.argtypes = [
        SSLContextRef,
        SSLReadFunc,
        SSLWriteFunc
    ]
    Security.SSLSetIOFuncs.restype = OSStatus

    Security.SSLSetPeerID.argtypes = [
        SSLContextRef,
        c_char_p,
        c_size_t
    ]
    Security.SSLSetPeerID.restype = OSStatus

    Security.SSLSetCertificate.argtypes = [
        SSLContextRef,
        CFArrayRef
    ]
    Security.SSLSetCertificate.restype = OSStatus

    Security.SSLSetCertificateAuthorities.argtypes = [
        SSLContextRef,
        CFTypeRef,
        Boolean
    ]
    Security.SSLSetCertificateAuthorities.restype = OSStatus

    Security.SSLSetConnection.argtypes = [
        SSLContextRef,
        SSLConnectionRef
    ]
    Security.SSLSetConnection.restype = OSStatus

    Security.SSLSetPeerDomainName.argtypes = [
        SSLContextRef,
        c_char_p,
        c_size_t
    ]
    Security.SSLSetPeerDomainName.restype = OSStatus

    Security.SSLHandshake.argtypes = [
        SSLContextRef
    ]
    Security.SSLHandshake.restype = OSStatus

    Security.SSLRead.argtypes = [
        SSLContextRef,
        c_char_p,
        c_size_t,
        POINTER(c_size_t)
    ]
    Security.SSLRead.restype = OSStatus

    Security.SSLWrite.argtypes = [
        SSLContextRef,
        c_char_p,
        c_size_t,
        POINTER(c_size_t)
    ]
    Security.SSLWrite.restype = OSStatus

    Security.SSLClose.argtypes = [
        SSLContextRef
    ]
    Security.SSLClose.restype = OSStatus

    Security.SSLGetNumberSupportedCiphers.argtypes = [
        SSLContextRef,
        POINTER(c_size_t)
    ]
    Security.SSLGetNumberSupportedCiphers.restype = OSStatus

    Security.SSLGetSupportedCiphers.argtypes = [
        SSLContextRef,
        POINTER(SSLCipherSuite),
        POINTER(c_size_t)
    ]
    Security.SSLGetSupportedCiphers.restype = OSStatus

    Security.SSLSetEnabledCiphers.argtypes = [
        SSLContextRef,
        POINTER(SSLCipherSuite),
        c_size_t
    ]
    Security.SSLSetEnabledCiphers.restype = OSStatus

    # Was ``argtype`` (typo), so this prototype was never applied.
    Security.SSLGetNumberEnabledCiphers.argtypes = [
        SSLContextRef,
        POINTER(c_size_t)
    ]
    Security.SSLGetNumberEnabledCiphers.restype = OSStatus

    Security.SSLGetEnabledCiphers.argtypes = [
        SSLContextRef,
        POINTER(SSLCipherSuite),
        POINTER(c_size_t)
    ]
    Security.SSLGetEnabledCiphers.restype = OSStatus

    Security.SSLGetNegotiatedCipher.argtypes = [
        SSLContextRef,
        POINTER(SSLCipherSuite)
    ]
    Security.SSLGetNegotiatedCipher.restype = OSStatus

    Security.SSLGetNegotiatedProtocolVersion.argtypes = [
        SSLContextRef,
        POINTER(SSLProtocol)
    ]
    Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus

    Security.SSLCopyPeerTrust.argtypes = [
        SSLContextRef,
        POINTER(SecTrustRef)
    ]
    Security.SSLCopyPeerTrust.restype = OSStatus

    Security.SecTrustSetAnchorCertificates.argtypes = [
        SecTrustRef,
        CFArrayRef
    ]
    Security.SecTrustSetAnchorCertificates.restype = OSStatus

    # Was ``argstypes`` (typo), so this prototype was never applied.
    Security.SecTrustSetAnchorCertificatesOnly.argtypes = [
        SecTrustRef,
        Boolean
    ]
    Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus

    Security.SecTrustEvaluate.argtypes = [
        SecTrustRef,
        POINTER(SecTrustResultType)
    ]
    Security.SecTrustEvaluate.restype = OSStatus

    Security.SecTrustGetCertificateCount.argtypes = [
        SecTrustRef
    ]
    Security.SecTrustGetCertificateCount.restype = CFIndex

    Security.SecTrustGetCertificateAtIndex.argtypes = [
        SecTrustRef,
        CFIndex
    ]
    Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef

    Security.SSLCreateContext.argtypes = [
        CFAllocatorRef,
        SSLProtocolSide,
        SSLConnectionType
    ]
    Security.SSLCreateContext.restype = SSLContextRef

    Security.SSLSetSessionOption.argtypes = [
        SSLContextRef,
        SSLSessionOption,
        Boolean
    ]
    Security.SSLSetSessionOption.restype = OSStatus

    Security.SSLSetProtocolVersionMin.argtypes = [
        SSLContextRef,
        SSLProtocol
    ]
    Security.SSLSetProtocolVersionMin.restype = OSStatus

    Security.SSLSetProtocolVersionMax.argtypes = [
        SSLContextRef,
        SSLProtocol
    ]
    Security.SSLSetProtocolVersionMax.restype = OSStatus

    # Re-export the handle/callback types on the library objects so callers
    # can reach everything through ``Security``/``CoreFoundation``.
    Security.SSLReadFunc = SSLReadFunc
    Security.SSLWriteFunc = SSLWriteFunc
    Security.SSLContextRef = SSLContextRef
    Security.SSLProtocol = SSLProtocol
    Security.SSLCipherSuite = SSLCipherSuite
    Security.SecIdentityRef = SecIdentityRef
    Security.SecKeychainRef = SecKeychainRef
    Security.SecTrustRef = SecTrustRef
    Security.SecTrustResultType = SecTrustResultType
    Security.SecExternalFormat = SecExternalFormat
    Security.OSStatus = OSStatus

    # Exported constant objects (looked up in the loaded dylib).
    Security.kSecImportExportPassphrase = CFStringRef.in_dll(
        Security, 'kSecImportExportPassphrase'
    )
    Security.kSecImportItemIdentity = CFStringRef.in_dll(
        Security, 'kSecImportItemIdentity'
    )

    # CoreFoundation time!
    CoreFoundation.CFRetain.argtypes = [
        CFTypeRef
    ]
    CoreFoundation.CFRetain.restype = CFTypeRef

    CoreFoundation.CFRelease.argtypes = [
        CFTypeRef
    ]
    CoreFoundation.CFRelease.restype = None

    CoreFoundation.CFGetTypeID.argtypes = [
        CFTypeRef
    ]
    CoreFoundation.CFGetTypeID.restype = CFTypeID

    CoreFoundation.CFStringCreateWithCString.argtypes = [
        CFAllocatorRef,
        c_char_p,
        CFStringEncoding
    ]
    CoreFoundation.CFStringCreateWithCString.restype = CFStringRef

    CoreFoundation.CFStringGetCStringPtr.argtypes = [
        CFStringRef,
        CFStringEncoding
    ]
    CoreFoundation.CFStringGetCStringPtr.restype = c_char_p

    CoreFoundation.CFStringGetCString.argtypes = [
        CFStringRef,
        c_char_p,
        CFIndex,
        CFStringEncoding
    ]
    CoreFoundation.CFStringGetCString.restype = c_bool

    CoreFoundation.CFDataCreate.argtypes = [
        CFAllocatorRef,
        c_char_p,
        CFIndex
    ]
    CoreFoundation.CFDataCreate.restype = CFDataRef

    CoreFoundation.CFDataGetLength.argtypes = [
        CFDataRef
    ]
    CoreFoundation.CFDataGetLength.restype = CFIndex

    CoreFoundation.CFDataGetBytePtr.argtypes = [
        CFDataRef
    ]
    CoreFoundation.CFDataGetBytePtr.restype = c_void_p

    CoreFoundation.CFDictionaryCreate.argtypes = [
        CFAllocatorRef,
        POINTER(CFTypeRef),
        POINTER(CFTypeRef),
        CFIndex,
        CFDictionaryKeyCallBacks,
        CFDictionaryValueCallBacks
    ]
    CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef

    CoreFoundation.CFDictionaryGetValue.argtypes = [
        CFDictionaryRef,
        CFTypeRef
    ]
    CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef

    CoreFoundation.CFArrayCreate.argtypes = [
        CFAllocatorRef,
        POINTER(CFTypeRef),
        CFIndex,
        CFArrayCallBacks,
    ]
    CoreFoundation.CFArrayCreate.restype = CFArrayRef

    CoreFoundation.CFArrayCreateMutable.argtypes = [
        CFAllocatorRef,
        CFIndex,
        CFArrayCallBacks
    ]
    CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef

    CoreFoundation.CFArrayAppendValue.argtypes = [
        CFMutableArrayRef,
        c_void_p
    ]
    CoreFoundation.CFArrayAppendValue.restype = None

    CoreFoundation.CFArrayGetCount.argtypes = [
        CFArrayRef
    ]
    CoreFoundation.CFArrayGetCount.restype = CFIndex

    CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
        CFArrayRef,
        CFIndex
    ]
    CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p

    CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
        CoreFoundation, 'kCFAllocatorDefault'
    )
    CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks')
    CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
        CoreFoundation, 'kCFTypeDictionaryKeyCallBacks'
    )
    CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
        CoreFoundation, 'kCFTypeDictionaryValueCallBacks'
    )

    CoreFoundation.CFTypeRef = CFTypeRef
    CoreFoundation.CFArrayRef = CFArrayRef
    CoreFoundation.CFStringRef = CFStringRef
    CoreFoundation.CFDictionaryRef = CFDictionaryRef

except AttributeError:
    raise ImportError('Error initializing ctypes')
498 | |||
499 | |||
class CFConst(object):
    """
    A class object that acts as essentially a namespace for CoreFoundation
    constants.
    """
    # kCFStringEncodingUTF8 from CFString.h; the only encoding this backend
    # uses when converting between Python and CFString objects.
    kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
506 | |||
507 | |||
class SecurityConst(object):
    """
    A class object that acts as essentially a namespace for Security constants.

    Values are transcribed from Apple's SecureTransport and Security
    framework headers (SecureTransport.h, SecTrust.h, SecImportExport.h,
    SecBase.h); do not alter them.
    """
    # SSLSessionOption values
    kSSLSessionOptionBreakOnServerAuth = 0

    # SSLProtocol values
    kSSLProtocol2 = 1
    kSSLProtocol3 = 2
    kTLSProtocol1 = 4
    kTLSProtocol11 = 7
    kTLSProtocol12 = 8

    # SSLProtocolSide / SSLConnectionType values
    kSSLClientSide = 1
    kSSLStreamType = 0

    # SecExternalFormat values
    kSecFormatPEMSequence = 10

    # SecTrustResultType values
    kSecTrustResultInvalid = 0
    kSecTrustResultProceed = 1
    # This gap is present on purpose: this was kSecTrustResultConfirm, which
    # is deprecated.
    kSecTrustResultDeny = 3
    kSecTrustResultUnspecified = 4
    kSecTrustResultRecoverableTrustFailure = 5
    kSecTrustResultFatalTrustFailure = 6
    kSecTrustResultOtherError = 7

    # SecureTransport OSStatus error codes
    errSSLProtocol = -9800
    errSSLWouldBlock = -9803
    errSSLClosedGraceful = -9805
    errSSLClosedNoNotify = -9816
    errSSLClosedAbort = -9806

    errSSLXCertChainInvalid = -9807
    errSSLCrypto = -9809
    errSSLInternal = -9810
    errSSLCertExpired = -9814
    errSSLCertNotYetValid = -9815
    errSSLUnknownRootCert = -9812
    errSSLNoRootCert = -9813
    errSSLHostNameMismatch = -9843
    errSSLPeerHandshakeFail = -9824
    errSSLPeerUserCancelled = -9839
    errSSLWeakPeerEphemeralDHKey = -9850
    errSSLServerAuthCompleted = -9841
    errSSLRecordOverflow = -9847

    # Security framework OSStatus error codes
    errSecVerifyFailed = -67808
    errSecNoTrustSettings = -25263
    errSecItemNotFound = -25300
    errSecInvalidTrustSettings = -25262

    # Cipher suites. We only pick the ones our default cipher string allows.
    TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
    TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
    TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
    TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
    TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3
    TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
    TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2
    TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
    TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
    TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
    TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
    TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
    TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
    TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A
    TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
    TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
    TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
    TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
    TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
    TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
    TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
    TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040
    TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
    TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
    TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
    TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
    TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
    TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
    TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
    TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
    # TLS 1.3 suites
    TLS_AES_128_GCM_SHA256 = 0x1301
    TLS_AES_256_GCM_SHA384 = 0x1302
    TLS_CHACHA20_POLY1305_SHA256 = 0x1303
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/low_level.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/low_level.py new file mode 100644 index 0000000..4e5c0db --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/_securetransport/low_level.py | |||
@@ -0,0 +1,343 @@ | |||
1 | """ | ||
2 | Low-level helpers for the SecureTransport bindings. | ||
3 | |||
4 | These are Python functions that are not directly related to the high-level APIs | ||
5 | but are necessary to get them to work. They include a whole bunch of low-level | ||
6 | CoreFoundation messing about and memory management. The concerns in this module | ||
7 | are almost entirely about trying to avoid memory leaks and providing | ||
8 | appropriate and useful assistance to the higher-level code. | ||
9 | """ | ||
10 | import base64 | ||
11 | import ctypes | ||
12 | import itertools | ||
13 | import re | ||
14 | import os | ||
15 | import ssl | ||
16 | import tempfile | ||
17 | |||
18 | from .bindings import Security, CoreFoundation, CFConst | ||
19 | |||
20 | |||
# This regular expression is used to grab PEM data out of a PEM bundle.
# re.DOTALL lets ``.`` match newlines, so group(1) captures the whole
# multi-line base64 body between the BEGIN/END markers. Note this is a
# bytes pattern: the bundle must be read in binary mode.
_PEM_CERTS_RE = re.compile(
    b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
)
25 | |||
26 | |||
def _cf_data_from_bytes(bytestring):
    """
    Wrap a Python bytestring in a newly-allocated CFData object.

    The caller owns the returned CFData and must CFRelease it.
    """
    allocator = CoreFoundation.kCFAllocatorDefault
    return CoreFoundation.CFDataCreate(allocator, bytestring, len(bytestring))
35 | |||
36 | |||
def _cf_dictionary_from_tuples(tuples):
    """
    Build a CFDictionary from a list of (key, value) Python tuples.

    The caller owns the returned dictionary and must CFRelease it.
    """
    size = len(tuples)

    # Split the pairs into parallel C arrays of keys and values, keeping
    # the key/value pairing intact by pulling both from the same tuples.
    cf_keys = (CoreFoundation.CFTypeRef * size)(*(pair[0] for pair in tuples))
    cf_values = (CoreFoundation.CFTypeRef * size)(*(pair[1] for pair in tuples))

    return CoreFoundation.CFDictionaryCreate(
        CoreFoundation.kCFAllocatorDefault,
        cf_keys,
        cf_values,
        size,
        CoreFoundation.kCFTypeDictionaryKeyCallBacks,
        CoreFoundation.kCFTypeDictionaryValueCallBacks,
    )
57 | |||
58 | |||
def _cf_string_to_unicode(value):
    """
    Convert a CFString object into a Python unicode string.

    Used entirely for error reporting. Returns ``None`` when the CFString
    yields no data at all.
    """
    as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))

    # Fast path: CFStringGetCStringPtr may expose an internal buffer
    # directly, with no copy.
    raw = CoreFoundation.CFStringGetCStringPtr(
        as_void_p, CFConst.kCFStringEncodingUTF8
    )
    if raw is None:
        # Slow path: copy the string out into a buffer of our own.
        buf = ctypes.create_string_buffer(1024)
        copied = CoreFoundation.CFStringGetCString(
            as_void_p, buf, 1024, CFConst.kCFStringEncodingUTF8
        )
        if not copied:
            raise OSError('Error copying C string from CFStringRef')
        raw = buf.value
    return raw.decode('utf-8') if raw is not None else raw
86 | |||
87 | |||
88 | def _assert_no_error(error, exception_class=None): | ||
89 | """ | ||
90 | Checks the return code and throws an exception if there is an error to | ||
91 | report | ||
92 | """ | ||
93 | if error == 0: | ||
94 | return | ||
95 | |||
96 | cf_error_string = Security.SecCopyErrorMessageString(error, None) | ||
97 | output = _cf_string_to_unicode(cf_error_string) | ||
98 | CoreFoundation.CFRelease(cf_error_string) | ||
99 | |||
100 | if output is None or output == u'': | ||
101 | output = u'OSStatus %s' % error | ||
102 | |||
103 | if exception_class is None: | ||
104 | exception_class = ssl.SSLError | ||
105 | |||
106 | raise exception_class(output) | ||
107 | |||
108 | |||
def _cert_array_from_pem(pem_bundle):
    """
    Given a bundle of certs in PEM format, turns them into a CFArray of certs
    that can be used to validate a cert chain.

    The caller owns the returned CFArray and must CFRelease it.

    :raises ssl.SSLError: if the bundle contains no certificates, or if any
        CoreFoundation/Security allocation or parse step fails.
    """
    der_certs = [
        base64.b64decode(match.group(1))
        for match in _PEM_CERTS_RE.finditer(pem_bundle)
    ]
    if not der_certs:
        raise ssl.SSLError("No root certificates specified")

    cert_array = CoreFoundation.CFArrayCreateMutable(
        CoreFoundation.kCFAllocatorDefault,
        0,
        ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
    )
    if not cert_array:
        raise ssl.SSLError("Unable to allocate memory!")

    try:
        for der_bytes in der_certs:
            certdata = _cf_data_from_bytes(der_bytes)
            if not certdata:
                raise ssl.SSLError("Unable to allocate memory!")
            cert = Security.SecCertificateCreateWithData(
                CoreFoundation.kCFAllocatorDefault, certdata
            )
            # The certificate holds its own reference to the data now.
            CoreFoundation.CFRelease(certdata)
            if not cert:
                raise ssl.SSLError("Unable to build cert object!")

            # The array retains the cert, so drop our reference after append.
            CoreFoundation.CFArrayAppendValue(cert_array, cert)
            CoreFoundation.CFRelease(cert)
    except Exception:
        # We need to free the array before the exception bubbles further.
        # We only want to do that if an error occurs: otherwise, the caller
        # should free.
        CoreFoundation.CFRelease(cert_array)
        # Bug fix: the original fell through here without re-raising, so the
        # exception was silently swallowed and the just-released cert_array
        # was returned to the caller. Re-raise so failures are visible.
        raise

    return cert_array
150 | |||
151 | |||
def _is_cert(item):
    """
    Returns True if a given CFTypeRef is a certificate.
    """
    return CoreFoundation.CFGetTypeID(item) == Security.SecCertificateGetTypeID()
158 | |||
159 | |||
def _is_identity(item):
    """
    Returns True if a given CFTypeRef is an identity.
    """
    return CoreFoundation.CFGetTypeID(item) == Security.SecIdentityGetTypeID()
166 | |||
167 | |||
def _temporary_keychain():
    """
    Create a temporary macOS keychain for working with credentials.

    The keychain is protected by a one-time random password and lives in a
    freshly-created temporary directory; we expect one keychain per socket.
    The returned SecKeychainRef must be freed by the caller, including
    calling SecKeychainDelete.

    Returns a tuple of the SecKeychainRef and the path to the temporary
    directory that contains it.
    """
    # SecKeychainCreate requires an on-disk path, so mkstemp alone won't do:
    # make a temporary directory and choose a filename inside it. The first
    # 8 random bytes (base64-encoded) become the filename; the remaining 32
    # bytes (also base64, hence valid UTF-8) password-protect the keychain.
    random_bytes = os.urandom(40)
    filename = base64.b64encode(random_bytes[:8]).decode('utf-8')
    password = base64.b64encode(random_bytes[8:])  # Must be valid UTF-8
    tempdirectory = tempfile.mkdtemp()

    keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')

    # Create the keychain itself and hand it back alongside its directory.
    keychain = Security.SecKeychainRef()
    status = Security.SecKeychainCreate(
        keychain_path,
        len(password),
        password,
        False,
        None,
        ctypes.byref(keychain)
    )
    _assert_no_error(status)

    return keychain, tempdirectory
206 | |||
207 | |||
def _load_items_from_file(keychain, path):
    """
    Given a single file, loads all the trust objects from it into arrays and
    the keychain.
    Returns a tuple of lists: the first list is a list of identities, the
    second a list of certs.

    Each returned item is CFRetained here so that it survives the release
    of the import result array; the caller must CFRelease every item.
    """
    certificates = []
    identities = []
    result_array = None

    with open(path, 'rb') as f:
        raw_filedata = f.read()

    try:
        # Wrap the raw file bytes in a CFData so SecItemImport can parse it.
        filedata = CoreFoundation.CFDataCreate(
            CoreFoundation.kCFAllocatorDefault,
            raw_filedata,
            len(raw_filedata)
        )
        result_array = CoreFoundation.CFArrayRef()
        result = Security.SecItemImport(
            filedata,  # cert data
            None,  # Filename, leaving it out for now
            None,  # What the type of the file is, we don't care
            None,  # what's in the file, we don't care
            0,  # import flags
            None,  # key params, can include passphrase in the future
            keychain,  # The keychain to insert into
            ctypes.byref(result_array)  # Results
        )
        _assert_no_error(result)

        # A CFArray is not very useful to us as an intermediary
        # representation, so we are going to extract the objects we want
        # and then free the array. We don't need to keep hold of keys: the
        # keychain already has them!
        result_count = CoreFoundation.CFArrayGetCount(result_array)
        for index in range(result_count):
            item = CoreFoundation.CFArrayGetValueAtIndex(
                result_array, index
            )
            item = ctypes.cast(item, CoreFoundation.CFTypeRef)

            # CFRetain before stashing: releasing result_array below drops
            # the array's own reference to each element.
            if _is_cert(item):
                CoreFoundation.CFRetain(item)
                certificates.append(item)
            elif _is_identity(item):
                CoreFoundation.CFRetain(item)
                identities.append(item)
    finally:
        if result_array:
            CoreFoundation.CFRelease(result_array)

        # NOTE(review): if CFDataCreate itself raised above, ``filedata``
        # would be unbound here and this CFRelease would NameError —
        # confirm whether that ctypes call can raise before assignment.
        CoreFoundation.CFRelease(filedata)

    return (identities, certificates)
265 | |||
266 | |||
def _load_client_cert_chain(keychain, *paths):
    """
    Load certificates and maybe keys from a number of files. Has the end goal
    of returning a CFArray containing one SecIdentityRef, and then zero or more
    SecCertificateRef objects, suitable for use as a client certificate trust
    chain.

    Falsy entries in ``paths`` (None, empty string) are skipped. The caller
    owns the returned CFArray and must CFRelease it.
    """
    # Ok, the strategy.
    #
    # This relies on knowing that macOS will not give you a SecIdentityRef
    # unless you have imported a key into a keychain. This is a somewhat
    # artificial limitation of macOS (for example, it doesn't necessarily
    # affect iOS), but there is nothing inside Security.framework that lets you
    # get a SecIdentityRef without having a key in a keychain.
    #
    # So the policy here is we take all the files and iterate them in order.
    # Each one will use SecItemImport to have one or more objects loaded from
    # it. We will also point at a keychain that macOS can use to work with the
    # private key.
    #
    # Once we have all the objects, we'll check what we actually have. If we
    # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
    # we'll take the first certificate (which we assume to be our leaf) and
    # ask the keychain to give us a SecIdentityRef with that cert's associated
    # key.
    #
    # We'll then return a CFArray containing the trust chain: one
    # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
    # responsibility for freeing this CFArray will be with the caller. This
    # CFArray must remain alive for the entire connection, so in practice it
    # will be stored with a single SSLSocket, along with the reference to the
    # keychain.
    certificates = []
    identities = []

    # Filter out bad paths.
    paths = (path for path in paths if path)

    try:
        for file_path in paths:
            new_identities, new_certs = _load_items_from_file(
                keychain, file_path
            )
            identities.extend(new_identities)
            certificates.extend(new_certs)

        # Ok, we have everything. The question is: do we have an identity? If
        # not, we want to grab one from the first cert we have.
        # NOTE(review): if the files yielded neither identities nor certs,
        # certificates[0] below raises IndexError — callers appear expected
        # to pass at least one usable file.
        if not identities:
            new_identity = Security.SecIdentityRef()
            status = Security.SecIdentityCreateWithCertificate(
                keychain,
                certificates[0],
                ctypes.byref(new_identity)
            )
            _assert_no_error(status)
            identities.append(new_identity)

            # We now want to release the original certificate, as we no longer
            # need it.
            CoreFoundation.CFRelease(certificates.pop(0))

        # We now need to build a new CFArray that holds the trust chain.
        trust_chain = CoreFoundation.CFArrayCreateMutable(
            CoreFoundation.kCFAllocatorDefault,
            0,
            ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
        )
        for item in itertools.chain(identities, certificates):
            # ArrayAppendValue does a CFRetain on the item. That's fine,
            # because the finally block will release our other refs to them.
            CoreFoundation.CFArrayAppendValue(trust_chain, item)

        return trust_chain
    finally:
        # Drop our local references; the trust_chain (if built) holds its own.
        for obj in itertools.chain(identities, certificates):
            CoreFoundation.CFRelease(obj)
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/appengine.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/appengine.py new file mode 100644 index 0000000..fc00d17 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/appengine.py | |||
@@ -0,0 +1,296 @@ | |||
1 | """ | ||
2 | This module provides a pool manager that uses Google App Engine's | ||
3 | `URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_. | ||
4 | |||
5 | Example usage:: | ||
6 | |||
7 | from pip._vendor.urllib3 import PoolManager | ||
8 | from pip._vendor.urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox | ||
9 | |||
10 | if is_appengine_sandbox(): | ||
11 | # AppEngineManager uses AppEngine's URLFetch API behind the scenes | ||
12 | http = AppEngineManager() | ||
13 | else: | ||
14 | # PoolManager uses a socket-level API behind the scenes | ||
15 | http = PoolManager() | ||
16 | |||
17 | r = http.request('GET', 'https://google.com/') | ||
18 | |||
19 | There are `limitations <https://cloud.google.com/appengine/docs/python/\ | ||
20 | urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be | ||
21 | the best choice for your application. There are three options for using | ||
22 | urllib3 on Google App Engine: | ||
23 | |||
24 | 1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is | ||
25 | cost-effective in many circumstances as long as your usage is within the | ||
26 | limitations. | ||
27 | 2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets. | ||
28 | Sockets also have `limitations and restrictions | ||
29 | <https://cloud.google.com/appengine/docs/python/sockets/\ | ||
30 | #limitations-and-restrictions>`_ and have a lower free quota than URLFetch. | ||
31 | To use sockets, be sure to specify the following in your ``app.yaml``:: | ||
32 | |||
33 | env_variables: | ||
34 | GAE_USE_SOCKETS_HTTPLIB : 'true' | ||
35 | |||
36 | 3. If you are using `App Engine Flexible | ||
37 | <https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard | ||
38 | :class:`PoolManager` without any configuration or special environment variables. | ||
39 | """ | ||
40 | |||
41 | from __future__ import absolute_import | ||
42 | import logging | ||
43 | import os | ||
44 | import warnings | ||
45 | from ..packages.six.moves.urllib.parse import urljoin | ||
46 | |||
47 | from ..exceptions import ( | ||
48 | HTTPError, | ||
49 | HTTPWarning, | ||
50 | MaxRetryError, | ||
51 | ProtocolError, | ||
52 | TimeoutError, | ||
53 | SSLError | ||
54 | ) | ||
55 | |||
56 | from ..packages.six import BytesIO | ||
57 | from ..request import RequestMethods | ||
58 | from ..response import HTTPResponse | ||
59 | from ..util.timeout import Timeout | ||
60 | from ..util.retry import Retry | ||
61 | |||
62 | try: | ||
63 | from google.appengine.api import urlfetch | ||
64 | except ImportError: | ||
65 | urlfetch = None | ||
66 | |||
67 | |||
68 | log = logging.getLogger(__name__) | ||
69 | |||
70 | |||
class AppEnginePlatformWarning(HTTPWarning):
    # Warning category emitted when urllib3 falls back to URLFetch, or when
    # a URLFetch limitation forces it to ignore a requested option (granular
    # timeouts, per-category retries).
    pass
73 | |||
74 | |||
class AppEnginePlatformError(HTTPError):
    # Raised when URLFetch cannot service the request at all: the API is
    # unavailable, the environment is wrong (Managed VMs), or a hard
    # URLFetch limit (request/response size, HTTP method) is exceeded.
    pass
77 | |||
78 | |||
class AppEngineManager(RequestMethods):
    """
    Connection manager for Google App Engine sandbox applications.

    This manager uses the URLFetch service directly instead of using the
    emulated httplib, and is subject to URLFetch limitations as described in
    the App Engine documentation `here
    <https://cloud.google.com/appengine/docs/python/urlfetch>`_.

    Notably it will raise an :class:`AppEnginePlatformError` if:
        * URLFetch is not available.
        * If you attempt to use this on App Engine Flexible, as full socket
          support is available.
        * If a request size is more than 10 megabytes.
        * If a response size is more than 32 megabytes.
        * If you use an unsupported request method such as OPTIONS.

    Beyond those cases, it will raise normal urllib3 errors.
    """

    def __init__(self, headers=None, retries=None, validate_certificate=True,
                 urlfetch_retries=True):
        # Fail fast when the URLFetch API could not even be imported.
        if not urlfetch:
            raise AppEnginePlatformError(
                "URLFetch is not available in this environment.")

        if is_prod_appengine_mvms():
            # Managed VMs have real socket support, so URLFetch is the
            # wrong tool there. (Bug fix: the adjacent string literals were
            # missing a space, producing "AppEngineManageron Managed VMs".)
            raise AppEnginePlatformError(
                "Use normal urllib3.PoolManager instead of AppEngineManager "
                "on Managed VMs, as using URLFetch is not necessary in "
                "this environment.")

        warnings.warn(
            "urllib3 is using URLFetch on Google App Engine sandbox instead "
            "of sockets. To use sockets directly instead of URLFetch see "
            "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
            AppEnginePlatformWarning)

        RequestMethods.__init__(self, headers)
        self.validate_certificate = validate_certificate
        self.urlfetch_retries = urlfetch_retries

        self.retries = retries or Retry.DEFAULT

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Return False to re-raise any potential exceptions
        return False

    def urlopen(self, method, url, body=None, headers=None,
                retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
                **response_kw):
        """
        Issue a request via ``urlfetch.fetch``, translating URLFetch
        exceptions into urllib3 ones, and handling redirects and retries
        in Python when URLFetch's built-in redirect following is disabled.
        """
        retries = self._get_retries(retries, redirect)

        try:
            # Only let URLFetch follow redirects itself when the caller
            # wants redirects and the Retry policy still permits them.
            follow_redirects = (
                redirect and
                retries.redirect != 0 and
                retries.total)
            response = urlfetch.fetch(
                url,
                payload=body,
                method=method,
                headers=headers or {},
                allow_truncated=False,
                follow_redirects=self.urlfetch_retries and follow_redirects,
                deadline=self._get_absolute_timeout(timeout),
                validate_certificate=self.validate_certificate,
            )
        except urlfetch.DeadlineExceededError as e:
            raise TimeoutError(self, e)

        except urlfetch.InvalidURLError as e:
            if 'too large' in str(e):
                raise AppEnginePlatformError(
                    "URLFetch request too large, URLFetch only "
                    "supports requests up to 10mb in size.", e)
            raise ProtocolError(e)

        except urlfetch.DownloadError as e:
            if 'Too many redirects' in str(e):
                raise MaxRetryError(self, url, reason=e)
            raise ProtocolError(e)

        except urlfetch.ResponseTooLargeError as e:
            # Bug fix: the adjacent string literals were missing a space,
            # producing "supportsresponses up to 32mb".
            raise AppEnginePlatformError(
                "URLFetch response too large, URLFetch only supports "
                "responses up to 32mb in size.", e)

        except urlfetch.SSLCertificateError as e:
            raise SSLError(e)

        except urlfetch.InvalidMethodError as e:
            raise AppEnginePlatformError(
                "URLFetch does not support method: %s" % method, e)

        http_response = self._urlfetch_response_to_http_response(
            response, retries=retries, **response_kw)

        # Handle redirect?
        redirect_location = redirect and http_response.get_redirect_location()
        if redirect_location:
            # Check for redirect response
            if (self.urlfetch_retries and retries.raise_on_redirect):
                raise MaxRetryError(self, url, "too many redirects")
            else:
                if http_response.status == 303:
                    # 303 See Other requires the follow-up to be a GET.
                    method = 'GET'

                try:
                    retries = retries.increment(method, url, response=http_response, _pool=self)
                except MaxRetryError:
                    if retries.raise_on_redirect:
                        raise MaxRetryError(self, url, "too many redirects")
                    return http_response

                retries.sleep_for_retry(http_response)
                log.debug("Redirecting %s -> %s", url, redirect_location)
                redirect_url = urljoin(url, redirect_location)
                return self.urlopen(
                    method, redirect_url, body, headers,
                    retries=retries, redirect=redirect,
                    timeout=timeout, **response_kw)

        # Check if we should retry the HTTP response.
        has_retry_after = bool(http_response.getheader('Retry-After'))
        if retries.is_retry(method, http_response.status, has_retry_after):
            retries = retries.increment(
                method, url, response=http_response, _pool=self)
            log.debug("Retry: %s", url)
            retries.sleep(http_response)
            return self.urlopen(
                method, url,
                body=body, headers=headers,
                retries=retries, redirect=redirect,
                timeout=timeout, **response_kw)

        return http_response

    def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
        """Convert a URLFetch response object into an urllib3 HTTPResponse."""
        if is_prod_appengine():
            # Production GAE handles deflate encoding automatically, but does
            # not remove the encoding header.
            content_encoding = urlfetch_resp.headers.get('content-encoding')

            if content_encoding == 'deflate':
                del urlfetch_resp.headers['content-encoding']

        transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
        # We have a full response's content,
        # so let's make sure we don't report ourselves as chunked data.
        if transfer_encoding == 'chunked':
            encodings = transfer_encoding.split(",")
            encodings.remove('chunked')
            urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)

        return HTTPResponse(
            # In order for decoding to work, we must present the content as
            # a file-like object.
            body=BytesIO(urlfetch_resp.content),
            headers=urlfetch_resp.headers,
            status=urlfetch_resp.status_code,
            **response_kw
        )

    def _get_absolute_timeout(self, timeout):
        """
        Collapse a urllib3 Timeout into the single absolute deadline (in
        seconds) that URLFetch supports, warning when granular settings
        would be lost.
        """
        if timeout is Timeout.DEFAULT_TIMEOUT:
            return None  # Defer to URLFetch's default.
        if isinstance(timeout, Timeout):
            if timeout._read is not None or timeout._connect is not None:
                warnings.warn(
                    "URLFetch does not support granular timeout settings, "
                    "reverting to total or default URLFetch timeout.",
                    AppEnginePlatformWarning)
            return timeout.total
        return timeout

    def _get_retries(self, retries, redirect):
        """
        Normalize ``retries`` to a Retry object, warning about per-category
        retry options that URLFetch cannot honor.
        """
        if not isinstance(retries, Retry):
            retries = Retry.from_int(
                retries, redirect=redirect, default=self.retries)

        if retries.connect or retries.read or retries.redirect:
            warnings.warn(
                "URLFetch only supports total retries and does not "
                "recognize connect, read, or redirect retry parameters.",
                AppEnginePlatformWarning)

        return retries
272 | |||
273 | |||
def is_appengine():
    """Return True when running in any App Engine environment (local dev
    server, production sandbox, or a Managed VM)."""
    checks = (is_local_appengine, is_prod_appengine, is_prod_appengine_mvms)
    # any() over a generator preserves the original short-circuit order.
    return any(check() for check in checks)
278 | |||
279 | |||
def is_appengine_sandbox():
    """Return True in a sandboxed App Engine runtime (i.e. App Engine but
    not a Managed VM)."""
    if not is_appengine():
        return False
    return not is_prod_appengine_mvms()
282 | |||
283 | |||
def is_local_appengine():
    """Return True when running under the local App Engine dev server."""
    if 'APPENGINE_RUNTIME' not in os.environ:
        return False
    # The dev server advertises itself via SERVER_SOFTWARE.
    return 'Development/' in os.environ['SERVER_SOFTWARE']
287 | |||
288 | |||
def is_prod_appengine():
    """Return True on production App Engine, excluding Managed VMs."""
    if 'APPENGINE_RUNTIME' not in os.environ:
        return False
    if 'Google App Engine/' not in os.environ['SERVER_SOFTWARE']:
        return False
    return not is_prod_appengine_mvms()
293 | |||
294 | |||
def is_prod_appengine_mvms():
    """Return True when running on an App Engine Managed VM."""
    # Managed VMs advertise themselves via the GAE_VM environment variable.
    gae_vm = os.environ.get('GAE_VM', False)
    return gae_vm == 'true'
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/ntlmpool.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/ntlmpool.py new file mode 100644 index 0000000..888e0ad --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/ntlmpool.py | |||
@@ -0,0 +1,112 @@ | |||
1 | """ | ||
2 | NTLM authenticating pool, contributed by erikcederstran | ||
3 | |||
4 | Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 | ||
5 | """ | ||
6 | from __future__ import absolute_import | ||
7 | |||
8 | from logging import getLogger | ||
9 | from ntlm import ntlm | ||
10 | |||
11 | from .. import HTTPSConnectionPool | ||
12 | from ..packages.six.moves.http_client import HTTPSConnection | ||
13 | |||
14 | |||
15 | log = getLogger(__name__) | ||
16 | |||
17 | |||
class NTLMConnectionPool(HTTPSConnectionPool):
    """
    Implements an NTLM authentication version of an urllib3 connection pool
    """

    # NTLM authentication is only performed over HTTPS here.
    scheme = 'https'

    def __init__(self, user, pw, authurl, *args, **kwargs):
        """
        authurl is a random URL on the server that is protected by NTLM.
        user is the Windows user, probably in the DOMAIN\\username format.
        pw is the password for the user.
        """
        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
        self.authurl = authurl
        self.rawuser = user
        # Split DOMAIN\username; the domain is upper-cased for NTLM.
        # NOTE(review): assumes the backslash is present — a bare username
        # would raise IndexError on user_parts[1]; confirm caller contract.
        user_parts = user.split('\\', 1)
        self.domain = user_parts[0].upper()
        self.user = user_parts[1]
        self.pw = pw

    def _new_conn(self):
        # Performs the NTLM handshake that secures the connection. The socket
        # must be kept open while requests are performed.
        self.num_connections += 1
        log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
                  self.num_connections, self.host, self.authurl)

        headers = {}
        headers['Connection'] = 'Keep-Alive'
        req_header = 'Authorization'
        resp_header = 'www-authenticate'

        conn = HTTPSConnection(host=self.host, port=self.port)

        # Send negotiation message
        headers[req_header] = (
            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
        log.debug('Request headers: %s', headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        reshdr = dict(res.getheaders())
        log.debug('Response status: %s %s', res.status, res.reason)
        log.debug('Response headers: %s', reshdr)
        log.debug('Response data: %s [...]', res.read(100))

        # Remove the reference to the socket, so that it can not be closed by
        # the response object (we want to keep the socket open)
        res.fp = None

        # Server should respond with a challenge message
        auth_header_values = reshdr[resp_header].split(', ')
        auth_header_value = None
        for s in auth_header_values:
            if s[:5] == 'NTLM ':
                auth_header_value = s[5:]
        if auth_header_value is None:
            raise Exception('Unexpected %s response header: %s' %
                            (resp_header, reshdr[resp_header]))

        # Send authentication message
        ServerChallenge, NegotiateFlags = \
            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
                                                         self.user,
                                                         self.domain,
                                                         self.pw,
                                                         NegotiateFlags)
        headers[req_header] = 'NTLM %s' % auth_msg
        log.debug('Request headers: %s', headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        log.debug('Response status: %s %s', res.status, res.reason)
        log.debug('Response headers: %s', dict(res.getheaders()))
        log.debug('Response data: %s [...]', res.read()[:100])
        if res.status != 200:
            if res.status == 401:
                raise Exception('Server rejected request: wrong '
                                'username or password')
            raise Exception('Wrong server response: %s %s' %
                            (res.status, res.reason))

        res.fp = None
        log.debug('Connection established')
        return conn

    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True):
        # Force Keep-Alive: NTLM authenticates the *connection*, not each
        # request, so the authenticated socket must stay open and be reused.
        if headers is None:
            headers = {}
        headers['Connection'] = 'Keep-Alive'
        return super(NTLMConnectionPool, self).urlopen(method, url, body,
                                                       headers, retries,
                                                       redirect,
                                                       assert_same_host)
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/pyopenssl.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/pyopenssl.py new file mode 100644 index 0000000..f13e2bc --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/pyopenssl.py | |||
@@ -0,0 +1,455 @@ | |||
1 | """ | ||
2 | SSL with SNI_-support for Python 2. Follow these instructions if you would | ||
3 | like to verify SSL certificates in Python 2. Note, the default libraries do | ||
4 | *not* do certificate checking; you need to do additional work to validate | ||
5 | certificates yourself. | ||
6 | |||
7 | This needs the following packages installed: | ||
8 | |||
9 | * pyOpenSSL (tested with 16.0.0) | ||
10 | * cryptography (minimum 1.3.4, from pyopenssl) | ||
11 | * idna (minimum 2.0, from cryptography) | ||
12 | |||
13 | However, pyopenssl depends on cryptography, which depends on idna, so while we | ||
14 | use all three directly here we end up having relatively few packages required. | ||
15 | |||
16 | You can install them with the following command: | ||
17 | |||
18 | pip install pyopenssl cryptography idna | ||
19 | |||
20 | To activate certificate checking, call | ||
21 | :func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code | ||
22 | before you begin making HTTP requests. This can be done in a ``sitecustomize`` | ||
23 | module, or at any other time before your application begins using ``urllib3``, | ||
24 | like this:: | ||
25 | |||
26 | try: | ||
27 | import urllib3.contrib.pyopenssl | ||
28 | urllib3.contrib.pyopenssl.inject_into_urllib3() | ||
29 | except ImportError: | ||
30 | pass | ||
31 | |||
32 | Now you can use :mod:`urllib3` as you normally would, and it will support SNI | ||
33 | when the required modules are installed. | ||
34 | |||
35 | Activating this module also has the positive side effect of disabling SSL/TLS | ||
36 | compression in Python 2 (see `CRIME attack`_). | ||
37 | |||
38 | If you want to configure the default list of supported cipher suites, you can | ||
39 | set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable. | ||
40 | |||
41 | .. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication | ||
42 | .. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit) | ||
43 | """ | ||
44 | from __future__ import absolute_import | ||
45 | |||
46 | import OpenSSL.SSL | ||
47 | from cryptography import x509 | ||
48 | from cryptography.hazmat.backends.openssl import backend as openssl_backend | ||
49 | from cryptography.hazmat.backends.openssl.x509 import _Certificate | ||
50 | |||
51 | from socket import timeout, error as SocketError | ||
52 | from io import BytesIO | ||
53 | |||
54 | try: # Platform-specific: Python 2 | ||
55 | from socket import _fileobject | ||
56 | except ImportError: # Platform-specific: Python 3 | ||
57 | _fileobject = None | ||
58 | from ..packages.backports.makefile import backport_makefile | ||
59 | |||
60 | import logging | ||
61 | import ssl | ||
62 | from ..packages import six | ||
63 | import sys | ||
64 | |||
65 | from .. import util | ||
66 | |||
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']

# SNI always works.
HAS_SNI = True

# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
    ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
    ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}

# TLS 1.1/1.2 constants exist only when both the stdlib and the PyOpenSSL
# build support them, so probe both sides before registering.
if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
    _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD

if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
    _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD

# SSLv3 may be compiled out of either library; silently skip it when absent.
try:
    _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
    pass

# Map stdlib ssl.CERT_* verification modes to OpenSSL VERIFY_* flag values.
_stdlib_to_openssl_verify = {
    ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
    ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
    ssl.CERT_REQUIRED:
        OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
# Reverse mapping, used by PyOpenSSLContext.verify_mode's getter.
_openssl_to_stdlib_verify = dict(
    (v, k) for k, v in _stdlib_to_openssl_verify.items()
)

# OpenSSL will only write 16K at a time
SSL_WRITE_BLOCKSIZE = 16384

# Originals stashed so extract_from_urllib3() can undo the monkey-patching.
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext


log = logging.getLogger(__name__)
107 | |||
108 | |||
def inject_into_urllib3():
    """Monkey-patch urllib3 so its SSL layer is backed by PyOpenSSL."""
    # Refuse to patch anything when the installed dependencies are unusable.
    _validate_dependencies_met()

    util.ssl_.SSLContext = PyOpenSSLContext
    # The SNI and backend flags live on both the package facade and the
    # ssl_ submodule; patch both in lockstep.
    for patched in (util, util.ssl_):
        patched.HAS_SNI = HAS_SNI
        patched.IS_PYOPENSSL = True
119 | |||
120 | |||
def extract_from_urllib3():
    """Undo the monkey-patching performed by :func:`inject_into_urllib3`."""
    util.ssl_.SSLContext = orig_util_SSLContext
    # Restore the flags on both the package facade and the ssl_ submodule.
    for patched in (util, util.ssl_):
        patched.HAS_SNI = orig_util_HAS_SNI
        patched.IS_PYOPENSSL = False
129 | |||
130 | |||
def _validate_dependencies_met():
    """
    Verifies that PyOpenSSL's package-level dependencies have been met.
    Throws `ImportError` if they are not met.
    """
    # `Extensions.get_extension_for_class` appeared in cryptography 1.1; its
    # absence means the installed cryptography release is too old to use.
    from cryptography.x509.extensions import Extensions
    if getattr(Extensions, "get_extension_for_class", None) is None:
        raise ImportError("'cryptography' module missing required functionality. "
                          "Try upgrading to v1.3.4 or newer.")

    # pyOpenSSL 0.14+ builds on cryptography's bindings and exposes the
    # underlying handle as `_x509`; older releases lack that attribute.
    from OpenSSL.crypto import X509
    if getattr(X509(), "_x509", None) is None:
        raise ImportError("'pyOpenSSL' module missing required functionality. "
                          "Try upgrading to v0.14 or newer.")
149 | |||
150 | |||
def _dnsname_to_stdlib(name):
    """
    Converts a dNSName SubjectAlternativeName field to the form used by the
    standard library on the given Python version.

    Cryptography produces a dNSName as a unicode string that was idna-decoded
    from ASCII bytes. We need to idna-encode that string to get it back, and
    then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
    uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
    """
    def idna_encode(value):
        # `idna.encode` explodes on wildcard labels, so peel off a leading
        # '*.' or '.', encode the remainder, and glue the prefix back on as
        # ASCII bytes. (Approach borrowed from pyca/cryptography.)
        from pip._vendor import idna

        for prefix in (u'*.', u'.'):
            if value.startswith(prefix):
                stripped = value[len(prefix):]
                return prefix.encode('ascii') + idna.encode(stripped)
        return idna.encode(value)

    encoded = idna_encode(name)
    # Python 3's ssl machinery carries these names as text, not bytes.
    if sys.version_info >= (3, 0):
        encoded = encoded.decode('utf-8')
    return encoded
179 | |||
180 | |||
def get_subj_alt_name(peer_cert):
    """
    Given an PyOpenSSL certificate, provides all the subject alternative names.

    Returns a list of ('DNS', name) and ('IP Address', address) tuples in the
    shape expected by ssl.match_hostname; returns the empty list when the
    certificate has no usable SAN extension.
    """
    # Pass the cert to cryptography, which has much better APIs for this.
    if hasattr(peer_cert, "to_cryptography"):
        cert = peer_cert.to_cryptography()
    else:
        # This is technically using private APIs, but should work across all
        # relevant versions before PyOpenSSL got a proper API for this.
        cert = _Certificate(openssl_backend, peer_cert._x509)

    # We want to find the SAN extension. Ask Cryptography to locate it (it's
    # faster than looping in Python)
    try:
        ext = cert.extensions.get_extension_for_class(
            x509.SubjectAlternativeName
        ).value
    except x509.ExtensionNotFound:
        # No such extension, return the empty list.
        return []
    except (x509.DuplicateExtension, x509.UnsupportedExtension,
            x509.UnsupportedGeneralNameType, UnicodeError) as e:
        # A problem has been found with the quality of the certificate. Assume
        # no SAN field is present; the caller will fall back to the subject CN.
        log.warning(
            "A problem was encountered with the certificate that prevented "
            "urllib3 from finding the SubjectAlternativeName field. This can "
            "affect certificate validation. The error was %s",
            e,
        )
        return []

    # We want to return dNSName and iPAddress fields. We need to cast the IPs
    # back to strings because the match_hostname function wants them as
    # strings.
    # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
    # decoded. This is pretty frustrating, but that's what the standard library
    # does with certificates, and so we need to attempt to do the same.
    names = [
        ('DNS', _dnsname_to_stdlib(name))
        for name in ext.get_values_for_type(x509.DNSName)
    ]
    names.extend(
        ('IP Address', str(name))
        for name in ext.get_values_for_type(x509.IPAddress)
    )

    return names
230 | |||
231 | |||
class WrappedSocket(object):
    '''API-compatibility wrapper for Python OpenSSL's Connection-class.

    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
    collector of pypy.
    '''

    def __init__(self, connection, socket, suppress_ragged_eofs=True):
        # The OpenSSL.SSL.Connection performing the TLS work.
        self.connection = connection
        # The underlying plain socket (used for fileno/timeouts/select).
        self.socket = socket
        # When True, an unexpected EOF (peer closed without a TLS
        # close_notify) is reported as a normal EOF instead of an error.
        self.suppress_ragged_eofs = suppress_ragged_eofs
        # Reference count of makefile() handles; close() is deferred until 0.
        self._makefile_refs = 0
        self._closed = False

    def fileno(self):
        return self.socket.fileno()

    # Copy-pasted from Python 3.5 source code
    def _decref_socketios(self):
        # Called when a makefile() file object closes; only actually close
        # once close() has been requested and no references remain.
        if self._makefile_refs > 0:
            self._makefile_refs -= 1
        if self._closed:
            self.close()

    def recv(self, *args, **kwargs):
        # socket.recv() semantics on top of OpenSSL's Connection.
        try:
            data = self.connection.recv(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            # (-1, 'Unexpected EOF') is a "ragged" EOF: the peer vanished
            # without a TLS shutdown. Optionally treat it as a clean EOF.
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                return b''
            else:
                raise SocketError(str(e))
        except OpenSSL.SSL.ZeroReturnError as e:
            # Clean TLS shutdown from the peer maps to EOF; anything else
            # propagates as an error.
            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                return b''
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            # No TLS record ready: wait for the socket to become readable,
            # then retry via recursion (bounded by socket timeout).
            rd = util.wait_for_read(self.socket, self.socket.gettimeout())
            if not rd:
                raise timeout('The read operation timed out')
            else:
                return self.recv(*args, **kwargs)
        else:
            return data

    def recv_into(self, *args, **kwargs):
        # Same logic as recv(), but filling a caller-supplied buffer and
        # returning a byte count (0 on EOF) instead of a bytes object.
        try:
            return self.connection.recv_into(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                return 0
            else:
                raise SocketError(str(e))
        except OpenSSL.SSL.ZeroReturnError as e:
            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                return 0
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            rd = util.wait_for_read(self.socket, self.socket.gettimeout())
            if not rd:
                raise timeout('The read operation timed out')
            else:
                return self.recv_into(*args, **kwargs)

    def settimeout(self, timeout):
        return self.socket.settimeout(timeout)

    def _send_until_done(self, data):
        # Retry a single OpenSSL send until it makes progress, blocking via
        # select whenever OpenSSL reports its write buffer is full.
        while True:
            try:
                return self.connection.send(data)
            except OpenSSL.SSL.WantWriteError:
                wr = util.wait_for_write(self.socket, self.socket.gettimeout())
                if not wr:
                    raise timeout()
                continue
            except OpenSSL.SSL.SysCallError as e:
                raise SocketError(str(e))

    def sendall(self, data):
        # OpenSSL caps a single write at 16kB, so push the payload out in
        # SSL_WRITE_BLOCKSIZE chunks until everything has been sent.
        total_sent = 0
        while total_sent < len(data):
            sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
            total_sent += sent

    def shutdown(self):
        # FIXME rethrow compatible exceptions should we ever use this
        self.connection.shutdown()

    def close(self):
        # Defer the real close while makefile() handles are outstanding.
        if self._makefile_refs < 1:
            try:
                self._closed = True
                return self.connection.close()
            except OpenSSL.SSL.Error:
                return
        else:
            self._makefile_refs -= 1

    def getpeercert(self, binary_form=False):
        # Mimic ssl.SSLSocket.getpeercert(): DER bytes when binary_form is
        # True, otherwise a dict with 'subject' and 'subjectAltName' entries.
        x509 = self.connection.get_peer_certificate()

        if not x509:
            return x509

        if binary_form:
            return OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                x509)

        return {
            'subject': (
                (('commonName', x509.get_subject().CN),),
            ),
            'subjectAltName': get_subj_alt_name(x509)
        }

    def _reuse(self):
        # PyPy GC support: take an extra reference (paired with _drop()).
        self._makefile_refs += 1

    def _drop(self):
        if self._makefile_refs < 1:
            self.close()
        else:
            self._makefile_refs -= 1
359 | |||
360 | |||
if _fileobject:  # Platform-specific: Python 2
    # Python 2: reuse the stdlib socket _fileobject, bumping the reference
    # count so close() is deferred until the last file object is dropped.
    def makefile(self, mode, bufsize=-1):
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize, close=True)
else:  # Platform-specific: Python 3
    # Python 3 has no _fileobject; use the backported makefile implementation.
    makefile = backport_makefile

# Attached outside the class body so one conditional serves both branches.
WrappedSocket.makefile = makefile
369 | |||
370 | |||
class PyOpenSSLContext(object):
    """
    I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
    for translating the interface of the standard library ``SSLContext`` object
    to calls into PyOpenSSL.
    """
    def __init__(self, protocol):
        # Translate the stdlib ssl.PROTOCOL_* constant into OpenSSL's
        # corresponding *_METHOD value (raises KeyError for unknown protocols).
        self.protocol = _openssl_versions[protocol]
        self._ctx = OpenSSL.SSL.Context(self.protocol)
        self._options = 0
        self.check_hostname = False

    @property
    def options(self):
        return self._options

    @options.setter
    def options(self, value):
        # Record the value so the getter can report it, then forward to
        # OpenSSL. Note: set_options ORs into the existing option set.
        self._options = value
        self._ctx.set_options(value)

    @property
    def verify_mode(self):
        # Report the stdlib-style CERT_* constant for the current OpenSSL mode.
        return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]

    @verify_mode.setter
    def verify_mode(self, value):
        self._ctx.set_verify(
            _stdlib_to_openssl_verify[value],
            _verify_callback
        )

    def set_default_verify_paths(self):
        self._ctx.set_default_verify_paths()

    def set_ciphers(self, ciphers):
        # OpenSSL expects the cipher string as bytes.
        if isinstance(ciphers, six.text_type):
            ciphers = ciphers.encode('utf-8')
        self._ctx.set_cipher_list(ciphers)

    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
        """Load CA certs from a file, a directory, and/or in-memory PEM data."""
        if cafile is not None:
            cafile = cafile.encode('utf-8')
        if capath is not None:
            capath = capath.encode('utf-8')
        self._ctx.load_verify_locations(cafile, capath)
        if cadata is not None:
            self._ctx.load_verify_locations(BytesIO(cadata))

    def load_cert_chain(self, certfile, keyfile=None, password=None):
        """Load a client certificate and its private key.

        :param certfile: path to the PEM-encoded certificate.
        :param keyfile: path to the private key; defaults to *certfile* when
            the key is bundled in the same file.
        :param password: optional passphrase protecting the private key; may
            be text or bytes.
        """
        self._ctx.use_certificate_file(certfile)
        if password is not None:
            # Bug fix: OpenSSL's passphrase callback must return bytes.
            # Encode a text password up front so decrypting the key does not
            # fail on unicode input.
            if not isinstance(password, six.binary_type):
                password = password.encode('utf-8')
            self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
        self._ctx.use_privatekey_file(keyfile or certfile)

    def wrap_socket(self, sock, server_side=False,
                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
                    server_hostname=None):
        """Wrap *sock* in TLS, perform the handshake, and return a
        :class:`WrappedSocket`. Only client-side connections are driven here
        (``set_connect_state`` is always used).
        """
        cnx = OpenSSL.SSL.Connection(self._ctx, sock)

        # PyOpenSSL requires the SNI hostname as bytes.
        if isinstance(server_hostname, six.text_type):  # Platform-specific: Python 3
            server_hostname = server_hostname.encode('utf-8')

        if server_hostname is not None:
            cnx.set_tlsext_host_name(server_hostname)

        cnx.set_connect_state()

        # Drive the handshake manually: the socket behaves non-blocking, so
        # wait via select whenever OpenSSL asks for more data from the peer.
        while True:
            try:
                cnx.do_handshake()
            except OpenSSL.SSL.WantReadError:
                rd = util.wait_for_read(sock, sock.gettimeout())
                if not rd:
                    raise timeout('select timed out')
                continue
            except OpenSSL.SSL.Error as e:
                raise ssl.SSLError('bad handshake: %r' % e)
            break

        return WrappedSocket(cnx, sock)
452 | |||
453 | |||
454 | def _verify_callback(cnx, x509, err_no, err_depth, return_code): | ||
455 | return err_no == 0 | ||
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/securetransport.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/securetransport.py new file mode 100644 index 0000000..77cf861 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/securetransport.py | |||
@@ -0,0 +1,810 @@ | |||
1 | """ | ||
2 | SecureTranport support for urllib3 via ctypes. | ||
3 | |||
4 | This makes platform-native TLS available to urllib3 users on macOS without the | ||
5 | use of a compiler. This is an important feature because the Python Package | ||
6 | Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL | ||
7 | that ships with macOS is not capable of doing TLSv1.2. The only way to resolve | ||
8 | this is to give macOS users an alternative solution to the problem, and that | ||
9 | solution is to use SecureTransport. | ||
10 | |||
11 | We use ctypes here because this solution must not require a compiler. That's | ||
12 | because pip is not allowed to require a compiler either. | ||
13 | |||
14 | This is not intended to be a seriously long-term solution to this problem. | ||
15 | The hope is that PEP 543 will eventually solve this issue for us, at which | ||
16 | point we can retire this contrib module. But in the short term, we need to | ||
17 | solve the impending tire fire that is Python on Mac without this kind of | ||
18 | contrib module. So...here we are. | ||
19 | |||
20 | To use this module, simply import and inject it:: | ||
21 | |||
22 | import urllib3.contrib.securetransport | ||
23 | urllib3.contrib.securetransport.inject_into_urllib3() | ||
24 | |||
25 | Happy TLSing! | ||
26 | """ | ||
27 | from __future__ import absolute_import | ||
28 | |||
29 | import contextlib | ||
30 | import ctypes | ||
31 | import errno | ||
32 | import os.path | ||
33 | import shutil | ||
34 | import socket | ||
35 | import ssl | ||
36 | import threading | ||
37 | import weakref | ||
38 | |||
39 | from .. import util | ||
40 | from ._securetransport.bindings import ( | ||
41 | Security, SecurityConst, CoreFoundation | ||
42 | ) | ||
43 | from ._securetransport.low_level import ( | ||
44 | _assert_no_error, _cert_array_from_pem, _temporary_keychain, | ||
45 | _load_client_cert_chain | ||
46 | ) | ||
47 | |||
48 | try: # Platform-specific: Python 2 | ||
49 | from socket import _fileobject | ||
50 | except ImportError: # Platform-specific: Python 3 | ||
51 | _fileobject = None | ||
52 | from ..packages.backports.makefile import backport_makefile | ||
53 | |||
try:
    # SecureTransport's I/O callbacks fill caller-owned buffers in place,
    # which requires memoryview support.
    memoryview(b'')
except NameError:
    raise ImportError("SecureTransport only works on Pythons with memoryview")

__all__ = ['inject_into_urllib3', 'extract_from_urllib3']

# SNI always works
HAS_SNI = True

# Originals stashed so extract_from_urllib3() can undo the monkey-patching.
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext

# This dictionary is used by the read callback to obtain a handle to the
# calling wrapped socket. This is a pretty silly approach, but for now it'll
# do. I feel like I should be able to smuggle a handle to the wrapped socket
# directly in the SSLConnectionRef, but for now this approach will work I
# guess.
#
# We need to lock around this structure for inserts, but we don't do it for
# reads/writes in the callbacks. The reasoning here goes as follows:
#
#    1. It is not possible to call into the callbacks before the dictionary is
#       populated, so once in the callback the id must be in the dictionary.
#    2. The callbacks don't mutate the dictionary, they only read from it, and
#       so cannot conflict with any of the insertions.
#
# This is good: if we had to lock in the callbacks we'd drastically slow down
# the performance of this code.
_connection_refs = weakref.WeakValueDictionary()
_connection_ref_lock = threading.Lock()

# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
# for no better reason than we need *a* limit, and this one is right there.
SSL_WRITE_BLOCKSIZE = 16384

# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
# individual cipher suites. We need to do this because this is how
# SecureTransport wants them.
CIPHER_SUITES = [
    SecurityConst.TLS_AES_256_GCM_SHA384,
    SecurityConst.TLS_CHACHA20_POLY1305_SHA256,
    SecurityConst.TLS_AES_128_GCM_SHA256,
    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
    SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
    SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,
    SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
    SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,
    SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
    SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
    SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
    SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
    SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
    SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
    SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
    SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
    SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
    SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
    SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
    SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
    SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
    SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
    SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
    SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
    SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
]

# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
_protocol_to_min_max = {
    ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
}

# Register only the protocol constants the running Python actually exposes.
if hasattr(ssl, "PROTOCOL_SSLv2"):
    _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
        SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2
    )
if hasattr(ssl, "PROTOCOL_SSLv3"):
    _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
        SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3
    )
if hasattr(ssl, "PROTOCOL_TLSv1"):
    _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
        SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1
    )
if hasattr(ssl, "PROTOCOL_TLSv1_1"):
    _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
        SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11
    )
if hasattr(ssl, "PROTOCOL_TLSv1_2"):
    _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
        SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12
    )
if hasattr(ssl, "PROTOCOL_TLS"):
    _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23]
157 | |||
158 | |||
def inject_into_urllib3():
    """
    Monkey-patch urllib3 with SecureTransport-backed SSL-support.
    """
    util.ssl_.SSLContext = SecureTransportContext
    # The SNI and backend flags live on both the package facade and the
    # ssl_ submodule; patch both in lockstep.
    for patched in (util, util.ssl_):
        patched.HAS_SNI = HAS_SNI
        patched.IS_SECURETRANSPORT = True
168 | |||
169 | |||
def extract_from_urllib3():
    """
    Undo monkey-patching by :func:`inject_into_urllib3`.
    """
    util.ssl_.SSLContext = orig_util_SSLContext
    # Restore the flags on both the package facade and the ssl_ submodule.
    for patched in (util, util.ssl_):
        patched.HAS_SNI = orig_util_HAS_SNI
        patched.IS_SECURETRANSPORT = False
179 | |||
180 | |||
def _read_callback(connection_id, data_buffer, data_length_pointer):
    """
    SecureTransport read callback. This is called by ST to request that data
    be returned from the socket.

    Returns 0 on success or an errSSL* status code. ctypes callbacks must not
    raise, so any Python exception is stashed on the wrapped socket for
    _raise_on_error to re-raise later.
    """
    wrapped_socket = None
    try:
        wrapped_socket = _connection_refs.get(connection_id)
        if wrapped_socket is None:
            return SecurityConst.errSSLInternal
        base_socket = wrapped_socket.socket

        requested_length = data_length_pointer[0]

        timeout = wrapped_socket.gettimeout()
        error = None
        read_count = 0
        # Expose the C buffer as a writable memoryview so recv_into can fill
        # it in place, without an intermediate copy.
        buffer = (ctypes.c_char * requested_length).from_address(data_buffer)
        buffer_view = memoryview(buffer)

        try:
            while read_count < requested_length:
                if timeout is None or timeout >= 0:
                    readables = util.wait_for_read([base_socket], timeout)
                    if not readables:
                        raise socket.error(errno.EAGAIN, 'timed out')

                # We need to tell ctypes that we have a buffer that can be
                # written to. Upsettingly, we do that like this:
                chunk_size = base_socket.recv_into(
                    buffer_view[read_count:requested_length]
                )
                read_count += chunk_size
                if not chunk_size:
                    # Zero-byte read: the peer closed the connection.
                    if not read_count:
                        return SecurityConst.errSSLClosedGraceful
                    break
        except (socket.error) as e:
            error = e.errno

            if error is not None and error != errno.EAGAIN:
                if error == errno.ECONNRESET:
                    return SecurityConst.errSSLClosedAbort
                raise

        # Report back how many bytes were actually placed in the buffer.
        data_length_pointer[0] = read_count

        if read_count != requested_length:
            return SecurityConst.errSSLWouldBlock

        return 0
    except Exception as e:
        # Never let an exception cross the ctypes boundary; park it on the
        # socket and signal an internal error to SecureTransport instead.
        if wrapped_socket is not None:
            wrapped_socket._exception = e
        return SecurityConst.errSSLInternal
236 | |||
237 | |||
def _write_callback(connection_id, data_buffer, data_length_pointer):
    """
    SecureTransport write callback. This is called by ST to request that data
    actually be sent on the network.

    Returns 0 on success or an errSSL* status code. ctypes callbacks must not
    raise, so any Python exception is stashed on the wrapped socket for
    _raise_on_error to re-raise later.
    """
    wrapped_socket = None
    try:
        wrapped_socket = _connection_refs.get(connection_id)
        if wrapped_socket is None:
            return SecurityConst.errSSLInternal
        base_socket = wrapped_socket.socket

        bytes_to_write = data_length_pointer[0]
        # Snapshot the C buffer into Python bytes before sending.
        data = ctypes.string_at(data_buffer, bytes_to_write)

        timeout = wrapped_socket.gettimeout()
        error = None
        sent = 0

        try:
            while sent < bytes_to_write:
                if timeout is None or timeout >= 0:
                    writables = util.wait_for_write([base_socket], timeout)
                    if not writables:
                        raise socket.error(errno.EAGAIN, 'timed out')
                chunk_sent = base_socket.send(data)
                sent += chunk_sent

                # This has some needless copying here, but I'm not sure there's
                # much value in optimising this data path.
                data = data[chunk_sent:]
        except (socket.error) as e:
            error = e.errno

            if error is not None and error != errno.EAGAIN:
                if error == errno.ECONNRESET:
                    return SecurityConst.errSSLClosedAbort
                raise

        # Report back how many bytes actually went out on the wire.
        data_length_pointer[0] = sent
        if sent != bytes_to_write:
            return SecurityConst.errSSLWouldBlock

        return 0
    except Exception as e:
        # Never let an exception cross the ctypes boundary; park it on the
        # socket and signal an internal error to SecureTransport instead.
        if wrapped_socket is not None:
            wrapped_socket._exception = e
        return SecurityConst.errSSLInternal
286 | |||
287 | |||
# We need to keep these two objects references alive: if they get GC'd while
# in use then SecureTransport could attempt to call a function that is in freed
# memory. That would be...uh...bad. Yeah, that's the word. Bad.
# Module-level bindings give the ctypes function pointers module lifetime;
# they are handed to SecureTransport via SSLSetIOFuncs.
_read_callback_pointer = Security.SSLReadFunc(_read_callback)
_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
293 | |||
294 | |||
295 | class WrappedSocket(object): | ||
296 | """ | ||
297 | API-compatibility wrapper for Python's OpenSSL wrapped socket object. | ||
298 | |||
299 | Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage | ||
300 | collector of PyPy. | ||
301 | """ | ||
    def __init__(self, socket):
        # The underlying plain socket that SecureTransport reads/writes via
        # the module-level I/O callbacks.
        self.socket = socket
        # SSLContextRef; populated later by the handshake machinery.
        self.context = None
        # Reference count of makefile() handles (PyPy GC support).
        self._makefile_refs = 0
        self._closed = False
        # Exception raised inside a C callback; re-raised by _raise_on_error.
        self._exception = None
        # Temporary keychain (and its backing directory) for client certs.
        self._keychain = None
        self._keychain_dir = None
        self._client_cert_chain = None

        # We save off the previously-configured timeout and then set it to
        # zero. This is done because we use select and friends to handle the
        # timeouts, but if we leave the timeout set on the lower socket then
        # Python will "kindly" call select on that socket again for us. Avoid
        # that by forcing the timeout to zero.
        self._timeout = self.socket.gettimeout()
        self.socket.settimeout(0)
319 | |||
    @contextlib.contextmanager
    def _raise_on_error(self):
        """
        A context manager that can be used to wrap calls that do I/O from
        SecureTransport. If any of the I/O callbacks hit an exception, this
        context manager will correctly propagate the exception after the fact.
        This avoids silently swallowing those exceptions.

        It also correctly forces the socket closed.
        """
        self._exception = None

        # We explicitly don't catch around this yield because in the unlikely
        # event that an exception was hit in the block we don't want to swallow
        # it.
        yield
        if self._exception is not None:
            # Grab-and-clear in one step so the stored exception cannot be
            # re-raised twice by nested uses of this context manager.
            exception, self._exception = self._exception, None
            self.close()
            raise exception
340 | |||
341 | def _set_ciphers(self): | ||
342 | """ | ||
343 | Sets up the allowed ciphers. By default this matches the set in | ||
344 | util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done | ||
345 | custom and doesn't allow changing at this time, mostly because parsing | ||
346 | OpenSSL cipher strings is going to be a freaking nightmare. | ||
347 | """ | ||
348 | ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES) | ||
349 | result = Security.SSLSetEnabledCiphers( | ||
350 | self.context, ciphers, len(CIPHER_SUITES) | ||
351 | ) | ||
352 | _assert_no_error(result) | ||
353 | |||
354 | def _custom_validate(self, verify, trust_bundle): | ||
355 | """ | ||
356 | Called when we have set custom validation. We do this in two cases: | ||
357 | first, when cert validation is entirely disabled; and second, when | ||
358 | using a custom trust DB. | ||
359 | """ | ||
360 | # If we disabled cert validation, just say: cool. | ||
361 | if not verify: | ||
362 | return | ||
363 | |||
364 | # We want data in memory, so load it up. | ||
365 | if os.path.isfile(trust_bundle): | ||
366 | with open(trust_bundle, 'rb') as f: | ||
367 | trust_bundle = f.read() | ||
368 | |||
369 | cert_array = None | ||
370 | trust = Security.SecTrustRef() | ||
371 | |||
372 | try: | ||
373 | # Get a CFArray that contains the certs we want. | ||
374 | cert_array = _cert_array_from_pem(trust_bundle) | ||
375 | |||
376 | # Ok, now the hard part. We want to get the SecTrustRef that ST has | ||
377 | # created for this connection, shove our CAs into it, tell ST to | ||
378 | # ignore everything else it knows, and then ask if it can build a | ||
379 | # chain. This is a buuuunch of code. | ||
380 | result = Security.SSLCopyPeerTrust( | ||
381 | self.context, ctypes.byref(trust) | ||
382 | ) | ||
383 | _assert_no_error(result) | ||
384 | if not trust: | ||
385 | raise ssl.SSLError("Failed to copy trust reference") | ||
386 | |||
387 | result = Security.SecTrustSetAnchorCertificates(trust, cert_array) | ||
388 | _assert_no_error(result) | ||
389 | |||
390 | result = Security.SecTrustSetAnchorCertificatesOnly(trust, True) | ||
391 | _assert_no_error(result) | ||
392 | |||
393 | trust_result = Security.SecTrustResultType() | ||
394 | result = Security.SecTrustEvaluate( | ||
395 | trust, ctypes.byref(trust_result) | ||
396 | ) | ||
397 | _assert_no_error(result) | ||
398 | finally: | ||
399 | if trust: | ||
400 | CoreFoundation.CFRelease(trust) | ||
401 | |||
402 | if cert_array is None: | ||
403 | CoreFoundation.CFRelease(cert_array) | ||
404 | |||
405 | # Ok, now we can look at what the result was. | ||
406 | successes = ( | ||
407 | SecurityConst.kSecTrustResultUnspecified, | ||
408 | SecurityConst.kSecTrustResultProceed | ||
409 | ) | ||
410 | if trust_result.value not in successes: | ||
411 | raise ssl.SSLError( | ||
412 | "certificate verify failed, error code: %d" % | ||
413 | trust_result.value | ||
414 | ) | ||
415 | |||
416 | def handshake(self, | ||
417 | server_hostname, | ||
418 | verify, | ||
419 | trust_bundle, | ||
420 | min_version, | ||
421 | max_version, | ||
422 | client_cert, | ||
423 | client_key, | ||
424 | client_key_passphrase): | ||
425 | """ | ||
426 | Actually performs the TLS handshake. This is run automatically by | ||
427 | wrapped socket, and shouldn't be needed in user code. | ||
428 | """ | ||
429 | # First, we do the initial bits of connection setup. We need to create | ||
430 | # a context, set its I/O funcs, and set the connection reference. | ||
431 | self.context = Security.SSLCreateContext( | ||
432 | None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType | ||
433 | ) | ||
434 | result = Security.SSLSetIOFuncs( | ||
435 | self.context, _read_callback_pointer, _write_callback_pointer | ||
436 | ) | ||
437 | _assert_no_error(result) | ||
438 | |||
439 | # Here we need to compute the handle to use. We do this by taking the | ||
440 | # id of self modulo 2**31 - 1. If this is already in the dictionary, we | ||
441 | # just keep incrementing by one until we find a free space. | ||
442 | with _connection_ref_lock: | ||
443 | handle = id(self) % 2147483647 | ||
444 | while handle in _connection_refs: | ||
445 | handle = (handle + 1) % 2147483647 | ||
446 | _connection_refs[handle] = self | ||
447 | |||
448 | result = Security.SSLSetConnection(self.context, handle) | ||
449 | _assert_no_error(result) | ||
450 | |||
451 | # If we have a server hostname, we should set that too. | ||
452 | if server_hostname: | ||
453 | if not isinstance(server_hostname, bytes): | ||
454 | server_hostname = server_hostname.encode('utf-8') | ||
455 | |||
456 | result = Security.SSLSetPeerDomainName( | ||
457 | self.context, server_hostname, len(server_hostname) | ||
458 | ) | ||
459 | _assert_no_error(result) | ||
460 | |||
461 | # Setup the ciphers. | ||
462 | self._set_ciphers() | ||
463 | |||
464 | # Set the minimum and maximum TLS versions. | ||
465 | result = Security.SSLSetProtocolVersionMin(self.context, min_version) | ||
466 | _assert_no_error(result) | ||
467 | result = Security.SSLSetProtocolVersionMax(self.context, max_version) | ||
468 | _assert_no_error(result) | ||
469 | |||
470 | # If there's a trust DB, we need to use it. We do that by telling | ||
471 | # SecureTransport to break on server auth. We also do that if we don't | ||
472 | # want to validate the certs at all: we just won't actually do any | ||
473 | # authing in that case. | ||
474 | if not verify or trust_bundle is not None: | ||
475 | result = Security.SSLSetSessionOption( | ||
476 | self.context, | ||
477 | SecurityConst.kSSLSessionOptionBreakOnServerAuth, | ||
478 | True | ||
479 | ) | ||
480 | _assert_no_error(result) | ||
481 | |||
482 | # If there's a client cert, we need to use it. | ||
483 | if client_cert: | ||
484 | self._keychain, self._keychain_dir = _temporary_keychain() | ||
485 | self._client_cert_chain = _load_client_cert_chain( | ||
486 | self._keychain, client_cert, client_key | ||
487 | ) | ||
488 | result = Security.SSLSetCertificate( | ||
489 | self.context, self._client_cert_chain | ||
490 | ) | ||
491 | _assert_no_error(result) | ||
492 | |||
493 | while True: | ||
494 | with self._raise_on_error(): | ||
495 | result = Security.SSLHandshake(self.context) | ||
496 | |||
497 | if result == SecurityConst.errSSLWouldBlock: | ||
498 | raise socket.timeout("handshake timed out") | ||
499 | elif result == SecurityConst.errSSLServerAuthCompleted: | ||
500 | self._custom_validate(verify, trust_bundle) | ||
501 | continue | ||
502 | else: | ||
503 | _assert_no_error(result) | ||
504 | break | ||
505 | |||
506 | def fileno(self): | ||
507 | return self.socket.fileno() | ||
508 | |||
509 | # Copy-pasted from Python 3.5 source code | ||
510 | def _decref_socketios(self): | ||
511 | if self._makefile_refs > 0: | ||
512 | self._makefile_refs -= 1 | ||
513 | if self._closed: | ||
514 | self.close() | ||
515 | |||
516 | def recv(self, bufsiz): | ||
517 | buffer = ctypes.create_string_buffer(bufsiz) | ||
518 | bytes_read = self.recv_into(buffer, bufsiz) | ||
519 | data = buffer[:bytes_read] | ||
520 | return data | ||
521 | |||
522 | def recv_into(self, buffer, nbytes=None): | ||
523 | # Read short on EOF. | ||
524 | if self._closed: | ||
525 | return 0 | ||
526 | |||
527 | if nbytes is None: | ||
528 | nbytes = len(buffer) | ||
529 | |||
530 | buffer = (ctypes.c_char * nbytes).from_buffer(buffer) | ||
531 | processed_bytes = ctypes.c_size_t(0) | ||
532 | |||
533 | with self._raise_on_error(): | ||
534 | result = Security.SSLRead( | ||
535 | self.context, buffer, nbytes, ctypes.byref(processed_bytes) | ||
536 | ) | ||
537 | |||
538 | # There are some result codes that we want to treat as "not always | ||
539 | # errors". Specifically, those are errSSLWouldBlock, | ||
540 | # errSSLClosedGraceful, and errSSLClosedNoNotify. | ||
541 | if (result == SecurityConst.errSSLWouldBlock): | ||
542 | # If we didn't process any bytes, then this was just a time out. | ||
543 | # However, we can get errSSLWouldBlock in situations when we *did* | ||
544 | # read some data, and in those cases we should just read "short" | ||
545 | # and return. | ||
546 | if processed_bytes.value == 0: | ||
547 | # Timed out, no data read. | ||
548 | raise socket.timeout("recv timed out") | ||
549 | elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify): | ||
550 | # The remote peer has closed this connection. We should do so as | ||
551 | # well. Note that we don't actually return here because in | ||
552 | # principle this could actually be fired along with return data. | ||
553 | # It's unlikely though. | ||
554 | self.close() | ||
555 | else: | ||
556 | _assert_no_error(result) | ||
557 | |||
558 | # Ok, we read and probably succeeded. We should return whatever data | ||
559 | # was actually read. | ||
560 | return processed_bytes.value | ||
561 | |||
562 | def settimeout(self, timeout): | ||
563 | self._timeout = timeout | ||
564 | |||
565 | def gettimeout(self): | ||
566 | return self._timeout | ||
567 | |||
568 | def send(self, data): | ||
569 | processed_bytes = ctypes.c_size_t(0) | ||
570 | |||
571 | with self._raise_on_error(): | ||
572 | result = Security.SSLWrite( | ||
573 | self.context, data, len(data), ctypes.byref(processed_bytes) | ||
574 | ) | ||
575 | |||
576 | if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0: | ||
577 | # Timed out | ||
578 | raise socket.timeout("send timed out") | ||
579 | else: | ||
580 | _assert_no_error(result) | ||
581 | |||
582 | # We sent, and probably succeeded. Tell them how much we sent. | ||
583 | return processed_bytes.value | ||
584 | |||
585 | def sendall(self, data): | ||
586 | total_sent = 0 | ||
587 | while total_sent < len(data): | ||
588 | sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) | ||
589 | total_sent += sent | ||
590 | |||
591 | def shutdown(self): | ||
592 | with self._raise_on_error(): | ||
593 | Security.SSLClose(self.context) | ||
594 | |||
595 | def close(self): | ||
596 | # TODO: should I do clean shutdown here? Do I have to? | ||
597 | if self._makefile_refs < 1: | ||
598 | self._closed = True | ||
599 | if self.context: | ||
600 | CoreFoundation.CFRelease(self.context) | ||
601 | self.context = None | ||
602 | if self._client_cert_chain: | ||
603 | CoreFoundation.CFRelease(self._client_cert_chain) | ||
604 | self._client_cert_chain = None | ||
605 | if self._keychain: | ||
606 | Security.SecKeychainDelete(self._keychain) | ||
607 | CoreFoundation.CFRelease(self._keychain) | ||
608 | shutil.rmtree(self._keychain_dir) | ||
609 | self._keychain = self._keychain_dir = None | ||
610 | return self.socket.close() | ||
611 | else: | ||
612 | self._makefile_refs -= 1 | ||
613 | |||
614 | def getpeercert(self, binary_form=False): | ||
615 | # Urgh, annoying. | ||
616 | # | ||
617 | # Here's how we do this: | ||
618 | # | ||
619 | # 1. Call SSLCopyPeerTrust to get hold of the trust object for this | ||
620 | # connection. | ||
621 | # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf. | ||
622 | # 3. To get the CN, call SecCertificateCopyCommonName and process that | ||
623 | # string so that it's of the appropriate type. | ||
624 | # 4. To get the SAN, we need to do something a bit more complex: | ||
625 | # a. Call SecCertificateCopyValues to get the data, requesting | ||
626 | # kSecOIDSubjectAltName. | ||
627 | # b. Mess about with this dictionary to try to get the SANs out. | ||
628 | # | ||
629 | # This is gross. Really gross. It's going to be a few hundred LoC extra | ||
630 | # just to repeat something that SecureTransport can *already do*. So my | ||
631 | # operating assumption at this time is that what we want to do is | ||
632 | # instead to just flag to urllib3 that it shouldn't do its own hostname | ||
633 | # validation when using SecureTransport. | ||
634 | if not binary_form: | ||
635 | raise ValueError( | ||
636 | "SecureTransport only supports dumping binary certs" | ||
637 | ) | ||
638 | trust = Security.SecTrustRef() | ||
639 | certdata = None | ||
640 | der_bytes = None | ||
641 | |||
642 | try: | ||
643 | # Grab the trust store. | ||
644 | result = Security.SSLCopyPeerTrust( | ||
645 | self.context, ctypes.byref(trust) | ||
646 | ) | ||
647 | _assert_no_error(result) | ||
648 | if not trust: | ||
649 | # Probably we haven't done the handshake yet. No biggie. | ||
650 | return None | ||
651 | |||
652 | cert_count = Security.SecTrustGetCertificateCount(trust) | ||
653 | if not cert_count: | ||
654 | # Also a case that might happen if we haven't handshaked. | ||
655 | # Handshook? Handshaken? | ||
656 | return None | ||
657 | |||
658 | leaf = Security.SecTrustGetCertificateAtIndex(trust, 0) | ||
659 | assert leaf | ||
660 | |||
661 | # Ok, now we want the DER bytes. | ||
662 | certdata = Security.SecCertificateCopyData(leaf) | ||
663 | assert certdata | ||
664 | |||
665 | data_length = CoreFoundation.CFDataGetLength(certdata) | ||
666 | data_buffer = CoreFoundation.CFDataGetBytePtr(certdata) | ||
667 | der_bytes = ctypes.string_at(data_buffer, data_length) | ||
668 | finally: | ||
669 | if certdata: | ||
670 | CoreFoundation.CFRelease(certdata) | ||
671 | if trust: | ||
672 | CoreFoundation.CFRelease(trust) | ||
673 | |||
674 | return der_bytes | ||
675 | |||
676 | def _reuse(self): | ||
677 | self._makefile_refs += 1 | ||
678 | |||
679 | def _drop(self): | ||
680 | if self._makefile_refs < 1: | ||
681 | self.close() | ||
682 | else: | ||
683 | self._makefile_refs -= 1 | ||
684 | |||
685 | |||
# Attach a version-appropriate makefile() to WrappedSocket.
# NOTE(review): _fileobject is presumably socket._fileobject (Python 2 only),
# imported conditionally earlier in this file — confirm against the imports.
if _fileobject:  # Platform-specific: Python 2
    def makefile(self, mode, bufsize=-1):
        # Bump the refcount so close() defers until the file object is done.
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize, close=True)
else:  # Platform-specific: Python 3
    def makefile(self, mode="r", buffering=None, *args, **kwargs):
        # We disable buffering with SecureTransport because it conflicts with
        # the buffering that ST does internally (see issue #1153 for more).
        buffering = 0
        return backport_makefile(self, mode, buffering, *args, **kwargs)

WrappedSocket.makefile = makefile
698 | |||
699 | |||
class SecureTransportContext(object):
    """
    I am a wrapper class for the SecureTransport library, to translate the
    interface of the standard library ``SSLContext`` object to calls into
    SecureTransport.
    """
    def __init__(self, protocol):
        self._min_version, self._max_version = _protocol_to_min_max[protocol]
        self._options = 0
        self._verify = False
        self._trust_bundle = None
        self._client_cert = None
        self._client_key = None
        self._client_key_passphrase = None

    @property
    def check_hostname(self):
        """
        SecureTransport cannot have its hostname checking disabled. For more,
        see the comment on getpeercert() in this file.
        """
        return True

    @check_hostname.setter
    def check_hostname(self, value):
        """
        SecureTransport cannot have its hostname checking disabled. For more,
        see the comment on getpeercert() in this file.
        """
        pass

    @property
    def options(self):
        # TODO: Well, crap.
        #
        # So this is the bit of the code that is the most likely to cause us
        # trouble. Essentially we need to enumerate all of the SSL options that
        # users might want to use and try to see if we can sensibly translate
        # them, or whether we should just ignore them.
        return self._options

    @options.setter
    def options(self, value):
        # TODO: Update in line with above.
        self._options = value

    @property
    def verify_mode(self):
        """Map our internal verify flag onto the ssl module's constants."""
        return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE

    @verify_mode.setter
    def verify_mode(self, value):
        self._verify = value == ssl.CERT_REQUIRED

    def set_default_verify_paths(self):
        # So, this has to do something a bit weird. Specifically, what it does
        # is nothing.
        #
        # This means that, if we had previously had load_verify_locations
        # called, this does not undo that. We need to do that because it turns
        # out that the rest of the urllib3 code will attempt to load the
        # default verify paths if it hasn't been told about any paths, even if
        # the context itself was sometime earlier. We resolve that by just
        # ignoring it.
        pass

    def load_default_certs(self):
        return self.set_default_verify_paths()

    def set_ciphers(self, ciphers):
        """Reject any cipher string other than the default (unsupported)."""
        # For now, we just require the default cipher string.
        if ciphers != util.ssl_.DEFAULT_CIPHERS:
            raise ValueError(
                "SecureTransport doesn't support custom cipher strings"
            )

    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
        """Record the trust bundle (file path or PEM data) for later use."""
        # OK, we only really support cadata and cafile.
        if capath is not None:
            raise ValueError(
                "SecureTransport does not support cert directories"
            )

        self._trust_bundle = cafile or cadata

    def load_cert_chain(self, certfile, keyfile=None, password=None):
        """Record client certificate, key, and key passphrase for handshake."""
        self._client_cert = certfile
        self._client_key = keyfile
        # BUGFIX: this previously assigned to self._client_cert_passphrase,
        # an attribute nothing ever reads; wrap_socket() passes
        # self._client_key_passphrase into handshake(), so the supplied
        # password was silently dropped.
        self._client_key_passphrase = password

    def wrap_socket(self, sock, server_side=False,
                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
                    server_hostname=None):
        """Wrap ``sock`` in a WrappedSocket and run the TLS handshake."""
        # So, what do we do here? Firstly, we assert some properties. This is a
        # stripped down shim, so there is some functionality we don't support.
        # See PEP 543 for the real deal.
        assert not server_side
        assert do_handshake_on_connect
        assert suppress_ragged_eofs

        # Ok, we're good to go. Now we want to create the wrapped socket object
        # and store it in the appropriate place.
        wrapped_socket = WrappedSocket(sock)

        # Now we can handshake
        wrapped_socket.handshake(
            server_hostname, self._verify, self._trust_bundle,
            self._min_version, self._max_version, self._client_cert,
            self._client_key, self._client_key_passphrase
        )
        return wrapped_socket
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/socks.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/socks.py new file mode 100644 index 0000000..6c99a75 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/contrib/socks.py | |||
@@ -0,0 +1,188 @@ | |||
1 | # -*- coding: utf-8 -*- | ||
2 | """ | ||
3 | This module contains provisional support for SOCKS proxies from within | ||
4 | urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and | ||
5 | SOCKS5. To enable its functionality, either install PySocks or install this | ||
6 | module with the ``socks`` extra. | ||
7 | |||
8 | The SOCKS implementation supports the full range of urllib3 features. It also | ||
9 | supports the following SOCKS features: | ||
10 | |||
11 | - SOCKS4 | ||
12 | - SOCKS4a | ||
13 | - SOCKS5 | ||
14 | - Usernames and passwords for the SOCKS proxy | ||
15 | |||
16 | Known Limitations: | ||
17 | |||
18 | - Currently PySocks does not support contacting remote websites via literal | ||
19 | IPv6 addresses. Any such connection attempt will fail. You must use a domain | ||
20 | name. | ||
21 | - Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any | ||
22 | such connection attempt will fail. | ||
23 | """ | ||
24 | from __future__ import absolute_import | ||
25 | |||
26 | try: | ||
27 | import socks | ||
28 | except ImportError: | ||
29 | import warnings | ||
30 | from ..exceptions import DependencyWarning | ||
31 | |||
32 | warnings.warn(( | ||
33 | 'SOCKS support in urllib3 requires the installation of optional ' | ||
34 | 'dependencies: specifically, PySocks. For more information, see ' | ||
35 | 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies' | ||
36 | ), | ||
37 | DependencyWarning | ||
38 | ) | ||
39 | raise | ||
40 | |||
41 | from socket import error as SocketError, timeout as SocketTimeout | ||
42 | |||
43 | from ..connection import ( | ||
44 | HTTPConnection, HTTPSConnection | ||
45 | ) | ||
46 | from ..connectionpool import ( | ||
47 | HTTPConnectionPool, HTTPSConnectionPool | ||
48 | ) | ||
49 | from ..exceptions import ConnectTimeoutError, NewConnectionError | ||
50 | from ..poolmanager import PoolManager | ||
51 | from ..util.url import parse_url | ||
52 | |||
53 | try: | ||
54 | import ssl | ||
55 | except ImportError: | ||
56 | ssl = None | ||
57 | |||
58 | |||
class SOCKSConnection(HTTPConnection):
    """
    A plain-text HTTP connection that connects via a SOCKS proxy.
    """
    def __init__(self, *args, **kwargs):
        # Pull our private options blob out before the base class sees it.
        self._socks_options = kwargs.pop('_socks_options')
        super(SOCKSConnection, self).__init__(*args, **kwargs)

    def _new_conn(self):
        """
        Establish a new connection via the SOCKS proxy.
        """
        connect_kw = {}
        if self.source_address:
            connect_kw['source_address'] = self.source_address
        if self.socket_options:
            connect_kw['socket_options'] = self.socket_options

        opts = self._socks_options
        try:
            sock = socks.create_connection(
                (self.host, self.port),
                proxy_type=opts['socks_version'],
                proxy_addr=opts['proxy_host'],
                proxy_port=opts['proxy_port'],
                proxy_username=opts['username'],
                proxy_password=opts['password'],
                proxy_rdns=opts['rdns'],
                timeout=self.timeout,
                **connect_kw
            )

        except SocketTimeout:
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, self.timeout))

        except socks.ProxyError as e:
            # This is fragile as hell, but it seems to be the only way to raise
            # useful errors here.
            inner = e.socket_err
            if inner and isinstance(inner, SocketTimeout):
                raise ConnectTimeoutError(
                    self,
                    "Connection to %s timed out. (connect timeout=%s)" %
                    (self.host, self.timeout)
                )
            elif inner:
                raise NewConnectionError(
                    self,
                    "Failed to establish a new connection: %s" % inner
                )
            else:
                raise NewConnectionError(
                    self,
                    "Failed to establish a new connection: %s" % e
                )

        except SocketError as e:  # Defensive: PySocks should catch all these.
            raise NewConnectionError(
                self, "Failed to establish a new connection: %s" % e)

        return sock
123 | |||
124 | |||
125 | # We don't need to duplicate the Verified/Unverified distinction from | ||
126 | # urllib3/connection.py here because the HTTPSConnection will already have been | ||
127 | # correctly set to either the Verified or Unverified form by that module. This | ||
128 | # means the SOCKSHTTPSConnection will automatically be the correct type. | ||
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
    # Combines SOCKS tunnelling (from SOCKSConnection) with TLS behavior
    # (from HTTPSConnection); no additional logic is needed.
    pass
131 | |||
132 | |||
class SOCKSHTTPConnectionPool(HTTPConnectionPool):
    # An HTTP pool whose connections are routed through the SOCKS proxy.
    ConnectionCls = SOCKSConnection
135 | |||
136 | |||
class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
    # An HTTPS pool whose connections are routed through the SOCKS proxy.
    ConnectionCls = SOCKSHTTPSConnection
139 | |||
140 | |||
class SOCKSProxyManager(PoolManager):
    """
    A version of the urllib3 ProxyManager that routes connections via the
    defined SOCKS proxy.
    """
    pool_classes_by_scheme = {
        'http': SOCKSHTTPConnectionPool,
        'https': SOCKSHTTPSConnectionPool,
    }

    def __init__(self, proxy_url, username=None, password=None,
                 num_pools=10, headers=None, **connection_pool_kw):
        parsed = parse_url(proxy_url)

        # (socks_version, rdns) per supported URL scheme; the trailing
        # "h"/"a" variants resolve hostnames on the proxy side (remote DNS).
        scheme_map = {
            'socks5': (socks.PROXY_TYPE_SOCKS5, False),
            'socks5h': (socks.PROXY_TYPE_SOCKS5, True),
            'socks4': (socks.PROXY_TYPE_SOCKS4, False),
            'socks4a': (socks.PROXY_TYPE_SOCKS4, True),
        }
        if parsed.scheme not in scheme_map:
            raise ValueError(
                "Unable to determine SOCKS version from %s" % proxy_url
            )
        socks_version, rdns = scheme_map[parsed.scheme]

        self.proxy_url = proxy_url

        socks_options = {
            'socks_version': socks_version,
            'proxy_host': parsed.host,
            'proxy_port': parsed.port,
            'username': username,
            'password': password,
            'rdns': rdns,
        }
        connection_pool_kw['_socks_options'] = socks_options

        super(SOCKSProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw
        )

        self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/exceptions.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/exceptions.py new file mode 100644 index 0000000..670a63e --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/exceptions.py | |||
@@ -0,0 +1,246 @@ | |||
1 | from __future__ import absolute_import | ||
2 | from .packages.six.moves.http_client import ( | ||
3 | IncompleteRead as httplib_IncompleteRead | ||
4 | ) | ||
5 | # Base Exceptions | ||
6 | |||
7 | |||
class HTTPError(Exception):
    """Base exception used by this module."""
11 | |||
12 | |||
class HTTPWarning(Warning):
    """Base warning used by this module."""
16 | |||
17 | |||
class PoolError(HTTPError):
    """Base exception for errors caused within a pool."""

    def __init__(self, pool, message):
        HTTPError.__init__(self, "%s: %s" % (pool, message))
        self.pool = pool

    def __reduce__(self):
        # For pickling purposes: drop the (unpicklable) pool reference.
        return self.__class__, (None, None)
27 | |||
28 | |||
class RequestError(PoolError):
    """Base exception for PoolErrors that have associated URLs."""

    def __init__(self, pool, url, message):
        PoolError.__init__(self, pool, message)
        self.url = url

    def __reduce__(self):
        # For pickling purposes: keep the URL, drop the pool reference.
        return self.__class__, (None, self.url, None)
38 | |||
39 | |||
class SSLError(HTTPError):
    """Raised when SSL certificate fails in an HTTPS connection."""
43 | |||
44 | |||
class ProxyError(HTTPError):
    """Raised when the connection to a proxy fails."""
48 | |||
49 | |||
class DecodeError(HTTPError):
    """Raised when automatic decoding based on Content-Type fails."""
53 | |||
54 | |||
class ProtocolError(HTTPError):
    """Raised when something unexpected happens mid-request/response."""
58 | |||
59 | |||
#: Renamed to ProtocolError but aliased for backwards compatibility.
#: NOTE: this module-level name shadows Python 3's builtin ConnectionError
#: within this module.
ConnectionError = ProtocolError
62 | |||
63 | |||
64 | # Leaf Exceptions | ||
65 | |||
class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded.

    :param pool: The connection pool
    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
    :param string url: The requested Url
    :param exceptions.Exception reason: The underlying error

    """

    def __init__(self, pool, url, reason=None):
        self.reason = reason
        msg = "Max retries exceeded with url: %s (Caused by %r)" % (url, reason)
        RequestError.__init__(self, pool, url, msg)
83 | |||
84 | |||
class HostChangedError(RequestError):
    """Raised when an existing pool gets a request for a foreign host."""

    def __init__(self, pool, url, retries=3):
        RequestError.__init__(
            self, pool, url, "Tried to open a foreign host with url: %s" % url)
        self.retries = retries
92 | |||
93 | |||
class TimeoutStateError(HTTPError):
    """Raised when passing an invalid state to a timeout."""
97 | |||
98 | |||
class TimeoutError(HTTPError):
    """Raised when a socket timeout error occurs.

    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """
    # NOTE: this name shadows Python 3's builtin TimeoutError within this
    # module.
106 | |||
107 | |||
class ReadTimeoutError(TimeoutError, RequestError):
    """Raised when a socket timeout occurs while receiving data from a server."""
111 | |||
112 | |||
113 | # This timeout error does not have a URL attached and needs to inherit from the | ||
114 | # base HTTPError | ||
class ConnectTimeoutError(TimeoutError):
    """Raised when a socket timeout occurs while connecting to a server."""
118 | |||
119 | |||
class NewConnectionError(ConnectTimeoutError, PoolError):
    """Raised when we fail to establish a new connection. Usually ECONNREFUSED."""
123 | |||
124 | |||
class EmptyPoolError(PoolError):
    """Raised when a pool runs out of connections and no more are allowed."""
128 | |||
129 | |||
class ClosedPoolError(PoolError):
    """Raised when a request enters a pool after the pool has been closed."""
133 | |||
134 | |||
class LocationValueError(ValueError, HTTPError):
    """Raised when there is something wrong with a given URL input."""
138 | |||
139 | |||
class LocationParseError(LocationValueError):
    """Raised when get_host or similar fails to parse the URL input."""

    def __init__(self, location):
        # Deliberately calls HTTPError.__init__ directly, matching the
        # original implementation's bypass of the ValueError branch.
        HTTPError.__init__(self, "Failed to parse: %s" % location)
        self.location = location
148 | |||
149 | |||
class ResponseError(HTTPError):
    """Used as a container for an error reason supplied in a MaxRetryError."""

    # Message templates; SPECIFIC_ERROR is meant for str.format with a
    # ``status_code`` keyword.
    GENERIC_ERROR = 'too many error responses'
    SPECIFIC_ERROR = 'too many {status_code} error responses'
154 | |||
155 | |||
class SecurityWarning(HTTPWarning):
    """Warned when performing security reducing actions."""
159 | |||
160 | |||
class SubjectAltNameWarning(SecurityWarning):
    """Warned when connecting to a host with a certificate missing a SAN."""
164 | |||
165 | |||
class InsecureRequestWarning(SecurityWarning):
    """Warned when making an unverified HTTPS request."""
169 | |||
170 | |||
class SystemTimeWarning(SecurityWarning):
    """Warned when system time is suspected to be wrong."""
174 | |||
175 | |||
class InsecurePlatformWarning(SecurityWarning):
    """Warned when certain SSL configuration is not available on a platform."""
179 | |||
180 | |||
class SNIMissingWarning(HTTPWarning):
    """Warned when making a HTTPS request without SNI available."""
184 | |||
185 | |||
class DependencyWarning(HTTPWarning):
    """
    Warned when an attempt is made to import a module with missing optional
    dependencies.
    """
192 | |||
193 | |||
class ResponseNotChunked(ProtocolError, ValueError):
    """Response needs to be chunked in order to read it as chunks."""
197 | |||
198 | |||
class BodyNotHttplibCompatible(HTTPError):
    """
    Body should be httplib.HTTPResponse like (have an fp attribute which
    returns raw chunks) for read_chunked().
    """
205 | |||
206 | |||
class IncompleteRead(HTTPError, httplib_IncompleteRead):
    """
    Response length doesn't match expected Content-Length

    Subclass of http_client.IncompleteRead to allow int value
    for `partial` to avoid creating large objects on streamed
    reads.
    """
    def __init__(self, partial, expected):
        super(IncompleteRead, self).__init__(partial, expected)

    def __repr__(self):
        return 'IncompleteRead(%i bytes read, %i more expected)' % (
            self.partial, self.expected)
221 | |||
222 | |||
class InvalidHeader(HTTPError):
    """The header provided was somehow invalid."""
226 | |||
227 | |||
class ProxySchemeUnknown(AssertionError, ValueError):
    """ProxyManager does not support the supplied scheme."""
    # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.

    def __init__(self, scheme):
        super(ProxySchemeUnknown, self).__init__(
            "Not supported proxy scheme %s" % scheme)
235 | |||
236 | |||
class HeaderParsingError(HTTPError):
    """Raised by assert_header_parsing, but we convert it to a log.warning statement."""

    def __init__(self, defects, unparsed_data):
        super(HeaderParsingError, self).__init__(
            '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data))
242 | |||
243 | |||
class UnrewindableBodyError(HTTPError):
    """urllib3 encountered an error when trying to rewind a body."""
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/fields.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/fields.py new file mode 100644 index 0000000..8e15621 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/fields.py | |||
@@ -0,0 +1,178 @@ | |||
1 | from __future__ import absolute_import | ||
2 | import email.utils | ||
3 | import mimetypes | ||
4 | |||
5 | from .packages import six | ||
6 | |||
7 | |||
def guess_content_type(filename, default='application/octet-stream'):
    """
    Guess the "Content-Type" of a file.

    :param filename:
        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
    :param default:
        If no "Content-Type" can be guessed, default to `default`.
    """
    # Empty/None filename: nothing to guess from.
    if not filename:
        return default
    guessed, _encoding = mimetypes.guess_type(filename)
    return guessed or default
20 | |||
21 | |||
def format_header_param(name, value):
    """
    Helper function to format and quote a single header parameter.

    Particularly useful for header parameters which might contain
    non-ASCII values, like file names. This follows RFC 2231, as
    suggested by RFC 2388 Section 4.4.

    :param name:
        The name of the parameter, a string expected to be ASCII only.
    :param value:
        The value of the parameter, provided as a unicode string.
    """
    needs_encoding = any(ch in value for ch in '"\\\r\n')
    if not needs_encoding:
        quoted = '%s="%s"' % (name, value)
        try:
            quoted.encode('ascii')
        except (UnicodeEncodeError, UnicodeDecodeError):
            # Non-ASCII value: fall through to RFC 2231 encoding below.
            pass
        else:
            return quoted
    if not six.PY3 and isinstance(value, six.text_type):  # Python 2:
        value = value.encode('utf-8')
    value = email.utils.encode_rfc2231(value, 'utf-8')
    return '%s*=%s' % (name, value)
48 | |||
49 | |||
class RequestField(object):
    """
    A data container for request body parameters.

    :param name:
        The name of this request field.
    :param data:
        The data/value body.
    :param filename:
        An optional filename of the request field.
    :param headers:
        An optional dict-like object of headers to initially use for the field.
    """

    def __init__(self, name, data, filename=None, headers=None):
        self._name = name
        self._filename = filename
        self.data = data
        # Copy so later header mutations never leak back into the caller's dict.
        self.headers = dict(headers) if headers else {}

    @classmethod
    def from_tuples(cls, fieldname, value):
        """
        A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.

        Supports constructing :class:`~urllib3.fields.RequestField` from
        parameter of key/value strings AND key/filetuple. A filetuple is a
        (filename, data, MIME type) tuple where the MIME type is optional.
        For example::

            'foo': 'bar',
            'fakefile': ('foofile.txt', 'contents of foofile'),
            'realfile': ('barfile.txt', open('realfile').read()),
            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
            'nonamefile': 'contents of nonamefile field',

        Field names and filenames must be unicode.
        """
        filename = None
        content_type = None
        if isinstance(value, tuple):
            if len(value) == 3:
                filename, data, content_type = value
            else:
                filename, data = value
                content_type = guess_content_type(filename)
        else:
            # Plain key/value string pair.
            data = value

        field = cls(fieldname, data, filename=filename)
        field.make_multipart(content_type=content_type)
        return field

    def _render_part(self, name, value):
        """
        Overridable helper function to format a single header parameter.

        :param name:
            The name of the parameter, a string expected to be ASCII only.
        :param value:
            The value of the parameter, provided as a unicode string.
        """
        return format_header_param(name, value)

    def _render_parts(self, header_parts):
        """
        Helper function to format and quote a single header.

        Useful for single headers that are composed of multiple items. E.g.,
        'Content-Disposition' fields.

        :param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
            as `k1="v1"; k2="v2"; ...`.
        """
        items = header_parts.items() if isinstance(header_parts, dict) else header_parts
        rendered = [self._render_part(k, v) for k, v in items if v is not None]
        return '; '.join(rendered)

    def render_headers(self):
        """
        Renders the headers for this request field.
        """
        lines = []

        # These three headers are always emitted first, in this fixed order.
        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
        for sort_key in sort_keys:
            if self.headers.get(sort_key, False):
                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))

        # Remaining (truthy) headers follow in dict iteration order.
        for header_name, header_value in self.headers.items():
            if header_name not in sort_keys and header_value:
                lines.append('%s: %s' % (header_name, header_value))

        lines.append('\r\n')
        return '\r\n'.join(lines)

    def make_multipart(self, content_disposition=None, content_type=None,
                       content_location=None):
        """
        Makes this request field into a multipart request field.

        This method overrides "Content-Disposition", "Content-Type" and
        "Content-Location" headers to the request parameter.

        :param content_type:
            The 'Content-Type' of the request body.
        :param content_location:
            The 'Content-Location' of the request body.

        """
        disposition = content_disposition or 'form-data'
        # Append '; name="..."; filename="..."' (None parts are skipped).
        disposition += '; '.join([
            '', self._render_parts(
                (('name', self._name), ('filename', self._filename))
            )
        ])
        self.headers['Content-Disposition'] = disposition
        self.headers['Content-Type'] = content_type
        self.headers['Content-Location'] = content_location
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/filepost.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/filepost.py new file mode 100644 index 0000000..e53dedc --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/filepost.py | |||
@@ -0,0 +1,94 @@ | |||
1 | from __future__ import absolute_import | ||
2 | import codecs | ||
3 | |||
4 | from uuid import uuid4 | ||
5 | from io import BytesIO | ||
6 | |||
7 | from .packages import six | ||
8 | from .packages.six import b | ||
9 | from .fields import RequestField | ||
10 | |||
# Index 3 of codecs.lookup()'s CodecInfo is the StreamWriter class:
# wrapping a binary buffer with it lets us write text as UTF-8 bytes.
writer = codecs.lookup('utf-8')[3]
12 | |||
13 | |||
def choose_boundary():
    """
    Our embarrassingly-simple replacement for mimetools.choose_boundary.
    """
    # A random UUID rendered as 32 hex digits is unique enough for a boundary.
    token = uuid4()
    return token.hex
19 | |||
20 | |||
def iter_field_objects(fields):
    """
    Iterate over fields.

    Supports list of (k, v) tuples and dicts, and lists of
    :class:`~urllib3.fields.RequestField`.

    """
    # Normalize the container into a plain iterator of items.
    if isinstance(fields, dict):
        source = six.iteritems(fields)
    else:
        source = iter(fields)

    for item in source:
        if isinstance(item, RequestField):
            yield item
        else:
            # Old-style (key, value) pair: promote it to a RequestField.
            yield RequestField.from_tuples(*item)
39 | |||
40 | |||
def iter_fields(fields):
    """
    .. deprecated:: 1.6

    Iterate over fields.

    The addition of :class:`~urllib3.fields.RequestField` makes this function
    obsolete. Instead, use :func:`iter_field_objects`, which returns
    :class:`~urllib3.fields.RequestField` objects.

    Supports list of (k, v) tuples and dicts.
    """
    source = six.iteritems(fields) if isinstance(fields, dict) else fields
    return ((k, v) for k, v in source)
57 | |||
58 | |||
def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.

    :param fields:
        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).

    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.
    """
    if boundary is None:
        boundary = choose_boundary()

    body = BytesIO()
    for field in iter_field_objects(fields):
        body.write(b('--%s\r\n' % (boundary)))

        # Headers are text; push them through the UTF-8 stream writer.
        writer(body).write(field.render_headers())

        data = field.data
        if isinstance(data, int):
            data = str(data)  # Backwards compatibility

        # Text payloads are encoded as UTF-8; bytes are written verbatim.
        if isinstance(data, six.text_type):
            writer(body).write(data)
        else:
            body.write(data)

        body.write(b'\r\n')

    body.write(b('--%s--\r\n' % (boundary)))

    content_type = str('multipart/form-data; boundary=%s' % boundary)
    return body.getvalue(), content_type
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/__init__.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/__init__.py new file mode 100644 index 0000000..324c551 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/__init__.py | |||
@@ -0,0 +1,5 @@ | |||
1 | from __future__ import absolute_import | ||
2 | |||
3 | from . import ssl_match_hostname | ||
4 | |||
5 | __all__ = ('ssl_match_hostname', ) | ||
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/backports/__init__.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/backports/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/backports/__init__.py | |||
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/backports/makefile.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/backports/makefile.py new file mode 100644 index 0000000..00dee0b --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/backports/makefile.py | |||
@@ -0,0 +1,53 @@ | |||
1 | # -*- coding: utf-8 -*- | ||
2 | """ | ||
3 | backports.makefile | ||
4 | ~~~~~~~~~~~~~~~~~~ | ||
5 | |||
6 | Backports the Python 3 ``socket.makefile`` method for use with anything that | ||
7 | wants to create a "fake" socket object. | ||
8 | """ | ||
9 | import io | ||
10 | |||
11 | from socket import SocketIO | ||
12 | |||
13 | |||
14 | def backport_makefile(self, mode="r", buffering=None, encoding=None, | ||
15 | errors=None, newline=None): | ||
16 | """ | ||
17 | Backport of ``socket.makefile`` from Python 3.5. | ||
18 | """ | ||
19 | if not set(mode) <= set(["r", "w", "b"]): | ||
20 | raise ValueError( | ||
21 | "invalid mode %r (only r, w, b allowed)" % (mode,) | ||
22 | ) | ||
23 | writing = "w" in mode | ||
24 | reading = "r" in mode or not writing | ||
25 | assert reading or writing | ||
26 | binary = "b" in mode | ||
27 | rawmode = "" | ||
28 | if reading: | ||
29 | rawmode += "r" | ||
30 | if writing: | ||
31 | rawmode += "w" | ||
32 | raw = SocketIO(self, rawmode) | ||
33 | self._makefile_refs += 1 | ||
34 | if buffering is None: | ||
35 | buffering = -1 | ||
36 | if buffering < 0: | ||
37 | buffering = io.DEFAULT_BUFFER_SIZE | ||
38 | if buffering == 0: | ||
39 | if not binary: | ||
40 | raise ValueError("unbuffered streams must be binary") | ||
41 | return raw | ||
42 | if reading and writing: | ||
43 | buffer = io.BufferedRWPair(raw, raw, buffering) | ||
44 | elif reading: | ||
45 | buffer = io.BufferedReader(raw, buffering) | ||
46 | else: | ||
47 | assert writing | ||
48 | buffer = io.BufferedWriter(raw, buffering) | ||
49 | if binary: | ||
50 | return buffer | ||
51 | text = io.TextIOWrapper(buffer, encoding, errors, newline) | ||
52 | text.mode = mode | ||
53 | return text | ||
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/ordered_dict.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/ordered_dict.py new file mode 100644 index 0000000..62dcb42 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/ordered_dict.py | |||
@@ -0,0 +1,259 @@ | |||
1 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. | ||
2 | # Passes Python2.7's test suite and incorporates all the latest updates. | ||
3 | # Copyright 2009 Raymond Hettinger, released under the MIT License. | ||
4 | # http://code.activestate.com/recipes/576693/ | ||
5 | try: | ||
6 | from thread import get_ident as _get_ident | ||
7 | except ImportError: | ||
8 | from dummy_thread import get_ident as _get_ident | ||
9 | |||
10 | try: | ||
11 | from _abcoll import KeysView, ValuesView, ItemsView | ||
12 | except ImportError: | ||
13 | pass | ||
14 | |||
15 | |||
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three: [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # Only build the linked-list state on first initialization; a
            # repeated __init__ call leaves the existing order intact.
            self.__root = root = []  # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk the linked list forward (NEXT pointers) = insertion order.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Walk the linked list backward (PREV pointers).
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None. Remove all items from od.'
        try:
            # Emptying each link breaks the prev/next reference cycles
            # between the link lists before dropping them.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (most recent insertion).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the node just after the sentinel (oldest insertion).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    def update(*args, **kwds):
        '''od.update(E, **F) -> None. Update od from dict/iterable E and F.

        If E is a dict instance, does: for k in E: od[k] = E[k]
        If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does: for k, v in E: od[k] = v
        In either case, this is followed by: for k, v in F.items(): od[k] = v

        '''
        # NOTE: declared with *args (no explicit self) so that a keyword
        # argument literally named 'self' cannot shadow the instance.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    # Sentinel distinguishing "no default given" from an explicit None default.
    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # The shared mutable default is deliberate: it tracks in-progress
        # repr calls (keyed by object id and thread) so that a
        # self-referencing dictionary prints '...' instead of recursing.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        # Strip the OrderedDict bookkeeping attributes from the instance
        # dict so only user-added attributes are pickled.
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).

        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/six.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/six.py new file mode 100644 index 0000000..7bd9225 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/six.py | |||
@@ -0,0 +1,868 @@ | |||
1 | """Utilities for writing code that runs on Python 2 and 3""" | ||
2 | |||
3 | # Copyright (c) 2010-2015 Benjamin Peterson | ||
4 | # | ||
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy | ||
6 | # of this software and associated documentation files (the "Software"), to deal | ||
7 | # in the Software without restriction, including without limitation the rights | ||
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
9 | # copies of the Software, and to permit persons to whom the Software is | ||
10 | # furnished to do so, subject to the following conditions: | ||
11 | # | ||
12 | # The above copyright notice and this permission notice shall be included in all | ||
13 | # copies or substantial portions of the Software. | ||
14 | # | ||
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
21 | # SOFTWARE. | ||
22 | |||
23 | from __future__ import absolute_import | ||
24 | |||
25 | import functools | ||
26 | import itertools | ||
27 | import operator | ||
28 | import sys | ||
29 | import types | ||
30 | |||
31 | __author__ = "Benjamin Peterson <benjamin@python.org>" | ||
32 | __version__ = "1.10.0" | ||
33 | |||
34 | |||
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)

if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    # Python 2 only: basestring/long/unicode and types.ClassType do not
    # exist on Python 3, but this branch is never reached there.
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        class X(object):

            def __len__(self):
                return 1 << 31
        try:
            # len() must fit in a Py_ssize_t, so this raises OverflowError
            # on 32-bit builds -- exactly the property being probed.
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
73 | |||
74 | |||
75 | def _add_doc(func, doc): | ||
76 | """Add documentation to a function.""" | ||
77 | func.__doc__ = doc | ||
78 | |||
79 | |||
80 | def _import_module(name): | ||
81 | """Import module, returning the module after the last dot.""" | ||
82 | __import__(name) | ||
83 | return sys.modules[name] | ||
84 | |||
85 | |||
86 | class _LazyDescr(object): | ||
87 | |||
88 | def __init__(self, name): | ||
89 | self.name = name | ||
90 | |||
91 | def __get__(self, obj, tp): | ||
92 | result = self._resolve() | ||
93 | setattr(obj, self.name, result) # Invokes __set__. | ||
94 | try: | ||
95 | # This is a bit ugly, but it avoids running this again by | ||
96 | # removing this descriptor. | ||
97 | delattr(obj.__class__, self.name) | ||
98 | except AttributeError: | ||
99 | pass | ||
100 | return result | ||
101 | |||
102 | |||
class MovedModule(_LazyDescr):
    # Lazy placeholder for a module that was renamed between Python 2 and 3.

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            # On Python 3 the new name applies; it defaults to `name` itself.
            self.mod = name if new is None else new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        _module = self._resolve()
        value = getattr(_module, attr)
        # Cache so __getattr__ is not triggered again for this attribute.
        setattr(self, attr, value)
        return value
122 | |||
123 | |||
124 | class _LazyModule(types.ModuleType): | ||
125 | |||
126 | def __init__(self, name): | ||
127 | super(_LazyModule, self).__init__(name) | ||
128 | self.__doc__ = self.__class__.__doc__ | ||
129 | |||
130 | def __dir__(self): | ||
131 | attrs = ["__doc__", "__name__"] | ||
132 | attrs += [attr.name for attr in self._moved_attributes] | ||
133 | return attrs | ||
134 | |||
135 | # Subclasses should override this | ||
136 | _moved_attributes = [] | ||
137 | |||
138 | |||
class MovedAttribute(_LazyDescr):
    # Lazy placeholder for an attribute that lives in a different module
    # (and possibly under a different name) on Python 2 vs Python 3.

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Fallback chain: new attr name -> old attr name -> `name`.
            if new_attr is None:
                new_attr = name if old_attr is None else old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            self.attr = name if old_attr is None else old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
162 | |||
163 | |||
164 | class _SixMetaPathImporter(object): | ||
165 | |||
166 | """ | ||
167 | A meta path importer to import six.moves and its submodules. | ||
168 | |||
169 | This class implements a PEP302 finder and loader. It should be compatible | ||
170 | with Python 2.5 and all existing versions of Python3 | ||
171 | """ | ||
172 | |||
173 | def __init__(self, six_module_name): | ||
174 | self.name = six_module_name | ||
175 | self.known_modules = {} | ||
176 | |||
177 | def _add_module(self, mod, *fullnames): | ||
178 | for fullname in fullnames: | ||
179 | self.known_modules[self.name + "." + fullname] = mod | ||
180 | |||
181 | def _get_module(self, fullname): | ||
182 | return self.known_modules[self.name + "." + fullname] | ||
183 | |||
184 | def find_module(self, fullname, path=None): | ||
185 | if fullname in self.known_modules: | ||
186 | return self | ||
187 | return None | ||
188 | |||
189 | def __get_module(self, fullname): | ||
190 | try: | ||
191 | return self.known_modules[fullname] | ||
192 | except KeyError: | ||
193 | raise ImportError("This loader does not know module " + fullname) | ||
194 | |||
195 | def load_module(self, fullname): | ||
196 | try: | ||
197 | # in case of a reload | ||
198 | return sys.modules[fullname] | ||
199 | except KeyError: | ||
200 | pass | ||
201 | mod = self.__get_module(fullname) | ||
202 | if isinstance(mod, MovedModule): | ||
203 | mod = mod._resolve() | ||
204 | else: | ||
205 | mod.__loader__ = self | ||
206 | sys.modules[fullname] = mod | ||
207 | return mod | ||
208 | |||
209 | def is_package(self, fullname): | ||
210 | """ | ||
211 | Return true, if the named module is a package. | ||
212 | |||
213 | We need this method to get correct spec objects with | ||
214 | Python 3.4 (see PEP451) | ||
215 | """ | ||
216 | return hasattr(self.__get_module(fullname), "__path__") | ||
217 | |||
218 | def get_code(self, fullname): | ||
219 | """Return None | ||
220 | |||
221 | Required, if is_package is implemented""" | ||
222 | self.__get_module(fullname) # eventually raises ImportError | ||
223 | return None | ||
224 | get_source = get_code # same as get_code | ||
225 | |||
# Single shared importer instance for this module's lazy pseudo-modules.
_importer = _SixMetaPathImporter(__name__)
227 | |||
228 | |||
class _MovedItems(_LazyModule):

    """Lazy loading of moved objects"""
    # Entries come from the module-level `_moved_attributes` list below
    # (see _LazyModule.__dir__, which reads `_moved_attributes`).
    __path__ = []  # mark as package
233 | |||
234 | |||
# Master table of objects that were renamed or relocated between
# Python 2 and 3.  MovedAttribute entries lazily resolve a single
# attribute; MovedModule entries lazily resolve a whole module.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("intern", "__builtin__", "sys"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
    _moved_attributes += [
        MovedModule("winreg", "_winreg"),
    ]

# Attach each entry to _MovedItems; MovedModule entries are also
# registered with the meta-path importer under "<six>.moves.<name>".
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr

_MovedItems._moved_attributes = _moved_attributes

# The importable "six.moves" pseudo-module itself.
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
318 | |||
319 | |||
class Module_six_moves_urllib_parse(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_parse"""


# Names merged into urllib.parse on Python 3 from the old urlparse and
# urllib modules.
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
    MovedAttribute("splittag", "urllib", "urllib.parse"),
    MovedAttribute("splituser", "urllib", "urllib.parse"),
    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes

# Register under both the flat and the nested spelling.
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
                      "moves.urllib_parse", "moves.urllib.parse")
358 | |||
359 | |||
class Module_six_moves_urllib_error(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_error"""


# Exceptions collected into urllib.error on Python 3.
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes

# Register under both the flat and the nested spelling.
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")
378 | |||
379 | |||
class Module_six_moves_urllib_request(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_request"""


# Names merged into urllib.request on Python 3 from urllib2 and urllib.
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes

# Register under both the flat and the nested spelling.
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")
428 | |||
429 | |||
class Module_six_moves_urllib_response(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_response"""


# Response wrapper classes that live in urllib.response on Python 3.
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes

# Register under both the flat and the nested spelling.
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")
449 | |||
450 | |||
class Module_six_moves_urllib_robotparser(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_robotparser"""


# robotparser became urllib.robotparser on Python 3.
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes

# Register under both the flat and the nested spelling.
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")
467 | |||
468 | |||
class Module_six_moves_urllib(types.ModuleType):

    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package
    # Each submodule is the lazy module instance registered above.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")

    def __dir__(self):
        # Only the five submodules are part of the public namespace.
        return ['parse', 'error', 'request', 'response', 'robotparser']

_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")
484 | |||
485 | |||
def add_move(move):
    """Make *move* (a MovedAttribute or MovedModule) available on six.moves."""
    name = move.name
    setattr(_MovedItems, name, move)
489 | |||
490 | |||
def remove_move(name):
    """Remove the named item from six.moves.

    Raises AttributeError when no such move exists.
    """
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Not a class attribute; it may have been set directly on the
        # moves module instance (e.g. via add_move on the instance dict).
        try:
            moves.__dict__.pop(name)
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
500 | |||
501 | |||
# Attribute names for introspecting methods and functions; Python 3
# renamed them to dunder forms (im_func -> __func__, func_code ->
# __code__, etc.).  The get_* accessors below are built from these.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
518 | |||
519 | |||
try:
    # Python 2.6+/3: the next() builtin exists.
    advance_iterator = next
except NameError:
    # Very old Pythons: emulate the next() builtin.
    def advance_iterator(it):
        return it.next()
next = advance_iterator


try:
    # callable() is missing only on Python 3.0/3.1.
    callable = callable
except NameError:
    def callable(obj):
        # An object is callable iff some class in its MRO defines __call__.
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
533 | |||
534 | |||
# Helpers papering over the removal of unbound methods in Python 3.
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; plain functions are used.
        return unbound

    create_bound_method = types.MethodType

    def create_unbound_method(func, cls):
        # A plain function already behaves as an unbound method on py3.
        return func

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    def create_unbound_method(func, cls):
        return types.MethodType(func, None, cls)

    class Iterator(object):
        # Base class mapping Python 2's next() onto __next__().

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
563 | |||
564 | |||
# Version-neutral accessors built from the attribute names chosen above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
571 | |||
572 | |||
# Dictionary iteration/view shims: py3 keys()/values()/items() are
# already views, py2 needs the iter*/view* method family.
if PY3:
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def iterlists(d, **kw):
        # For multidicts (e.g. Django QueryDict) exposing lists().
        return iter(d.lists(**kw))

    viewkeys = operator.methodcaller("keys")

    viewvalues = operator.methodcaller("values")

    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return d.iterkeys(**kw)

    def itervalues(d, **kw):
        return d.itervalues(**kw)

    def iteritems(d, **kw):
        return d.iteritems(**kw)

    def iterlists(d, **kw):
        return d.iterlists(**kw)

    viewkeys = operator.methodcaller("viewkeys")

    viewvalues = operator.methodcaller("viewvalues")

    viewitems = operator.methodcaller("viewitems")

_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")
616 | |||
617 | |||
# Bytes/text shims: b() and u() emulate literals, plus helpers for
# indexing and iterating byte strings uniformly on py2 and py3.
if PY3:
    def b(s):
        return s.encode("latin-1")

    def u(s):
        return s
    unichr = chr
    import struct
    int2byte = struct.Struct(">B").pack
    del struct
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
    # unittest method names; 3.0/3.1 still used the Python 2 spellings.
    _assertCountEqual = "assertCountEqual"
    if sys.version_info[1] <= 1:
        _assertRaisesRegex = "assertRaisesRegexp"
        _assertRegex = "assertRegexpMatches"
    else:
        _assertRaisesRegex = "assertRaisesRegex"
        _assertRegex = "assertRegex"
else:
    def b(s):
        return s
    # Workaround for standalone backslash

    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr

    def byte2int(bs):
        return ord(bs[0])

    def indexbytes(buf, i):
        return ord(buf[i])
    iterbytes = functools.partial(itertools.imap, ord)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
    _assertCountEqual = "assertItemsEqual"
    _assertRaisesRegex = "assertRaisesRegexp"
    _assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
664 | |||
665 | |||
def assertCountEqual(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest assertCountEqual."""
    method = getattr(self, _assertCountEqual)
    return method(*args, **kwargs)
668 | |||
669 | |||
def assertRaisesRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest assertRaisesRegex."""
    method = getattr(self, _assertRaisesRegex)
    return method(*args, **kwargs)
672 | |||
673 | |||
def assertRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest assertRegex."""
    method = getattr(self, _assertRegex)
    return method(*args, **kwargs)
676 | |||
677 | |||
# exec_() and reraise(): defined with exec tricks because the py2 and
# py3 spellings are mutually invalid syntax.
if PY3:
    exec_ = getattr(moves.builtins, "exec")

    def reraise(tp, value, tb=None):
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    # Three-argument raise is a syntax error on py3, so build it here.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
704 | |||
705 | |||
# raise_from(): "raise x from y" is py3-only syntax, so the py3 variants
# are built via exec_().  3.2 lacks "raise x from None", hence its own case.
if sys.version_info[:2] == (3, 2):
    exec_("""def raise_from(value, from_value):
    if from_value is None:
        raise value
    raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
    exec_("""def raise_from(value, from_value):
    raise value from from_value
""")
else:
    def raise_from(value, from_value):
        # Python 2 has no exception chaining; just raise the new value.
        raise value
719 | |||
720 | |||
# print_(): use the real print function when available, otherwise a
# pure-Python emulation for Python 2.4/2.5.
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            # Mirror the builtin: file=None writes nothing.
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                isinstance(data, unicode) and
                fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        # Decide whether output must be unicode (any unicode sep/end/arg).
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
if sys.version_info[:2] < (3, 3):
    # Wrap print_ to add the "flush" keyword introduced in Python 3.3.
    _print = print_

    def print_(*args, **kwargs):
        fp = kwargs.get("file", sys.stdout)
        flush = kwargs.pop("flush", False)
        _print(*args, **kwargs)
        if flush and fp is not None:
            fp.flush()

_add_doc(reraise, """Reraise an exception.""")
787 | |||
if sys.version_info[0:2] < (3, 4):
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        """functools.wraps that also sets __wrapped__ (added upstream in 3.4)."""
        def wrapper(f):
            decorated = functools.wraps(wrapped, assigned, updated)(f)
            decorated.__wrapped__ = wrapped
            return decorated
        return wrapper
else:
    # Python 3.4+ functools.wraps already records __wrapped__.
    wraps = functools.wraps
798 | |||
799 | |||
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a throwaway class; when the caller subclasses it, the dummy
    metaclass intercepts the class creation and rebuilds the real class
    with *meta* as metaclass and *bases* as bases.
    """
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):

        def __new__(cls, name, this_bases, d):
            # this_bases is just the temporary class; substitute *bases*.
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})
810 | |||
811 | |||
def add_metaclass(metaclass):
    """Class decorator that re-creates the decorated class under *metaclass*.

    The class body is copied and handed to *metaclass*; slot descriptors
    and the implicit __dict__/__weakref__ descriptors are dropped because
    the metaclass call recreates them.
    """
    def wrapper(cls):
        body = cls.__dict__.copy()
        slots = body.get('__slots__')
        if slots is not None:
            names = [slots] if isinstance(slots, str) else slots
            for slot_name in names:
                body.pop(slot_name)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
826 | |||
827 | |||
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if not PY2:
        # Nothing to do on Python 3: __str__ already returns text.
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # On Python 2: keep the text-returning method as __unicode__ and make
    # __str__ return UTF-8 encoded bytes.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
844 | |||
845 | |||
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py new file mode 100644 index 0000000..accb927 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py | |||
@@ -0,0 +1,19 @@ | |||
# Select the best available match_hostname implementation:
# stdlib (3.5+) -> backports package -> vendored copy.
import sys

try:
    # Our match_hostname function is the same as 3.5's, so we only want to
    # import the match_hostname function if it's at least that good.
    if sys.version_info < (3, 5):
        raise ImportError("Fallback to vendored code")

    from ssl import CertificateError, match_hostname
except ImportError:
    try:
        # Backport of the function from a pypi module
        from backports.ssl_match_hostname import CertificateError, match_hostname
    except ImportError:
        # Our vendored copy
        from ._implementation import CertificateError, match_hostname

# Not needed, but documenting what we provide.
__all__ = ('CertificateError', 'match_hostname')
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py new file mode 100644 index 0000000..7272d86 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py | |||
@@ -0,0 +1,157 @@ | |||
1 | """The match_hostname() function from Python 3.3.3, essential when using SSL.""" | ||
2 | |||
3 | # Note: This file is under the PSF license as the code comes from the python | ||
4 | # stdlib. http://docs.python.org/3/license.html | ||
5 | |||
6 | import re | ||
7 | import sys | ||
8 | |||
9 | # ipaddress has been backported to 2.6+ in pypi. If it is installed on the | ||
10 | # system, use it to handle IPAddress ServerAltnames (this was added in | ||
11 | # python-3.5) otherwise only do DNS matching. This allows | ||
12 | # backports.ssl_match_hostname to continue to be used all the way back to | ||
13 | # python-2.4. | ||
# Optional dependency: without ipaddress, only DNS names are matched
# (see the AttributeError handling in match_hostname below).
try:
    from pip._vendor import ipaddress
except ImportError:
    ipaddress = None

# Version of the upstream backports.ssl_match_hostname this tracks.
__version__ = '3.5.0.1'
20 | |||
21 | |||
class CertificateError(ValueError):
    """Raised when a certificate's identities fail to match the hostname."""
24 | |||
25 | |||
26 | def _dnsname_match(dn, hostname, max_wildcards=1): | ||
27 | """Matching according to RFC 6125, section 6.4.3 | ||
28 | |||
29 | http://tools.ietf.org/html/rfc6125#section-6.4.3 | ||
30 | """ | ||
31 | pats = [] | ||
32 | if not dn: | ||
33 | return False | ||
34 | |||
35 | # Ported from python3-syntax: | ||
36 | # leftmost, *remainder = dn.split(r'.') | ||
37 | parts = dn.split(r'.') | ||
38 | leftmost = parts[0] | ||
39 | remainder = parts[1:] | ||
40 | |||
41 | wildcards = leftmost.count('*') | ||
42 | if wildcards > max_wildcards: | ||
43 | # Issue #17980: avoid denials of service by refusing more | ||
44 | # than one wildcard per fragment. A survey of established | ||
45 | # policy among SSL implementations showed it to be a | ||
46 | # reasonable choice. | ||
47 | raise CertificateError( | ||
48 | "too many wildcards in certificate DNS name: " + repr(dn)) | ||
49 | |||
50 | # speed up common case w/o wildcards | ||
51 | if not wildcards: | ||
52 | return dn.lower() == hostname.lower() | ||
53 | |||
54 | # RFC 6125, section 6.4.3, subitem 1. | ||
55 | # The client SHOULD NOT attempt to match a presented identifier in which | ||
56 | # the wildcard character comprises a label other than the left-most label. | ||
57 | if leftmost == '*': | ||
58 | # When '*' is a fragment by itself, it matches a non-empty dotless | ||
59 | # fragment. | ||
60 | pats.append('[^.]+') | ||
61 | elif leftmost.startswith('xn--') or hostname.startswith('xn--'): | ||
62 | # RFC 6125, section 6.4.3, subitem 3. | ||
63 | # The client SHOULD NOT attempt to match a presented identifier | ||
64 | # where the wildcard character is embedded within an A-label or | ||
65 | # U-label of an internationalized domain name. | ||
66 | pats.append(re.escape(leftmost)) | ||
67 | else: | ||
68 | # Otherwise, '*' matches any dotless string, e.g. www* | ||
69 | pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) | ||
70 | |||
71 | # add the remaining fragments, ignore any wildcards | ||
72 | for frag in remainder: | ||
73 | pats.append(re.escape(frag)) | ||
74 | |||
75 | pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) | ||
76 | return pat.match(hostname) | ||
77 | |||
78 | |||
79 | def _to_unicode(obj): | ||
80 | if isinstance(obj, str) and sys.version_info < (3,): | ||
81 | obj = unicode(obj, encoding='ascii', errors='strict') | ||
82 | return obj | ||
83 | |||
def _ipaddress_match(ipname, host_ip):
    """Exact matching of IP addresses.

    RFC 6125 explicitly doesn't define an algorithm for this
    (section 1.7.2 - "Out of Scope").
    """
    # OpenSSL may append a trailing newline to the subjectAltName IP, and
    # (divergence from upstream) ipaddress can't parse byte strings, so
    # normalise before parsing.
    candidate = _to_unicode(ipname).rstrip()
    return ipaddress.ip_address(candidate) == host_ip
94 | |||
95 | |||
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate, match_hostname needs a "
                         "SSL socket or SSL context with either "
                         "CERT_OPTIONAL or CERT_REQUIRED")
    try:
        # Divergence from upstream: ipaddress can't handle byte str
        host_ip = ipaddress.ip_address(_to_unicode(hostname))
    except ValueError:
        # Not an IP address (common case)
        host_ip = None
    except UnicodeError:
        # Divergence from upstream: Have to deal with ipaddress not taking
        # byte strings. addresses should be all ascii, so we consider it not
        # an ipaddress in this case
        host_ip = None
    except AttributeError:
        # Divergence from upstream: Make ipaddress library optional
        if ipaddress is None:
            host_ip = None
        else:
            raise
    dnsnames = []
    # subjectAltName entries take precedence; the subject CN below is
    # only consulted when no DNS SAN entry exists.
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if host_ip is None and _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
        elif key == 'IP Address':
            if host_ip is not None and _ipaddress_match(value, host_ip):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    # Nothing matched: raise, listing every identity that was tried.
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
                               "doesn't match either of %s"
                               % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
                               "doesn't match %r"
                               % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
                               "subjectAltName fields were found")
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/poolmanager.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/poolmanager.py new file mode 100644 index 0000000..607ae0f --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/poolmanager.py | |||
@@ -0,0 +1,440 @@ | |||
1 | from __future__ import absolute_import | ||
2 | import collections | ||
3 | import functools | ||
4 | import logging | ||
5 | |||
6 | from ._collections import RecentlyUsedContainer | ||
7 | from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool | ||
8 | from .connectionpool import port_by_scheme | ||
9 | from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown | ||
10 | from .packages.six.moves.urllib.parse import urljoin | ||
11 | from .request import RequestMethods | ||
12 | from .util.url import parse_url | ||
13 | from .util.retry import Retry | ||
14 | |||
15 | |||
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']


log = logging.getLogger(__name__)

# TLS-related pool keywords. These are only meaningful for HTTPS pools and
# are stripped from the context before building a plain-HTTP pool (see
# PoolManager._new_pool).
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
                'ssl_version', 'ca_cert_dir', 'ssl_context')

# All known keyword arguments that could be provided to the pool manager, its
# pools, or the underlying connections. This is used to construct a pool key.
_key_fields = (
    'key_scheme',  # str
    'key_host',  # str
    'key_port',  # int
    'key_timeout',  # int or float or Timeout
    'key_retries',  # int or Retry
    'key_strict',  # bool
    'key_block',  # bool
    'key_source_address',  # str
    'key_key_file',  # str
    'key_cert_file',  # str
    'key_cert_reqs',  # str
    'key_ca_certs',  # str
    'key_ssl_version',  # str
    'key_ca_cert_dir',  # str
    'key_ssl_context',  # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
    'key_maxsize',  # int
    'key_headers',  # dict
    'key__proxy',  # parsed proxy url
    'key__proxy_headers',  # dict
    'key_socket_options',  # list of (level (int), optname (int), value (int or str)) tuples
    'key__socks_options',  # dict
    'key_assert_hostname',  # bool or string
    'key_assert_fingerprint',  # str
)

#: The namedtuple class used to construct keys for the connection pool.
#: All custom key schemes should include the fields in this key at a minimum.
PoolKey = collections.namedtuple('PoolKey', _key_fields)
55 | |||
56 | |||
57 | def _default_key_normalizer(key_class, request_context): | ||
58 | """ | ||
59 | Create a pool key out of a request context dictionary. | ||
60 | |||
61 | According to RFC 3986, both the scheme and host are case-insensitive. | ||
62 | Therefore, this function normalizes both before constructing the pool | ||
63 | key for an HTTPS request. If you wish to change this behaviour, provide | ||
64 | alternate callables to ``key_fn_by_scheme``. | ||
65 | |||
66 | :param key_class: | ||
67 | The class to use when constructing the key. This should be a namedtuple | ||
68 | with the ``scheme`` and ``host`` keys at a minimum. | ||
69 | :type key_class: namedtuple | ||
70 | :param request_context: | ||
71 | A dictionary-like object that contain the context for a request. | ||
72 | :type request_context: dict | ||
73 | |||
74 | :return: A namedtuple that can be used as a connection pool key. | ||
75 | :rtype: PoolKey | ||
76 | """ | ||
77 | # Since we mutate the dictionary, make a copy first | ||
78 | context = request_context.copy() | ||
79 | context['scheme'] = context['scheme'].lower() | ||
80 | context['host'] = context['host'].lower() | ||
81 | |||
82 | # These are both dictionaries and need to be transformed into frozensets | ||
83 | for key in ('headers', '_proxy_headers', '_socks_options'): | ||
84 | if key in context and context[key] is not None: | ||
85 | context[key] = frozenset(context[key].items()) | ||
86 | |||
87 | # The socket_options key may be a list and needs to be transformed into a | ||
88 | # tuple. | ||
89 | socket_opts = context.get('socket_options') | ||
90 | if socket_opts is not None: | ||
91 | context['socket_options'] = tuple(socket_opts) | ||
92 | |||
93 | # Map the kwargs to the names in the namedtuple - this is necessary since | ||
94 | # namedtuples can't have fields starting with '_'. | ||
95 | for key in list(context.keys()): | ||
96 | context['key_' + key] = context.pop(key) | ||
97 | |||
98 | # Default to ``None`` for keys missing from the context | ||
99 | for field in key_class._fields: | ||
100 | if field not in context: | ||
101 | context[field] = None | ||
102 | |||
103 | return key_class(**context) | ||
104 | |||
105 | |||
#: A dictionary that maps a scheme to a callable that creates a pool key.
#: This can be used to alter the way pool keys are constructed, if desired.
#: Each PoolManager makes a copy of this dictionary so they can be configured
#: globally here, or individually on the instance.
key_fn_by_scheme = {
    'http': functools.partial(_default_key_normalizer, PoolKey),
    'https': functools.partial(_default_key_normalizer, PoolKey),
}

#: Maps a URL scheme to the ConnectionPool class used to serve it. Like
#: ``key_fn_by_scheme``, PoolManager keeps a per-instance reference so it
#: can be overridden.
pool_classes_by_scheme = {
    'http': HTTPConnectionPool,
    'https': HTTPSConnectionPool,
}
119 | |||
120 | |||
class PoolManager(RequestMethods):
    """
    Allows for arbitrary requests while transparently keeping track of
    necessary connection pools for you.

    :param num_pools:
        Number of connection pools to cache before discarding the least
        recently used pool.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param \\**connection_pool_kw:
        Additional parameters are used to create fresh
        :class:`urllib3.connectionpool.ConnectionPool` instances.

    Example::

        >>> manager = PoolManager(num_pools=2)
        >>> r = manager.request('GET', 'http://google.com/')
        >>> r = manager.request('GET', 'http://google.com/mail')
        >>> r = manager.request('GET', 'http://yahoo.com/')
        >>> len(manager.pools)
        2

    """

    # Overridden by ProxyManager; a non-None value changes urlopen routing.
    proxy = None

    def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
        RequestMethods.__init__(self, headers)
        self.connection_pool_kw = connection_pool_kw
        # LRU container: evicting a pool closes it via dispose_func.
        self.pools = RecentlyUsedContainer(num_pools,
                                           dispose_func=lambda p: p.close())

        # Locally set the pool classes and keys so other PoolManagers can
        # override them.
        self.pool_classes_by_scheme = pool_classes_by_scheme
        self.key_fn_by_scheme = key_fn_by_scheme.copy()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.clear()
        # Return False to re-raise any potential exceptions
        return False

    def _new_pool(self, scheme, host, port, request_context=None):
        """
        Create a new :class:`ConnectionPool` based on host, port, scheme, and
        any additional pool keyword arguments.

        If ``request_context`` is provided, it is provided as keyword arguments
        to the pool class used. This method is used to actually create the
        connection pools handed out by :meth:`connection_from_url` and
        companion methods. It is intended to be overridden for customization.
        """
        pool_cls = self.pool_classes_by_scheme[scheme]
        if request_context is None:
            request_context = self.connection_pool_kw.copy()

        # Although the context has everything necessary to create the pool,
        # this function has historically only used the scheme, host, and port
        # in the positional args. When an API change is acceptable these can
        # be removed.
        for key in ('scheme', 'host', 'port'):
            request_context.pop(key, None)

        # TLS-only keywords would be rejected by HTTPConnectionPool.
        if scheme == 'http':
            for kw in SSL_KEYWORDS:
                request_context.pop(kw, None)

        return pool_cls(host, port, **request_context)

    def clear(self):
        """
        Empty our store of pools and direct them all to close.

        This will not affect in-flight connections, but they will not be
        re-used after completion.
        """
        self.pools.clear()

    def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
        """
        Get a :class:`ConnectionPool` based on the host, port, and scheme.

        If ``port`` isn't given, it will be derived from the ``scheme`` using
        ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
        provided, it is merged with the instance's ``connection_pool_kw``
        variable and used to create the new connection pool, if one is
        needed.
        """

        if not host:
            raise LocationValueError("No host specified.")

        request_context = self._merge_pool_kwargs(pool_kwargs)
        request_context['scheme'] = scheme or 'http'
        if not port:
            port = port_by_scheme.get(request_context['scheme'].lower(), 80)
        request_context['port'] = port
        request_context['host'] = host

        return self.connection_from_context(request_context)

    def connection_from_context(self, request_context):
        """
        Get a :class:`ConnectionPool` based on the request context.

        ``request_context`` must at least contain the ``scheme`` key and its
        value must be a key in ``key_fn_by_scheme`` instance variable.
        """
        scheme = request_context['scheme'].lower()
        pool_key_constructor = self.key_fn_by_scheme[scheme]
        pool_key = pool_key_constructor(request_context)

        return self.connection_from_pool_key(pool_key, request_context=request_context)

    def connection_from_pool_key(self, pool_key, request_context=None):
        """
        Get a :class:`ConnectionPool` based on the provided pool key.

        ``pool_key`` should be a namedtuple that only contains immutable
        objects. At a minimum it must have the ``scheme``, ``host``, and
        ``port`` fields.
        """
        # Lock so a concurrent caller can't create a duplicate pool for the
        # same key between our get() and our insert.
        with self.pools.lock:
            # If the scheme, host, or port doesn't match existing open
            # connections, open a new ConnectionPool.
            pool = self.pools.get(pool_key)
            if pool:
                return pool

            # Make a fresh ConnectionPool of the desired type
            scheme = request_context['scheme']
            host = request_context['host']
            port = request_context['port']
            pool = self._new_pool(scheme, host, port, request_context=request_context)
            self.pools[pool_key] = pool

        return pool

    def connection_from_url(self, url, pool_kwargs=None):
        """
        Similar to :func:`urllib3.connectionpool.connection_from_url`.

        If ``pool_kwargs`` is not provided and a new pool needs to be
        constructed, ``self.connection_pool_kw`` is used to initialize
        the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
        is provided, it is used instead. Note that if a new pool does not
        need to be created for the request, the provided ``pool_kwargs`` are
        not used.
        """
        u = parse_url(url)
        return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
                                         pool_kwargs=pool_kwargs)

    def _merge_pool_kwargs(self, override):
        """
        Merge a dictionary of override values for self.connection_pool_kw.

        This does not modify self.connection_pool_kw and returns a new dict.
        Any keys in the override dictionary with a value of ``None`` are
        removed from the merged dictionary.
        """
        base_pool_kwargs = self.connection_pool_kw.copy()
        if override:
            for key, value in override.items():
                if value is None:
                    # None means "drop this key", not "set it to None".
                    try:
                        del base_pool_kwargs[key]
                    except KeyError:
                        pass
                else:
                    base_pool_kwargs[key] = value
        return base_pool_kwargs

    def urlopen(self, method, url, redirect=True, **kw):
        """
        Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
        with custom cross-host redirect logic and only sends the request-uri
        portion of the ``url``.

        The given ``url`` parameter must be absolute, such that an appropriate
        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
        """
        u = parse_url(url)
        conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)

        # Redirects are handled here (they may cross hosts), so the pool
        # itself must not follow them or enforce same-host.
        kw['assert_same_host'] = False
        kw['redirect'] = False
        if 'headers' not in kw:
            kw['headers'] = self.headers

        # Plain HTTP through a proxy sends the absolute URL on the request
        # line; everything else sends only the request-uri.
        if self.proxy is not None and u.scheme == "http":
            response = conn.urlopen(method, url, **kw)
        else:
            response = conn.urlopen(method, u.request_uri, **kw)

        redirect_location = redirect and response.get_redirect_location()
        if not redirect_location:
            return response

        # Support relative URLs for redirecting.
        redirect_location = urljoin(url, redirect_location)

        # RFC 7231, Section 6.4.4
        if response.status == 303:
            method = 'GET'

        retries = kw.get('retries')
        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect)

        try:
            retries = retries.increment(method, url, response=response, _pool=conn)
        except MaxRetryError:
            # Out of redirects: either raise or hand back the redirect
            # response itself, per the Retry configuration.
            if retries.raise_on_redirect:
                raise
            return response

        kw['retries'] = retries
        kw['redirect'] = redirect

        log.info("Redirecting %s -> %s", url, redirect_location)
        return self.urlopen(method, redirect_location, **kw)
350 | |||
351 | |||
class ProxyManager(PoolManager):
    """
    Behaves just like :class:`PoolManager`, but sends all requests through
    the defined proxy, using the CONNECT method for HTTPS URLs.

    :param proxy_url:
        The URL of the proxy to be used.

    :param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
        of HTTP they are being sent with each request, while in the
        HTTPS/CONNECT case they are sent only once. Could be used for proxy
        authentication.

    Example:
        >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
        >>> r1 = proxy.request('GET', 'http://google.com/')
        >>> r2 = proxy.request('GET', 'http://httpbin.org/')
        >>> len(proxy.pools)
        1
        >>> r3 = proxy.request('GET', 'https://httpbin.org/')
        >>> r4 = proxy.request('GET', 'https://twitter.com/')
        >>> len(proxy.pools)
        3

    """

    def __init__(self, proxy_url, num_pools=10, headers=None,
                 proxy_headers=None, **connection_pool_kw):

        # Accept an existing HTTPConnectionPool in place of a URL string.
        if isinstance(proxy_url, HTTPConnectionPool):
            proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
                                        proxy_url.port)
        proxy = parse_url(proxy_url)
        if not proxy.port:
            port = port_by_scheme.get(proxy.scheme, 80)
            proxy = proxy._replace(port=port)

        if proxy.scheme not in ("http", "https"):
            raise ProxySchemeUnknown(proxy.scheme)

        self.proxy = proxy
        self.proxy_headers = proxy_headers or {}

        # Forwarded to every ConnectionPool this manager creates.
        connection_pool_kw['_proxy'] = self.proxy
        connection_pool_kw['_proxy_headers'] = self.proxy_headers

        super(ProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw)

    def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
        # HTTPS tunnels through the proxy via CONNECT, so the pool is keyed
        # on the target host; plain HTTP is keyed on the proxy itself.
        if scheme == "https":
            return super(ProxyManager, self).connection_from_host(
                host, port, scheme, pool_kwargs=pool_kwargs)

        return super(ProxyManager, self).connection_from_host(
            self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)

    def _set_proxy_headers(self, url, headers=None):
        """
        Sets headers needed by proxies: specifically, the Accept and Host
        headers. Only sets headers not provided by the user.
        """
        headers_ = {'Accept': '*/*'}

        netloc = parse_url(url).netloc
        if netloc:
            headers_['Host'] = netloc

        if headers:
            # User-supplied headers win over the defaults above.
            headers_.update(headers)
        return headers_

    def urlopen(self, method, url, redirect=True, **kw):
        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
        u = parse_url(url)

        if u.scheme == "http":
            # For proxied HTTPS requests, httplib sets the necessary headers
            # on the CONNECT to the proxy. For HTTP, we'll definitely
            # need to set 'Host' at the very least.
            headers = kw.get('headers', self.headers)
            kw['headers'] = self._set_proxy_headers(url, headers)

        return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
437 | |||
438 | |||
def proxy_from_url(url, **kw):
    """Shortcut for constructing a :class:`ProxyManager` from a proxy URL."""
    manager = ProxyManager(proxy_url=url, **kw)
    return manager
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/request.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/request.py new file mode 100644 index 0000000..9d789d6 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/request.py | |||
@@ -0,0 +1,148 @@ | |||
1 | from __future__ import absolute_import | ||
2 | |||
3 | from .filepost import encode_multipart_formdata | ||
4 | from .packages.six.moves.urllib.parse import urlencode | ||
5 | |||
6 | |||
7 | __all__ = ['RequestMethods'] | ||
8 | |||
9 | |||
class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.

    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.

    Specifically,

    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).

    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).

    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    # Methods whose fields belong in the query string rather than the body.
    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # Bug fix: ``NotImplemented`` is the binary-comparison sentinel, not
        # an exception class; "calling" it raised a confusing ``TypeError``
        # instead of the intended error. ``NotImplementedError`` is correct.
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.

        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the
        option to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()

        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, headers=None,
                           **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if headers is None:
            headers = self.headers

        extra_kw = {'headers': headers}
        extra_kw.update(urlopen_kw)

        if fields:
            url += '?' + urlencode(fields)

        return self.urlopen(method, url, **extra_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.

        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
        the payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request
        signing, such as with OAuth.

        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will
        be overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if headers is None:
            headers = self.headers

        extra_kw = {'headers': {}}

        if fields:
            if 'body' in urlopen_kw:
                raise TypeError(
                    "request got values for both 'fields' and 'body', can only specify one.")

            if encode_multipart:
                body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
            else:
                body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'

            extra_kw['body'] = body
            extra_kw['headers'] = {'Content-Type': content_type}

        # Caller headers are merged on top of the generated Content-Type;
        # urlopen kwargs (including an explicit 'headers') win over both.
        extra_kw['headers'].update(headers)
        extra_kw.update(urlopen_kw)

        return self.urlopen(method, url, **extra_kw)
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/response.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/response.py new file mode 100644 index 0000000..54799ba --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/response.py | |||
@@ -0,0 +1,626 @@ | |||
1 | from __future__ import absolute_import | ||
2 | from contextlib import contextmanager | ||
3 | import zlib | ||
4 | import io | ||
5 | import logging | ||
6 | from socket import timeout as SocketTimeout | ||
7 | from socket import error as SocketError | ||
8 | |||
9 | from ._collections import HTTPHeaderDict | ||
10 | from .exceptions import ( | ||
11 | BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError, | ||
12 | ResponseNotChunked, IncompleteRead, InvalidHeader | ||
13 | ) | ||
14 | from .packages.six import string_types as basestring, binary_type, PY3 | ||
15 | from .packages.six.moves import http_client as httplib | ||
16 | from .connection import HTTPException, BaseSSLError | ||
17 | from .util.response import is_fp_closed, is_response_to_head | ||
18 | |||
19 | log = logging.getLogger(__name__) | ||
20 | |||
21 | |||
class DeflateDecoder(object):
    """
    Decoder for 'deflate'-encoded bodies that tolerates both zlib-wrapped
    and raw DEFLATE streams (some servers send the latter).
    """

    def __init__(self):
        # Until the first successful decompress we don't know which flavor
        # the server sent, so input is buffered in case we must restart.
        self._probing = True
        self._buffered = b''
        self._decompressor = zlib.decompressobj()

    def __getattr__(self, name):
        # Delegate everything else (flush, unused_data, ...) to zlib's object.
        return getattr(self._decompressor, name)

    def decompress(self, data):
        if not data:
            return data

        if not self._probing:
            return self._decompressor.decompress(data)

        self._buffered += data
        try:
            chunk = self._decompressor.decompress(data)
        except zlib.error:
            # Not zlib-wrapped: re-feed everything seen so far as raw DEFLATE.
            self._probing = False
            self._decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._buffered)
            finally:
                self._buffered = None
        if chunk:
            # First real output: the format guess was right; stop buffering.
            self._probing = False
            self._buffered = None
        return chunk
53 | |||
54 | |||
class GzipDecoder(object):
    """Decoder for 'gzip'-encoded bodies, backed by a zlib decompress object."""

    def __init__(self):
        # wbits of 16 + MAX_WBITS tells zlib to expect a gzip header/trailer.
        self._decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def __getattr__(self, name):
        # Delegate everything else (flush, unused_data, ...) to zlib's object.
        return getattr(self._decompressor, name)

    def decompress(self, data):
        if data:
            return self._decompressor.decompress(data)
        return data
67 | |||
68 | |||
def _get_decoder(mode):
    # 'gzip' gets the gzip-aware decoder; any other mode ('deflate') gets
    # the tolerant deflate decoder.
    return GzipDecoder() if mode == 'gzip' else DeflateDecoder()
74 | |||
75 | |||
76 | class HTTPResponse(io.IOBase): | ||
77 | """ | ||
78 | HTTP Response container. | ||
79 | |||
80 | Backwards-compatible to httplib's HTTPResponse but the response ``body`` is | ||
81 | loaded and decoded on-demand when the ``data`` property is accessed. This | ||
82 | class is also compatible with the Python standard library's :mod:`io` | ||
83 | module, and can hence be treated as a readable object in the context of that | ||
84 | framework. | ||
85 | |||
86 | Extra parameters for behaviour not present in httplib.HTTPResponse: | ||
87 | |||
88 | :param preload_content: | ||
89 | If True, the response's body will be preloaded during construction. | ||
90 | |||
91 | :param decode_content: | ||
92 | If True, attempts to decode specific content-encoding's based on headers | ||
93 | (like 'gzip' and 'deflate') will be skipped and raw data will be used | ||
94 | instead. | ||
95 | |||
96 | :param original_response: | ||
97 | When this HTTPResponse wrapper is generated from an httplib.HTTPResponse | ||
98 | object, it's convenient to include the original for debug purposes. It's | ||
99 | otherwise unused. | ||
100 | |||
101 | :param retries: | ||
102 | The retries contains the last :class:`~urllib3.util.retry.Retry` that | ||
103 | was used during the request. | ||
104 | |||
105 | :param enforce_content_length: | ||
106 | Enforce content length checking. Body returned by server must match | ||
107 | value of Content-Length header, if present. Otherwise, raise error. | ||
108 | """ | ||
109 | |||
110 | CONTENT_DECODERS = ['gzip', 'deflate'] | ||
111 | REDIRECT_STATUSES = [301, 302, 303, 307, 308] | ||
112 | |||
    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None,
                 retries=None, enforce_content_length=False, request_method=None):
        """
        Wrap a response body and its metadata.

        ``body`` may be a string/bytes payload or a file-like object (anything
        with a ``read`` attribute); ``headers`` may already be an
        :class:`HTTPHeaderDict` or any mapping accepted by its constructor.
        """
        # Normalize headers into an HTTPHeaderDict exactly once.
        if isinstance(headers, HTTPHeaderDict):
            self.headers = headers
        else:
            self.headers = HTTPHeaderDict(headers)
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content
        self.retries = retries
        self.enforce_content_length = enforce_content_length

        self._decoder = None      # lazily created content decoder
        self._body = None         # cached decoded body, once read
        self._fp = None           # underlying file-like object, if any
        self._original_response = original_response
        self._fp_bytes_read = 0   # raw bytes pulled off the wire (see tell())

        # A str/bytes body is cached directly; a file-like body is streamed.
        if body and isinstance(body, (basestring, binary_type)):
            self._body = body

        self._pool = pool
        self._connection = connection

        if hasattr(body, 'read'):
            self._fp = body

        # Are we using the chunked-style of transfer encoding?
        self.chunked = False
        self.chunk_left = None
        tr_enc = self.headers.get('transfer-encoding', '').lower()
        # Don't incur the penalty of creating a list and then discarding it
        encodings = (enc.strip() for enc in tr_enc.split(","))
        if "chunked" in encodings:
            self.chunked = True

        # Determine length of response
        self.length_remaining = self._init_length(request_method)

        # If requested, preload the body.
        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)
160 | |||
161 | def get_redirect_location(self): | ||
162 | """ | ||
163 | Should we redirect and where to? | ||
164 | |||
165 | :returns: Truthy redirect location string if we got a redirect status | ||
166 | code and valid location. ``None`` if redirect status and no | ||
167 | location. ``False`` if not a redirect status code. | ||
168 | """ | ||
169 | if self.status in self.REDIRECT_STATUSES: | ||
170 | return self.headers.get('location') | ||
171 | |||
172 | return False | ||
173 | |||
174 | def release_conn(self): | ||
175 | if not self._pool or not self._connection: | ||
176 | return | ||
177 | |||
178 | self._pool._put_conn(self._connection) | ||
179 | self._connection = None | ||
180 | |||
    @property
    def data(self):
        """
        The response body, read and cached lazily on first access.

        Returns ``None`` when there is neither a cached body nor an open
        file-like object to read from.
        """
        # For backwards-compat with urllib3 0.4 and earlier, the body is
        # exposed as a property rather than requiring an explicit read().
        if self._body:
            return self._body

        if self._fp:
            # cache_content=True stores the result in self._body so later
            # accesses don't re-read the exhausted stream.
            return self.read(cache_content=True)
189 | |||
    @property
    def connection(self):
        # The low-level connection held by this response; None once it has
        # been released back to the pool (see release_conn()).
        return self._connection
193 | |||
    def tell(self):
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:``HTTPResponse.read`` if bytes
        are encoded on the wire (e.g, compressed).
        """
        # Counter is incremented in read() as raw bytes come off the wire,
        # before any content decoding happens.
        return self._fp_bytes_read
201 | |||
202 | def _init_length(self, request_method): | ||
203 | """ | ||
204 | Set initial length value for Response content if available. | ||
205 | """ | ||
206 | length = self.headers.get('content-length') | ||
207 | |||
208 | if length is not None and self.chunked: | ||
209 | # This Response will fail with an IncompleteRead if it can't be | ||
210 | # received as chunked. This method falls back to attempt reading | ||
211 | # the response before raising an exception. | ||
212 | log.warning("Received response with both Content-Length and " | ||
213 | "Transfer-Encoding set. This is expressly forbidden " | ||
214 | "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " | ||
215 | "attempting to process response as Transfer-Encoding: " | ||
216 | "chunked.") | ||
217 | return None | ||
218 | |||
219 | elif length is not None: | ||
220 | try: | ||
221 | # RFC 7230 section 3.3.2 specifies multiple content lengths can | ||
222 | # be sent in a single Content-Length header | ||
223 | # (e.g. Content-Length: 42, 42). This line ensures the values | ||
224 | # are all valid ints and that as long as the `set` length is 1, | ||
225 | # all values are the same. Otherwise, the header is invalid. | ||
226 | lengths = set([int(val) for val in length.split(',')]) | ||
227 | if len(lengths) > 1: | ||
228 | raise InvalidHeader("Content-Length contained multiple " | ||
229 | "unmatching values (%s)" % length) | ||
230 | length = lengths.pop() | ||
231 | except ValueError: | ||
232 | length = None | ||
233 | else: | ||
234 | if length < 0: | ||
235 | length = None | ||
236 | |||
237 | # Convert status to int for comparison | ||
238 | # In some cases, httplib returns a status of "_UNKNOWN" | ||
239 | try: | ||
240 | status = int(self.status) | ||
241 | except ValueError: | ||
242 | status = 0 | ||
243 | |||
244 | # Check for responses that shouldn't include a body | ||
245 | if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD': | ||
246 | length = 0 | ||
247 | |||
248 | return length | ||
249 | |||
250 | def _init_decoder(self): | ||
251 | """ | ||
252 | Set-up the _decoder attribute if necessary. | ||
253 | """ | ||
254 | # Note: content-encoding value should be case-insensitive, per RFC 7230 | ||
255 | # Section 3.2 | ||
256 | content_encoding = self.headers.get('content-encoding', '').lower() | ||
257 | if self._decoder is None and content_encoding in self.CONTENT_DECODERS: | ||
258 | self._decoder = _get_decoder(content_encoding) | ||
259 | |||
260 | def _decode(self, data, decode_content, flush_decoder): | ||
261 | """ | ||
262 | Decode the data passed in and potentially flush the decoder. | ||
263 | """ | ||
264 | try: | ||
265 | if decode_content and self._decoder: | ||
266 | data = self._decoder.decompress(data) | ||
267 | except (IOError, zlib.error) as e: | ||
268 | content_encoding = self.headers.get('content-encoding', '').lower() | ||
269 | raise DecodeError( | ||
270 | "Received response with content-encoding: %s, but " | ||
271 | "failed to decode it." % content_encoding, e) | ||
272 | |||
273 | if flush_decoder and decode_content: | ||
274 | data += self._flush_decoder() | ||
275 | |||
276 | return data | ||
277 | |||
278 | def _flush_decoder(self): | ||
279 | """ | ||
280 | Flushes the decoder. Should only be called if the decoder is actually | ||
281 | being used. | ||
282 | """ | ||
283 | if self._decoder: | ||
284 | buf = self._decoder.decompress(b'') | ||
285 | return buf + self._decoder.flush() | ||
286 | |||
287 | return b'' | ||
288 | |||
    @contextmanager
    def _error_catcher(self):
        """
        Catch low-level python exceptions, instead re-raising urllib3
        variants, so that low-level exceptions are not leaked in the
        high-level api.

        On exit, release the connection back to the pool.
        """
        clean_exit = False

        try:
            try:
                yield

            except SocketTimeout:
                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                # there is yet no clean way to get at it from this context.
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')

            except BaseSSLError as e:
                # FIXME: Is there a better way to differentiate between SSLErrors?
                if 'read operation timed out' not in str(e):  # Defensive:
                    # This shouldn't happen but just in case we're missing an edge
                    # case, let's avoid swallowing SSL errors.
                    raise

                # The SSL layer reports read timeouts through this message
                # rather than a SocketTimeout; normalize to ReadTimeoutError.
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')

            except (HTTPException, SocketError) as e:
                # This includes IncompleteRead.
                raise ProtocolError('Connection broken: %r' % e, e)

            # If no exception is thrown, we should avoid cleaning up
            # unnecessarily.
            clean_exit = True
        finally:
            # If we didn't terminate cleanly, we need to throw away our
            # connection.
            if not clean_exit:
                # The response may not be closed but we're not going to use it
                # anymore so close it now to ensure that the connection is
                # released back to the pool.
                if self._original_response:
                    self._original_response.close()

                # Closing the response may not actually be sufficient to close
                # everything, so if we have a hold of the connection close that
                # too.
                if self._connection:
                    self._connection.close()

            # If we hold the original response but it's closed now, we should
            # return the connection back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()
345 | |||
    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.

        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        self._init_decoder()
        if decode_content is None:
            # Fall back to the per-response default chosen at construction.
            decode_content = self.decode_content

        if self._fp is None:
            return

        flush_decoder = False
        data = None

        with self._error_catcher():
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read()
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt)
                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do
                    # not properly close the connection in all cases. There is
                    # no harm in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True
                    if self.enforce_content_length and self.length_remaining not in (0, None):
                        # This is an edge case that httplib failed to cover due
                        # to concerns of backward compatibility. We're
                        # addressing it here to make sure IncompleteRead is
                        # raised during streaming, so all calls with incorrect
                        # Content-Length are caught.
                        raise IncompleteRead(self._fp_bytes_read, self.length_remaining)

        if data:
            # Track raw (wire) byte count for tell() and Content-Length
            # bookkeeping before any decoding changes the length.
            self._fp_bytes_read += len(data)
            if self.length_remaining is not None:
                self.length_remaining -= len(data)

            data = self._decode(data, decode_content, flush_decoder)

            if cache_content:
                self._body = data

        return data
414 | |||
415 | def stream(self, amt=2**16, decode_content=None): | ||
416 | """ | ||
417 | A generator wrapper for the read() method. A call will block until | ||
418 | ``amt`` bytes have been read from the connection or until the | ||
419 | connection is closed. | ||
420 | |||
421 | :param amt: | ||
422 | How much of the content to read. The generator will return up to | ||
423 | much data per iteration, but may return less. This is particularly | ||
424 | likely when using compressed data. However, the empty string will | ||
425 | never be returned. | ||
426 | |||
427 | :param decode_content: | ||
428 | If True, will attempt to decode the body based on the | ||
429 | 'content-encoding' header. | ||
430 | """ | ||
431 | if self.chunked and self.supports_chunked_reads(): | ||
432 | for line in self.read_chunked(amt, decode_content=decode_content): | ||
433 | yield line | ||
434 | else: | ||
435 | while not is_fp_closed(self._fp): | ||
436 | data = self.read(amt=amt, decode_content=decode_content) | ||
437 | |||
438 | if data: | ||
439 | yield data | ||
440 | |||
441 | @classmethod | ||
442 | def from_httplib(ResponseCls, r, **response_kw): | ||
443 | """ | ||
444 | Given an :class:`httplib.HTTPResponse` instance ``r``, return a | ||
445 | corresponding :class:`urllib3.response.HTTPResponse` object. | ||
446 | |||
447 | Remaining parameters are passed to the HTTPResponse constructor, along | ||
448 | with ``original_response=r``. | ||
449 | """ | ||
450 | headers = r.msg | ||
451 | |||
452 | if not isinstance(headers, HTTPHeaderDict): | ||
453 | if PY3: # Python 3 | ||
454 | headers = HTTPHeaderDict(headers.items()) | ||
455 | else: # Python 2 | ||
456 | headers = HTTPHeaderDict.from_httplib(headers) | ||
457 | |||
458 | # HTTPResponse objects in Python 3 don't have a .strict attribute | ||
459 | strict = getattr(r, 'strict', 0) | ||
460 | resp = ResponseCls(body=r, | ||
461 | headers=headers, | ||
462 | status=r.status, | ||
463 | version=r.version, | ||
464 | reason=r.reason, | ||
465 | strict=strict, | ||
466 | original_response=r, | ||
467 | **response_kw) | ||
468 | return resp | ||
469 | |||
    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        # NOTE(review): unlike httplib's getheaders(), this returns the
        # header mapping itself rather than a list of (name, value) pairs.
        return self.headers
473 | |||
    def getheader(self, name, default=None):
        # Single-header lookup mirroring httplib.HTTPResponse.getheader().
        return self.headers.get(name, default)
476 | |||
    # Backwards compatibility for http.cookiejar
    def info(self):
        # cookiejar only needs a mapping-like object of headers.
        return self.headers
480 | |||
    # Overrides from io.IOBase
    def close(self):
        # Close the wrapped file object and, if we hold one, the connection.
        # Note this does NOT release the connection back to the pool
        # (that is release_conn()).
        if not self.closed:
            self._fp.close()

        if self._connection:
            self._connection.close()
488 | |||
489 | @property | ||
490 | def closed(self): | ||
491 | if self._fp is None: | ||
492 | return True | ||
493 | elif hasattr(self._fp, 'isclosed'): | ||
494 | return self._fp.isclosed() | ||
495 | elif hasattr(self._fp, 'closed'): | ||
496 | return self._fp.closed | ||
497 | else: | ||
498 | return True | ||
499 | |||
500 | def fileno(self): | ||
501 | if self._fp is None: | ||
502 | raise IOError("HTTPResponse has no file to get a fileno from") | ||
503 | elif hasattr(self._fp, "fileno"): | ||
504 | return self._fp.fileno() | ||
505 | else: | ||
506 | raise IOError("The file-like object this HTTPResponse is wrapped " | ||
507 | "around has no file descriptor") | ||
508 | |||
    def flush(self):
        # Delegate flushing to the wrapped file object when it supports it;
        # otherwise a no-op (required for `io` module compatibility).
        if self._fp is not None and hasattr(self._fp, 'flush'):
            return self._fp.flush()
512 | |||
    def readable(self):
        # This method is required for `io` module compatibility
        # (e.g. wrapping in io.BufferedReader); the body is always readable.
        return True
516 | |||
517 | def readinto(self, b): | ||
518 | # This method is required for `io` module compatibility. | ||
519 | temp = self.read(len(b)) | ||
520 | if len(temp) == 0: | ||
521 | return 0 | ||
522 | else: | ||
523 | b[:len(temp)] = temp | ||
524 | return len(temp) | ||
525 | |||
    def supports_chunked_reads(self):
        """
        Checks if the underlying file-like object looks like a
        httplib.HTTPResponse object. We do this by testing for the fp
        attribute. If it is present we assume it returns raw chunks as
        processed by read_chunked().
        """
        # read_chunked() parses the chunk framing itself from ``fp`` (the raw
        # socket file object), so that attribute must exist.
        return hasattr(self._fp, 'fp')
534 | |||
535 | def _update_chunk_length(self): | ||
536 | # First, we'll figure out length of a chunk and then | ||
537 | # we'll try to read it from socket. | ||
538 | if self.chunk_left is not None: | ||
539 | return | ||
540 | line = self._fp.fp.readline() | ||
541 | line = line.split(b';', 1)[0] | ||
542 | try: | ||
543 | self.chunk_left = int(line, 16) | ||
544 | except ValueError: | ||
545 | # Invalid chunked protocol response, abort. | ||
546 | self.close() | ||
547 | raise httplib.IncompleteRead(line) | ||
548 | |||
    def _handle_chunk(self, amt):
        # Return up to ``amt`` bytes from the current chunk (the whole chunk
        # when ``amt`` is None), consuming the trailing CRLF and resetting
        # chunk_left whenever the chunk is fully drained.
        returned_chunk = None
        if amt is None:
            chunk = self._fp._safe_read(self.chunk_left)
            returned_chunk = chunk
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        elif amt < self.chunk_left:
            # Partial read: leave the remainder (and its CRLF) for next time.
            value = self._fp._safe_read(amt)
            self.chunk_left = self.chunk_left - amt
            returned_chunk = value
        elif amt == self.chunk_left:
            value = self._fp._safe_read(amt)
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
            returned_chunk = value
        else:  # amt > self.chunk_left
            returned_chunk = self._fp._safe_read(self.chunk_left)
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        return returned_chunk
570 | |||
    def read_chunked(self, amt=None, decode_content=None):
        """
        Similar to :meth:`HTTPResponse.read`, but with an additional
        parameter: ``decode_content``.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        self._init_decoder()
        # FIXME: Rewrite this method and make it a class with a better structured logic.
        if not self.chunked:
            raise ResponseNotChunked(
                "Response is not chunked. "
                "Header 'transfer-encoding: chunked' is missing.")
        if not self.supports_chunked_reads():
            raise BodyNotHttplibCompatible(
                "Body should be httplib.HTTPResponse like. "
                "It should have have an fp attribute which returns raw chunks.")

        # Don't bother reading the body of a HEAD request.
        if self._original_response and is_response_to_head(self._original_response):
            self._original_response.close()
            return

        with self._error_catcher():
            # Each iteration parses one chunk-size line and yields its
            # (possibly decoded) payload; a zero-size chunk terminates.
            while True:
                self._update_chunk_length()
                if self.chunk_left == 0:
                    break
                chunk = self._handle_chunk(amt)
                decoded = self._decode(chunk, decode_content=decode_content,
                                       flush_decoder=False)
                if decoded:
                    yield decoded

            if decode_content:
                # On CPython and PyPy, we should never need to flush the
                # decoder. However, on Jython we *might* need to, so
                # lets defensively do it anyway.
                decoded = self._flush_decoder()
                if decoded:  # Platform-specific: Jython.
                    yield decoded

            # Chunk content ends with \r\n: discard it.
            while True:
                line = self._fp.fp.readline()
                if not line:
                    # Some sites may not end with '\r\n'.
                    break
                if line == b'\r\n':
                    break

            # We read everything; close the "file".
            if self._original_response:
                self._original_response.close()
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/__init__.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/__init__.py new file mode 100644 index 0000000..a84b005 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/__init__.py | |||
@@ -0,0 +1,54 @@ | |||
from __future__ import absolute_import
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import make_headers
from .response import is_fp_closed
from .ssl_ import (
    SSLContext,
    HAS_SNI,
    IS_PYOPENSSL,
    IS_SECURETRANSPORT,
    assert_fingerprint,
    resolve_cert_reqs,
    resolve_ssl_version,
    ssl_wrap_socket,
)
from .timeout import (
    current_time,
    Timeout,
)

from .retry import Retry
from .url import (
    get_host,
    parse_url,
    split_first,
    Url,
)
from .wait import (
    wait_for_read,
    wait_for_write
)

# Names re-exported as the public surface of ``urllib3.util``.
__all__ = (
    'HAS_SNI',
    'IS_PYOPENSSL',
    'IS_SECURETRANSPORT',
    'SSLContext',
    'Retry',
    'Timeout',
    'Url',
    'assert_fingerprint',
    'current_time',
    'is_connection_dropped',
    'is_fp_closed',
    'get_host',
    'parse_url',
    'make_headers',
    'resolve_cert_reqs',
    'resolve_ssl_version',
    'split_first',
    'ssl_wrap_socket',
    'wait_for_read',
    'wait_for_write'
)
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/connection.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/connection.py new file mode 100644 index 0000000..31ecd83 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/connection.py | |||
@@ -0,0 +1,130 @@ | |||
1 | from __future__ import absolute_import | ||
2 | import socket | ||
3 | from .wait import wait_for_read | ||
4 | from .selectors import HAS_SELECT, SelectorError | ||
5 | |||
6 | |||
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    sock = getattr(conn, 'sock', False)
    if sock is False:  # Platform-specific: AppEngine
        return False
    if sock is None:  # Connection already closed (such as by httplib).
        return True
    if not HAS_SELECT:
        # No usable selector implementation; assume the connection is alive.
        return False
    try:
        # An idle keep-alive connection should have nothing to read; any
        # readiness (data or EOF) at timeout 0 means the peer dropped it.
        return bool(wait_for_read(sock, timeout=0.0))
    except SelectorError:
        return True
30 | |||
31 | |||
32 | # This function is copied from socket.py in the Python 2.7 standard | ||
33 | # library test suite. Added to its signature is only `socket_options`. | ||
34 | # One additional modification is that we avoid binding to IPv6 servers | ||
35 | # discovered in DNS if the system doesn't have IPv6 functionality. | ||
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
# One additional modification is that we avoid binding to IPv6 servers
# discovered in DNS if the system doesn't have IPv6 functionality.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None, socket_options=None):
    """Connect to *address* and return the socket object.

    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used. If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """
    host, port = address
    if host.startswith('['):
        # Strip the brackets off an IPv6 literal such as "[::1]".
        host = host.strip('[]')
    last_err = None

    # allowed_gai_family() decides whether getaddrinfo may return IPv6
    # records, depending on whether the system can actually use IPv6.
    family = allowed_gai_family()

    for af, socktype, proto, canonname, sa in socket.getaddrinfo(
            host, port, family, socket.SOCK_STREAM):
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)

            # If provided, set socket level options before connecting.
            _set_socket_options(sock, socket_options)

            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock

        except socket.error as e:
            # Remember the failure and try the next resolved address.
            last_err = e
            if sock is not None:
                sock.close()
                sock = None

    if last_err is not None:
        raise last_err

    raise socket.error("getaddrinfo returns an empty list")
86 | |||
87 | |||
88 | def _set_socket_options(sock, options): | ||
89 | if options is None: | ||
90 | return | ||
91 | |||
92 | for opt in options: | ||
93 | sock.setsockopt(*opt) | ||
94 | |||
95 | |||
def allowed_gai_family():
    """This function is designed to work in the context of
    getaddrinfo, where family=socket.AF_UNSPEC is the default and
    will perform a DNS search for both IPv6 and IPv4 records."""
    # Restrict lookups to IPv4 unless this host can actually bind IPv6.
    return socket.AF_UNSPEC if HAS_IPV6 else socket.AF_INET
105 | |||
106 | |||
107 | def _has_ipv6(host): | ||
108 | """ Returns True if the system can bind an IPv6 address. """ | ||
109 | sock = None | ||
110 | has_ipv6 = False | ||
111 | |||
112 | if socket.has_ipv6: | ||
113 | # has_ipv6 returns true if cPython was compiled with IPv6 support. | ||
114 | # It does not tell us if the system has IPv6 support enabled. To | ||
115 | # determine that we must bind to an IPv6 address. | ||
116 | # https://github.com/shazow/urllib3/pull/611 | ||
117 | # https://bugs.python.org/issue658327 | ||
118 | try: | ||
119 | sock = socket.socket(socket.AF_INET6) | ||
120 | sock.bind((host, 0)) | ||
121 | has_ipv6 = True | ||
122 | except Exception: | ||
123 | pass | ||
124 | |||
125 | if sock: | ||
126 | sock.close() | ||
127 | return has_ipv6 | ||
128 | |||
129 | |||
130 | HAS_IPV6 = _has_ipv6('::1') | ||
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/request.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/request.py new file mode 100644 index 0000000..22882b8 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/request.py | |||
@@ -0,0 +1,118 @@ | |||
1 | from __future__ import absolute_import | ||
2 | from base64 import b64encode | ||
3 | |||
4 | from ..packages.six import b, integer_types | ||
5 | from ..exceptions import UnrewindableBodyError | ||
6 | |||
7 | ACCEPT_ENCODING = 'gzip,deflate' | ||
8 | _FAILEDTELL = object() | ||
9 | |||
10 | |||
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
                 basic_auth=None, proxy_basic_auth=None, disable_cache=None):
    """
    Shortcuts for generating request headers.

    :param keep_alive:
        If ``True``, adds 'connection: keep-alive' header.

    :param accept_encoding:
        Can be a boolean, list, or string.
        ``True`` translates to 'gzip,deflate'.
        List will get joined by comma.
        String will be used as provided.

    :param user_agent:
        String representing the user-agent you want, such as
        "python-urllib3/0.6"

    :param basic_auth:
        Colon-separated username:password string for 'authorization: basic ...'
        auth header.

    :param proxy_basic_auth:
        Colon-separated username:password string for 'proxy-authorization: basic ...'
        auth header.

    :param disable_cache:
        If ``True``, adds 'cache-control: no-cache' header.

    Example::

        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
        >>> make_headers(accept_encoding=True)
        {'accept-encoding': 'gzip,deflate'}
    """
    headers = {}

    if accept_encoding:
        # str is passed through; list is joined; anything else truthy
        # (e.g. True) selects the default encodings.
        if isinstance(accept_encoding, list):
            accept_encoding = ','.join(accept_encoding)
        elif not isinstance(accept_encoding, str):
            accept_encoding = ACCEPT_ENCODING
        headers['accept-encoding'] = accept_encoding

    if user_agent:
        headers['user-agent'] = user_agent

    if keep_alive:
        headers['connection'] = 'keep-alive'

    if basic_auth:
        headers['authorization'] = 'Basic ' + \
            b64encode(b(basic_auth)).decode('utf-8')

    if proxy_basic_auth:
        headers['proxy-authorization'] = 'Basic ' + \
            b64encode(b(proxy_basic_auth)).decode('utf-8')

    if disable_cache:
        headers['cache-control'] = 'no-cache'

    return headers
75 | |||
76 | |||
def set_file_position(body, pos):
    """
    If a position is provided, move file to that point.
    Otherwise, we'll attempt to record a position for future use.
    """
    if pos is not None:
        rewind_body(body, pos)
        return pos

    tell = getattr(body, 'tell', None)
    if tell is None:
        return None
    try:
        return tell()
    except (IOError, OSError):
        # Distinguish "tell() failed" from "no tell()" so a later rewind
        # attempt can raise a useful error instead of silently failing.
        return _FAILEDTELL
93 | |||
94 | |||
def rewind_body(body, body_pos):
    """
    Attempt to rewind body to a certain position.
    Primarily used for request redirects and retries.

    :param body:
        File-like object that supports seek.

    :param int pos:
        Position to seek to in file.
    """
    body_seek = getattr(body, 'seek', None)
    if body_seek is not None and isinstance(body_pos, integer_types):
        try:
            body_seek(body_pos)
        except (IOError, OSError):
            raise UnrewindableBodyError("An error occurred when rewinding request "
                                        "body for redirect/retry.")
        return

    if body_pos is _FAILEDTELL:
        # set_file_position() could not record a position earlier.
        raise UnrewindableBodyError("Unable to record file position for rewinding "
                                    "request body during a redirect/retry.")

    raise ValueError("body_pos must be of type integer, "
                     "instead it was %s." % type(body_pos))
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/response.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/response.py new file mode 100644 index 0000000..c2eb49c --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/response.py | |||
@@ -0,0 +1,81 @@ | |||
1 | from __future__ import absolute_import | ||
2 | from ..packages.six.moves import http_client as httplib | ||
3 | |||
4 | from ..exceptions import HeaderParsingError | ||
5 | |||
6 | |||
def is_fp_closed(obj):
    """
    Checks whether a given file-like object is closed.

    :param obj:
        The file-like object to check.
    """
    # Check `isclosed()` first, in case Python3 doesn't set `closed`.
    # GH Issue #928
    if hasattr(obj, 'isclosed'):
        return obj.isclosed()

    # The official file-like-object attribute.
    if hasattr(obj, 'closed'):
        return obj.closed

    # Some objects (e.g. HTTPResponse) wrap another file-like object in
    # ``fp`` and drop it on exhaustion.
    if hasattr(obj, 'fp'):
        return obj.fp is None

    raise ValueError("Unable to determine whether fp is closed.")
36 | |||
37 | |||
def assert_header_parsing(headers):
    """
    Asserts whether all headers have been successfully parsed.
    Extracts encountered errors from the result of parsing headers.

    Only works on Python 3.

    :param headers: Headers to verify.
    :type headers: `httplib.HTTPMessage`.

    :raises urllib3.exceptions.HeaderParsingError:
        If parsing errors are found.
    """

    # This will fail silently if we pass in the wrong kind of parameter.
    # To make debugging easier add an explicit check.
    if not isinstance(headers, httplib.HTTPMessage):
        raise TypeError('expected httplib.Message, got {0}.'.format(
            type(headers)))

    defects = getattr(headers, 'defects', None)
    get_payload = getattr(headers, 'get_payload', None)

    # A payload here means the parser stopped early and treated the rest of
    # the header block as body data, i.e. the headers were malformed.
    unparsed_data = None
    if get_payload:  # Platform-specific: Python 3.
        unparsed_data = get_payload()

    if defects or unparsed_data:
        raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
67 | |||
68 | |||
def is_response_to_head(response):
    """
    Checks whether the request of a response has been a HEAD-request.
    Handles the quirks of AppEngine.

    :param response:
        Response to check.
    :type response: :class:`httplib.HTTPResponse`
    """
    # FIXME: Can we do this somehow without accessing private httplib _method?
    method = response._method
    if isinstance(method, int):  # Platform-specific: Appengine
        # AppEngine stores the request method as an int enum; 3 is HEAD.
        return method == 3
    return method.upper() == 'HEAD'
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/retry.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/retry.py new file mode 100644 index 0000000..2a7e8c1 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/retry.py | |||
@@ -0,0 +1,401 @@ | |||
1 | from __future__ import absolute_import | ||
2 | import time | ||
3 | import logging | ||
4 | from collections import namedtuple | ||
5 | from itertools import takewhile | ||
6 | import email | ||
7 | import re | ||
8 | |||
9 | from ..exceptions import ( | ||
10 | ConnectTimeoutError, | ||
11 | MaxRetryError, | ||
12 | ProtocolError, | ||
13 | ReadTimeoutError, | ||
14 | ResponseError, | ||
15 | InvalidHeader, | ||
16 | ) | ||
17 | from ..packages import six | ||
18 | |||
19 | |||
20 | log = logging.getLogger(__name__) | ||
21 | |||
# Data structure for representing the metadata of requests that result in a retry.
# Fields: the method/url of the attempt, the exception raised (or None), the
# response status code (or None), and the redirect target (or None).
RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
                                               "status", "redirect_location"])
25 | |||
26 | |||
class Retry(object):
    """ Retry configuration.

    Each retry attempt will create a new Retry object with updated values, so
    they can be safely reused.

    Retries can be defined as a default for a pool::

        retries = Retry(connect=5, read=2, redirect=5)
        http = PoolManager(retries=retries)
        response = http.request('GET', 'http://example.com/')

    Or per-request (which overrides the default for the pool)::

        response = http.request('GET', 'http://example.com/', retries=Retry(10))

    Retries can be disabled by passing ``False``::

        response = http.request('GET', 'http://example.com/', retries=False)

    Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
    retries are disabled, in which case the causing exception will be raised.

    :param int total:
        Total number of retries to allow. Takes precedence over other counts.

        Set to ``None`` to remove this constraint and fall back on other
        counts. It's a good idea to set this to some sensibly-high value to
        account for unexpected edge cases and avoid infinite retry loops.

        Set to ``0`` to fail on the first retry.

        Set to ``False`` to disable and imply ``raise_on_redirect=False``.

    :param int connect:
        How many connection-related errors to retry on.

        These are errors raised before the request is sent to the remote server,
        which we assume has not triggered the server to process the request.

        Set to ``0`` to fail on the first retry of this type.

    :param int read:
        How many times to retry on read errors.

        These errors are raised after the request was sent to the server, so the
        request may have side-effects.

        Set to ``0`` to fail on the first retry of this type.

    :param int redirect:
        How many redirects to perform. Limit this to avoid infinite redirect
        loops.

        A redirect is a HTTP response with a status code 301, 302, 303, 307 or
        308.

        Set to ``0`` to fail on the first retry of this type.

        Set to ``False`` to disable and imply ``raise_on_redirect=False``.

    :param int status:
        How many times to retry on bad status codes.

        These are retries made on responses, where status code matches
        ``status_forcelist``.

        Set to ``0`` to fail on the first retry of this type.

    :param iterable method_whitelist:
        Set of uppercased HTTP method verbs that we should retry on.

        By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
        same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.

        Set to a ``False`` value to retry on any verb.

    :param iterable status_forcelist:
        A set of integer HTTP status codes that we should force a retry on.
        A retry is initiated if the request method is in ``method_whitelist``
        and the response status code is in ``status_forcelist``.

        By default, this is disabled with ``None``.

    :param float backoff_factor:
        A backoff factor to apply between attempts after the second try
        (most errors are resolved immediately by a second try without a
        delay). urllib3 will sleep for::

            {backoff factor} * (2 ^ ({number of total retries} - 1))

        seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
        for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
        than :attr:`Retry.BACKOFF_MAX`.

        By default, backoff is disabled (set to 0).

    :param bool raise_on_redirect: Whether, if the number of redirects is
        exhausted, to raise a MaxRetryError, or to return a response with a
        response code in the 3xx range.

    :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
        whether we should raise an exception, or return a response,
        if status falls in ``status_forcelist`` range and retries have
        been exhausted.

    :param tuple history: The history of the request encountered during
        each call to :meth:`~Retry.increment`. The list is in the order
        the requests occurred. Each list item is of class :class:`RequestHistory`.

    :param bool respect_retry_after_header:
        Whether to respect Retry-After header on status codes defined as
        :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.

    """

    DEFAULT_METHOD_WHITELIST = frozenset([
        'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])

    RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])

    #: Maximum backoff time.
    BACKOFF_MAX = 120

    def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
                 method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
                 backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
                 history=None, respect_retry_after_header=True):

        self.total = total
        self.connect = connect
        self.read = read
        self.status = status

        # ``False`` for either value disables redirect handling entirely:
        # no redirect budget and no MaxRetryError when redirects run out.
        if redirect is False or total is False:
            redirect = 0
            raise_on_redirect = False

        self.redirect = redirect
        self.status_forcelist = status_forcelist or set()
        self.method_whitelist = method_whitelist
        self.backoff_factor = backoff_factor
        self.raise_on_redirect = raise_on_redirect
        self.raise_on_status = raise_on_status
        self.history = history or tuple()
        self.respect_retry_after_header = respect_retry_after_header

    def new(self, **kw):
        """ Return a copy of this Retry with fields overridden by ``kw``. """
        params = dict(
            total=self.total,
            connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
            method_whitelist=self.method_whitelist,
            status_forcelist=self.status_forcelist,
            backoff_factor=self.backoff_factor,
            raise_on_redirect=self.raise_on_redirect,
            raise_on_status=self.raise_on_status,
            history=self.history,
        )
        params.update(kw)
        return type(self)(**params)

    @classmethod
    def from_int(cls, retries, redirect=True, default=None):
        """ Backwards-compatibility for the old retries format."""
        if retries is None:
            retries = default if default is not None else cls.DEFAULT

        if isinstance(retries, Retry):
            return retries

        # A truthy ``redirect`` means "no separate redirect budget" (None),
        # falsy means "redirects disabled" (False).
        redirect = bool(redirect) and None
        new_retries = cls(retries, redirect=redirect)
        log.debug("Converted retries value: %r -> %r", retries, new_retries)
        return new_retries

    def get_backoff_time(self):
        """ Formula for computing the current backoff

        :rtype: float
        """
        # We want to consider only the last consecutive errors sequence (Ignore redirects).
        consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
                                                    reversed(self.history))))
        if consecutive_errors_len <= 1:
            return 0

        backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
        return min(self.BACKOFF_MAX, backoff_value)

    def parse_retry_after(self, retry_after):
        """ Parse a Retry-After header value into a non-negative number of
        seconds. Accepts either a delta-seconds integer or an HTTP-date.

        :raises InvalidHeader: If the value is neither form.
        """
        # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
        if re.match(r"^\s*[0-9]+\s*$", retry_after):
            seconds = int(retry_after)
        else:
            retry_date_tuple = email.utils.parsedate(retry_after)
            if retry_date_tuple is None:
                raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
            # NOTE(review): time.mktime interprets the tuple as *local* time,
            # while HTTP dates are GMT — the computed delay may be off by the
            # local UTC offset. TODO confirm against upstream fix.
            retry_date = time.mktime(retry_date_tuple)
            seconds = retry_date - time.time()

        if seconds < 0:
            seconds = 0

        return seconds

    def get_retry_after(self, response):
        """ Get the value of Retry-After in seconds. """

        retry_after = response.getheader("Retry-After")

        if retry_after is None:
            return None

        return self.parse_retry_after(retry_after)

    def sleep_for_retry(self, response=None):
        """ Sleep for the server-requested Retry-After duration, if any.

        :return: True if a Retry-After header was honored, else False.
        """
        retry_after = self.get_retry_after(response)
        if retry_after:
            time.sleep(retry_after)
            return True

        return False

    def _sleep_backoff(self):
        """ Sleep for the computed exponential backoff (no-op if zero). """
        backoff = self.get_backoff_time()
        if backoff <= 0:
            return
        time.sleep(backoff)

    def sleep(self, response=None):
        """ Sleep between retry attempts.

        This method will respect a server's ``Retry-After`` response header
        and sleep the duration of the time requested. If that is not present, it
        will use an exponential backoff. By default, the backoff factor is 0 and
        this method will return immediately.
        """

        if response:
            slept = self.sleep_for_retry(response)
            if slept:
                return

        self._sleep_backoff()

    def _is_connection_error(self, err):
        """ Errors when we're fairly sure that the server did not receive the
        request, so it should be safe to retry.
        """
        return isinstance(err, ConnectTimeoutError)

    def _is_read_error(self, err):
        """ Errors that occur after the request has been started, so we should
        assume that the server began processing it.
        """
        return isinstance(err, (ReadTimeoutError, ProtocolError))

    def _is_method_retryable(self, method):
        """ Checks if a given HTTP method should be retried upon, depending if
        it is included on the method whitelist.
        """
        if self.method_whitelist and method.upper() not in self.method_whitelist:
            return False

        return True

    def is_retry(self, method, status_code, has_retry_after=False):
        """ Is this method/status code retryable? (Based on whitelists and control
        variables such as the number of total retries to allow, whether to
        respect the Retry-After header, whether this header is present, and
        whether the returned status code is on the list of status codes to
        be retried upon on the presence of the aforementioned header)
        """
        if not self._is_method_retryable(method):
            return False

        if self.status_forcelist and status_code in self.status_forcelist:
            return True

        return (self.total and self.respect_retry_after_header and
                has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))

    def is_exhausted(self):
        """ Are we out of retries? """
        retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
        # filter(None, ...) drops falsy entries: disabled counts (None/False)
        # and counts that are exactly 0. Exhaustion only triggers once a
        # count has been decremented below zero by increment().
        retry_counts = list(filter(None, retry_counts))
        if not retry_counts:
            return False

        return min(retry_counts) < 0

    def increment(self, method=None, url=None, response=None, error=None,
                  _pool=None, _stacktrace=None):
        """ Return a new Retry object with incremented retry counters.

        :param response: A response object, or None, if the server did not
            return a response.
        :type response: :class:`~urllib3.response.HTTPResponse`
        :param Exception error: An error encountered during the request, or
            None if the response was received successfully.

        :return: A new ``Retry`` object.
        """
        if self.total is False and error:
            # Disabled, indicate to re-raise the error.
            raise six.reraise(type(error), error, _stacktrace)

        total = self.total
        if total is not None:
            total -= 1

        connect = self.connect
        read = self.read
        redirect = self.redirect
        status_count = self.status
        cause = 'unknown'
        status = None
        redirect_location = None

        if error and self._is_connection_error(error):
            # Connect retry?
            if connect is False:
                raise six.reraise(type(error), error, _stacktrace)
            elif connect is not None:
                connect -= 1

        elif error and self._is_read_error(error):
            # Read retry?
            if read is False or not self._is_method_retryable(method):
                raise six.reraise(type(error), error, _stacktrace)
            elif read is not None:
                read -= 1

        elif response and response.get_redirect_location():
            # Redirect retry?
            if redirect is not None:
                redirect -= 1
            cause = 'too many redirects'
            redirect_location = response.get_redirect_location()
            status = response.status

        else:
            # Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
            cause = ResponseError.GENERIC_ERROR
            if response and response.status:
                if status_count is not None:
                    status_count -= 1
                cause = ResponseError.SPECIFIC_ERROR.format(
                    status_code=response.status)
                status = response.status

        # Record this attempt so get_backoff_time() can count consecutive errors.
        history = self.history + (RequestHistory(method, url, error, status, redirect_location),)

        new_retry = self.new(
            total=total,
            connect=connect, read=read, redirect=redirect, status=status_count,
            history=history)

        if new_retry.is_exhausted():
            raise MaxRetryError(_pool, url, error or ResponseError(cause))

        log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)

        return new_retry

    def __repr__(self):
        return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
                'read={self.read}, redirect={self.redirect}, status={self.status})').format(
                    cls=type(self), self=self)
398 | |||
399 | |||
# For backwards compatibility (equivalent to pre-v1.9):
# Default Retry used by from_int() when callers pass ``retries=None``.
Retry.DEFAULT = Retry(3)
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/selectors.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/selectors.py new file mode 100644 index 0000000..9f16c66 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/selectors.py | |||
@@ -0,0 +1,581 @@ | |||
1 | # Backport of selectors.py from Python 3.5+ to support Python < 3.4 | ||
2 | # Also has the behavior specified in PEP 475 which is to retry syscalls | ||
3 | # in the case of an EINTR error. This module is required because selectors34 | ||
4 | # does not follow this behavior and instead returns that no dile descriptor | ||
5 | # events have occurred rather than retry the syscall. The decision to drop | ||
6 | # support for select.devpoll is made to maintain 100% test coverage. | ||
7 | |||
8 | import errno | ||
9 | import math | ||
10 | import select | ||
11 | import socket | ||
12 | import sys | ||
13 | import time | ||
14 | from collections import namedtuple, Mapping | ||
15 | |||
# Prefer a clock immune to system-time adjustments for timeout arithmetic;
# fall back to wall-clock time where time.monotonic is unavailable.
try:
    monotonic = time.monotonic
except (AttributeError, ImportError):  # Python 3.3<
    monotonic = time.time

# Event bitmasks accepted by register()/modify() and reported by select().
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)

HAS_SELECT = True  # Variable that shows whether the platform has a selector.
_SYSCALL_SENTINEL = object()  # Sentinel in case a system call returns None.
_DEFAULT_SELECTOR = None  # NOTE(review): presumably a lazy cache for the chosen selector class; the assignment site is not visible in this chunk.
27 | |||
28 | |||
class SelectorError(Exception):
    """ Raised when an underlying selector system call fails.

    The error number of the failing call is exposed as :attr:`errno`
    (may be None when it could not be determined).
    """

    def __init__(self, errcode):
        super(SelectorError, self).__init__()
        # errno of the failed system call.
        self.errno = errcode

    def __repr__(self):
        return "<SelectorError errno={0}>".format(self.errno)

    __str__ = __repr__
39 | |||
40 | |||
41 | def _fileobj_to_fd(fileobj): | ||
42 | """ Return a file descriptor from a file object. If | ||
43 | given an integer will simply return that integer back. """ | ||
44 | if isinstance(fileobj, int): | ||
45 | fd = fileobj | ||
46 | else: | ||
47 | try: | ||
48 | fd = int(fileobj.fileno()) | ||
49 | except (AttributeError, TypeError, ValueError): | ||
50 | raise ValueError("Invalid file object: {0!r}".format(fileobj)) | ||
51 | if fd < 0: | ||
52 | raise ValueError("Invalid file descriptor: {0}".format(fd)) | ||
53 | return fd | ||
54 | |||
55 | |||
56 | # Determine which function to use to wrap system calls because Python 3.5+ | ||
57 | # already handles the case when system calls are interrupted. | ||
58 | if sys.version_info >= (3, 5): | ||
59 | def _syscall_wrapper(func, _, *args, **kwargs): | ||
60 | """ This is the short-circuit version of the below logic | ||
61 | because in Python 3.5+ all system calls automatically restart | ||
62 | and recalculate their timeouts. """ | ||
63 | try: | ||
64 | return func(*args, **kwargs) | ||
65 | except (OSError, IOError, select.error) as e: | ||
66 | errcode = None | ||
67 | if hasattr(e, "errno"): | ||
68 | errcode = e.errno | ||
69 | raise SelectorError(errcode) | ||
70 | else: | ||
71 | def _syscall_wrapper(func, recalc_timeout, *args, **kwargs): | ||
72 | """ Wrapper function for syscalls that could fail due to EINTR. | ||
73 | All functions should be retried if there is time left in the timeout | ||
74 | in accordance with PEP 475. """ | ||
75 | timeout = kwargs.get("timeout", None) | ||
76 | if timeout is None: | ||
77 | expires = None | ||
78 | recalc_timeout = False | ||
79 | else: | ||
80 | timeout = float(timeout) | ||
81 | if timeout < 0.0: # Timeout less than 0 treated as no timeout. | ||
82 | expires = None | ||
83 | else: | ||
84 | expires = monotonic() + timeout | ||
85 | |||
86 | args = list(args) | ||
87 | if recalc_timeout and "timeout" not in kwargs: | ||
88 | raise ValueError( | ||
89 | "Timeout must be in args or kwargs to be recalculated") | ||
90 | |||
91 | result = _SYSCALL_SENTINEL | ||
92 | while result is _SYSCALL_SENTINEL: | ||
93 | try: | ||
94 | result = func(*args, **kwargs) | ||
95 | # OSError is thrown by select.select | ||
96 | # IOError is thrown by select.epoll.poll | ||
97 | # select.error is thrown by select.poll.poll | ||
98 | # Aren't we thankful for Python 3.x rework for exceptions? | ||
99 | except (OSError, IOError, select.error) as e: | ||
100 | # select.error wasn't a subclass of OSError in the past. | ||
101 | errcode = None | ||
102 | if hasattr(e, "errno"): | ||
103 | errcode = e.errno | ||
104 | elif hasattr(e, "args"): | ||
105 | errcode = e.args[0] | ||
106 | |||
107 | # Also test for the Windows equivalent of EINTR. | ||
108 | is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and | ||
109 | errcode == errno.WSAEINTR)) | ||
110 | |||
111 | if is_interrupt: | ||
112 | if expires is not None: | ||
113 | current_time = monotonic() | ||
114 | if current_time > expires: | ||
115 | raise OSError(errno=errno.ETIMEDOUT) | ||
116 | if recalc_timeout: | ||
117 | if "timeout" in kwargs: | ||
118 | kwargs["timeout"] = expires - current_time | ||
119 | continue | ||
120 | if errcode: | ||
121 | raise SelectorError(errcode) | ||
122 | else: | ||
123 | raise | ||
124 | return result | ||
125 | |||
126 | |||
# Record describing one registered file object: the original object, its
# resolved descriptor, the monitored event mask, and opaque attached data.
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
128 | |||
129 | |||
130 | class _SelectorMapping(Mapping): | ||
131 | """ Mapping of file objects to selector keys """ | ||
132 | |||
133 | def __init__(self, selector): | ||
134 | self._selector = selector | ||
135 | |||
136 | def __len__(self): | ||
137 | return len(self._selector._fd_to_key) | ||
138 | |||
139 | def __getitem__(self, fileobj): | ||
140 | try: | ||
141 | fd = self._selector._fileobj_lookup(fileobj) | ||
142 | return self._selector._fd_to_key[fd] | ||
143 | except KeyError: | ||
144 | raise KeyError("{0!r} is not registered.".format(fileobj)) | ||
145 | |||
146 | def __iter__(self): | ||
147 | return iter(self._selector._fd_to_key) | ||
148 | |||
149 | |||
class BaseSelector(object):
    """ Abstract Selector class

    A selector supports registering file objects to be monitored
    for specific I/O events.

    A file object is a file descriptor or any object with a
    `fileno()` method. An arbitrary object can be attached to the
    file object which can be used for example to store context info,
    a callback, etc.

    A selector can use various implementations (select(), poll(), epoll(),
    and kqueue()) depending on the platform. The 'DefaultSelector' class uses
    the most efficient implementation for the current platform.
    """
    def __init__(self):
        # Maps file descriptors to keys.
        self._fd_to_key = {}

        # Read-only mapping returned by get_map()
        self._map = _SelectorMapping(self)

    def _fileobj_lookup(self, fileobj):
        """ Return a file descriptor from a file object.
        This wraps _fileobj_to_fd() to do an exhaustive
        search in case the object is invalid but we still
        have it in our map. Used by unregister() so we can
        unregister an object that was previously registered
        even if it is closed. It is also used by _SelectorMapping
        """
        try:
            return _fileobj_to_fd(fileobj)
        except ValueError:

            # Search through all our mapped keys.
            for key in self._fd_to_key.values():
                if key.fileobj is fileobj:
                    return key.fd

            # Raise ValueError after all.
            raise

    def register(self, fileobj, events, data=None):
        """ Register a file object for a set of events to monitor.

        :raises ValueError: If ``events`` is empty or contains bits other
            than EVENT_READ/EVENT_WRITE.
        :raises KeyError: If the descriptor is already registered.
        """
        if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
            raise ValueError("Invalid events: {0!r}".format(events))

        key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)

        if key.fd in self._fd_to_key:
            raise KeyError("{0!r} (FD {1}) is already registered"
                           .format(fileobj, key.fd))

        self._fd_to_key[key.fd] = key
        return key

    def unregister(self, fileobj):
        """ Unregister a file object from being monitored. """
        try:
            key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
        except KeyError:
            raise KeyError("{0!r} is not registered".format(fileobj))

        # Getting the fileno of a closed socket on Windows errors with EBADF.
        except socket.error as e:  # Platform-specific: Windows.
            if e.errno != errno.EBADF:
                raise
            else:
                # Fall back to an identity scan so a closed socket can still
                # be unregistered by the object it was registered with.
                for key in self._fd_to_key.values():
                    if key.fileobj is fileobj:
                        self._fd_to_key.pop(key.fd)
                        break
                else:
                    raise KeyError("{0!r} is not registered".format(fileobj))
        return key

    def modify(self, fileobj, events, data=None):
        """ Change a registered file object monitored events and data. """
        # NOTE: Some subclasses optimize this operation even further.
        try:
            key = self._fd_to_key[self._fileobj_lookup(fileobj)]
        except KeyError:
            raise KeyError("{0!r} is not registered".format(fileobj))

        if events != events and False else events != key.events:
            self.unregister(fileobj)
            key = self.register(fileobj, events, data)

        elif data != key.data:
            # Use a shortcut to update the data.
            key = key._replace(data=data)
            self._fd_to_key[key.fd] = key

        return key

    def select(self, timeout=None):
        """ Perform the actual selection until some monitored file objects
        are ready or the timeout expires. """
        raise NotImplementedError()

    def close(self):
        """ Close the selector. This must be called to ensure that all
        underlying resources are freed. """
        self._fd_to_key.clear()
        self._map = None

    def get_key(self, fileobj):
        """ Return the key associated with a registered file object. """
        mapping = self.get_map()
        if mapping is None:
            raise RuntimeError("Selector is closed")
        try:
            return mapping[fileobj]
        except KeyError:
            raise KeyError("{0!r} is not registered".format(fileobj))

    def get_map(self):
        """ Return a mapping of file objects to selector keys """
        return self._map

    def _key_from_fd(self, fd):
        """ Return the key associated to a given file descriptor
        Return None if it is not found. """
        try:
            return self._fd_to_key[fd]
        except KeyError:
            return None

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
283 | |||
284 | |||
# Almost all platforms have select.select()
if hasattr(select, "select"):
    class SelectSelector(BaseSelector):
        """ Select-based selector. """
        def __init__(self):
            super(SelectSelector, self).__init__()
            # Descriptor sets passed straight to select.select().
            self._readers = set()
            self._writers = set()

        def register(self, fileobj, events, data=None):
            key = super(SelectSelector, self).register(fileobj, events, data)
            if events & EVENT_READ:
                self._readers.add(key.fd)
            if events & EVENT_WRITE:
                self._writers.add(key.fd)
            return key

        def unregister(self, fileobj):
            key = super(SelectSelector, self).unregister(fileobj)
            self._readers.discard(key.fd)
            self._writers.discard(key.fd)
            return key

        def _select(self, r, w, timeout=None):
            """ Wrapper for select.select because timeout is a positional arg """
            return select.select(r, w, [], timeout)

        def select(self, timeout=None):
            # Selecting on empty lists on Windows errors out.
            if not len(self._readers) and not len(self._writers):
                return []

            timeout = None if timeout is None else max(timeout, 0.0)
            ready = []
            r, w, _ = _syscall_wrapper(self._select, True, self._readers,
                                       self._writers, timeout)
            r = set(r)
            w = set(w)
            for fd in r | w:
                events = 0
                if fd in r:
                    events |= EVENT_READ
                if fd in w:
                    events |= EVENT_WRITE

                key = self._key_from_fd(fd)
                if key:
                    # Mask out events the caller did not register for.
                    ready.append((key, events & key.events))
            return ready
334 | |||
335 | |||
if hasattr(select, "poll"):
    class PollSelector(BaseSelector):
        """ Poll-based selector """
        def __init__(self):
            super(PollSelector, self).__init__()
            self._poll = select.poll()

        def register(self, fileobj, events, data=None):
            key = super(PollSelector, self).register(fileobj, events, data)
            # Translate our generic event bits into poll() flags.
            event_mask = 0
            if events & EVENT_READ:
                event_mask |= select.POLLIN
            if events & EVENT_WRITE:
                event_mask |= select.POLLOUT
            self._poll.register(key.fd, event_mask)
            return key

        def unregister(self, fileobj):
            key = super(PollSelector, self).unregister(fileobj)
            self._poll.unregister(key.fd)
            return key

        def _wrap_poll(self, timeout=None):
            """ Wrapper function for select.poll.poll() so that
            _syscall_wrapper can work with only seconds. """
            if timeout is not None:
                if timeout <= 0:
                    timeout = 0
                else:
                    # select.poll.poll() has a resolution of 1 millisecond,
                    # round away from zero to wait *at least* timeout seconds.
                    timeout = math.ceil(timeout * 1e3)

            result = self._poll.poll(timeout)
            return result

        def select(self, timeout=None):
            ready = []
            fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
            for fd, event_mask in fd_events:
                events = 0
                # Inverted-mask mapping: any flag other than POLLIN (POLLOUT,
                # POLLERR, POLLHUP, ...) marks the fd write-ready; any flag
                # other than POLLOUT marks it read-ready, so error conditions
                # surface as both.
                if event_mask & ~select.POLLIN:
                    events |= EVENT_WRITE
                if event_mask & ~select.POLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    # Mask out events the caller did not register for.
                    ready.append((key, events & key.events))

            return ready
387 | |||
388 | |||
if hasattr(select, "epoll"):
    class EpollSelector(BaseSelector):
        """ Epoll-based selector """
        def __init__(self):
            super(EpollSelector, self).__init__()
            self._epoll = select.epoll()

        def fileno(self):
            # Expose the epoll fd so this selector can itself be monitored.
            return self._epoll.fileno()

        def register(self, fileobj, events, data=None):
            key = super(EpollSelector, self).register(fileobj, events, data)
            # Translate our generic event bits into epoll() flags.
            events_mask = 0
            if events & EVENT_READ:
                events_mask |= select.EPOLLIN
            if events & EVENT_WRITE:
                events_mask |= select.EPOLLOUT
            _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
            return key

        def unregister(self, fileobj):
            key = super(EpollSelector, self).unregister(fileobj)
            try:
                _syscall_wrapper(self._epoll.unregister, False, key.fd)
            except SelectorError:
                # This can occur when the fd was closed since registry.
                pass
            return key

        def select(self, timeout=None):
            if timeout is not None:
                if timeout <= 0:
                    timeout = 0.0
                else:
                    # select.epoll.poll() has a resolution of 1 millisecond
                    # but luckily takes seconds so we don't need a wrapper
                    # like PollSelector. Just for better rounding.
                    timeout = math.ceil(timeout * 1e3) * 1e-3
                timeout = float(timeout)
            else:
                timeout = -1.0  # epoll.poll() must have a float.

            # We always want at least 1 to ensure that select can be called
            # with no file descriptors registered. Otherwise will fail.
            max_events = max(len(self._fd_to_key), 1)

            ready = []
            fd_events = _syscall_wrapper(self._epoll.poll, True,
                                         timeout=timeout,
                                         maxevents=max_events)
            for fd, event_mask in fd_events:
                events = 0
                # Inverted-mask mapping (see PollSelector): error conditions
                # surface as both readable and writable.
                if event_mask & ~select.EPOLLIN:
                    events |= EVENT_WRITE
                if event_mask & ~select.EPOLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    # Mask out events the caller did not register for.
                    ready.append((key, events & key.events))
            return ready

        def close(self):
            self._epoll.close()
            super(EpollSelector, self).close()
454 | |||
455 | |||
if hasattr(select, "kqueue"):
    class KqueueSelector(BaseSelector):
        """ Kqueue / Kevent-based selector """
        def __init__(self):
            super(KqueueSelector, self).__init__()
            self._kqueue = select.kqueue()

        def fileno(self):
            """ Return the kqueue control file descriptor. """
            return self._kqueue.fileno()

        def register(self, fileobj, events, data=None):
            """ Register interest in ``events`` for ``fileobj``.

            kqueue has no combined read/write filter, so up to two
            separate kevents are submitted, one per event type.
            """
            key = super(KqueueSelector, self).register(fileobj, events, data)
            if events & EVENT_READ:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_READ,
                                       select.KQ_EV_ADD)

                _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)

            if events & EVENT_WRITE:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_WRITE,
                                       select.KQ_EV_ADD)

                _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)

            return key

        def unregister(self, fileobj):
            """ Delete any READ/WRITE kevents previously registered for
            ``fileobj``. Errors from the kernel are swallowed because the
            fd may already have been closed since registration. """
            key = super(KqueueSelector, self).unregister(fileobj)
            if key.events & EVENT_READ:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_READ,
                                       select.KQ_EV_DELETE)
                try:
                    _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
                except SelectorError:
                    # The fd was probably closed since registration.
                    pass
            if key.events & EVENT_WRITE:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_WRITE,
                                       select.KQ_EV_DELETE)
                try:
                    _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
                except SelectorError:
                    # The fd was probably closed since registration.
                    pass

            return key

        def select(self, timeout=None):
            """ Wait for ready kevents and merge the separate read/write
            filters for the same fd into a single (key, events) pair. """
            if timeout is not None:
                timeout = max(timeout, 0)

            # Each fd can carry up to two kevents (read + write), hence x2.
            # NOTE(review): unlike EpollSelector.select there is no
            # max(..., 1) floor here, so max_events may be 0 when nothing
            # is registered -- confirm kqueue.control's behavior for 0.
            max_events = len(self._fd_to_key) * 2
            ready_fds = {}

            kevent_list = _syscall_wrapper(self._kqueue.control, True,
                                           None, max_events, timeout)

            for kevent in kevent_list:
                fd = kevent.ident
                event_mask = kevent.filter
                events = 0
                if event_mask == select.KQ_FILTER_READ:
                    events |= EVENT_READ
                if event_mask == select.KQ_FILTER_WRITE:
                    events |= EVENT_WRITE

                key = self._key_from_fd(fd)
                if key:
                    if key.fd not in ready_fds:
                        ready_fds[key.fd] = (key, events & key.events)
                    else:
                        # Second kevent for the same fd: OR the event bits
                        # together, still clipped to the registered interest.
                        old_events = ready_fds[key.fd][1]
                        ready_fds[key.fd] = (key, (events | old_events) & key.events)

            return list(ready_fds.values())

        def close(self):
            """ Close the kernel kqueue fd, then the base selector. """
            self._kqueue.close()
            super(KqueueSelector, self).close()
537 | |||
538 | |||
# If even select.select() is missing, this module has no usable selector
# implementation at all; record that for importers to check.
if not hasattr(select, 'select'):  # Platform-specific: AppEngine
    HAS_SELECT = False
541 | |||
542 | |||
543 | def _can_allocate(struct): | ||
544 | """ Checks that select structs can be allocated by the underlying | ||
545 | operating system, not just advertised by the select module. We don't | ||
546 | check select() because we'll be hopeful that most platforms that | ||
547 | don't have it available will not advertise it. (ie: GAE) """ | ||
548 | try: | ||
549 | # select.poll() objects won't fail until used. | ||
550 | if struct == 'poll': | ||
551 | p = select.poll() | ||
552 | p.poll(0) | ||
553 | |||
554 | # All others will fail on allocation. | ||
555 | else: | ||
556 | getattr(select, struct)().close() | ||
557 | return True | ||
558 | except (OSError, AttributeError) as e: | ||
559 | return False | ||
560 | |||
561 | |||
# Choose the best implementation, roughly:
# kqueue == epoll > poll > select. Devpoll not supported. (See above)
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
def DefaultSelector():
    """ Return an instance of the best selector for this platform.

    The choice is made on first call (not at import time) and cached in
    _DEFAULT_SELECTOR, preserving proper behavior when the select module
    is being monkey-patched incorrectly by eventlet or greenlet.
    """
    global _DEFAULT_SELECTOR
    if _DEFAULT_SELECTOR is not None:
        return _DEFAULT_SELECTOR()

    if _can_allocate('kqueue'):
        chosen = KqueueSelector
    elif _can_allocate('epoll'):
        chosen = EpollSelector
    elif _can_allocate('poll'):
        chosen = PollSelector
    elif hasattr(select, 'select'):
        chosen = SelectSelector
    else:  # Platform-specific: AppEngine
        raise ValueError('Platform does not have a selector')
    _DEFAULT_SELECTOR = chosen
    return _DEFAULT_SELECTOR()
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/ssl_.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/ssl_.py new file mode 100644 index 0000000..c11dff2 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/ssl_.py | |||
@@ -0,0 +1,341 @@ | |||
1 | from __future__ import absolute_import | ||
2 | import errno | ||
3 | import warnings | ||
4 | import hmac | ||
5 | |||
6 | from binascii import hexlify, unhexlify | ||
7 | from hashlib import md5, sha1, sha256 | ||
8 | |||
9 | from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning | ||
10 | |||
11 | |||
# Fallback defaults for platforms without a working ssl module; the try
# blocks below replace SSLContext/HAS_SNI when the real ones are available.
# NOTE(review): IS_PYOPENSSL / IS_SECURETRANSPORT are presumably flipped by
# contrib shims elsewhere in the package -- not visible in this file.
SSLContext = None
HAS_SNI = False
IS_PYOPENSSL = False
IS_SECURETRANSPORT = False

# Maps the length of a digest to a possible hash function producing this digest
# (lengths counted in hex characters of a colon-stripped fingerprint:
# 32 -> MD5, 40 -> SHA-1, 64 -> SHA-256).
HASHFUNC_MAP = {
    32: md5,
    40: sha1,
    64: sha256,
}
23 | |||
24 | |||
25 | def _const_compare_digest_backport(a, b): | ||
26 | """ | ||
27 | Compare two digests of equal length in constant time. | ||
28 | |||
29 | The digests must be of type str/bytes. | ||
30 | Returns True if the digests match, and False otherwise. | ||
31 | """ | ||
32 | result = abs(len(a) - len(b)) | ||
33 | for l, r in zip(bytearray(a), bytearray(b)): | ||
34 | result |= l ^ r | ||
35 | return result == 0 | ||
36 | |||
37 | |||
# Prefer the C-implemented constant-time comparison from the stdlib hmac
# module when this Python provides it; otherwise fall back to the
# pure-Python backport above.
_const_compare_digest = getattr(hmac, 'compare_digest',
                                _const_compare_digest_backport)
40 | |||
41 | |||
try:  # Test for SSL features
    import ssl
    from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
    from ssl import HAS_SNI  # Has SNI?
except ImportError:
    pass


try:
    from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
    # Fallback constants matching OpenSSL's SSL_OP_* flag values, for
    # Pythons whose ssl module doesn't expose them.
    OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
    OP_NO_COMPRESSION = 0x20000
55 | |||
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer TLS 1.3 cipher suites
# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
#   security,
# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
# NOTE(review): the 'TLS13-*' entries appear to use pre-release OpenSSL
# naming for the TLS 1.3 suites -- confirm against the OpenSSL version
# actually in use before relying on them taking effect.
DEFAULT_CIPHERS = ':'.join([
    'TLS13-AES-256-GCM-SHA384',
    'TLS13-CHACHA20-POLY1305-SHA256',
    'TLS13-AES-128-GCM-SHA256',
    'ECDH+AESGCM',
    'ECDH+CHACHA20',
    'DH+AESGCM',
    'DH+CHACHA20',
    'ECDH+AES256',
    'DH+AES256',
    'ECDH+AES128',
    'DH+AES',
    'RSA+AESGCM',
    'RSA+AES',
    '!aNULL',
    '!eNULL',
    '!MD5',
])
89 | |||
try:
    from ssl import SSLContext  # Modern SSL?
except ImportError:
    import sys

    class SSLContext(object):  # Platform-specific: Python 2 & 3.1
        """ Minimal stand-in for :class:`ssl.SSLContext` on Pythons whose
        ssl module lacks it. It records the configured settings and replays
        them through :func:`ssl.wrap_socket` when the socket is wrapped. """
        # wrap_socket(ciphers=...) only exists on Python 2.7+ and 3.2+.
        supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
                                (3, 2) <= sys.version_info)

        def __init__(self, protocol_version):
            self.protocol = protocol_version
            # Use default values from a real SSLContext
            self.check_hostname = False
            self.verify_mode = ssl.CERT_NONE
            self.ca_certs = None
            self.options = 0
            self.certfile = None
            self.keyfile = None
            self.ciphers = None

        def load_cert_chain(self, certfile, keyfile):
            # Stored now, passed to ssl.wrap_socket() in wrap_socket().
            self.certfile = certfile
            self.keyfile = keyfile

        def load_verify_locations(self, cafile=None, capath=None):
            self.ca_certs = cafile

            if capath is not None:
                raise SSLError("CA directories not supported in older Pythons")

        def set_ciphers(self, cipher_suite):
            if not self.supports_set_ciphers:
                raise TypeError(
                    'Your version of Python does not support setting '
                    'a custom cipher suite. Please upgrade to Python '
                    '2.7, 3.2, or later if you need this functionality.'
                )
            self.ciphers = cipher_suite

        def wrap_socket(self, socket, server_hostname=None, server_side=False):
            # server_hostname is accepted for API compatibility but cannot
            # be honored: plain ssl.wrap_socket() has no SNI support.
            warnings.warn(
                'A true SSLContext object is not available. This prevents '
                'urllib3 from configuring SSL appropriately and may cause '
                'certain SSL connections to fail. You can upgrade to a newer '
                'version of Python to solve this. For more information, see '
                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
                '#ssl-warnings',
                InsecurePlatformWarning
            )
            kwargs = {
                'keyfile': self.keyfile,
                'certfile': self.certfile,
                'ca_certs': self.ca_certs,
                'cert_reqs': self.verify_mode,
                'ssl_version': self.protocol,
                'server_side': server_side,
            }
            if self.supports_set_ciphers:  # Platform-specific: Python 2.7+
                return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
            else:  # Platform-specific: Python 2.6
                return wrap_socket(socket, **kwargs)
151 | |||
152 | |||
def assert_fingerprint(cert, fingerprint):
    """
    Checks if given fingerprint matches the supplied certificate.

    :param cert:
        Certificate as bytes object.
    :param fingerprint:
        Fingerprint as string of hexdigits, can be interspersed by colons.
    :raises SSLError:
        If the fingerprint has an unrecognized length or does not match.
    """
    normalized = fingerprint.replace(':', '').lower()
    # The hex length of the fingerprint selects the hash algorithm.
    hashfunc = HASHFUNC_MAP.get(len(normalized))
    if hashfunc is None:
        raise SSLError(
            'Fingerprint of invalid length: {0}'.format(normalized))

    # We need encode() here for py32; works on py2 and p33.
    expected = unhexlify(normalized.encode())
    actual = hashfunc(cert).digest()

    # Constant-time comparison to avoid leaking match position via timing.
    if not _const_compare_digest(actual, expected):
        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
                       .format(normalized, hexlify(actual)))
178 | |||
179 | |||
def resolve_cert_reqs(candidate):
    """
    Resolves the argument to a numeric constant, which can be passed to
    the wrap_socket function/method from the ssl module.
    Defaults to :data:`ssl.CERT_NONE`.
    If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbrevation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
    If it's neither `None` nor a string we assume it is already the numeric
    constant which can directly be passed to wrap_socket.
    """
    if candidate is None:
        return CERT_NONE

    if not isinstance(candidate, str):
        # Already a numeric constant; pass through untouched.
        return candidate

    # Try the exact name first, then the 'CERT_'-prefixed abbreviation.
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        resolved = getattr(ssl, 'CERT_' + candidate)
    return resolved
201 | |||
202 | |||
def resolve_ssl_version(candidate):
    """
    Resolves the argument to an ssl protocol constant, analogous to
    :func:`resolve_cert_reqs` but defaulting to :data:`ssl.PROTOCOL_SSLv23`.
    Strings are looked up on the :mod:`ssl` module directly, then with a
    ``PROTOCOL_`` prefix; non-string, non-None values pass through as-is.
    """
    if candidate is None:
        return PROTOCOL_SSLv23

    if not isinstance(candidate, str):
        return candidate

    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        resolved = getattr(ssl, 'PROTOCOL_' + candidate)
    return resolved
217 | |||
218 | |||
def create_urllib3_context(ssl_version=None, cert_reqs=None,
                           options=None, ciphers=None):
    """All arguments have the same meaning as ``ssl_wrap_socket``.

    By default, this function does a lot of the same work that
    ``ssl.create_default_context`` does on Python 3.4+. It:

    - Disables SSLv2, SSLv3, and compression
    - Sets a restricted set of server ciphers

    If you wish to enable SSLv3, you can do::

        from pip._vendor.urllib3.util import ssl_
        context = ssl_.create_urllib3_context()
        context.options &= ~ssl_.OP_NO_SSLv3

    You can do the same to enable compression (substituting ``COMPRESSION``
    for ``SSLv3`` in the last line above).

    :param ssl_version:
        The desired protocol version to use. This will default to
        PROTOCOL_SSLv23 which will negotiate the highest protocol that both
        the server and your installation of OpenSSL support.
    :param cert_reqs:
        Whether to require the certificate verification. This defaults to
        ``ssl.CERT_REQUIRED``.
    :param options:
        Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
        ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
    :param ciphers:
        Which cipher suites to allow the server to select.
    :returns:
        Constructed SSLContext object with specified options
    :rtype: SSLContext
    """
    ctx = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)

    if cert_reqs is None:
        # Defaulted here rather than in the signature: the ssl module may
        # have been missing at import time.
        cert_reqs = ssl.CERT_REQUIRED

    if options is None:
        # SSLv2 is easily broken, SSLv3 has several known problems, and
        # compression enables CRIME attacks on OpenSSL 1.0+ (issue #309),
        # so all three are disabled by default.
        options = OP_NO_SSLv2 | OP_NO_SSLv3 | OP_NO_COMPRESSION

    ctx.options |= options

    if getattr(ctx, 'supports_set_ciphers', True):  # Platform-specific: Python 2.6
        ctx.set_ciphers(ciphers or DEFAULT_CIPHERS)

    ctx.verify_mode = cert_reqs
    if getattr(ctx, 'check_hostname', None) is not None:  # Platform-specific: Python 3.2
        # We do our own verification, including fingerprints and alternative
        # hostnames. So disable it here
        ctx.check_hostname = False
    return ctx
280 | |||
281 | |||
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None, ciphers=None, ssl_context=None,
                    ca_cert_dir=None):
    """
    All arguments except for server_hostname, ssl_context, and ca_cert_dir have
    the same meaning as they do when using :func:`ssl.wrap_socket`.

    :param server_hostname:
        When SNI is supported, the expected hostname of the certificate
    :param ssl_context:
        A pre-made :class:`SSLContext` object. If none is provided, one will
        be created using :func:`create_urllib3_context`.
    :param ciphers:
        A string of ciphers we wish the client to support. This is not
        supported on Python 2.6 as the ssl module does not support it.
    :param ca_cert_dir:
        A directory containing CA certificates in multiple separate files, as
        supported by OpenSSL's -CApath flag or the capath argument to
        SSLContext.load_verify_locations().
    :returns: The TLS-wrapped socket.
    """
    context = ssl_context
    if context is None:
        # Note: This branch of code and all the variables in it are no longer
        # used by urllib3 itself. We should consider deprecating and removing
        # this code.
        context = create_urllib3_context(ssl_version, cert_reqs,
                                         ciphers=ciphers)

    if ca_certs or ca_cert_dir:
        try:
            context.load_verify_locations(ca_certs, ca_cert_dir)
        except IOError as e:  # Platform-specific: Python 2.6, 2.7, 3.2
            raise SSLError(e)
        # Py33 raises FileNotFoundError which subclasses OSError
        # These are not equivalent unless we check the errno attribute
        # NOTE(review): on Python 3.3+, IOError is an alias of OSError, so
        # the IOError handler above already catches everything and this
        # branch appears unreachable there -- confirm intended behavior.
        except OSError as e:  # Platform-specific: Python 3.3 and beyond
            if e.errno == errno.ENOENT:
                raise SSLError(e)
            raise
    elif getattr(context, 'load_default_certs', None) is not None:
        # try to load OS default certs; works well on Windows (require Python3.4+)
        context.load_default_certs()

    if certfile:
        context.load_cert_chain(certfile, keyfile)
    if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
        return context.wrap_socket(sock, server_hostname=server_hostname)

    warnings.warn(
        'An HTTPS request has been made, but the SNI (Subject Name '
        'Indication) extension to TLS is not available on this platform. '
        'This may cause the server to present an incorrect TLS '
        'certificate, which can cause validation failures. You can upgrade to '
        'a newer version of Python to solve this. For more information, see '
        'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
        '#ssl-warnings',
        SNIMissingWarning
    )
    return context.wrap_socket(sock)
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/timeout.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/timeout.py new file mode 100644 index 0000000..9c2e6ef --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/timeout.py | |||
@@ -0,0 +1,242 @@ | |||
1 | from __future__ import absolute_import | ||
2 | # The default socket timeout, used by httplib to indicate that no timeout was | ||
3 | # specified by the user | ||
4 | from socket import _GLOBAL_DEFAULT_TIMEOUT | ||
5 | import time | ||
6 | |||
7 | from ..exceptions import TimeoutStateError | ||
8 | |||
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3. Distinct from None, which callers use to mean "no timeout at all".
_Default = object()


# Use time.monotonic if available (it cannot jump backwards when the system
# clock is adjusted, which matters when measuring elapsed durations);
# otherwise fall back to time.time.
current_time = getattr(time, "monotonic", time.time)
16 | |||
17 | |||
class Timeout(object):
    """ Timeout configuration.

    Timeouts can be defined as a default for a pool::

        timeout = Timeout(connect=2.0, read=7.0)
        http = PoolManager(timeout=timeout)
        response = http.request('GET', 'http://example.com/')

    Or per-request (which overrides the default for the pool)::

        response = http.request('GET', 'http://example.com/', timeout=Timeout(10))

    Timeouts can be disabled by setting all the parameters to ``None``::

        no_timeout = Timeout(connect=None, read=None)
        response = http.request('GET', 'http://example.com/, timeout=no_timeout)


    :param total:
        This combines the connect and read timeouts into one; the read timeout
        will be set to the time leftover from the connect attempt. In the
        event that both a connect timeout and a total are specified, or a read
        timeout and a total are specified, the shorter timeout will be applied.

        Defaults to None.

    :type total: integer, float, or None

    :param connect:
        The maximum amount of time to wait for a connection attempt to a server
        to succeed. Omitting the parameter will default the connect timeout to
        the system default, probably `the global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout for connection attempts.

    :type connect: integer, float, or None

    :param read:
        The maximum amount of time to wait between consecutive
        read operations for a response from the server. Omitting
        the parameter will default the read timeout to the system
        default, probably `the global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout.

    :type read: integer, float, or None

    .. note::

        Many factors can affect the total amount of time for urllib3 to return
        an HTTP response.

        For example, Python's DNS resolver does not obey the timeout specified
        on the socket. Other factors that can affect total request time include
        high CPU load, high swap, the program running at a low priority level,
        or other behaviors.

        In addition, the read and total timeouts only measure the time between
        read operations on the socket connecting the client and the server,
        not the total amount of time for the request to return a complete
        response. For most requests, the timeout is raised because the server
        has not sent the first byte in the specified time. This is not always
        the case; if a server streams one byte every fifteen seconds, a timeout
        of 20 seconds will not trigger, even though the request will take
        several minutes to complete.

        If your goal is to cut off any request after a set amount of wall clock
        time, consider having a second "watcher" thread to cut off a slow
        request.
    """

    #: A sentinel object representing the default timeout value
    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT

    def __init__(self, total=None, connect=_Default, read=_Default):
        self._connect = self._validate_timeout(connect, 'connect')
        self._read = self._validate_timeout(read, 'read')
        self.total = self._validate_timeout(total, 'total')
        self._start_connect = None

    def __repr__(self):
        return '%s(connect=%r, read=%r, total=%r)' % (
            type(self).__name__, self._connect, self._read, self.total)

    # __str__ kept as an alias so str(timeout) output is unchanged; defining
    # __repr__ (rather than only __str__) makes Timeout objects readable in
    # debugger output and inside containers too.
    __str__ = __repr__

    @classmethod
    def _validate_timeout(cls, value, name):
        """ Check that a timeout attribute is valid.

        :param value: The timeout value to validate
        :param name: The name of the timeout attribute to validate. This is
            used to specify in error messages.
        :return: The validated and casted version of the given value.
        :raises ValueError: If it is a numeric value less than or equal to
            zero, or the type is not an integer, float, or None.
        """
        if value is _Default:
            return cls.DEFAULT_TIMEOUT

        if value is None or value is cls.DEFAULT_TIMEOUT:
            return value

        if isinstance(value, bool):
            # bool is a subclass of int, so it would otherwise slip through
            # the numeric checks below.
            raise ValueError("Timeout cannot be a boolean value. It must "
                             "be an int, float or None.")
        try:
            float(value)
        except (TypeError, ValueError):
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int, float or None." % (name, value))

        try:
            if value <= 0:
                raise ValueError("Attempted to set %s timeout to %s, but the "
                                 "timeout cannot be set to a value less "
                                 "than or equal to 0." % (name, value))
        except TypeError:  # Python 3
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int, float or None." % (name, value))

        return value

    @classmethod
    def from_float(cls, timeout):
        """ Create a new Timeout from a legacy timeout value.

        The timeout value used by httplib.py sets the same timeout on the
        connect(), and recv() socket requests. This creates a :class:`Timeout`
        object that sets the individual timeouts to the ``timeout`` value
        passed to this function.

        :param timeout: The legacy timeout value.
        :type timeout: integer, float, sentinel default object, or None
        :return: Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(read=timeout, connect=timeout)

    def clone(self):
        """ Create a copy of the timeout object

        Timeout properties are stored per-pool but each request needs a fresh
        Timeout object to ensure each one has its own start/stop configured.

        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # We can't use copy.deepcopy because that will also create a new object
        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
        # detect the user default.
        return Timeout(connect=self._connect, read=self._read,
                       total=self.total)

    def start_connect(self):
        """ Start the timeout clock, used during a connect() attempt

        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        self._start_connect = current_time()
        return self._start_connect

    def get_connect_duration(self):
        """ Gets the time elapsed since the call to :meth:`start_connect`.

        :return: Elapsed time.
        :rtype: float
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to get duration for a timer that hasn't been started.
        """
        if self._start_connect is None:
            raise TimeoutStateError("Can't get connect duration for timer "
                                    "that has not started.")
        return current_time() - self._start_connect

    @property
    def connect_timeout(self):
        """ Get the value to use when setting a connection timeout.

        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.

        :return: Connect timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        if self.total is None:
            return self._connect

        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
            return self.total

        return min(self._connect, self.total)

    @property
    def read_timeout(self):
        """ Get the value for the read timeout.

        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.

        If self.total is set, the read timeout is dependent on the amount of
        time taken by the connect timeout. If the connection time has not been
        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
        raised.

        :return: Value to use for the read timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
            has not yet been called on this object.
        """
        if (self.total is not None and
                self.total is not self.DEFAULT_TIMEOUT and
                self._read is not None and
                self._read is not self.DEFAULT_TIMEOUT):
            # In case the connect timeout has not yet been established.
            if self._start_connect is None:
                return self._read
            return max(0, min(self.total - self.get_connect_duration(),
                              self._read))
        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
            return max(0, self.total - self.get_connect_duration())
        else:
            return self._read
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/url.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/url.py new file mode 100644 index 0000000..60f826a --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/url.py | |||
@@ -0,0 +1,230 @@ | |||
1 | from __future__ import absolute_import | ||
2 | from collections import namedtuple | ||
3 | |||
4 | from ..exceptions import LocationParseError | ||
5 | |||
6 | |||
# Field names of the Url namedtuple, in the order they appear in a URL.
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']

# We only want to normalize urls with an HTTP(S) scheme.
# urllib3 infers URLs without a scheme (None) to be http.
NORMALIZABLE_SCHEMES = ('http', 'https', None)
12 | |||
13 | |||
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path',
                             'query', 'fragment'])):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`. Both the scheme and host are normalized as they are
    both case-insensitive according to RFC 3986.
    """
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # A non-empty path is always made absolute.
        if path and not path.startswith('/'):
            path = '/' + path
        if scheme:
            scheme = scheme.lower()
        # Hosts are case-insensitive; only normalize for http(s) or
        # schemeless (inferred-http) URLs.
        if host and scheme in ('http', 'https', None):
            host = host.lower()
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        base = self.path or '/'
        if self.query is None:
            return base
        return base + '?' + self.query

    @property
    def netloc(self):
        """Network location including host and port"""
        if not self.port:
            return self.host
        return '%s:%d' % (self.host, self.port)

    @property
    def url(self):
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
        with a blank port will have : removed).

        Example: ::

            >>> U = parse_url('http://google.com/mail/')
            >>> U.url
            'http://google.com/mail/'
            >>> Url('http', 'username:password', 'host.com', 80,
            ... '/path', 'query', 'fragment').url
            'http://username:password@host.com:80/path?query#fragment'
        """
        scheme, auth, host, port, path, query, fragment = self

        # Test "is not None" rather than truthiness so that empty strings
        # (and a 0 port) are still emitted.
        pieces = []
        if scheme is not None:
            pieces.append(scheme + '://')
        if auth is not None:
            pieces.append(auth + '@')
        if host is not None:
            pieces.append(host)
        if port is not None:
            pieces.append(':' + str(port))
        if path is not None:
            pieces.append(path)
        if query is not None:
            pieces.append('?' + query)
        if fragment is not None:
            pieces.append('#' + fragment)

        return ''.join(pieces)

    def __str__(self):
        return self.url
97 | |||
98 | |||
def split_first(s, delims):
    """
    Given a string and an iterable of delimiters, split on the first found
    delimiter. Return two split parts and the matched delimiter.

    If not found, then the first part is the full input string.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    best_idx = None
    best_delim = None
    for delim in delims:
        idx = s.find(delim)
        # Keep the earliest hit; ties go to the delimiter seen first.
        if idx >= 0 and (best_idx is None or idx < best_idx):
            best_idx = idx
            best_delim = delim

    if best_idx is None:
        # No delimiter present at all.
        return s, '', None

    return s[:best_idx], s[best_idx + 1:], best_delim
130 | |||
131 | |||
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """

    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementations does silly things to be optimal
    # on CPython.

    if not url:
        # Empty
        return Url()

    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path
        path = delim + path_

    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)

    # IPv6
    if url and url[0] == '[':
        # split(']', 1) drops the closing bracket, so re-attach it to keep
        # the host in its bracketed '[...]' form; `url` keeps any ':port'.
        host, url = url.split(']', 1)
        host += ']'

    # Port
    if ':' in url:
        # For a bracketed IPv6 host, `host` was set above and `_host` is
        # the (empty) leftover before the port, so it is discarded below.
        _host, port = url.split(':', 1)

        if not host:
            host = _host

        if port:
            # If given, ports must be integers. No whitespace, no plus or
            # minus prefixes, no non-integer digits such as ^2 (superscript).
            if not port.isdigit():
                raise LocationParseError(url)
            try:
                # int() still rejects digit-like characters (e.g. Unicode
                # superscripts) that str.isdigit() accepts.
                port = int(port)
            except ValueError:
                raise LocationParseError(url)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None

    elif not host and url:
        host = url

    if not path:
        # No path component: fragment/query parsing below would be a no-op.
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)
224 | |||
def get_host(url):
    """
    Deprecated. Use :func:`parse_url` instead.
    """
    parsed = parse_url(url)
    # A missing scheme is inferred to be plain http.
    scheme = parsed.scheme or 'http'
    return scheme, parsed.hostname, parsed.port
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/wait.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/wait.py new file mode 100644 index 0000000..46392f2 --- /dev/null +++ b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/urllib3/util/wait.py | |||
@@ -0,0 +1,40 @@ | |||
1 | from .selectors import ( | ||
2 | HAS_SELECT, | ||
3 | DefaultSelector, | ||
4 | EVENT_READ, | ||
5 | EVENT_WRITE | ||
6 | ) | ||
7 | |||
8 | |||
def _wait_for_io_events(socks, events, timeout=None):
    """ Waits for IO events to be available from a list of sockets
    or optionally a single socket if passed in. Returns a list of
    sockets that can be interacted with immediately. """
    if not HAS_SELECT:
        raise ValueError('Platform does not have a selector')

    # Normalize the input to a list of socket-like objects.
    if not isinstance(socks, list):
        if hasattr(socks, "fileno"):
            # Probably just a single socket.
            socks = [socks]
        else:
            # Otherwise it might be a non-list iterable.
            socks = list(socks)

    with DefaultSelector() as selector:
        for sock in socks:
            selector.register(sock, events)
        # select() yields (SelectorKey, event_mask) pairs; keep only the
        # registered objects whose ready events overlap what was asked for.
        ready = []
        for key, event_mask in selector.select(timeout):
            if event_mask & events:
                ready.append(key.fileobj)
        return ready
27 | |||
28 | |||
def wait_for_read(socks, timeout=None):
    """ Waits for reading to be available from a list of sockets
    or optionally a single socket if passed in. Returns a list of
    sockets that can be read from immediately. """
    return _wait_for_io_events(socks, EVENT_READ, timeout=timeout)
34 | |||
35 | |||
def wait_for_write(socks, timeout=None):
    """ Waits for writing to be available from a list of sockets
    or optionally a single socket if passed in. Returns a list of
    sockets that can be written to immediately. """
    return _wait_for_io_events(socks, EVENT_WRITE, timeout=timeout)