diff options
Diffstat (limited to 'venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/distlib/util.py')
-rw-r--r-- | venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/distlib/util.py | 1755 |
1 files changed, 0 insertions, 1755 deletions
diff --git a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/distlib/util.py b/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/distlib/util.py deleted file mode 100644 index b1d3f90..0000000 --- a/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/distlib/util.py +++ /dev/null | |||
@@ -1,1755 +0,0 @@ | |||
1 | # | ||
2 | # Copyright (C) 2012-2017 The Python Software Foundation. | ||
3 | # See LICENSE.txt and CONTRIBUTORS.txt. | ||
4 | # | ||
5 | import codecs | ||
6 | from collections import deque | ||
7 | import contextlib | ||
8 | import csv | ||
9 | from glob import iglob as std_iglob | ||
10 | import io | ||
11 | import json | ||
12 | import logging | ||
13 | import os | ||
14 | import py_compile | ||
15 | import re | ||
16 | import socket | ||
17 | try: | ||
18 | import ssl | ||
19 | except ImportError: # pragma: no cover | ||
20 | ssl = None | ||
21 | import subprocess | ||
22 | import sys | ||
23 | import tarfile | ||
24 | import tempfile | ||
25 | import textwrap | ||
26 | |||
27 | try: | ||
28 | import threading | ||
29 | except ImportError: # pragma: no cover | ||
30 | import dummy_threading as threading | ||
31 | import time | ||
32 | |||
33 | from . import DistlibException | ||
34 | from .compat import (string_types, text_type, shutil, raw_input, StringIO, | ||
35 | cache_from_source, urlopen, urljoin, httplib, xmlrpclib, | ||
36 | splittype, HTTPHandler, BaseConfigurator, valid_ident, | ||
37 | Container, configparser, URLError, ZipFile, fsdecode, | ||
38 | unquote, urlparse) | ||
39 | |||
logger = logging.getLogger(__name__)

#
# Requirement parsing code as per PEP 508
#

# A distribution/extra/marker-variable name: word chars, dots, dashes.
IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
# A version string; additionally allows '*' and '+' (wildcards and
# local version segments).
VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
# Version comparison operators: <, <=, >, >=, ==, ===, ~=, !=.
COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
# Marker comparison operators: the version operators plus in / not in.
MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
OR = re.compile(r'^or\b\s*')
AND = re.compile(r'^and\b\s*')
# A run of non-whitespace (used e.g. for the URI after '@').
NON_SPACE = re.compile(r'(\S+)\s*')
# Characters permitted inside a quoted string literal in a marker.
STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
54 | |||
55 | |||
def parse_marker(marker_string):
    """
    Parse a marker string and return a dictionary containing a marker expression.

    The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
    the expression grammar, or strings. A string contained in quotes is to be
    interpreted as a literal string, and a string not contained in quotes is a
    variable (such as os_name).
    """
    # Recursive-descent parser for the PEP 508 marker grammar. Precedence,
    # lowest to highest: "or" (marker), "and" (marker_and), comparisons
    # (marker_expr), atoms (marker_var). Each helper consumes text from the
    # front of ``remaining`` and returns (result, unconsumed_text).
    def marker_var(remaining):
        # either identifier, or literal string
        m = IDENTIFIER.match(remaining)
        if m:
            result = m.groups()[0]
            remaining = remaining[m.end():]
        elif not remaining:
            raise SyntaxError('unexpected end of input')
        else:
            q = remaining[0]
            if q not in '\'"':
                raise SyntaxError('invalid expression: %s' % remaining)
            # oq is the "other" quote character, allowed verbatim inside
            # the literal.
            oq = '\'"'.replace(q, '')
            remaining = remaining[1:]
            parts = [q]
            while remaining:
                # either a string chunk, or oq, or q to terminate
                if remaining[0] == q:
                    break
                elif remaining[0] == oq:
                    parts.append(oq)
                    remaining = remaining[1:]
                else:
                    m = STRING_CHUNK.match(remaining)
                    if not m:
                        raise SyntaxError('error in string literal: %s' % remaining)
                    parts.append(m.groups()[0])
                    remaining = remaining[m.end():]
            else:
                # while-else: input exhausted before the closing quote.
                s = ''.join(parts)
                raise SyntaxError('unterminated string: %s' % s)
            parts.append(q)
            result = ''.join(parts)
            remaining = remaining[1:].lstrip() # skip past closing quote
        return result, remaining

    def marker_expr(remaining):
        # A parenthesized sub-marker, or a chain of comparisons of atoms.
        if remaining and remaining[0] == '(':
            result, remaining = marker(remaining[1:].lstrip())
            if remaining[0] != ')':
                raise SyntaxError('unterminated parenthesis: %s' % remaining)
            remaining = remaining[1:].lstrip()
        else:
            lhs, remaining = marker_var(remaining)
            while remaining:
                m = MARKER_OP.match(remaining)
                if not m:
                    break
                op = m.groups()[0]
                remaining = remaining[m.end():]
                rhs, remaining = marker_var(remaining)
                # Left-associative: fold each comparison into the lhs.
                lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
            result = lhs
        return result, remaining

    def marker_and(remaining):
        # One or more marker_expr joined by "and" (left-associative).
        lhs, remaining = marker_expr(remaining)
        while remaining:
            m = AND.match(remaining)
            if not m:
                break
            remaining = remaining[m.end():]
            rhs, remaining = marker_expr(remaining)
            lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
        return lhs, remaining

    def marker(remaining):
        # One or more marker_and joined by "or" (left-associative).
        lhs, remaining = marker_and(remaining)
        while remaining:
            m = OR.match(remaining)
            if not m:
                break
            remaining = remaining[m.end():]
            rhs, remaining = marker_and(remaining)
            lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
        return lhs, remaining

    return marker(marker_string)
143 | |||
144 | |||
def parse_requirement(req):
    """
    Parse a requirement passed in as a string. Return a Container
    whose attributes contain the various parts of the requirement:
    name, extras, constraints, marker, url and requirement (the
    reconstructed "name op version, ..." string).

    Returns None for blank lines and comment lines (starting with '#').
    """
    remaining = req.strip()
    if not remaining or remaining.startswith('#'):
        return None
    m = IDENTIFIER.match(remaining)
    if not m:
        raise SyntaxError('name expected: %s' % remaining)
    distname = m.groups()[0]
    remaining = remaining[m.end():]
    extras = mark_expr = versions = uri = None
    if remaining and remaining[0] == '[':
        # Parse the "[extra1, extra2, ...]" part.
        i = remaining.find(']', 1)
        if i < 0:
            raise SyntaxError('unterminated extra: %s' % remaining)
        s = remaining[1:i]
        remaining = remaining[i + 1:].lstrip()
        extras = []
        while s:
            m = IDENTIFIER.match(s)
            if not m:
                raise SyntaxError('malformed extra: %s' % s)
            extras.append(m.groups()[0])
            s = s[m.end():]
            if not s:
                break
            if s[0] != ',':
                raise SyntaxError('comma expected in extras: %s' % s)
            s = s[1:].lstrip()
        if not extras:
            extras = None
    if remaining:
        if remaining[0] == '@':
            # it's a URI
            remaining = remaining[1:].lstrip()
            m = NON_SPACE.match(remaining)
            if not m:
                raise SyntaxError('invalid URI: %s' % remaining)
            uri = m.groups()[0]
            t = urlparse(uri)
            # there are issues with Python and URL parsing, so this test
            # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
            # always parse invalid URLs correctly - it should raise
            # exceptions for malformed URLs
            if not (t.scheme and t.netloc):
                raise SyntaxError('Invalid URL: %s' % uri)
            remaining = remaining[m.end():].lstrip()
        else:

            def get_versions(ver_remaining):
                """
                Return a list of operator, version tuples if any are
                specified, else None.
                """
                m = COMPARE_OP.match(ver_remaining)
                versions = None
                if m:
                    versions = []
                    while True:
                        op = m.groups()[0]
                        ver_remaining = ver_remaining[m.end():]
                        m = VERSION_IDENTIFIER.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid version: %s' % ver_remaining)
                        v = m.groups()[0]
                        versions.append((op, v))
                        ver_remaining = ver_remaining[m.end():]
                        # Constraints are comma-separated; anything else
                        # ends the version list.
                        if not ver_remaining or ver_remaining[0] != ',':
                            break
                        ver_remaining = ver_remaining[1:].lstrip()
                        m = COMPARE_OP.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid constraint: %s' % ver_remaining)
                    if not versions:
                        versions = None
                return versions, ver_remaining

            if remaining[0] != '(':
                versions, remaining = get_versions(remaining)
            else:
                i = remaining.find(')', 1)
                if i < 0:
                    raise SyntaxError('unterminated parenthesis: %s' % remaining)
                s = remaining[1:i]
                remaining = remaining[i + 1:].lstrip()
                # As a special diversion from PEP 508, allow a version number
                # a.b.c in parentheses as a synonym for ~= a.b.c (because this
                # is allowed in earlier PEPs)
                if COMPARE_OP.match(s):
                    versions, _ = get_versions(s)
                else:
                    m = VERSION_IDENTIFIER.match(s)
                    if not m:
                        raise SyntaxError('invalid constraint: %s' % s)
                    v = m.groups()[0]
                    s = s[m.end():].lstrip()
                    if s:
                        raise SyntaxError('invalid constraint: %s' % s)
                    versions = [('~=', v)]

    if remaining:
        # Anything left must be an environment marker, introduced by ';'.
        if remaining[0] != ';':
            raise SyntaxError('invalid requirement: %s' % remaining)
        remaining = remaining[1:].lstrip()

        mark_expr, remaining = parse_marker(remaining)

    if remaining and remaining[0] != '#':
        raise SyntaxError('unexpected trailing data: %s' % remaining)

    if not versions:
        rs = distname
    else:
        rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
    return Container(name=distname, extras=extras, constraints=versions,
                     marker=mark_expr, url=uri, requirement=rs)
264 | |||
265 | |||
def get_resources_dests(resources_root, rules):
    """Find destinations for resources files.

    ``rules`` is an iterable of (base, suffix, dest) glob triples rooted at
    ``resources_root``; the result maps each matched source path (relative
    to ``resources_root``, '/'-separated) to its destination. A ``dest`` of
    None removes previously matched entries instead of adding one.
    """
    def relative_to(root, path):
        # Normalise separators to '/' and strip the root prefix.
        root = root.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(root)
        return path[len(root):].lstrip('/')

    destinations = {}
    for base, suffix, dest in rules:
        prefix = os.path.join(resources_root, base)
        for abs_base in iglob(prefix):
            abs_glob = os.path.join(abs_base, suffix)
            for abs_path in iglob(abs_glob):
                resource_file = relative_to(resources_root, abs_path)
                if dest is None:
                    # A None destination removes the entry if it was added
                    # by an earlier rule.
                    destinations.pop(resource_file, None)
                else:
                    rel_path = relative_to(abs_base, abs_path)
                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = rel_dest + '/' + rel_path
    return destinations
290 | |||
291 | |||
def in_venv():
    """Return True when running inside a virtual environment.

    virtualenv sets ``sys.real_prefix``; PEP 405 venvs make ``sys.prefix``
    differ from ``sys.base_prefix``.
    """
    if hasattr(sys, 'real_prefix'):
        return True
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
300 | |||
301 | |||
def get_executable():
    """Return the case-normalised path of the running Python interpreter,
    decoded to text if the platform hands back bytes.

    NOTE: the old ``__PYVENV_LAUNCHER__`` special-casing for OS X was
    removed; sys.executable now always points at the stub launcher.
    """
    exe = os.path.normcase(sys.executable)
    if not isinstance(exe, text_type):
        exe = fsdecode(exe)
    return exe
316 | |||
317 | |||
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    """Prompt the user repeatedly until the (lowercased) first character of
    the response is in ``allowed_chars``; return that character.

    An empty response selects ``default`` when one is given. On an invalid
    response, ``error_prompt`` (if supplied) is prepended to the next prompt.
    """
    current = prompt
    while True:
        answer = raw_input(current)
        current = prompt
        if not answer and default:
            answer = default
        if answer:
            c = answer[0].lower()
            if c in allowed_chars:
                return c
            if error_prompt:
                current = '%c: %s\n%s' % (c, error_prompt, prompt)
332 | |||
333 | |||
def extract_by_key(d, keys):
    """Return a new dict containing only the entries of ``d`` named in
    ``keys`` (a sequence, or a single whitespace-separated string)."""
    if isinstance(keys, string_types):
        keys = keys.split()
    return {key: d[key] for key in keys if key in d}
342 | |||
def read_exports(stream):
    """Read exports (entry points) from a binary stream and return a mapping
    {group: {name: ExportEntry}}.

    JSON metadata ('python.exports' extension) is tried first; on any
    failure the stream is re-read as the legacy ini-style format.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    stream = StringIO(data)
    try:
        jdata = json.load(stream)
        result = jdata['extensions']['python.exports']['exports']
        # Convert each "name = value" pair into an ExportEntry in place.
        for group, entries in result.items():
            for k, v in entries.items():
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        # Not JSON (or wrong shape): rewind and parse as ini below.
        stream.seek(0, 0)

    def read_stream(cp, stream):
        # read_file is the Python 3 spelling; readfp the Python 2 one.
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
        else:
            cp.readfp(stream)

    cp = configparser.ConfigParser()
    try:
        read_stream(cp, stream)
    except configparser.MissingSectionHeaderError:
        # Possibly indented legacy data: dedent and retry once.
        stream.close()
        data = textwrap.dedent(data)
        stream = StringIO(data)
        read_stream(cp, stream)

    result = {}
    for key in cp.sections():
        result[key] = entries = {}
        for name, value in cp.items(key):
            s = '%s = %s' % (name, value)
            entry = get_export_entry(s)
            assert entry is not None
            #entry.dist = self
            entries[name] = entry
    return result
388 | |||
389 | |||
def write_exports(exports, stream):
    """Write an exports mapping {group: {name: ExportEntry}} to a binary
    stream in the legacy ini format ("name = prefix:suffix [flags]")."""
    if sys.version_info[0] >= 3:
        # ConfigParser wants a text stream; wrap the binary one.
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for group, entries in exports.items():
        # TODO check group/entries for valid values
        cp.add_section(group)
        for entry in entries.values():
            if entry.suffix is None:
                spec = entry.prefix
            else:
                spec = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                spec = '%s [%s]' % (spec, ', '.join(entry.flags))
            cp.set(group, entry.name, spec)
    cp.write(stream)
407 | |||
408 | |||
@contextlib.contextmanager
def tempdir():
    """Context manager yielding a fresh temporary directory which is
    removed, with all its contents, on exit."""
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
416 | |||
@contextlib.contextmanager
def chdir(d):
    """Context manager that runs its body with ``d`` as the current
    working directory, restoring the previous one afterwards."""
    previous = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(previous)
425 | |||
426 | |||
@contextlib.contextmanager
def socket_timeout(seconds=15):
    """Temporarily set the process-wide default socket timeout, restoring
    the previous value on exit."""
    saved = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(saved)
435 | |||
436 | |||
class cached_property(object):
    """Descriptor that computes a value once per instance and caches it by
    storing it on the instance under the wrapped function's name, so later
    lookups bypass the descriptor entirely."""

    def __init__(self, func):
        # The wrapped function; its __name__ is the cache attribute name.
        self.func = func

    def __get__(self, obj, cls=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        value = self.func(obj)
        # object.__setattr__ sidesteps any __setattr__ override on obj.
        object.__setattr__(obj, self.func.__name__, value)
        return value
450 | |||
def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem.

    The path is split on '/' and reassembled using the current directory
    separator, because filenames in setup scripts are always supplied in
    Unix style. Raises ValueError on non-Unix-ish systems if 'pathname'
    either starts or ends with a slash.
    """
    if os.sep == '/' or not pathname:
        # Already native (or empty): nothing to do.
        return pathname
    if pathname.startswith('/'):
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname.endswith('/'):
        raise ValueError("path '%s' cannot end with '/'" % pathname)

    # Drop any '.' components before joining.
    components = [c for c in pathname.split('/') if c != os.curdir]
    if not components:
        return os.curdir
    return os.path.join(*components)
476 | |||
477 | |||
class FileOperator(object):
    """
    Perform filesystem operations (copy, write, mkdir, byte-compile,
    remove) honouring a dry-run flag, and optionally recording files
    written and directories created so changes can later be committed
    or rolled back.
    """
    def __init__(self, dry_run=False):
        # When dry_run is True, actions are logged but nothing touches disk.
        self.dry_run = dry_run
        # Directories already ensured to exist; avoids repeated checks.
        self.ensured = set()
        self._init_record()

    def _init_record(self):
        # Recording is off until a caller sets self.record = True.
        self.record = False
        self.files_written = set()
        self.dirs_created = set()

    def record_as_written(self, path):
        # Remember a written file for later commit()/rollback().
        if self.record:
            self.files_written.add(path)

    def newer(self, source, target):
        """Tell if the target is newer than the source.

        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.

        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise PackagingFileError if 'source' does not exist.

        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True

        return os.stat(source).st_mtime > os.stat(target).st_mtime

    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                # Refuse to clobber symlinks and non-regular files.
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
            if msg:
                raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)

    def copy_stream(self, instream, outfile, encoding=None):
        # Copy an open stream to a file; when ``encoding`` is given the
        # stream is assumed to yield text, which is encoded on write.
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)

    def write_binary_file(self, path, data):
        # Write bytes to path, creating parent directories as needed.
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)

    def write_text_file(self, path, data, encoding):
        # Encode text with ``encoding`` and write it as bytes.
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data.encode(encoding))
        self.record_as_written(path)

    def set_mode(self, bits, mask, files):
        # Apply (mode | bits) & mask to each file; POSIX only (including
        # Jython running on POSIX, where os.name is 'java').
        if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
            # Set the executable bits (owner, group, and world) on
            # all the files specified.
            for f in files:
                if self.dry_run:
                    logger.info("changing mode of %s", f)
                else:
                    mode = (os.stat(f).st_mode | bits) & mask
                    logger.info("changing mode of %s to %o", f, mode)
                    os.chmod(f, mode)

    # Convenience wrapper: make files readable/executable by all.
    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)

    def ensure_dir(self, path):
        # Recursively create ``path``, caching what has been ensured so
        # repeated calls are cheap.
        path = os.path.abspath(path)
        if path not in self.ensured and not os.path.exists(path):
            self.ensured.add(path)
            d, f = os.path.split(path)
            self.ensure_dir(d)
            logger.info('Creating %s' % path)
            if not self.dry_run:
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)

    def byte_compile(self, path, optimize=False, force=False, prefix=None):
        # Byte-compile ``path`` to its cache location; return the .pyc path.
        dpath = cache_from_source(path, not optimize)
        logger.info('Byte-compiling %s to %s', path, dpath)
        if not self.dry_run:
            if force or self.newer(path, dpath):
                if not prefix:
                    diagpath = None
                else:
                    # Report paths relative to ``prefix`` in diagnostics.
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
            py_compile.compile(path, dpath, diagpath, True)     # raise error
        self.record_as_written(dpath)
        return dpath

    def ensure_removed(self, path):
        # Remove a file, link or directory tree if it exists, keeping the
        # recording sets consistent.
        if os.path.exists(path):
            if os.path.isdir(path) and not os.path.islink(path):
                logger.debug('Removing directory tree at %s', path)
                if not self.dry_run:
                    shutil.rmtree(path)
                if self.record:
                    if path in self.dirs_created:
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if not self.dry_run:
                    os.remove(path)
                if self.record:
                    if path in self.files_written:
                        self.files_written.remove(path)

    def is_writable(self, path):
        # Walk up from ``path`` to the first existing ancestor and report
        # whether that ancestor is writable.
        result = False
        while not result:
            if os.path.exists(path):
                result = os.access(path, os.W_OK)
                break
            parent = os.path.dirname(path)
            if parent == path:
                # Reached the filesystem root without finding anything.
                break
            path = parent
        return result

    def commit(self):
        """
        Commit recorded changes, turn off recording, return
        changes.
        """
        assert self.record
        result = self.files_written, self.dirs_created
        self._init_record()
        return result

    def rollback(self):
        # Undo everything recorded since recording was turned on.
        if not self.dry_run:
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # dirs should all be empty now, except perhaps for
            # __pycache__ subdirs
            # reverse so that subdirs appear before their parents
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert flist == ['__pycache__']
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d)     # should fail if non-empty
        self._init_record()
659 | |||
def resolve(module_name, dotted_path):
    """Import ``module_name`` (reusing sys.modules when possible) and walk
    ``dotted_path`` as a chain of attributes on it.

    Returns the module itself when ``dotted_path`` is None.
    """
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        mod = __import__(module_name)
    if dotted_path is None:
        return mod
    attrs = dotted_path.split('.')
    obj = getattr(mod, attrs.pop(0))
    for attr in attrs:
        obj = getattr(obj, attr)
    return obj
673 | |||
674 | |||
class ExportEntry(object):
    """A single export (entry point): ``name = prefix:suffix [flags]``."""

    def __init__(self, name, prefix, suffix, flags):
        self.name = name
        self.prefix = prefix
        self.suffix = suffix
        self.flags = flags

    @cached_property
    def value(self):
        # Resolved lazily (and cached): the object this entry refers to.
        return resolve(self.prefix, self.suffix)

    def __repr__(self):  # pragma: no cover
        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
                                                self.suffix, self.flags)

    def __eq__(self, other):
        # Field-wise equality against other ExportEntry instances only.
        if not isinstance(other, ExportEntry):
            return False
        return (self.name == other.name and
                self.prefix == other.prefix and
                self.suffix == other.suffix and
                self.flags == other.flags)

    # Keep the default identity hash despite defining __eq__, matching
    # the original behaviour.
    __hash__ = object.__hash__
701 | |||
702 | |||
# Matches an export specification "name = callable [flags]": the callable
# part is dotted/colon-separated identifiers; flags, when present, are a
# bracketed comma-separated list of words, each optionally "word=word".
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
                      \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
                      \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
                      ''', re.VERBOSE)
707 | |||
def get_export_entry(specification):
    """Parse an export specification ("name = prefix:suffix [flags]") and
    return an ExportEntry, or None when the text doesn't look like one.

    Raises DistlibException for malformed specifications: stray brackets,
    or more than one colon in the callable part.
    """
    m = ENTRY_RE.search(specification)
    if not m:
        result = None
        # Brackets without an overall match mean a broken spec rather
        # than a plain non-export line.
        if '[' in specification or ']' in specification:
            raise DistlibException("Invalid specification "
                                   "'%s'" % specification)
    else:
        d = m.groupdict()
        name = d['name']
        path = d['callable']
        colons = path.count(':')
        if colons == 0:
            # Bare module/attribute path: no suffix.
            prefix, suffix = path, None
        else:
            if colons != 1:
                raise DistlibException("Invalid specification "
                                       "'%s'" % specification)
            prefix, suffix = path.split(':')
        flags = d['flags']
        if flags is None:
            # Brackets present but not captured as flags: malformed.
            if '[' in specification or ']' in specification:
                raise DistlibException("Invalid specification "
                                       "'%s'" % specification)
            flags = []
        else:
            flags = [f.strip() for f in flags.split(',')]
        result = ExportEntry(name, prefix, suffix, flags)
    return result
737 | |||
738 | |||
def get_cache_base(suffix=None):
    """
    Return the default base location for distlib caches. If the directory does
    not exist, it is created. Use the suffix provided for the base directory,
    and default to '.distlib' if it isn't provided.

    On Windows, if LOCALAPPDATA is defined in the environment, then it is
    assumed to be a directory, and will be the parent directory of the result.
    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.expanduser('~') - will be the parent directory of
    the result.

    The result is just the directory '.distlib' in the parent directory as
    determined above, or with the name specified with ``suffix``.
    """
    if suffix is None:
        suffix = '.distlib'
    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
        parent = os.path.expandvars('$localappdata')
    else:
        # Assume posix, or old Windows
        parent = os.path.expanduser('~')
    # 'isdir' rather than 'exists': an ordinary file of that name must
    # count as unusable, not be silently accepted.
    if os.path.isdir(parent):
        usable = os.access(parent, os.W_OK)
        if not usable:
            logger.warning('Directory exists but is not writable: %s', parent)
    else:
        try:
            os.makedirs(parent)
            usable = True
        except OSError:
            logger.warning('Unable to create %s', parent, exc_info=True)
            usable = False
    if not usable:
        # Last resort: a throwaway temporary directory.
        parent = tempfile.mkdtemp()
        logger.warning('Default location unusable, using %s', parent)
    return os.path.join(parent, suffix)
778 | |||
779 | |||
def path_to_cache_dir(path):
    """
    Convert an absolute path to a directory name for use in a cache.

    The algorithm used is:

    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
    #. ``'.cache'`` is appended.
    """
    drive, rest = os.path.splitdrive(os.path.abspath(path))
    if drive:
        drive = drive.replace(':', '---')
    return drive + rest.replace(os.sep, '--') + '.cache'
795 | |||
796 | |||
def ensure_slash(s):
    """Return ``s``, appending a trailing '/' if it lacks one."""
    return s if s.endswith('/') else s + '/'
801 | |||
802 | |||
def parse_credentials(netloc):
    """Extract credentials from a netloc string.

    Returns a (username, password, host) tuple; username and password are
    None when absent, and password is None for a bare ``user@host``.
    """
    username = password = None
    if '@' in netloc:
        # Split on the LAST '@': the host part cannot contain '@', but a
        # password may, so split('@', 1) would truncate such passwords.
        prefix, netloc = netloc.rsplit('@', 1)
        if ':' not in prefix:
            username = prefix
        else:
            # Only the first ':' separates user from password.
            username, password = prefix.split(':', 1)
    return username, password, netloc
812 | |||
813 | |||
def get_process_umask():
    """Return the current process umask without changing it.

    os.umask can only be read by writing, so set a scratch value and
    immediately restore the original.
    """
    current = os.umask(0o22)
    os.umask(current)
    return current
818 | |||
def is_string_sequence(seq):
    """Return True if every element of ``seq`` is a string.

    NOTE(review): an empty ``seq`` trips the assert below — callers
    appear to guarantee non-empty input; preserved as-is.
    """
    result = True
    last_index = None
    for last_index, item in enumerate(seq):
        if not isinstance(item, string_types):
            result = False
            break
    assert last_index is not None
    return result
828 | |||
# "<name>-<version>" at the start of a filename: the name is groups of
# word characters separated by '.' or '-', the version is the trailing
# run of version-ish characters. Case-insensitive.
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
# A "-pyX" or "-pyX.Y" Python-version tag in a filename.
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
832 | |||
833 | |||
def split_filename(filename, project_name=None):
    """
    Extract name, version, python version from a filename (no extension)

    Return name, version, pyver or None
    """
    pyver = None
    fname = unquote(filename).replace(' ', '-')
    tag = PYTHON_VERSION.search(fname)
    if tag:
        # Strip a trailing -pyX.Y tag, remembering the version.
        pyver = tag.group(1)
        fname = fname[:tag.start()]
    if project_name and len(fname) > len(project_name) + 1:
        # Prefer an exact project-name prefix when the caller knows it.
        prefixed = re.match(re.escape(project_name) + r'\b', fname)
        if prefixed:
            cut = prefixed.end()
            return fname[:cut], fname[cut + 1:], pyver
    # Fall back to the generic name-version pattern.
    generic = PROJECT_NAME_AND_VERSION.match(fname)
    if generic:
        return generic.group(1), generic.group(3), pyver
    return None
857 | |||
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')

def parse_name_and_version(p):
    """
    A utility method used to get name and version from a string.

    From e.g. a Provides-Dist value.

    :param p: A value in a form 'foo (1.0)'
    :return: The name and version as a tuple.
    """
    match = NAME_VERSION_RE.match(p)
    if match is None:
        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
    groups = match.groupdict()
    # Names are normalised: surrounding space stripped, lowercased.
    return groups['name'].strip().lower(), groups['ver']
876 | |||
def get_extras(requested, available):
    """
    Resolve a set of requested extras against the declared ones.

    '*' selects every available extra; a '-name' entry removes a
    previously selected extra (a bare '-' is kept literally).
    Undeclared extras are logged as warnings but otherwise processed.
    """
    result = set()
    requested = set(requested or [])
    available = set(available or [])
    if '*' in requested:
        requested.discard('*')
        result.update(available)
    for entry in requested:
        if entry == '-':
            result.add(entry)
        elif entry.startswith('-'):
            unwanted = entry[1:]
            if unwanted not in available:
                logger.warning('undeclared extra: %s' % unwanted)
            result.discard(unwanted)
        else:
            if entry not in available:
                logger.warning('undeclared extra: %s' % entry)
            result.add(entry)
    return result
898 | # | ||
899 | # Extended metadata functionality | ||
900 | # | ||
901 | |||
def _get_external_data(url):
    """
    Fetch *url* and decode the response as JSON, returning {} on any
    failure (network errors and unexpected content types included).
    """
    result = {}
    try:
        # urlopen might fail if it runs into redirections,
        # because of Python issue #13696. Fixed in locators
        # using a custom redirect handler.
        resp = urlopen(url)
        headers = resp.info()
        content_type = headers.get('Content-Type')
        if not content_type.startswith('application/json'):
            logger.debug('Unexpected response for JSON request: %s',
                         content_type)
        else:
            # Decode the byte stream as UTF-8 on the fly.
            result = json.load(codecs.getreader('utf-8')(resp))
    except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
    return result
921 | |||
# Base URL of the external (red-dove.com) PyPI metadata mirror.
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
923 | |||
def get_project_data(name):
    """
    Fetch the external JSON metadata for project *name* ({} on failure).
    """
    rel = '%s/%s/project.json' % (name[0].upper(), name)
    return _get_external_data(urljoin(_external_data_base_url, rel))
929 | |||
def get_package_data(name, version):
    """
    Fetch the external JSON metadata for one release of *name*
    ({} on failure).
    """
    rel = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
    return _get_external_data(urljoin(_external_data_base_url, rel))
934 | |||
935 | |||
class Cache(object):
    """
    A class implementing a cache for resources that need to live in the file system
    e.g. shared libraries. This class was moved from resources to here because it
    could be used by other modules, e.g. the wheel module.
    """

    def __init__(self, base):
        """
        Initialise an instance.

        :param base: The base directory where the cache should be located.
        """
        # Use 'isdir' rather than 'exists' so that a plain file with this
        # name makes makedirs fail instead of being silently accepted.
        if not os.path.isdir(base):  # pragma: no cover
            os.makedirs(base)
        # Warn when group/other permission bits are set on the directory.
        if (os.stat(base).st_mode & 0o77) != 0:
            logger.warning('Directory \'%s\' is not private', base)
        self.base = os.path.abspath(os.path.normpath(base))

    def prefix_to_dir(self, prefix):
        """
        Converts a resource prefix to a directory name in the cache.
        """
        return path_to_cache_dir(prefix)

    def clear(self):
        """
        Clear the cache, returning the paths which could not be removed.
        """
        failures = []
        for name in os.listdir(self.base):
            path = os.path.join(self.base, name)
            try:
                if os.path.islink(path) or os.path.isfile(path):
                    os.remove(path)
                elif os.path.isdir(path):
                    shutil.rmtree(path)
            except Exception:
                failures.append(path)
        return failures
978 | |||
979 | |||
class EventMixin(object):
    """
    A very simple publish/subscribe system.
    """
    def __init__(self):
        # event name -> deque of subscriber callables
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        """
        Add a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be added (and called when the
                           event is published).
        :param append: Whether to append or prepend the subscriber to an
                       existing subscriber list for the event.
        """
        try:
            queue = self._subscribers[event]
        except KeyError:
            self._subscribers[event] = deque([subscriber])
        else:
            if append:
                queue.append(subscriber)
            else:
                queue.appendleft(subscriber)

    def remove(self, event, subscriber):
        """
        Remove a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be removed.
        :raises ValueError: if the event has no subscribers.
        """
        if event not in self._subscribers:
            raise ValueError('No subscribers: %r' % event)
        self._subscribers[event].remove(subscriber)

    def get_subscribers(self, event):
        """
        Return an iterator for the subscribers for an event.
        :param event: The event to return subscribers for.
        """
        return iter(self._subscribers.get(event, ()))

    def publish(self, event, *args, **kwargs):
        """
        Publish an event and return a list of values returned by its
        subscribers.

        :param event: The event to publish.
        :param args: The positional arguments to pass to the event's
                     subscribers.
        :param kwargs: The keyword arguments to pass to the event's
                       subscribers.
        """
        results = []
        for subscriber in self.get_subscribers(event):
            try:
                outcome = subscriber(event, *args, **kwargs)
            except Exception:
                # A failing subscriber contributes None rather than
                # aborting the whole publication.
                logger.exception('Exception during event publication')
                outcome = None
            results.append(outcome)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                     event, args, kwargs, results)
        return results
1048 | |||
1049 | # | ||
1050 | # Simple sequencing | ||
1051 | # | ||
class Sequencer(object):
    """
    Record 'pred must come before succ' constraints between steps and
    compute orderings which respect them.
    """
    def __init__(self):
        self._preds = {}    # step -> set of its predecessors
        self._succs = {}    # step -> set of its successors
        self._nodes = set()     # nodes with no preds/succs

    def add_node(self, node):
        """Register *node* as a step even though it has no edges."""
        self._nodes.add(node)

    def remove_node(self, node, edges=False):
        """
        Remove *node* from the isolated-node set; if *edges* is true, also
        remove every edge in which it participates and then drop any
        now-empty adjacency entries.
        """
        if node in self._nodes:
            self._nodes.remove(node)
        if edges:
            for p in set(self._preds.get(node, ())):
                self.remove(p, node)
            for s in set(self._succs.get(node, ())):
                self.remove(node, s)
            # Remove empties
            for k, v in list(self._preds.items()):
                if not v:
                    del self._preds[k]
            for k, v in list(self._succs.items()):
                if not v:
                    del self._succs[k]

    def add(self, pred, succ):
        """Add the constraint that *pred* must precede *succ*."""
        assert pred != succ
        self._preds.setdefault(succ, set()).add(pred)
        self._succs.setdefault(pred, set()).add(succ)

    def remove(self, pred, succ):
        """Remove the pred -> succ constraint; ValueError if unknown."""
        assert pred != succ
        try:
            preds = self._preds[succ]
            succs = self._succs[pred]
        except KeyError:  # pragma: no cover
            raise ValueError('%r not a successor of anything' % succ)
        try:
            preds.remove(pred)
            succs.remove(succ)
        except KeyError:  # pragma: no cover
            raise ValueError('%r not a successor of %r' % (succ, pred))

    def is_step(self, step):
        # A step is known if it has any edge or was added as a bare node.
        return (step in self._preds or step in self._succs or
                step in self._nodes)

    def get_steps(self, final):
        """
        Return the steps needed to reach *final*, ordered so that each
        step's predecessors appear before it (steps reached repeatedly
        are hoisted so they run as early as required).
        """
        if not self.is_step(final):
            raise ValueError('Unknown: %r' % final)
        result = []
        todo = []
        seen = set()
        todo.append(final)
        while todo:
            step = todo.pop(0)
            if step in seen:
                # if a step was already seen,
                # move it to the end (so it will appear earlier
                # when reversed on return) ... but not for the
                # final step, as that would be confusing for
                # users
                if step != final:
                    result.remove(step)
                    result.append(step)
            else:
                seen.add(step)
                result.append(step)
                preds = self._preds.get(step, ())
                todo.extend(preds)
        return reversed(result)

    @property
    def strong_connections(self):
        # Tarjan's strongly-connected-components algorithm:
        #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
        index_counter = [0]
        stack = []
        lowlinks = {}
        index = {}
        result = []

        graph = self._succs

        def strongconnect(node):
            # set the depth index for this node to the smallest unused index
            index[node] = index_counter[0]
            lowlinks[node] = index_counter[0]
            index_counter[0] += 1
            stack.append(node)

            # Consider successors
            try:
                successors = graph[node]
            except Exception:
                successors = []
            for successor in successors:
                if successor not in lowlinks:
                    # Successor has not yet been visited
                    strongconnect(successor)
                    lowlinks[node] = min(lowlinks[node],lowlinks[successor])
                elif successor in stack:
                    # the successor is in the stack and hence in the current
                    # strongly connected component (SCC)
                    lowlinks[node] = min(lowlinks[node],index[successor])

            # If `node` is a root node, pop the stack and generate an SCC
            if lowlinks[node] == index[node]:
                connected_component = []

                while True:
                    successor = stack.pop()
                    connected_component.append(successor)
                    if successor == node: break
                component = tuple(connected_component)
                # storing the result
                result.append(component)

        for node in graph:
            if node not in lowlinks:
                strongconnect(node)

        return result

    @property
    def dot(self):
        """Render the dependency graph in Graphviz 'dot' format."""
        result = ['digraph G {']
        for succ in self._preds:
            preds = self._preds[succ]
            for pred in preds:
                result.append(' %s -> %s;' % (pred, succ))
        for node in self._nodes:
            result.append(' %s;' % node)
        result.append('}')
        return '\n'.join(result)
1186 | |||
1187 | # | ||
1188 | # Unarchiving functionality for zip, tar, tgz, tbz, whl | ||
1189 | # | ||
1190 | |||
# Archive suffixes understood by unarchive().
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
                      '.tgz', '.tbz', '.whl')
1193 | |||
def unarchive(archive_filename, dest_dir, format=None, check=True):
    """
    Unpack an archive (zip, whl, tar, tgz or tbz) below *dest_dir*.

    :param archive_filename: Path of the archive to unpack.
    :param dest_dir: Directory to extract into.
    :param format: One of 'zip', 'tgz', 'tbz', 'tar', or None to infer
                   the format from the filename extension.
    :param check: If true, verify that no member escapes *dest_dir*.
    :raises ValueError: for an unknown format or an unsafe member path.
    """

    def check_path(path):
        # Reject members whose resolved path falls outside dest_dir
        # (e.g. '../evil' or absolute paths).
        if not isinstance(path, text_type):
            path = path.decode('utf-8')
        p = os.path.abspath(os.path.join(dest_dir, path))
        # A member may resolve to dest_dir itself (e.g. '.'); anything else
        # must be strictly inside it.  (The previous 'p[plen]' check raised
        # IndexError for the dest_dir-itself case.)
        if p != dest_dir and not p.startswith(dest_dir + os.sep):
            raise ValueError('path outside destination: %r' % p)

    dest_dir = os.path.abspath(dest_dir)
    archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
        else:  # pragma: no cover
            raise ValueError('Unknown format for %r' % archive_filename)
    try:
        if format == 'zip':
            archive = ZipFile(archive_filename, 'r')
            if check:
                names = archive.namelist()
                for name in names:
                    check_path(name)
        else:
            # Derive the tarfile open mode from the format here, so that an
            # explicitly passed format works too (previously 'mode' was only
            # assigned while inferring the format from the filename, causing
            # UnboundLocalError for e.g. unarchive(fn, d, format='tgz')).
            try:
                mode = {'tgz': 'r:gz', 'tbz': 'r:bz2', 'tar': 'r'}[format]
            except KeyError:  # pragma: no cover
                raise ValueError('Unknown format %r' % format)
            archive = tarfile.open(archive_filename, mode)
            if check:
                names = archive.getnames()
                for name in names:
                    check_path(name)
        if format != 'zip' and sys.version_info[0] < 3:
            # See Python issue 17153. If the dest path contains Unicode,
            # tarfile extraction fails on Python 2.x if a member path name
            # contains non-ASCII characters - it leads to an implicit
            # bytes -> unicode conversion using ASCII to decode.
            for tarinfo in archive.getmembers():
                if not isinstance(tarinfo.name, text_type):
                    tarinfo.name = tarinfo.name.decode('utf-8')
        archive.extractall(dest_dir)

    finally:
        if archive:
            archive.close()
1246 | |||
1247 | |||
def zip_dir(directory):
    """zip a directory tree into a BytesIO object"""
    buf = io.BytesIO()
    base_len = len(directory)
    with ZipFile(buf, "w") as zf:
        for root, dirs, files in os.walk(directory):
            # Archive names are the paths relative to *directory*.
            relative = root[base_len:]
            for name in files:
                zf.write(os.path.join(root, name),
                         os.path.join(relative, name))
    return buf
1260 | |||
1261 | # | ||
1262 | # Simple progress bar | ||
1263 | # | ||
1264 | |||
# Multiplier suffixes used by Progress.speed (bytes, kilo, mega, ...).
UNITS = ('', 'K', 'M', 'G','T','P')
1266 | |||
1267 | |||
class Progress(object):
    """
    Track progress of a long-running operation between a minimum value and
    an (optionally unknown) maximum value.
    """
    unknown = 'UNKNOWN'

    def __init__(self, minval=0, maxval=100):
        # maxval may be None to indicate an open-ended operation.
        assert maxval is None or maxval >= minval
        self.min = self.cur = minval
        self.max = maxval
        self.started = None
        self.elapsed = 0
        self.done = False

    def update(self, curval):
        """Record the new current value and refresh the elapsed time."""
        assert self.min <= curval
        assert self.max is None or curval <= self.max
        self.cur = curval
        now = time.time()
        if self.started is None:
            self.started = now
        else:
            self.elapsed = now - self.started

    def increment(self, incr):
        """Advance the current value by a non-negative *incr*."""
        assert incr >= 0
        self.update(self.cur + incr)

    def start(self):
        """Begin tracking; returns self for chaining."""
        self.update(self.min)
        return self

    def stop(self):
        """Mark the operation finished (snapping to max when known)."""
        if self.max is not None:
            self.update(self.max)
        self.done = True

    @property
    def maximum(self):
        return self.unknown if self.max is None else self.max

    @property
    def percentage(self):
        """Completion as a fixed-width string, e.g. ' 50 %'."""
        if self.done:
            return '100 %'
        if self.max is None:
            return ' ?? %'
        fraction = 100.0 * (self.cur - self.min) / (self.max - self.min)
        return '%3d %%' % fraction

    def format_duration(self, duration):
        # NOTE: the condition parses as ((duration <= 0 and max is None)
        # or cur == min) - preserved exactly from the original code.
        if (duration <= 0) and self.max is None or self.cur == self.min:
            return '??:??:??'
        return time.strftime('%H:%M:%S', time.gmtime(duration))

    @property
    def ETA(self):
        """Estimated (or, when done, actual) duration as a string."""
        if self.done:
            prefix = 'Done'
            t = self.elapsed
        else:
            prefix = 'ETA '
            if self.max is None:
                t = -1
            elif self.elapsed == 0 or (self.cur == self.min):
                t = 0
            else:
                # Scale the elapsed time by the remaining fraction.
                t = float(self.max - self.min)
                t /= self.cur - self.min
                t = (t - 1) * self.elapsed
        return '%s: %s' % (prefix, self.format_duration(t))

    @property
    def speed(self):
        """Throughput as a human-readable string, e.g. '12 KB/s'."""
        if self.elapsed == 0:
            rate = 0.0
        else:
            rate = (self.cur - self.min) / self.elapsed
        for unit in UNITS:
            if rate < 1000:
                break
            rate /= 1000.0
        return '%d %sB/s' % (rate, unit)
1356 | |||
1357 | # | ||
1358 | # Glob functionality | ||
1359 | # | ||
1360 | |||
# '{a,b,c}' alternation groups in extended glob patterns.
RICH_GLOB = re.compile(r'\{([^}]*)\}')
# A '**' not standing alone between separators is invalid.
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
# Detects an unmatched '{' or '}' in the pattern.
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
1364 | |||
1365 | |||
def iglob(path_glob):
    """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
    # Validate the pattern up front, then delegate to the generator.
    if _CHECK_RECURSIVE_GLOB.search(path_glob):
        raise ValueError('invalid glob %r: recursive glob "**" '
                         'must be used alone' % path_glob)
    if _CHECK_MISMATCH_SET.search(path_glob):
        raise ValueError("invalid glob %r: mismatching set marker "
                         "'{' or '}'" % path_glob)
    return _iglob(path_glob)
1375 | |||
1376 | |||
def _iglob(path_glob):
    """Generate paths matching an extended glob ({..,..} sets and **)."""
    parts = RICH_GLOB.split(path_glob, 1)
    if len(parts) > 1:
        # Expand the first {a,b,...} alternation and recurse on each option.
        assert len(parts) == 3, parts
        prefix, options, suffix = parts
        for option in options.split(','):
            for path in _iglob(''.join((prefix, option, suffix))):
                yield path
    elif '**' not in path_glob:
        for item in std_iglob(path_glob):
            yield item
    else:
        prefix, radical = path_glob.split('**', 1)
        if prefix == '':
            prefix = '.'
        if radical == '':
            radical = '*'
        else:
            # we support both '/' and '\' separators after '**'
            radical = radical.lstrip('/')
            radical = radical.lstrip('\\')
        for path, subdirs, files in os.walk(prefix):
            path = os.path.normpath(path)
            for fn in _iglob(os.path.join(path, radical)):
                yield fn
1403 | |||
1404 | if ssl: | ||
1405 | from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname, | ||
1406 | CertificateError) | ||
1407 | |||
1408 | |||
1409 | # | ||
1410 | # HTTPSConnection which verifies certificates/matches domains | ||
1411 | # | ||
1412 | |||
    class HTTPSConnection(httplib.HTTPSConnection):
        # Only defined when the ssl module is available ('if ssl:' above).
        ca_certs = None # set this to the path to the certs file (.pem)
        check_domain = True # only used if ca_certs is not None

        # noinspection PyPropertyAccess
        def connect(self):
            """
            Open the connection, wrapping the socket with SSL; when
            ca_certs is set, verify the server certificate and (if
            check_domain) match it against the host name.
            """
            sock = socket.create_connection((self.host, self.port), self.timeout)
            if getattr(self, '_tunnel_host', False):
                self.sock = sock
                self._tunnel()

            if not hasattr(ssl, 'SSLContext'):
                # For 2.x
                if self.ca_certs:
                    cert_reqs = ssl.CERT_REQUIRED
                else:
                    cert_reqs = ssl.CERT_NONE
                self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                            cert_reqs=cert_reqs,
                                            ssl_version=ssl.PROTOCOL_SSLv23,
                                            ca_certs=self.ca_certs)
            else:  # pragma: no cover
                context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                context.options |= ssl.OP_NO_SSLv2
                if self.cert_file:
                    context.load_cert_chain(self.cert_file, self.key_file)
                kwargs = {}
                if self.ca_certs:
                    context.verify_mode = ssl.CERT_REQUIRED
                    context.load_verify_locations(cafile=self.ca_certs)
                if getattr(ssl, 'HAS_SNI', False):
                    # Pass the hostname for SNI when supported.
                    kwargs['server_hostname'] = self.host
                self.sock = context.wrap_socket(sock, **kwargs)
            if self.ca_certs and self.check_domain:
                try:
                    match_hostname(self.sock.getpeercert(), self.host)
                    logger.debug('Host verified: %s', self.host)
                except CertificateError:  # pragma: no cover
                    # Tear the socket down before propagating the failure.
                    self.sock.shutdown(socket.SHUT_RDWR)
                    self.sock.close()
                    raise
1454 | |||
    class HTTPSHandler(BaseHTTPSHandler):
        """
        An HTTPS handler which passes ca_certs/check_domain settings on to
        the certificate-verifying HTTPSConnection above.
        """
        def __init__(self, ca_certs, check_domain=True):
            BaseHTTPSHandler.__init__(self)
            self.ca_certs = ca_certs
            self.check_domain = check_domain

        def _conn_maker(self, *args, **kwargs):
            """
            This is called to create a connection instance. Normally you'd
            pass a connection class to do_open, but it doesn't actually check for
            a class, and just expects a callable. As long as we behave just as a
            constructor would have, we should be OK. If it ever changes so that
            we *must* pass a class, we'll create an UnsafeHTTPSConnection class
            which just sets check_domain to False in the class definition, and
            choose which one to pass to do_open.
            """
            result = HTTPSConnection(*args, **kwargs)
            if self.ca_certs:
                result.ca_certs = self.ca_certs
                result.check_domain = self.check_domain
            return result

        def https_open(self, req):
            """
            Open the request, converting an SSL verification failure into
            a more informative CertificateError.
            """
            try:
                return self.do_open(self._conn_maker, req)
            except URLError as e:
                if 'certificate verify failed' in str(e.reason):
                    raise CertificateError('Unable to verify server certificate '
                                           'for %s' % req.host)
                else:
                    raise
1486 | |||
1487 | # | ||
1488 | # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The- | ||
1489 | # Middle proxy using HTTP listens on port 443, or an index mistakenly serves | ||
1490 | # HTML containing a http://xyz link when it should be https://xyz), | ||
1491 | # you can use the following handler class, which does not allow HTTP traffic. | ||
1492 | # | ||
1493 | # It works by inheriting from HTTPHandler - so build_opener won't add a | ||
1494 | # handler for HTTP itself. | ||
1495 | # | ||
    class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
        """
        A handler which raises for any plain-HTTP request; inheriting from
        HTTPHandler means build_opener won't install a default HTTP
        handler alongside it.
        """
        def http_open(self, req):
            raise URLError('Unexpected HTTP request on what should be a secure '
                           'connection: %s' % req)
1500 | |||
1501 | # | ||
1502 | # XML-RPC with timeouts | ||
1503 | # | ||
1504 | |||
# (major, minor) of the running interpreter, used for 2.6 workarounds below.
_ver_info = sys.version_info[:2]
1506 | |||
if _ver_info == (2, 6):
    # Python 2.6 shims: these subclasses forward extra keyword arguments
    # (notably 'timeout') to the underlying connection class.
    class HTTP(httplib.HTTP):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0:   # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))


    if ssl:
        class HTTPS(httplib.HTTPS):
            def __init__(self, host='', port=None, **kwargs):
                if port == 0:   # 0 means use port 0, not the default port
                    port = None
                self._setup(self._connection_class(host, port, **kwargs))
1521 | |||
1522 | |||
class Transport(xmlrpclib.Transport):
    """
    An XML-RPC transport over plain HTTP which supports a timeout
    (applied via the 2.6 HTTP shim above on that version).
    """
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.Transport.__init__(self, use_datetime)

    def make_connection(self, host):
        """
        Create (or reuse the cached) HTTP connection for *host*.
        """
        h, eh, x509 = self.get_host_info(host)
        if _ver_info == (2, 6):
            result = HTTP(h, timeout=self.timeout)
        else:
            # NOTE(review): on Python != 2.6 the timeout is not passed to
            # HTTPConnection here - confirm whether that is intentional.
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                self._connection = host, httplib.HTTPConnection(h)
            result = self._connection[1]
        return result
1538 | |||
if ssl:
    class SafeTransport(xmlrpclib.SafeTransport):
        """
        An XML-RPC transport over HTTPS which supports a timeout, passed
        through the connection keyword arguments.
        """
        def __init__(self, timeout, use_datetime=0):
            self.timeout = timeout
            xmlrpclib.SafeTransport.__init__(self, use_datetime)

        def make_connection(self, host):
            """
            Create (or reuse the cached) HTTPS connection for *host*.
            """
            h, eh, kwargs = self.get_host_info(host)
            if not kwargs:
                kwargs = {}
            kwargs['timeout'] = self.timeout
            if _ver_info == (2, 6):
                result = HTTPS(host, None, **kwargs)
            else:
                if not self._connection or host != self._connection[0]:
                    self._extra_headers = eh
                    self._connection = host, httplib.HTTPSConnection(h, None,
                                                                     **kwargs)
                result = self._connection[1]
            return result
1559 | |||
1560 | |||
class ServerProxy(xmlrpclib.ServerProxy):
    """
    An xmlrpclib.ServerProxy accepting an extra 'timeout' keyword, routed
    through the timeout-aware Transport/SafeTransport classes above.
    """
    def __init__(self, uri, **kwargs):
        self.timeout = timeout = kwargs.pop('timeout', None)
        # The timeout-aware transports only come into play if a timeout
        # is specified.
        if timeout is not None:
            scheme, _ = splittype(uri)
            use_datetime = kwargs.get('use_datetime', 0)
            tcls = SafeTransport if scheme == 'https' else Transport
            t = tcls(timeout, use_datetime=use_datetime)
            kwargs['transport'] = t
            self.transport = t
        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
1576 | |||
1577 | # | ||
1578 | # CSV functionality. This is provided because on 2.x, the csv module can't | ||
1579 | # handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. | ||
1580 | # | ||
1581 | |||
1582 | def _csv_open(fn, mode, **kwargs): | ||
1583 | if sys.version_info[0] < 3: | ||
1584 | mode += 'b' | ||
1585 | else: | ||
1586 | kwargs['newline'] = '' | ||
1587 | # Python 3 determines encoding from locale. Force 'utf-8' | ||
1588 | # file encoding to match other forced utf-8 encoding | ||
1589 | kwargs['encoding'] = 'utf-8' | ||
1590 | return open(fn, mode, **kwargs) | ||
1591 | |||
1592 | |||
class CSVBase(object):
    """
    Shared plumbing for CSVReader/CSVWriter: native-str csv dialect
    settings plus context-manager support that closes self.stream.
    """
    # Native strs are used because the csv API on 2.x won't take Unicode.
    defaults = {
        'delimiter': str(','),
        'quotechar': str('"'),
        'lineterminator': str('\n'),
    }

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.stream.close()
1605 | |||
1606 | |||
class CSVReader(CSVBase):
    """
    Unicode-safe CSV reader; accepts either an open binary 'stream' or a
    'path' to open via _csv_open.
    """
    def __init__(self, **kwargs):
        if 'stream' in kwargs:
            stream = kwargs['stream']
            if sys.version_info[0] >= 3:
                # needs to be a text stream
                stream = codecs.getreader('utf-8')(stream)
            self.stream = stream
        else:
            self.stream = _csv_open(kwargs['path'], 'r')
        self.reader = csv.reader(self.stream, **self.defaults)

    def __iter__(self):
        return self

    def next(self):
        """Return the next row, decoding cells to text on Python 2."""
        row = next(self.reader)
        if sys.version_info[0] < 3:
            row = [cell if isinstance(cell, text_type)
                   else cell.decode('utf-8') for cell in row]
        return row

    __next__ = next
1631 | |||
class CSVWriter(CSVBase):
    """
    Unicode-safe CSV writer; opens *fn* via _csv_open and encodes text
    cells as utf-8 on Python 2.
    """
    def __init__(self, fn, **kwargs):
        self.stream = _csv_open(fn, 'w')
        self.writer = csv.writer(self.stream, **self.defaults)

    def writerow(self, row):
        """Write one row, encoding text cells on Python 2."""
        if sys.version_info[0] < 3:
            row = [cell.encode('utf-8') if isinstance(cell, text_type)
                   else cell for cell in row]
        self.writer.writerow(row)
1646 | |||
1647 | # | ||
1648 | # Configurator functionality | ||
1649 | # | ||
1650 | |||
class Configurator(BaseConfigurator):
    """
    A configurator which can instantiate objects described by dict-based
    configuration, extending the base converters with an 'inc://' protocol
    that includes JSON from another file.
    """

    value_converters = dict(BaseConfigurator.value_converters)
    value_converters['inc'] = 'inc_convert'

    def __init__(self, config, base=None):
        # base: directory used to resolve relative inc:// paths.
        super(Configurator, self).__init__(config)
        self.base = base or os.getcwd()

    def configure_custom(self, config):
        """
        Instantiate the object described by *config*: '()' names the
        callable, '[]' gives positional args, '.' gives attributes to set
        on the result, and remaining valid-identifier keys become kwargs.
        """
        def convert(o):
            # Recursively convert containers, instantiating nested '()'
            # dicts and passing leaves through self.convert().
            if isinstance(o, (list, tuple)):
                result = type(o)([convert(i) for i in o])
            elif isinstance(o, dict):
                if '()' in o:
                    result = self.configure_custom(o)
                else:
                    result = {}
                    for k in o:
                        result[k] = convert(o[k])
            else:
                result = self.convert(o)
            return result

        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
        kwargs = dict(items)
        result = c(*args, **kwargs)
        if props:
            for n, v in props.items():
                setattr(result, n, convert(v))
        return result

    def __getitem__(self, key):
        result = self.config[key]
        if isinstance(result, dict) and '()' in result:
            # Instantiate lazily on first access and cache the instance.
            self.config[key] = result = self.configure_custom(result)
        return result

    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        if not os.path.isabs(value):
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
1704 | |||
1705 | |||
class SubprocessMixin(object):
    """
    Mixin for running subprocesses and capturing their output
    """
    def __init__(self, verbose=False, progress=None):
        self.verbose = verbose
        self.progress = progress

    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a progress
        callable (if specified) or write progress information to sys.stderr.
        """
        progress = self.progress
        verbose = self.verbose
        while True:
            line = stream.readline()
            if not line:
                break
            if progress is not None:
                progress(line, context)
            else:
                # Without a progress callable, echo a dot (or the full
                # line in verbose mode) to stderr.
                text = line.decode('utf-8') if verbose else '.'
                sys.stderr.write(text)
                sys.stderr.flush()
        stream.close()

    def run_command(self, cmd, **kwargs):
        """
        Run *cmd*, draining stdout/stderr on two helper threads, and
        return the finished Popen object.
        """
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, **kwargs)
        workers = [
            threading.Thread(target=self.reader, args=(proc.stdout, 'stdout')),
            threading.Thread(target=self.reader, args=(proc.stderr, 'stderr')),
        ]
        for worker in workers:
            worker.start()
        proc.wait()
        for worker in workers:
            worker.join()
        if self.progress is not None:
            self.progress('done.', 'main')
        elif self.verbose:
            sys.stderr.write('done.\n')
        return proc
1750 | |||
1751 | |||
def normalize_name(name):
    """Normalize a python package name a la PEP 503"""
    # https://www.python.org/dev/peps/pep-0503/#normalized-names
    # Lower-casing first and collapsing runs of '-', '_', '.' to a single
    # '-' yields the same result as the original order of operations.
    return re.sub('[-_.]+', '-', name.lower())