You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 

980 line
34 KiB

  1. from __future__ import absolute_import
  2. import cgi
  3. import email.utils
  4. import getpass
  5. import json
  6. import logging
  7. import mimetypes
  8. import os
  9. import platform
  10. import re
  11. import shutil
  12. import sys
  13. from pip._vendor import requests, six, urllib3
  14. from pip._vendor.cachecontrol import CacheControlAdapter
  15. from pip._vendor.cachecontrol.caches import FileCache
  16. from pip._vendor.lockfile import LockError
  17. from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
  18. from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
  19. from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
  20. from pip._vendor.requests.structures import CaseInsensitiveDict
  21. from pip._vendor.requests.utils import get_netrc_auth
  22. # NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is
  23. # why we ignore the type on this import
  24. from pip._vendor.six.moves import xmlrpc_client # type: ignore
  25. from pip._vendor.six.moves.urllib import parse as urllib_parse
  26. from pip._vendor.six.moves.urllib import request as urllib_request
  27. from pip._vendor.urllib3.util import IS_PYOPENSSL
  28. import pip
  29. from pip._internal.exceptions import HashMismatch, InstallationError
  30. from pip._internal.locations import write_delete_marker_file
  31. from pip._internal.models.index import PyPI
  32. from pip._internal.utils.encoding import auto_decode
  33. from pip._internal.utils.filesystem import check_path_owner
  34. from pip._internal.utils.glibc import libc_ver
  35. from pip._internal.utils.misc import (
  36. ARCHIVE_EXTENSIONS, ask_path_exists, backup_dir, consume, display_path,
  37. format_size, get_installed_version, rmtree, split_auth_from_netloc,
  38. splitext, unpack_file,
  39. )
  40. from pip._internal.utils.temp_dir import TempDirectory
  41. from pip._internal.utils.typing import MYPY_CHECK_RUNNING
  42. from pip._internal.utils.ui import DownloadProgressProvider
  43. from pip._internal.vcs import vcs
  44. if MYPY_CHECK_RUNNING:
  45. from typing import (
  46. Optional, Tuple, Dict, IO, Text, Union
  47. )
  48. from pip._internal.models.link import Link
  49. from pip._internal.utils.hashes import Hashes
  50. from pip._internal.vcs import AuthInfo
# Probe for TLS support at import time: the stdlib ssl module may be absent
# on minimal Python builds, in which case pyOpenSSL (via urllib3) may still
# provide HTTPS support.
try:
    import ssl  # noqa
except ImportError:
    ssl = None

# True when HTTPS is possible, either through stdlib ssl or pyOpenSSL.
HAS_TLS = (ssl is not None) or IS_PYOPENSSL

__all__ = ['get_file_content',
           'is_url', 'url_to_path', 'path_to_url',
           'is_archive_file', 'unpack_vcs_link',
           'unpack_file_url', 'is_vcs_url', 'is_file_url',
           'unpack_http_url', 'unpack_url']


logger = logging.getLogger(__name__)


# These are environment variables present when running under various
# CI systems. For each variable, some CI systems that use the variable
# are indicated. The collection was chosen so that for each of a number
# of popular systems, at least one of the environment variables is used.
# This list is used to provide some indication of and lower bound for
# CI traffic to PyPI. Thus, it is okay if the list is not comprehensive.
# For more background, see: https://github.com/pypa/pip/issues/5499
CI_ENVIRONMENT_VARIABLES = (
    # Azure Pipelines
    'BUILD_BUILDID',
    # Jenkins
    'BUILD_ID',
    # AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI
    'CI',
)
  77. def looks_like_ci():
  78. # type: () -> bool
  79. """
  80. Return whether it looks like pip is running under CI.
  81. """
  82. # We don't use the method of checking for a tty (e.g. using isatty())
  83. # because some CI systems mimic a tty (e.g. Travis CI). Thus that
  84. # method doesn't provide definitive information in either direction.
  85. return any(name in os.environ for name in CI_ENVIRONMENT_VARIABLES)
def user_agent():
    """
    Return a string representing the user agent.

    The result is "pip/<version> <json>", where the JSON blob carries
    implementation, OS, CPU, OpenSSL, setuptools and CI details that are
    gathered below from the running interpreter and platform.
    """
    data = {
        "installer": {"name": "pip", "version": pip.__version__},
        "python": platform.python_version(),
        "implementation": {
            "name": platform.python_implementation(),
        },
    }

    # Implementation-specific version reporting.
    if data["implementation"]["name"] == 'CPython':
        data["implementation"]["version"] = platform.python_version()
    elif data["implementation"]["name"] == 'PyPy':
        # Only include the release level when it is not a final release.
        if sys.pypy_version_info.releaselevel == 'final':
            pypy_version_info = sys.pypy_version_info[:3]
        else:
            pypy_version_info = sys.pypy_version_info
        data["implementation"]["version"] = ".".join(
            [str(x) for x in pypy_version_info]
        )
    elif data["implementation"]["name"] == 'Jython':
        # Complete Guess
        data["implementation"]["version"] = platform.python_version()
    elif data["implementation"]["name"] == 'IronPython':
        # Complete Guess
        data["implementation"]["version"] = platform.python_version()

    if sys.platform.startswith("linux"):
        from pip._vendor import distro
        # Drop falsy fields so the JSON only contains what was detected.
        distro_infos = dict(filter(
            lambda x: x[1],
            zip(["name", "version", "id"], distro.linux_distribution()),
        ))
        libc = dict(filter(
            lambda x: x[1],
            zip(["lib", "version"], libc_ver()),
        ))
        if libc:
            distro_infos["libc"] = libc
        if distro_infos:
            data["distro"] = distro_infos

    if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
        data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}

    if platform.system():
        data.setdefault("system", {})["name"] = platform.system()

    if platform.release():
        data.setdefault("system", {})["release"] = platform.release()

    if platform.machine():
        data["cpu"] = platform.machine()

    if HAS_TLS:
        data["openssl_version"] = ssl.OPENSSL_VERSION

    setuptools_version = get_installed_version("setuptools")
    if setuptools_version is not None:
        data["setuptools_version"] = setuptools_version

    # Use None rather than False so as not to give the impression that
    # pip knows it is not being run under CI. Rather, it is a null or
    # inconclusive result. Also, we include some value rather than no
    # value to make it easier to know that the check has been run.
    data["ci"] = True if looks_like_ci() else None

    user_data = os.environ.get("PIP_USER_AGENT_USER_DATA")
    if user_data is not None:
        data["user_data"] = user_data

    return "{data[installer][name]}/{data[installer][version]} {json}".format(
        data=data,
        json=json.dumps(data, separators=(",", ":"), sort_keys=True),
    )
class MultiDomainBasicAuth(AuthBase):
    """Requests auth handler that caches HTTP basic-auth credentials per
    netloc, sourcing them from the URL itself or netrc, and that can prompt
    the user interactively after a 401 response.
    """

    def __init__(self, prompting=True):
        # type: (bool) -> None
        # Whether we are allowed to interactively prompt for credentials.
        self.prompting = prompting
        # Credential cache keyed by netloc (host[:port]).
        self.passwords = {}  # type: Dict[str, AuthInfo]

    def __call__(self, req):
        """Attach basic auth to `req` and register the 401 retry hook."""
        parsed = urllib_parse.urlparse(req.url)

        # Split the credentials from the netloc.
        netloc, url_user_password = split_auth_from_netloc(parsed.netloc)

        # Set the url of the request to the url without any credentials
        req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])

        # Use any stored credentials that we have for this netloc
        username, password = self.passwords.get(netloc, (None, None))

        # Use the credentials embedded in the url if we have none stored
        if username is None:
            username, password = url_user_password

        # Get creds from netrc if we still don't have them
        if username is None and password is None:
            netrc_auth = get_netrc_auth(req.url)
            username, password = netrc_auth if netrc_auth else (None, None)

        if username or password:
            # Store the username and password
            self.passwords[netloc] = (username, password)

            # Send the basic auth with this request
            req = HTTPBasicAuth(username or "", password or "")(req)

        # Attach a hook to handle 401 responses
        req.register_hook("response", self.handle_401)

        return req

    def handle_401(self, resp, **kwargs):
        """Response hook: on a 401, prompt for credentials and retry once."""
        # We only care about 401 responses, anything else we want to just
        # pass through the actual response
        if resp.status_code != 401:
            return resp

        # We are not able to prompt the user so simply return the response
        if not self.prompting:
            return resp

        parsed = urllib_parse.urlparse(resp.url)

        # Prompt the user for a new username and password
        username = six.moves.input("User for %s: " % parsed.netloc)
        password = getpass.getpass("Password: ")

        # Store the new username and password to use for future requests
        if username or password:
            self.passwords[parsed.netloc] = (username, password)

        # Consume content and release the original connection to allow our new
        # request to reuse the same one.
        resp.content
        resp.raw.release_conn()

        # Add our new username and password to the request
        req = HTTPBasicAuth(username or "", password or "")(resp.request)
        # Warn (instead of prompting again) if the retry also gets a 401.
        req.register_hook("response", self.warn_on_401)

        # Send our new request
        new_resp = resp.connection.send(req, **kwargs)
        new_resp.history.append(resp)

        return new_resp

    def warn_on_401(self, resp, **kwargs):
        # warn user that they provided incorrect credentials
        if resp.status_code == 401:
            logger.warning('401 Error, Credentials not correct for %s',
                           resp.request.url)
class LocalFSAdapter(BaseAdapter):
    """Transport adapter that serves file:// URLs straight off the local
    filesystem, so they can be fetched through a requests Session like any
    other URL.
    """

    def send(self, request, stream=None, timeout=None, verify=None, cert=None,
             proxies=None):
        """Build a Response whose body is the local file named by the URL."""
        pathname = url_to_path(request.url)

        resp = Response()
        resp.status_code = 200
        resp.url = request.url

        try:
            stats = os.stat(pathname)
        except OSError as exc:
            # A missing or unreadable file is reported as a 404, with the
            # exception stashed in resp.raw.
            resp.status_code = 404
            resp.raw = exc
        else:
            modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
            content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
            resp.headers = CaseInsensitiveDict({
                "Content-Type": content_type,
                "Content-Length": stats.st_size,
                "Last-Modified": modified,
            })

            # Hand the open file object to requests; closing the response
            # closes the file.
            resp.raw = open(pathname, "rb")
            resp.close = resp.raw.close

        return resp

    def close(self):
        # Nothing to release; there is no connection pool.
        pass
  236. class SafeFileCache(FileCache):
  237. """
  238. A file based cache which is safe to use even when the target directory may
  239. not be accessible or writable.
  240. """
  241. def __init__(self, *args, **kwargs):
  242. super(SafeFileCache, self).__init__(*args, **kwargs)
  243. # Check to ensure that the directory containing our cache directory
  244. # is owned by the user current executing pip. If it does not exist
  245. # we will check the parent directory until we find one that does exist.
  246. # If it is not owned by the user executing pip then we will disable
  247. # the cache and log a warning.
  248. if not check_path_owner(self.directory):
  249. logger.warning(
  250. "The directory '%s' or its parent directory is not owned by "
  251. "the current user and the cache has been disabled. Please "
  252. "check the permissions and owner of that directory. If "
  253. "executing pip with sudo, you may want sudo's -H flag.",
  254. self.directory,
  255. )
  256. # Set our directory to None to disable the Cache
  257. self.directory = None
  258. def get(self, *args, **kwargs):
  259. # If we don't have a directory, then the cache should be a no-op.
  260. if self.directory is None:
  261. return
  262. try:
  263. return super(SafeFileCache, self).get(*args, **kwargs)
  264. except (LockError, OSError, IOError):
  265. # We intentionally silence this error, if we can't access the cache
  266. # then we can just skip caching and process the request as if
  267. # caching wasn't enabled.
  268. pass
  269. def set(self, *args, **kwargs):
  270. # If we don't have a directory, then the cache should be a no-op.
  271. if self.directory is None:
  272. return
  273. try:
  274. return super(SafeFileCache, self).set(*args, **kwargs)
  275. except (LockError, OSError, IOError):
  276. # We intentionally silence this error, if we can't access the cache
  277. # then we can just skip caching and process the request as if
  278. # caching wasn't enabled.
  279. pass
  280. def delete(self, *args, **kwargs):
  281. # If we don't have a directory, then the cache should be a no-op.
  282. if self.directory is None:
  283. return
  284. try:
  285. return super(SafeFileCache, self).delete(*args, **kwargs)
  286. except (LockError, OSError, IOError):
  287. # We intentionally silence this error, if we can't access the cache
  288. # then we can just skip caching and process the request as if
  289. # caching wasn't enabled.
  290. pass
class InsecureHTTPAdapter(HTTPAdapter):
    """HTTPAdapter that disables TLS certificate verification entirely."""

    def cert_verify(self, conn, url, verify, cert):
        # Never require or validate server certificates on this connection.
        conn.cert_reqs = 'CERT_NONE'
        conn.ca_certs = None
class PipSession(requests.Session):
    """requests Session preconfigured for pip: user agent, multi-domain
    basic auth, retries, secure-only response caching, and file:// support.
    """

    # Default timeout (in seconds) applied to every request made through
    # this session; None defers to requests' own default behaviour.
    timeout = None  # type: Optional[int]

    def __init__(self, *args, **kwargs):
        """
        :param retries: total retry count handed to urllib3.Retry.
        :param cache: directory for the HTTP cache; falsy disables caching.
        :param insecure_hosts: https hosts whose TLS errors are ignored.
        """
        retries = kwargs.pop("retries", 0)
        cache = kwargs.pop("cache", None)
        insecure_hosts = kwargs.pop("insecure_hosts", [])

        super(PipSession, self).__init__(*args, **kwargs)

        # Attach our User Agent to the request
        self.headers["User-Agent"] = user_agent()

        # Attach our Authentication handler to the session
        self.auth = MultiDomainBasicAuth()

        # Create our urllib3.Retry instance which will allow us to customize
        # how we handle retries.
        retries = urllib3.Retry(
            # Set the total number of retries that a particular request can
            # have.
            total=retries,

            # A 503 error from PyPI typically means that the Fastly -> Origin
            # connection got interrupted in some way. A 503 error in general
            # is typically considered a transient error so we'll go ahead and
            # retry it.
            # A 500 may indicate transient error in Amazon S3
            # A 520 or 527 - may indicate transient error in CloudFlare
            status_forcelist=[500, 503, 520, 527],

            # Add a small amount of back off between failed requests in
            # order to prevent hammering the service.
            backoff_factor=0.25,
        )

        # We want to _only_ cache responses on securely fetched origins. We do
        # this because we can't validate the response of an insecurely fetched
        # origin, and we don't want someone to be able to poison the cache and
        # require manual eviction from the cache to fix it.
        if cache:
            secure_adapter = CacheControlAdapter(
                cache=SafeFileCache(cache, use_dir_lock=True),
                max_retries=retries,
            )
        else:
            secure_adapter = HTTPAdapter(max_retries=retries)

        # Our Insecure HTTPAdapter disables HTTPS validation. It does not
        # support caching (see above) so we'll use it for all http:// URLs as
        # well as any https:// host that we've marked as ignoring TLS errors
        # for.
        insecure_adapter = InsecureHTTPAdapter(max_retries=retries)

        self.mount("https://", secure_adapter)
        self.mount("http://", insecure_adapter)

        # Enable file:// urls
        self.mount("file://", LocalFSAdapter())

        # We want to use a non-validating adapter for any requests which are
        # deemed insecure.
        for host in insecure_hosts:
            self.mount("https://{}/".format(host), insecure_adapter)

    def request(self, method, url, *args, **kwargs):
        # Allow setting a default timeout on a session
        kwargs.setdefault("timeout", self.timeout)

        # Dispatch the actual request
        return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
    # type: (str, Optional[str], Optional[PipSession]) -> Tuple[str, Text]
    """Gets the content of a file; it may be a filename, file: URL, or
    http: URL.  Returns (location, content).  Content is unicode.

    :param url: File path or url.
    :param comes_from: Origin description of requirements.
    :param session: Instance of pip.download.PipSession.
    :raises TypeError: if no session is provided.
    :raises InstallationError: if a remote requirements file references a
        local file: URL, or the file cannot be opened.
    """
    if session is None:
        raise TypeError(
            "get_file_content() missing 1 required keyword argument: 'session'"
        )

    match = _scheme_re.search(url)
    if match:
        scheme = match.group(1).lower()
        # Refuse local file: references coming from a remote (http) source.
        if (scheme == 'file' and comes_from and
                comes_from.startswith('http')):
            raise InstallationError(
                'Requirements file %s references URL %s, which is local'
                % (comes_from, url))
        if scheme == 'file':
            path = url.split(':', 1)[1]
            path = path.replace('\\', '/')
            match = _url_slash_drive_re.match(path)
            if match:
                # Windows drive letter written as "/c|/..." -> "c:/...".
                path = match.group(1) + ':' + path.split('|', 1)[1]
            path = urllib_parse.unquote(path)
            if path.startswith('/'):
                # Collapse any run of leading slashes down to one.
                path = '/' + path.lstrip('/')
            url = path
        else:
            # FIXME: catch some errors
            resp = session.get(url)
            resp.raise_for_status()
            return resp.url, resp.text
    # Plain path (or a file: URL converted to a path above): read from disk.
    try:
        with open(url, 'rb') as f:
            content = auto_decode(f.read())
    except IOError as exc:
        raise InstallationError(
            'Could not open requirements file: %s' % str(exc)
        )
    return url, content
# Matches the scheme prefix of http/https/file URLs, case-insensitively.
_scheme_re = re.compile(r'^(http|https|file):', re.I)
# Matches a Windows drive letter written pipe-style, e.g. "/c|/path".
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
  397. def is_url(name):
  398. # type: (Union[str, Text]) -> bool
  399. """Returns true if the name looks like a URL"""
  400. if ':' not in name:
  401. return False
  402. scheme = name.split(':', 1)[0].lower()
  403. return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
  404. def url_to_path(url):
  405. # type: (str) -> str
  406. """
  407. Convert a file: URL to a path.
  408. """
  409. assert url.startswith('file:'), (
  410. "You can only turn file: urls into filenames (not %r)" % url)
  411. _, netloc, path, _, _ = urllib_parse.urlsplit(url)
  412. if not netloc or netloc == 'localhost':
  413. # According to RFC 8089, same as empty authority.
  414. netloc = ''
  415. elif sys.platform == 'win32':
  416. # If we have a UNC path, prepend UNC share notation.
  417. netloc = '\\\\' + netloc
  418. else:
  419. raise ValueError(
  420. 'non-local file URIs are not supported on this platform: %r'
  421. % url
  422. )
  423. path = urllib_request.url2pathname(netloc + path)
  424. return path
  425. def path_to_url(path):
  426. # type: (Union[str, Text]) -> str
  427. """
  428. Convert a path to a file: URL. The path will be made absolute and have
  429. quoted path parts.
  430. """
  431. path = os.path.normpath(os.path.abspath(path))
  432. url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
  433. return url
  434. def is_archive_file(name):
  435. # type: (str) -> bool
  436. """Return True if `name` is a considered as an archive file."""
  437. ext = splitext(name)[1].lower()
  438. if ext in ARCHIVE_EXTENSIONS:
  439. return True
  440. return False
def unpack_vcs_link(link, location):
    # Check out / export the VCS link's target into `location` using the
    # backend whose scheme matches the link.
    vcs_backend = _get_used_vcs_backend(link)
    vcs_backend.unpack(location)
  444. def _get_used_vcs_backend(link):
  445. for backend in vcs.backends:
  446. if link.scheme in backend.schemes:
  447. vcs_backend = backend(link.url)
  448. return vcs_backend
def is_vcs_url(link):
    # type: (Link) -> bool
    # True when some registered VCS backend claims the link's scheme.
    return bool(_get_used_vcs_backend(link))
  452. def is_file_url(link):
  453. # type: (Link) -> bool
  454. return link.url.lower().startswith('file:')
def is_dir_url(link):
    # type: (Link) -> bool
    """Return whether a file:// Link points to a directory.

    ``link`` must not have any other scheme but file://. Call is_file_url()
    first.
    """
    link_path = url_to_path(link.url_without_fragment)
    return os.path.isdir(link_path)
def _progress_indicator(iterable, *args, **kwargs):
    # No-op progress indicator: yields the chunks unchanged. Used in place
    # of DownloadProgressProvider when no progress bar should be shown.
    return iterable
def _download_url(
    resp,  # type: Response
    link,  # type: Link
    content_file,  # type: IO
    hashes,  # type: Hashes
    progress_bar  # type: str
):
    # type: (...) -> None
    """Stream the response body into `content_file`, optionally verifying
    `hashes` over the streamed chunks and showing a progress indicator.
    """
    try:
        total_length = int(resp.headers['content-length'])
    except (ValueError, KeyError, TypeError):
        # Missing or malformed header: treat length as unknown.
        total_length = 0

    cached_resp = getattr(resp, "from_cache", False)
    # Decide whether to show progress: never for quiet logging or cached
    # responses; otherwise for large (>40KB) or unknown-length downloads.
    if logger.getEffectiveLevel() > logging.INFO:
        show_progress = False
    elif cached_resp:
        show_progress = False
    elif total_length > (40 * 1000):
        show_progress = True
    elif not total_length:
        show_progress = True
    else:
        show_progress = False

    show_url = link.show_url

    def resp_read(chunk_size):
        # Yield raw (undecoded) chunks from the response body.
        try:
            # Special case for urllib3.
            for chunk in resp.raw.stream(
                    chunk_size,
                    # We use decode_content=False here because we don't
                    # want urllib3 to mess with the raw bytes we get
                    # from the server. If we decompress inside of
                    # urllib3 then we cannot verify the checksum
                    # because the checksum will be of the compressed
                    # file. This breakage will only occur if the
                    # server adds a Content-Encoding header, which
                    # depends on how the server was configured:
                    # - Some servers will notice that the file isn't a
                    #   compressible file and will leave the file alone
                    #   and with an empty Content-Encoding
                    # - Some servers will notice that the file is
                    #   already compressed and will leave the file
                    #   alone and will add a Content-Encoding: gzip
                    #   header
                    # - Some servers won't notice anything at all and
                    #   will take a file that's already been compressed
                    #   and compress it again and set the
                    #   Content-Encoding: gzip header
                    #
                    # By setting this not to decode automatically we
                    # hope to eliminate problems with the second case.
                    decode_content=False):
                yield chunk
        except AttributeError:
            # Standard file-like object.
            while True:
                chunk = resp.raw.read(chunk_size)
                if not chunk:
                    break
                yield chunk

    def written_chunks(chunks):
        # Write each chunk to content_file, passing it through for hashing.
        for chunk in chunks:
            content_file.write(chunk)
            yield chunk

    progress_indicator = _progress_indicator

    # PyPI links get the shortened display URL; everything else shows the
    # URL without its fragment.
    if link.netloc == PyPI.netloc:
        url = show_url
    else:
        url = link.url_without_fragment

    if show_progress:  # We don't show progress on cached responses
        progress_indicator = DownloadProgressProvider(progress_bar,
                                                      max=total_length)
        if total_length:
            logger.info("Downloading %s (%s)", url, format_size(total_length))
        else:
            logger.info("Downloading %s", url)
    elif cached_resp:
        logger.info("Using cached %s", url)
    else:
        logger.info("Downloading %s", url)

    logger.debug('Downloading from URL %s', link)

    downloaded_chunks = written_chunks(
        progress_indicator(
            resp_read(CONTENT_CHUNK_SIZE),
            CONTENT_CHUNK_SIZE
        )
    )
    # Consuming the generator is what actually drives the download; hashing
    # and writing happen as a side effect of iteration.
    if hashes:
        hashes.check_against_chunks(downloaded_chunks)
    else:
        consume(downloaded_chunks)
  556. def _copy_file(filename, location, link):
  557. copy = True
  558. download_location = os.path.join(location, link.filename)
  559. if os.path.exists(download_location):
  560. response = ask_path_exists(
  561. 'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)abort' %
  562. display_path(download_location), ('i', 'w', 'b', 'a'))
  563. if response == 'i':
  564. copy = False
  565. elif response == 'w':
  566. logger.warning('Deleting %s', display_path(download_location))
  567. os.remove(download_location)
  568. elif response == 'b':
  569. dest_file = backup_dir(download_location)
  570. logger.warning(
  571. 'Backing up %s to %s',
  572. display_path(download_location),
  573. display_path(dest_file),
  574. )
  575. shutil.move(download_location, dest_file)
  576. elif response == 'a':
  577. sys.exit(-1)
  578. if copy:
  579. shutil.copy(filename, download_location)
  580. logger.info('Saved %s', display_path(download_location))
def unpack_http_url(
    link,  # type: Link
    location,  # type: str
    download_dir=None,  # type: Optional[str]
    session=None,  # type: Optional[PipSession]
    hashes=None,  # type: Optional[Hashes]
    progress_bar="on"  # type: str
):
    # type: (...) -> None
    """Fetch an http(s) archive link and unpack it into `location`.

    A previously-downloaded copy found in `download_dir` (with a matching
    hash) is reused instead of re-downloading.

    :raises TypeError: if no session is provided.
    """
    if session is None:
        raise TypeError(
            "unpack_http_url() missing 1 required keyword argument: 'session'"
        )

    with TempDirectory(kind="unpack") as temp_dir:
        # If a download dir is specified, is the file already downloaded there?
        already_downloaded_path = None
        if download_dir:
            already_downloaded_path = _check_download_dir(link,
                                                          download_dir,
                                                          hashes)

        if already_downloaded_path:
            from_path = already_downloaded_path
            content_type = mimetypes.guess_type(from_path)[0]
        else:
            # let's download to a tmp dir
            from_path, content_type = _download_http_url(link,
                                                         session,
                                                         temp_dir.path,
                                                         hashes,
                                                         progress_bar)

        # unpack the archive to the build dir location. even when only
        # downloading archives, they have to be unpacked to parse dependencies
        unpack_file(from_path, location, content_type, link)

        # a download dir is specified; let's copy the archive there
        if download_dir and not already_downloaded_path:
            _copy_file(from_path, download_dir, link)

        # The temp copy is no longer needed once unpacked (and copied).
        if not already_downloaded_path:
            os.unlink(from_path)
def unpack_file_url(
    link,  # type: Link
    location,  # type: str
    download_dir=None,  # type: Optional[str]
    hashes=None  # type: Optional[Hashes]
):
    # type: (...) -> None
    """Unpack link into location.

    If download_dir is provided and link points to a file, make a copy
    of the link file inside download_dir.
    """
    link_path = url_to_path(link.url_without_fragment)

    # If it's a url to a local directory
    if is_dir_url(link):
        # Replace any existing build dir contents with a copy of the tree.
        if os.path.isdir(location):
            rmtree(location)
        shutil.copytree(link_path, location, symlinks=True)
        if download_dir:
            logger.info('Link is a directory, ignoring download_dir')
        return

    # If --require-hashes is off, `hashes` is either empty, the
    # link's embedded hash, or MissingHashes; it is required to
    # match. If --require-hashes is on, we are satisfied by any
    # hash in `hashes` matching: a URL-based or an option-based
    # one; no internet-sourced hash will be in `hashes`.
    if hashes:
        hashes.check_against_path(link_path)

    # If a download dir is specified, is the file already there and valid?
    already_downloaded_path = None
    if download_dir:
        already_downloaded_path = _check_download_dir(link,
                                                      download_dir,
                                                      hashes)

    if already_downloaded_path:
        from_path = already_downloaded_path
    else:
        from_path = link_path

    content_type = mimetypes.guess_type(from_path)[0]

    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(from_path, location, content_type, link)

    # a download dir is specified and not already downloaded
    if download_dir and not already_downloaded_path:
        _copy_file(from_path, download_dir, link)
class PipXmlrpcTransport(xmlrpc_client.Transport):
    """Provide a `xmlrpclib.Transport` implementation via a `PipSession`
    object.
    """

    def __init__(self, index_url, session, use_datetime=False):
        xmlrpc_client.Transport.__init__(self, use_datetime)
        index_parts = urllib_parse.urlparse(index_url)
        # Remember the index's scheme so later requests reconstruct URLs
        # against the right protocol.
        self._scheme = index_parts.scheme
        self._session = session

    def request(self, host, handler, request_body, verbose=False):
        """POST the XML-RPC `request_body` to host/handler and parse the
        response, logging and re-raising HTTP errors.
        """
        parts = (self._scheme, host, handler, None, None, None)
        url = urllib_parse.urlunparse(parts)
        try:
            headers = {'Content-Type': 'text/xml'}
            response = self._session.post(url, data=request_body,
                                          headers=headers, stream=True)
            response.raise_for_status()
            self.verbose = verbose
            return self.parse_response(response.raw)
        except requests.HTTPError as exc:
            logger.critical(
                "HTTP error %s while getting %s",
                exc.response.status_code, url,
            )
            raise
def unpack_url(
    link,  # type: Optional[Link]
    location,  # type: Optional[str]
    download_dir=None,  # type: Optional[str]
    only_download=False,  # type: bool
    session=None,  # type: Optional[PipSession]
    hashes=None,  # type: Optional[Hashes]
    progress_bar="on"  # type: str
):
    # type: (...) -> None
    """Unpack link.
    If link is a VCS link:
      if only_download, export into download_dir and ignore location
      else unpack into location
    for other types of link:
      - unpack into location
      - if download_dir, copy the file into download_dir
      - if only_download, mark location for deletion

    :param hashes: A Hashes object, one of whose embedded hashes must match,
        or HashMismatch will be raised. If the Hashes is empty, no matches are
        required, and unhashable types of requirements (like VCS ones, which
        would ordinarily raise HashUnsupported) are allowed.
    """
    # non-editable vcs urls
    if is_vcs_url(link):
        unpack_vcs_link(link, location)

    # file urls
    elif is_file_url(link):
        unpack_file_url(link, location, download_dir, hashes=hashes)

    # http urls
    else:
        # Fall back to a default session when the caller didn't supply one.
        if session is None:
            session = PipSession()

        unpack_http_url(
            link,
            location,
            download_dir,
            session,
            hashes=hashes,
            progress_bar=progress_bar
        )

    # Mark the unpacked location for later deletion when we only needed the
    # download itself.
    if only_download:
        write_delete_marker_file(location)
  731. def _download_http_url(
  732. link, # type: Link
  733. session, # type: PipSession
  734. temp_dir, # type: str
  735. hashes, # type: Hashes
  736. progress_bar # type: str
  737. ):
  738. # type: (...) -> Tuple[str, str]
  739. """Download link url into temp_dir using provided session"""
  740. target_url = link.url.split('#', 1)[0]
  741. try:
  742. resp = session.get(
  743. target_url,
  744. # We use Accept-Encoding: identity here because requests
  745. # defaults to accepting compressed responses. This breaks in
  746. # a variety of ways depending on how the server is configured.
  747. # - Some servers will notice that the file isn't a compressible
  748. # file and will leave the file alone and with an empty
  749. # Content-Encoding
  750. # - Some servers will notice that the file is already
  751. # compressed and will leave the file alone and will add a
  752. # Content-Encoding: gzip header
  753. # - Some servers won't notice anything at all and will take
  754. # a file that's already been compressed and compress it again
  755. # and set the Content-Encoding: gzip header
  756. # By setting this to request only the identity encoding We're
  757. # hoping to eliminate the third case. Hopefully there does not
  758. # exist a server which when given a file will notice it is
  759. # already compressed and that you're not asking for a
  760. # compressed file and will then decompress it before sending
  761. # because if that's the case I don't think it'll ever be
  762. # possible to make this work.
  763. headers={"Accept-Encoding": "identity"},
  764. stream=True,
  765. )
  766. resp.raise_for_status()
  767. except requests.HTTPError as exc:
  768. logger.critical(
  769. "HTTP error %s while getting %s", exc.response.status_code, link,
  770. )
  771. raise
  772. content_type = resp.headers.get('content-type', '')
  773. filename = link.filename # fallback
  774. # Have a look at the Content-Disposition header for a better guess
  775. content_disposition = resp.headers.get('content-disposition')
  776. if content_disposition:
  777. type, params = cgi.parse_header(content_disposition)
  778. # We use ``or`` here because we don't want to use an "empty" value
  779. # from the filename param.
  780. filename = params.get('filename') or filename
  781. ext = splitext(filename)[1]
  782. if not ext:
  783. ext = mimetypes.guess_extension(content_type)
  784. if ext:
  785. filename += ext
  786. if not ext and link.url != resp.url:
  787. ext = os.path.splitext(resp.url)[1]
  788. if ext:
  789. filename += ext
  790. file_path = os.path.join(temp_dir, filename)
  791. with open(file_path, 'wb') as content_file:
  792. _download_url(resp, link, content_file, hashes, progress_bar)
  793. return file_path, content_type
  794. def _check_download_dir(link, download_dir, hashes):
  795. # type: (Link, str, Hashes) -> Optional[str]
  796. """ Check download_dir for previously downloaded file with correct hash
  797. If a correct file is found return its path else None
  798. """
  799. download_path = os.path.join(download_dir, link.filename)
  800. if os.path.exists(download_path):
  801. # If already downloaded, does its hash match?
  802. logger.info('File was already downloaded %s', download_path)
  803. if hashes:
  804. try:
  805. hashes.check_against_path(download_path)
  806. except HashMismatch:
  807. logger.warning(
  808. 'Previously-downloaded file %s has bad hash. '
  809. 'Re-downloading.',
  810. download_path
  811. )
  812. os.unlink(download_path)
  813. return None
  814. return download_path
  815. return None