Commit 1bc0eef
Blacken src/pip/_internal/network/
Progresses the black formatting of the codebase further.
pradyunsg committed Jul 23, 2021
1 parent 2de3af1 commit 1bc0eef
Showing 8 changed files with 114 additions and 105 deletions.
1 change: 0 additions & 1 deletion .pre-commit-config.yaml
@@ -25,7 +25,6 @@ repos:
         ^src/pip/_internal/commands|
         ^src/pip/_internal/index|
         ^src/pip/_internal/models|
-        ^src/pip/_internal/network|
         ^src/pip/_internal/operations|
         ^src/pip/_internal/req|
         ^src/pip/_internal/vcs|
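The exclude value in .pre-commit-config.yaml is a Python regular expression matched against each staged file's path; deleting the network line is what turns the black hook on for this directory. A minimal sketch of the effect, abridged to the entries visible in this hunk:

    import re

    # Abridged reconstruction of the exclude pattern after this commit;
    # the real pattern carries more alternatives than the hunk shows.
    exclude = re.compile(
        r"^src/pip/_internal/commands|"
        r"^src/pip/_internal/index|"
        r"^src/pip/_internal/models|"
        r"^src/pip/_internal/operations|"
        r"^src/pip/_internal/req|"
        r"^src/pip/_internal/vcs"
    )

    # network/ paths no longer match, so pre-commit now runs black on them.
    assert not exclude.search("src/pip/_internal/network/auth.py")
    assert exclude.search("src/pip/_internal/req/req_install.py")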
36 changes: 23 additions & 13 deletions src/pip/_internal/network/auth.py
@@ -31,7 +31,8 @@
     keyring = None
 except Exception as exc:
     logger.warning(
-        "Keyring is skipped due to an exception: %s", str(exc),
+        "Keyring is skipped due to an exception: %s",
+        str(exc),
     )
     keyring = None

@@ -62,14 +63,14 @@ def get_keyring_auth(url: Optional[str], username: Optional[str]) -> Optional[Au

     except Exception as exc:
         logger.warning(
-            "Keyring is skipped due to an exception: %s", str(exc),
+            "Keyring is skipped due to an exception: %s",
+            str(exc),
         )
         keyring = None
         return None


 class MultiDomainBasicAuth(AuthBase):
-
     def __init__(
         self, prompting: bool = True, index_urls: Optional[List[str]] = None
     ) -> None:
@@ -105,8 +106,12 @@ def _get_index_url(self, url: str) -> Optional[str]:
                 return u
         return None

-    def _get_new_credentials(self, original_url: str, allow_netrc: bool = True,
-                             allow_keyring: bool = False) -> AuthInfo:
+    def _get_new_credentials(
+        self,
+        original_url: str,
+        allow_netrc: bool = True,
+        allow_keyring: bool = False,
+    ) -> AuthInfo:
         """Find and return credentials for the specified URL."""
         # Split the credentials and netloc from the url.
         url, netloc, url_user_password = split_auth_netloc_from_url(
@@ -145,10 +150,12 @@ def _get_new_credentials(self, original_url: str, allow_netrc: bool = True,
         # If we don't have a password and keyring is available, use it.
         if allow_keyring:
             # The index url is more specific than the netloc, so try it first
+            # fmt: off
             kr_auth = (
                 get_keyring_auth(index_url, username) or
                 get_keyring_auth(netloc, username)
             )
+            # fmt: on
             if kr_auth:
                 logger.debug("Found credentials in keyring for %s", netloc)
                 return kr_auth
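Everything between the fmt: off and fmt: on pragmas is left exactly as written; black skips that region, which preserves the hand-aligned or-chain above. A minimal self-contained sketch of the mechanism:

    first_choice, fallback = None, "fallback"

    # fmt: off
    value = (
        first_choice or
        fallback
    )
    # fmt: on

    assert value == "fallback"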
@@ -189,9 +196,9 @@ def _get_url_and_credentials(

         assert (
             # Credentials were found
-            (username is not None and password is not None) or
+            (username is not None and password is not None)
             # Credentials were not found
-            (username is None and password is None)
+            or (username is None and password is None)
         ), f"Could not load credentials from url: {original_url}"

         return url, username, password
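Black moved the or from line end to line start here, matching PEP 8's preference for breaking before binary operators. The assertion itself encodes an all-or-nothing invariant; restated as a standalone predicate for clarity (the helper name is ours):

    def credentials_consistent(username, password):
        # True when credentials were fully found or fully absent.
        return (username is not None and password is not None) or (
            username is None and password is None
        )

    assert credentials_consistent("user", "secret")
    assert credentials_consistent(None, None)
    assert not credentials_consistent("user", None)  # half-loaded is a bug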
@@ -244,9 +251,11 @@ def handle_401(self, resp: Response, **kwargs: Any) -> Response:
         parsed = urllib.parse.urlparse(resp.url)

         # Query the keyring for credentials:
-        username, password = self._get_new_credentials(resp.url,
-                                                       allow_netrc=False,
-                                                       allow_keyring=True)
+        username, password = self._get_new_credentials(
+            resp.url,
+            allow_netrc=False,
+            allow_keyring=True,
+        )

         # Prompt the user for a new username and password
         save = False
@@ -287,7 +296,8 @@ def warn_on_401(self, resp: Response, **kwargs: Any) -> None:
         """Response callback to warn about incorrect credentials."""
         if resp.status_code == 401:
             logger.warning(
-                '401 Error, Credentials not correct for %s', resp.request.url,
+                "401 Error, Credentials not correct for %s",
+                resp.request.url,
             )

     def save_credentials(self, resp: Response, **kwargs: Any) -> None:
@@ -300,7 +310,7 @@ def save_credentials(self, resp: Response, **kwargs: Any) -> None:
             self._credentials_to_save = None
             if creds and resp.status_code < 400:
                 try:
-                    logger.info('Saving credentials to keyring')
+                    logger.info("Saving credentials to keyring")
                     keyring.set_password(*creds)
                 except Exception:
-                    logger.exception('Failed to save credentials')
+                    logger.exception("Failed to save credentials")
2 changes: 1 addition & 1 deletion src/pip/_internal/network/cache.py
@@ -50,7 +50,7 @@ def _get_cache_path(self, name: str) -> str:
     def get(self, key: str) -> Optional[bytes]:
         path = self._get_cache_path(key)
         with suppressed_cache_errors():
-            with open(path, 'rb') as f:
+            with open(path, "rb") as f:
                 return f.read()

     def set(self, key: str, value: bytes) -> None:
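Most one-line changes in this commit are black's string normalization: quotes become double quotes whenever that does not add escaping. The spellings are interchangeable at runtime:

    assert 'rb' == "rb"        # identical values; only source style differs
    assert 'it\'s' == "it's"   # the right-hand form needs no escape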
32 changes: 14 additions & 18 deletions src/pip/_internal/network/download.py
@@ -22,15 +22,15 @@

 def _get_http_response_size(resp: Response) -> Optional[int]:
     try:
-        return int(resp.headers['content-length'])
+        return int(resp.headers["content-length"])
     except (ValueError, KeyError, TypeError):
         return None


 def _prepare_download(
     resp: Response,
     link: Link,
-    progress_bar: str
+    progress_bar: str,
 ) -> Iterable[bytes]:
     total_length = _get_http_response_size(resp)

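The added comma after progress_bar: str is black's "magic trailing comma": black appends a trailing comma to multi-line constructs and, conversely, treats a pre-existing one as a request to keep one element per line instead of collapsing. A sketch with a hypothetical function:

    # Short enough to collapse, and no trailing comma: black may join it.
    def fetch(resp, link, progress_bar): ...

    # The trailing comma pins the exploded, one-parameter-per-line layout.
    def fetch_exploded(
        resp,
        link,
        progress_bar,
    ): ...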
@@ -42,7 +42,7 @@ def _prepare_download(
     logged_url = redact_auth_from_url(url)

     if total_length:
-        logged_url = '{} ({})'.format(logged_url, format_size(total_length))
+        logged_url = "{} ({})".format(logged_url, format_size(total_length))

     if is_from_cache(resp):
         logger.info("Using cached %s", logged_url)
@@ -65,9 +65,7 @@ def _prepare_download(
     if not show_progress:
         return chunks

-    return DownloadProgressProvider(
-        progress_bar, max=total_length
-    )(chunks)
+    return DownloadProgressProvider(progress_bar, max=total_length)(chunks)


 def sanitize_content_filename(filename: str) -> str:
@@ -83,7 +81,7 @@ def parse_content_disposition(content_disposition: str, default_filename: str) -
     return the default filename if the result is empty.
     """
     _type, params = cgi.parse_header(content_disposition)
-    filename = params.get('filename')
+    filename = params.get("filename")
     if filename:
         # We need to sanitize the filename to prevent directory traversal
         # in case the filename contains ".." path parts.
@@ -97,14 +95,12 @@ def _get_http_response_filename(resp: Response, link: Link) -> str:
     """
     filename = link.filename  # fallback
     # Have a look at the Content-Disposition header for a better guess
-    content_disposition = resp.headers.get('content-disposition')
+    content_disposition = resp.headers.get("content-disposition")
     if content_disposition:
         filename = parse_content_disposition(content_disposition, filename)
     ext: Optional[str] = splitext(filename)[1]
     if not ext:
-        ext = mimetypes.guess_extension(
-            resp.headers.get('content-type', '')
-        )
+        ext = mimetypes.guess_extension(resp.headers.get("content-type", ""))
         if ext:
             filename += ext
     if not ext and link.url != resp.url:
@@ -115,7 +111,7 @@ def _get_http_response_filename(resp: Response, link: Link) -> str:


 def _http_get_download(session: PipSession, link: Link) -> Response:
-    target_url = link.url.split('#', 1)[0]
+    target_url = link.url.split("#", 1)[0]
     resp = session.get(target_url, headers=HEADERS, stream=True)
     raise_for_status(resp)
     return resp
@@ -145,15 +141,14 @@ def __call__(self, link: Link, location: str) -> Tuple[str, str]:
         filepath = os.path.join(location, filename)

         chunks = _prepare_download(resp, link, self._progress_bar)
-        with open(filepath, 'wb') as content_file:
+        with open(filepath, "wb") as content_file:
             for chunk in chunks:
                 content_file.write(chunk)
-        content_type = resp.headers.get('Content-Type', '')
+        content_type = resp.headers.get("Content-Type", "")
         return filepath, content_type


 class BatchDownloader:
-
     def __init__(
         self,
         session: PipSession,
@@ -173,16 +168,17 @@ def __call__(
                 assert e.response is not None
                 logger.critical(
                     "HTTP error %s while getting %s",
-                    e.response.status_code, link,
+                    e.response.status_code,
+                    link,
                 )
                 raise

             filename = _get_http_response_filename(resp, link)
             filepath = os.path.join(location, filename)

             chunks = _prepare_download(resp, link, self._progress_bar)
-            with open(filepath, 'wb') as content_file:
+            with open(filepath, "wb") as content_file:
                 for chunk in chunks:
                     content_file.write(chunk)
-            content_type = resp.headers.get('Content-Type', '')
+            content_type = resp.headers.get("Content-Type", "")
             yield link, (filepath, content_type)
26 changes: 13 additions & 13 deletions src/pip/_internal/network/lazy_wheel.py
@@ -1,6 +1,6 @@
 """Lazy ZIP over HTTP"""

-__all__ = ['HTTPRangeRequestUnsupported', 'dist_from_wheel_url']
+__all__ = ["HTTPRangeRequestUnsupported", "dist_from_wheel_url"]

 from bisect import bisect_left, bisect_right
 from contextlib import contextmanager
@@ -53,19 +53,19 @@ def __init__(
         raise_for_status(head)
         assert head.status_code == 200
         self._session, self._url, self._chunk_size = session, url, chunk_size
-        self._length = int(head.headers['Content-Length'])
+        self._length = int(head.headers["Content-Length"])
         self._file = NamedTemporaryFile()
         self.truncate(self._length)
         self._left: List[int] = []
         self._right: List[int] = []
-        if 'bytes' not in head.headers.get('Accept-Ranges', 'none'):
-            raise HTTPRangeRequestUnsupported('range request is not supported')
+        if "bytes" not in head.headers.get("Accept-Ranges", "none"):
+            raise HTTPRangeRequestUnsupported("range request is not supported")
         self._check_zip()

     @property
     def mode(self) -> str:
         """Opening mode, which is always rb."""
-        return 'rb'
+        return "rb"

     @property
     def name(self) -> str:
@@ -94,9 +94,9 @@ def read(self, size: int = -1) -> bytes:
         """
         download_size = max(size, self._chunk_size)
         start, length = self.tell(), self._length
-        stop = length if size < 0 else min(start+download_size, length)
-        start = max(0, stop-download_size)
-        self._download(start, stop-1)
+        stop = length if size < 0 else min(start + download_size, length)
+        start = max(0, stop - download_size)
+        self._download(start, stop - 1)
         return self._file.read(size)

     def readable(self) -> bool:
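The expressions black respaced here widen small reads to at least one chunk and clamp the window to the remote file's length. Restated with worked values (the helper name is ours):

    def plan_range(start: int, size: int, length: int, chunk_size: int):
        # Mirrors read(): request at least one chunk, clamped to file end.
        download_size = max(size, chunk_size)
        stop = length if size < 0 else min(start + download_size, length)
        start = max(0, stop - download_size)
        return start, stop - 1  # inclusive byte range handed to _download

    assert plan_range(50, 3, 100, 8) == (50, 57)  # 3-byte read widened to 8
    assert plan_range(98, 4, 100, 8) == (92, 99)  # clamped at end of file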
@@ -170,9 +170,9 @@ def _stream_response(
     ) -> Response:
         """Return HTTP response to a range request from start to end."""
         headers = base_headers.copy()
-        headers['Range'] = f'bytes={start}-{end}'
+        headers["Range"] = f"bytes={start}-{end}"
         # TODO: Get range requests to be correctly cached
-        headers['Cache-Control'] = 'no-cache'
+        headers["Cache-Control"] = "no-cache"
         return self._session.get(self._url, headers=headers, stream=True)

     def _merge(
@@ -187,11 +187,11 @@ def _merge(
             right (int): Index after last overlapping downloaded data
         """
         lslice, rslice = self._left[left:right], self._right[left:right]
-        i = start = min([start]+lslice[:1])
-        end = max([end]+rslice[-1:])
+        i = start = min([start] + lslice[:1])
+        end = max([end] + rslice[-1:])
         for j, k in zip(lslice, rslice):
             if j > i:
-                yield i, j-1
+                yield i, j - 1
             i = k + 1
         if i <= end:
             yield i, end
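_merge yields only the byte runs still missing between start and end, given the bounds of already-downloaded runs in lslice/rslice. The same logic as a free function, with one worked case (names are ours):

    from typing import Iterator, List, Tuple

    def missing_runs(
        start: int, end: int, lslice: List[int], rslice: List[int]
    ) -> Iterator[Tuple[int, int]]:
        # Walk the downloaded runs in order, emitting each gap between them.
        i = start = min([start] + lslice[:1])
        end = max([end] + rslice[-1:])
        for j, k in zip(lslice, rslice):
            if j > i:
                yield i, j - 1
            i = k + 1
        if i <= end:
            yield i, end

    # Bytes 10-19 are already cached; a request for 0-29 leaves two gaps:
    assert list(missing_runs(0, 29, [10], [19])) == [(0, 9), (20, 29)]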