From d60a679b6ce89456d6866f74b5eb75e0cbf77831 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 15 Oct 2015 17:22:46 -0400 Subject: [PATCH 1/5] Scrape in six used modules from apitools. Ignore them for pep8/pylint purposes until after refactoring/cleanup. --- gcloud/_apitools/buffered_stream.py | 59 ++ gcloud/_apitools/exceptions.py | 148 ++++ gcloud/_apitools/http_wrapper.py | 387 +++++++++++ gcloud/_apitools/stream_slice.py | 64 ++ gcloud/_apitools/transfer.py | 1004 +++++++++++++++++++++++++++ gcloud/_apitools/util.py | 212 ++++++ pylintrc_default | 2 + tox.ini | 2 +- 8 files changed, 1877 insertions(+), 1 deletion(-) create mode 100644 gcloud/_apitools/buffered_stream.py create mode 100644 gcloud/_apitools/exceptions.py create mode 100644 gcloud/_apitools/http_wrapper.py create mode 100644 gcloud/_apitools/stream_slice.py create mode 100644 gcloud/_apitools/transfer.py create mode 100644 gcloud/_apitools/util.py diff --git a/gcloud/_apitools/buffered_stream.py b/gcloud/_apitools/buffered_stream.py new file mode 100644 index 000000000000..bda7e65c1fba --- /dev/null +++ b/gcloud/_apitools/buffered_stream.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python +"""Small helper class to provide a small slice of a stream. + +This class reads ahead to detect if we are at the end of the stream. +""" + +from apitools.base.py import exceptions + + +# TODO(user): Consider replacing this with a StringIO. +class BufferedStream(object): + + """Buffers a stream, reading ahead to determine if we're at the end.""" + + def __init__(self, stream, start, size): + self.__stream = stream + self.__start_pos = start + self.__buffer_pos = 0 + self.__buffered_data = self.__stream.read(size) + self.__stream_at_end = len(self.__buffered_data) < size + self.__end_pos = self.__start_pos + len(self.__buffered_data) + + def __str__(self): + return ('Buffered stream %s from position %s-%s with %s ' + 'bytes remaining' % (self.__stream, self.__start_pos, + self.__end_pos, self._bytes_remaining)) + + def __len__(self): + return len(self.__buffered_data) + + @property + def stream_exhausted(self): + return self.__stream_at_end + + @property + def stream_end_position(self): + return self.__end_pos + + @property + def _bytes_remaining(self): + return len(self.__buffered_data) - self.__buffer_pos + + def read(self, size=None): # pylint: disable=invalid-name + """Reads from the buffer.""" + if size is None or size < 0: + raise exceptions.NotYetImplementedError( + 'Illegal read of size %s requested on BufferedStream. ' + 'Wrapped stream %s is at position %s-%s, ' + '%s bytes remaining.' 
% + (size, self.__stream, self.__start_pos, self.__end_pos, + self._bytes_remaining)) + + data = '' + if self._bytes_remaining: + size = min(size, self._bytes_remaining) + data = self.__buffered_data[ + self.__buffer_pos:self.__buffer_pos + size] + self.__buffer_pos += size + return data diff --git a/gcloud/_apitools/exceptions.py b/gcloud/_apitools/exceptions.py new file mode 100644 index 000000000000..1d736197c060 --- /dev/null +++ b/gcloud/_apitools/exceptions.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python +"""Exceptions for generated client libraries.""" + + +class Error(Exception): + + """Base class for all exceptions.""" + + +class TypecheckError(Error, TypeError): + + """An object of an incorrect type is provided.""" + + +class NotFoundError(Error): + + """A specified resource could not be found.""" + + +class UserError(Error): + + """Base class for errors related to user input.""" + + +class InvalidDataError(Error): + + """Base class for any invalid data error.""" + + +class CommunicationError(Error): + + """Any communication error talking to an API server.""" + + +class HttpError(CommunicationError): + + """Error making a request. Soon to be HttpError.""" + + def __init__(self, response, content, url): + super(HttpError, self).__init__() + self.response = response + self.content = content + self.url = url + + def __str__(self): + content = self.content.decode('ascii', 'replace') + return 'HttpError accessing <%s>: response: <%s>, content <%s>' % ( + self.url, self.response, content) + + @property + def status_code(self): + # TODO(craigcitro): Turn this into something better than a + # KeyError if there is no status. + return int(self.response['status']) + + @classmethod + def FromResponse(cls, http_response): + return cls(http_response.info, http_response.content, + http_response.request_url) + + +class InvalidUserInputError(InvalidDataError): + + """User-provided input is invalid.""" + + +class InvalidDataFromServerError(InvalidDataError, CommunicationError): + + """Data received from the server is malformed.""" + + +class BatchError(Error): + + """Error generated while constructing a batch request.""" + + +class ConfigurationError(Error): + + """Base class for configuration errors.""" + + +class GeneratedClientError(Error): + + """The generated client configuration is invalid.""" + + +class ConfigurationValueError(UserError): + + """Some part of the user-specified client configuration is invalid.""" + + +class ResourceUnavailableError(Error): + + """User requested an unavailable resource.""" + + +class CredentialsError(Error): + + """Errors related to invalid credentials.""" + + +class TransferError(CommunicationError): + + """Errors related to transfers.""" + + +class TransferRetryError(TransferError): + + """Retryable errors related to transfers.""" + + +class TransferInvalidError(TransferError): + + """The given transfer is invalid.""" + + +class RequestError(CommunicationError): + + """The request was not successful.""" + + +class RetryAfterError(HttpError): + + """The response contained a retry-after header.""" + + def __init__(self, response, content, url, retry_after): + super(RetryAfterError, self).__init__(response, content, url) + self.retry_after = int(retry_after) + + @classmethod + def FromResponse(cls, http_response): + return cls(http_response.info, http_response.content, + http_response.request_url, http_response.retry_after) + + +class BadStatusCodeError(HttpError): + + """The request completed but returned a bad status code.""" + + +class 
NotYetImplementedError(GeneratedClientError): + + """This functionality is not yet implemented.""" + + +class StreamExhausted(Error): + + """Attempted to read more bytes from a stream than were available.""" diff --git a/gcloud/_apitools/http_wrapper.py b/gcloud/_apitools/http_wrapper.py new file mode 100644 index 000000000000..03a094df0887 --- /dev/null +++ b/gcloud/_apitools/http_wrapper.py @@ -0,0 +1,387 @@ +#!/usr/bin/env python +"""HTTP wrapper for apitools. + +This library wraps the underlying http library we use, which is +currently httplib2. +""" + +import collections +import contextlib +import logging +import socket +import time + +import httplib2 +import six +from six.moves import http_client +from six.moves.urllib import parse + +from apitools.base.py import exceptions +from apitools.base.py import util + +__all__ = [ + 'CheckResponse', + 'GetHttp', + 'HandleExceptionsAndRebuildHttpConnections', + 'MakeRequest', + 'RebuildHttpConnections', + 'Request', + 'Response', + 'RethrowExceptionHandler', +] + + +# 308 and 429 don't have names in httplib. +RESUME_INCOMPLETE = 308 +TOO_MANY_REQUESTS = 429 +_REDIRECT_STATUS_CODES = ( + http_client.MOVED_PERMANENTLY, + http_client.FOUND, + http_client.SEE_OTHER, + http_client.TEMPORARY_REDIRECT, + RESUME_INCOMPLETE, +) + +# http: An httplib2.Http instance. +# http_request: A http_wrapper.Request. +# exc: Exception being raised. +# num_retries: Number of retries consumed; used for exponential backoff. +ExceptionRetryArgs = collections.namedtuple( + 'ExceptionRetryArgs', ['http', 'http_request', 'exc', 'num_retries', + 'max_retry_wait']) + + +@contextlib.contextmanager +def _Httplib2Debuglevel(http_request, level, http=None): + """Temporarily change the value of httplib2.debuglevel, if necessary. + + If http_request has a `loggable_body` distinct from `body`, then we + need to prevent httplib2 from logging the full body. This sets + httplib2.debuglevel for the duration of the `with` block; however, + that alone won't change the value of existing HTTP connections. If + an httplib2.Http object is provided, we'll also change the level on + any cached connections attached to it. + + Args: + http_request: a Request we're logging. + level: (int) the debuglevel for logging. + http: (optional) an httplib2.Http whose connections we should + set the debuglevel on. + + Yields: + None. + """ + if http_request.loggable_body is None: + yield + return + old_level = httplib2.debuglevel + http_levels = {} + httplib2.debuglevel = level + if http is not None: + for connection_key, connection in http.connections.items(): + # httplib2 stores two kinds of values in this dict, connection + # classes and instances. Since the connection types are all + # old-style classes, we can't easily distinguish by connection + # type -- so instead we use the key pattern. 
+ if ':' not in connection_key: + continue + http_levels[connection_key] = connection.debuglevel + connection.set_debuglevel(level) + yield + httplib2.debuglevel = old_level + if http is not None: + for connection_key, old_level in http_levels.items(): + if connection_key in http.connections: + http.connections[connection_key].set_debuglevel(old_level) + + +class Request(object): + + """Class encapsulating the data for an HTTP request.""" + + def __init__(self, url='', http_method='GET', headers=None, body=''): + self.url = url + self.http_method = http_method + self.headers = headers or {} + self.__body = None + self.__loggable_body = None + self.body = body + + @property + def loggable_body(self): + return self.__loggable_body + + @loggable_body.setter + def loggable_body(self, value): + if self.body is None: + raise exceptions.RequestError( + 'Cannot set loggable body on request with no body') + self.__loggable_body = value + + @property + def body(self): + return self.__body + + @body.setter + def body(self, value): + """Sets the request body; handles logging and length measurement.""" + self.__body = value + if value is not None: + # Avoid calling len() which cannot exceed 4GiB in 32-bit python. + body_length = getattr( + self.__body, 'length', None) or len(self.__body) + self.headers['content-length'] = str(body_length) + else: + self.headers.pop('content-length', None) + # This line ensures we don't try to print large requests. + if not isinstance(value, (type(None), six.string_types)): + self.loggable_body = '' + + +# Note: currently the order of fields here is important, since we want +# to be able to pass in the result from httplib2.request. +class Response(collections.namedtuple( + 'HttpResponse', ['info', 'content', 'request_url'])): + + """Class encapsulating data for an HTTP response.""" + __slots__ = () + + def __len__(self): + return self.length + + @property + def length(self): + """Return the length of this response. + + We expose this as an attribute since using len() directly can fail + for responses larger than sys.maxint. + + Returns: + Response length (as int or long) + """ + def ProcessContentRange(content_range): + _, _, range_spec = content_range.partition(' ') + byte_range, _, _ = range_spec.partition('/') + start, _, end = byte_range.partition('-') + return int(end) - int(start) + 1 + + if '-content-encoding' in self.info and 'content-range' in self.info: + # httplib2 rewrites content-length in the case of a compressed + # transfer; we can't trust the content-length header in that + # case, but we *can* trust content-range, if it's present. + return ProcessContentRange(self.info['content-range']) + elif 'content-length' in self.info: + return int(self.info.get('content-length')) + elif 'content-range' in self.info: + return ProcessContentRange(self.info['content-range']) + return len(self.content) + + @property + def status_code(self): + return int(self.info['status']) + + @property + def retry_after(self): + if 'retry-after' in self.info: + return int(self.info['retry-after']) + + @property + def is_redirect(self): + return (self.status_code in _REDIRECT_STATUS_CODES and + 'location' in self.info) + + +def CheckResponse(response): + if response is None: + # Caller shouldn't call us if the response is None, but handle anyway. + raise exceptions.RequestError( + 'Request to url %s did not return a response.' 
% + response.request_url) + elif (response.status_code >= 500 or + response.status_code == TOO_MANY_REQUESTS): + raise exceptions.BadStatusCodeError.FromResponse(response) + elif response.retry_after: + raise exceptions.RetryAfterError.FromResponse(response) + + +def RebuildHttpConnections(http): + """Rebuilds all http connections in the httplib2.Http instance. + + httplib2 overloads the map in http.connections to contain two different + types of values: + { scheme string: connection class } and + { scheme + authority string : actual http connection } + Here we remove all of the entries for actual connections so that on the + next request httplib2 will rebuild them from the connection types. + + Args: + http: An httplib2.Http instance. + """ + if getattr(http, 'connections', None): + for conn_key in list(http.connections.keys()): + if ':' in conn_key: + del http.connections[conn_key] + + +def RethrowExceptionHandler(*unused_args): + raise + + +def HandleExceptionsAndRebuildHttpConnections(retry_args): + """Exception handler for http failures. + + This catches known failures and rebuilds the underlying HTTP connections. + + Args: + retry_args: An ExceptionRetryArgs tuple. + """ + # If the server indicates how long to wait, use that value. Otherwise, + # calculate the wait time on our own. + retry_after = None + + # Transport failures + if isinstance(retry_args.exc, (http_client.BadStatusLine, + http_client.IncompleteRead, + http_client.ResponseNotReady)): + logging.debug('Caught HTTP error %s, retrying: %s', + type(retry_args.exc).__name__, retry_args.exc) + elif isinstance(retry_args.exc, socket.error): + logging.debug('Caught socket error, retrying: %s', retry_args.exc) + elif isinstance(retry_args.exc, socket.gaierror): + logging.debug( + 'Caught socket address error, retrying: %s', retry_args.exc) + elif isinstance(retry_args.exc, socket.timeout): + logging.debug( + 'Caught socket timeout error, retrying: %s', retry_args.exc) + elif isinstance(retry_args.exc, httplib2.ServerNotFoundError): + logging.debug( + 'Caught server not found error, retrying: %s', retry_args.exc) + elif isinstance(retry_args.exc, ValueError): + # oauth2client tries to JSON-decode the response, which can result + # in a ValueError if the response was invalid. Until that is fixed in + # oauth2client, need to handle it here. + logging.debug('Response content was invalid (%s), retrying', + retry_args.exc) + elif isinstance(retry_args.exc, exceptions.RequestError): + logging.debug('Request returned no response, retrying') + # API-level failures + elif isinstance(retry_args.exc, exceptions.BadStatusCodeError): + logging.debug('Response returned status %s, retrying', + retry_args.exc.status_code) + elif isinstance(retry_args.exc, exceptions.RetryAfterError): + logging.debug('Response returned a retry-after header, retrying') + retry_after = retry_args.exc.retry_after + else: + raise + RebuildHttpConnections(retry_args.http) + logging.debug('Retrying request to url %s after exception %s', + retry_args.http_request.url, retry_args.exc) + time.sleep( + retry_after or util.CalculateWaitForRetry( + retry_args.num_retries, max_wait=retry_args.max_retry_wait)) + + +def MakeRequest(http, http_request, retries=7, max_retry_wait=60, + redirections=5, + retry_func=HandleExceptionsAndRebuildHttpConnections, + check_response_func=CheckResponse): + """Send http_request via the given http, performing error/retry handling. 
+ + Args: + http: An httplib2.Http instance, or a http multiplexer that delegates to + an underlying http, for example, HTTPMultiplexer. + http_request: A Request to send. + retries: (int, default 7) Number of retries to attempt on retryable + replies (such as 429 or 5XX). + max_retry_wait: (int, default 60) Maximum number of seconds to wait + when retrying. + redirections: (int, default 5) Number of redirects to follow. + retry_func: Function to handle retries on exceptions. Arguments are + (Httplib2.Http, Request, Exception, int num_retries). + check_response_func: Function to validate the HTTP response. + Arguments are (Response, response content, url). + + Raises: + InvalidDataFromServerError: if there is no response after retries. + + Returns: + A Response object. + + """ + retry = 0 + while True: + try: + return _MakeRequestNoRetry( + http, http_request, redirections=redirections, + check_response_func=check_response_func) + # retry_func will consume the exception types it handles and raise. + # pylint: disable=broad-except + except Exception as e: + retry += 1 + if retry >= retries: + raise + else: + retry_func(ExceptionRetryArgs( + http, http_request, e, retry, max_retry_wait)) + + +def _MakeRequestNoRetry(http, http_request, redirections=5, + check_response_func=CheckResponse): + """Send http_request via the given http. + + This wrapper exists to handle translation between the plain httplib2 + request/response types and the Request and Response types above. + + Args: + http: An httplib2.Http instance, or a http multiplexer that delegates to + an underlying http, for example, HTTPMultiplexer. + http_request: A Request to send. + redirections: (int, default 5) Number of redirects to follow. + check_response_func: Function to validate the HTTP response. + Arguments are (Response, response content, url). + + Returns: + A Response object. + + Raises: + RequestError if no response could be parsed. + + """ + connection_type = None + # Handle overrides for connection types. This is used if the caller + # wants control over the underlying connection for managing callbacks + # or hash digestion. 
+ if getattr(http, 'connections', None): + url_scheme = parse.urlsplit(http_request.url).scheme + if url_scheme and url_scheme in http.connections: + connection_type = http.connections[url_scheme] + + # Custom printing only at debuglevel 4 + new_debuglevel = 4 if httplib2.debuglevel == 4 else 0 + with _Httplib2Debuglevel(http_request, new_debuglevel, http=http): + info, content = http.request( + str(http_request.url), method=str(http_request.http_method), + body=http_request.body, headers=http_request.headers, + redirections=redirections, connection_type=connection_type) + + if info is None: + raise exceptions.RequestError() + + response = Response(info, content, http_request.url) + check_response_func(response) + return response + + +_HTTP_FACTORIES = [] + + +def _RegisterHttpFactory(factory): + _HTTP_FACTORIES.append(factory) + + +def GetHttp(**kwds): + for factory in _HTTP_FACTORIES: + http = factory(**kwds) + if http is not None: + return http + return httplib2.Http(**kwds) diff --git a/gcloud/_apitools/stream_slice.py b/gcloud/_apitools/stream_slice.py new file mode 100644 index 000000000000..bd43daf91564 --- /dev/null +++ b/gcloud/_apitools/stream_slice.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +"""Small helper class to provide a small slice of a stream.""" + +from apitools.base.py import exceptions + + +class StreamSlice(object): + + """Provides a slice-like object for streams.""" + + def __init__(self, stream, max_bytes): + self.__stream = stream + self.__remaining_bytes = max_bytes + self.__max_bytes = max_bytes + + def __str__(self): + return 'Slice of stream %s with %s/%s bytes not yet read' % ( + self.__stream, self.__remaining_bytes, self.__max_bytes) + + def __len__(self): + return self.__max_bytes + + def __nonzero__(self): + # For 32-bit python2.x, len() cannot exceed a 32-bit number; avoid + # accidental len() calls from httplib in the form of "if this_object:". + return bool(self.__max_bytes) + + @property + def length(self): + # For 32-bit python2.x, len() cannot exceed a 32-bit number. + return self.__max_bytes + + def read(self, size=None): # pylint: disable=missing-docstring + """Read at most size bytes from this slice. + + Compared to other streams, there is one case where we may + unexpectedly raise an exception on read: if the underlying stream + is exhausted (i.e. returns no bytes on read), and the size of this + slice indicates we should still be able to read more bytes, we + raise exceptions.StreamExhausted. + + Args: + size: If provided, read no more than size bytes from the stream. + + Returns: + The bytes read from this slice. 
+ + Raises: + exceptions.StreamExhausted + + """ + if size is not None: + read_size = min(size, self.__remaining_bytes) + else: + read_size = self.__remaining_bytes + data = self.__stream.read(read_size) + if read_size > 0 and not data: + raise exceptions.StreamExhausted( + 'Not enough bytes in stream; expected %d, exhausted ' + 'after %d' % ( + self.__max_bytes, + self.__max_bytes - self.__remaining_bytes)) + self.__remaining_bytes -= len(data) + return data diff --git a/gcloud/_apitools/transfer.py b/gcloud/_apitools/transfer.py new file mode 100644 index 000000000000..f144582213f6 --- /dev/null +++ b/gcloud/_apitools/transfer.py @@ -0,0 +1,1004 @@ +#!/usr/bin/env python +"""Upload and download support for apitools.""" +from __future__ import print_function + +import email.generator as email_generator +import email.mime.multipart as mime_multipart +import email.mime.nonmultipart as mime_nonmultipart +import io +import json +import mimetypes +import os +import threading + +import six +from six.moves import http_client + +from apitools.base.py import buffered_stream +from apitools.base.py import exceptions +from apitools.base.py import http_wrapper +from apitools.base.py import stream_slice +from apitools.base.py import util + +__all__ = [ + 'Download', + 'Upload', + 'RESUMABLE_UPLOAD', + 'SIMPLE_UPLOAD', + 'DownloadProgressPrinter', + 'DownloadCompletePrinter', + 'UploadProgressPrinter', + 'UploadCompletePrinter', +] + +_RESUMABLE_UPLOAD_THRESHOLD = 5 << 20 +SIMPLE_UPLOAD = 'simple' +RESUMABLE_UPLOAD = 'resumable' + + +def DownloadProgressPrinter(response, unused_download): + """Print download progress based on response.""" + if 'content-range' in response.info: + print('Received %s' % response.info['content-range']) + else: + print('Received %d bytes' % response.length) + + +def DownloadCompletePrinter(unused_response, unused_download): + """Print information about a completed download.""" + print('Download complete') + + +def UploadProgressPrinter(response, unused_upload): + """Print upload progress based on response.""" + print('Sent %s' % response.info['range']) + + +def UploadCompletePrinter(unused_response, unused_upload): + """Print information about a completed upload.""" + print('Upload complete') + + +class _Transfer(object): + + """Generic bits common to Uploads and Downloads.""" + + def __init__(self, stream, close_stream=False, chunksize=None, + auto_transfer=True, http=None, num_retries=5): + self.__bytes_http = None + self.__close_stream = close_stream + self.__http = http + self.__stream = stream + self.__url = None + + self.__num_retries = 5 + # Let the @property do validation + self.num_retries = num_retries + + self.retry_func = ( + http_wrapper.HandleExceptionsAndRebuildHttpConnections) + self.auto_transfer = auto_transfer + self.chunksize = chunksize or 1048576 + + def __repr__(self): + return str(self) + + @property + def close_stream(self): + return self.__close_stream + + @property + def http(self): + return self.__http + + @property + def bytes_http(self): + return self.__bytes_http or self.http + + @bytes_http.setter + def bytes_http(self, value): + self.__bytes_http = value + + @property + def num_retries(self): + return self.__num_retries + + @num_retries.setter + def num_retries(self, value): + util.Typecheck(value, six.integer_types) + if value < 0: + raise exceptions.InvalidDataError( + 'Cannot have negative value for num_retries') + self.__num_retries = value + + @property + def stream(self): + return self.__stream + + @property + def url(self): + return 
self.__url + + def _Initialize(self, http, url): + """Initialize this download by setting self.http and self.url. + + We want the user to be able to override self.http by having set + the value in the constructor; in that case, we ignore the provided + http. + + Args: + http: An httplib2.Http instance or None. + url: The url for this transfer. + + Returns: + None. Initializes self. + """ + self.EnsureUninitialized() + if self.http is None: + self.__http = http or http_wrapper.GetHttp() + self.__url = url + + @property + def initialized(self): + return self.url is not None and self.http is not None + + @property + def _type_name(self): + return type(self).__name__ + + def EnsureInitialized(self): + if not self.initialized: + raise exceptions.TransferInvalidError( + 'Cannot use uninitialized %s', self._type_name) + + def EnsureUninitialized(self): + if self.initialized: + raise exceptions.TransferInvalidError( + 'Cannot re-initialize %s', self._type_name) + + def __del__(self): + if self.__close_stream: + self.__stream.close() + + def _ExecuteCallback(self, callback, response): + # TODO(craigcitro): Push these into a queue. + if callback is not None: + threading.Thread(target=callback, args=(response, self)).start() + + +class Download(_Transfer): + + """Data for a single download. + + Public attributes: + chunksize: default chunksize to use for transfers. + """ + _ACCEPTABLE_STATUSES = set(( + http_client.OK, + http_client.NO_CONTENT, + http_client.PARTIAL_CONTENT, + http_client.REQUESTED_RANGE_NOT_SATISFIABLE, + )) + _REQUIRED_SERIALIZATION_KEYS = set(( + 'auto_transfer', 'progress', 'total_size', 'url')) + + def __init__(self, stream, progress_callback=None, finish_callback=None, + **kwds): + total_size = kwds.pop('total_size', None) + super(Download, self).__init__(stream, **kwds) + self.__initial_response = None + self.__progress = 0 + self.__total_size = total_size + self.__encoding = None + + self.progress_callback = progress_callback + self.finish_callback = finish_callback + + @property + def progress(self): + return self.__progress + + @property + def encoding(self): + return self.__encoding + + @classmethod + def FromFile(cls, filename, overwrite=False, auto_transfer=True, **kwds): + """Create a new download object from a filename.""" + path = os.path.expanduser(filename) + if os.path.exists(path) and not overwrite: + raise exceptions.InvalidUserInputError( + 'File %s exists and overwrite not specified' % path) + return cls(open(path, 'wb'), close_stream=True, + auto_transfer=auto_transfer, **kwds) + + @classmethod + def FromStream(cls, stream, auto_transfer=True, total_size=None, **kwds): + """Create a new Download object from a stream.""" + return cls(stream, auto_transfer=auto_transfer, total_size=total_size, + **kwds) + + @classmethod + def FromData(cls, stream, json_data, http=None, auto_transfer=None, + **kwds): + """Create a new Download object from a stream and serialized data.""" + info = json.loads(json_data) + missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys()) + if missing_keys: + raise exceptions.InvalidDataError( + 'Invalid serialization data, missing keys: %s' % ( + ', '.join(missing_keys))) + download = cls.FromStream(stream, **kwds) + if auto_transfer is not None: + download.auto_transfer = auto_transfer + else: + download.auto_transfer = info['auto_transfer'] + setattr(download, '_Download__progress', info['progress']) + setattr(download, '_Download__total_size', info['total_size']) + download._Initialize( # pylint: disable=protected-access + http, 
info['url']) + return download + + @property + def serialization_data(self): + self.EnsureInitialized() + return { + 'auto_transfer': self.auto_transfer, + 'progress': self.progress, + 'total_size': self.total_size, + 'url': self.url, + } + + @property + def total_size(self): + return self.__total_size + + def __str__(self): + if not self.initialized: + return 'Download (uninitialized)' + else: + return 'Download with %d/%s bytes transferred from url %s' % ( + self.progress, self.total_size, self.url) + + def ConfigureRequest(self, http_request, url_builder): + url_builder.query_params['alt'] = 'media' + # TODO(craigcitro): We need to send range requests because by + # default httplib2 stores entire reponses in memory. Override + # httplib2's download method (as gsutil does) so that this is not + # necessary. + http_request.headers['Range'] = 'bytes=0-%d' % (self.chunksize - 1,) + + def __SetTotal(self, info): + if 'content-range' in info: + _, _, total = info['content-range'].rpartition('/') + if total != '*': + self.__total_size = int(total) + # Note "total_size is None" means we don't know it; if no size + # info was returned on our initial range request, that means we + # have a 0-byte file. (That last statement has been verified + # empirically, but is not clearly documented anywhere.) + if self.total_size is None: + self.__total_size = 0 + + def InitializeDownload(self, http_request, http=None, client=None): + """Initialize this download by making a request. + + Args: + http_request: The HttpRequest to use to initialize this download. + http: The httplib2.Http instance for this request. + client: If provided, let this client process the final URL before + sending any additional requests. If client is provided and + http is not, client.http will be used instead. + """ + self.EnsureUninitialized() + if http is None and client is None: + raise exceptions.UserError('Must provide client or http.') + http = http or client.http + if client is not None: + http_request.url = client.FinalizeTransferUrl(http_request.url) + url = http_request.url + if self.auto_transfer: + end_byte = self.__ComputeEndByte(0) + self.__SetRangeHeader(http_request, 0, end_byte) + response = http_wrapper.MakeRequest( + self.bytes_http or http, http_request) + if response.status_code not in self._ACCEPTABLE_STATUSES: + raise exceptions.HttpError.FromResponse(response) + self.__initial_response = response + self.__SetTotal(response.info) + url = response.info.get('content-location', response.request_url) + if client is not None: + url = client.FinalizeTransferUrl(url) + self._Initialize(http, url) + # Unless the user has requested otherwise, we want to just + # go ahead and pump the bytes now. 
+ if self.auto_transfer: + self.StreamInChunks() + + def __NormalizeStartEnd(self, start, end=None): + if end is not None: + if start < 0: + raise exceptions.TransferInvalidError( + 'Cannot have end index with negative start index') + elif start >= self.total_size: + raise exceptions.TransferInvalidError( + 'Cannot have start index greater than total size') + end = min(end, self.total_size - 1) + if end < start: + raise exceptions.TransferInvalidError( + 'Range requested with end[%s] < start[%s]' % (end, start)) + return start, end + else: + if start < 0: + start = max(0, start + self.total_size) + return start, self.total_size - 1 + + def __SetRangeHeader(self, request, start, end=None): + if start < 0: + request.headers['range'] = 'bytes=%d' % start + elif end is None: + request.headers['range'] = 'bytes=%d-' % start + else: + request.headers['range'] = 'bytes=%d-%d' % (start, end) + + def __ComputeEndByte(self, start, end=None, use_chunks=True): + """Compute the last byte to fetch for this request. + + This is all based on the HTTP spec for Range and + Content-Range. + + Note that this is potentially confusing in several ways: + * the value for the last byte is 0-based, eg "fetch 10 bytes + from the beginning" would return 9 here. + * if we have no information about size, and don't want to + use the chunksize, we'll return None. + See the tests for more examples. + + Args: + start: byte to start at. + end: (int or None, default: None) Suggested last byte. + use_chunks: (bool, default: True) If False, ignore self.chunksize. + + Returns: + Last byte to use in a Range header, or None. + + """ + end_byte = end + + if start < 0 and not self.total_size: + return end_byte + + if use_chunks: + alternate = start + self.chunksize - 1 + if end_byte is not None: + end_byte = min(end_byte, alternate) + else: + end_byte = alternate + + if self.total_size: + alternate = self.total_size - 1 + if end_byte is not None: + end_byte = min(end_byte, alternate) + else: + end_byte = alternate + + return end_byte + + def __GetChunk(self, start, end, additional_headers=None): + """Retrieve a chunk, and return the full response.""" + self.EnsureInitialized() + request = http_wrapper.Request(url=self.url) + self.__SetRangeHeader(request, start, end=end) + if additional_headers is not None: + request.headers.update(additional_headers) + return http_wrapper.MakeRequest( + self.bytes_http, request, retry_func=self.retry_func, + retries=self.num_retries) + + def __ProcessResponse(self, response): + """Process response (by updating self and writing to self.stream).""" + if response.status_code not in self._ACCEPTABLE_STATUSES: + # We distinguish errors that mean we made a mistake in setting + # up the transfer versus something we should attempt again. + if response.status_code in (http_client.FORBIDDEN, + http_client.NOT_FOUND): + raise exceptions.HttpError.FromResponse(response) + else: + raise exceptions.TransferRetryError(response.content) + if response.status_code in (http_client.OK, + http_client.PARTIAL_CONTENT): + self.stream.write(response.content) + self.__progress += response.length + if response.info and 'content-encoding' in response.info: + # TODO(craigcitro): Handle the case where this changes over a + # download. + self.__encoding = response.info['content-encoding'] + elif response.status_code == http_client.NO_CONTENT: + # It's important to write something to the stream for the case + # of a 0-byte download to a file, as otherwise python won't + # create the file. 
+ self.stream.write('') + return response + + def GetRange(self, start, end=None, additional_headers=None, + use_chunks=True): + """Retrieve a given byte range from this download, inclusive. + + Range must be of one of these three forms: + * 0 <= start, end = None: Fetch from start to the end of the file. + * 0 <= start <= end: Fetch the bytes from start to end. + * start < 0, end = None: Fetch the last -start bytes of the file. + + (These variations correspond to those described in the HTTP 1.1 + protocol for range headers in RFC 2616, sec. 14.35.1.) + + Args: + start: (int) Where to start fetching bytes. (See above.) + end: (int, optional) Where to stop fetching bytes. (See above.) + additional_headers: (bool, optional) Any additional headers to + pass with the request. + use_chunks: (bool, default: True) If False, ignore self.chunksize + and fetch this range in a single request. + + Returns: + None. Streams bytes into self.stream. + """ + self.EnsureInitialized() + progress_end_normalized = False + if self.total_size is not None: + progress, end_byte = self.__NormalizeStartEnd(start, end) + progress_end_normalized = True + else: + progress = start + end_byte = end + while (not progress_end_normalized or end_byte is None or + progress <= end_byte): + end_byte = self.__ComputeEndByte(progress, end=end_byte, + use_chunks=use_chunks) + response = self.__GetChunk(progress, end_byte, + additional_headers=additional_headers) + if not progress_end_normalized: + self.__SetTotal(response.info) + progress, end_byte = self.__NormalizeStartEnd(start, end) + progress_end_normalized = True + response = self.__ProcessResponse(response) + progress += response.length + if response.length == 0: + raise exceptions.TransferRetryError( + 'Zero bytes unexpectedly returned in download response') + + def StreamInChunks(self, callback=None, finish_callback=None, + additional_headers=None): + """Stream the entire download in chunks.""" + self.StreamMedia(callback=callback, finish_callback=finish_callback, + additional_headers=additional_headers, + use_chunks=True) + + def StreamMedia(self, callback=None, finish_callback=None, + additional_headers=None, use_chunks=True): + """Stream the entire download. + + Args: + callback: (default: None) Callback to call as each chunk is + completed. + finish_callback: (default: None) Callback to call when the + download is complete. + additional_headers: (default: None) Additional headers to + include in fetching bytes. + use_chunks: (bool, default: True) If False, ignore self.chunksize + and stream this download in a single request. + + Returns: + None. Streams bytes into self.stream. + """ + callback = callback or self.progress_callback + finish_callback = finish_callback or self.finish_callback + + self.EnsureInitialized() + while True: + if self.__initial_response is not None: + response = self.__initial_response + self.__initial_response = None + else: + end_byte = self.__ComputeEndByte(self.progress, + use_chunks=use_chunks) + response = self.__GetChunk( + self.progress, end_byte, + additional_headers=additional_headers) + if self.total_size is None: + self.__SetTotal(response.info) + response = self.__ProcessResponse(response) + self._ExecuteCallback(callback, response) + if (response.status_code == http_client.OK or + self.progress >= self.total_size): + break + self._ExecuteCallback(finish_callback, response) + + +class Upload(_Transfer): + + """Data for a single Upload. + + Fields: + stream: The stream to upload. + mime_type: MIME type of the upload. 
+ total_size: (optional) Total upload size for the stream. + close_stream: (default: False) Whether or not we should close the + stream when finished with the upload. + auto_transfer: (default: True) If True, stream all bytes as soon as + the upload is created. + """ + _REQUIRED_SERIALIZATION_KEYS = set(( + 'auto_transfer', 'mime_type', 'total_size', 'url')) + + def __init__(self, stream, mime_type, total_size=None, http=None, + close_stream=False, chunksize=None, auto_transfer=True, + progress_callback=None, finish_callback=None, + **kwds): + super(Upload, self).__init__( + stream, close_stream=close_stream, chunksize=chunksize, + auto_transfer=auto_transfer, http=http, **kwds) + self.__complete = False + self.__final_response = None + self.__mime_type = mime_type + self.__progress = 0 + self.__server_chunk_granularity = None + self.__strategy = None + self.__total_size = None + + self.progress_callback = progress_callback + self.finish_callback = finish_callback + self.total_size = total_size + + @property + def progress(self): + return self.__progress + + @classmethod + def FromFile(cls, filename, mime_type=None, auto_transfer=True, **kwds): + """Create a new Upload object from a filename.""" + path = os.path.expanduser(filename) + if not os.path.exists(path): + raise exceptions.NotFoundError('Could not find file %s' % path) + if not mime_type: + mime_type, _ = mimetypes.guess_type(path) + if mime_type is None: + raise exceptions.InvalidUserInputError( + 'Could not guess mime type for %s' % path) + size = os.stat(path).st_size + return cls(open(path, 'rb'), mime_type, total_size=size, + close_stream=True, auto_transfer=auto_transfer, **kwds) + + @classmethod + def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True, + **kwds): + """Create a new Upload object from a stream.""" + if mime_type is None: + raise exceptions.InvalidUserInputError( + 'No mime_type specified for stream') + return cls(stream, mime_type, total_size=total_size, + close_stream=False, auto_transfer=auto_transfer, **kwds) + + @classmethod + def FromData(cls, stream, json_data, http, auto_transfer=None, **kwds): + """Create a new Upload of stream from serialized json_data and http.""" + info = json.loads(json_data) + missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys()) + if missing_keys: + raise exceptions.InvalidDataError( + 'Invalid serialization data, missing keys: %s' % ( + ', '.join(missing_keys))) + if 'total_size' in kwds: + raise exceptions.InvalidUserInputError( + 'Cannot override total_size on serialized Upload') + upload = cls.FromStream(stream, info['mime_type'], + total_size=info.get('total_size'), **kwds) + if isinstance(stream, io.IOBase) and not stream.seekable(): + raise exceptions.InvalidUserInputError( + 'Cannot restart resumable upload on non-seekable stream') + if auto_transfer is not None: + upload.auto_transfer = auto_transfer + else: + upload.auto_transfer = info['auto_transfer'] + upload.strategy = RESUMABLE_UPLOAD + upload._Initialize( # pylint: disable=protected-access + http, info['url']) + upload.RefreshResumableUploadState() + upload.EnsureInitialized() + if upload.auto_transfer: + upload.StreamInChunks() + return upload + + @property + def serialization_data(self): + self.EnsureInitialized() + if self.strategy != RESUMABLE_UPLOAD: + raise exceptions.InvalidDataError( + 'Serialization only supported for resumable uploads') + return { + 'auto_transfer': self.auto_transfer, + 'mime_type': self.mime_type, + 'total_size': self.total_size, + 'url': self.url, + } 
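    # Usage note (illustrative, not part of the vendored upstream file): the
    # serialization_data dict above round-trips with the FromData()
    # classmethods.  A caller can checkpoint a resumable upload with, e.g.,
    # json.dumps(upload.serialization_data), and later rebuild it via
    #     Upload.FromData(stream, json_data, http)
    # which restores the saved url, asks the server where the upload left off
    # through RefreshResumableUploadState(), and then streams the remaining
    # bytes if auto_transfer is enabled.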
+ + @property + def complete(self): + return self.__complete + + @property + def mime_type(self): + return self.__mime_type + + def __str__(self): + if not self.initialized: + return 'Upload (uninitialized)' + else: + return 'Upload with %d/%s bytes transferred for url %s' % ( + self.progress, self.total_size or '???', self.url) + + @property + def strategy(self): + return self.__strategy + + @strategy.setter + def strategy(self, value): + if value not in (SIMPLE_UPLOAD, RESUMABLE_UPLOAD): + raise exceptions.UserError(( + 'Invalid value "%s" for upload strategy, must be one of ' + '"simple" or "resumable".') % value) + self.__strategy = value + + @property + def total_size(self): + return self.__total_size + + @total_size.setter + def total_size(self, value): + self.EnsureUninitialized() + self.__total_size = value + + def __SetDefaultUploadStrategy(self, upload_config, http_request): + """Determine and set the default upload strategy for this upload. + + We generally prefer simple or multipart, unless we're forced to + use resumable. This happens when any of (1) the upload is too + large, (2) the simple endpoint doesn't support multipart requests + and we have metadata, or (3) there is no simple upload endpoint. + + Args: + upload_config: Configuration for the upload endpoint. + http_request: The associated http request. + + Returns: + None. + """ + if upload_config.resumable_path is None: + self.strategy = SIMPLE_UPLOAD + if self.strategy is not None: + return + strategy = SIMPLE_UPLOAD + if (self.total_size is not None and + self.total_size > _RESUMABLE_UPLOAD_THRESHOLD): + strategy = RESUMABLE_UPLOAD + if http_request.body and not upload_config.simple_multipart: + strategy = RESUMABLE_UPLOAD + if not upload_config.simple_path: + strategy = RESUMABLE_UPLOAD + self.strategy = strategy + + def ConfigureRequest(self, upload_config, http_request, url_builder): + """Configure the request and url for this upload.""" + # Validate total_size vs. max_size + if (self.total_size and upload_config.max_size and + self.total_size > upload_config.max_size): + raise exceptions.InvalidUserInputError( + 'Upload too big: %s larger than max size %s' % ( + self.total_size, upload_config.max_size)) + # Validate mime type + if not util.AcceptableMimeType(upload_config.accept, self.mime_type): + raise exceptions.InvalidUserInputError( + 'MIME type %s does not match any accepted MIME ranges %s' % ( + self.mime_type, upload_config.accept)) + + self.__SetDefaultUploadStrategy(upload_config, http_request) + if self.strategy == SIMPLE_UPLOAD: + url_builder.relative_path = upload_config.simple_path + if http_request.body: + url_builder.query_params['uploadType'] = 'multipart' + self.__ConfigureMultipartRequest(http_request) + else: + url_builder.query_params['uploadType'] = 'media' + self.__ConfigureMediaRequest(http_request) + else: + url_builder.relative_path = upload_config.resumable_path + url_builder.query_params['uploadType'] = 'resumable' + self.__ConfigureResumableRequest(http_request) + + def __ConfigureMediaRequest(self, http_request): + """Configure http_request as a simple request for this upload.""" + http_request.headers['content-type'] = self.mime_type + http_request.body = self.stream.read() + http_request.loggable_body = '' + + def __ConfigureMultipartRequest(self, http_request): + """Configure http_request as a multipart request for this upload.""" + # This is a multipart/related upload. 
+ msg_root = mime_multipart.MIMEMultipart('related') + # msg_root should not write out its own headers + setattr(msg_root, '_write_headers', lambda self: None) + + # attach the body as one part + msg = mime_nonmultipart.MIMENonMultipart( + *http_request.headers['content-type'].split('/')) + msg.set_payload(http_request.body) + msg_root.attach(msg) + + # attach the media as the second part + msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/')) + msg['Content-Transfer-Encoding'] = 'binary' + msg.set_payload(self.stream.read()) + msg_root.attach(msg) + + # NOTE: We encode the body, but can't use + # `email.message.Message.as_string` because it prepends + # `> ` to `From ` lines. + # NOTE: We must use six.StringIO() instead of io.StringIO() since the + # `email` library uses cStringIO in Py2 and io.StringIO in Py3. + fp = six.StringIO() + g = email_generator.Generator(fp, mangle_from_=False) + g.flatten(msg_root, unixfrom=False) + http_request.body = fp.getvalue() + + multipart_boundary = msg_root.get_boundary() + http_request.headers['content-type'] = ( + 'multipart/related; boundary=%r' % multipart_boundary) + + body_components = http_request.body.split(multipart_boundary) + headers, _, _ = body_components[-2].partition('\n\n') + body_components[-2] = '\n\n'.join([headers, '\n\n--']) + http_request.loggable_body = multipart_boundary.join(body_components) + + def __ConfigureResumableRequest(self, http_request): + http_request.headers['X-Upload-Content-Type'] = self.mime_type + if self.total_size is not None: + http_request.headers[ + 'X-Upload-Content-Length'] = str(self.total_size) + + def RefreshResumableUploadState(self): + """Talk to the server and refresh the state of this resumable upload. + + Returns: + Response if the upload is complete. + """ + if self.strategy != RESUMABLE_UPLOAD: + return + self.EnsureInitialized() + refresh_request = http_wrapper.Request( + url=self.url, http_method='PUT', + headers={'Content-Range': 'bytes */*'}) + refresh_response = http_wrapper.MakeRequest( + self.http, refresh_request, redirections=0, + retries=self.num_retries) + range_header = self._GetRangeHeaderFromResponse(refresh_response) + if refresh_response.status_code in (http_client.OK, + http_client.CREATED): + self.__complete = True + self.__progress = self.total_size + self.stream.seek(self.progress) + # If we're finished, the refresh response will contain the metadata + # originally requested. Cache it so it can be returned in + # StreamInChunks. 
+ self.__final_response = refresh_response + elif refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE: + if range_header is None: + self.__progress = 0 + else: + self.__progress = self.__GetLastByte(range_header) + 1 + self.stream.seek(self.progress) + else: + raise exceptions.HttpError.FromResponse(refresh_response) + + def _GetRangeHeaderFromResponse(self, response): + return response.info.get('Range', response.info.get('range')) + + def InitializeUpload(self, http_request, http=None, client=None): + """Initialize this upload from the given http_request.""" + if self.strategy is None: + raise exceptions.UserError( + 'No upload strategy set; did you call ConfigureRequest?') + if http is None and client is None: + raise exceptions.UserError('Must provide client or http.') + if self.strategy != RESUMABLE_UPLOAD: + return + http = http or client.http + if client is not None: + http_request.url = client.FinalizeTransferUrl(http_request.url) + self.EnsureUninitialized() + http_response = http_wrapper.MakeRequest(http, http_request, + retries=self.num_retries) + if http_response.status_code != http_client.OK: + raise exceptions.HttpError.FromResponse(http_response) + + self.__server_chunk_granularity = http_response.info.get( + 'X-Goog-Upload-Chunk-Granularity') + url = http_response.info['location'] + if client is not None: + url = client.FinalizeTransferUrl(url) + self._Initialize(http, url) + + # Unless the user has requested otherwise, we want to just + # go ahead and pump the bytes now. + if self.auto_transfer: + return self.StreamInChunks() + else: + return http_response + + def __GetLastByte(self, range_header): + _, _, end = range_header.partition('-') + # TODO(craigcitro): Validate start == 0? + return int(end) + + def __ValidateChunksize(self, chunksize=None): + if self.__server_chunk_granularity is None: + return + chunksize = chunksize or self.chunksize + if chunksize % self.__server_chunk_granularity: + raise exceptions.ConfigurationValueError( + 'Server requires chunksize to be a multiple of %d', + self.__server_chunk_granularity) + + def __StreamMedia(self, callback=None, finish_callback=None, + additional_headers=None, use_chunks=True): + """Helper function for StreamMedia / StreamInChunks.""" + if self.strategy != RESUMABLE_UPLOAD: + raise exceptions.InvalidUserInputError( + 'Cannot stream non-resumable upload') + callback = callback or self.progress_callback + finish_callback = finish_callback or self.finish_callback + # final_response is set if we resumed an already-completed upload. + response = self.__final_response + send_func = self.__SendChunk if use_chunks else self.__SendMediaBody + if use_chunks: + self.__ValidateChunksize(self.chunksize) + self.EnsureInitialized() + while not self.complete: + response = send_func(self.stream.tell(), + additional_headers=additional_headers) + if response.status_code in (http_client.OK, http_client.CREATED): + self.__complete = True + break + self.__progress = self.__GetLastByte(response.info['range']) + if self.progress + 1 != self.stream.tell(): + # TODO(craigcitro): Add a better way to recover here. 
+ raise exceptions.CommunicationError( + 'Failed to transfer all bytes in chunk, upload paused at ' + 'byte %d' % self.progress) + self._ExecuteCallback(callback, response) + if self.__complete and hasattr(self.stream, 'seek'): + current_pos = self.stream.tell() + self.stream.seek(0, os.SEEK_END) + end_pos = self.stream.tell() + self.stream.seek(current_pos) + if current_pos != end_pos: + raise exceptions.TransferInvalidError( + 'Upload complete with %s additional bytes left in stream' % + (int(end_pos) - int(current_pos))) + self._ExecuteCallback(finish_callback, response) + return response + + def StreamMedia(self, callback=None, finish_callback=None, + additional_headers=None): + """Send this resumable upload in a single request. + + Args: + callback: Progress callback function with inputs + (http_wrapper.Response, transfer.Upload) + finish_callback: Final callback function with inputs + (http_wrapper.Response, transfer.Upload) + additional_headers: Dict of headers to include with the upload + http_wrapper.Request. + + Returns: + http_wrapper.Response of final response. + """ + return self.__StreamMedia( + callback=callback, finish_callback=finish_callback, + additional_headers=additional_headers, use_chunks=False) + + def StreamInChunks(self, callback=None, finish_callback=None, + additional_headers=None): + """Send this (resumable) upload in chunks.""" + return self.__StreamMedia( + callback=callback, finish_callback=finish_callback, + additional_headers=additional_headers) + + def __SendMediaRequest(self, request, end): + """Request helper function for SendMediaBody & SendChunk.""" + response = http_wrapper.MakeRequest( + self.bytes_http, request, retry_func=self.retry_func, + retries=self.num_retries) + if response.status_code not in (http_client.OK, http_client.CREATED, + http_wrapper.RESUME_INCOMPLETE): + # We want to reset our state to wherever the server left us + # before this failed request, and then raise. + self.RefreshResumableUploadState() + raise exceptions.HttpError.FromResponse(response) + if response.status_code == http_wrapper.RESUME_INCOMPLETE: + last_byte = self.__GetLastByte( + self._GetRangeHeaderFromResponse(response)) + if last_byte + 1 != end: + self.stream.seek(last_byte) + return response + + def __SendMediaBody(self, start, additional_headers=None): + """Send the entire media stream in a single request.""" + self.EnsureInitialized() + if self.total_size is None: + raise exceptions.TransferInvalidError( + 'Total size must be known for SendMediaBody') + body_stream = stream_slice.StreamSlice( + self.stream, self.total_size - start) + + request = http_wrapper.Request(url=self.url, http_method='PUT', + body=body_stream) + request.headers['Content-Type'] = self.mime_type + if start == self.total_size: + # End of an upload with 0 bytes left to send; just finalize. + range_string = 'bytes */%s' % self.total_size + else: + range_string = 'bytes %s-%s/%s' % (start, self.total_size - 1, + self.total_size) + + request.headers['Content-Range'] = range_string + if additional_headers: + request.headers.update(additional_headers) + + return self.__SendMediaRequest(request, self.total_size) + + def __SendChunk(self, start, additional_headers=None): + """Send the specified chunk.""" + self.EnsureInitialized() + no_log_body = self.total_size is None + if self.total_size is None: + # For the streaming resumable case, we need to detect when + # we're at the end of the stream. 
+ body_stream = buffered_stream.BufferedStream( + self.stream, start, self.chunksize) + end = body_stream.stream_end_position + if body_stream.stream_exhausted: + self.__total_size = end + # TODO: Here, change body_stream from a stream to a string object, + # which means reading a chunk into memory. This works around + # https://code.google.com/p/httplib2/issues/detail?id=176 which can + # cause httplib2 to skip bytes on 401's for file objects. + # Rework this solution to be more general. + body_stream = body_stream.read(self.chunksize) + else: + end = min(start + self.chunksize, self.total_size) + body_stream = stream_slice.StreamSlice(self.stream, end - start) + # TODO(craigcitro): Think about clearer errors on "no data in + # stream". + request = http_wrapper.Request(url=self.url, http_method='PUT', + body=body_stream) + request.headers['Content-Type'] = self.mime_type + if no_log_body: + # Disable logging of streaming body. + # TODO: Remove no_log_body and rework as part of a larger logs + # refactor. + request.loggable_body = '' + if self.total_size is None: + # Streaming resumable upload case, unknown total size. + range_string = 'bytes %s-%s/*' % (start, end - 1) + elif end == start: + # End of an upload with 0 bytes left to send; just finalize. + range_string = 'bytes */%s' % self.total_size + else: + # Normal resumable upload case with known sizes. + range_string = 'bytes %s-%s/%s' % (start, end - 1, self.total_size) + + request.headers['Content-Range'] = range_string + if additional_headers: + request.headers.update(additional_headers) + + return self.__SendMediaRequest(request, end) diff --git a/gcloud/_apitools/util.py b/gcloud/_apitools/util.py new file mode 100644 index 000000000000..06e01a27e137 --- /dev/null +++ b/gcloud/_apitools/util.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python +"""Assorted utilities shared between parts of apitools.""" + +import collections +import os +import random + +import six +from six.moves import http_client +import six.moves.urllib.error as urllib_error +import six.moves.urllib.parse as urllib_parse +import six.moves.urllib.request as urllib_request + +from apitools.base.protorpclite import messages +from apitools.base.py import encoding +from apitools.base.py import exceptions + +__all__ = [ + 'DetectGae', + 'DetectGce', +] + +_RESERVED_URI_CHARS = r":/?#[]@!$&'()*+,;=" + + +def DetectGae(): + """Determine whether or not we're running on GAE. + + This is based on: + https://developers.google.com/appengine/docs/python/#The_Environment + + Returns: + True iff we're running on GAE. + """ + server_software = os.environ.get('SERVER_SOFTWARE', '') + return (server_software.startswith('Development/') or + server_software.startswith('Google App Engine/')) + + +def DetectGce(): + """Determine whether or not we're running on GCE. + + This is based on: + https://cloud.google.com/compute/docs/metadata#runninggce + + Returns: + True iff we're running on a GCE instance. 
+ """ + try: + o = urllib_request.build_opener(urllib_request.ProxyHandler({})).open( + urllib_request.Request('http://metadata.google.internal')) + except urllib_error.URLError: + return False + return (o.getcode() == http_client.OK and + o.headers.get('metadata-flavor') == 'Google') + + +def NormalizeScopes(scope_spec): + """Normalize scope_spec to a set of strings.""" + if isinstance(scope_spec, six.string_types): + return set(scope_spec.split(' ')) + elif isinstance(scope_spec, collections.Iterable): + return set(scope_spec) + raise exceptions.TypecheckError( + 'NormalizeScopes expected string or iterable, found %s' % ( + type(scope_spec),)) + + +def Typecheck(arg, arg_type, msg=None): + if not isinstance(arg, arg_type): + if msg is None: + if isinstance(arg_type, tuple): + msg = 'Type of arg is "%s", not one of %r' % ( + type(arg), arg_type) + else: + msg = 'Type of arg is "%s", not "%s"' % (type(arg), arg_type) + raise exceptions.TypecheckError(msg) + return arg + + +def ExpandRelativePath(method_config, params, relative_path=None): + """Determine the relative path for request.""" + path = relative_path or method_config.relative_path or '' + + for param in method_config.path_params: + param_template = '{%s}' % param + # For more details about "reserved word expansion", see: + # http://tools.ietf.org/html/rfc6570#section-3.2.2 + reserved_chars = '' + reserved_template = '{+%s}' % param + if reserved_template in path: + reserved_chars = _RESERVED_URI_CHARS + path = path.replace(reserved_template, param_template) + if param_template not in path: + raise exceptions.InvalidUserInputError( + 'Missing path parameter %s' % param) + try: + # TODO(craigcitro): Do we want to support some sophisticated + # mapping here? + value = params[param] + except KeyError: + raise exceptions.InvalidUserInputError( + 'Request missing required parameter %s' % param) + if value is None: + raise exceptions.InvalidUserInputError( + 'Request missing required parameter %s' % param) + try: + if not isinstance(value, six.string_types): + value = str(value) + path = path.replace(param_template, + urllib_parse.quote(value.encode('utf_8'), + reserved_chars)) + except TypeError as e: + raise exceptions.InvalidUserInputError( + 'Error setting required parameter %s to value %s: %s' % ( + param, value, e)) + return path + + +def CalculateWaitForRetry(retry_attempt, max_wait=60): + """Calculates amount of time to wait before a retry attempt. + + Wait time grows exponentially with the number of attempts. A + random amount of jitter is added to spread out retry attempts from + different clients. + + Args: + retry_attempt: Retry attempt counter. + max_wait: Upper bound for wait time [seconds]. + + Returns: + Number of seconds to wait before retrying request. + + """ + + wait_time = 2 ** retry_attempt + max_jitter = wait_time / 4.0 + wait_time += random.uniform(-max_jitter, max_jitter) + return max(1, min(wait_time, max_wait)) + + +def AcceptableMimeType(accept_patterns, mime_type): + """Return True iff mime_type is acceptable for one of accept_patterns. + + Note that this function assumes that all patterns in accept_patterns + will be simple types of the form "type/subtype", where one or both + of these can be "*". We do not support parameters (i.e. "; q=") in + patterns. + + Args: + accept_patterns: list of acceptable MIME types. + mime_type: the mime type we would like to match. + + Returns: + Whether or not mime_type matches (at least) one of these patterns. 
+ """ + if '/' not in mime_type: + raise exceptions.InvalidUserInputError( + 'Invalid MIME type: "%s"' % mime_type) + unsupported_patterns = [p for p in accept_patterns if ';' in p] + if unsupported_patterns: + raise exceptions.GeneratedClientError( + 'MIME patterns with parameter unsupported: "%s"' % ', '.join( + unsupported_patterns)) + + def MimeTypeMatches(pattern, mime_type): + """Return True iff mime_type is acceptable for pattern.""" + # Some systems use a single '*' instead of '*/*'. + if pattern == '*': + pattern = '*/*' + return all(accept in ('*', provided) for accept, provided + in zip(pattern.split('/'), mime_type.split('/'))) + + return any(MimeTypeMatches(pattern, mime_type) + for pattern in accept_patterns) + + +def MapParamNames(params, request_type): + """Reverse parameter remappings for URL construction.""" + return [encoding.GetCustomJsonFieldMapping(request_type, json_name=p) or p + for p in params] + + +def MapRequestParams(params, request_type): + """Perform any renames/remappings needed for URL construction. + + Currently, we have several ways to customize JSON encoding, in + particular of field names and enums. This works fine for JSON + bodies, but also needs to be applied for path and query parameters + in the URL. + + This function takes a dictionary from param names to values, and + performs any registered mappings. We also need the request type (to + look up the mappings). + + Args: + params: (dict) Map from param names to values + request_type: (protorpc.messages.Message) request type for this API call + + Returns: + A new dict of the same size, with all registered mappings applied. + """ + new_params = dict(params) + for param_name, value in params.items(): + field_remapping = encoding.GetCustomJsonFieldMapping( + request_type, python_name=param_name) + if field_remapping is not None: + new_params[field_remapping] = new_params.pop(param_name) + if isinstance(value, messages.Enum): + new_params[param_name] = encoding.GetCustomJsonEnumMapping( + type(value), python_name=str(value)) or str(value) + return new_params diff --git a/pylintrc_default b/pylintrc_default index 11c7b3a77768..ca07e47e9eb6 100644 --- a/pylintrc_default +++ b/pylintrc_default @@ -29,8 +29,10 @@ # os.walk in astroid.modutils.get_module_files. # RATIONALE: # _datastore_v1_pb2.py: protobuf-generated code. +# gcloud/_apitools: vendored-in code (remove when cleaned up) ignore = _datastore_v1_pb2.py + gcloud/_apitools # Pickle collected data for later comparisons. # DEFAULT: persistent=yes diff --git a/tox.ini b/tox.ini index 4cb76f997fac..5828bb84d602 100644 --- a/tox.ini +++ b/tox.ini @@ -55,7 +55,7 @@ deps = {[testenv:docs]deps} passenv = {[testenv:docs]passenv} [pep8] -exclude = gcloud/datastore/_datastore_v1_pb2.py,gcloud/bigtable/_generated/*,docs/conf.py +exclude = gcloud/datastore/_datastore_v1_pb2.py,gcloud/bigtable/_generated/*,docs/conf.py,gcloud/_apitools verbose = 1 [testenv:lint] From e4ab85146b2002d538f7246a7fbe0a818760dcc8 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 15 Oct 2015 17:36:17 -0400 Subject: [PATCH 2/5] Get vendored-in modules importable. 
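The AcceptableMimeType helper retained in the vendored util module above only
understands bare "type/subtype" patterns, where either part may be "*" and a
lone "*" is treated as "*/*"; patterns carrying parameters (a ';' in the
pattern) raise an error instead of matching. A standalone sketch of that
wildcard rule (the _mime_matches name is illustrative, not part of the
module):

    def _mime_matches(pattern, mime_type):
        # A bare '*' is shorthand for '*/*'; parameters (';q=...') are not handled.
        if pattern == '*':
            pattern = '*/*'
        return all(accept in ('*', provided)
                   for accept, provided in zip(pattern.split('/'),
                                               mime_type.split('/')))

    assert _mime_matches('image/*', 'image/png')
    assert not _mime_matches('image/*', 'application/json')
    assert _mime_matches('*', 'text/plain')

AcceptableMimeType itself returns True as soon as any pattern in the accept
list matches, so a catch-all '*/*' entry always yields a match.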
--- gcloud/_apitools/__init__.py | 1 + gcloud/_apitools/buffered_stream.py | 3 +- gcloud/_apitools/exceptions.py | 1 - gcloud/_apitools/http_wrapper.py | 5 +- gcloud/_apitools/stream_slice.py | 3 +- gcloud/_apitools/transfer.py | 11 +-- gcloud/_apitools/util.py | 145 ++-------------------------- 7 files changed, 20 insertions(+), 149 deletions(-) create mode 100644 gcloud/_apitools/__init__.py diff --git a/gcloud/_apitools/__init__.py b/gcloud/_apitools/__init__.py new file mode 100644 index 000000000000..2017be678403 --- /dev/null +++ b/gcloud/_apitools/__init__.py @@ -0,0 +1 @@ +# Vendored-in for from google-apitools 0.4.11 diff --git a/gcloud/_apitools/buffered_stream.py b/gcloud/_apitools/buffered_stream.py index bda7e65c1fba..355e529d5eac 100644 --- a/gcloud/_apitools/buffered_stream.py +++ b/gcloud/_apitools/buffered_stream.py @@ -1,10 +1,9 @@ -#!/usr/bin/env python """Small helper class to provide a small slice of a stream. This class reads ahead to detect if we are at the end of the stream. """ -from apitools.base.py import exceptions +from gcloud._apitools import exceptions # TODO(user): Consider replacing this with a StringIO. diff --git a/gcloud/_apitools/exceptions.py b/gcloud/_apitools/exceptions.py index 1d736197c060..934ab666bf9f 100644 --- a/gcloud/_apitools/exceptions.py +++ b/gcloud/_apitools/exceptions.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """Exceptions for generated client libraries.""" diff --git a/gcloud/_apitools/http_wrapper.py b/gcloud/_apitools/http_wrapper.py index 03a094df0887..4c9c4f8c0d50 100644 --- a/gcloud/_apitools/http_wrapper.py +++ b/gcloud/_apitools/http_wrapper.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """HTTP wrapper for apitools. This library wraps the underlying http library we use, which is @@ -16,8 +15,8 @@ from six.moves import http_client from six.moves.urllib import parse -from apitools.base.py import exceptions -from apitools.base.py import util +from gcloud._apitools import exceptions +from gcloud._apitools import util __all__ = [ 'CheckResponse', diff --git a/gcloud/_apitools/stream_slice.py b/gcloud/_apitools/stream_slice.py index bd43daf91564..974cf133c8ff 100644 --- a/gcloud/_apitools/stream_slice.py +++ b/gcloud/_apitools/stream_slice.py @@ -1,7 +1,6 @@ -#!/usr/bin/env python """Small helper class to provide a small slice of a stream.""" -from apitools.base.py import exceptions +from gcloud._apitools import exceptions class StreamSlice(object): diff --git a/gcloud/_apitools/transfer.py b/gcloud/_apitools/transfer.py index f144582213f6..7792c19b6d52 100644 --- a/gcloud/_apitools/transfer.py +++ b/gcloud/_apitools/transfer.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """Upload and download support for apitools.""" from __future__ import print_function @@ -14,11 +13,11 @@ import six from six.moves import http_client -from apitools.base.py import buffered_stream -from apitools.base.py import exceptions -from apitools.base.py import http_wrapper -from apitools.base.py import stream_slice -from apitools.base.py import util +from gcloud._apitools import buffered_stream +from gcloud._apitools import exceptions +from gcloud._apitools import http_wrapper +from gcloud._apitools import stream_slice +from gcloud._apitools import util __all__ = [ 'Download', diff --git a/gcloud/_apitools/util.py b/gcloud/_apitools/util.py index 06e01a27e137..42dfe8a0861e 100644 --- a/gcloud/_apitools/util.py +++ b/gcloud/_apitools/util.py @@ -1,69 +1,20 @@ -#!/usr/bin/env python -"""Assorted utilities shared between parts of apitools.""" +"""Assorted utilities shared 
between parts of apitools. -import collections -import os -import random - -import six -from six.moves import http_client -import six.moves.urllib.error as urllib_error -import six.moves.urllib.parse as urllib_parse -import six.moves.urllib.request as urllib_request - -from apitools.base.protorpclite import messages -from apitools.base.py import encoding -from apitools.base.py import exceptions - -__all__ = [ - 'DetectGae', - 'DetectGce', -] +Pruned to include only helpers used by other vendored-in modules: -_RESERVED_URI_CHARS = r":/?#[]@!$&'()*+,;=" +``gcloud._apidools.transfer`` uses: +- Typecheck +- AcceptableMimeType -def DetectGae(): - """Determine whether or not we're running on GAE. +``gcloud._apitools.http_wrapper`` uses: - This is based on: - https://developers.google.com/appengine/docs/python/#The_Environment - - Returns: - True iff we're running on GAE. - """ - server_software = os.environ.get('SERVER_SOFTWARE', '') - return (server_software.startswith('Development/') or - server_software.startswith('Google App Engine/')) +- CalculateWaitForRetry +""" +import random -def DetectGce(): - """Determine whether or not we're running on GCE. - - This is based on: - https://cloud.google.com/compute/docs/metadata#runninggce - - Returns: - True iff we're running on a GCE instance. - """ - try: - o = urllib_request.build_opener(urllib_request.ProxyHandler({})).open( - urllib_request.Request('http://metadata.google.internal')) - except urllib_error.URLError: - return False - return (o.getcode() == http_client.OK and - o.headers.get('metadata-flavor') == 'Google') - - -def NormalizeScopes(scope_spec): - """Normalize scope_spec to a set of strings.""" - if isinstance(scope_spec, six.string_types): - return set(scope_spec.split(' ')) - elif isinstance(scope_spec, collections.Iterable): - return set(scope_spec) - raise exceptions.TypecheckError( - 'NormalizeScopes expected string or iterable, found %s' % ( - type(scope_spec),)) +from gcloud._apitools import exceptions def Typecheck(arg, arg_type, msg=None): @@ -78,45 +29,6 @@ def Typecheck(arg, arg_type, msg=None): return arg -def ExpandRelativePath(method_config, params, relative_path=None): - """Determine the relative path for request.""" - path = relative_path or method_config.relative_path or '' - - for param in method_config.path_params: - param_template = '{%s}' % param - # For more details about "reserved word expansion", see: - # http://tools.ietf.org/html/rfc6570#section-3.2.2 - reserved_chars = '' - reserved_template = '{+%s}' % param - if reserved_template in path: - reserved_chars = _RESERVED_URI_CHARS - path = path.replace(reserved_template, param_template) - if param_template not in path: - raise exceptions.InvalidUserInputError( - 'Missing path parameter %s' % param) - try: - # TODO(craigcitro): Do we want to support some sophisticated - # mapping here? 
- value = params[param] - except KeyError: - raise exceptions.InvalidUserInputError( - 'Request missing required parameter %s' % param) - if value is None: - raise exceptions.InvalidUserInputError( - 'Request missing required parameter %s' % param) - try: - if not isinstance(value, six.string_types): - value = str(value) - path = path.replace(param_template, - urllib_parse.quote(value.encode('utf_8'), - reserved_chars)) - except TypeError as e: - raise exceptions.InvalidUserInputError( - 'Error setting required parameter %s to value %s: %s' % ( - param, value, e)) - return path - - def CalculateWaitForRetry(retry_attempt, max_wait=60): """Calculates amount of time to wait before a retry attempt. @@ -173,40 +85,3 @@ def MimeTypeMatches(pattern, mime_type): return any(MimeTypeMatches(pattern, mime_type) for pattern in accept_patterns) - - -def MapParamNames(params, request_type): - """Reverse parameter remappings for URL construction.""" - return [encoding.GetCustomJsonFieldMapping(request_type, json_name=p) or p - for p in params] - - -def MapRequestParams(params, request_type): - """Perform any renames/remappings needed for URL construction. - - Currently, we have several ways to customize JSON encoding, in - particular of field names and enums. This works fine for JSON - bodies, but also needs to be applied for path and query parameters - in the URL. - - This function takes a dictionary from param names to values, and - performs any registered mappings. We also need the request type (to - look up the mappings). - - Args: - params: (dict) Map from param names to values - request_type: (protorpc.messages.Message) request type for this API call - - Returns: - A new dict of the same size, with all registered mappings applied. - """ - new_params = dict(params) - for param_name, value in params.items(): - field_remapping = encoding.GetCustomJsonFieldMapping( - request_type, python_name=param_name) - if field_remapping is not None: - new_params[field_remapping] = new_params.pop(param_name) - if isinstance(value, messages.Enum): - new_params[param_name] = encoding.GetCustomJsonEnumMapping( - type(value), python_name=str(value)) or str(value) - return new_params From d2e4f9939063afb3d80df18c07fa4d50256951d8 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 15 Oct 2015 17:39:28 -0400 Subject: [PATCH 3/5] Drop use of external 'google-apitools' in favor of vendored-in versions. 
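CalculateWaitForRetry is one of the helpers the pruned util module keeps (its
full body appears in the first patch above): the wait grows as 2**attempt
seconds, gets +/- 25% random jitter so that concurrent clients spread out
their retries, and is clamped to the range [1, max_wait]. A minimal
restatement of that formula, useful for sanity-checking expected wait ranges:

    import random

    def wait_for_retry(retry_attempt, max_wait=60):
        # 2 ** attempt seconds, +/- 25% jitter, clamped to [1, max_wait].
        wait_time = 2 ** retry_attempt
        max_jitter = wait_time / 4.0
        wait_time += random.uniform(-max_jitter, max_jitter)
        return max(1, min(wait_time, max_wait))

    # Attempt 3 waits roughly 6-10 seconds; attempt 10 is capped at max_wait.
    print(wait_for_retry(3), wait_for_retry(10))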
--- gcloud/storage/blob.py | 4 ++-- gcloud/storage/test_blob.py | 12 ++++++------ setup.py | 1 - 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/gcloud/storage/blob.py b/gcloud/storage/blob.py index 0be46a6469e6..29243bb8a678 100644 --- a/gcloud/storage/blob.py +++ b/gcloud/storage/blob.py @@ -25,8 +25,8 @@ import six from six.moves.urllib.parse import quote # pylint: disable=F0401 -from apitools.base.py import http_wrapper -from apitools.base.py import transfer +from gcloud._apitools import http_wrapper +from gcloud._apitools import transfer from gcloud._helpers import _RFC3339_MICROS from gcloud._helpers import UTC diff --git a/gcloud/storage/test_blob.py b/gcloud/storage/test_blob.py index a8be71047da7..ed5bd6bdd7da 100644 --- a/gcloud/storage/test_blob.py +++ b/gcloud/storage/test_blob.py @@ -409,8 +409,8 @@ def test_upload_from_file_resumable(self): from six.moves.urllib.parse import urlsplit from tempfile import NamedTemporaryFile from gcloud._testing import _Monkey - from apitools.base.py import http_wrapper - from apitools.base.py import transfer + from gcloud._apitools import http_wrapper + from gcloud._apitools import transfer BLOB_NAME = 'blob-name' UPLOAD_URL = 'http://example.com/upload/name/key' DATA = b'ABCDEF' @@ -470,7 +470,7 @@ def test_upload_from_file_w_slash_in_name(self): from six.moves.urllib.parse import parse_qsl from six.moves.urllib.parse import urlsplit from tempfile import NamedTemporaryFile - from apitools.base.py import http_wrapper + from gcloud._apitools import http_wrapper BLOB_NAME = 'parent/child' UPLOAD_URL = 'http://example.com/upload/name/parent%2Fchild' DATA = b'ABCDEF' @@ -518,7 +518,7 @@ def _upload_from_filename_test_helper(self, properties=None, from six.moves.urllib.parse import parse_qsl from six.moves.urllib.parse import urlsplit from tempfile import NamedTemporaryFile - from apitools.base.py import http_wrapper + from gcloud._apitools import http_wrapper BLOB_NAME = 'blob-name' UPLOAD_URL = 'http://example.com/upload/name/key' DATA = b'ABCDEF' @@ -584,7 +584,7 @@ def test_upload_from_string_w_bytes(self): from six.moves.http_client import OK from six.moves.urllib.parse import parse_qsl from six.moves.urllib.parse import urlsplit - from apitools.base.py import http_wrapper + from gcloud._apitools import http_wrapper BLOB_NAME = 'blob-name' UPLOAD_URL = 'http://example.com/upload/name/key' DATA = b'ABCDEF' @@ -623,7 +623,7 @@ def test_upload_from_string_w_text(self): from six.moves.http_client import OK from six.moves.urllib.parse import parse_qsl from six.moves.urllib.parse import urlsplit - from apitools.base.py import http_wrapper + from gcloud._apitools import http_wrapper BLOB_NAME = 'blob-name' UPLOAD_URL = 'http://example.com/upload/name/key' DATA = u'ABCDEF\u1234' diff --git a/setup.py b/setup.py index 610e463b3cbf..b657d199af3b 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,6 @@ REQUIREMENTS = [ - 'google-apitools', 'httplib2 >= 0.9.1', 'oauth2client >= 1.4.6', 'protobuf >= 3.0.0a3', From b8b2766d0e358cd5a9417f5f3116560a3db79d42 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 15 Oct 2015 21:16:32 -0400 Subject: [PATCH 4/5] Make pylint ignore the 'gcloud/_apitools' modules until cleanups done. 
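With the imports switched over, gcloud/storage/blob.py drives resumable
uploads through the vendored transfer module, whose chunk sender (shown
earlier in this series) distinguishes three Content-Range cases: a chunk of a
streaming upload with unknown total size, a zero-byte finalizing request, and
a normal chunk of a known-size upload. An illustrative helper that mirrors
those cases (content_range is not part of the vendored module):

    def content_range(start, end, total_size):
        # Unknown total size (streaming), nothing left to send (finalize),
        # or a normal chunk of a known-size upload.
        if total_size is None:
            return 'bytes %s-%s/*' % (start, end - 1)
        if end == start:
            return 'bytes */%s' % total_size
        return 'bytes %s-%s/%s' % (start, end - 1, total_size)

    # First 1 MiB chunk of a 5 MiB upload, then the finalizing request:
    print(content_range(0, 1048576, 5242880))        # bytes 0-1048575/5242880
    print(content_range(5242880, 5242880, 5242880))  # bytes */5242880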
--- gcloud/_apitools/buffered_stream.py | 1 + gcloud/_apitools/exceptions.py | 1 + gcloud/_apitools/http_wrapper.py | 1 + gcloud/_apitools/stream_slice.py | 1 + gcloud/_apitools/transfer.py | 1 + gcloud/_apitools/util.py | 1 + pylintrc_default | 2 -- 7 files changed, 6 insertions(+), 2 deletions(-) diff --git a/gcloud/_apitools/buffered_stream.py b/gcloud/_apitools/buffered_stream.py index 355e529d5eac..83205d37aedc 100644 --- a/gcloud/_apitools/buffered_stream.py +++ b/gcloud/_apitools/buffered_stream.py @@ -1,3 +1,4 @@ +# pylint: skip-file """Small helper class to provide a small slice of a stream. This class reads ahead to detect if we are at the end of the stream. diff --git a/gcloud/_apitools/exceptions.py b/gcloud/_apitools/exceptions.py index 934ab666bf9f..14f6e5c2a8b6 100644 --- a/gcloud/_apitools/exceptions.py +++ b/gcloud/_apitools/exceptions.py @@ -1,3 +1,4 @@ +# pylint: skip-file """Exceptions for generated client libraries.""" diff --git a/gcloud/_apitools/http_wrapper.py b/gcloud/_apitools/http_wrapper.py index 4c9c4f8c0d50..7bfedd1ebdfe 100644 --- a/gcloud/_apitools/http_wrapper.py +++ b/gcloud/_apitools/http_wrapper.py @@ -1,3 +1,4 @@ +# pylint: skip-file """HTTP wrapper for apitools. This library wraps the underlying http library we use, which is diff --git a/gcloud/_apitools/stream_slice.py b/gcloud/_apitools/stream_slice.py index 974cf133c8ff..3f202f6628b9 100644 --- a/gcloud/_apitools/stream_slice.py +++ b/gcloud/_apitools/stream_slice.py @@ -1,3 +1,4 @@ +# pylint: skip-file """Small helper class to provide a small slice of a stream.""" from gcloud._apitools import exceptions diff --git a/gcloud/_apitools/transfer.py b/gcloud/_apitools/transfer.py index 7792c19b6d52..1ea924a55973 100644 --- a/gcloud/_apitools/transfer.py +++ b/gcloud/_apitools/transfer.py @@ -1,3 +1,4 @@ +# pylint: skip-file """Upload and download support for apitools.""" from __future__ import print_function diff --git a/gcloud/_apitools/util.py b/gcloud/_apitools/util.py index 42dfe8a0861e..11db18014c0c 100644 --- a/gcloud/_apitools/util.py +++ b/gcloud/_apitools/util.py @@ -1,3 +1,4 @@ +# pylint: skip-file """Assorted utilities shared between parts of apitools. Pruned to include only helpers used by other vendored-in modules: diff --git a/pylintrc_default b/pylintrc_default index ca07e47e9eb6..11c7b3a77768 100644 --- a/pylintrc_default +++ b/pylintrc_default @@ -29,10 +29,8 @@ # os.walk in astroid.modutils.get_module_files. # RATIONALE: # _datastore_v1_pb2.py: protobuf-generated code. -# gcloud/_apitools: vendored-in code (remove when cleaned up) ignore = _datastore_v1_pb2.py - gcloud/_apitools # Pickle collected data for later comparisons. # DEFAULT: persistent=yes From ad1ab44c97743ff08e12344e1b4a7e25b647ebe1 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 15 Oct 2015 21:37:21 -0400 Subject: [PATCH 5/5] Don't test for coverage on 'gcloud/_apitools' by default. --- .coveragerc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.coveragerc b/.coveragerc index 77dd56b8e2e8..5f8115c03edd 100644 --- a/.coveragerc +++ b/.coveragerc @@ -3,6 +3,8 @@ omit = */demo/* */demo.py */_generated/*.py + # Exclude the forked code until tests are complete. + */_apitools/*.py exclude_lines = # Re-enable the standard pragma pragma: NO COVER
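With pep8, pylint, and coverage all carved out for gcloud/_apitools until the
vendored code is cleaned up and gets real tests, a quick smoke check of the
wiring is the main guard in the meantime. A hypothetical example, assuming the
gcloud package is on the path:

    from gcloud._apitools import exceptions, http_wrapper, transfer, util

    # The pruned util module should still cover what the other modules need.
    assert util.CalculateWaitForRetry(0, max_wait=60) >= 1
    assert util.AcceptableMimeType(['*/*'], 'application/octet-stream')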