code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
seconds = offset.days * 24 * 60 * 60 + offset.seconds
microseconds = seconds * 10**6 + offset.microseconds
return microseconds / (10**6 * 1.0)
def total_seconds(offset)
Backport of offset.total_seconds() from python 2.7+.
2.676379
2.62054
1.021308
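A minimal check of the backport against the built-in (the comparison itself needs Python 2.7+, which is what the backport works around):

import datetime

offset = datetime.timedelta(days=1, seconds=30, microseconds=500000)
# 1 * 86400 + 30 + 0.5 == 86430.5 seconds either way.
assert total_seconds(offset) == offset.total_seconds() == 86430.5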
# Check if the string includes a time zone offset. Break out the
# part that doesn't include time zone info. Convert to uppercase
# because all our comparisons should be case-insensitive.
time_zone_match = _TIME_ZONE_RE.search(encoded_datetime)
if time_zone_match:
    time_string = encoded_datetime[:time_zone_match.start(1)].upper()
else:
    time_string = encoded_datetime.upper()

if '.' in time_string:
    format_string = '%Y-%m-%dT%H:%M:%S.%f'
else:
    format_string = '%Y-%m-%dT%H:%M:%S'

decoded_datetime = datetime.datetime.strptime(time_string, format_string)

if not time_zone_match:
    return decoded_datetime

# Time zone info was included in the parameter. Add a tzinfo
# object to the datetime. Datetimes can't be changed after they're
# created, so we'll need to create a new one.
if time_zone_match.group('z'):
    offset_minutes = 0
else:
    sign = time_zone_match.group('sign')
    hours, minutes = [int(value) for value in
                      time_zone_match.group('hours', 'minutes')]
    offset_minutes = hours * 60 + minutes
    if sign == '-':
        offset_minutes *= -1

return datetime.datetime(decoded_datetime.year,
                         decoded_datetime.month,
                         decoded_datetime.day,
                         decoded_datetime.hour,
                         decoded_datetime.minute,
                         decoded_datetime.second,
                         decoded_datetime.microsecond,
                         TimeZoneOffset(offset_minutes))
def decode_datetime(encoded_datetime)
Decode a DateTimeField parameter from a string to a python datetime.

Args:
  encoded_datetime: A string in RFC 3339 format.

Returns:
  A datetime object with the date and time specified in encoded_datetime.

Raises:
  ValueError: If the string is not in a recognized format.
2.417413
2.417941
0.999782
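A usage sketch (assuming the module-level _TIME_ZONE_RE and TimeZoneOffset helpers this function relies on are available alongside it):

naive = decode_datetime('2016-07-14T02:03:04.123456')
print(naive.microsecond)  # 123456, parsed via the '.%f' format branch
aware = decode_datetime('2016-07-14T02:03:04-07:00')
print(aware.utcoffset())  # -1 day, 17:00:00, i.e. a -07:00 offset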
message = super(DateTimeField, self).value_from_message(message)
if message.time_zone_offset is None:
    return datetime.datetime.utcfromtimestamp(
        message.milliseconds / 1000.0)
# Need to subtract the time zone offset, because when we call
# datetime.fromtimestamp, it will add the time zone offset to the
# value we pass.
milliseconds = (message.milliseconds -
                60000 * message.time_zone_offset)
timezone = util.TimeZoneOffset(message.time_zone_offset)
return datetime.datetime.fromtimestamp(milliseconds / 1000.0,
                                       tz=timezone)
def value_from_message(self, message)
Convert DateTimeMessage to a datetime.

Args:
  message: A DateTimeMessage instance.

Returns:
  A datetime instance.
3.270323
3.40798
0.959607
metadata_url = 'http://{}'.format(
    os.environ.get('GCE_METADATA_ROOT', 'metadata.google.internal'))
try:
    o = urllib_request.build_opener(urllib_request.ProxyHandler({})).open(
        urllib_request.Request(
            metadata_url, headers={'Metadata-Flavor': 'Google'}))
except urllib_error.URLError:
    return False
return (o.getcode() == http_client.OK and
        o.headers.get('metadata-flavor') == 'Google')
def DetectGce()
Determine whether or not we're running on GCE.

This is based on:
  https://cloud.google.com/compute/docs/metadata#runninggce

Returns:
  True iff we're running on a GCE instance.
2.974069
3.141432
0.946724
if isinstance(scope_spec, six.string_types):
    return set(scope_spec.split(' '))
elif isinstance(scope_spec, collections.Iterable):
    return set(scope_spec)
raise exceptions.TypecheckError(
    'NormalizeScopes expected string or iterable, found %s' % (
        type(scope_spec),))
def NormalizeScopes(scope_spec)
Normalize scope_spec to a set of strings.
2.765561
2.472041
1.118736
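For example, a space-delimited string and any iterable normalize to the same set:

assert NormalizeScopes('email profile') == {'email', 'profile'}
assert NormalizeScopes(['profile', 'email', 'email']) == {'email', 'profile'}
# NormalizeScopes(42) would raise exceptions.TypecheckError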
path = relative_path or method_config.relative_path or ''
for param in method_config.path_params:
    param_template = '{%s}' % param
    # For more details about "reserved word expansion", see:
    #   http://tools.ietf.org/html/rfc6570#section-3.2.2
    reserved_chars = ''
    reserved_template = '{+%s}' % param
    if reserved_template in path:
        reserved_chars = _RESERVED_URI_CHARS
        path = path.replace(reserved_template, param_template)
    if param_template not in path:
        raise exceptions.InvalidUserInputError(
            'Missing path parameter %s' % param)
    try:
        # TODO(craigcitro): Do we want to support some sophisticated
        # mapping here?
        value = params[param]
    except KeyError:
        raise exceptions.InvalidUserInputError(
            'Request missing required parameter %s' % param)
    if value is None:
        raise exceptions.InvalidUserInputError(
            'Request missing required parameter %s' % param)
    try:
        if not isinstance(value, six.string_types):
            value = str(value)
        path = path.replace(param_template,
                            urllib_parse.quote(value.encode('utf_8'),
                                               reserved_chars))
    except TypeError as e:
        raise exceptions.InvalidUserInputError(
            'Error setting required parameter %s to value %s: %s' % (
                param, value, e))
return path
def ExpandRelativePath(method_config, params, relative_path=None)
Determine the relative path for request.
3.090018
3.015127
1.024838
wait_time = 2 ** retry_attempt
max_jitter = wait_time / 4.0
wait_time += random.uniform(-max_jitter, max_jitter)
return max(1, min(wait_time, max_wait))
def CalculateWaitForRetry(retry_attempt, max_wait=60)
Calculates amount of time to wait before a retry attempt.

Wait time grows exponentially with the number of attempts. A random
amount of jitter is added to spread out retry attempts from different
clients.

Args:
  retry_attempt: Retry attempt counter.
  max_wait: Upper bound for wait time [seconds].

Returns:
  Number of seconds to wait before retrying request.
3.292525
3.184778
1.033832
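A quick illustration of the backoff curve (the deterministic seed is purely for the demo; real callers keep the default jitter):

import random

random.seed(0)  # make the jitter reproducible for the demo
# Base wait is 2**attempt seconds, +/- up to 25% jitter,
# clamped to the range [1, max_wait].
for attempt in range(1, 8):
    print(attempt, round(CalculateWaitForRetry(attempt, max_wait=60), 2))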
if '/' not in mime_type:
    raise exceptions.InvalidUserInputError(
        'Invalid MIME type: "%s"' % mime_type)
unsupported_patterns = [p for p in accept_patterns if ';' in p]
if unsupported_patterns:
    raise exceptions.GeneratedClientError(
        'MIME patterns with parameter unsupported: "%s"' % ', '.join(
            unsupported_patterns))

def MimeTypeMatches(pattern, mime_type):
    # Some systems use a single '*' instead of '*/*'.
    if pattern == '*':
        pattern = '*/*'
    return all(accept in ('*', provided) for accept, provided
               in zip(pattern.split('/'), mime_type.split('/')))

return any(MimeTypeMatches(pattern, mime_type)
           for pattern in accept_patterns)
def AcceptableMimeType(accept_patterns, mime_type)
Return True iff mime_type is acceptable for one of accept_patterns.

Note that this function assumes that all patterns in accept_patterns
will be simple types of the form "type/subtype", where one or both of
these can be "*". We do not support parameters (i.e. "; q=") in
patterns.

Args:
  accept_patterns: list of acceptable MIME types.
  mime_type: the mime type we would like to match.

Returns:
  Whether or not mime_type matches (at least) one of these patterns.
4.318086
4.20408
1.027118
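A few illustrative matches, per the simple type/subtype semantics described above:

assert AcceptableMimeType(['image/*'], 'image/png')
assert AcceptableMimeType(['*'], 'text/plain')  # bare '*' is treated as '*/*'
assert not AcceptableMimeType(['image/*'], 'text/plain')
# AcceptableMimeType(['text/*'], 'png') would raise InvalidUserInputError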
return [encoding.GetCustomJsonFieldMapping(request_type, json_name=p) or p
        for p in params]
def MapParamNames(params, request_type)
Reverse parameter remappings for URL construction.
20.282026
17.419619
1.164321
new_params = dict(params)
for param_name, value in params.items():
    field_remapping = encoding.GetCustomJsonFieldMapping(
        request_type, python_name=param_name)
    if field_remapping is not None:
        new_params[field_remapping] = new_params.pop(param_name)
        param_name = field_remapping
    if isinstance(value, messages.Enum):
        new_params[param_name] = encoding.GetCustomJsonEnumMapping(
            type(value), python_name=str(value)) or str(value)
return new_params
def MapRequestParams(params, request_type)
Perform any renames/remappings needed for URL construction.

Currently, we have several ways to customize JSON encoding, in
particular of field names and enums. This works fine for JSON bodies,
but also needs to be applied for path and query parameters in the URL.

This function takes a dictionary from param names to values, and
performs any registered mappings. We also need the request type (to
look up the mappings).

Args:
  params: (dict) Map from param names to values
  request_type: (protorpc.messages.Message) request type for this
      API call

Returns:
  A new dict of the same size, with all registered mappings applied.
3.423714
3.179737
1.076729
util.Typecheck(json_value, JsonValue)
_ValidateJsonValue(json_value)
if json_value.is_null:
    return None
entries = [(f, json_value.get_assigned_value(f.name))
           for f in json_value.all_fields()]
assigned_entries = [(f, value) for f, value in entries
                    if value is not None]
field, value = assigned_entries[0]
if not isinstance(field, messages.MessageField):
    return value
elif field.message_type is JsonObject:
    return _JsonObjectToPythonValue(value)
elif field.message_type is JsonArray:
    return _JsonArrayToPythonValue(value)
def _JsonValueToPythonValue(json_value)
Convert the given JsonValue to a python value.
3.465993
3.286996
1.054456
if py_value is None:
    return JsonValue(is_null=True)
if isinstance(py_value, bool):
    return JsonValue(boolean_value=py_value)
if isinstance(py_value, six.string_types):
    return JsonValue(string_value=py_value)
if isinstance(py_value, numbers.Number):
    if isinstance(py_value, six.integer_types):
        if _MININT64 < py_value < _MAXINT64:
            return JsonValue(integer_value=py_value)
    return JsonValue(double_value=float(py_value))
if isinstance(py_value, dict):
    return JsonValue(object_value=_PythonValueToJsonObject(py_value))
if isinstance(py_value, collections.Iterable):
    return JsonValue(array_value=_PythonValueToJsonArray(py_value))
raise exceptions.InvalidDataError(
    'Cannot convert "%s" to JsonValue' % py_value)
def _PythonValueToJsonValue(py_value)
Convert the given python value to a JsonValue.
1.795027
1.746453
1.027813
capabilities = [
    messages.Variant.INT64,
    messages.Variant.UINT64,
]
if field.variant not in capabilities:
    return encoding.CodecResult(value=value, complete=False)
if field.repeated:
    result = [str(x) for x in value]
else:
    result = str(value)
return encoding.CodecResult(value=result, complete=True)
def _EncodeInt64Field(field, value)
Handle the special case of int64 as a string.
3.519387
3.385023
1.039694
if field.repeated:
    result = [d.isoformat() for d in value]
else:
    result = value.isoformat()
return encoding.CodecResult(value=result, complete=True)
def _EncodeDateField(field, value)
Encoder for datetime.date objects.
5.197536
4.651879
1.117298
homoglyphs = {
    '\xa0': ' ',      # &nbsp; ?
    '\u00e3': '',     # TODO(gsfowler) drop after .proto spurious char elided
    '\u00a0': ' ',    # &nbsp; ?
    '\u00a9': '(C)',  # COPYRIGHT SIGN (would you believe "asciiglyph"?)
    '\u00ae': '(R)',  # REGISTERED SIGN (would you believe "asciiglyph"?)
    '\u2014': '-',    # EM DASH
    '\u2018': "'",    # LEFT SINGLE QUOTATION MARK
    '\u2019': "'",    # RIGHT SINGLE QUOTATION MARK
    '\u201c': '"',    # LEFT DOUBLE QUOTATION MARK
    '\u201d': '"',    # RIGHT DOUBLE QUOTATION MARK
    '\u2026': '...',  # HORIZONTAL ELLIPSIS
    '\u2e3a': '-',    # TWO-EM DASH
}

def _ReplaceOne(c):
    equiv = homoglyphs.get(c)
    if equiv is not None:
        return equiv
    try:
        c.encode('ascii')
        return c
    except UnicodeError:
        pass
    try:
        return c.encode('unicode-escape').decode('ascii')
    except UnicodeError:
        return '?'

return ''.join([_ReplaceOne(c) for c in s])
def ReplaceHomoglyphs(s)
Returns s with unicode homoglyphs replaced by ascii equivalents.
3.043939
3.020684
1.007699
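For instance, curly quotes, the em dash, and the ellipsis all fold to ASCII:

print(ReplaceHomoglyphs('\u201chello\u201d \u2014 world\u2026'))
# "hello" - world...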
if not isinstance(description, six.string_types):
    return description
if six.PY3:
    # https://docs.python.org/3/reference/lexical_analysis.html#index-18
    description = description.replace('\\N', '\\\\N')
    description = description.replace('\\u', '\\\\u')
    description = description.replace('\\U', '\\\\U')
description = ReplaceHomoglyphs(description)
return description.replace('"""', '" " "')
def CleanDescription(description)
Return a version of description safe for printing in a docstring.
3.521184
3.215562
1.095045
if discovery_url.startswith('http'):
    return [discovery_url]
elif '.' not in discovery_url:
    # Include the offending value in the error message.
    raise ValueError(
        'Unrecognized value "%s" for discovery url' % discovery_url)
api_name, _, api_version = discovery_url.partition('.')
return [
    'https://www.googleapis.com/discovery/v1/apis/%s/%s/rest' % (
        api_name, api_version),
    'https://%s.googleapis.com/$discovery/rest?version=%s' % (
        api_name, api_version),
]
def _NormalizeDiscoveryUrls(discovery_url)
Expands a few abbreviations into full discovery urls.
2.49041
2.426464
1.026353
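For example, the 'api.version' shorthand expands to both canonical discovery endpoints, while a full URL passes through untouched:

print(_NormalizeDiscoveryUrls('storage.v1'))
# ['https://www.googleapis.com/discovery/v1/apis/storage/v1/rest',
#  'https://storage.googleapis.com/$discovery/rest?version=v1']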
f = tempfile.NamedTemporaryFile(suffix='gz', mode='w+b', delete=False)
try:
    f.write(gzipped_content)
    f.close()  # force file synchronization
    with gzip.open(f.name, 'rb') as h:
        decompressed_content = h.read()
    return decompressed_content
finally:
    os.unlink(f.name)
def _Gunzip(gzipped_content)
Returns gunzipped content from gzipped contents.
2.454038
2.457083
0.998761
response = urllib_request.urlopen(url)
encoding = response.info().get('Content-Encoding')
if encoding == 'gzip':
    content = _Gunzip(response.read())
else:
    content = response.read()
return content
def _GetURLContent(url)
Download and return the content of URL.
2.265752
2.373886
0.954449
discovery_urls = _NormalizeDiscoveryUrls(discovery_url)
discovery_doc = None
last_exception = None
for url in discovery_urls:
    for _ in range(retries):
        try:
            content = _GetURLContent(url)
            if isinstance(content, bytes):
                content = content.decode('utf8')
            discovery_doc = json.loads(content)
            break
        except (urllib_error.HTTPError, urllib_error.URLError) as e:
            logging.info(
                'Attempting to fetch discovery doc again after "%s"', e)
            last_exception = e
if discovery_doc is None:
    raise CommunicationError(
        'Could not find discovery doc at any of %s: %s' % (
            discovery_urls, last_exception))
return discovery_doc
def FetchDiscoveryDoc(discovery_url, retries=5)
Fetch the discovery document at the given url.
2.554806
2.492266
1.025093
if not name:
    return name
for prefix in self.__strip_prefixes:
    if name.startswith(prefix):
        return name[len(prefix):]
return name
def __StripName(self, name)
Strip strip_prefix entries from name.
2.833683
2.204185
1.285592
name = re.sub('[^_A-Za-z0-9]', '_', name)
if name[0].isdigit():
    name = '_%s' % name
while keyword.iskeyword(name):
    name = '%s_' % name
# If we end up with __ as a prefix, we'll run afoul of python
# field renaming, so we manually correct for it.
if name.startswith('__'):
    name = 'f%s' % name
return name
def CleanName(name)
Perform generic name cleaning.
4.770554
4.709384
1.012989
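A few examples of the cleaning rules in order (substitution, digit prefix, keyword suffix, dunder prefix):

assert CleanName('max-results') == 'max_results'
assert CleanName('2nd') == '_2nd'
assert CleanName('class') == 'class_'
assert CleanName('__proto') == 'f__proto'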
path_components = path.split('/')
normalized_components = []
for component in path_components:
    if re.match(r'{[A-Za-z0-9_]+}$', component):
        normalized_components.append(
            '{%s}' % Names.CleanName(component[1:-1]))
    else:
        normalized_components.append(component)
return '/'.join(normalized_components)
def NormalizeRelativePath(path)
Normalize camelCase entries in path.
2.721869
2.558244
1.06396
# TODO(craigcitro): Get rid of this case here and in MethodName.
if name is None:
    return name
# TODO(craigcitro): This is a hack to handle the case of specific
# protorpc class names; clean this up.
if name.startswith(('protorpc.', 'message_types.',
                    'apitools.base.protorpclite.',
                    'apitools.base.protorpclite.message_types.')):
    return name
name = self.__StripName(name)
name = self.__ToCamel(name, separator=separator)
return self.CleanName(name)
def ClassName(self, name, separator='_')
Generate a valid class name from name.
5.504889
5.386267
1.022023
if name is None:
    return None
name = Names.__ToCamel(name, separator=separator)
return Names.CleanName(name)
def MethodName(self, name, separator='_')
Generate a valid method name from name.
8.349212
7.86828
1.061123
# TODO(craigcitro): We shouldn't need to strip this name, but some
# of the service names here are excessive. Fix the API and then
# remove this.
name = self.__StripName(name)
if self.__name_convention == 'LOWER_CAMEL':
    name = Names.__ToLowerCamel(name)
elif self.__name_convention == 'LOWER_WITH_UNDER':
    name = Names.__FromCamel(name)
return Names.CleanName(name)
def FieldName(self, name)
Generate a valid field name from name.
8.121869
7.52442
1.079401
scopes = set(
    discovery_doc.get('auth', {}).get('oauth2', {}).get('scopes', {}))
scopes.update(scope_ls)
package = discovery_doc['name']
url_version = discovery_doc['version']
base_url, base_path = _ComputePaths(package, url_version, discovery_doc)

client_info = {
    'package': package,
    'version': NormalizeVersion(discovery_doc['version']),
    'url_version': url_version,
    'scopes': sorted(list(scopes)),
    'client_id': client_id,
    'client_secret': client_secret,
    'user_agent': user_agent,
    'api_key': api_key,
    'base_url': base_url,
    'base_path': base_path,
}
client_class_name = '%s%s' % (
    names.ClassName(client_info['package']),
    names.ClassName(client_info['version']))
client_info['client_class_name'] = client_class_name
return cls(**client_info)
def Create(cls, discovery_doc, scope_ls, client_id, client_secret,
           user_agent, names, api_key)
Create a new ClientInfo object from a discovery document.
2.450586
2.4582
0.996902
old_context = self.__comment_context
self.__comment_context = True
yield
self.__comment_context = old_context
def CommentContext(self)
Print without any argument formatting.
4.36537
3.487255
1.251807
if position is None:
    position = len(_CREDENTIALS_METHODS)
else:
    position = min(position, len(_CREDENTIALS_METHODS))
_CREDENTIALS_METHODS.insert(position, method)
return method
def _RegisterCredentialsMethod(method, position=None)
Register a new method for fetching credentials.

This new method should be a function with signature:
  client_info, **kwds -> Credentials or None
This method can be used as a decorator, unless position needs to
be supplied.

Note that method must *always* accept arbitrary keyword arguments.

Args:
  method: New credential-fetching method.
  position: (default: None) Where in the list of methods to add
      this; if None, we append. In all but rare cases, this should
      be either 0 or None.

Returns:
  method, for use as a decorator.
2.049298
2.412508
0.849447
scopes = util.NormalizeScopes(scopes)
client_info = {
    'client_id': client_id,
    'client_secret': client_secret,
    'scope': ' '.join(sorted(scopes)),
    'user_agent': user_agent or '%s-generated/0.1' % package_name,
}
for method in _CREDENTIALS_METHODS:
    credentials = method(client_info, **kwds)
    if credentials is not None:
        return credentials
credentials_filename = credentials_filename or os.path.expanduser(
    '~/.apitools.token')
credentials = CredentialsFromFile(credentials_filename, client_info,
                                  oauth2client_args=oauth2client_args)
if credentials is not None:
    return credentials
raise exceptions.CredentialsError('Could not create valid credentials')
def GetCredentials(package_name, scopes, client_id, client_secret,
                   user_agent, credentials_filename=None,
                   api_key=None,  # pylint: disable=unused-argument
                   client=None,  # pylint: disable=unused-argument
                   oauth2client_args=None, **kwds)
Attempt to get credentials, using an oauth dance as the last resort.
2.795469
2.651158
1.054433
filename = os.path.expanduser(filename)
# We have two options, based on our version of oauth2client.
if oauth2client.__version__ > '1.5.2':
    # oauth2client >= 2.0.0
    credentials = (
        service_account.ServiceAccountCredentials.from_json_keyfile_name(
            filename, scopes=scopes))
    if credentials is not None:
        if user_agent is not None:
            credentials.user_agent = user_agent
    return credentials
else:
    # oauth2client < 2.0.0
    with open(filename) as keyfile:
        service_account_info = json.load(keyfile)
    account_type = service_account_info.get('type')
    if account_type != oauth2client.client.SERVICE_ACCOUNT:
        raise exceptions.CredentialsError(
            'Invalid service account credentials: %s' % (filename,))
    # pylint: disable=protected-access
    credentials = service_account._ServiceAccountCredentials(
        service_account_id=service_account_info['client_id'],
        service_account_email=service_account_info['client_email'],
        private_key_id=service_account_info['private_key_id'],
        private_key_pkcs8_text=service_account_info['private_key'],
        scopes=scopes, user_agent=user_agent)
    # pylint: enable=protected-access
    return credentials
def ServiceAccountCredentialsFromFile(filename, scopes, user_agent=None)
Use the credentials in filename to create a token for scopes.
2.128118
2.119621
1.004009
private_key_filename = os.path.expanduser(private_key_filename)
scopes = util.NormalizeScopes(scopes)
if oauth2client.__version__ > '1.5.2':
    # oauth2client >= 2.0.0
    credentials = (
        service_account.ServiceAccountCredentials.from_p12_keyfile(
            service_account_name, private_key_filename, scopes=scopes))
    if credentials is not None:
        credentials.user_agent = user_agent
    return credentials
else:
    # oauth2client < 2.0.0
    with open(private_key_filename, 'rb') as key_file:
        return oauth2client.client.SignedJwtAssertionCredentials(
            service_account_name, key_file.read(), scopes,
            user_agent=user_agent)
def ServiceAccountCredentialsFromP12File(
        service_account_name, private_key_filename, scopes, user_agent)
Create a new credential from the named .p12 keyfile.
2.131992
2.131459
1.00025
if use_metadata_ip:
    base_url = os.environ.get('GCE_METADATA_IP', '169.254.169.254')
else:
    base_url = os.environ.get(
        'GCE_METADATA_ROOT', 'metadata.google.internal')
url = 'http://' + base_url + '/computeMetadata/v1/' + relative_url
# Extra header requirement can be found here:
#   https://developers.google.com/compute/docs/metadata
headers = {'Metadata-Flavor': 'Google'}
request = urllib.request.Request(url, headers=headers)
opener = urllib.request.build_opener(urllib.request.ProxyHandler({}))
try:
    response = opener.open(request)
except urllib.error.URLError as e:
    raise exceptions.CommunicationError(
        'Could not reach metadata service: %s' % e.reason)
return response
def _GceMetadataRequest(relative_url, use_metadata_ip=False)
Request the given url from the GCE metadata service.
2.086559
2.084773
1.000857
# There's one rare situation where gsutil will not have argparse
# available, but doesn't need anything depending on argparse anyway,
# since they're bringing their own credentials. So we just allow this
# to fail with an ImportError in those cases.
#
# TODO(craigcitro): Move this import back to the top when we drop
# python 2.6 support (eg when gsutil does).
import argparse

parser = argparse.ArgumentParser(parents=[tools.argparser])
# Get command line argparse flags.
flags, _ = parser.parse_known_args(args=args)

# Allow `gflags` and `argparse` to be used side-by-side.
if hasattr(FLAGS, 'auth_host_name'):
    flags.auth_host_name = FLAGS.auth_host_name
if hasattr(FLAGS, 'auth_host_port'):
    flags.auth_host_port = FLAGS.auth_host_port
if hasattr(FLAGS, 'auth_local_webserver'):
    flags.noauth_local_webserver = (not FLAGS.auth_local_webserver)
return flags
def _GetRunFlowFlags(args=None)
Retrieves command line flags based on gflags module.
5.290605
5.029087
1.052001
user_agent = client_info['user_agent']
scope_key = client_info['scope']
if not isinstance(scope_key, six.string_types):
    scope_key = ':'.join(scope_key)
storage_key = client_info['client_id'] + user_agent + scope_key

if _NEW_FILESTORE:
    credential_store = multiprocess_file_storage.MultiprocessFileStorage(
        path, storage_key)
else:
    credential_store = multistore_file.get_credential_storage_custom_string_key(  # noqa
        path, storage_key)
if hasattr(FLAGS, 'auth_local_webserver'):
    FLAGS.auth_local_webserver = False

credentials = credential_store.get()
if credentials is None or credentials.invalid:
    print('Generating new OAuth credentials ...')
    for _ in range(20):
        # If authorization fails, we want to retry, rather than let this
        # cascade up and get caught elsewhere. If users want out of the
        # retry loop, they can ^C.
        try:
            flow = oauth2client.client.OAuth2WebServerFlow(**client_info)
            flags = _GetRunFlowFlags(args=oauth2client_args)
            credentials = tools.run_flow(flow, credential_store, flags)
            break
        except (oauth2client.client.FlowExchangeError, SystemExit) as e:
            # Here SystemExit is "no credential at all", and the
            # FlowExchangeError is "invalid" -- usually because
            # you reused a token.
            print('Invalid authorization: %s' % (e,))
        except httplib2.HttpLib2Error as e:
            print('Communication error: %s' % (e,))
            raise exceptions.CredentialsError(
                'Communication error creating credentials: %s' % e)
return credentials
def CredentialsFromFile(path, client_info, oauth2client_args=None)
Read credentials from a file.
4.574462
4.59552
0.995418
http = http or httplib2.Http()
url = _GetUserinfoUrl(credentials)
# We ignore communication woes here (i.e. SSL errors, socket
# timeout), as handling these should be done in a common location.
response, content = http.request(url)
if response.status == http_client.BAD_REQUEST:
    credentials.refresh(http)
    url = _GetUserinfoUrl(credentials)
    response, content = http.request(url)
return json.loads(content or '{}')
def GetUserinfo(credentials, http=None)  # pylint: disable=invalid-name
Get the userinfo associated with the given credentials.

This is dependent on the token having either the userinfo.email or
userinfo.profile scope for the given token.

Args:
  credentials: (oauth2client.client.Credentials) incoming credentials
  http: (httplib2.Http, optional) http instance to use

Returns:
  The email address for this token, or None if the required scopes
  aren't available.
4.942995
6.210207
0.795947
if ((service_account_name and not service_account_keyfile) or
        (service_account_keyfile and not service_account_name)):
    raise exceptions.CredentialsError(
        'Service account name or keyfile provided without the other')
scopes = client_info['scope'].split()
user_agent = client_info['user_agent']
# Use the .json credentials, if provided.
if service_account_json_keyfile:
    return ServiceAccountCredentialsFromFile(
        service_account_json_keyfile, scopes, user_agent=user_agent)
# Fall back to .p12 if there's no .json credentials.
if service_account_name is not None:
    return ServiceAccountCredentialsFromP12File(
        service_account_name, service_account_keyfile, scopes,
        user_agent)
def _GetServiceAccountCredentials(
        client_info, service_account_name=None,
        service_account_keyfile=None,
        service_account_json_keyfile=None, **unused_kwds)
Returns ServiceAccountCredentials from the given file.
2.780754
2.682171
1.036755
scopes = client_info['scope'].split()
if skip_application_default_credentials:
    return None
gc = oauth2client.client.GoogleCredentials
with cache_file_lock:
    try:
        # pylint: disable=protected-access
        # We've already done our own check for GAE/GCE
        # credentials, we don't want to pay for checking again.
        credentials = gc._implicit_credentials_from_files()
    except oauth2client.client.ApplicationDefaultCredentialsError:
        return None
# If we got back a non-service account credential, we need to use
# a heuristic to decide whether or not the application default
# credential will work for us. We assume that if we're requesting
# cloud-platform, our scopes are a subset of cloud scopes, and the
# ADC will work.
cp = 'https://www.googleapis.com/auth/cloud-platform'
if credentials is None:
    return None
if not isinstance(credentials, gc) or cp in scopes:
    return credentials.create_scoped(scopes)
return None
def _GetApplicationDefaultCredentials(
        client_info, skip_application_default_credentials=False,
        **unused_kwds)
Returns ADC with right scopes.
5.627213
5.525358
1.018434
creds = {  # Credentials metadata dict.
    'scopes': sorted(list(scopes)) if scopes else None,
    'svc_acct_name': self.__service_account_name,
}
cache_file = _MultiProcessCacheFile(cache_filename)
try:
    cached_creds_str = cache_file.LockedRead()
    if not cached_creds_str:
        return None
    cached_creds = json.loads(cached_creds_str)
    if creds['svc_acct_name'] == cached_creds['svc_acct_name']:
        if creds['scopes'] in (None, cached_creds['scopes']):
            return cached_creds['scopes']
except KeyboardInterrupt:
    raise
except:  # pylint: disable=bare-except
    # Treat exceptions as a cache miss.
    pass
def _CheckCacheFileForMatch(self, cache_filename, scopes)
Checks the cache file to see if it matches the given credentials.

Args:
  cache_filename: Cache filename to check.
  scopes: Scopes for the desired credentials.

Returns:
  List of scopes (if cache matches) or None.
3.650684
3.735316
0.977343
# Credentials metadata dict.
creds = {'scopes': sorted(list(scopes)),
         'svc_acct_name': self.__service_account_name}
creds_str = json.dumps(creds)
cache_file = _MultiProcessCacheFile(cache_filename)
try:
    cache_file.LockedWrite(creds_str)
except KeyboardInterrupt:
    raise
except:  # pylint: disable=bare-except
    # Treat exceptions as a cache miss.
    pass
def _WriteCacheFile(self, cache_filename, scopes)
Writes the credential metadata to the cache file.

This does not save the credentials themselves (CredentialStore class
optionally handles that after this class is initialized).

Args:
  cache_filename: Cache filename to check.
  scopes: Scopes for the desired credentials.
5.610873
5.80194
0.967068
if not util.DetectGce():
    raise exceptions.ResourceUnavailableError(
        'GCE credentials requested outside a GCE instance')
if not self.GetServiceAccount(self.__service_account_name):
    raise exceptions.ResourceUnavailableError(
        'GCE credentials requested but service account '
        '%s does not exist.' % self.__service_account_name)
if scopes:
    scope_ls = util.NormalizeScopes(scopes)
    instance_scopes = self.GetInstanceScopes()
    if scope_ls > instance_scopes:
        raise exceptions.CredentialsError(
            'Instance did not have access to scopes %s' % (
                sorted(list(scope_ls - instance_scopes)),))
else:
    scopes = self.GetInstanceScopes()
return scopes
def _ScopesFromMetadataServer(self, scopes)
Returns instance scopes based on GCE metadata server.
4.495979
4.240528
1.06024
relative_url = 'instance/service-accounts/{0}/token'.format(
    self.__service_account_name)
try:
    response = _GceMetadataRequest(relative_url)
except exceptions.CommunicationError:
    self.invalid = True
    if self.store:
        self.store.locked_put(self)
    raise
content = response.read()
try:
    credential_info = json.loads(content)
except ValueError:
    raise exceptions.CredentialsError(
        'Could not parse response as JSON: %s' % content)
self.access_token = credential_info['access_token']
if 'expires_in' in credential_info:
    expires_in = int(credential_info['expires_in'])
    self.token_expiry = (
        datetime.timedelta(seconds=expires_in) +
        datetime.datetime.utcnow())
else:
    self.token_expiry = None
self.invalid = False
if self.store:
    self.store.locked_put(self)
def _do_refresh_request(self, unused_http_request)
Refresh self.access_token by querying the metadata server.

If self.store is initialized, store acquired credentials there.
3.130414
2.814344
1.112307
# pylint: disable=import-error
from google.appengine.api import app_identity
try:
    token, _ = app_identity.get_access_token(self._scopes)
except app_identity.Error as e:
    raise exceptions.CredentialsError(str(e))
self.access_token = token
def _refresh(self, _)
Refresh self.access_token.

Args:
  _: (ignored) A function matching httplib2.Http.request's signature.
3.744386
3.061532
1.223043
try:
    is_locked = self._process_lock.acquire(timeout=self._lock_timeout)
    yield is_locked
finally:
    if is_locked:
        self._process_lock.release()
def _ProcessLockAcquired(self)
Context manager for process locks with timeout.
3.088618
2.649801
1.165604
file_contents = None
with self._thread_lock:
    if not self._EnsureFileExists():
        return None
    with self._process_lock_getter() as acquired_plock:
        if not acquired_plock:
            return None
        with open(self._filename, 'rb') as f:
            file_contents = f.read().decode(encoding=self._encoding)
return file_contents
def LockedRead(self)
Acquire an interprocess lock and dump cache contents.

This method safely acquires the locks then reads a string from the
cache file. If the file does not exist and cannot be created, it will
return None. If the locks cannot be acquired, this will also return
None.

Returns:
  cache data - string if present, None on failure.
4.508183
4.109715
1.096958
if isinstance(cache_data, six.text_type):
    cache_data = cache_data.encode(encoding=self._encoding)
with self._thread_lock:
    if not self._EnsureFileExists():
        return False
    with self._process_lock_getter() as acquired_plock:
        if not acquired_plock:
            return False
        with open(self._filename, 'wb') as f:
            f.write(cache_data)
        return True
def LockedWrite(self, cache_data)
Acquire an interprocess lock and write a string.

This method safely acquires the locks then writes a string to the
cache file. If the string is written successfully the function will
return True, if the write fails for any reason it will return False.

Args:
  cache_data: string or bytes to write.

Returns:
  bool: success
3.631351
3.465033
1.047999
if not os.path.exists(self._filename):
    old_umask = os.umask(0o177)
    try:
        open(self._filename, 'a+b').close()
    except OSError:
        return False
    finally:
        os.umask(old_umask)
return True
def _EnsureFileExists(self)
Touches a file; returns False on error, True on success.
2.376425
2.090463
1.136794
request = encoding.CopyProtoMessage(request)
setattr(request, current_token_attribute, None)
while limit is None or limit:
    if batch_size_attribute:
        # On Py3, None is not comparable so min() below will fail.
        # On Py2, None is always less than any number so if batch_size
        # is None, the request_batch_size will always be None regardless
        # of the value of limit. This doesn't generally strike me as the
        # correct behavior, but this change preserves the existing Py2
        # behavior on Py3.
        if batch_size is None:
            request_batch_size = None
        else:
            request_batch_size = min(batch_size, limit or batch_size)
        setattr(request, batch_size_attribute, request_batch_size)
    response = getattr(service, method)(request,
                                        global_params=global_params)
    items = getattr(response, field)
    if predicate:
        items = list(filter(predicate, items))
    for item in items:
        yield item
        if limit is None:
            continue
        limit -= 1
        if not limit:
            return
    token = getattr(response, next_token_attribute)
    if not token:
        return
    setattr(request, current_token_attribute, token)
def YieldFromList(
        service, request, global_params=None, limit=None, batch_size=100,
        method='List', field='items', predicate=None,
        current_token_attribute='pageToken',
        next_token_attribute='nextPageToken',
        batch_size_attribute='maxResults')
Make a series of List requests, keeping track of page tokens.

Args:
  service: apitools_base.BaseApiService, A service with a .List()
      method.
  request: protorpc.messages.Message, The request message
      corresponding to the service's .List() method, with all the
      attributes populated except the .maxResults and .pageToken
      attributes.
  global_params: protorpc.messages.Message, The global query
      parameters to provide when calling the given method.
  limit: int, The maximum number of records to yield. None if all
      available records should be yielded.
  batch_size: int, The number of items to retrieve per request.
  method: str, The name of the method used to fetch resources.
  field: str, The field in the response that will be a list of items.
  predicate: lambda, A function that returns true for items to be
      yielded.
  current_token_attribute: str, The name of the attribute in a
      request message holding the page token for the page being
      requested.
  next_token_attribute: str, The name of the attribute in a response
      message holding the page token for the next page.
  batch_size_attribute: str, The name of the attribute in a response
      message holding the maximum number of results to be returned.
      None if caller-specified batch size is unsupported.

Yields:
  protorpc.message.Message, The resources listed by the service.
3.265479
3.409728
0.957695
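A hedged usage sketch: the client, request type, and bucket below are hypothetical stand-ins; any List method whose response carries items plus a nextPageToken would work the same way.

# Hypothetical paginated service; names are illustrative only.
client = storage_client.StorageV1()
request = messages.StorageObjectsListRequest(bucket='my-bucket')
# Transparently walks pages of 100 until 500 objects have been yielded.
for obj in YieldFromList(client.objects, request,
                         limit=500, batch_size=100, field='items'):
    print(obj.name)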
if method_info.description:
    description = util.CleanDescription(method_info.description)
    first_line, newline, remaining = method_info.description.partition(
        '\n')
    if not first_line.endswith('.'):
        first_line = '%s.' % first_line
    description = '%s%s%s' % (first_line, newline, remaining)
else:
    description = '%s method for the %s service.' % (method_name, name)
with printer.CommentContext():
    printer('r"""%s"""', description)
def __PrintDocstring(self, printer, method_info, method_name, name)
Print a docstring for a service method.
4.140004
3.819187
1.084001
printer()
printer('service %s {', self.__GetServiceClassName(name))
with printer.Indent():
    for method_name, method_info in method_info_map.items():
        for line in textwrap.wrap(method_info.description,
                                  printer.CalculateWidth() - 3):
            printer('// %s', line)
        printer('rpc %s (%s) returns (%s);',
                method_name,
                method_info.request_type_name,
                method_info.response_type_name)
printer('}')
def __WriteProtoServiceDeclaration(self, printer, name, method_info_map)
Write a single service declaration to a proto file.
2.566642
2.478991
1.035358
self.Validate()
client_info = self.__client_info
printer('// Generated services for %s version %s.',
        client_info.package, client_info.version)
printer()
printer('syntax = "proto2";')
printer('package %s;', self.__package)
printer('import "%s";', client_info.messages_proto_file_name)
printer()
for name, method_info_map in self.__service_method_info_map.items():
    self.__WriteProtoServiceDeclaration(printer, name, method_info_map)
def WriteProtoFile(self, printer)
Write the services in this registry to out as proto.
3.681227
3.333413
1.104342
self.Validate()
client_info = self.__client_info
printer('', client_info.package, client_info.version)
printer('# NOTE: This file is autogenerated and should not be edited '
        'by hand.')
printer('from %s import base_api', self.__base_files_package)
if self.__root_package:
    import_prefix = 'from {0} '.format(self.__root_package)
else:
    import_prefix = ''
printer('%simport %s as messages', import_prefix,
        client_info.messages_rule_name)
printer()
printer()
printer('class %s(base_api.BaseApiClient):',
        client_info.client_class_name)
with printer.Indent():
    printer('', client_info.package, client_info.version)
    printer()
    printer('MESSAGES_MODULE = messages')
    printer('BASE_URL = {0!r}'.format(client_info.base_url))
    printer()
    printer('_PACKAGE = {0!r}'.format(client_info.package))
    printer('_SCOPES = {0!r}'.format(
        client_info.scopes or
        ['https://www.googleapis.com/auth/userinfo.email']))
    printer('_VERSION = {0!r}'.format(client_info.version))
    printer('_CLIENT_ID = {0!r}'.format(client_info.client_id))
    printer('_CLIENT_SECRET = {0!r}'.format(client_info.client_secret))
    printer('_USER_AGENT = {0!r}'.format(client_info.user_agent))
    printer('_CLIENT_CLASS_NAME = {0!r}'.format(
        client_info.client_class_name))
    printer('_URL_VERSION = {0!r}'.format(client_info.url_version))
    printer('_API_KEY = {0!r}'.format(client_info.api_key))
    printer()
    printer("def __init__(self, url='', credentials=None,")
    with printer.Indent(indent=' '):
        printer('get_credentials=True, http=None, model=None,')
        printer('log_request=False, log_response=False,')
        printer('credentials_args=None, default_global_params=None,')
        printer('additional_http_headers=None, '
                'response_encoding=None):')
    with printer.Indent():
        printer('', client_info.package)
        printer('url = url or self.BASE_URL')
        printer('super(%s, self).__init__(',
                client_info.client_class_name)
        printer(' url, credentials=credentials,')
        printer(' get_credentials=get_credentials, http=http, '
                'model=model,')
        printer(' log_request=log_request, '
                'log_response=log_response,')
        printer(' credentials_args=credentials_args,')
        printer(' default_global_params=default_global_params,')
        printer(' additional_http_headers=additional_http_headers,')
        printer(' response_encoding=response_encoding)')
        for name in self.__service_method_info_map.keys():
            printer('self.%s = self.%s(self)',
                    name, self.__GetServiceClassName(name))
for name, method_info in self.__service_method_info_map.items():
    self.__WriteSingleService(
        printer, name, method_info, client_info.client_class_name)
def WriteFile(self, printer)
Write the services in this registry to out.
2.24452
2.211348
1.015001
schema = {}
schema['id'] = self.__names.ClassName('%sRequest' % (
    self.__names.ClassName(method_description['id'], separator='.'),))
schema['type'] = 'object'
schema['properties'] = collections.OrderedDict()
if 'parameterOrder' not in method_description:
    ordered_parameters = list(method_description.get('parameters', []))
else:
    ordered_parameters = method_description['parameterOrder'][:]
    for k in method_description['parameters']:
        if k not in ordered_parameters:
            ordered_parameters.append(k)
for parameter_name in ordered_parameters:
    field_name = self.__names.CleanName(parameter_name)
    field = dict(method_description['parameters'][parameter_name])
    if 'type' not in field:
        raise ValueError('No type found in parameter %s' % field)
    schema['properties'][field_name] = field
if body_type is not None:
    body_field_name = self.__GetRequestField(
        method_description, body_type)
    if body_field_name in schema['properties']:
        raise ValueError('Failed to normalize request resource name')
    if 'description' not in body_type:
        body_type['description'] = (
            'A %s resource to be passed as the request body.' % (
                self.__GetRequestType(body_type),))
    schema['properties'][body_field_name] = body_type
self.__message_registry.AddDescriptorFromSchema(schema['id'], schema)
return schema['id']
def __CreateRequestType(self, method_description, body_type=None)
Create a request type for this method.
3.059279
3.044058
1.005
schema = {}
method_name = self.__names.ClassName(
    method_description['id'], separator='.')
schema['id'] = self.__names.ClassName('%sResponse' % method_name)
schema['type'] = 'object'
schema['description'] = 'An empty %s response.' % method_name
self.__message_registry.AddDescriptorFromSchema(schema['id'], schema)
return schema['id']
def __CreateVoidResponseType(self, method_description)
Create an empty response type.
5.648316
5.150361
1.096684
if not request_type:
    return True
method_id = method_description.get('id', '')
if method_id in self.__unelidable_request_methods:
    return True
message = self.__message_registry.LookupDescriptorOrDie(request_type)
if message is None:
    return True
field_names = [x.name for x in message.fields]
parameters = method_description.get('parameters', {})
for param_name, param_info in parameters.items():
    if (param_info.get('location') != 'path' or
            self.__names.CleanName(param_name) not in field_names):
        break
else:
    return False
return True
def __NeedRequestType(self, method_description, request_type)
Determine if this method needs a new request type created.
3.729894
3.652825
1.021099
size_groups = re.match(r'(?P<size>\d+)(?P<unit>.B)?$', max_size)
if size_groups is None:
    raise ValueError('Could not parse maxSize')
size, unit = size_groups.group('size', 'unit')
shift = 0
if unit is not None:
    unit_dict = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}
    shift = unit_dict.get(unit.upper())
    if shift is None:
        raise ValueError('Unknown unit %s' % unit)
return int(size) * (1 << shift)
def __MaxSizeToInt(self, max_size)
Convert max_size to an int.
2.573007
2.402274
1.071071
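To make the unit arithmetic concrete, here is a standalone sketch of the same parsing (a plain function for illustration, not the private method itself):

import re

def max_size_to_int(max_size):
    # Mirrors __MaxSizeToInt: '<digits>' plus an optional binary unit.
    match = re.match(r'(?P<size>\d+)(?P<unit>.B)?$', max_size)
    size, unit = match.group('size', 'unit')
    shift = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}[unit.upper()] if unit else 0
    return int(size) * (1 << shift)

assert max_size_to_int('10MB') == 10 * 2 ** 20 == 10485760
assert max_size_to_int('5') == 5  # a bare number is already bytes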
config = base_api.ApiUploadInfo()
if 'maxSize' in media_upload_config:
    config.max_size = self.__MaxSizeToInt(
        media_upload_config['maxSize'])
if 'accept' not in media_upload_config:
    logging.warn(
        'No accept types found for upload configuration in '
        'method %s, using */*', method_id)
# Default must be a list: iterating the bare string '*/*' would add
# the individual characters '*', '/', '*' as accept patterns.
config.accept.extend([
    str(a) for a in media_upload_config.get('accept', ['*/*'])])

for accept_pattern in config.accept:
    if not _MIME_PATTERN_RE.match(accept_pattern):
        logging.warn('Unexpected MIME type: %s', accept_pattern)
protocols = media_upload_config.get('protocols', {})
for protocol in ('simple', 'resumable'):
    media = protocols.get(protocol, {})
    for attr in ('multipart', 'path'):
        if attr in media:
            setattr(config, '%s_%s' % (protocol, attr), media[attr])
return config
def __ComputeUploadConfig(self, media_upload_config, method_id)
Fill out the upload config for this method.
3.657346
3.600462
1.015799
relative_path = self.__names.NormalizeRelativePath(
    ''.join((self.__client_info.base_path,
             method_description['path'])))
method_id = method_description['id']
ordered_params = []
for param_name in method_description.get('parameterOrder', []):
    param_info = method_description['parameters'][param_name]
    if param_info.get('required', False):
        ordered_params.append(param_name)
method_info = base_api.ApiMethodInfo(
    relative_path=relative_path,
    method_id=method_id,
    http_method=method_description['httpMethod'],
    description=util.CleanDescription(
        method_description.get('description', '')),
    query_params=[],
    path_params=[],
    ordered_params=ordered_params,
    request_type_name=self.__names.ClassName(request),
    response_type_name=self.__names.ClassName(response),
    request_field=request_field,
)
flat_path = method_description.get('flatPath', None)
if flat_path is not None:
    flat_path = self.__names.NormalizeRelativePath(
        self.__client_info.base_path + flat_path)
    if flat_path != relative_path:
        method_info.flat_path = flat_path
if method_description.get('supportsMediaUpload', False):
    method_info.upload_config = self.__ComputeUploadConfig(
        method_description.get('mediaUpload'), method_id)
method_info.supports_download = method_description.get(
    'supportsMediaDownload', False)
self.__all_scopes.update(method_description.get('scopes', ()))
for param, desc in method_description.get('parameters', {}).items():
    param = self.__names.CleanName(param)
    location = desc['location']
    if location == 'query':
        method_info.query_params.append(param)
    elif location == 'path':
        method_info.path_params.append(param)
    else:
        raise ValueError(
            'Unknown parameter location %s for parameter %s' % (
                location, param))
method_info.path_params.sort()
method_info.query_params.sort()
return method_info
def __ComputeMethodInfo(self, method_description, request, response,
                        request_field)
Compute the base_api.ApiMethodInfo for this method.
2.282131
2.283422
0.999434
body_field_name = self.__BodyFieldName(body_type)
if body_field_name in method_description.get('parameters', {}):
    body_field_name = self.__names.FieldName(
        '%s_resource' % body_field_name)
# It's exceedingly unlikely that we'd get two name collisions, which
# means it's bound to happen at some point.
while body_field_name in method_description.get('parameters', {}):
    body_field_name = self.__names.FieldName(
        '%s_body' % body_field_name)
return body_field_name
def __GetRequestField(self, method_description, body_type)
Determine the request field for this method.
4.116657
4.057324
1.014624
service_name = self.__names.CleanName(service_name)
method_descriptions = methods.get('methods', {})
method_info_map = collections.OrderedDict()
items = sorted(method_descriptions.items())
for method_name, method_description in items:
    method_name = self.__names.MethodName(method_name)

    # NOTE: According to the discovery document, if the request or
    # response is present, it will simply contain a `$ref`.
    body_type = method_description.get('request')
    if body_type is None:
        request_type = None
    else:
        request_type = self.__GetRequestType(body_type)
    if self.__NeedRequestType(method_description, request_type):
        request = self.__CreateRequestType(
            method_description, body_type=body_type)
        request_field = self.__GetRequestField(
            method_description, body_type)
    else:
        request = request_type
        request_field = base_api.REQUEST_IS_BODY

    if 'response' in method_description:
        response = method_description['response']['$ref']
    else:
        response = self.__CreateVoidResponseType(method_description)

    method_info_map[method_name] = self.__ComputeMethodInfo(
        method_description, request, response, request_field)

nested_services = methods.get('resources', {})
services = sorted(nested_services.items())
for subservice_name, submethods in services:
    new_service_name = '%s_%s' % (service_name, subservice_name)
    self.AddServiceFromResource(new_service_name, submethods)

self.__RegisterService(service_name, method_info_map)
def AddServiceFromResource(self, service_name, methods)
Add a new service named service_name with the given methods.
3.412632
3.430616
0.994758
buf = io.BytesIO()
with GzipFile(fileobj=buf, mode='wb', compresslevel=compresslevel) as f:
    f.write(data)
return buf.getvalue()
def compress(data, compresslevel=9)
Compress data in one shot and return the compressed string. Optional argument is the compression level, in range of 0-9.
1.859637
2.25396
0.825053
with GzipFile(fileobj=io.BytesIO(data)) as f:
    return f.read()
def decompress(data)
Decompress a gzip compressed string in one shot. Return the decompressed string.
3.342492
3.312952
1.008916
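These two helpers are inverses; a quick round trip:

payload = b'hello hello hello'
assert decompress(compress(payload, compresslevel=9)) == payload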
if self.mode != READ:
    raise OSError("Can't rewind in write mode")
self.fileobj.seek(0)
self._new_member = True
self.extrabuf = b""
self.extrasize = 0
self.extrastart = 0
self.offset = 0
def rewind(self)
Return the uncompressed stream file position indicator to the beginning of the file.
6.300933
4.124669
1.527622
if http_request.loggable_body is None:
    yield
    return
old_level = httplib2.debuglevel
http_levels = {}
httplib2.debuglevel = level
if http is not None:
    for connection_key, connection in http.connections.items():
        # httplib2 stores two kinds of values in this dict, connection
        # classes and instances. Since the connection types are all
        # old-style classes, we can't easily distinguish by connection
        # type -- so instead we use the key pattern.
        if ':' not in connection_key:
            continue
        http_levels[connection_key] = connection.debuglevel
        connection.set_debuglevel(level)
yield
httplib2.debuglevel = old_level
if http is not None:
    for connection_key, old_level in http_levels.items():
        if connection_key in http.connections:
            http.connections[connection_key].set_debuglevel(old_level)
def _Httplib2Debuglevel(http_request, level, http=None)
Temporarily change the value of httplib2.debuglevel, if necessary.

If http_request has a `loggable_body` distinct from `body`, then we
need to prevent httplib2 from logging the full body. This sets
httplib2.debuglevel for the duration of the `with` block; however,
that alone won't change the value of existing HTTP connections. If an
httplib2.Http object is provided, we'll also change the level on any
cached connections attached to it.

Args:
  http_request: a Request we're logging.
  level: (int) the debuglevel for logging.
  http: (optional) an httplib2.Http whose connections we should set
      the debuglevel on.

Yields:
  None.
3.357979
3.137661
1.070217
if getattr(http, 'connections', None):
    for conn_key in list(http.connections.keys()):
        if ':' in conn_key:
            del http.connections[conn_key]
def RebuildHttpConnections(http)
Rebuilds all http connections in the httplib2.Http instance.

httplib2 overloads the map in http.connections to contain two
different types of values:
  { scheme string: connection class } and
  { scheme + authority string : actual http connection }
Here we remove all of the entries for actual connections so that on
the next request httplib2 will rebuild them from the connection
types.

Args:
  http: An httplib2.Http instance.
3.321159
3.636966
0.913167
# If the server indicates how long to wait, use that value. Otherwise,
# calculate the wait time on our own.
retry_after = None

# Transport failures
if isinstance(retry_args.exc, (http_client.BadStatusLine,
                               http_client.IncompleteRead,
                               http_client.ResponseNotReady)):
    logging.debug('Caught HTTP error %s, retrying: %s',
                  type(retry_args.exc).__name__, retry_args.exc)
elif isinstance(retry_args.exc, socket.error):
    logging.debug('Caught socket error, retrying: %s', retry_args.exc)
elif isinstance(retry_args.exc, socket.gaierror):
    logging.debug(
        'Caught socket address error, retrying: %s', retry_args.exc)
elif isinstance(retry_args.exc, socket.timeout):
    logging.debug(
        'Caught socket timeout error, retrying: %s', retry_args.exc)
elif isinstance(retry_args.exc, httplib2.ServerNotFoundError):
    logging.debug(
        'Caught server not found error, retrying: %s', retry_args.exc)
elif isinstance(retry_args.exc, ValueError):
    # oauth2client tries to JSON-decode the response, which can result
    # in a ValueError if the response was invalid. Until that is fixed
    # in oauth2client, need to handle it here.
    logging.debug('Response content was invalid (%s), retrying',
                  retry_args.exc)
elif (isinstance(retry_args.exc, TokenRefreshError) and
      hasattr(retry_args.exc, 'status') and
      (retry_args.exc.status == TOO_MANY_REQUESTS or
       retry_args.exc.status >= 500)):
    logging.debug(
        'Caught transient credential refresh error (%s), retrying',
        retry_args.exc)
elif isinstance(retry_args.exc, exceptions.RequestError):
    logging.debug('Request returned no response, retrying')
# API-level failures
elif isinstance(retry_args.exc, exceptions.BadStatusCodeError):
    logging.debug('Response returned status %s, retrying',
                  retry_args.exc.status_code)
elif isinstance(retry_args.exc, exceptions.RetryAfterError):
    logging.debug('Response returned a retry-after header, retrying')
    retry_after = retry_args.exc.retry_after
else:
    raise retry_args.exc
RebuildHttpConnections(retry_args.http)
logging.debug('Retrying request to url %s after exception %s',
              retry_args.http_request.url, retry_args.exc)
time.sleep(
    retry_after or util.CalculateWaitForRetry(
        retry_args.num_retries, max_wait=retry_args.max_retry_wait))
def HandleExceptionsAndRebuildHttpConnections(retry_args)
Exception handler for http failures.

This catches known failures and rebuilds the underlying HTTP
connections.

Args:
  retry_args: An ExceptionRetryArgs tuple.
2.479664
2.49586
0.993511
retry = 0
first_req_time = time.time()
while True:
    try:
        return _MakeRequestNoRetry(
            http, http_request, redirections=redirections,
            check_response_func=check_response_func)
    # retry_func will consume the exception types it handles and raise.
    # pylint: disable=broad-except
    except Exception as e:
        retry += 1
        if retry >= retries:
            raise
        else:
            total_wait_sec = time.time() - first_req_time
            retry_func(ExceptionRetryArgs(
                http, http_request, e, retry, max_retry_wait,
                total_wait_sec))
def MakeRequest(http, http_request, retries=7, max_retry_wait=60,
                redirections=5,
                retry_func=HandleExceptionsAndRebuildHttpConnections,
                check_response_func=CheckResponse)
Send http_request via the given http, performing error/retry handling.

Args:
  http: An httplib2.Http instance, or a http multiplexer that
      delegates to an underlying http, for example, HTTPMultiplexer.
  http_request: A Request to send.
  retries: (int, default 7) Number of retries to attempt on retryable
      replies (such as 429 or 5XX).
  max_retry_wait: (int, default 60) Maximum number of seconds to wait
      when retrying.
  redirections: (int, default 5) Number of redirects to follow.
  retry_func: Function to handle retries on exceptions. Argument is an
      ExceptionRetryArgs tuple.
  check_response_func: Function to validate the HTTP response.
      Arguments are (Response, response content, url).

Raises:
  InvalidDataFromServerError: if there is no response after retries.

Returns:
  A Response object.
3.377247
3.489613
0.9678
connection_type = None
# Handle overrides for connection types. This is used if the caller
# wants control over the underlying connection for managing callbacks
# or hash digestion.
if getattr(http, 'connections', None):
    url_scheme = parse.urlsplit(http_request.url).scheme
    if url_scheme and url_scheme in http.connections:
        connection_type = http.connections[url_scheme]

# Custom printing only at debuglevel 4
new_debuglevel = 4 if httplib2.debuglevel == 4 else 0
with _Httplib2Debuglevel(http_request, new_debuglevel, http=http):
    info, content = http.request(
        str(http_request.url), method=str(http_request.http_method),
        body=http_request.body, headers=http_request.headers,
        redirections=redirections, connection_type=connection_type)

if info is None:
    raise exceptions.RequestError()

response = Response(info, content, http_request.url)
check_response_func(response)
return response
def _MakeRequestNoRetry(http, http_request, redirections=5,
                        check_response_func=CheckResponse)
Send http_request via the given http.

This wrapper exists to handle translation between the plain httplib2
request/response types and the Request and Response types above.

Args:
  http: An httplib2.Http instance, or a http multiplexer that
      delegates to an underlying http, for example, HTTPMultiplexer.
  http_request: A Request to send.
  redirections: (int, default 5) Number of redirects to follow.
  check_response_func: Function to validate the HTTP response.
      Arguments are (Response, response content, url).

Returns:
  A Response object.

Raises:
  RequestError if no response could be parsed.
4.313099
4.63979
0.929589
self.__body = value
if value is not None:
    # Avoid calling len() which cannot exceed 4GiB in 32-bit python.
    body_length = getattr(
        self.__body, 'length', None) or len(self.__body)
    self.headers['content-length'] = str(body_length)
else:
    self.headers.pop('content-length', None)
# This line ensures we don't try to print large requests.
if not isinstance(value, (type(None), six.string_types)):
    self.loggable_body = '<media body>'
def body(self, value)
Sets the request body; handles logging and length measurement.
5.315469
4.755117
1.117842
def ProcessContentRange(content_range):
    _, _, range_spec = content_range.partition(' ')
    byte_range, _, _ = range_spec.partition('/')
    start, _, end = byte_range.partition('-')
    return int(end) - int(start) + 1

if '-content-encoding' in self.info and 'content-range' in self.info:
    # httplib2 rewrites content-length in the case of a compressed
    # transfer; we can't trust the content-length header in that
    # case, but we *can* trust content-range, if it's present.
    return ProcessContentRange(self.info['content-range'])
elif 'content-length' in self.info:
    return int(self.info.get('content-length'))
elif 'content-range' in self.info:
    return ProcessContentRange(self.info['content-range'])
return len(self.content)
def length(self)
Return the length of this response.

We expose this as an attribute since using len() directly can fail
for responses larger than sys.maxint.

Returns:
  Response length (as int or long)
3.489202
3.416611
1.021246
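The Content-Range arithmetic above can be exercised in isolation. This standalone helper mirrors the ProcessContentRange logic: a header such as 'bytes 0-99/1234' describes 100 payload bytes.

def process_content_range(content_range):
    # 'bytes 0-99/1234' -> range spec '0-99/1234' -> byte range '0-99'
    _, _, range_spec = content_range.partition(' ')
    byte_range, _, _ = range_spec.partition('/')
    start, _, end = byte_range.partition('-')
    return int(end) - int(start) + 1

assert process_content_range('bytes 0-99/1234') == 100
assert process_content_range('bytes 500-999/5000') == 500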
if size is None or size < 0:
    raise exceptions.NotYetImplementedError(
        'Illegal read of size %s requested on BufferedStream. '
        'Wrapped stream %s is at position %s-%s, '
        '%s bytes remaining.' %
        (size, self.__stream, self.__start_pos, self.__end_pos,
         self._bytes_remaining))
data = ''
if self._bytes_remaining:
    size = min(size, self._bytes_remaining)
    data = self.__buffered_data[
        self.__buffer_pos:self.__buffer_pos + size]
    self.__buffer_pos += size
return data
def read(self, size=None)
Reads from the buffer.
5.056664
4.631125
1.091887
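A minimal sketch of the buffer bookkeeping used by the read above: clamp the requested size to the bytes remaining in a pre-buffered window and advance the cursor. Names here are illustrative, not apitools' API.

class MiniBuffer(object):
    def __init__(self, data):
        self.__data = data
        self.__pos = 0

    @property
    def bytes_remaining(self):
        return len(self.__data) - self.__pos

    def read(self, size):
        # Unbounded or negative reads are rejected, mirroring the guard above.
        if size is None or size < 0:
            raise ValueError('Illegal read of size %s' % size)
        size = min(size, self.bytes_remaining)
        data = self.__data[self.__pos:self.__pos + size]
        self.__pos += size
        return data

buf = MiniBuffer('abcdef')
assert buf.read(4) == 'abcd' and buf.read(10) == 'ef'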
self.Validate() extended_descriptor.WriteMessagesFile( self.__file_descriptor, self.__package, self.__client_info.version, printer)
def WriteProtoFile(self, printer)
Write the messages file to out as proto.
17.129868
14.101894
1.214721
self.Validate() extended_descriptor.WritePythonFile( self.__file_descriptor, self.__package, self.__client_info.version, printer)
def WriteFile(self, printer)
Write the messages file to out.
21.875067
21.377094
1.023295
if not isinstance(new_descriptor, ( extended_descriptor.ExtendedMessageDescriptor, extended_descriptor.ExtendedEnumDescriptor)): raise ValueError('Cannot add descriptor of type %s' % ( type(new_descriptor),)) full_name = self.__ComputeFullName(new_descriptor.name) if full_name in self.__message_registry: raise ValueError( 'Attempt to re-register descriptor %s' % full_name) if full_name not in self.__nascent_types: raise ValueError('Directly adding types is not supported') new_descriptor.full_name = full_name self.__message_registry[full_name] = new_descriptor if isinstance(new_descriptor, extended_descriptor.ExtendedMessageDescriptor): self.__current_env.message_types.append(new_descriptor) elif isinstance(new_descriptor, extended_descriptor.ExtendedEnumDescriptor): self.__current_env.enum_types.append(new_descriptor) self.__unknown_types.discard(full_name) self.__nascent_types.remove(full_name)
def __RegisterDescriptor(self, new_descriptor)
Register the given descriptor in this registry.
2.778966
2.784138
0.998142
message = extended_descriptor.ExtendedEnumDescriptor() message.name = self.__names.ClassName(name) message.description = util.CleanDescription(description) self.__DeclareDescriptor(message.name) for index, (enum_name, enum_description) in enumerate( zip(enum_values, enum_descriptions)): enum_value = extended_descriptor.ExtendedEnumValueDescriptor() enum_value.name = self.__names.NormalizeEnumName(enum_name) if enum_value.name != enum_name: message.enum_mappings.append( extended_descriptor.ExtendedEnumDescriptor.JsonEnumMapping( python_name=enum_value.name, json_name=enum_name)) self.__AddImport('from %s import encoding' % self.__base_files_package) enum_value.number = index enum_value.description = util.CleanDescription( enum_description or '<no description>') message.values.append(enum_value) self.__RegisterDescriptor(message)
def AddEnumDescriptor(self, name, description, enum_values, enum_descriptions)
Add a new EnumDescriptor named name with the given enum values.
3.645015
3.680652
0.990318
# TODO(craigcitro): This is a hack. Remove it. message = extended_descriptor.ExtendedMessageDescriptor() message.name = self.__names.ClassName(schema['id']) message.alias_for = alias_for self.__DeclareDescriptor(message.name) self.__AddImport('from %s import extra_types' % self.__base_files_package) self.__RegisterDescriptor(message)
def __DeclareMessageAlias(self, schema, alias_for)
Declare schema as an alias for alias_for.
9.539835
9.148257
1.042804
additional_properties_info = schema['additionalProperties'] entries_type_name = self.__AddAdditionalPropertyType( message.name, additional_properties_info) description = util.CleanDescription( additional_properties_info.get('description')) if description is None: description = 'Additional properties of type %s' % message.name attrs = { 'items': { '$ref': entries_type_name, }, 'description': description, 'type': 'array', } field_name = 'additionalProperties' message.fields.append(self.__FieldDescriptorFromProperties( field_name, len(properties) + 1, attrs)) self.__AddImport('from %s import encoding' % self.__base_files_package) message.decorators.append( 'encoding.MapUnrecognizedFields(%r)' % field_name)
def __AddAdditionalProperties(self, message, schema, properties)
Add an additionalProperties field to message.
4.883934
4.900885
0.996541
# TODO(craigcitro): Is schema_name redundant? if self.__GetDescriptor(schema_name): return if schema.get('enum'): self.__DeclareEnum(schema_name, schema) return if schema.get('type') == 'any': self.__DeclareMessageAlias(schema, 'extra_types.JsonValue') return if schema.get('type') != 'object': raise ValueError('Cannot create message descriptors for type %s' % schema.get('type')) message = extended_descriptor.ExtendedMessageDescriptor() message.name = self.__names.ClassName(schema['id']) message.description = util.CleanDescription(schema.get( 'description', 'A %s object.' % message.name)) self.__DeclareDescriptor(message.name) with self.__DescriptorEnv(message): properties = schema.get('properties', {}) for index, (name, attrs) in enumerate(sorted(properties.items())): field = self.__FieldDescriptorFromProperties( name, index + 1, attrs) message.fields.append(field) if field.name != name: message.field_mappings.append( type(message).JsonFieldMapping( python_name=field.name, json_name=name)) self.__AddImport( 'from %s import encoding' % self.__base_files_package) if 'additionalProperties' in schema: self.__AddAdditionalProperties(message, schema, properties) self.__RegisterDescriptor(message)
def AddDescriptorFromSchema(self, schema_name, schema)
Add a new MessageDescriptor named schema_name based on schema.
4.527438
4.389608
1.031399
new_type_name = 'AdditionalProperty' property_schema = dict(property_schema) # We drop the description here on purpose, so the resulting # messages are less repetitive. property_schema.pop('description', None) description = 'An additional property for a %s object.' % name schema = { 'id': new_type_name, 'type': 'object', 'description': description, 'properties': { 'key': { 'type': 'string', 'description': 'Name of the additional property.', }, 'value': property_schema, }, } self.AddDescriptorFromSchema(new_type_name, schema) return new_type_name
def __AddAdditionalPropertyType(self, name, property_schema)
Add a new nested AdditionalProperty message.
3.342621
3.061619
1.091782
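For a concrete picture, a map-valued property whose values are strings would produce roughly the following synthesized schema under the logic above (an illustrative literal, assuming a parent message named MyMessage):

additional_property_schema = {
    'id': 'AdditionalProperty',
    'type': 'object',
    'description': 'An additional property for a MyMessage object.',
    'properties': {
        'key': {
            'type': 'string',
            'description': 'Name of the additional property.',
        },
        # The value schema is the original property schema with its
        # description dropped to avoid repetition, as noted above.
        'value': {'type': 'string'},
    },
}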
entry_schema.pop('description', None) description = 'Single entry in a %s.' % parent_name schema = { 'id': entry_type_name, 'type': 'object', 'description': description, 'properties': { 'entry': { 'type': 'array', 'items': entry_schema, }, }, } self.AddDescriptorFromSchema(entry_type_name, schema) return entry_type_name
def __AddEntryType(self, entry_type_name, entry_schema, parent_name)
Add a type for a list entry.
2.957359
2.848449
1.038235
field = descriptor.FieldDescriptor() field.name = self.__names.CleanName(name) field.number = index field.label = self.__ComputeLabel(attrs) new_type_name_hint = self.__names.ClassName( '%sValue' % self.__names.ClassName(name)) type_info = self.__GetTypeInfo(attrs, new_type_name_hint) field.type_name = type_info.type_name field.variant = type_info.variant if 'default' in attrs: # TODO(craigcitro): Correctly handle non-primitive default values. default = attrs['default'] if not (field.type_name == 'string' or field.variant == messages.Variant.ENUM): default = str(json.loads(default)) if field.variant == messages.Variant.ENUM: default = self.__names.NormalizeEnumName(default) field.default_value = default extended_field = extended_descriptor.ExtendedFieldDescriptor() extended_field.name = field.name extended_field.description = util.CleanDescription( attrs.get('description', 'A %s attribute.' % field.type_name)) extended_field.field_descriptor = field return extended_field
def __FieldDescriptorFromProperties(self, name, index, attrs)
Create a field descriptor for these attrs.
3.379935
3.287405
1.028147
type_ref = self.__names.ClassName(attrs.get('$ref')) type_name = attrs.get('type') if not (type_ref or type_name): raise ValueError('No type found for %s' % attrs) if type_ref: self.__AddIfUnknown(type_ref) # We don't actually know this is a message -- it might be an # enum. However, we can't check that until we've created all the # types, so we come back and fix this up later. return TypeInfo( type_name=type_ref, variant=messages.Variant.MESSAGE) if 'enum' in attrs: enum_name = '%sValuesEnum' % name_hint return self.__DeclareEnum(enum_name, attrs) if 'format' in attrs: type_info = self.PRIMITIVE_FORMAT_MAP.get(attrs['format']) if type_info is None: # If we don't recognize the format, the spec says we fall back # to just using the type name. if type_name in self.PRIMITIVE_TYPE_INFO_MAP: return self.PRIMITIVE_TYPE_INFO_MAP[type_name] raise ValueError('Unknown type/format "%s"/"%s"' % ( attrs['format'], type_name)) if type_info.type_name.startswith(( 'apitools.base.protorpclite.message_types.', 'message_types.')): self.__AddImport( 'from %s import message_types as _message_types' % self.__protorpc_package) if type_info.type_name.startswith('extra_types.'): self.__AddImport( 'from %s import extra_types' % self.__base_files_package) return type_info if type_name in self.PRIMITIVE_TYPE_INFO_MAP: type_info = self.PRIMITIVE_TYPE_INFO_MAP[type_name] if type_info.type_name.startswith('extra_types.'): self.__AddImport( 'from %s import extra_types' % self.__base_files_package) return type_info if type_name == 'array': items = attrs.get('items') if not items: raise ValueError('Array type with no item type: %s' % attrs) entry_name_hint = self.__names.ClassName( items.get('title') or '%sListEntry' % name_hint) entry_label = self.__ComputeLabel(items) if entry_label == descriptor.FieldDescriptor.Label.REPEATED: parent_name = self.__names.ClassName( items.get('title') or name_hint) entry_type_name = self.__AddEntryType( entry_name_hint, items.get('items'), parent_name) return TypeInfo(type_name=entry_type_name, variant=messages.Variant.MESSAGE) return self.__GetTypeInfo(items, entry_name_hint) elif type_name == 'any': self.__AddImport('from %s import extra_types' % self.__base_files_package) return self.PRIMITIVE_TYPE_INFO_MAP['any'] elif type_name == 'object': # TODO(craigcitro): Think of a better way to come up with names. if not name_hint: raise ValueError( 'Cannot create subtype without some name hint') schema = dict(attrs) schema['id'] = name_hint self.AddDescriptorFromSchema(name_hint, schema) self.__AddIfUnknown(name_hint) return TypeInfo( type_name=name_hint, variant=messages.Variant.MESSAGE) raise ValueError('Unknown type: %s' % type_name)
def __GetTypeInfo(self, attrs, name_hint)
Return a TypeInfo object for attrs, creating one if needed.
2.939783
2.907238
1.011195
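The resolution order in the method above ($ref first, then enum, then format overrides, then bare primitive names) can be condensed into a toy resolver. This is a schematic sketch only; the real method also handles arrays, 'any', and inline objects, and the primitive/format tables here are illustrative.

def resolve_type(attrs, primitives, formats):
    # Resolution order mirrors the walk above: $ref first, then enum,
    # then format overrides, then bare primitive names.
    if attrs.get('$ref'):
        return ('message', attrs['$ref'])
    if 'enum' in attrs:
        return ('enum', attrs.get('type'))
    if 'format' in attrs and attrs['format'] in formats:
        return ('primitive', formats[attrs['format']])
    if attrs.get('type') in primitives:
        return ('primitive', primitives[attrs['type']])
    raise ValueError('Unknown type: %s' % attrs.get('type'))

primitives = {'string': 'str', 'boolean': 'bool', 'integer': 'int'}
formats = {'int64': 'long', 'date-time': 'datetime'}
assert resolve_type({'$ref': 'Foo'}, primitives, formats) == ('message', 'Foo')
assert resolve_type({'type': 'string', 'format': 'int64'},
                    primitives, formats) == ('primitive', 'long')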
if args.discovery_url: try: return util.FetchDiscoveryDoc(args.discovery_url) except exceptions.CommunicationError: raise exceptions.GeneratedClientError( 'Could not fetch discovery doc') infile = os.path.expanduser(args.infile) or '/dev/stdin' with io.open(infile, encoding='utf8') as f: return json.loads(util.ReplaceHomoglyphs(f.read()))
def _GetDiscoveryDocFromFlags(args)
Get the discovery doc from flags.
4.459189
4.321371
1.031892
discovery_doc = _GetDiscoveryDocFromFlags(args) names = util.Names( args.strip_prefix, args.experimental_name_convention, args.experimental_capitalize_enums) if args.client_json: try: with io.open(args.client_json, encoding='utf8') as client_json: f = json.loads(util.ReplaceHomoglyphs(client_json.read())) web = f.get('installed', f.get('web', {})) client_id = web.get('client_id') client_secret = web.get('client_secret') except IOError: raise exceptions.NotFoundError( 'Failed to open client json file: %s' % args.client_json) else: client_id = args.client_id client_secret = args.client_secret if not client_id: logging.warning('No client ID supplied') client_id = '' if not client_secret: logging.warning('No client secret supplied') client_secret = '' client_info = util.ClientInfo.Create( discovery_doc, args.scope, client_id, client_secret, args.user_agent, names, args.api_key) outdir = os.path.expanduser(args.outdir) or client_info.default_directory if os.path.exists(outdir) and not args.overwrite: raise exceptions.ConfigurationValueError( 'Output directory exists, pass --overwrite to replace ' 'the existing files.') if not os.path.exists(outdir): os.makedirs(outdir) return gen_client_lib.DescriptorGenerator( discovery_doc, client_info, names, args.root_package, outdir, base_package=args.base_package, protorpc_package=args.protorpc_package, init_wildcards_file=(args.init_file == 'wildcards'), use_proto2=args.experimental_proto2_output, unelidable_request_methods=args.unelidable_request_methods, apitools_version=args.apitools_version)
def _GetCodegenFromFlags(args)
Create a codegen object from flags.
3.460611
3.415953
1.013073
codegen = _GetCodegenFromFlags(args) if codegen is None: logging.error('Failed to create codegen, exiting.') return 128 _WriteGeneratedFiles(args, codegen) if args.init_file != 'none': _WriteInit(codegen)
def GenerateClient(args)
Driver for client code generation.
7.39981
7.344214
1.00757
discovery_doc = _GetDiscoveryDocFromFlags(args) package = discovery_doc['name'] original_outdir = os.path.expanduser(args.outdir) args.outdir = os.path.join( args.outdir, 'apitools/clients/%s' % package) args.root_package = 'apitools.clients.%s' % package codegen = _GetCodegenFromFlags(args) if codegen is None: logging.error('Failed to create codegen, exiting.') return 1 _WriteGeneratedFiles(args, codegen) _WriteInit(codegen) with util.Chdir(original_outdir): _WriteSetupPy(codegen) with util.Chdir('apitools'): _WriteIntermediateInit(codegen) with util.Chdir('clients'): _WriteIntermediateInit(codegen)
def GeneratePipPackage(args)
Generate a client as a pip-installable tarball.
4.721947
4.491842
1.051227
if size is not None:
    read_size = min(size, self.__remaining_bytes)
else:
    read_size = self.__remaining_bytes
data = self.__stream.read(read_size)
if read_size > 0 and not data:
    raise exceptions.StreamExhausted(
        'Not enough bytes in stream; expected %d, exhausted '
        'after %d' % (
            self.__max_bytes,
            self.__max_bytes - self.__remaining_bytes))
self.__remaining_bytes -= len(data)
return data
def read(self, size=None)
Read at most size bytes from this slice. Compared to other streams, there is one case where we may unexpectedly raise an exception on read: if the underlying stream is exhausted (i.e. returns no bytes on read), and the size of this slice indicates we should still be able to read more bytes, we raise exceptions.StreamExhausted. Args: size: If provided, read no more than size bytes from the stream. Returns: The bytes read from this slice. Raises: exceptions.StreamExhausted
3.505681
3.750592
0.934701
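The exhaustion check above is the interesting part of the slice read: a short read from the wrapped stream while bytes are still owed is an error, not ordinary EOF. A standalone sketch (the function name is illustrative):

import io

def read_slice(stream, max_bytes):
    # Read exactly max_bytes from stream, failing loudly if the
    # underlying stream runs dry early, as StreamExhausted does above.
    remaining = max_bytes
    chunks = []
    while remaining:
        data = stream.read(remaining)
        if not data:
            raise ValueError(
                'Not enough bytes in stream; expected %d, exhausted '
                'after %d' % (max_bytes, max_bytes - remaining))
        remaining -= len(data)
        chunks.append(data)
    return b''.join(chunks)

assert read_slice(io.BytesIO(b'0123456789'), 4) == b'0123'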
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version, sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto):  # Avoid repeated downloads
    try:
        from distutils import log
        if delay:
            # NOTE: the original warning banner was elided here; this
            # message is a reconstruction with the same format arguments.
            log.warn(
                "This script requires setuptools version %s; downloading "
                "it from %s in %d seconds. If this machine has no network "
                "access, place the file %s in this directory and rerun.",
                version, download_base, delay, url)
            from time import sleep
            sleep(delay)
        log.warn("Downloading %s", url)
        src = urllib2.urlopen(url)
        # Read/write all in one block, so we don't create a corrupt file
        # if the download is interrupted.
        data = _validate_md5(egg_name, src.read())
        dst = open(saveto, "wb")
        dst.write(data)
    finally:
        if src:
            src.close()
        if dst:
            dst.close()
return os.path.realpath(saveto)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, delay=15)
Download setuptools from a specified location and return its filename `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where the egg will be downloaded. `delay` is the number of seconds to pause before an actual download attempt.
4.09857
4.240924
0.966433
try: import setuptools except ImportError: egg = None try: egg = download_setuptools(version, delay=0) sys.path.insert(0,egg) from setuptools.command.easy_install import main return main(list(argv)+[egg]) # we're done here finally: if egg and os.path.exists(egg): os.unlink(egg) else: if setuptools.__version__ == '0.0.1': print >>sys.stderr, ( "You have an obsolete version of setuptools installed. Please\n" "remove it from your system entirely before rerunning this script." ) sys.exit(2) req = "setuptools>="+version import pkg_resources try: pkg_resources.require(req) except pkg_resources.VersionConflict: try: from setuptools.command.easy_install import main except ImportError: from easy_install import main main(list(argv)+[download_setuptools(delay=0)]) sys.exit(0) # try to force an exit else: if argv: from setuptools.command.easy_install import main main(argv) else: print "Setuptools version",version,"or greater has been installed." print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def main(argv, version=DEFAULT_VERSION)
Install or upgrade setuptools and EasyInstall
4.209947
4.00234
1.051871
import re for name in filenames: base = os.path.basename(name) f = open(name,'rb') md5_data[base] = md5(f.read()).hexdigest() f.close() data = [" %r: %r,\n" % it for it in md5_data.items()] data.sort() repl = "".join(data) import inspect srcfile = inspect.getsourcefile(sys.modules[__name__]) f = open(srcfile, 'rb'); src = f.read(); f.close() match = re.search("\nmd5_data = {\n([^}]+)}", src) if not match: print >>sys.stderr, "Internal error!" sys.exit(2) src = src[:match.start(1)] + repl + src[match.end(1):] f = open(srcfile,'w') f.write(src) f.close()
def update_md5(filenames)
Update our built-in md5 registry
2.920425
2.902117
1.006309
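The registry body built by the routine above is just a basename-to-digest map. A minimal modern sketch of that hashing step, using hashlib (the function name is illustrative):

import hashlib
import os

def build_md5_registry(filenames):
    # Map each file's basename to the hex digest of its contents,
    # as the registry-update routine above does.
    registry = {}
    for name in filenames:
        with open(name, 'rb') as f:
            registry[os.path.basename(name)] = hashlib.md5(f.read()).hexdigest()
    return registry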
# Retrieve the configs for the desired method and service. method_config = service.GetMethodConfig(method) upload_config = service.GetUploadConfig(method) # Prepare the HTTP Request. http_request = service.PrepareHttpRequest( method_config, request, global_params=global_params, upload_config=upload_config) # Create the request and add it to our master list. api_request = self.ApiCall( http_request, self.retryable_codes, service, method_config) self.api_requests.append(api_request)
def Add(self, service, method, request, global_params=None)
Add a request to the batch. Args: service: A class inheriting base_api.BaseApiService. method: A string indicating the desired method from the service. See the example in the class docstring. request: An input message appropriate for the specified service.method. global_params: Optional additional parameters to pass into method.PrepareHttpRequest. Returns: None
4.288174
3.969789
1.080202
requests = [request for request in self.api_requests if not request.terminal_state] batch_size = max_batch_size or len(requests) for attempt in range(max_retries): if attempt: time.sleep(sleep_between_polls) for i in range(0, len(requests), batch_size): # Create a batch_http_request object and populate it with # incomplete requests. batch_http_request = BatchHttpRequest( batch_url=self.batch_url, callback=batch_request_callback, response_encoding=self.response_encoding ) for request in itertools.islice(requests, i, i + batch_size): batch_http_request.Add( request.http_request, request.HandleResponse) batch_http_request.Execute(http) if hasattr(http.request, 'credentials'): if any(request.authorization_failed for request in itertools.islice(requests, i, i + batch_size)): http.request.credentials.refresh(http) # Collect retryable requests. requests = [request for request in self.api_requests if not request.terminal_state] if not requests: break return self.api_requests
def Execute(self, http, sleep_between_polls=5, max_retries=5, max_batch_size=None, batch_request_callback=None)
Execute all of the requests in the batch. Args: http: An httplib2.Http object for use in the request. sleep_between_polls: Integer number of seconds to sleep between polls. max_retries: Max retries. Any requests that have not succeeded by this number of retries simply report the last response or exception, whatever it happened to be. max_batch_size: int, if specified, requests will be split into batches of the given size. batch_request_callback: function of (http_response, exception) passed to BatchHttpRequest which will be run on any given results. Returns: List of ApiCalls.
3.078434
3.187186
0.965878
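The batching window in the loop above comes from itertools.islice over a fixed-size stride. A standalone sketch of just that windowing (names are illustrative):

import itertools

def iter_batches(requests, batch_size):
    # Yield successive slices of at most batch_size requests, the same
    # windowing the Execute loop above applies with itertools.islice.
    for i in range(0, len(requests), batch_size):
        yield list(itertools.islice(requests, i, i + batch_size))

assert [len(b) for b in iter_batches(list(range(7)), 3)] == [3, 3, 1]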
# The header must both start with '<' and end with '>'; with `or`
# a malformed value such as '<abc' would slip past this guard.
if not (header.startswith('<') and header.endswith('>')):
    raise exceptions.BatchError(
        'Invalid value for Content-ID: %s' % header)
if '+' not in header:
    raise exceptions.BatchError(
        'Invalid value for Content-ID: %s' % header)
_, request_id = header[1:-1].rsplit('+', 1)
return urllib_parse.unquote(request_id)
def _ConvertHeaderToId(header)
Convert a Content-ID header value to an id. Presumes the Content-ID header conforms to the format that _ConvertIdToHeader() returns. Args: header: A string indicating the Content-ID header value. Returns: The extracted id value. Raises: BatchError if the header is not in the expected format.
3.622553
3.191417
1.135092
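The parsing above inverts _ConvertIdToHeader: strip the angle brackets, split on the last '+', and unquote. A standalone sketch with the same guards (the try/except import is for Python 2/3 portability; the example header is hypothetical):

try:
    from urllib.parse import unquote        # Python 3
except ImportError:
    from urllib import unquote              # Python 2

def convert_header_to_id(header):
    # '<prefix+42>' -> '42'; both guards mirror the checks above.
    if not (header.startswith('<') and header.endswith('>')):
        raise ValueError('Invalid value for Content-ID: %s' % header)
    if '+' not in header:
        raise ValueError('Invalid value for Content-ID: %s' % header)
    _, request_id = header[1:-1].rsplit('+', 1)
    return unquote(request_id)

assert convert_header_to_id('<batch-api+3>') == '3'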
# Construct status line parsed = urllib_parse.urlsplit(request.url) request_line = urllib_parse.urlunsplit( ('', '', parsed.path, parsed.query, '')) if not isinstance(request_line, six.text_type): request_line = request_line.decode('utf-8') status_line = u' '.join(( request.http_method, request_line, u'HTTP/1.1\n' )) major, minor = request.headers.get( 'content-type', 'application/json').split('/') msg = mime_nonmultipart.MIMENonMultipart(major, minor) # MIMENonMultipart adds its own Content-Type header. # Keep all of the other headers in `request.headers`. for key, value in request.headers.items(): if key == 'content-type': continue msg[key] = value msg['Host'] = parsed.netloc msg.set_unixfrom(None) if request.body is not None: msg.set_payload(request.body) # Serialize the mime message. str_io = six.StringIO() # maxheaderlen=0 means don't line wrap headers. gen = generator.Generator(str_io, maxheaderlen=0) gen.flatten(msg, unixfrom=False) body = str_io.getvalue() return status_line + body
def _SerializeRequest(self, request)
Convert an http_wrapper.Request object into a string. Args: request: An http_wrapper.Request to serialize. Returns: The request as a string in application/http format.
3.537065
3.556652
0.994493
# Strip off the status line. status_line, payload = payload.split('\n', 1) _, status, _ = status_line.split(' ', 2) # Parse the rest of the response. parser = email_parser.Parser() msg = parser.parsestr(payload) # Get the headers. info = dict(msg) info['status'] = status # Create Response from the parsed headers. content = msg.get_payload() return http_wrapper.Response(info, content, self.__batch_url)
def _DeserializeResponse(self, payload)
Convert string into Response and content. Args: payload: Header and body string to be deserialized. Returns: A Response object
4.889833
4.903798
0.997152
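The deserializer above leans on the fact that, once the status line is stripped, the remainder of a batch part is a plain RFC 822 message. A standalone sketch using email.parser:

from email import parser as email_parser

def split_batch_part(payload):
    # Separate the HTTP status line from the MIME headers and body,
    # as the deserializer above does.
    status_line, rest = payload.split('\n', 1)
    _, status, _ = status_line.split(' ', 2)
    msg = email_parser.Parser().parsestr(rest)
    info = dict(msg)
    info['status'] = status
    return info, msg.get_payload()

info, content = split_batch_part(
    'HTTP/1.1 200 OK\nContent-Type: application/json\n\n{"ok": true}')
assert info['status'] == '200' and content == '{"ok": true}'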
handler = RequestResponseAndHandler(request, None, callback) self.__request_response_handlers[self._NewId()] = handler
def Add(self, request, callback=None)
Add a new request. Args: request: An http_wrapper.Request to add to the batch. callback: A callback to be called for this response, of the form callback(response, exception). The first parameter is the deserialized response object. The second is an apiclient.errors.HttpError exception object if an HTTP error occurred while processing the request, or None if no errors occurred. Returns: None
12.126768
22.673737
0.534838
message = mime_multipart.MIMEMultipart('mixed') # Message should not write out its own headers. setattr(message, '_write_headers', lambda self: None) # Add all the individual requests. for key in self.__request_response_handlers: msg = mime_nonmultipart.MIMENonMultipart('application', 'http') msg['Content-Transfer-Encoding'] = 'binary' msg['Content-ID'] = self._ConvertIdToHeader(key) body = self._SerializeRequest( self.__request_response_handlers[key].request) msg.set_payload(body) message.attach(msg) request = http_wrapper.Request(self.__batch_url, 'POST') request.body = message.as_string() request.headers['content-type'] = ( 'multipart/mixed; boundary="%s"') % message.get_boundary() response = http_wrapper.MakeRequest(http, request) if response.status_code >= 300: raise exceptions.HttpError.FromResponse(response) # Prepend with a content-type header so Parser can handle it. header = 'content-type: %s\r\n\r\n' % response.info['content-type'] content = response.content if isinstance(content, bytes) and self.__response_encoding: content = response.content.decode(self.__response_encoding) parser = email_parser.Parser() mime_response = parser.parsestr(header + content) if not mime_response.is_multipart(): raise exceptions.BatchError( 'Response not in multipart/mixed format.') for part in mime_response.get_payload(): request_id = self._ConvertHeaderToId(part['Content-ID']) response = self._DeserializeResponse(part.get_payload()) # Disable protected access because namedtuple._replace(...) # is not actually meant to be protected. # pylint: disable=protected-access self.__request_response_handlers[request_id] = ( self.__request_response_handlers[request_id]._replace( response=response))
def _Execute(self, http)
Serialize batch request, send to server, process response. Args: http: An httplib2.Http object to be used to make the request. Raises: httplib2.HttpLib2Error if a transport error has occurred. apiclient.errors.BatchError if the response is the wrong format.
3.427081
3.286485
1.04278
self._Execute(http) for key in self.__request_response_handlers: response = self.__request_response_handlers[key].response callback = self.__request_response_handlers[key].handler exception = None if response.status_code >= 300: exception = exceptions.HttpError.FromResponse(response) if callback is not None: callback(response, exception) if self.__callback is not None: self.__callback(response, exception)
def Execute(self, http)
Execute all the requests as a single batched HTTP request. Args: http: An httplib2.Http object to be used with the request. Returns: None Raises: BatchError if the response is the wrong format.
3.278226
3.447321
0.950949
outer_definition_name = cls.outer_definition_name() if outer_definition_name is None: return six.text_type(cls.__name__) return u'%s.%s' % (outer_definition_name, cls.__name__)
def definition_name(cls)
Helper method for creating definition name. Names will be generated to include the class's package name, scope (if the class is nested in another definition) and class name. By default, the package name for a definition is derived from its module name. However, this value can be overridden by placing a 'package' attribute in the module that contains the definition class. For example:

  package = 'some.alternate.package'

  class MyMessage(Message):
    ...

  >>> MyMessage.definition_name()
  some.alternate.package.MyMessage

Returns: Dot-separated fully qualified name of definition.
2.792894
3.356714
0.832032
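On Python 3, __qualname__ already encodes the lexical nesting that definition_name reconstructs by walking outer definitions, so a toy version reduces to prefixing the package. A sketch under that assumption (the package default is a hypothetical stand-in for the module-level 'package' attribute):

def definition_name(cls, package='my.package'):
    # __qualname__ encodes nesting (e.g. 'Outer.Inner'), so the dotted
    # name is just the package plus the qualified class name.
    return '%s.%s' % (package, cls.__qualname__)

class Outer(object):
    class Inner(object):
        pass

assert definition_name(Outer.Inner) == 'my.package.Outer.Inner'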
outer_definition = cls.message_definition() if not outer_definition: return util.get_package_for_module(cls.__module__) return outer_definition.definition_name()
def outer_definition_name(cls)
Helper method for creating outer definition name. Returns: If definition is nested, will return the outer definition's name, else the package name.
5.745057
5.706094
1.006828