code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
result = self.search(artist_name, search_type=100, limit=limit)
if result['result']['artistCount'] <= 0:
    LOG.warning('Artist %s does not exist!', artist_name)
    raise SearchNotFound('Artist {} does not exist.'.format(artist_name))
else:
    artists = result['result']['artists']
    if quiet:
        artist_id, artist_name = artists[0]['id'], artists[0]['name']
        artist = Artist(artist_id, artist_name)
        return artist
    else:
        return self.display.select_one_artist(artists)
def search_artist(self, artist_name, quiet=False, limit=9)
Search artist by artist name.

:param artist_name: artist name.
:param quiet: automatically select the best one.
:param limit: artist count returned by weapi.
:return: an Artist object.
3.317434
3.145239
1.054748
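A minimal usage sketch for the search flow above, assuming `client` is an instance of the (unshown) class these methods belong to and that SearchNotFound is importable from the same package; both names are assumptions.

try:
    # quiet=True takes the top hit instead of prompting for a choice
    artist = client.search_artist('Queen', quiet=True)
except SearchNotFound:
    artist = None  # no artist matched the query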
result = self.search(playlist_name, search_type=1000, limit=limit)
if result['result']['playlistCount'] <= 0:
    LOG.warning('Playlist %s does not exist!', playlist_name)
    raise SearchNotFound('Playlist {} does not exist.'.format(playlist_name))
else:
    playlists = result['result']['playlists']
    if quiet:
        playlist_id, playlist_name = playlists[0]['id'], playlists[0]['name']
        playlist = Playlist(playlist_id, playlist_name)
        return playlist
    else:
        return self.display.select_one_playlist(playlists)
def search_playlist(self, playlist_name, quiet=False, limit=9)
Search playlist by playlist name.

:param playlist_name: playlist name.
:param quiet: automatically select the best one.
:param limit: playlist count returned by weapi.
:return: a Playlist object.
3.326632
3.170994
1.049082
result = self.search(user_name, search_type=1002, limit=limit)
if result['result']['userprofileCount'] <= 0:
    LOG.warning('User %s does not exist!', user_name)
    raise SearchNotFound('User {} does not exist.'.format(user_name))
else:
    users = result['result']['userprofiles']
    if quiet:
        user_id, user_name = users[0]['userId'], users[0]['nickname']
        user = User(user_id, user_name)
        return user
    else:
        return self.display.select_one_user(users)
def search_user(self, user_name, quiet=False, limit=9)
Search user by user name.

:param user_name: user name.
:param quiet: automatically select the best one.
:param limit: user count returned by weapi.
:return: a User object.
4.089898
4.058319
1.007781
url = 'http://music.163.com/weapi/user/playlist?csrf_token='
csrf = ''
params = {'offset': 0, 'uid': user_id, 'limit': limit,
          'csrf_token': csrf}
result = self.post_request(url, params)
playlists = result['playlist']
return self.display.select_one_playlist(playlists)
def get_user_playlists(self, user_id, limit=1000)
Get all of a user's playlists.

Warning: login is required for private playlists.

:param user_id: user id.
:param limit: playlist count returned by weapi.
:return: a Playlist object.
3.451555
3.101528
1.112856
url = 'http://music.163.com/weapi/v3/playlist/detail?csrf_token='
csrf = ''
params = {'id': playlist_id, 'offset': 0, 'total': True,
          'limit': limit, 'n': 1000, 'csrf_token': csrf}
result = self.post_request(url, params)
songs = result['playlist']['tracks']
songs = [Song(song['id'], song['name']) for song in songs]
return songs
def get_playlist_songs(self, playlist_id, limit=1000)
Get all songs in a playlist.

:param playlist_id: playlist id.
:param limit: length of result returned by weapi.
:return: a list of Song objects.
2.304689
2.14725
1.073321
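The two lookups above chain naturally: resolve a playlist by name, then fetch its tracks. A sketch under the same `client` assumption; the `playlist_id` attribute name on the returned Playlist is also an assumption.

playlist = client.search_playlist('running mix', quiet=True)
songs = client.get_playlist_songs(playlist.playlist_id, limit=1000)  # attribute name assumed
print(len(songs))  # each entry is a Song(id, name)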
url = 'http://music.163.com/api/album/{}/'.format(album_id)
result = self.get_request(url)

songs = result['album']['songs']
songs = [Song(song['id'], song['name']) for song in songs]
return songs
def get_album_songs(self, album_id)
Get all songs on an album.

Warning: uses the old api.

:param album_id: album id.
:return: a list of Song objects.
2.611627
2.490122
1.048795
url = 'http://music.163.com/api/artist/{}'.format(artist_id)
result = self.get_request(url)

hot_songs = result['hotSongs']
songs = [Song(song['id'], song['name']) for song in hot_songs]
return songs
def get_artists_hot_songs(self, artist_id)
Get an artist's top 50 songs.

Warning: uses the old api.

:param artist_id: artist id.
:return: a list of Song objects.
2.342818
2.441073
0.959749
url = 'http://music.163.com/weapi/song/enhance/player/url?csrf_token='
csrf = ''
params = {'ids': [song_id], 'br': bit_rate, 'csrf_token': csrf}
result = self.post_request(url, params)
song_url = result['data'][0]['url']  # download address
if song_url is None:  # Taylor Swift's song is not available
    LOG.warning(
        'Song %s is not available due to copyright issue. => %s',
        song_id, result)
    raise SongNotAvailable(
        'Song {} is not available due to copyright issue.'.format(song_id))
else:
    return song_url
def get_song_url(self, song_id, bit_rate=320000)
Get a song's download address.

:param song_id: song id <int>.
:param bit_rate: {'MD 128k': 128000, 'HD 320k': 320000}.
:return: the song's download address.
3.053683
2.854229
1.06988
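A sketch of how the availability check above is consumed, again assuming the `client` object and an importable SongNotAvailable; weapi signals a copyright-blocked track by returning a null url.

try:
    song_url = client.get_song_url(186016, bit_rate=320000)  # song id chosen arbitrarily
except SongNotAvailable:
    song_url = None  # blocked track: skip the download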
url = 'http://music.163.com/api/song/lyric?os=osx&id={}&lv=-1&kv=-1&tv=-1'.format(  # NOQA
    song_id)
result = self.get_request(url)
if 'lrc' in result and result['lrc']['lyric'] is not None:
    lyric_info = result['lrc']['lyric']
else:
    lyric_info = 'Lyric not found.'
return lyric_info
def get_song_lyric(self, song_id)
Get a song's lyric.

Warning: uses the old api.

:param song_id: song id.
:return: the song's lyric.
2.264598
2.207221
1.025995
if not os.path.exists(folder):
    os.makedirs(folder)

fpath = os.path.join(folder, song_name + '.mp3')
if sys.platform == 'win32' or sys.platform == 'cygwin':
    valid_name = re.sub(r'[<>:"/\\|?*]', '', song_name)
    if valid_name != song_name:
        click.echo('{} will be saved as: {}.mp3'.format(song_name, valid_name))
        fpath = os.path.join(folder, valid_name + '.mp3')

if not os.path.exists(fpath):
    resp = self.download_session.get(
        song_url, timeout=self.timeout, stream=True)
    length = int(resp.headers.get('content-length'))
    label = 'Downloading {} {}kb'.format(song_name, int(length / 1024))
    with click.progressbar(length=length, label=label) as progressbar:
        with open(fpath, 'wb') as song_file:
            for chunk in resp.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    song_file.write(chunk)
                    progressbar.update(1024)

if lyric_info:
    folder = os.path.join(folder, 'lyric')
    if not os.path.exists(folder):
        os.makedirs(folder)
    fpath = os.path.join(folder, song_name + '.lrc')
    with open(fpath, 'w') as lyric_file:
        lyric_file.write(lyric_info)
def get_song_by_url(self, song_url, song_name, folder, lyric_info)
Download a song and save it to disk.

:param song_url: download address.
:param song_name: song name.
:param folder: storage path.
:param lyric_info: lyric info.
1.787878
1.79776
0.994503
username = click.prompt('Please enter your email or phone number')
password = click.prompt('Please enter your password', hide_input=True)

pattern = re.compile(r'^0\d{2,3}\d{7,8}$|^1[34578]\d{9}$')
if pattern.match(username):  # use phone number to login
    url = 'https://music.163.com/weapi/login/cellphone'
    params = {
        'phone': username,
        'password': hashlib.md5(password.encode('utf-8')).hexdigest(),
        'rememberLogin': 'true'}
else:  # use email to login
    url = 'https://music.163.com/weapi/login?csrf_token='
    params = {
        'username': username,
        'password': hashlib.md5(password.encode('utf-8')).hexdigest(),
        'rememberLogin': 'true'}

try:
    result = self.post_request(url, params)
except PostRequestIllegal:
    click.echo('Password Error!')
    sys.exit(1)

self.session.cookies.save()
uid = result['account']['id']
with open(person_info_path, 'w') as person_info:
    person_info.write(str(uid))
def login(self)
Login entrance.
2.395299
2.33815
1.024442
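The choice between the cellphone and email endpoints hinges on the regex above; this self-contained check mirrors it.

import re

# Same pattern as in login(): 0-prefixed landline or 13/14/15/17/18 mobile.
pattern = re.compile(r'^0\d{2,3}\d{7,8}$|^1[34578]\d{9}$')
print(bool(pattern.match('13812345678')))        # True: cellphone endpoint
print(bool(pattern.match('[email protected]')))  # False: email endpoint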
# prepare OrderedDict geojson structure
feature = OrderedDict()

# the list of fields that will be processed by get_properties
# we will remove fields that have been already processed
# to increase performance on large numbers
fields = list(self.fields.values())

# optional id attribute
if self.Meta.id_field:
    field = self.fields[self.Meta.id_field]
    value = field.get_attribute(instance)
    feature["id"] = field.to_representation(value)
    fields.remove(field)

# required type attribute
# must be "Feature" according to GeoJSON spec
feature["type"] = "Feature"

# required geometry attribute
# MUST be present in output according to GeoJSON spec
field = self.fields[self.Meta.geo_field]
geo_value = field.get_attribute(instance)
feature["geometry"] = field.to_representation(geo_value)
fields.remove(field)

# Bounding Box
# if the auto_bbox feature is enabled,
# bbox will be determined automatically
if self.Meta.auto_bbox and geo_value:
    feature["bbox"] = geo_value.extent
# otherwise it can be determined via another field
elif self.Meta.bbox_geo_field:
    field = self.fields[self.Meta.bbox_geo_field]
    value = field.get_attribute(instance)
    feature["bbox"] = value.extent if hasattr(value, 'extent') else None
    fields.remove(field)

# GeoJSON properties
feature["properties"] = self.get_properties(instance, fields)

return feature
def to_representation(self, instance)
Serialize objects -> primitives.
3.356359
3.307971
1.014628
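For orientation, this is the shape of the feature the method assembles, shown with hypothetical values for a model exposing an id, a point geometry and one property.

from collections import OrderedDict

feature = OrderedDict([
    ('id', 1),                                                   # from Meta.id_field
    ('type', 'Feature'),                                         # fixed by the GeoJSON spec
    ('geometry', {'type': 'Point', 'coordinates': [0.0, 0.0]}),  # from Meta.geo_field
    ('properties', {'name': 'Null Island'}),                     # remaining fields
])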
properties = OrderedDict()

for field in fields:
    if field.write_only:
        continue
    value = field.get_attribute(instance)
    representation = None
    if value is not None:
        representation = field.to_representation(value)
    properties[field.field_name] = representation

return properties
def get_properties(self, instance, fields)
Get the feature metadata which will be used for the GeoJSON
"properties" key.

By default it returns all serializer fields excluding those used for
the ID, the geometry and the bounding box.

:param instance: The current Django model instance
:param fields: The list of fields to process (fields already
    processed have been removed)
:return: OrderedDict containing the properties of the current feature
:rtype: OrderedDict
2.948001
3.254222
0.9059
if 'properties' in data:
    data = self.unformat_geojson(data)
return super(GeoFeatureModelSerializer, self).to_internal_value(data)
def to_internal_value(self, data)
Override the parent method to first remove the GeoJSON formatting
5.382009
3.088508
1.742592
attrs = feature["properties"] if 'geometry' in feature: attrs[self.Meta.geo_field] = feature['geometry'] if self.Meta.bbox_geo_field and 'bbox' in feature: attrs[self.Meta.bbox_geo_field] = Polygon.from_bbox(feature['bbox']) return attrs
def unformat_geojson(self, feature)
This function should return a dictionary containing keys which map to
serializer fields.

Remember that GeoJSON contains a key "properties" which contains the
feature metadata. This should be flattened to make sure this metadata
is stored in the right serializer fields.

:param feature: The dictionary containing the feature data directly
    from the GeoJSON data.
:return: A new dictionary which maps the GeoJSON values to serializer
    fields
3.252904
3.582741
0.907937
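A self-contained sketch of the flattening, using a hypothetical feature and 'location' standing in for Meta.geo_field (the bbox branch is omitted so the snippet runs without GeoDjango).

feature = {
    'type': 'Feature',
    'geometry': {'type': 'Point', 'coordinates': [12.49, 41.89]},
    'properties': {'name': 'Rome'},
}

attrs = dict(feature['properties'])
if 'geometry' in feature:
    attrs['location'] = feature['geometry']  # 'location' stands in for Meta.geo_field
print(attrs)  # {'name': 'Rome', 'location': {...}}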
if hasattr(value, '__iter__'):
    return tuple(self._recursive_round(v, precision) for v in value)
return round(value, precision)
def _recursive_round(self, value, precision)
Round all numbers within an array or nested arrays.

value: number or nested array of numbers
precision: integer value giving the number of decimals to keep
2.53648
3.0143
0.841482
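A standalone copy of the helper for illustration: nested sequences collapse into tuples of rounded numbers.

def recursive_round(value, precision):
    # Recurse into any iterable; round leaf numbers.
    if hasattr(value, '__iter__'):
        return tuple(recursive_round(v, precision) for v in value)
    return round(value, precision)

print(recursive_round([[1.23456, 2.34567], 3.45678], 2))
# ((1.23, 2.35), 3.46)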
if geo_type in ('MultiPoint', 'LineString'):
    close = (geo_type == 'LineString')
    output = []
    for coord in geometry:
        coord = tuple(coord)
        if not output or coord != output[-1]:
            output.append(coord)
    if close and len(output) == 1:
        output.append(output[0])
    return tuple(output)
if geo_type in ('MultiLineString', 'Polygon'):
    return [
        self._rm_redundant_points(c, 'LineString') for c in geometry]
if geo_type == 'MultiPolygon':
    return [self._rm_redundant_points(c, 'Polygon') for c in geometry]
return geometry
def _rm_redundant_points(self, geometry, geo_type)
Remove redundant coordinate pairs from geometry.

geometry: array of coordinates or nested-array of coordinates
geo_type: GeoJSON type attribute for provided geometry, used to
    determine structure of provided `geometry` argument
2.124415
2.16633
0.980651
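The LineString branch above drops consecutive duplicate coordinates and re-closes a line that collapses to a single point; a standalone sketch of just that branch:

def rm_redundant_points(line):
    # LineString case only: drop consecutive duplicates.
    output = []
    for coord in line:
        coord = tuple(coord)
        if not output or coord != output[-1]:
            output.append(coord)
    if len(output) == 1:
        output.append(output[0])  # a LineString needs at least two points
    return tuple(output)

print(rm_redundant_points([(0, 0), (0, 0), (1, 1), (1, 1), (2, 2)]))
# ((0, 0), (1, 1), (2, 2))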
from django.contrib.gis.db import models
from rest_framework.serializers import ModelSerializer
from .fields import GeometryField

try:
    # drf 3.0
    field_mapping = ModelSerializer._field_mapping.mapping
except AttributeError:
    # drf 3.1
    field_mapping = ModelSerializer.serializer_field_mapping

# map GeoDjango fields to drf-gis GeometryField
field_mapping.update({
    models.GeometryField: GeometryField,
    models.PointField: GeometryField,
    models.LineStringField: GeometryField,
    models.PolygonField: GeometryField,
    models.MultiPointField: GeometryField,
    models.MultiLineStringField: GeometryField,
    models.MultiPolygonField: GeometryField,
    models.GeometryCollectionField: GeometryField
})
def ready(self)
update Django Rest Framework serializer mappings
2.499902
2.204364
1.13407
# d * (180 / pi) / earthRadius ==> degrees longitude
# (degrees longitude) / cos(latitude) ==> degrees latitude
lat = latitude if latitude >= 0 else -1 * latitude
rad2deg = 180 / pi
earthRadius = 6378160.0
latitudeCorrection = 0.5 * (1 + cos(lat * pi / 180))
return (distance / (earthRadius * latitudeCorrection) * rad2deg)
def dist_to_deg(self, distance, latitude)
distance = distance in meters
latitude = latitude in degrees

At the equator, the distance of one degree is equal in latitude and
longitude. At higher latitudes, a degree longitude is shorter in
length, proportional to cos(latitude).
http://en.wikipedia.org/wiki/Decimal_degrees

This function is part of a distance filter where the database
'distance' is in degrees. There's no good single-valued answer to
this problem. The distance/degree is quite constant N/S around the
earth (latitude), but varies over a huge range E/W (longitude).

Split the difference: I'm going to average the degrees latitude and
degrees longitude corresponding to the given distance. At high
latitudes, this will be too short N/S and too long E/W. It splits
the errors between the two axes. Errors are < 25 percent for
latitudes < 60 degrees N/S.
4.765443
5.05414
0.942879
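A worked check of the averaging: the correction factor is 1 at the equator and shrinks toward the poles, so the same distance in meters maps to more degrees at higher latitude. Standalone copy for illustration.

from math import pi, cos

def dist_to_deg(distance, latitude):
    lat = abs(latitude)
    latitude_correction = 0.5 * (1 + cos(lat * pi / 180))
    return distance / (6378160.0 * latitude_correction) * (180 / pi)

print(round(dist_to_deg(1000, 0), 5))   # 0.00898 degrees at the equator
print(round(dist_to_deg(1000, 60), 5))  # 0.01198 degrees at 60 N/S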
# In general, it's unsafe to simply join base and url. However, for
# the case of discovery documents, we know:
#  * base will never contain params, query, or fragment
#  * url will never contain a scheme or net_loc.
# In general, this means we can safely join on /; we just need to
# ensure we end up with precisely one / joining base and url. The
# exception here is the case of media uploads, where url will be an
# absolute url.
if url.startswith('http://') or url.startswith('https://'):
    return urllib.parse.urljoin(base, url)
new_base = base if base.endswith('/') else base + '/'
new_url = url[1:] if url.startswith('/') else url
return new_base + new_url
def _urljoin(base, url)
Custom urljoin replacement supporting : before / in url.
5.555404
5.85896
0.948189
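Two cases motivate the custom join: a ':' appearing before any '/' would make the standard library parse the first segment as a URL scheme, and the join must produce exactly one slash. Standalone copy:

import urllib.parse

def _urljoin(base, url):
    if url.startswith('http://') or url.startswith('https://'):
        return urllib.parse.urljoin(base, url)  # absolute media-upload url
    new_base = base if base.endswith('/') else base + '/'
    new_url = url[1:] if url.startswith('/') else url
    return new_base + new_url

print(_urljoin('https://example.com/api', 'v1:batch'))
# https://example.com/api/v1:batch (plain urljoin would read 'v1:' as a scheme)
print(_urljoin('https://example.com/api/', '/items'))
# https://example.com/api/items (exactly one joining slash)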
args = {
    'api_key': self._API_KEY,
    'client': self,
    'client_id': self._CLIENT_ID,
    'client_secret': self._CLIENT_SECRET,
    'package_name': self._PACKAGE,
    'scopes': self._SCOPES,
    'user_agent': self._USER_AGENT,
}
args.update(kwds)
# credentials_lib can be expensive to import so do it only if needed.
from apitools.base.py import credentials_lib
# TODO(craigcitro): It's a bit dangerous to pass this
# still-half-initialized self into this method, but we might need
# to set attributes on it associated with our credentials.
# Consider another way around this (maybe a callback?) and whether
# or not it's worth it.
self._credentials = credentials_lib.GetCredentials(**args)
def _SetCredentials(self, **kwds)
Fetch credentials, and set them for this client.

Note that we can't simply return credentials, since creating them
may involve side-effecting self.

Args:
    **kwds: Additional keyword arguments are passed on to
        GetCredentials.

Returns:
    None. Sets self._credentials.
5.493142
5.444002
1.009026
old_model = self.response_type_model
self.__response_type_model = 'json'
yield
self.__response_type_model = old_model
def JsonResponseModel(self)
In this context, return raw JSON instead of proto.
4.928895
4.5707
1.078368
if self.log_request:
    logging.info(
        'Calling method %s with %s: %s',
        method_config.method_id, method_config.request_type_name,
        request)
return request
def ProcessRequest(self, method_config, request)
Hook for pre-processing of requests.
4.438678
4.367477
1.016303
http_request.headers.update(self.additional_http_headers)
if self.log_request:
    logging.info('Making http %s to %s',
                 http_request.http_method, http_request.url)
    logging.info('Headers: %s', pprint.pformat(http_request.headers))
    if http_request.body:
        # TODO(craigcitro): Make this safe to print in the case of
        # non-printable body characters.
        logging.info('Body:\n%s',
                     http_request.loggable_body or http_request.body)
    else:
        logging.info('Body: (none)')
return http_request
def ProcessHttpRequest(self, http_request)
Hook for pre-processing of http requests.
3.48067
3.358329
1.036429
try:
    message = encoding.JsonToMessage(response_type, data)
except (exceptions.InvalidDataFromServerError,
        messages.ValidationError, ValueError) as e:
    raise exceptions.InvalidDataFromServerError(
        'Error decoding response "%s" as type %s: %s' % (
            data, response_type.__name__, e))
return message
def DeserializeMessage(self, response_type, data)
Deserialize the given data as method_config.response_type.
4.406787
4.311622
1.022072
url_builder = _UrlBuilder.FromUrl(url)
if self.global_params.key:
    url_builder.query_params['key'] = self.global_params.key
return url_builder.url
def FinalizeTransferUrl(self, url)
Modify the url for a given transfer, based on auth and version.
4.657919
4.683556
0.994526
method_config = self._method_configs.get(method)
if method_config:
    return method_config
func = getattr(self, method, None)
if func is None:
    raise KeyError(method)
method_config = getattr(func, 'method_config', None)
if method_config is None:
    raise KeyError(method)
self._method_configs[method] = config = method_config()
return config
def GetMethodConfig(self, method)
Returns service cached method config for given method.
2.281445
2.237793
1.019507
util.Typecheck(global_params, (type(None), self.__client.params_type))
result = self.__client.params_type()
global_params = global_params or self.__client.params_type()
for field in result.all_fields():
    value = global_params.get_assigned_value(field.name)
    if value is None:
        value = default_params.get_assigned_value(field.name)
    if value not in (None, [], ()):
        setattr(result, field.name, value)
return result
def __CombineGlobalParams(self, global_params, default_params)
Combine the given params with the defaults.
3.481728
3.272967
1.063783
if isinstance(field, messages.BytesField) and value is not None:
    return base64.urlsafe_b64encode(value)
elif isinstance(value, six.text_type):
    return value.encode('utf8')
elif isinstance(value, six.binary_type):
    return value.decode('utf8')
elif isinstance(value, datetime.datetime):
    return value.isoformat()
return value
def __FinalUrlValue(self, value, field)
Encode value for the URL, using field to skip encoding for bytes.
2.267944
2.177902
1.041343
# First, handle the global params.
global_params = self.__CombineGlobalParams(
    global_params, self.__client.global_params)
global_param_names = util.MapParamNames(
    [x.name for x in self.__client.params_type.all_fields()],
    self.__client.params_type)
global_params_type = type(global_params)
query_info = dict(
    (param, self.__FinalUrlValue(getattr(global_params, param),
                                 getattr(global_params_type, param)))
    for param in global_param_names)
# Next, add the query params.
query_param_names = util.MapParamNames(query_params, type(request))
request_type = type(request)
query_info.update(
    (param,
     self.__FinalUrlValue(getattr(request, param, None),
                          getattr(request_type, param)))
    for param in query_param_names)
query_info = dict((k, v) for k, v in query_info.items()
                  if v is not None)
query_info = self.__EncodePrettyPrint(query_info)
query_info = util.MapRequestParams(query_info, type(request))
return query_info
def __ConstructQueryParams(self, query_params, request, global_params)
Construct a dictionary of query parameters for this request.
3.278734
3.219539
1.018386
python_param_names = util.MapParamNames(
    method_config.path_params, type(request))
params = dict([(param, getattr(request, param, None))
               for param in python_param_names])
params = util.MapRequestParams(params, type(request))
return util.ExpandRelativePath(method_config, params,
                               relative_path=relative_path)
def __ConstructRelativePath(self, method_config, request, relative_path=None)
Determine the relative path for request.
4.391394
4.26283
1.030159
if (http_request.http_method == 'GET' and
        len(http_request.url) > _MAX_URL_LENGTH):
    http_request.http_method = 'POST'
    http_request.headers['x-http-method-override'] = 'GET'
    http_request.headers[
        'content-type'] = 'application/x-www-form-urlencoded'
    http_request.body = url_builder.query
    url_builder.query_params = {}
http_request.url = url_builder.url
def __FinalizeRequest(self, http_request, url_builder)
Make any final general adjustments to the request.
2.527205
2.505237
1.008769
if http_response.status_code not in (http_client.OK,
                                     http_client.CREATED,
                                     http_client.NO_CONTENT):
    raise exceptions.HttpError.FromResponse(
        http_response, method_config=method_config, request=request)
if http_response.status_code == http_client.NO_CONTENT:
    # TODO(craigcitro): Find out why _replace doesn't seem to work
    # here.
    http_response = http_wrapper.Response(
        info=http_response.info, content='{}',
        request_url=http_response.request_url)

content = http_response.content
if self._client.response_encoding and isinstance(content, bytes):
    content = content.decode(self._client.response_encoding)

if self.__client.response_type_model == 'json':
    return content

response_type = _LoadClass(method_config.response_type_name,
                           self.__client.MESSAGES_MODULE)
return self.__client.DeserializeMessage(response_type, content)
def __ProcessHttpResponse(self, method_config, http_response, request)
Process the given http response.
3.458881
3.50253
0.987538
# TODO(craigcitro): Make the default a little better here, and
# include the apitools version.
user_agent = client.user_agent or 'apitools-client/1.0'
http_request.headers['user-agent'] = user_agent
http_request.headers['accept'] = 'application/json'
http_request.headers['accept-encoding'] = 'gzip, deflate'
def __SetBaseHeaders(self, http_request, client)
Fill in the basic headers on http_request.
3.796301
3.691957
1.028262
if not method_config.request_field:
    return
request_type = _LoadClass(
    method_config.request_type_name, self.__client.MESSAGES_MODULE)
if method_config.request_field == REQUEST_IS_BODY:
    body_value = request
    body_type = request_type
else:
    body_value = getattr(request, method_config.request_field)
    body_field = request_type.field_by_name(
        method_config.request_field)
    util.Typecheck(body_field, messages.MessageField)
    body_type = body_field.type
# If there was no body provided, we use an empty message of the
# appropriate type.
body_value = body_value or body_type()
if upload and not body_value:
    # We're going to fill in the body later.
    return
util.Typecheck(body_value, body_type)
http_request.headers['content-type'] = 'application/json'
http_request.body = self.__client.SerializeMessage(body_value)
def __SetBody(self, http_request, method_config, request, upload)
Fill in the body on http_request.
3.592741
3.483766
1.031281
request_type = _LoadClass(
    method_config.request_type_name, self.__client.MESSAGES_MODULE)
util.Typecheck(request, request_type)
request = self.__client.ProcessRequest(method_config, request)

http_request = http_wrapper.Request(
    http_method=method_config.http_method)
self.__SetBaseHeaders(http_request, self.__client)
self.__SetBody(http_request, method_config, request, upload)

url_builder = _UrlBuilder(
    self.__client.url, relative_path=method_config.relative_path)
url_builder.query_params = self.__ConstructQueryParams(
    method_config.query_params, request, global_params)

# It's important that upload and download go before we fill in the
# relative path, so that they can replace it.
if upload is not None:
    upload.ConfigureRequest(upload_config, http_request, url_builder)
if download is not None:
    download.ConfigureRequest(http_request, url_builder)

url_builder.relative_path = self.__ConstructRelativePath(
    method_config, request, relative_path=url_builder.relative_path)
self.__FinalizeRequest(http_request, url_builder)

return self.__client.ProcessHttpRequest(http_request)
def PrepareHttpRequest(self, method_config, request, global_params=None, upload=None, upload_config=None, download=None)
Prepares an HTTP request to be sent.
3.435085
3.413005
1.006469
if upload is not None and download is not None:
    # TODO(craigcitro): This just involves refactoring the logic
    # below into callbacks that we can pass around; in particular,
    # the order should be that the upload gets the initial request,
    # and then passes its reply to a download if one exists, and
    # then that goes to ProcessResponse and is returned.
    raise exceptions.NotYetImplementedError(
        'Cannot yet use both upload and download at once')

http_request = self.PrepareHttpRequest(
    method_config, request, global_params, upload, upload_config,
    download)

# TODO(craigcitro): Make num_retries customizable on Transfer
# objects, and pass in self.__client.num_retries when initializing
# an upload or download.
if download is not None:
    download.InitializeDownload(http_request, client=self.client)
    return

http_response = None
if upload is not None:
    http_response = upload.InitializeUpload(
        http_request, client=self.client)
if http_response is None:
    http = self.__client.http
    if upload and upload.bytes_http:
        http = upload.bytes_http
    opts = {
        'retries': self.__client.num_retries,
        'max_retry_wait': self.__client.max_retry_wait,
    }
    if self.__client.check_response_func:
        opts['check_response_func'] = self.__client.check_response_func
    if self.__client.retry_func:
        opts['retry_func'] = self.__client.retry_func
    http_response = http_wrapper.MakeRequest(
        http, http_request, **opts)

return self.ProcessHttpResponse(method_config, http_response, request)
def _RunMethod(self, method_config, request, global_params=None, upload=None, upload_config=None, download=None)
Call this method with request.
4.33732
4.350792
0.996904
return self.__client.ProcessResponse(
    method_config,
    self.__ProcessHttpResponse(method_config, http_response, request))
def ProcessHttpResponse(self, method_config, http_response, request=None)
Convert an HTTP response to the expected message type.
6.50029
6.07608
1.069816
enum_value_descriptor = EnumValueDescriptor()
enum_value_descriptor.name = six.text_type(enum_value.name)
enum_value_descriptor.number = enum_value.number
return enum_value_descriptor
def describe_enum_value(enum_value)
Build descriptor for Enum instance.

Args:
    enum_value: Enum value to provide descriptor for.

Returns:
    Initialized EnumValueDescriptor instance describing the Enum
    instance.
2.231517
2.539523
0.878715
enum_descriptor = EnumDescriptor()
enum_descriptor.name = enum_definition.definition_name().split('.')[-1]

values = []
for number in enum_definition.numbers():
    value = enum_definition.lookup_by_number(number)
    values.append(describe_enum_value(value))

if values:
    enum_descriptor.values = values

return enum_descriptor
def describe_enum(enum_definition)
Build descriptor for Enum class.

Args:
    enum_definition: Enum class to provide descriptor for.

Returns:
    Initialized EnumDescriptor instance describing the Enum class.
3.001738
3.532976
0.849634
field_descriptor = FieldDescriptor()
field_descriptor.name = field_definition.name
field_descriptor.number = field_definition.number
field_descriptor.variant = field_definition.variant

if isinstance(field_definition, messages.EnumField):
    field_descriptor.type_name = field_definition.type.definition_name()

if isinstance(field_definition, messages.MessageField):
    field_descriptor.type_name = (
        field_definition.message_type.definition_name())

if field_definition.default is not None:
    field_descriptor.default_value = _DEFAULT_TO_STRING_MAP[
        type(field_definition)](field_definition.default)

# Set label.
if field_definition.repeated:
    field_descriptor.label = FieldDescriptor.Label.REPEATED
elif field_definition.required:
    field_descriptor.label = FieldDescriptor.Label.REQUIRED
else:
    field_descriptor.label = FieldDescriptor.Label.OPTIONAL

return field_descriptor
def describe_field(field_definition)
Build descriptor for Field instance.

Args:
    field_definition: Field instance to provide descriptor for.

Returns:
    Initialized FieldDescriptor instance describing the Field
    instance.
2.100514
2.101882
0.999349
message_descriptor = MessageDescriptor()
message_descriptor.name = message_definition.definition_name().split(
    '.')[-1]

fields = sorted(message_definition.all_fields(),
                key=lambda v: v.number)
if fields:
    message_descriptor.fields = [describe_field(field)
                                 for field in fields]

try:
    nested_messages = message_definition.__messages__
except AttributeError:
    pass
else:
    message_descriptors = []
    for name in nested_messages:
        value = getattr(message_definition, name)
        message_descriptors.append(describe_message(value))
    message_descriptor.message_types = message_descriptors

try:
    nested_enums = message_definition.__enums__
except AttributeError:
    pass
else:
    enum_descriptors = []
    for name in nested_enums:
        value = getattr(message_definition, name)
        enum_descriptors.append(describe_enum(value))
    message_descriptor.enum_types = enum_descriptors

return message_descriptor
def describe_message(message_definition)
Build descriptor for Message class.

Args:
    message_definition: Message class to provide descriptor for.

Returns:
    Initialized MessageDescriptor instance describing the Message
    class.
2.0176
2.123562
0.950102
descriptor = FileDescriptor()
descriptor.package = util.get_package_for_module(module)

if not descriptor.package:
    descriptor.package = None

message_descriptors = []
enum_descriptors = []

# Need to iterate over all top level attributes of the module looking
# for message and enum definitions. Each definition must be itself
# described.
for name in sorted(dir(module)):
    value = getattr(module, name)

    if isinstance(value, type):
        if issubclass(value, messages.Message):
            message_descriptors.append(describe_message(value))
        elif issubclass(value, messages.Enum):
            enum_descriptors.append(describe_enum(value))

if message_descriptors:
    descriptor.message_types = message_descriptors

if enum_descriptors:
    descriptor.enum_types = enum_descriptors

return descriptor
def describe_file(module)
Build a file from a specified Python module.

Args:
    module: Python module to describe.

Returns:
    Initialized FileDescriptor instance describing the module.
2.988718
3.087894
0.967882
descriptor = FileSet()

file_descriptors = []
for module in modules:
    file_descriptors.append(describe_file(module))

if file_descriptors:
    descriptor.files = file_descriptors

return descriptor
def describe_file_set(modules)
Build a file set from the specified Python modules.

Args:
    modules: Iterable of Python modules to describe.

Returns:
    Initialized FileSet instance describing the modules.
3.446715
4.445404
0.775343
if isinstance(value, types.ModuleType):
    return describe_file(value)
elif isinstance(value, messages.Field):
    return describe_field(value)
elif isinstance(value, messages.Enum):
    return describe_enum_value(value)
elif isinstance(value, type):
    if issubclass(value, messages.Message):
        return describe_message(value)
    elif issubclass(value, messages.Enum):
        return describe_enum(value)
return None
def describe(value)
Describe any value as a descriptor.

Helper function for describing any object with an appropriate
descriptor object.

Args:
    value: Value to describe as a descriptor.

Returns:
    Descriptor message class if object is describable as a
    descriptor, else None.
2.157217
2.449238
0.880771
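A usage sketch for the dispatcher above, assuming the protorpc package is installed and that these functions live in its descriptor module; the import path is an assumption.

from protorpc import descriptor, messages

class Color(messages.Enum):
    RED = 1
    GREEN = 2

print(descriptor.describe(Color).name)        # 'Color' (an EnumDescriptor)
print(descriptor.describe(Color.RED).number)  # 1 (an EnumValueDescriptor)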
# Attempt to import descriptor as a module.
if definition_name.startswith('.'):
    definition_name = definition_name[1:]
if not definition_name.startswith('.'):
    leaf = definition_name.split('.')[-1]
    if definition_name:
        try:
            module = importer(definition_name, '', '', [leaf])
        except ImportError:
            pass
        else:
            return describe(module)

try:
    # Attempt to use messages.find_definition to find item.
    return describe(messages.find_definition(definition_name,
                                             importer=__import__))
except messages.DefinitionNotFoundError as err:
    # There are things that find_definition will not find, but if
    # the parent is loaded, its children can be searched for a
    # match.
    split_name = definition_name.rsplit('.', 1)
    if len(split_name) > 1:
        parent, child = split_name
        try:
            parent_definition = import_descriptor_loader(
                parent, importer=importer)
        except messages.DefinitionNotFoundError:
            # Fall through to original error.
            pass
        else:
            # Check the parent definition for a matching descriptor.
            if isinstance(parent_definition, EnumDescriptor):
                search_list = parent_definition.values or []
            elif isinstance(parent_definition, MessageDescriptor):
                search_list = parent_definition.fields or []
            else:
                search_list = []
            for definition in search_list:
                if definition.name == child:
                    return definition

    # Still didn't find. Reraise original exception.
    raise err
def import_descriptor_loader(definition_name, importer=__import__)
Find objects by importing modules as needed.

A definition loader is a function that resolves a definition name to
a descriptor. The import finder resolves definitions to their names
by importing modules when necessary.

Args:
    definition_name: Name of definition to find.
    importer: Import function used for importing new modules.

Returns:
    Appropriate descriptor for any describable type located by name.

Raises:
    DefinitionNotFoundError when a name does not refer to either a
    definition or a module.
3.631516
3.681502
0.986422
try:
    return self.__descriptors[definition_name]
except KeyError:
    pass

if self.__descriptor_loader:
    definition = self.__descriptor_loader(definition_name)
    self.__descriptors[definition_name] = definition
    return definition
else:
    raise messages.DefinitionNotFoundError(
        'Could not find definition for %s' % definition_name)
def lookup_descriptor(self, definition_name)
Lookup descriptor by name.

Get descriptor from library by name. If the descriptor is not found,
will attempt to find it via the descriptor loader if provided.

Args:
    definition_name: Definition name to find.

Returns:
    Descriptor that describes the definition name.

Raises:
    DefinitionNotFoundError if no descriptor exists for the
    definition name.
2.741362
2.616538
1.047706
while True:
    descriptor = self.lookup_descriptor(definition_name)
    if isinstance(descriptor, FileDescriptor):
        return descriptor.package
    else:
        index = definition_name.rfind('.')
        if index < 0:
            return None
        definition_name = definition_name[:index]
def lookup_package(self, definition_name)
Determines the package name for any definition.

Determine the package that any definition name belongs to. May check
the parent for the package name, and will resolve missing descriptors
if a descriptor loader is provided.

Args:
    definition_name: Definition name to find package for.
2.898144
2.974405
0.974361
first_import_error = None
for module_name in ['json', 'simplejson']:
    try:
        module = __import__(module_name, {}, {}, 'json')
        if not hasattr(module, 'JSONEncoder'):
            message = (
                'json library "%s" is not compatible with ProtoRPC' %
                module_name)
            logging.warning(message)
            raise ImportError(message)
        else:
            return module
    except ImportError as err:
        if not first_import_error:
            first_import_error = err

logging.error('Must use valid json library (json or simplejson)')
raise first_import_error
def _load_json_module()
Try to load a valid json module.

There is more than one json module that might be installed. They are
mostly compatible with one another, but some versions may differ.
This function attempts to load various json modules in a preferred
order. It does a basic check to guess if a loaded version of json is
compatible.

Returns:
    Compatible json module.

Raises:
    ImportError if there are no json modules or the loaded json
    module is not compatible with ProtoRPC.
3.302884
3.183669
1.037446
if isinstance(value, messages.Enum):
    return str(value)

if six.PY3 and isinstance(value, bytes):
    return value.decode('utf8')

if isinstance(value, messages.Message):
    result = {}
    for field in value.all_fields():
        item = value.get_assigned_value(field.name)
        if item not in (None, [], ()):
            result[field.name] = (
                self.__protojson_protocol.encode_field(field, item))
    # Handle unrecognized fields, so they're included when a message
    # is decoded then encoded.
    for unknown_key in value.all_unrecognized_fields():
        unrecognized_field, _ = value.get_unrecognized_field_info(
            unknown_key)
        # Unknown fields are not encoded as they should have been
        # processed before we get to here.
        result[unknown_key] = unrecognized_field
    return result

return super(MessageJSONEncoder, self).default(value)
def default(self, value)
Return dictionary instance from a message object.

Args:
    value: Value to get dictionary for. If not encodable, will call
        the superclass's default method.
4.168869
4.205711
0.99124
if isinstance(field, messages.BytesField):
    if field.repeated:
        value = [base64.b64encode(byte) for byte in value]
    else:
        value = base64.b64encode(value)
elif isinstance(field, message_types.DateTimeField):
    # DateTimeField stores its data as a RFC 3339 compliant string.
    if field.repeated:
        value = [i.isoformat() for i in value]
    else:
        value = value.isoformat()
return value
def encode_field(self, field, value)
Encode a python field value to a JSON value.

Args:
    field: A ProtoRPC field instance.
    value: A python value supported by field.

Returns:
    A JSON serializable value appropriate for field.
2.687272
2.655512
1.01196
message.check_initialized()

return json.dumps(message, cls=MessageJSONEncoder,
                  protojson_protocol=self)
def encode_message(self, message)
Encode Message instance to JSON string.

Args:
    message: Message instance to encode into a JSON string.

Returns:
    String encoding of Message instance in protocol JSON format.

Raises:
    messages.ValidationError if message is not initialized.
17.817205
14.953481
1.191509
encoded_message = six.ensure_str(encoded_message)
if not encoded_message.strip():
    return message_type()

dictionary = json.loads(encoded_message)
message = self.__decode_dictionary(message_type, dictionary)
message.check_initialized()
return message
def decode_message(self, message_type, encoded_message)
Merge JSON structure to Message instance.

Args:
    message_type: Message to decode data to.
    encoded_message: JSON encoded version of message.

Returns:
    Decoded instance of message_type.

Raises:
    ValueError: If encoded_message is not valid JSON.
    messages.ValidationError if merged message is not initialized.
3.947392
4.232828
0.932566
if isinstance(value, bool):
    return messages.Variant.BOOL
elif isinstance(value, six.integer_types):
    return messages.Variant.INT64
elif isinstance(value, float):
    return messages.Variant.DOUBLE
elif isinstance(value, six.string_types):
    return messages.Variant.STRING
elif isinstance(value, (list, tuple)):
    # Find the most specific variant that covers all elements.
    variant_priority = [None, messages.Variant.INT64,
                        messages.Variant.DOUBLE,
                        messages.Variant.STRING]
    chosen_priority = 0
    for v in value:
        variant = self.__find_variant(v)
        try:
            priority = variant_priority.index(variant)
        except ValueError:
            # list.index raises ValueError (not IndexError) when the
            # variant, e.g. BOOL, is not in the priority list.
            priority = -1
        if priority > chosen_priority:
            chosen_priority = priority
    return variant_priority[chosen_priority]
# Unrecognized type.
return None
def __find_variant(self, value)
Find the messages.Variant type that describes this value.

Args:
    value: The value whose variant type is being determined.

Returns:
    The messages.Variant value that best describes value's type,
    or None if it's a type we don't know how to handle.
2.241062
2.086882
1.073881
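A standalone sketch of the list rule, with plain strings standing in for messages.Variant values; note that list.index raises ValueError on a miss, so a 'BOOL' element simply keeps the current choice.

VARIANT_PRIORITY = [None, 'INT64', 'DOUBLE', 'STRING']

def find_variant(value):
    # Simplified scalar cases.
    if isinstance(value, bool):
        return 'BOOL'
    if isinstance(value, int):
        return 'INT64'
    if isinstance(value, float):
        return 'DOUBLE'
    if isinstance(value, str):
        return 'STRING'
    if isinstance(value, (list, tuple)):
        chosen = 0
        for v in value:
            try:
                chosen = max(chosen, VARIANT_PRIORITY.index(find_variant(v)))
            except ValueError:
                pass  # variant not in the priority list, e.g. 'BOOL'
        return VARIANT_PRIORITY[chosen]
    return None

print(find_variant([1, 2.5]))  # 'DOUBLE' covers both ints and floats
print(find_variant([1, 'a']))  # 'STRING' is the most general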
message = message_type()

for key, value in six.iteritems(dictionary):
    if value is None:
        try:
            message.reset(key)
        except AttributeError:
            pass  # This is an unrecognized field, skip it.
        continue

    try:
        field = message.field_by_name(key)
    except KeyError:
        # Save unknown values.
        variant = self.__find_variant(value)
        if variant:
            message.set_unrecognized_field(key, value, variant)
        continue

    if field.repeated:
        # This should be unnecessary? Or in fact become an error.
        if not isinstance(value, list):
            value = [value]
        valid_value = [self.decode_field(field, item)
                       for item in value]
        setattr(message, field.name, valid_value)
        continue

    # This is just for consistency with the old behavior.
    if value == []:
        continue

    try:
        setattr(message, field.name, self.decode_field(field, value))
    except messages.DecodeError:
        # Save unknown enum values.
        if not isinstance(field, messages.EnumField):
            raise
        variant = self.__find_variant(value)
        if variant:
            message.set_unrecognized_field(key, value, variant)

return message
def __decode_dictionary(self, message_type, dictionary)
Merge dictionary into message.

Args:
    message_type: Message type to merge dictionary into.
    dictionary: Dictionary to extract information from. Dictionary is
        as parsed from JSON. Nested objects will also be dictionaries.
3.406875
3.512592
0.969903
if isinstance(field, messages.EnumField):
    try:
        return field.type(value)
    except TypeError:
        raise messages.DecodeError(
            'Invalid enum value "%s"' % (value or ''))

elif isinstance(field, messages.BytesField):
    try:
        return base64.b64decode(value)
    except (binascii.Error, TypeError) as err:
        raise messages.DecodeError('Base64 decoding error: %s' % err)

elif isinstance(field, message_types.DateTimeField):
    try:
        return util.decode_datetime(value)
    except ValueError as err:
        raise messages.DecodeError(err)

elif (isinstance(field, messages.MessageField) and
      issubclass(field.type, messages.Message)):
    return self.__decode_dictionary(field.type, value)

elif (isinstance(field, messages.FloatField) and
      isinstance(value, (six.integer_types, six.string_types))):
    try:
        return float(value)
    except:  # pylint:disable=bare-except
        pass

elif (isinstance(field, messages.IntegerField) and
      isinstance(value, six.string_types)):
    try:
        return int(value)
    except:  # pylint:disable=bare-except
        pass

return value
def decode_field(self, field, value)
Decode a JSON value to a python value.

Args:
    field: A ProtoRPC field instance.
    value: A serialized JSON value.

Returns:
    A Python value compatible with field.
2.068672
2.077704
0.995653
printer = self._GetPrinter(out)
if self.__init_wildcards_file:
    printer('', self.__client_info.package)
    printer('# pylint:disable=wildcard-import')
else:
    printer('')
printer()
printer('import pkgutil')
printer()
if self.__init_wildcards_file:
    printer('from %s import *', self.__base_files_package)
    if self.__root_package == '.':
        import_prefix = ''
    else:
        import_prefix = '%s.' % self.__root_package
    printer('from %s%s import *',
            import_prefix, self.__client_info.client_rule_name)
    printer('from %s%s import *',
            import_prefix, self.__client_info.messages_rule_name)
    printer()
printer('__path__ = pkgutil.extend_path(__path__, __name__)')
def WriteInit(self, out)
Write a simple __init__.py for the generated client.
4.101141
3.696849
1.109361
printer = self._GetPrinter(out)
printer('#!/usr/bin/env python')
printer('')
printer()
printer('from pkgutil import extend_path')
printer('__path__ = extend_path(__path__, __name__)')
def WriteIntermediateInit(self, out)
Write a simple __init__.py for an intermediate directory.
4.010555
3.403115
1.178495
printer = self._GetPrinter(out)
year = datetime.datetime.now().year
printer('# Copyright %s Google Inc. All Rights Reserved.' % year)
printer('#')
printer('# Licensed under the Apache License, Version 2.0 (the'
        '"License");')
printer('# you may not use this file except in compliance with '
        'the License.')
printer('# You may obtain a copy of the License at')
printer('#')
printer('# http://www.apache.org/licenses/LICENSE-2.0')
printer('#')
printer('# Unless required by applicable law or agreed to in writing, '
        'software')
printer('# distributed under the License is distributed on an "AS IS" '
        'BASIS,')
printer('# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either '
        'express or implied.')
printer('# See the License for the specific language governing '
        'permissions and')
printer('# limitations under the License.')
printer()
printer('import setuptools')
printer('REQUIREMENTS = [')
with printer.Indent(indent=' '):
    parts = self.apitools_version.split('.')
    major = parts.pop(0)
    minor = parts.pop(0)
    printer('"google-apitools>=%s,~=%s.%s",',
            self.apitools_version, major, minor)
    printer('"httplib2>=0.9",')
    printer('"oauth2client>=1.4.12",')
printer(']')
printer('_PACKAGE = "apitools.clients.%s"' % self.__package)
printer()
printer('setuptools.setup(')
# TODO(craigcitro): Allow customization of these options.
with printer.Indent(indent=' '):
    printer('name="google-apitools-%s-%s",',
            self.__package, self.__version)
    printer('version="%s.%s",',
            self.apitools_version, self.__revision)
    printer('description="Autogenerated apitools library for %s",' % (
        self.__package,))
    printer('url="https://github.com/google/apitools",')
    printer('author="Craig Citro",')
    printer('author_email="[email protected]",')
    printer('packages=setuptools.find_packages(),')
    printer('install_requires=REQUIREMENTS,')
    printer('classifiers=[')
    with printer.Indent(indent=' '):
        printer('"Programming Language :: Python :: 2.7",')
        printer('"License :: OSI Approved :: Apache Software '
                'License",')
    printer('],')
    printer('license="Apache 2.0",')
    printer('keywords="apitools apitools-%s %s",' % (
        self.__package, self.__package))
printer(')')
def WriteSetupPy(self, out)
Write a setup.py for upload to PyPI.
2.3537
2.33614
1.007517
if 'content-range' in response.info:
    print('Received %s' % response.info['content-range'])
else:
    print('Received %d bytes' % response.length)
def DownloadProgressPrinter(response, unused_download)
Print download progress based on response.
2.925685
3.047096
0.960155
self.EnsureUninitialized()
if self.http is None:
    self.__http = http or http_wrapper.GetHttp()
self.__url = url
def _Initialize(self, http, url)
Initialize this download by setting self.http and self.url.

We want the user to be able to override self.http by having set the
value in the constructor; in that case, we ignore the provided http.

Args:
    http: An httplib2.Http instance or None.
    url: The url for this transfer.

Returns:
    None. Initializes self.
7.408791
8.320272
0.89045
path = os.path.expanduser(filename)
if os.path.exists(path) and not overwrite:
    raise exceptions.InvalidUserInputError(
        'File %s exists and overwrite not specified' % path)
return cls(open(path, 'wb'), close_stream=True,
           auto_transfer=auto_transfer, **kwds)
def FromFile(cls, filename, overwrite=False, auto_transfer=True, **kwds)
Create a new download object from a filename.
3.232644
3.243759
0.996573
return cls(stream, auto_transfer=auto_transfer, total_size=total_size,
           **kwds)
def FromStream(cls, stream, auto_transfer=True, total_size=None, **kwds)
Create a new Download object from a stream.
2.528207
2.695419
0.937964
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
    raise exceptions.InvalidDataError(
        'Invalid serialization data, missing keys: %s' % (
            ', '.join(missing_keys)))
download = cls.FromStream(stream, **kwds)
if auto_transfer is not None:
    download.auto_transfer = auto_transfer
else:
    download.auto_transfer = info['auto_transfer']
setattr(download, '_Download__progress', info['progress'])
setattr(download, '_Download__total_size', info['total_size'])
download._Initialize(  # pylint: disable=protected-access
    http, info['url'])
return download
def FromData(cls, stream, json_data, http=None, auto_transfer=None, **kwds)
Create a new Download object from a stream and serialized data.
2.818512
2.559097
1.10137
if 'content-range' in info:
    _, _, total = info['content-range'].rpartition('/')
    if total != '*':
        self.__total_size = int(total)
# Note "total_size is None" means we don't know it; if no size
# info was returned on our initial range request, that means we
# have a 0-byte file. (That last statement has been verified
# empirically, but is not clearly documented anywhere.)
if self.total_size is None:
    self.__total_size = 0
def __SetTotal(self, info)
Sets the total size based on info if possible, otherwise to 0.
6.897319
6.44642
1.069946
self.EnsureUninitialized()
if http is None and client is None:
    raise exceptions.UserError('Must provide client or http.')
http = http or client.http
if client is not None:
    http_request.url = client.FinalizeTransferUrl(http_request.url)
url = http_request.url
if self.auto_transfer:
    end_byte = self.__ComputeEndByte(0)
    self.__SetRangeHeader(http_request, 0, end_byte)
    response = http_wrapper.MakeRequest(
        self.bytes_http or http, http_request)
    if response.status_code not in self._ACCEPTABLE_STATUSES:
        raise exceptions.HttpError.FromResponse(response)
    self.__initial_response = response
    self.__SetTotal(response.info)
    url = response.info.get('content-location', response.request_url)
if client is not None:
    url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
    self.StreamInChunks()
def InitializeDownload(self, http_request, http=None, client=None)
Initialize this download by making a request.

Args:
    http_request: The HttpRequest to use to initialize this download.
    http: The httplib2.Http instance for this request.
    client: If provided, let this client process the final URL before
        sending any additional requests. If client is provided and
        http is not, client.http will be used instead.
4.880149
4.552161
1.072051
if end is not None:
    if start < 0:
        raise exceptions.TransferInvalidError(
            'Cannot have end index with negative start index ' +
            '[start=%d, end=%d]' % (start, end))
    elif start >= self.total_size:
        raise exceptions.TransferInvalidError(
            'Cannot have start index greater than total size ' +
            '[start=%d, total_size=%d]' % (start, self.total_size))
    end = min(end, self.total_size - 1)
    if end < start:
        raise exceptions.TransferInvalidError(
            'Range requested with end[%s] < start[%s]' % (end, start))
    return start, end
else:
    if start < 0:
        start = max(0, start + self.total_size)
    return start, self.total_size - 1
def __NormalizeStartEnd(self, start, end=None)
Normalizes start and end values based on total size.
2.334549
2.253385
1.036019
end_byte = end

if start < 0 and not self.total_size:
    return end_byte

if use_chunks:
    alternate = start + self.chunksize - 1
    if end_byte is not None:
        end_byte = min(end_byte, alternate)
    else:
        end_byte = alternate

if self.total_size:
    alternate = self.total_size - 1
    if end_byte is not None:
        end_byte = min(end_byte, alternate)
    else:
        end_byte = alternate

return end_byte
def __ComputeEndByte(self, start, end=None, use_chunks=True)
Compute the last byte to fetch for this request.

This is all based on the HTTP spec for Range and Content-Range.

Note that this is potentially confusing in several ways:
    * the value for the last byte is 0-based, eg "fetch 10 bytes
      from the beginning" would return 9 here.
    * if we have no information about size, and don't want to use
      the chunksize, we'll return None.
See the tests for more examples.

Args:
    start: byte to start at.
    end: (int or None, default: None) Suggested last byte.
    use_chunks: (bool, default: True) If False, ignore
        self.chunksize.

Returns:
    Last byte to use in a Range header, or None.
2.377068
2.600986
0.91391
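A worked example of the end-byte rule, as a standalone function taking total_size and chunksize explicitly (on the real object they are attributes).

def compute_end_byte(start, end=None, total_size=None,
                     chunksize=1048576, use_chunks=True):
    end_byte = end
    if start < 0 and not total_size:
        return end_byte  # a tail request can't be resolved without a size
    if use_chunks:
        alternate = start + chunksize - 1
        end_byte = alternate if end_byte is None else min(end_byte, alternate)
    if total_size:
        alternate = total_size - 1
        end_byte = alternate if end_byte is None else min(end_byte, alternate)
    return end_byte

print(compute_end_byte(0))                  # 1048575: one full chunk
print(compute_end_byte(0, total_size=100))  # 99: the file ends first
print(compute_end_byte(-10))                # None: unknown size, tail request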
self.EnsureInitialized()
request = http_wrapper.Request(url=self.url)
self.__SetRangeHeader(request, start, end=end)
if additional_headers is not None:
    request.headers.update(additional_headers)
return http_wrapper.MakeRequest(
    self.bytes_http, request, retry_func=self.retry_func,
    retries=self.num_retries)
def __GetChunk(self, start, end, additional_headers=None)
Retrieve a chunk, and return the full response.
4.273395
3.948043
1.082408
if response.status_code not in self._ACCEPTABLE_STATUSES:
    # We distinguish errors that mean we made a mistake in setting
    # up the transfer versus something we should attempt again.
    if response.status_code in (http_client.FORBIDDEN,
                                http_client.NOT_FOUND):
        raise exceptions.HttpError.FromResponse(response)
    else:
        raise exceptions.TransferRetryError(response.content)
if response.status_code in (http_client.OK,
                            http_client.PARTIAL_CONTENT):
    try:
        self.stream.write(six.ensure_binary(response.content))
    except TypeError:
        self.stream.write(six.ensure_text(response.content))
    self.__progress += response.length
    if response.info and 'content-encoding' in response.info:
        # TODO(craigcitro): Handle the case where this changes over
        # a download.
        self.__encoding = response.info['content-encoding']
elif response.status_code == http_client.NO_CONTENT:
    # It's important to write something to the stream for the case
    # of a 0-byte download to a file, as otherwise python won't
    # create the file.
    self.stream.write('')
return response
def __ProcessResponse(self, response)
Process response (by updating self and writing to self.stream).
4.30838
4.114381
1.047151
self.EnsureInitialized()
progress_end_normalized = False
if self.total_size is not None:
    progress, end_byte = self.__NormalizeStartEnd(start, end)
    progress_end_normalized = True
else:
    progress = start
    end_byte = end
while (not progress_end_normalized or end_byte is None or
       progress <= end_byte):
    end_byte = self.__ComputeEndByte(progress, end=end_byte,
                                     use_chunks=use_chunks)
    response = self.__GetChunk(progress, end_byte,
                               additional_headers=additional_headers)
    if not progress_end_normalized:
        self.__SetTotal(response.info)
        progress, end_byte = self.__NormalizeStartEnd(start, end)
        progress_end_normalized = True
    response = self.__ProcessResponse(response)
    progress += response.length
    if response.length == 0:
        if response.status_code == http_client.OK:
            # There can legitimately be no Content-Length header sent
            # in some cases (e.g., when there's a Transfer-Encoding
            # header) and if this was a 200 response (as opposed to
            # 206 Partial Content) we know we're done now without
            # looping further on received length.
            return
        raise exceptions.TransferRetryError(
            'Zero bytes unexpectedly returned in download response')
def GetRange(self, start, end=None, additional_headers=None, use_chunks=True)
Retrieve a given byte range from this download, inclusive.

Range must be of one of these three forms:
    * 0 <= start, end = None: Fetch from start to the end of the file.
    * 0 <= start <= end: Fetch the bytes from start to end.
    * start < 0, end = None: Fetch the last -start bytes of the file.
(These variations correspond to those described in the HTTP 1.1
protocol for range headers in RFC 2616, sec. 14.35.1.)

Args:
    start: (int) Where to start fetching bytes. (See above.)
    end: (int, optional) Where to stop fetching bytes. (See above.)
    additional_headers: (bool, optional) Any additional headers to
        pass with the request.
    use_chunks: (bool, default: True) If False, ignore self.chunksize
        and fetch this range in a single request.

Returns:
    None. Streams bytes into self.stream.
4.521364
4.744075
0.953055
self.StreamMedia(callback=callback, finish_callback=finish_callback,
                 additional_headers=additional_headers,
                 use_chunks=True)
def StreamInChunks(self, callback=None, finish_callback=None, additional_headers=None)
Stream the entire download in chunks.
4.117861
3.642694
1.130444
callback = callback or self.progress_callback
finish_callback = finish_callback or self.finish_callback

self.EnsureInitialized()
while True:
    if self.__initial_response is not None:
        response = self.__initial_response
        self.__initial_response = None
    else:
        end_byte = self.__ComputeEndByte(self.progress,
                                         use_chunks=use_chunks)
        response = self.__GetChunk(
            self.progress, end_byte,
            additional_headers=additional_headers)
    if self.total_size is None:
        self.__SetTotal(response.info)
    response = self.__ProcessResponse(response)
    self._ExecuteCallback(callback, response)
    if (response.status_code == http_client.OK or
            self.progress >= self.total_size):
        break
self._ExecuteCallback(finish_callback, response)
def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None, use_chunks=True)
Stream the entire download.

Args:
    callback: (default: None) Callback to call as each chunk is
        completed.
    finish_callback: (default: None) Callback to call when the
        download is complete.
    additional_headers: (default: None) Additional headers to
        include in fetching bytes.
    use_chunks: (bool, default: True) If False, ignore self.chunksize
        and stream this download in a single request.

Returns:
    None. Streams bytes into self.stream.
3.375994
3.486878
0.9682
path = os.path.expanduser(filename)
if not os.path.exists(path):
    raise exceptions.NotFoundError('Could not find file %s' % path)
if not mime_type:
    mime_type, _ = mimetypes.guess_type(path)
    if mime_type is None:
        raise exceptions.InvalidUserInputError(
            'Could not guess mime type for %s' % path)
size = os.stat(path).st_size
return cls(open(path, 'rb'), mime_type, total_size=size,
           close_stream=True, auto_transfer=auto_transfer,
           gzip_encoded=gzip_encoded, **kwds)
def FromFile(cls, filename, mime_type=None, auto_transfer=True, gzip_encoded=False, **kwds)
Create a new Upload object from a filename.
2.248543
2.173832
1.034368
if mime_type is None:
    raise exceptions.InvalidUserInputError(
        'No mime_type specified for stream')
return cls(stream, mime_type, total_size=total_size,
           close_stream=False, auto_transfer=auto_transfer,
           gzip_encoded=gzip_encoded, **kwds)
def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True, gzip_encoded=False, **kwds)
Create a new Upload object from a stream.
2.658208
2.650285
1.002989
info = json.loads(json_data) missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys()) if missing_keys: raise exceptions.InvalidDataError( 'Invalid serialization data, missing keys: %s' % ( ', '.join(missing_keys))) if 'total_size' in kwds: raise exceptions.InvalidUserInputError( 'Cannot override total_size on serialized Upload') upload = cls.FromStream(stream, info['mime_type'], total_size=info.get('total_size'), gzip_encoded=gzip_encoded, **kwds) if isinstance(stream, io.IOBase) and not stream.seekable(): raise exceptions.InvalidUserInputError( 'Cannot restart resumable upload on non-seekable stream') if auto_transfer is not None: upload.auto_transfer = auto_transfer else: upload.auto_transfer = info['auto_transfer'] upload.strategy = RESUMABLE_UPLOAD upload._Initialize( # pylint: disable=protected-access http, info['url']) upload.RefreshResumableUploadState() upload.EnsureInitialized() if upload.auto_transfer: upload.StreamInChunks() return upload
def FromData(cls, stream, json_data, http, auto_transfer=None, gzip_encoded=False, **kwds)
Create a new Upload of stream from serialized json_data and http.
3.548662
3.393924
1.045592
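FromData validates the serialized state before rebuilding the upload. A minimal sketch of the missing-key check follows; the key set here is illustrative and not necessarily the real _REQUIRED_SERIALIZATION_KEYS.

import json

REQUIRED_KEYS = {'auto_transfer', 'mime_type', 'total_size', 'url'}
info = json.loads('{"mime_type": "text/plain", "url": "http://e.com/u"}')
missing = REQUIRED_KEYS - set(info)
assert missing == {'auto_transfer', 'total_size'}
# The real method raises InvalidDataError listing these missing keys.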
if upload_config.resumable_path is None: self.strategy = SIMPLE_UPLOAD if self.strategy is not None: return strategy = SIMPLE_UPLOAD if (self.total_size is not None and self.total_size > _RESUMABLE_UPLOAD_THRESHOLD): strategy = RESUMABLE_UPLOAD if http_request.body and not upload_config.simple_multipart: strategy = RESUMABLE_UPLOAD if not upload_config.simple_path: strategy = RESUMABLE_UPLOAD self.strategy = strategy
def __SetDefaultUploadStrategy(self, upload_config, http_request)
Determine and set the default upload strategy for this upload. We generally prefer simple or multipart, unless we're forced to use resumable. This happens when any of (1) the upload is too large, (2) the simple endpoint doesn't support multipart requests and we have metadata, or (3) there is no simple upload endpoint. Args: upload_config: Configuration for the upload endpoint. http_request: The associated http request. Returns: None.
3.560772
3.121175
1.140843
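A standalone sketch of the strategy decision described above, covering the three "force resumable" conditions from the docstring. The size threshold is illustrative and stands in for _RESUMABLE_UPLOAD_THRESHOLD.

SIMPLE, RESUMABLE = 'simple', 'resumable'
THRESHOLD = 5 << 20  # illustrative stand-in for _RESUMABLE_UPLOAD_THRESHOLD

def choose_strategy(total_size, has_body, simple_multipart, simple_path):
    if total_size is not None and total_size > THRESHOLD:
        return RESUMABLE          # (1) upload too large
    if has_body and not simple_multipart:
        return RESUMABLE          # (2) metadata but no multipart support
    if not simple_path:
        return RESUMABLE          # (3) no simple endpoint at all
    return SIMPLE

assert choose_strategy(1024, False, True, '/upload') == SIMPLE
assert choose_strategy(100 << 20, False, True, '/upload') == RESUMABLE
assert choose_strategy(1024, True, False, '/upload') == RESUMABLE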
# Validate total_size vs. max_size if (self.total_size and upload_config.max_size and self.total_size > upload_config.max_size): raise exceptions.InvalidUserInputError( 'Upload too big: %s larger than max size %s' % ( self.total_size, upload_config.max_size)) # Validate mime type if not util.AcceptableMimeType(upload_config.accept, self.mime_type): raise exceptions.InvalidUserInputError( 'MIME type %s does not match any accepted MIME ranges %s' % ( self.mime_type, upload_config.accept)) self.__SetDefaultUploadStrategy(upload_config, http_request) if self.strategy == SIMPLE_UPLOAD: url_builder.relative_path = upload_config.simple_path if http_request.body: url_builder.query_params['uploadType'] = 'multipart' self.__ConfigureMultipartRequest(http_request) else: url_builder.query_params['uploadType'] = 'media' self.__ConfigureMediaRequest(http_request) # Once the entire body is written, compress the body if configured # to. Both multipart and media request uploads will read the # entire stream into memory, which means full compression is also # safe to perform. Because the strategy is set to SIMPLE_UPLOAD, # StreamInChunks throws an exception, meaning double compression # cannot happen. if self.__gzip_encoded: http_request.headers['Content-Encoding'] = 'gzip' # Turn the body into a stream so that we can compress it, then # read the compressed bytes. In the event of a retry (e.g. if # our access token has expired), we need to be able to re-read # the body, which we can't do with a stream. So, we consume the # bytes from the stream now and store them in a re-readable # bytes container. http_request.body = ( compression.CompressStream( six.BytesIO(http_request.body))[0].read()) else: url_builder.relative_path = upload_config.resumable_path url_builder.query_params['uploadType'] = 'resumable' self.__ConfigureResumableRequest(http_request)
def ConfigureRequest(self, upload_config, http_request, url_builder)
Configure the request and url for this upload.
4.063945
4.075301
0.997214
http_request.headers['content-type'] = self.mime_type http_request.body = self.stream.read() http_request.loggable_body = '<media body>'
def __ConfigureMediaRequest(self, http_request)
Configure http_request as a simple request for this upload.
5.248802
4.739975
1.107348
# This is a multipart/related upload. msg_root = mime_multipart.MIMEMultipart('related') # msg_root should not write out its own headers setattr(msg_root, '_write_headers', lambda self: None) # attach the body as one part msg = mime_nonmultipart.MIMENonMultipart( *http_request.headers['content-type'].split('/')) msg.set_payload(http_request.body) msg_root.attach(msg) # attach the media as the second part msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/')) msg['Content-Transfer-Encoding'] = 'binary' msg.set_payload(self.stream.read()) msg_root.attach(msg) # NOTE: We encode the body, but can't use # `email.message.Message.as_string` because it prepends # `> ` to `From ` lines. fp = six.BytesIO() if six.PY3: generator_class = MultipartBytesGenerator else: generator_class = email_generator.Generator g = generator_class(fp, mangle_from_=False) g.flatten(msg_root, unixfrom=False) http_request.body = fp.getvalue() multipart_boundary = msg_root.get_boundary() http_request.headers['content-type'] = ( 'multipart/related; boundary=%r' % multipart_boundary) if isinstance(multipart_boundary, six.text_type): multipart_boundary = multipart_boundary.encode('ascii') body_components = http_request.body.split(multipart_boundary) headers, _, _ = body_components[-2].partition(b'\n\n') body_components[-2] = b'\n\n'.join([headers, b'<media body>\n\n--']) http_request.loggable_body = multipart_boundary.join(body_components)
def __ConfigureMultipartRequest(self, http_request)
Configure http_request as a multipart request for this upload.
3.557787
3.579254
0.994002
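Below is a self-contained sketch of the multipart/related construction above: one part carries the JSON metadata, the other the media bytes, and the root message's own headers are suppressed so only the body is emitted. Payloads and the metadata shape are illustrative.

import io
from email.generator import Generator
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart

root = MIMEMultipart('related')
setattr(root, '_write_headers', lambda gen: None)  # body only, no headers

meta = MIMENonMultipart('application', 'json')
meta.set_payload('{"name": "example.txt"}')
root.attach(meta)

media = MIMENonMultipart('text', 'plain')
media['Content-Transfer-Encoding'] = 'binary'
media.set_payload('hello, world')
root.attach(media)

# Use a Generator directly, as the code above does, to avoid the
# 'From '-mangling that as_string() can apply.
fp = io.StringIO()
Generator(fp, mangle_from_=False).flatten(root, unixfrom=False)
body = fp.getvalue()
content_type = 'multipart/related; boundary=%s' % root.get_boundary()
assert root.get_boundary() in body and '"name"' in body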
if self.strategy != RESUMABLE_UPLOAD: return self.EnsureInitialized() refresh_request = http_wrapper.Request( url=self.url, http_method='PUT', headers={'Content-Range': 'bytes */*'}) refresh_response = http_wrapper.MakeRequest( self.http, refresh_request, redirections=0, retries=self.num_retries) range_header = self._GetRangeHeaderFromResponse(refresh_response) if refresh_response.status_code in (http_client.OK, http_client.CREATED): self.__complete = True self.__progress = self.total_size self.stream.seek(self.progress) # If we're finished, the refresh response will contain the metadata # originally requested. Cache it so it can be returned in # StreamInChunks. self.__final_response = refresh_response elif refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE: if range_header is None: self.__progress = 0 else: self.__progress = self.__GetLastByte(range_header) + 1 self.stream.seek(self.progress) else: raise exceptions.HttpError.FromResponse(refresh_response)
def RefreshResumableUploadState(self)
Talk to the server and refresh the state of this resumable upload. Returns: Response if the upload is complete.
3.864011
3.87563
0.997002
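A sketch of how the status-probe response above maps to a resume offset; next_offset is a hypothetical helper, and the 'bytes=0-N' header format is an assumption about what __GetLastByte parses. 308 is http_wrapper.RESUME_INCOMPLETE in apitools.

def next_offset(status_code, range_header):
    if status_code in (200, 201):
        return None               # upload already complete
    if status_code == 308:
        if range_header is None:
            return 0              # server has persisted nothing yet
        # Header looks like 'bytes=0-12345'; resume after the last byte.
        return int(range_header.split('-')[-1]) + 1
    raise ValueError('unexpected status %d' % status_code)

assert next_offset(308, None) == 0
assert next_offset(308, 'bytes=0-99') == 100
assert next_offset(200, None) is None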
if self.strategy is None: raise exceptions.UserError( 'No upload strategy set; did you call ConfigureRequest?') if http is None and client is None: raise exceptions.UserError('Must provide client or http.') if self.strategy != RESUMABLE_UPLOAD: return http = http or client.http if client is not None: http_request.url = client.FinalizeTransferUrl(http_request.url) self.EnsureUninitialized() http_response = http_wrapper.MakeRequest(http, http_request, retries=self.num_retries) if http_response.status_code != http_client.OK: raise exceptions.HttpError.FromResponse(http_response) self.__server_chunk_granularity = http_response.info.get( 'X-Goog-Upload-Chunk-Granularity') url = http_response.info['location'] if client is not None: url = client.FinalizeTransferUrl(url) self._Initialize(http, url) # Unless the user has requested otherwise, we want to just # go ahead and pump the bytes now. if self.auto_transfer: return self.StreamInChunks() return http_response
def InitializeUpload(self, http_request, http=None, client=None)
Initialize this upload from the given http_request.
4.497727
4.439781
1.013052
if self.strategy != RESUMABLE_UPLOAD: raise exceptions.InvalidUserInputError( 'Cannot stream non-resumable upload') callback = callback or self.progress_callback finish_callback = finish_callback or self.finish_callback # final_response is set if we resumed an already-completed upload. response = self.__final_response def CallSendChunk(start): return self.__SendChunk( start, additional_headers=additional_headers) def CallSendMediaBody(start): return self.__SendMediaBody( start, additional_headers=additional_headers) send_func = CallSendChunk if use_chunks else CallSendMediaBody if not use_chunks and self.__gzip_encoded: raise exceptions.InvalidUserInputError( 'Cannot gzip encode non-chunked upload') if use_chunks: self.__ValidateChunksize(self.chunksize) self.EnsureInitialized() while not self.complete: response = send_func(self.stream.tell()) if response.status_code in (http_client.OK, http_client.CREATED): self.__complete = True break if response.status_code not in ( http_client.OK, http_client.CREATED, http_wrapper.RESUME_INCOMPLETE): # Only raise an exception if the error is something we can't # recover from. if (self.strategy != RESUMABLE_UPLOAD or not self.__IsRetryable(response)): raise exceptions.HttpError.FromResponse(response) # We want to reset our state to wherever the server left us # before this failed request, and then raise. self.RefreshResumableUploadState() self._ExecuteCallback(callback, response) continue self.__progress = self.__GetLastByte( self._GetRangeHeaderFromResponse(response)) if self.progress + 1 != self.stream.tell(): # TODO(craigcitro): Add a better way to recover here. raise exceptions.CommunicationError( 'Failed to transfer all bytes in chunk, upload paused at ' 'byte %d' % self.progress) self._ExecuteCallback(callback, response) if self.__complete and hasattr(self.stream, 'seek'): current_pos = self.stream.tell() self.stream.seek(0, os.SEEK_END) end_pos = self.stream.tell() self.stream.seek(current_pos) if current_pos != end_pos: raise exceptions.TransferInvalidError( 'Upload complete with %s additional bytes left in stream' % (int(end_pos) - int(current_pos))) self._ExecuteCallback(finish_callback, response) return response
def __StreamMedia(self, callback=None, finish_callback=None, additional_headers=None, use_chunks=True)
Helper function for StreamMedia / StreamInChunks.
3.753491
3.751216
1.000606
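A condensed, self-contained sketch of the resumable send loop above: send from the current stream offset, then seek just past the bytes the server acknowledges. The fake server, which acks at most 4 bytes per request, is purely illustrative.

import io

class FakeServer(object):
    def __init__(self, size):
        self.acked, self.size = 0, size

    def send(self, start):
        # Ack at most 4 bytes per request; 201 once everything is stored.
        self.acked = min(start + 4, self.size)
        status = 201 if self.acked == self.size else 308
        return status, self.acked - 1  # last byte the server holds

stream, server = io.BytesIO(b'0123456789'), FakeServer(10)
status = 308
while status == 308:
    status, last_byte = server.send(stream.tell())
    stream.seek(last_byte + 1)  # mirrors __GetLastByte(...) + 1 above
assert status == 201 and stream.tell() == 10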
return self.__StreamMedia( callback=callback, finish_callback=finish_callback, additional_headers=additional_headers, use_chunks=False)
def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None)
Send this resumable upload in a single request. Args: callback: Progress callback function with inputs (http_wrapper.Response, transfer.Upload) finish_callback: Final callback function with inputs (http_wrapper.Response, transfer.Upload) additional_headers: Dict of headers to include with the upload http_wrapper.Request. Returns: http_wrapper.Response of final response.
3.656393
4.851691
0.753633
return self.__StreamMedia( callback=callback, finish_callback=finish_callback, additional_headers=additional_headers)
def StreamInChunks(self, callback=None, finish_callback=None, additional_headers=None)
Send this (resumable) upload in chunks.
4.441714
4.343122
1.022701
def CheckResponse(response): if response is None: # Caller shouldn't call us if the response is None, # but handle anyway. raise exceptions.RequestError( 'Request to url %s did not return a response.' % request.url) response = http_wrapper.MakeRequest( self.bytes_http, request, retry_func=self.retry_func, retries=self.num_retries, check_response_func=CheckResponse) if response.status_code == http_wrapper.RESUME_INCOMPLETE: last_byte = self.__GetLastByte( self._GetRangeHeaderFromResponse(response)) if last_byte + 1 != end: self.stream.seek(last_byte + 1) return response
def __SendMediaRequest(self, request, end)
Request helper function for SendMediaBody & SendChunk.
4.745858
4.680548
1.013953
self.EnsureInitialized() if self.total_size is None: raise exceptions.TransferInvalidError( 'Total size must be known for SendMediaBody') body_stream = stream_slice.StreamSlice( self.stream, self.total_size - start) request = http_wrapper.Request(url=self.url, http_method='PUT', body=body_stream) request.headers['Content-Type'] = self.mime_type if start == self.total_size: # End of an upload with 0 bytes left to send; just finalize. range_string = 'bytes */%s' % self.total_size else: range_string = 'bytes %s-%s/%s' % (start, self.total_size - 1, self.total_size) request.headers['Content-Range'] = range_string if additional_headers: request.headers.update(additional_headers) return self.__SendMediaRequest(request, self.total_size)
def __SendMediaBody(self, start, additional_headers=None)
Send the entire media stream in a single request.
3.246264
3.175148
1.022398
self.EnsureInitialized() no_log_body = self.total_size is None request = http_wrapper.Request(url=self.url, http_method='PUT') if self.__gzip_encoded: request.headers['Content-Encoding'] = 'gzip' body_stream, read_length, exhausted = compression.CompressStream( self.stream, self.chunksize) end = start + read_length # If the stream length was previously unknown and the input stream # is exhausted, then we're at the end of the stream. if self.total_size is None and exhausted: self.__total_size = end elif self.total_size is None: # For the streaming resumable case, we need to detect when # we're at the end of the stream. body_stream = buffered_stream.BufferedStream( self.stream, start, self.chunksize) end = body_stream.stream_end_position if body_stream.stream_exhausted: self.__total_size = end # TODO: Here, change body_stream from a stream to a string object, # which means reading a chunk into memory. This works around # https://code.google.com/p/httplib2/issues/detail?id=176 which can # cause httplib2 to skip bytes on 401's for file objects. # Rework this solution to be more general. body_stream = body_stream.read(self.chunksize) else: end = min(start + self.chunksize, self.total_size) body_stream = stream_slice.StreamSlice(self.stream, end - start) # TODO(craigcitro): Think about clearer errors on "no data in # stream". request.body = body_stream request.headers['Content-Type'] = self.mime_type if no_log_body: # Disable logging of streaming body. # TODO: Remove no_log_body and rework as part of a larger logs # refactor. request.loggable_body = '<media body>' if self.total_size is None: # Streaming resumable upload case, unknown total size. range_string = 'bytes %s-%s/*' % (start, end - 1) elif end == start: # End of an upload with 0 bytes left to send; just finalize. range_string = 'bytes */%s' % self.total_size else: # Normal resumable upload case with known sizes. range_string = 'bytes %s-%s/%s' % (start, end - 1, self.total_size) request.headers['Content-Range'] = range_string if additional_headers: request.headers.update(additional_headers) return self.__SendMediaRequest(request, end)
def __SendChunk(self, start, additional_headers=None)
Send the specified chunk.
4.277201
4.23301
1.01044
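The Content-Range values built by the two senders above fall into three cases. Here is a small sketch for the known-size path; the streaming path substitutes '*' for the total, as noted in the final comment.

def chunk_range(start, chunksize, total_size):
    end = min(start + chunksize, total_size)
    if end == start:
        return 'bytes */%s' % total_size       # nothing left; finalize
    return 'bytes %s-%s/%s' % (start, end - 1, total_size)

assert chunk_range(0, 256, 1000) == 'bytes 0-255/1000'
assert chunk_range(768, 256, 1000) == 'bytes 768-999/1000'
assert chunk_range(1000, 256, 1000) == 'bytes */1000'
# Unknown total size (streaming): 'bytes %s-%s/*' % (start, end - 1)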
in_read = 0 in_exhausted = False out_stream = StreamingBuffer() with gzip.GzipFile(mode='wb', fileobj=out_stream, compresslevel=compresslevel) as compress_stream: # Read until we've written at least length bytes to the output stream. while not length or out_stream.length < length: data = in_stream.read(chunksize) data_length = len(data) compress_stream.write(data) in_read += data_length # If we read less than requested, the stream is exhausted. if data_length < chunksize: in_exhausted = True break return out_stream, in_read, in_exhausted
def CompressStream(in_stream, length=None, compresslevel=2, chunksize=16777216)
Compresses an input stream into a file-like buffer. This reads from the input stream until either we've stored at least length compressed bytes, or the input stream has been exhausted. This supports streams of unknown size. Args: in_stream: The input stream to read from. length: The target number of compressed bytes to buffer in the output stream. If length is None, the input stream will be compressed until it's exhausted. The actual length of the output buffer can vary from the target. If the input stream is exhausted, the output buffer may be smaller than expected. If the data is incompressible, the target can be exceeded by at most: chunksize + 5 * (floor((chunksize - 1) / 16383) + 1) + 17 which accounts for the additional header data gzip adds. For the default 16MiB chunksize, this results in a maximum output buffer size of: length + 16MiB + 5142 bytes compresslevel: Optional, defaults to 2. The desired compression level. chunksize: Optional, defaults to 16MiB. The chunk size used when reading data from the input stream to write into the output buffer. Returns: A file-like output buffer of compressed bytes, the number of bytes read from the input stream, and a flag denoting if the input stream was exhausted.
2.572556
2.477252
1.038472
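A usage sketch for CompressStream: compress a payload fully (length=None) and verify the round trip. This assumes CompressStream and its StreamingBuffer output type are in scope as defined above.

import gzip
import io

payload = b'hello world ' * 1000
buf, consumed, exhausted = CompressStream(io.BytesIO(payload), length=None)
assert consumed == len(payload) and exhausted
assert gzip.decompress(buf.read()) == payload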
if size is None: size = self.__size ret_list = [] while size > 0 and self.__buf: data = self.__buf.popleft() size -= len(data) ret_list.append(data) if size < 0: ret_list[-1], remainder = ret_list[-1][:size], ret_list[-1][size:] self.__buf.appendleft(remainder) ret = b''.join(ret_list) self.__size -= len(ret) return ret
def read(self, size=None)
Read at most size bytes from this buffer. Bytes read from this buffer are consumed and are permanently removed. Args: size: If provided, read no more than size bytes from the buffer. Otherwise, this reads the entire buffer. Returns: The bytes read from this buffer.
2.232605
2.508444
0.890036
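The split-and-push-back behaviour of read() can be seen in isolation with a plain deque; this mirrors the method above outside the class. Note how an oversized final chunk is sliced and its remainder pushed back for the next read.

import collections

buf = collections.deque([b'abc', b'defg'])
size, out = 5, []
while size > 0 and buf:
    chunk = buf.popleft()
    size -= len(chunk)
    out.append(chunk)
if size < 0:
    # Read too much: keep only what was asked for, push back the rest.
    out[-1], rest = out[-1][:size], out[-1][size:]
    buf.appendleft(rest)
assert b''.join(out) == b'abcde' and buf[0] == b'fg'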
proto_printer.PrintPreamble(package, version, file_descriptor) _PrintEnums(proto_printer, file_descriptor.enum_types) _PrintMessages(proto_printer, file_descriptor.message_types) custom_json_mappings = _FetchCustomMappings(file_descriptor.enum_types) custom_json_mappings.extend( _FetchCustomMappings(file_descriptor.message_types)) for mapping in custom_json_mappings: proto_printer.PrintCustomJsonMapping(mapping)
def _WriteFile(file_descriptor, package, version, proto_printer)
Write the given extended file descriptor to the printer.
2.693368
2.751845
0.97875
_WriteFile(file_descriptor, package, version, _Proto2Printer(printer))
def WriteMessagesFile(file_descriptor, package, version, printer)
Write the given extended file descriptor to out as a message file.
17.188141
18.392401
0.934524
_WriteFile(file_descriptor, package, version, _ProtoRpcPrinter(printer))
def WritePythonFile(file_descriptor, package, version, printer)
Write the given extended file descriptor to out.
16.300524
18.126663
0.899257
custom_mappings = [] for descriptor in descriptor_ls: if isinstance(descriptor, ExtendedEnumDescriptor): custom_mappings.extend( _FormatCustomJsonMapping('Enum', m, descriptor) for m in descriptor.enum_mappings) elif isinstance(descriptor, ExtendedMessageDescriptor): custom_mappings.extend( _FormatCustomJsonMapping('Field', m, descriptor) for m in descriptor.field_mappings) custom_mappings.extend( _FetchCustomMappings(descriptor.enum_types)) custom_mappings.extend( _FetchCustomMappings(descriptor.message_types)) return custom_mappings
def _FetchCustomMappings(descriptor_ls)
Find and return all custom mappings for descriptors in descriptor_ls.
2.63839
2.55498
1.032646
enum_types = sorted(enum_types, key=operator.attrgetter('name')) for enum_type in enum_types: proto_printer.PrintEnum(enum_type)
def _PrintEnums(proto_printer, enum_types)
Print all enums to the given proto_printer.
2.280117
2.089643
1.091151
description = message_type.description or '%s message type.' % ( message_type.name) width = self.__printer.CalculateWidth() - 3 for line in textwrap.wrap(description, width): self.__printer('// %s', line) PrintIndentedDescriptions(self.__printer, message_type.enum_types, 'Enums', prefix='// ') PrintIndentedDescriptions(self.__printer, message_type.message_types, 'Messages', prefix='// ') PrintIndentedDescriptions(self.__printer, message_type.fields, 'Fields', prefix='// ')
def __PrintMessageCommentLines(self, message_type)
Print the description of this message.
3.414423
3.125353
1.092492
google_imports = [x for x in imports if 'google' in x] other_imports = [x for x in imports if 'google' not in x] if other_imports: for import_ in sorted(other_imports): self.__printer(import_) self.__printer() # Note: If we ever were going to add imports from this package, we'd # need to sort those out and put them at the end. if google_imports: for import_ in sorted(google_imports): self.__printer(import_) self.__printer()
def __PrintAdditionalImports(self, imports)
Print additional imports needed for protorpc.
3.618151
3.492085
1.0361
description = message_type.description or '%s message type.' % (
    message_type.name)
short_description = (
    _EmptyMessage(message_type) and
    len(description) < (self.__printer.CalculateWidth() - 6))
with self.__printer.CommentContext():
    if short_description:
        # Note that we use explicit string interpolation here since
        # we're in comment context.
        self.__printer('r"""%s"""' % description)
        return
    for line in textwrap.wrap('r"""%s' % description,
                              self.__printer.CalculateWidth()):
        self.__printer(line)
    PrintIndentedDescriptions(self.__printer, message_type.enum_types,
                              'Enums')
    PrintIndentedDescriptions(self.__printer, message_type.message_types,
                              'Messages')
    PrintIndentedDescriptions(self.__printer, message_type.fields,
                              'Fields')
    self.__printer('"""')
def __PrintMessageDocstringLines(self, message_type)
Print the docstring for this message.
9.736904
9.406953
1.035075
def positional_decorator(wrapped): @functools.wraps(wrapped) def positional_wrapper(*args, **kwargs): if len(args) > max_positional_args: plural_s = '' if max_positional_args != 1: plural_s = 's' raise TypeError('%s() takes at most %d positional argument%s ' '(%d given)' % (wrapped.__name__, max_positional_args, plural_s, len(args))) return wrapped(*args, **kwargs) return positional_wrapper if isinstance(max_positional_args, six.integer_types): return positional_decorator else: args, _, _, defaults = inspect.getargspec(max_positional_args) if defaults is None: raise ValueError( 'Functions with no keyword arguments must specify ' 'max_positional_args') return positional(len(args) - len(defaults))(max_positional_args)
def positional(max_positional_args)
A decorator that declares only the first N arguments may be positional. This decorator makes it easy to support Python 3 style keyword-only parameters. For example, in Python 3 it is possible to write: def fn(pos1, *, kwonly1=None, kwonly2=None): ... All named parameters after * must be passed as keywords: fn(10, 'kw1', 'kw2') # Raises exception. fn(10, kwonly1='kw1') # Ok. Example: To define a function like above, do: @positional(1) def fn(pos1, kwonly1=None, kwonly2=None): ... If no default value is provided to a keyword argument, it becomes a required keyword argument: @positional(0) def fn(required_kw): ... This must be called with the keyword parameter: fn() # Raises exception. fn(10) # Raises exception. fn(required_kw=10) # Ok. When defining instance or class methods always remember to account for 'self' and 'cls': class MyClass(object): @positional(2) def my_method(self, pos1, kwonly1=None): ... @classmethod @positional(2) def my_method(cls, pos1, kwonly1=None): ... One can omit the argument to 'positional' altogether, and then no arguments with default values may be passed positionally. This would be equivalent to placing a '*' before the first argument with a default value in Python 3. If there are no arguments with default values, and no argument is given to 'positional', an error is raised. @positional def fn(arg1, arg2, required_kw1=None, required_kw2=0): ... fn(1, 3, 5) # Raises exception. fn(1, 3) # Ok. fn(1, 3, required_kw1=5) # Ok. Args: max_positional_args: Maximum number of positional arguments. All parameters after this index must be keyword only. Returns: A decorator that prevents using arguments after max_positional_args from being used as positional parameters. Raises: TypeError if a keyword-only argument is provided as a positional parameter. ValueError if no maximum number of arguments is provided and the function has no arguments with default values.
2.169315
2.244575
0.96647
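A usage sketch for the decorator above, mirroring the docstring's examples; it assumes positional is in scope as defined here.

@positional(1)
def fn(pos1, kwonly1=None):
    return pos1, kwonly1

assert fn(10, kwonly1='kw') == (10, 'kw')
try:
    fn(10, 'kw')   # second argument must be passed by keyword
except TypeError:
    pass           # raised: fn() takes at most 1 positional argument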
if isinstance(module, six.string_types): try: module = sys.modules[module] except KeyError: return None try: return six.text_type(module.package) except AttributeError: if module.__name__ == '__main__': try: file_name = module.__file__ except AttributeError: pass else: base_name = os.path.basename(file_name) split_name = os.path.splitext(base_name) if len(split_name) == 1: return six.text_type(base_name) return u'.'.join(split_name[:-1]) return six.text_type(module.__name__)
def get_package_for_module(module)
Get package name for a module. Helper calculates the package name of a module. Args: module: Module to get name for. If module is a string, try to find module in sys.modules. Returns: If module contains 'package' attribute, uses that as package name. Else, if module is not the '__main__' module, the module __name__. Else, the base name of the module file name. Else None.
2.206026
2.086954
1.057056
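A usage sketch for get_package_for_module, assumed in scope as defined above. The fake module exercises the __name__ fallback (no 'package' attribute, not '__main__'), and the string lookup shows the sys.modules miss returning None; both names are illustrative.

class FakeModule(object):
    __name__ = 'my.module'  # no 'package' attribute, not '__main__'

assert get_package_for_module(FakeModule()) == u'my.module'
assert get_package_for_module('module_never_imported') is None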