Columns: code (string), signature (string), docstring (string), loss_without_docstring (float64), loss_with_docstring (float64), factor (float64)
endpoint = '{0}/{1}/contact_methods/{2}'.format( self.endpoint, self['id'], id, ) return self.request('DELETE', endpoint=endpoint, query_params=kwargs)
def delete_contact_method(self, id, **kwargs)
Delete a contact method for this user.
3.95771
3.556344
1.112859
endpoint = '{0}/{1}/contact_methods/{2}'.format( self.endpoint, self['id'], id, ) result = self.request('GET', endpoint=endpoint, query_params=kwargs) return result['contact_method']
def get_contact_method(self, id, **kwargs)
Get a contact method for this user.
3.396648
3.227515
1.052403
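A hypothetical usage sketch for the two contact-method helpers above, assuming a pypd-style User entity; both ids are placeholders.
    user = User.fetch('PUSER01')                 # assumed pypd-style classmethod
    method = user.get_contact_method('PCM0001')  # GET /users/PUSER01/contact_methods/PCM0001
    user.delete_contact_method('PCM0001')        # DELETE on the same endpoint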
endpoint = '{0}/{1}/notification_rules'.format( self.endpoint, self['id'], ) result = self.request('GET', endpoint=endpoint, query_params=kwargs) return result['notification_rules']
def notification_rules(self, **kwargs)
Get all notification rules for this user.
3.905783
3.469245
1.125831
data = {'notification_rule': data, } endpoint = '{0}/{1}/notification_rules'.format( self.endpoint, self['id'], ) result = self.request('POST', endpoint=endpoint, data=data, query_params=kwargs) self._data['notification_rules'].append(result['notification_rule']) return result
def create_notification_rule(self, data, **kwargs)
Create a notification rule for this user.
3.565406
3.316843
1.07494
endpoint = '{0}/{1}/notification_rules/{2}'.format( self.endpoint, self['id'], id, ) return self.request('DELETE', endpoint=endpoint, query_params=kwargs)
def delete_notification_rule(self, id, **kwargs)
Delete a notification rule for this user.
4.014347
3.66595
1.095036
data = kwargs.pop('data', None) if data is None: data = { 'addon': { 'type': type_, 'name': name, 'src': src, } } cls.create(data=data, *args, **kwargs)
def install(cls, type_, name, src, *args, **kwargs)
Install an add-on to this account.
2.602423
2.341377
1.111493
cls.validate(data) # otherwise endpoint should contain the service path too getattr(Entity, 'create').__func__(cls, data=data, *args, **kwargs)
def create(cls, data=None, *args, **kwargs)
Validate and then create a Vendor entity.
15.308703
13.578055
1.127459
# type: () -> UnstructureStrategy return ( UnstructureStrategy.AS_DICT if self._unstructure_attrs == self.unstructure_attrs_asdict else UnstructureStrategy.AS_TUPLE )
def unstruct_strat(self)
The default way of unstructuring ``attrs`` classes.
6.771966
6.358059
1.0651
if is_union_type(cl): self._union_registry[cl] = func else: self._structure_func.register_cls_list([(cl, func)])
def register_structure_hook(self, cl, func)
Register a primitive-to-class converter function for a type. The converter function should take two arguments (a Python object to be converted, and the type to convert to) and return an instance of the class. The type may seem redundant, but is sometimes needed (for example, when dealing with generic classes).
7.020694
8.309579
0.844892
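A minimal sketch of registering a structure hook on a cattrs-style Converter; the Point class is illustrative, not part of the library.
    from cattr import Converter

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    converter = Converter()
    # The hook receives the raw object and the target type, per the docstring above.
    converter.register_structure_hook(Point, lambda obj, cl: cl(obj['x'], obj['y']))
    p = converter.structure({'x': 1, 'y': 2}, Point)  # Point instance with x=1, y=2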
# type: (Any, Type[T]) -> T return self._structure_func.dispatch(cl)(obj, cl)
def structure(self, obj, cl)
Convert unstructured Python data structures to structured data.
9.505802
9.068744
1.048194
# type: (Any) -> Dict[str, Any] attrs = obj.__class__.__attrs_attrs__ dispatch = self._unstructure_func.dispatch rv = self._dict_factory() for a in attrs: name = a.name v = getattr(obj, name) rv[name] = dispatch(v.__class__)(v) return rv
def unstructure_attrs_asdict(self, obj)
Our version of `attrs.asdict`, so we can call back to us.
3.909538
3.775512
1.035499
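For example, unstructuring an attrs instance walks its attributes and dispatches on each value's runtime class; a sketch via the public entry point:
    import attr
    from cattr import Converter

    @attr.s
    class Point(object):
        x = attr.ib()
        y = attr.ib()

    Converter().unstructure(Point(1, 'a'))  # {'x': 1, 'y': 'a'}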
# type: (Any) -> Tuple attrs = obj.__class__.__attrs_attrs__ return tuple(self.unstructure(getattr(obj, a.name)) for a in attrs)
def unstructure_attrs_astuple(self, obj)
Our version of `attrs.astuple`, so we can call back to us.
3.978714
3.946012
1.008287
# We can reuse the sequence class, so tuples stay tuples. dispatch = self._unstructure_func.dispatch return seq.__class__(dispatch(e.__class__)(e) for e in seq)
def _unstructure_seq(self, seq)
Convert a sequence to primitive equivalents.
13.08567
11.955135
1.094565
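Because the sequence class is reused, lists stay lists and tuples stay tuples; a short sketch:
    from cattr import Converter

    conv = Converter()
    conv.unstructure([1, 'a'])  # [1, 'a']
    conv.unstructure((1, 'a'))  # (1, 'a'), same class as the input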
# We can reuse the mapping class, so dicts stay dicts and OrderedDicts # stay OrderedDicts. dispatch = self._unstructure_func.dispatch return mapping.__class__( (dispatch(k.__class__)(k), dispatch(v.__class__)(v)) for k, v in mapping.items() )
def _unstructure_mapping(self, mapping)
Convert a mapping of attr classes to primitive equivalents.
6.51126
6.184402
1.052852
if cl is Any or cl is Optional: return obj # We don't know what this is, so we complain loudly. msg = ( "Unsupported type: {0}. Register a structure hook for " "it.".format(cl) ) raise ValueError(msg)
def _structure_default(self, obj, cl)
This is the fallthrough case. Everything is a subclass of `Any`. A special condition here handles ``attrs`` classes. Bare optionals end here too (optionals with arguments are unions.) We treat bare optionals as Any.
7.004823
6.528533
1.072955
if not isinstance(obj, (bytes, unicode)): return cl(str(obj)) else: return obj
def _structure_unicode(self, obj, cl)
Coerce ``obj`` to the text type ``cl``; values that are already ``bytes`` or ``unicode`` are returned unchanged.
4.683577
4.151396
1.128193
# type: (Tuple, Type[T]) -> T conv_obj = [] # A list of converter parameters. for a, value in zip(cl.__attrs_attrs__, obj): # type: ignore # We detect the type by the metadata. converted = self._structure_attr_from_tuple(a, a.name, value) conv_obj.append(converted) return cl(*conv_obj)
def structure_attrs_fromtuple(self, obj, cl)
Load an attrs class from a sequence (tuple).
6.266214
6.212584
1.008632
type_ = a.type if type_ is None: # No type metadata. return value return self._structure_func.dispatch(type_)(value, type_)
def _structure_attr_from_tuple(self, a, name, value)
Handle an individual attrs attribute.
9.396743
8.424825
1.115364
# type: (Mapping[str, Any], Type[T]) -> T # For public use. conv_obj = {} # Start with a fresh dict, to ignore extra keys. dispatch = self._structure_func.dispatch for a in cl.__attrs_attrs__: # type: ignore # We detect the type by metadata. type_ = a.type name = a.name try: val = obj[name] except KeyError: continue if name[0] == "_": name = name[1:] conv_obj[name] = ( dispatch(type_)(val, type_) if type_ is not None else val ) return cl(**conv_obj)
def structure_attrs_fromdict(self, obj, cl)
Instantiate an attrs class from a mapping (dict).
5.483613
5.431499
1.009595
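A sketch of structuring an attrs class from a plain dict; attributes carrying type metadata are converted by the dispatch above, untyped values pass through:
    import attr
    from cattr import Converter

    @attr.s
    class Point(object):
        x = attr.ib(type=int)
        y = attr.ib(type=int)

    Converter().structure({'x': '1', 'y': 2}, Point)  # Point(x=1, y=2)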
if is_bare(cl) or cl.__args__[0] is Any: return [e for e in obj] else: elem_type = cl.__args__[0] return [ self._structure_func.dispatch(elem_type)(e, elem_type) for e in obj ]
def _structure_list(self, obj, cl)
Convert an iterable to a potentially generic list.
4.871574
4.227593
1.152328
if is_bare(cl) or cl.__args__[0] is Any: return set(obj) else: elem_type = cl.__args__[0] return { self._structure_func.dispatch(elem_type)(e, elem_type) for e in obj }
def _structure_set(self, obj, cl)
Convert an iterable into a potentially generic set.
5.162584
4.467141
1.15568
if is_bare(cl) or cl.__args__[0] is Any: return frozenset(obj) else: elem_type = cl.__args__[0] dispatch = self._structure_func.dispatch return frozenset(dispatch(elem_type)(e, elem_type) for e in obj)
def _structure_frozenset(self, obj, cl)
Convert an iterable into a potentially generic frozenset.
5.083292
4.609941
1.10268
if is_bare(cl) or cl.__args__ == (Any, Any): return dict(obj) else: key_type, val_type = cl.__args__ if key_type is Any: val_conv = self._structure_func.dispatch(val_type) return {k: val_conv(v, val_type) for k, v in obj.items()} elif val_type is Any: key_conv = self._structure_func.dispatch(key_type) return {key_conv(k, key_type): v for k, v in obj.items()} else: key_conv = self._structure_func.dispatch(key_type) val_conv = self._structure_func.dispatch(val_type) return { key_conv(k, key_type): val_conv(v, val_type) for k, v in obj.items() }
def _structure_dict(self, obj, cl)
Convert a mapping into a potentially generic dict.
1.882658
1.771771
1.062585
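With both parameters concrete, keys and values are each converted; a sketch using typing's Dict:
    from typing import Dict
    from cattr import Converter

    Converter().structure({'1': '2', '3': '4'}, Dict[int, int])  # {1: 2, 3: 4}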
# Unions with NoneType in them are basically optionals. # We check for NoneType early and handle the case of obj being None, # so disambiguation functions don't need to handle NoneType. union_params = union.__args__ if NoneType in union_params: # type: ignore if obj is None: return None if len(union_params) == 2: # This is just a NoneType and something else. other = ( union_params[0] if union_params[1] is NoneType # type: ignore else union_params[1] ) # We can't actually have a Union of a Union, so this is safe. return self._structure_func.dispatch(other)(obj, other) # Check the union registry first. handler = self._union_registry.get(union) if handler is not None: return handler(obj, union) # Getting here means either this is not an optional, or it's an # optional with more than one parameter. # Let's support only unions of attr classes for now. cl = self._dis_func_cache(union)(obj) return self._structure_func.dispatch(cl)(obj, cl)
def _structure_union(self, obj, union)
Deal with converting a union.
5.942492
5.856872
1.014619
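Optionals are the common case of this union logic: None short-circuits, and any other value is structured as the non-None parameter. A sketch:
    from typing import Optional
    from cattr import Converter

    conv = Converter()
    conv.structure(None, Optional[int])  # None
    conv.structure('3', Optional[int])   # 3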
tup_params = tup.__args__ has_ellipsis = tup_params and tup_params[-1] is Ellipsis if tup_params is None or (has_ellipsis and tup_params[0] is Any): # Just a Tuple. (No generic information.) return tuple(obj) if has_ellipsis: # We're dealing with a homogeneous tuple, Tuple[int, ...] tup_type = tup_params[0] conv = self._structure_func.dispatch(tup_type) return tuple(conv(e, tup_type) for e in obj) else: # We're dealing with a heterogeneous tuple. return tuple( self._structure_func.dispatch(t)(e, t) for t, e in zip(tup_params, obj) )
def _structure_tuple(self, obj, tup)
Deal with converting to a tuple.
3.698074
3.601591
1.026789
# type: (Type) -> Callable[..., Type] union_types = union.__args__ if NoneType in union_types: # type: ignore # We support unions of attrs classes and NoneType higher in the # logic. union_types = tuple( e for e in union_types if e is not NoneType # type: ignore ) if not all(hasattr(e, "__attrs_attrs__") for e in union_types): raise ValueError( "Only unions of attr classes supported " "currently. Register a loads hook manually." ) return create_uniq_field_dis_func(*union_types)
def _get_dis_func(self, union)
Fetch or try creating a disambiguation function for a union.
7.363885
7.378571
0.99801
# type: (*Type) -> Callable if len(classes) < 2: raise ValueError("At least two classes required.") cls_and_attrs = [(cl, set(at.name for at in fields(cl))) for cl in classes] if len([attrs for _, attrs in cls_and_attrs if len(attrs) == 0]) > 1: raise ValueError("At least two classes have no attributes.") # TODO: Deal with a single class having no required attrs. # For each class, attempt to generate a single unique required field. uniq_attrs_dict = OrderedDict() # type: Dict[str, Type] cls_and_attrs.sort(key=lambda c_a: -len(c_a[1])) fallback = None # If none match, try this. for i, (cl, cl_reqs) in enumerate(cls_and_attrs): other_classes = cls_and_attrs[i + 1 :] if other_classes: other_reqs = reduce(or_, (c_a[1] for c_a in other_classes)) uniq = cl_reqs - other_reqs if not uniq: m = "{} has no usable unique attributes.".format(cl) raise ValueError(m) uniq_attrs_dict[next(iter(uniq))] = cl else: fallback = cl def dis_func(data): # type: (Mapping) -> Optional[Type] if not isinstance(data, Mapping): raise ValueError("Only input mappings are supported.") for k, v in uniq_attrs_dict.items(): if k in data: return v return fallback return dis_func
def create_uniq_field_dis_func(*classes)
Given attr classes, generate a disambiguation function. The function is based on unique fields.
3.381292
3.449849
0.980127
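A sketch of the generated disambiguator with two attrs classes whose required fields differ; the import path is an assumption:
    import attr
    from cattr.disambiguators import create_uniq_field_dis_func  # assumed module path

    @attr.s
    class Cat(object):
        whiskers = attr.ib()

    @attr.s
    class Dog(object):
        tail = attr.ib()

    dis = create_uniq_field_dis_func(Cat, Dog)
    dis({'whiskers': 2})  # Cat
    dis({'tail': 1})      # Dog (matched via the fallback class)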
for can_handle, handler in self._handler_pairs: # can handle could raise an exception here # such as issubclass being called on an instance. # it's easier to just ignore that case. try: if can_handle(typ): return handler except Exception: pass raise KeyError("unable to find handler for {0}".format(typ))
def _dispatch(self, typ)
Return the appropriate handler for the type passed in.
7.115687
6.300798
1.129331
for cls, handler in cls_and_handler: self._single_dispatch.register(cls, handler) self.dispatch.cache_clear()
def register_cls_list(self, cls_and_handler)
Register a list of classes and their handlers with singledispatch.
4.765265
3.293144
1.447026
for func, handler in func_and_handler: self._function_dispatch.register(func, handler) self.dispatch.cache_clear()
def register_func_list(self, func_and_handler)
Register predicate functions that determine whether a handler should be used for a type.
4.618317
6.33733
0.728748
endpoint = '' if cls.__endpoint__ is not None: return cls.__endpoint__ elif cls.__from_class__ is not None: endpoint = cls.__from_class__.__name__.lower() else: endpoint = cls.__tablename__.lower() if not endpoint.endswith('s'): endpoint += 's' return endpoint
def endpoint(cls)
Return the :class:`sandman.model.Model`'s endpoint. :rtype: string
2.718351
2.956894
0.919326
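The pluralization rule reduces to a small standalone helper; a sketch of the logic above, not sandman's actual API:
    def pluralized_endpoint(tablename):
        # Mirror endpoint(): lowercase the name, append 's' unless already present.
        endpoint = tablename.lower()
        return endpoint if endpoint.endswith('s') else endpoint + 's'

    pluralized_endpoint('Artist')  # 'artists'
    pluralized_endpoint('Tracks')  # 'tracks' (left as-is)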
primary_key_value = getattr(self, self.primary_key(), None) return '/{}/{}'.format(self.endpoint(), primary_key_value)
def resource_uri(self)
Return the URI at which the resource can be found. :rtype: string
5.287484
6.513796
0.811736
if cls.__from_class__: cls = cls.__from_class__ return cls.__table__.primary_key.columns.values()[0].name
def primary_key(cls)
Return the name of the table's primary key :rtype: string
5.712416
4.668283
1.223665
links = [] for foreign_key in self.__table__.foreign_keys: column = foreign_key.column.name column_value = getattr(self, column, None) if column_value: table = foreign_key.column.table.name with app.app_context(): endpoint = current_app.class_references[table] links.append({'rel': 'related', 'uri': '/{}/{}'.format( endpoint.__name__, column_value)}) links.append({'rel': 'self', 'uri': self.resource_uri()}) return links
def links(self)
Return a list of links for endpoints related to the resource. :rtype: list
3.454242
3.307505
1.044365
result_dict = {} for column in self.__table__.columns.keys(): result_dict[column] = getattr(self, column, None) if isinstance(result_dict[column], Decimal): result_dict[column] = str(result_dict[column]) result_dict['links'] = self.links() for foreign_key in self.__table__.foreign_keys: column_name = foreign_key.column.name column_value = getattr(self, column_name, None) if column_value: table = foreign_key.column.table.name with app.app_context(): endpoint = current_app.class_references[table] session = db.session() resource = session.query(endpoint).get(column_value) if depth > 0: result_dict.update({ 'rel': endpoint.__name__, endpoint.__name__.lower(): resource.as_dict(depth - 1) }) else: result_dict[ endpoint.__name__.lower() + '_url'] = '/{}/{}'.format( endpoint.__name__, column_value) result_dict['self'] = self.resource_uri() return result_dict
def as_dict(self, depth=0)
Return a dictionary containing only the attributes which map to an instance's database columns. :param int depth: Maximum depth to recurse subobjects :rtype: dict
2.726008
2.740052
0.994875
for column in self.__table__.columns.keys(): value = dictionary.get(column, None) if value: setattr(self, column, value)
def from_dict(self, dictionary)
Set a set of attributes which correspond to the :class:`sandman.model.Model`'s columns. :param dict dictionary: A dictionary of attributes to set on the instance whose keys are the column names of the :class:`sandman.model.Model`'s underlying database table.
2.875033
3.312489
0.867937
for column in self.__table__.columns.keys(): setattr(self, column, None) self.from_dict(dictionary)
def replace(self, dictionary)
Set all attributes which correspond to the :class:`sandman.model.Model`'s columns to the values in *dictionary*, inserting None if an attribute's value is not specified. :param dict dictionary: A dictionary of attributes to set on the instance whose keys are the column names of the :class:`sandman.model.Model`'s underlying database table.
5.763001
5.880301
0.980052
if getattr(cls, '__from_class__', None) is not None: cls = cls.__from_class__ attribute_info = {} for name, value in cls.__table__.columns.items(): attribute_info[name] = str(value.type).lower() return {cls.__name__: attribute_info}
def meta(cls)
Return a dictionary containing meta-information about the given resource.
3.862317
3.808168
1.014219
if not value: return import pkg_resources version = None try: version = pkg_resources.get_distribution('sandman').version finally: del pkg_resources click.echo(version) ctx.exit()
def print_version(ctx, value)
Print the current version of sandman and exit.
3.833191
3.012367
1.272485
app.config['SQLALCHEMY_DATABASE_URI'] = uri app.config['SANDMAN_GENERATE_PKS'] = generate_pks app.config['SANDMAN_SHOW_PKS'] = show_pks app.config['SERVER_HOST'] = host app.config['SERVER_PORT'] = port activate(name='sandmanctl') app.run(host=host, port=int(port), debug=True)
def run(generate_pks, show_pks, host, port, uri)
Connect sandman to <URI> and start the API server/admin interface.
2.407165
2.285714
1.053135
session = _get_session() getattr(session, action)(*args) session.commit()
def _perform_database_action(action, *args)
Call session.*action* with the given *args*. Will later be used to abstract away the database backend.
4.491964
4.538486
0.989749
if ('Accept' not in request.headers or request.headers['Accept'] in ALL_CONTENT_TYPES): return JSON acceptable_content_types = set( request.headers['ACCEPT'].strip().split(',')) if acceptable_content_types & HTML_CONTENT_TYPES: return HTML elif acceptable_content_types & JSON_CONTENT_TYPES: return JSON else: # HTTP 406 Not Acceptable raise InvalidAPIUsage(406)
def _get_acceptable_response_type()
Return the mimetype for this request.
3.758161
3.56116
1.055319
try: if _get_acceptable_response_type() == JSON: response = jsonify(error.to_dict()) response.status_code = error.code return response else: return error.abort() except InvalidAPIUsage: # In addition to the original exception, we don't support the content # type in the request's 'Accept' header, which is a more important # error, so return that instead of what was originally raised. response = jsonify(error.to_dict()) response.status_code = 415 return response
def handle_exception(error)
Return a response with the appropriate status code, message, and content type when an ``InvalidAPIUsage`` exception is raised.
5.744998
5.013271
1.145958
links = resource.links() response = jsonify(**resource.as_dict(depth)) response.headers['Link'] = '' for link in links: response.headers['Link'] += '<{}>; rel="{}",'.format( link['uri'], link['rel']) response.headers['Link'] = response.headers['Link'][:-1] return response
def _single_resource_json_response(resource, depth=0)
Return the JSON representation of *resource*. :param resource: :class:`sandman.model.Model` to render :type resource: :class:`sandman.model.Model` :rtype: :class:`flask.Response`
2.852801
2.91678
0.978065
return make_response(render_template( 'attribute.html', resource=resource, name=name, value=value))
def _single_attribute_html_response(resource, name, value)
Return the HTML representation of a single attribute of a resource. :param :class:`sandman.model.Model` resource: resource for attribute :param string name: name of the attribute :param string value: string value of the attribute :rtype: :class:`flask.Response`
3.49654
5.019166
0.696638
tablename = resource.__tablename__ resource.pk = getattr(resource, resource.primary_key()) resource.attributes = resource.as_dict() return make_response(render_template( 'resource.html', resource=resource, tablename=tablename))
def _single_resource_html_response(resource)
Return the HTML representation of *resource*. :param resource: :class:`sandman.model.Model` to render :type resource: :class:`sandman.model.Model` :rtype: :class:`flask.Response`
4.572539
4.712022
0.970398
top_level_json_name = None if cls.__top_level_json_name__ is not None: top_level_json_name = cls.__top_level_json_name__ else: top_level_json_name = 'resources' result_list = [] for resource in resources: result_list.append(resource.as_dict(depth)) payload = {} if start is not None: payload[top_level_json_name] = result_list[start:stop] else: payload[top_level_json_name] = result_list return jsonify(payload)
def _collection_json_response(cls, resources, start, stop, depth=0)
Return the JSON representation of the collection *resources*. :param list resources: list of :class:`sandman.model.Model`s to render :rtype: :class:`flask.Response`
2.205691
2.188798
1.007718
return make_response(render_template( 'collection.html', resources=resources[start:stop]))
def _collection_html_response(resources, start=0, stop=20)
Return the HTML representation of the collection *resources*. :param list resources: list of :class:`sandman.model.Model`s to render :rtype: :class:`flask.Response`
3.479128
5.006237
0.694959
if method not in cls.__methods__: raise InvalidAPIUsage(403, FORBIDDEN_EXCEPTION_MESSAGE.format( method, cls.endpoint(), cls.__methods__)) class_validator_name = 'validate_' + method if hasattr(cls, class_validator_name): class_validator = getattr(cls, class_validator_name) if not class_validator(resource): raise InvalidAPIUsage(403)
def _validate(cls, method, resource=None)
Return ``True`` if the given *cls* supports the HTTP *method* found on the incoming HTTP request. :param cls: class associated with the request's endpoint :type cls: :class:`sandman.model.Model` instance :param string method: HTTP method of incoming request :param resource: *cls* instance associated with the request :type resource: :class:`sandman.model.Model` or list of :class:`sandman.model.Model` or None :rtype: bool
4.143778
4.492714
0.922333
if ('Content-type' not in incoming_request.headers or incoming_request.headers['Content-type'].split(';')[0] in JSON_CONTENT_TYPES): return incoming_request.json # Only index the header after the membership check, so a missing Content-type cannot raise KeyError content_type = incoming_request.headers['Content-type'].split(';')[0] if content_type in HTML_CONTENT_TYPES: if not incoming_request.form: raise InvalidAPIUsage(400) return incoming_request.form else: # HTTP 415: Unsupported Media Type raise InvalidAPIUsage( 415, UNSUPPORTED_CONTENT_TYPE_MESSAGE.format( types=incoming_request.headers['Content-type']))
def get_resource_data(incoming_request)
Return the data from the incoming *request* based on the Content-type.
3.288477
3.212667
1.023597
with app.app_context(): try: cls = current_app.class_references[collection] except KeyError: raise InvalidAPIUsage(404) return cls
def endpoint_class(collection)
Return the :class:`sandman.model.Model` associated with the endpoint *collection*. :param string collection: a :class:`sandman.model.Model` endpoint :rtype: :class:`sandman.model.Model`
6.357178
8.905845
0.713821
session = _get_session() cls = endpoint_class(collection) if query_arguments: filters = [] order = [] limit = None for key, value in query_arguments.items(): if key == 'page': continue if value.startswith('%'): filters.append(getattr(cls, key).like(str(value), escape='/')) elif key == 'sort': order.append(getattr(cls, value)) elif key == 'limit': limit = value elif key: filters.append(getattr(cls, key) == value) resources = session.query(cls).filter(*filters).order_by( *order).limit(limit) else: resources = session.query(cls).all() return resources
def retrieve_collection(collection, query_arguments=None)
Return the resources in *collection*, possibly filtered by a series of values to use in a 'where' clause search. :param string collection: a :class:`sandman.model.Model` endpoint :param dict query_arguments: a dict of filter query arguments :rtype: :class:`sandman.model.Model`
2.562338
2.681391
0.9556
session = _get_session() cls = endpoint_class(collection) resource = session.query(cls).get(key) if resource is None: raise InvalidAPIUsage(404) return resource
def retrieve_resource(collection, key)
Return the resource in *collection* identified by key *key*. :param string collection: a :class:`sandman.model.Model` endpoint :param string key: primary key of resource :rtype: :class:`sandman.model.Model`
4.673099
5.699101
0.819971
if _get_acceptable_response_type() == JSON: response = _single_resource_json_response(resource) else: response = _single_resource_html_response(resource) response.status_code = 201 response.headers['Location'] = 'http://localhost:5000/{}'.format( resource.resource_uri()) return response
def resource_created_response(resource)
Return HTTP response with status code *201*, signaling a created *resource* :param resource: resource created as a result of current request :type resource: :class:`sandman.model.Model` :rtype: :class:`flask.Response`
3.369444
3.391456
0.99351
if _get_acceptable_response_type() == JSON: return _collection_json_response(cls, resources, start, stop) else: return _collection_html_response(resources, start, stop)
def collection_response(cls, resources, start=None, stop=None)
Return a response for the *resources* of the appropriate content type. :param resources: resources to be returned in request :type resource: list of :class:`sandman.model.Model` :rtype: :class:`flask.Response`
4.100282
4.312933
0.950695
if _get_acceptable_response_type() == JSON: depth = 0 if 'expand' in request.args: depth = 1 return _single_resource_json_response(resource, depth) else: return _single_resource_html_response(resource)
def resource_response(resource, depth=0)
Return a response for the *resource* of the appropriate content type. :param resource: resource to be returned in request :type resource: :class:`sandman.model.Model` :rtype: :class:`flask.Response`
5.184081
4.848302
1.069257
if _get_acceptable_response_type() == JSON: return _single_attribute_json_response(name, value) else: return _single_attribute_html_response(resource, name, value)
def attribute_response(resource, name, value)
Return a response for the *resource* of the appropriate content type. :param resource: resource to be returned in request :type resource: :class:`sandman.model.Model` :rtype: :class:`flask.Response`
4.402684
4.868881
0.90425
resource.from_dict(get_resource_data(incoming_request)) _perform_database_action('merge', resource) return no_content_response()
def update_resource(resource, incoming_request)
Replace the contents of a resource with the data from *incoming_request* and return an appropriate *Response*. :param resource: :class:`sandman.model.Model` to be updated :param incoming_request: :class:`flask.Request` containing the new values for the fields in *resource*
9.401276
13.698619
0.686294
cls = endpoint_class(collection) try: resource = retrieve_resource(collection, key) except InvalidAPIUsage: resource = None _validate(cls, request.method, resource) if resource is None: resource = cls() resource.from_dict(get_resource_data(request)) setattr(resource, resource.primary_key(), key) _perform_database_action('add', resource) return resource_created_response(resource) else: return update_resource(resource, request)
def patch_resource(collection, key)
Upsert" a resource identified by the given key and return the appropriate *Response*. If no resource currently exists at `/<collection>/<key>`, create it with *key* as its primary key and return a :func:`resource_created_response`. If a resource *does* exist at `/<collection>/<key>`, update it with the data sent in the request and return a :func:`no_content_response`. Note: HTTP `PATCH` (and, thus, :func:`patch_resource`) is idempotent :param string collection: a :class:`sandman.model.Model` endpoint :param string key: the primary key for the :class:`sandman.model.Model` :rtype: :class:`flask.Response`
5.386303
5.334732
1.009667
resource = retrieve_resource(collection, key) _validate(endpoint_class(collection), request.method, resource) resource.replace(get_resource_data(request)) try: _perform_database_action('add', resource) except IntegrityError as exception: raise InvalidAPIUsage(422, FORWARDED_EXCEPTION_MESSAGE.format( exception)) return no_content_response()
def put_resource(collection, key)
Replace the resource identified by the given key and return the appropriate response. :param string collection: a :class:`sandman.model.Model` endpoint :rtype: :class:`flask.Response`
9.459664
10.842475
0.872464
cls = endpoint_class(collection) resource = cls() resource.from_dict(get_resource_data(request)) _validate(cls, request.method, resource) _perform_database_action('add', resource) return resource_created_response(resource)
def post_resource(collection)
Return the appropriate *Response* based on adding a new resource to *collection*. :param string collection: a :class:`sandman.model.Model` endpoint :rtype: :class:`flask.Response`
7.550169
8.769982
0.86091
cls = endpoint_class(collection) resource = retrieve_resource(collection, key) _validate(cls, request.method, resource) try: _perform_database_action('delete', resource) except IntegrityError as exception: raise InvalidAPIUsage(422, FORWARDED_EXCEPTION_MESSAGE.format( exception)) return no_content_response()
def delete_resource(collection, key)
Return the appropriate *Response* for deleting an existing resource in *collection*. :param string collection: a :class:`sandman.model.Model` endpoint :param string key: the primary key for the :class:`sandman.model.Model` :rtype: :class:`flask.Response`
8.422113
8.642513
0.974498
resource = retrieve_resource(collection, key) _validate(endpoint_class(collection), request.method, resource) return resource_response(resource)
def get_resource(collection, key)
Return the appropriate *Response* for retrieving a single resource. :param string collection: a :class:`sandman.model.Model` endpoint :param string key: the primary key for the :class:`sandman.model.Model` :rtype: :class:`flask.Response`
11.774549
13.791455
0.853757
resource = retrieve_resource(collection, key) _validate(endpoint_class(collection), request.method, resource) value = getattr(resource, attribute) if isinstance(value, Model): return resource_response(value) else: return attribute_response(resource, attribute, value)
def get_resource_attribute(collection, key, attribute)
Return the appropriate *Response* for retrieving an attribute of a single resource. :param string collection: a :class:`sandman.model.Model` endpoint :param string key: the primary key for the :class:`sandman.model.Model` :param string attribute: name of the attribute to retrieve :rtype: :class:`flask.Response`
5.27341
5.395814
0.977315
cls = endpoint_class(collection) resources = retrieve_collection(collection, request.args) _validate(cls, request.method, resources) start = stop = None if request.args and 'page' in request.args: page = int(request.args['page']) results_per_page = app.config.get('RESULTS_PER_PAGE', 20) start, stop = page * results_per_page, (page + 1) * results_per_page return collection_response(cls, resources, start, stop)
def get_collection(collection)
Return the appropriate *Response* for retrieving a collection of resources. :param string collection: a :class:`sandman.model.Model` endpoint :rtype: :class:`flask.Response`
3.667365
3.784214
0.969122
classes = [] with app.app_context(): classes = set(current_app.class_references.values()) if _get_acceptable_response_type() == JSON: meta_data = {} for cls in classes: meta_data[cls.endpoint()] = { 'link': '/' + cls.endpoint(), 'meta': '/' + cls.endpoint() + '/meta' } return jsonify(meta_data) else: return render_template('index.html', classes=classes)
def index()
Return information about each type of resource and how it can be accessed.
4.371765
4.376075
0.999015
cls = endpoint_class(collection) description = cls.meta() return jsonify(description)
def get_meta(collection)
Return the meta-description of a given resource. :param collection: The collection to get meta-info for
14.895801
13.586514
1.096367
resp = make_response(render_template('error.html', error=self.code, message=self.message), self.code) return resp
def abort(self)
Return an HTML Response representation of the exception.
4.307764
3.321367
1.296985
session = getattr(g, '_session', None) if session is None: session = g._session = db.session() return session
def _get_session()
Return (and memoize) a database session
2.715235
2.280046
1.190868
seen_classes = set() for cls in current_app.class_references.values(): seen_classes.add(cls.__tablename__) with app.app_context(): db.metadata.reflect(bind=db.engine) for name, table in db.metadata.tables.items(): if name not in seen_classes: seen_classes.add(name) if not table.primary_key and generate_pks: cls = add_pk_if_required(db, table, name) else: cls = type( str(name), (sandman_model, db.Model), {'__tablename__': name}) register(cls)
def generate_endpoint_classes(db, generate_pks=False)
Generate and register a model class for each reflected database table that has not already been registered.
3.370175
3.464638
0.972735
db.metadata.reflect(bind=db.engine) cls_dict = {'__tablename__': name} if not table.primary_key: for column in table.columns: column.primary_key = True Table(name, db.metadata, *table.columns, extend_existing=True) cls_dict['__table__'] = table db.metadata.create_all(bind=db.engine) return type(str(name), (sandman_model, db.Model), cls_dict)
def add_pk_if_required(db, table, name)
Return a class deriving from our Model class as well as the SQLAlchemy model. :param `sqlalchemy.schema.Table` table: table to create primary key for :param string name: name of the table
3.068159
3.373295
0.909544
inspector = reflection.Inspector.from_engine(db.engine) for cls in set(known_tables.values()): for foreign_key in inspector.get_foreign_keys(cls.__tablename__): if foreign_key['referred_table'] in known_tables: other = known_tables[foreign_key['referred_table']] constrained_column = foreign_key['constrained_columns'] if other not in cls.__related_tables__ and cls not in ( other.__related_tables__) and other != cls: cls.__related_tables__.add(other) # Add a SQLAlchemy relationship as an attribute # on the class setattr(cls, other.__table__.name, relationship( other.__name__, backref=db.backref( cls.__name__.lower()), foreign_keys=str(cls.__name__) + '.' + ''.join(constrained_column)))
def prepare_relationships(db, known_tables)
Enrich the registered Models with SQLAlchemy ``relationships`` so that related tables are correctly picked up by the admin.
3.421289
3.289571
1.040041
with app.app_context(): if getattr(current_app, 'class_references', None) is None: current_app.class_references = {} if isinstance(cls, (list, tuple)): for entry in cls: register_internal_data(entry) entry.use_admin = use_admin else: register_internal_data(cls) cls.use_admin = use_admin
def register(cls, use_admin=True)
Register with the API a :class:`sandman.model.Model` class and associated endpoint. :param cls: User-defined class derived from :class:`sandman.model.Model` to be registered with the endpoint returned by :func:`endpoint()` :type cls: :class:`sandman.model.Model` or tuple
3.210068
4.081174
0.786555
with app.app_context(): if getattr(cls, 'endpoint', None) is None: orig_class = cls cls = type('Sandman' + cls.__name__, (cls, Model), {}) cls.__from_class__ = orig_class current_app.class_references[cls.__tablename__] = cls current_app.class_references[cls.__name__] = cls current_app.class_references[cls.endpoint()] = cls if not getattr(cls, '__related_tables__', None): cls.__related_tables__ = set()
def register_internal_data(cls)
Register a new class, *cls*, with various internal data structures. :param `sandman.model.Model` cls: class to register
3.7633
3.82543
0.983759
with app.app_context(): admin_view = Admin(current_app, name=name) for cls in set( cls for cls in current_app.class_references.values() if cls.use_admin): column_list = [column.name for column in cls.__table__.columns.values()] if hasattr(cls, '__view__'): # allow ability for model classes to specify model views admin_view_class = type( 'AdminView', (cls.__view__,), {'form_columns': column_list}) elif show_pks: # the default of Flask-SQLAlchemy is to not show primary # classes, which obviously isn't acceptable in some cases admin_view_class = type( 'AdminView', (AdminModelViewWithPK,), {'form_columns': column_list}) else: admin_view_class = ModelView admin_view.add_view(admin_view_class(cls, db_session))
def register_classes_for_admin(db_session, show_pks=True, name='admin')
Register each previously registered class whose ``use_admin`` flag is set with the Admin view that ultimately creates the admin interface. :param db_session: handle to database session :param bool show_pks: show primary key columns in the admin? :param string name: name to use for the admin blueprint
4.213117
4.328787
0.973279
with app.app_context(): generate_pks = app.config.get('SANDMAN_GENERATE_PKS', None) or False if getattr(app, 'class_references', None) is None or reflect_all: app.class_references = collections.OrderedDict() generate_endpoint_classes(db, generate_pks) else: Model.prepare(db.engine) prepare_relationships(db, current_app.class_references) if admin: try: show_pks = current_app.config['SANDMAN_SHOW_PKS'] except KeyError: show_pks = False register_classes_for_admin(db.session, show_pks, name) if browser: port = app.config.get('SERVER_PORT', None) or 5000 webbrowser.open('http://localhost:{}/admin'.format(port))
def activate(admin=True, browser=True, name='admin', reflect_all=False)
Activate each pre-registered model or generate the model classes and (possibly) register them for the admin. :param bool admin: should we generate the admin interface? :param bool browser: should we open the browser for the user? :param name: name to use for blueprint created by the admin interface. Set this to avoid naming conflicts with other blueprints (if trying to use sandman to connect to multiple databases simultaneously)
4.325208
4.191848
1.031814
# Remove git_root from src_path for searching the correct filename # If cwd is `/home/user/work/diff-cover/diff_cover` # and src_path is `diff_cover/violations_reporter.py` # search for `violations_reporter.py` src_rel_path = self._to_unix_path(GitPathTool.relative_path(src_path)) # If cwd is `/home/user/work/diff-cover/diff_cover` # and src_path is `other_package/some_file.py` # search for `/home/user/work/diff-cover/other_package/some_file.py` src_abs_path = self._to_unix_path(GitPathTool.absolute_path(src_path)) # cobertura sometimes provides the sources for the measurements # within it. If we have that we ought to use it sources = xml_document.findall('sources/source') sources = [source.text for source in sources if source.text] classes = [class_tree for class_tree in xml_document.findall(".//class") or []] classes = ( [clazz for clazz in classes if src_abs_path in [ self._to_unix_path( os.path.join( source.strip(), clazz.get('filename') ) ) for source in sources]] or [clazz for clazz in classes if self._to_unix_path(clazz.get('filename')) == src_abs_path] or [clazz for clazz in classes if self._to_unix_path(clazz.get('filename')) == src_rel_path] ) return classes
def _get_classes(self, xml_document, src_path)
Given a path and a parsed xml_document, provide the class nodes with the relevant lines. First, we look to see if xml_document contains a source node providing paths to search for. If we don't have that, we check whether each node's filename attribute matches an absolute path. Finally, if we found no nodes, we check the filename attribute for the relative path.
3.94803
3.834376
1.029641
files = [file_tree for file_tree in xml_document.findall(".//file") if GitPathTool.relative_path(file_tree.get('path')) == src_path] if not files: return None lines = [file_tree.findall('./line[@type="stmt"]') for file_tree in files] return [elem for elem in itertools.chain(*lines)]
def _get_src_path_line_nodes_clover(self, xml_document, src_path)
Return a list of nodes containing line information for `src_path` in `xml_document`. If file is not present in `xml_document`, return None
5.493759
5.190725
1.05838
files = [] packages = [pkg for pkg in xml_document.findall(".//package")] for pkg in packages: _files = [_file for _file in pkg.findall('sourcefile') if self._measured_source_path_matches(pkg.get('name'), _file.get('name'), src_path)] files.extend(_files) if not files: return None lines = [file_tree.findall('./line') for file_tree in files] return [elem for elem in itertools.chain(*lines)]
def _get_src_path_line_nodes_jacoco(self, xml_document, src_path)
Return a list of nodes containing line information for `src_path` in `xml_document`. If file is not present in `xml_document`, return None
4.525871
4.269582
1.060027
# If we have not yet loaded this source file if src_path not in self._info_cache: # We only want to keep violations that show up in each xml source. # Thus, each time, we take the intersection. However, to do this # we must treat the first time as a special case and just add all # the violations from the first xml report. violations = None # A line is measured if it is measured in any of the reports, so # we take set union each time and can just start with the empty set measured = set() # Loop through the files that contain the xml roots for xml_document in self._xml_roots: if xml_document.findall('.[@clover]'): # see etc/schema/clover.xsd at https://bitbucket.org/atlassian/clover/src line_nodes = self._get_src_path_line_nodes_clover(xml_document, src_path) _number = 'num' _hits = 'count' elif xml_document.findall('.[@name]'): # https://github.com/jacoco/jacoco/blob/master/org.jacoco.report/src/org/jacoco/report/xml/report.dtd line_nodes = self._get_src_path_line_nodes_jacoco(xml_document, src_path) _number = 'nr' _hits = 'ci' else: # https://github.com/cobertura/web/blob/master/htdocs/xml/coverage-04.dtd line_nodes = self._get_src_path_line_nodes_cobertura(xml_document, src_path) _number = 'number' _hits = 'hits' if line_nodes is None: continue # First case, need to define violations initially if violations is None: violations = set( Violation(int(line.get(_number)), None) for line in line_nodes if int(line.get(_hits, 0)) == 0) # If we already have a violations set, # take the intersection of the new # violations set and its old self else: violations = violations & set( Violation(int(line.get(_number)), None) for line in line_nodes if int(line.get(_hits, 0)) == 0 ) # Measured is the union of itself and the new measured measured = measured | set( int(line.get(_number)) for line in line_nodes ) # If we don't have any information about the source file, # don't report any violations if violations is None: violations = set() self._info_cache[src_path] = (violations, measured)
def _cache_file(self, src_path)
Load the data from `self._xml_roots` for `src_path`, if it hasn't been already.
4.364165
4.234955
1.03051
src_paths = [] message_match = self.dupe_code_violation_regex.match(message) if message_match: for _ in range(int(message_match.group(1))): current_line += 1 match = self.multi_line_violation_regex.match( lines[current_line] ) src_path, l_number = match.groups() src_paths.append(('%s.py' % src_path, l_number)) return src_paths
def _process_dupe_code_violation(self, lines, current_line, message)
The duplicate code violation is a multi-line error. This pulls out all the relevant files.
3.334539
3.20984
1.038849
violations_dict = defaultdict(list) for report in reports: output_lines = report.split('\n') for output_line_number, line in enumerate(output_lines): match = self.pylint_expression.match(line) # Ignore any line that isn't matched # (for example, snippets from the source code) if match is not None: (pylint_src_path, line_number, pylint_code, function_name, message) = match.groups() if pylint_code == self.dupe_code_violation: files_involved = self._process_dupe_code_violation( output_lines, output_line_number, message ) else: files_involved = [(pylint_src_path, line_number)] for violation in files_involved: pylint_src_path, line_number = violation # If we're looking for a particular source file, # ignore any other source files. if function_name: error_str = "{}: {}: {}".format(pylint_code, function_name, message) else: error_str = "{}: {}".format(pylint_code, message) violation = Violation(int(line_number), error_str) violations_dict[pylint_src_path].append(violation) return violations_dict
def parse_reports(self, reports)
Args: reports: list[str] - output from the report. Return: a dict mapping source path (str) to a list of Violation namedtuples (defined above).
3.473172
3.403821
1.020375
parser = argparse.ArgumentParser( description=diff_cover.QUALITY_DESCRIPTION ) parser.add_argument( '--violations', metavar='TOOL', type=str, help=VIOLATION_CMD_HELP, required=True ) parser.add_argument( '--html-report', metavar='FILENAME', type=str, default=None, help=HTML_REPORT_HELP ) parser.add_argument( '--external-css-file', metavar='FILENAME', type=str, default=None, help=CSS_FILE_HELP, ) parser.add_argument( '--compare-branch', metavar='BRANCH', type=str, default='origin/master', help=COMPARE_BRANCH_HELP ) parser.add_argument( 'input_reports', type=str, nargs="*", default=[], help=INPUT_REPORTS_HELP ) parser.add_argument( '--options', type=str, nargs='?', default=None, help=OPTIONS_HELP ) parser.add_argument( '--fail-under', metavar='SCORE', type=float, default='0', help=FAIL_UNDER_HELP ) parser.add_argument( '--ignore-staged', action='store_true', default=False, help=IGNORE_STAGED_HELP ) parser.add_argument( '--ignore-unstaged', action='store_true', default=False, help=IGNORE_UNSTAGED_HELP ) parser.add_argument( '--exclude', metavar='EXCLUDE', type=str, nargs='+', help=EXCLUDE_HELP ) return vars(parser.parse_args(argv))
def parse_quality_args(argv)
Parse command line arguments, returning a dict of valid options: { 'violations': pycodestyle | pyflakes | flake8 | pylint | ..., 'html_report': None | HTML_REPORT, 'external_css_file': None | CSS_FILE, } where `HTML_REPORT` and `CSS_FILE` are paths.
1.978296
1.893876
1.044575
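A sketch of calling the parser directly; argv excludes the program name, and the defaults follow the argparse definitions above:
    opts = parse_quality_args(['--violations', 'pylint', '--fail-under', '80'])
    opts['violations']      # 'pylint'
    opts['fail_under']      # 80.0
    opts['compare_branch']  # 'origin/master'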
diff = GitDiffReporter( compare_branch, git_diff=GitDiffTool(), ignore_staged=ignore_staged, ignore_unstaged=ignore_unstaged, supported_extensions=tool.driver.supported_extensions, exclude=exclude) if html_report is not None: css_url = css_file if css_url is not None: css_url = os.path.relpath(css_file, os.path.dirname(html_report)) reporter = HtmlQualityReportGenerator(tool, diff, css_url=css_url) with open(html_report, "wb") as output_file: reporter.generate_report(output_file) if css_file is not None: with open(css_file, "wb") as output_file: reporter.generate_css(output_file) # Generate the report for stdout reporter = StringQualityReportGenerator(tool, diff) output_file = sys.stdout if six.PY2 else sys.stdout.buffer reporter.generate_report(output_file) return reporter.total_percent_covered()
def generate_quality_report(tool, compare_branch, html_report=None, css_file=None, ignore_staged=False, ignore_unstaged=False, exclude=None)
Generate the quality report, using kwargs from `parse_args()`.
2.680091
2.728142
0.982387
logging.basicConfig(format='%(message)s') argv = argv or sys.argv arg_dict = parse_quality_args(argv[1:]) GitPathTool.set_cwd(directory) fail_under = arg_dict.get('fail_under') tool = arg_dict['violations'] user_options = arg_dict.get('options') if user_options: # strip quotes if present first_char = user_options[0] last_char = user_options[-1] if first_char == last_char and first_char in ('"', "'"): user_options = user_options[1:-1] driver = QUALITY_DRIVERS.get(tool) if driver is not None: # If we've been given pre-generated reports, # try to open the files input_reports = [] for path in arg_dict['input_reports']: try: input_reports.append(open(path, 'rb')) except IOError: LOGGER.warning("Could not load '{}'".format(path)) try: reporter = QualityReporter(driver, input_reports, user_options) percent_passing = generate_quality_report( reporter, arg_dict['compare_branch'], html_report=arg_dict['html_report'], css_file=arg_dict['external_css_file'], ignore_staged=arg_dict['ignore_staged'], ignore_unstaged=arg_dict['ignore_unstaged'], exclude=arg_dict['exclude'], ) if percent_passing >= fail_under: return 0 else: LOGGER.error("Failure. Quality is below {}%.".format(fail_under)) return 1 except (ImportError, EnvironmentError): LOGGER.error( "Quality tool not installed: '{}'".format(tool) ) return 1 # Close any reports we opened finally: for file_handle in input_reports: file_handle.close() else: LOGGER.error("Quality tool not recognized: '{}'".format(tool)) return 1
def main(argv=None, directory=None)
Main entry point for the tool, used by setup.py. Returns a value that can be passed into exit(), specifying the exit code: 1 is an error, 0 is a successful run.
3.348845
3.363479
0.995649
if not cwd: try: cwd = os.getcwdu() except AttributeError: cwd = os.getcwd() if isinstance(cwd, six.binary_type): cwd = cwd.decode(sys.getdefaultencoding()) cls._cwd = cwd cls._root = cls._git_root()
def set_cwd(cls, cwd)
Set the cwd that is used to manipulate paths.
2.931158
2.825104
1.03754
# Remove git_root from src_path for searching the correct filename # If cwd is `/home/user/work/diff-cover/diff_cover` # and src_path is `diff_cover/violations_reporter.py` # search for `violations_reporter.py` root_rel_path = os.path.relpath(cls._cwd, cls._root) rel_path = os.path.relpath(git_diff_path, root_rel_path) return rel_path
def relative_path(cls, git_diff_path)
Returns git_diff_path relative to cwd.
5.826586
5.485672
1.062146
if not patterns: return default return any(fnmatch.fnmatch(filename, pattern) for pattern in patterns)
def _fnmatch(self, filename, patterns, default=True)
Wrap :func:`fnmatch.fnmatch` to add some functionality. :param str filename: Name of the file we're trying to match. :param list patterns: Patterns we're using to try to match the filename. :param bool default: The default value if patterns is empty :returns: True if a pattern matches the filename, False if it doesn't. ``default`` if patterns is empty.
2.776668
3.277482
0.847195
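The wrapped behavior in miniature: an empty pattern list yields the default, otherwise any single match wins. A standalone sketch:
    import fnmatch

    patterns = ['*.pyc', 'build/*']
    any(fnmatch.fnmatch('build/lib/foo.py', p) for p in patterns)  # True
    any(fnmatch.fnmatch('src/foo.py', p) for p in patterns)        # False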
exclude = self._exclude if not exclude: return False basename = os.path.basename(path) if self._fnmatch(basename, exclude): return True absolute_path = os.path.abspath(path) match = self._fnmatch(absolute_path, exclude) return match
def _is_path_excluded(self, path)
Check if a path is excluded. :param str path: Path to check against the exclude patterns. :returns: True if there are exclude patterns and the path matches, otherwise False.
2.952273
3.512184
0.84058
# Get the diff dictionary diff_dict = self._git_diff() # Return the changed file paths (dict keys) # in alphabetical order return sorted(diff_dict.keys(), key=lambda x: x.lower())
def src_paths_changed(self)
See base class docstring.
7.128331
5.505044
1.294873
included = [self._git_diff_tool.diff_committed(self._compare_branch)] if not self._ignore_staged: included.append(self._git_diff_tool.diff_staged()) if not self._ignore_unstaged: included.append(self._git_diff_tool.diff_unstaged()) return included
def _get_included_diff_results(self)
Return a list of stages to be included in the diff results.
3.279493
3.062254
1.070941
# If we do not have a cached result, execute `git diff` if self._diff_dict is None: result_dict = dict() for diff_str in self._get_included_diff_results(): # Parse the output of the diff string diff_dict = self._parse_diff_str(diff_str) for src_path in diff_dict.keys(): if self._is_path_excluded(src_path): continue # If no _supported_extensions provided, or extension present: process root, extension = os.path.splitext(src_path) extension = extension[1:].lower() # 'not self._supported_extensions' tests for both None and empty list [] if not self._supported_extensions or extension in self._supported_extensions: added_lines, deleted_lines = diff_dict[src_path] # Remove any lines from the dict that have been deleted # Include any lines that have been added result_dict[src_path] = [ line for line in result_dict.get(src_path, []) if line not in deleted_lines ] + added_lines # Eliminate repeats and order line numbers for (src_path, lines) in result_dict.items(): result_dict[src_path] = self._unique_ordered_lines(lines) # Store the resulting dict self._diff_dict = result_dict # Return the diff cache return self._diff_dict
def _git_diff(self)
Run `git diff` and return a dict in which the keys are changed file paths and the values are lists of line numbers. Guarantees that each line number within a file is unique (no repeats) and in ascending order. Returns a cached result if called multiple times. Raises a GitDiffError if `git diff` has an error.
3.905587
3.607394
1.082662
# Create a dict to hold results diff_dict = dict() # Parse the diff string into sections by source file sections_dict = self._parse_source_sections(diff_str) for (src_path, diff_lines) in sections_dict.items(): # Parse the hunk information for the source file # to determine lines changed for the source file diff_dict[src_path] = self._parse_lines(diff_lines) return diff_dict
def _parse_diff_str(self, diff_str)
Parse the output of `git diff` into a dictionary of the form: { SRC_PATH: (ADDED_LINES, DELETED_LINES) } where `ADDED_LINES` and `DELETED_LINES` are lists of line numbers added/deleted respectively. If the output could not be parsed, raises a GitDiffError.
4.679369
4.675054
1.000923
# Create a dict to map source files to lines in the diff output source_dict = dict() # Keep track of the current source file src_path = None # Signal that we've found a hunk (after starting a source file) found_hunk = False # Parse the diff string into sections by source file for line in diff_str.split('\n'): # If the line starts with "diff --git" # or "diff --cc" (in the case of a merge conflict) # then it is the start of a new source file if line.startswith('diff --git') or line.startswith('diff --cc'): # Retrieve the name of the source file src_path = self._parse_source_line(line) # Create an entry for the source file, if we don't # already have one. if src_path not in source_dict: source_dict[src_path] = [] # Signal that we're waiting for a hunk for this source file found_hunk = False # Every other line is stored in the dictionary for this source file # once we find a hunk section else: # Only add lines if we're in a hunk section # (ignore index and files changed lines) if found_hunk or line.startswith('@@'): # Remember that we found a hunk found_hunk = True if src_path is not None: source_dict[src_path].append(line) else: # We tolerate other information before we have # a source file defined, unless it's a hunk line if line.startswith("@@"): msg = "Hunk has no source file: '{}'".format(line) raise GitDiffError(msg) return source_dict
def _parse_source_sections(self, diff_str)
Given the output of `git diff`, return a dictionary with keys that are source file paths. Each value is a list of lines from the `git diff` output related to the source file. Raises a `GitDiffError` if `diff_str` is in an invalid format.
3.729825
3.620691
1.030142
added_lines = [] deleted_lines = [] current_line_new = None current_line_old = None for line in diff_lines: # If this is the start of the hunk definition, retrieve # the starting line number if line.startswith('@@'): line_num = self._parse_hunk_line(line) current_line_new, current_line_old = line_num, line_num # This is an added/modified line, so store the line number elif line.startswith('+'): # Since we parse for source file sections before # calling this method, we're guaranteed to have a source # file specified. We check anyway just to be safe. if current_line_new is not None: # Store the added line added_lines.append(current_line_new) # Increment the line number in the file current_line_new += 1 # This is a deleted line that does not exist in the final # version, so skip it elif line.startswith('-'): # Since we parse for source file sections before # calling this method, we're guaranteed to have a source # file specified. We check anyway just to be safe. if current_line_old is not None: # Store the deleted line deleted_lines.append(current_line_old) # Increment the line number in the file current_line_old += 1 # This is a line in the final version that was not modified. # Increment the line number, but do not store this as a changed # line. else: if current_line_old is not None: current_line_old += 1 if current_line_new is not None: current_line_new += 1 # If we are not in a hunk, then ignore the line else: pass return added_lines, deleted_lines
def _parse_lines(self, diff_lines)
Given the diff lines output from `git diff` for a particular source file, return a tuple of `(ADDED_LINES, DELETED_LINES)` where `ADDED_LINES` and `DELETED_LINES` are lists of line numbers added/deleted respectively. Raises a `GitDiffError` if the diff lines are in an invalid format.
2.64617
2.603964
1.016208
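A hand-traced example of the hunk walk above; the return value in the final comment follows from the logic, assuming the method is in scope:
    diff_lines = [
        '@@ -1,2 +1,3 @@',  # both counters start at line 1
        ' context line',    # unchanged: old -> 2, new -> 2
        '+added line',      # recorded as added line 2; new -> 3
        '-removed line',    # recorded as deleted line 2; old -> 3
    ]
    # _parse_lines(diff_lines) would return ([2], [2])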
if '--git' in line: regex = self.SRC_FILE_RE elif '--cc' in line: regex = self.MERGE_CONFLICT_RE else: msg = "Do not recognize format of source in line '{}'".format(line) raise GitDiffError(msg) # Parse for the source file path groups = regex.findall(line) if len(groups) == 1: return groups[0] else: msg = "Could not parse source path in line '{}'".format(line) raise GitDiffError(msg)
def _parse_source_line(self, line)
Given a source line in `git diff` output, return the path to the source file.
4.437062
3.870756
1.146304
# Split the line at the @@ terminators (start and end of the line) components = line.split('@@') # The first component should be an empty string, because # the line starts with '@@'. The second component should # be the hunk information, and any additional components # are excerpts from the code. if len(components) >= 2: hunk_info = components[1] groups = self.HUNK_LINE_RE.findall(hunk_info) if len(groups) == 1: try: return int(groups[0]) except ValueError: msg = "Could not parse '{}' as a line number".format(groups[0]) raise GitDiffError(msg) else: msg = "Could not find start of hunk in line '{}'".format(line) raise GitDiffError(msg) else: msg = "Could not parse hunk in line '{}'".format(line) raise GitDiffError(msg)
def _parse_hunk_line(self, line)
Given a hunk line in `git diff` output, return the line number at the start of the hunk. A hunk is a segment of code that contains changes. The format of the hunk line is: @@ -k,l +n,m @@ TEXT where `k,l` represent the start line and length before the changes and `n,m` represent the start line and length after the changes. `git diff` will sometimes put a code excerpt from within the hunk in the `TEXT` section of the line.
3.527313
3.162477
1.115364
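A standalone sketch of pulling the post-change start line out of a hunk header; the regex stands in for HUNK_LINE_RE and is an assumption:
    import re

    HUNK_LINE_RE = re.compile(r'\+(\d+)')  # assumed: captures the line number after '+'
    line = '@@ -13,7 +13,8 @@ def foo():'
    hunk_info = line.split('@@')[1]          # ' -13,7 +13,8 '
    int(HUNK_LINE_RE.findall(hunk_info)[0])  # 13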
if len(line_numbers) == 0: return [] # Ensure lines are unique by putting them in a set line_set = set(line_numbers) # Retrieve the list from the set, sort it, and return return sorted(line_set)
def _unique_ordered_lines(line_numbers)
Given a list of line numbers, return a list in which each line number is included once and the lines are ordered sequentially.
4.211867
3.754459
1.121831
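The method reduces to sorted-set semantics; a sketch:
    def unique_ordered_lines(line_numbers):
        # Deduplicate, then return the line numbers in ascending order.
        return sorted(set(line_numbers))

    unique_ordered_lines([5, 3, 5, 1])  # [1, 3, 5]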
return {src for src, summary in self._diff_violations().items() if len(summary.measured_lines) > 0}
def src_paths(self)
Return the set of source files in the diff for which we have coverage information.
22.392931
12.159678
1.841573
diff_violations = self._diff_violations().get(src_path) if diff_violations is None: return None # Protect against a divide by zero num_measured = len(diff_violations.measured_lines) if num_measured > 0: num_uncovered = len(diff_violations.lines) return 100 - float(num_uncovered) / num_measured * 100 else: return None
def percent_covered(self, src_path)
Return a float percent of lines covered for the source in `src_path`. If we have no coverage information for `src_path`, returns None
3.684923
3.467029
1.062848
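A worked instance of the formula above: with 8 measured lines and 2 uncovered lines, the file is 75% covered.
    num_measured, num_uncovered = 8, 2
    100 - float(num_uncovered) / num_measured * 100  # 75.0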