code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
difference = self.check_state()
if not difference:
    return
self.events = []
self.handle_new_events(difference)
self.update_timeval()
self.events.append(self.sync_marker(self.timeval))
self.write_to_pipe(self.events)
def handle_input(self)
Sends differences in the device state to the MicroBitPad as events.
7.562782
6.740951
1.121916
while 1:
    events = get_mouse()
    for event in events:
        print(event.ev_type, event.code, event.state)
def main()
Just print out some event information when the mouse is used.
5.282183
3.518346
1.501326
while 1:
    events = get_key()
    if events:
        for event in events:
            print(event.ev_type, event.code, event.state)
def main()
Just print out some event information when keys are pressed.
4.703174
3.08629
1.523892
while 1:
    events = get_gamepad()
    for event in events:
        print(event.ev_type, event.code, event.state)
def main()
Just print out some event information when the gamepad is used.
3.503554
2.327398
1.505352
if not gamepad:
    gamepad = inputs.devices.gamepads[0]
# Vibrate left
gamepad.set_vibration(1, 0, 1000)
time.sleep(2)
# Vibrate right
gamepad.set_vibration(0, 1, 1000)
time.sleep(2)
# Vibrate both
gamepad.set_vibration(1, 1, 2000)
time.sleep(2)
def main(gamepad=None)
Vibrate the gamepad.
2.21359
2.076087
1.066232
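A minimal usage sketch tying the polling and vibration entries above together. It assumes the `inputs` package and a plugged-in gamepad; `rumble_on_buttons` is a hypothetical demo name, not part of the library:

import time
import inputs

def rumble_on_buttons():
    # Hypothetical demo: pulse both motors whenever a button
    # ('Key'-type event) is reported as pressed.
    gamepad = inputs.devices.gamepads[0]
    while 1:
        for event in inputs.get_gamepad():
            if event.ev_type == 'Key' and event.state == 1:
                gamepad.set_vibration(1, 1, 500)  # left, right, duration (ms)
                time.sleep(0.5)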
errors = []
# Make sure the type validates first.
valid = self._is_valid(value)
if not valid:
    errors.append(self.fail(value))
    return errors
# Then validate all the constraints second.
for constraint in self._constraints_inst:
    error = constraint.is_valid(value)
    if error:
        errors.append(error)
return errors
def validate(self, value)
Check if ``value`` is valid. :returns: [errors] If ``value`` is invalid, otherwise [].
4.568671
4.559236
1.002069
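A self-contained sketch of the same two-phase pattern (type check first, then constraints); `IntValidator` and `MinConstraint` are hypothetical stand-ins, not the library's own classes:

class MinConstraint(object):
    def __init__(self, minimum):
        self.minimum = minimum

    def is_valid(self, value):
        # Return an error string, or None when the constraint holds.
        if value < self.minimum:
            return '%r is less than %r' % (value, self.minimum)

class IntValidator(object):
    def __init__(self, constraints):
        self._constraints_inst = constraints

    def validate(self, value):
        # Type validates first; constraints run only on well-typed values.
        if not isinstance(value, int):
            return ['%r is not an int' % value]
        errors = []
        for constraint in self._constraints_inst:
            error = constraint.is_valid(value)
            if error:
                errors.append(error)
        return errors

print(IntValidator([MinConstraint(0)]).validate(-5))  # ['-5 is less than 0']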
schema_flat = util.flatten(schema_dict)
for key, expression in schema_flat.items():
    try:
        schema_flat[key] = syntax.parse(expression, validators)
    except SyntaxError as e:
        # Tack on some more context and rethrow.
        error = str(e) + ' at node \'%s\'' % key
        raise SyntaxError(error)
return schema_flat
def _process_schema(self, schema_dict, validators)
Go through a schema and construct validators.
5.094374
4.716645
1.080084
errors = []

if position:
    position = '%s.%s' % (position, key)
else:
    position = key

try:
    # Pull value out of data. Data can be a map or a list/sequence.
    data_item = util.get_value(data, key)
except KeyError:
    # Oops, that field didn't exist.
    if validator.is_optional:
        # Optional? Who cares.
        return errors
    # SHUT DOWN EVERYTHING
    errors.append('%s: Required field missing' % position)
    return errors

return self._validate_item(validator, data_item, position, includes)
def _validate(self, validator, data, key, position=None, includes=None)
Run through a schema and a data structure, validating along the way. Ignores fields that are in the data structure, but not in the schema. Returns an array of errors.
5.764927
5.599139
1.02961
errors = []

# Optional field with optional value? Who cares.
if data_item is None and validator.is_optional and validator.can_be_none:
    return errors

errors += self._validate_primitive(validator, data_item, position)
if errors:
    return errors

if isinstance(validator, val.Include):
    errors += self._validate_include(validator, data_item, includes, position)
elif isinstance(validator, (val.Map, val.List)):
    errors += self._validate_map_list(validator, data_item, includes, position)
elif isinstance(validator, val.Any):
    errors += self._validate_any(validator, data_item, includes, position)

return errors
def _validate_item(self, validator, data_item, position, includes)
Validates a single data item against validator. Returns an array of errors.
3.230238
3.270467
0.987699
if not data_path or data_path == '/' or data_path == '.':
    return None
directory = os.path.dirname(data_path)
path = glob.glob(os.path.join(directory, schema_name))
if not path:
    return _find_schema(directory, schema_name)
return path[0]
def _find_data_path_schema(data_path, schema_name)
Starts in the data file folder and recursively looks in parents for `schema_name`.
2.580951
2.463781
1.047557
path = glob.glob(schema_name)
for p in path:
    if os.path.isfile(p):
        return p
return _find_data_path_schema(data_path, schema_name)
def _find_schema(data_path, schema_name)
Checks if `schema_name` is a valid file; if not, searches in `data_path` for it.
3.861892
3.80676
1.014483
child = {}
if not dic:
    return {}
for k, v in get_iter(dic):
    if isstr(k):
        k = k.replace('.', '_')
    if position:
        item_position = '%s.%s' % (position, k)
    else:
        item_position = '%s' % k
    if is_iter(v):
        child.update(flatten(dic[k], keep_iter, item_position))
        if keep_iter:
            child[item_position] = v
    else:
        child[item_position] = v
return child
def flatten(dic, keep_iter=False, position=None)
Returns a flattened dictionary from a dictionary of nested dictionaries and lists. `keep_iter` will treat iterables as valid values, while also flattening them.
2.593673
2.646879
0.979899
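A small illustration of the flattening behavior documented above, assuming `get_iter`, `isstr`, and `is_iter` behave as their names suggest for plain dicts:

nested = {'a': {'b': 1, 'c': {'d': 2}}, 'x.y': 3}
# flatten(nested) joins positions with '.' and rewrites dots inside keys
# to underscores, yielding:
#     {'a.b': 1, 'a.c.d': 2, 'x_y': 3}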
if _subclasses_yielded is None:
    _subclasses_yielded = set()

# If the passed class is old- rather than new-style, raise an exception.
if not hasattr(cls, '__subclasses__'):
    raise TypeError('Old-style class "%s" unsupported.' % cls.__name__)

# For each direct subclass of this class
for subclass in cls.__subclasses__():
    # If this subclass has already been yielded, skip to the next.
    if subclass in _subclasses_yielded:
        continue
    # Yield this subclass and record having done so before recursing.
    yield subclass
    _subclasses_yielded.add(subclass)
    # Yield all direct subclasses of this class as well.
    for subclass_subclass in get_subclasses(subclass, _subclasses_yielded):
        yield subclass_subclass
def get_subclasses(cls, _subclasses_yielded=None)
Generator recursively yielding all subclasses of the passed class (in
depth-first order).

Parameters
----------
cls : type
    Class to find all subclasses of.
_subclasses_yielded : set
    Private parameter intended to be passed only by recursive
    invocations of this function, containing all previously yielded
    classes.
2.736262
2.772692
0.986861
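A quick, runnable usage sketch for the generator above:

class Base(object): pass
class Left(Base): pass
class LeftChild(Left): pass
class Right(Base): pass

# Each direct subclass is yielded before its own subclasses (depth-first).
print([c.__name__ for c in get_subclasses(Base)])
# ['Left', 'LeftChild', 'Right']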
value = self.model_field.__get__(obj, None)
return smart_text(value, strings_only=True)
def to_representation(self, obj)
Converts a value to its representation. DRF's ModelField uses ``value_to_string`` for this purpose; Mongoengine fields have no such method. This implementation uses ``django.utils.encoding.smart_text`` to convert everything to text, while keeping json-safe types intact. NB: the argument is the whole object, instead of the attribute value. This is an upstream feature, probably because the field can be represented by a complicated method with a nontrivial way to extract data.
6.305928
5.246141
1.202013
try:
    self.model_field.validate(value)
except MongoValidationError as e:
    raise ValidationError(e.message)
super(DocumentField, self).run_validators(value)
def run_validators(self, value)
Validates the value using the document field's ``validate()``.
3.957743
3.851933
1.027469
if html.is_html_input(data):
    data = html.parse_html_dict(data)
if not isinstance(data, dict):
    self.fail('not_a_dict', input_type=type(data).__name__)
if not self.allow_empty and len(data.keys()) == 0:
    message = self.error_messages['empty']
    raise ValidationError({
        api_settings.NON_FIELD_ERRORS_KEY: [message]
    })
return {
    six.text_type(key): self.child.run_validation(value)
    for key, value in data.items()
}
def to_internal_value(self, data)
Dicts of native values <- Dicts of primitive datatypes.
2.580639
2.442041
1.056755
try:
    return queryset.get(*args, **kwargs)
except (ValueError, TypeError, DoesNotExist, ValidationError):
    raise Http404()
def get_object_or_404(queryset, *args, **kwargs)
Replacement for the rest_framework.generics and django.shortcuts analogues.
3.533439
3.124526
1.130872
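A usage sketch inside a DRF viewset method; `Post`, `PostSerializer`, and `Response` are assumed from the surrounding project, not defined here:

def retrieve(self, request, pk=None):
    # A malformed ObjectId raises ValidationError/ValueError in Mongoengine,
    # which this helper converts into a 404 instead of a 500.
    post = get_object_or_404(Post.objects, pk=pk)
    return Response(PostSerializer(post).data)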
# me_data is an analogue of validated_data, but contains
# mongoengine EmbeddedDocument instances for nested data structures
# instead of OrderedDicts.
#
# For example:
# validated_data = {'id': "1", 'embed': OrderedDict({'a': 'b'})}
# me_data = {'id': "1", 'embed': <EmbeddedDocument>}
me_data = dict()
for key, value in validated_data.items():
    try:
        field = self.fields[key]
        # for EmbeddedDocumentSerializers, call recursive_save
        if isinstance(field, EmbeddedDocumentSerializer):
            me_data[key] = field.recursive_save(value)
        # same for lists of EmbeddedDocumentSerializers, i.e.
        # ListField(EmbeddedDocumentField) or EmbeddedDocumentListField
        elif ((isinstance(field, serializers.ListSerializer) or
               isinstance(field, serializers.ListField)) and
              isinstance(field.child, EmbeddedDocumentSerializer)):
            me_data[key] = []
            for datum in value:
                me_data[key].append(field.child.recursive_save(datum))
        # same for dicts of EmbeddedDocumentSerializers (or, speaking
        # in Mongoengine terms, MapField(EmbeddedDocument(Embed)))
        elif (isinstance(field, drfm_fields.DictField) and
              hasattr(field, "child") and
              isinstance(field.child, EmbeddedDocumentSerializer)):
            me_data[key] = {}
            for datum_key, datum_value in value.items():
                me_data[key][datum_key] = field.child.recursive_save(datum_value)
        # for regular fields just set value
        else:
            me_data[key] = value
    except KeyError:
        # this is dynamic data
        me_data[key] = value

# create (if needed), save (if needed) and return mongoengine instance
if not instance:
    instance = self.Meta.model(**me_data)
else:
    for key, value in me_data.items():
        setattr(instance, key, value)

if self._saving_instances:
    instance.save()

return instance
def recursive_save(self, validated_data, instance=None)
Recursively traverses validated_data and creates EmbeddedDocuments of the appropriate subtype from them. Returns a Mongoengine model instance.
3.400044
3.356054
1.013108
# for EmbeddedDocumentSerializers create initial data
# so that _get_dynamic_data could use them
for field in self._writable_fields:
    if isinstance(field, EmbeddedDocumentSerializer) and field.field_name in data:
        field.initial_data = data[field.field_name]

ret = super(DocumentSerializer, self).to_internal_value(data)

# for EmbeddedDocumentSerializers create _validated_data
# so that create()/update() could use them
for field in self._writable_fields:
    if isinstance(field, EmbeddedDocumentSerializer) and field.field_name in ret:
        field._validated_data = ret[field.field_name]

return ret
def to_internal_value(self, data)
Calls super() from DRF, but with an addition: creates initial_data and _validated_data for nested EmbeddedDocumentSerializers, so that recursive_save can make use of them. If it meets any arbitrary data not expected by the fields, it just silently drops it from validated_data.
3.33876
2.828152
1.180545
# This method is supposed to be called after self.get_fields(),
# thus it assumes that fields and exclude are mutually exclusive
# and at least one of them is set.
#
# Also, all the sanity checks are left up to nested field's
# get_fields() method, so if something is wrong with customization
# nested get_fields() will report this.
fields = getattr(self.Meta, 'fields', None)
exclude = getattr(self.Meta, 'exclude', None)

if fields and fields != ALL_FIELDS and not isinstance(fields, (list, tuple)):
    raise TypeError(
        'The `fields` option must be a list or tuple or "__all__". '
        'Got %s.' % type(fields).__name__
    )

if exclude and not isinstance(exclude, (list, tuple)):
    raise TypeError(
        'The `exclude` option must be a list or tuple. Got %s.' %
        type(exclude).__name__
    )

assert not (fields and exclude), (
    "Cannot set both 'fields' and 'exclude' options on "
    "serializer {serializer_class}.".format(
        serializer_class=self.__class__.__name__
    )
)

if fields is None and exclude is None:
    warnings.warn(
        "Creating a ModelSerializer without either the 'fields' "
        "attribute or the 'exclude' attribute is deprecated "
        "since 3.3.0. Add an explicit fields = '__all__' to the "
        "{serializer_class} serializer.".format(
            serializer_class=self.__class__.__name__
        ),
        DeprecationWarning
    )
    fields = ALL_FIELDS  # assume that fields are ALL_FIELDS

# TODO: validators

# get nested_fields or nested_exclude (supposed to be mutually
# exclusive, assign the other one to None)
if fields:
    if fields == ALL_FIELDS:
        nested_fields = ALL_FIELDS
    else:
        nested_fields = [field[len(field_name + '.'):]
                         for field in fields
                         if field.startswith(field_name + '.')]
    nested_exclude = None
else:
    # leave all the sanity checks up to get_fields() method of
    # nested field's serializer
    nested_fields = None
    nested_exclude = [field[len(field_name + '.'):]
                      for field in exclude
                      if field.startswith(field_name + '.')]

# get nested_extra_kwargs (including read-only fields)
# TODO: uniqueness extra kwargs
extra_kwargs = self.get_extra_kwargs()
nested_extra_kwargs = {key[len(field_name + '.'):]: value
                       for key, value in extra_kwargs.items()
                       if key.startswith(field_name + '.')}

# get nested_validate_methods dict {name: function}, rename e.g.
# 'validate_author__age()' -> 'validate_age()' so that we can add them
# to nested serializer's definition under this new name.
# validate_methods are normally checked in
# rest_framework.Serializer.to_internal_value()
nested_validate_methods = {}
for attr in dir(self.__class__):
    if attr.startswith('validate_%s__' % field_name.replace('.', '__')):
        method = get_unbound_function(getattr(self.__class__, attr))
        method_name = 'validate_' + attr[len('validate_%s__' % field_name.replace('.', '__')):]
        nested_validate_methods[method_name] = method

return Customization(nested_fields, nested_exclude,
                     nested_extra_kwargs, nested_validate_methods)
def get_customization_for_nested_field(self, field_name)
Support of nested fields customization for:

* EmbeddedDocumentField
* NestedReference
* Compound fields with EmbeddedDocument as a child:
    * ListField(EmbeddedDocument)/EmbeddedDocumentListField
    * MapField(EmbeddedDocument)

Extracts fields, exclude, extra_kwargs and validate_*() attributes from
the parent serializer, related to attributes of field_name.
3.3627
3.227947
1.041746
# apply fields or exclude
if customization.fields is not None:
    if len(customization.fields) == 0:
        # customization fields are empty, set Meta.fields to '__all__'
        serializer.Meta.fields = ALL_FIELDS
    else:
        serializer.Meta.fields = customization.fields
if customization.exclude is not None:
    serializer.Meta.exclude = customization.exclude

# apply extra_kwargs
if customization.extra_kwargs is not None:
    serializer.Meta.extra_kwargs = customization.extra_kwargs

# apply validate_methods
for method_name, method in customization.validate_methods.items():
    setattr(serializer, method_name, method)
def apply_customization(self, serializer, customization)
Applies fields customization to a nested or embedded DocumentSerializer.
2.64268
2.70354
0.977489
ret = super(DynamicDocumentSerializer, self).to_internal_value(data)
dynamic_data = self._get_dynamic_data(ret)
ret.update(dynamic_data)
return ret
def to_internal_value(self, data)
Updates _validated_data with dynamic data, i.e. data, not listed in fields.
3.665779
2.777268
1.319923
result = {}

for key in self.initial_data:
    if key not in validated_data:
        try:
            field = self.fields[key]
            # no exception? then this is either a SkipField or an error;
            # in particular, this might be a read-only field
            # that was mistakenly given a value
            if not isinstance(field, drf_fields.SkipField):
                msg = (
                    'Field %s is missing from validated data, '
                    'but is not a SkipField!'
                ) % key
                raise AssertionError(msg)
        except KeyError:
            # ok, this is dynamic data
            result[key] = self.initial_data[key]

return result
def _get_dynamic_data(self, validated_data)
Returns dict of data, not declared in serializer fields. Should be called after self.is_valid().
5.579818
5.299282
1.052939
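A standalone illustration of the dynamic-data split performed above (pure Python, no DRF required; all names are illustrative):

initial_data = {'name': 'x', 'extra': 42}   # what the client sent
validated_data = {'name': 'x'}              # what declared fields accepted
declared_fields = {'name'}

dynamic = {k: v for k, v in initial_data.items()
           if k not in validated_data and k not in declared_fields}
print(dynamic)  # {'extra': 42}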
# Deal with the primary key.
if issubclass(model, mongoengine.EmbeddedDocument):
    pk = None
else:
    pk = model._fields[model._meta['id_field']]

# Deal with regular fields.
fields = OrderedDict()

# Deal with forward relationships.
# Pass forward relations since there is no relations on mongodb
references = OrderedDict()
embedded = OrderedDict()

def add_field(name, field):
    if isinstance(field, REFERENCING_FIELD_TYPES):
        references[name] = get_relation_info(field)
    elif isinstance(field, EMBEDDING_FIELD_TYPES):
        embedded[name] = get_relation_info(field)
    elif isinstance(field, COMPOUND_FIELD_TYPES):
        fields[name] = field
        if field.field:
            add_field(name + '.child', field.field)
    elif field is pk:
        return
    else:
        fields[name] = field

for field_name in model._fields_ordered:
    add_field(field_name, model._fields[field_name])

# Shortcut that merges both regular fields and the pk,
# for simplifying regular field lookup.
fields_and_pk = OrderedDict()
fields_and_pk['pk'] = pk
fields_and_pk[getattr(pk, 'name', 'pk')] = pk
fields_and_pk.update(fields)

return FieldInfo(pk, fields, references, fields_and_pk, embedded)
def get_field_info(model)
Given a model class, returns a `FieldInfo` instance, which is a `namedtuple`, containing metadata about the various field types on the model including information about their relationships.
3.75115
3.719635
1.008473
kwargs = {}

# The following will only be used by ModelField classes.
# Gets removed for everything else.
kwargs['model_field'] = model_field

if hasattr(model_field, 'verbose_name') and needs_label(model_field, field_name):
    kwargs['label'] = capfirst(model_field.verbose_name)

if hasattr(model_field, 'help_text'):
    kwargs['help_text'] = model_field.help_text

if isinstance(model_field, me_fields.DecimalField):
    precision = model_field.precision
    max_value = getattr(model_field, 'max_value', None)
    if max_value is not None:
        max_length = len(str(max_value)) + precision
    else:
        max_length = 65536
    kwargs['decimal_places'] = precision
    kwargs['max_digits'] = max_length

if isinstance(model_field, me_fields.GeoJsonBaseField):
    kwargs['geo_type'] = model_field._type

if (isinstance(model_field, me_fields.SequenceField) or
        model_field.primary_key or model_field.db_field == '_id'):
    # If this field is read-only, then return early.
    # Further keyword arguments are not valid.
    kwargs['read_only'] = True
    return kwargs

if model_field.default and not isinstance(model_field, me_fields.ComplexBaseField):
    kwargs['default'] = model_field.default

if model_field.null:
    kwargs['allow_null'] = True
if model_field.null and isinstance(model_field, me_fields.StringField):
    kwargs['allow_blank'] = True

if 'default' not in kwargs:
    kwargs['required'] = model_field.required
    # handle special cases - compound fields: mongoengine.ListField/DictField
    if kwargs['required'] is True:
        if isinstance(model_field, me_fields.ListField) or isinstance(model_field, me_fields.DictField):
            kwargs['allow_empty'] = False

if model_field.choices:
    # If this model field contains choices, then return early.
    # Further keyword arguments are not valid.
    kwargs['choices'] = model_field.choices
    return kwargs

if isinstance(model_field, me_fields.StringField):
    if model_field.regex:
        kwargs['regex'] = model_field.regex

max_length = getattr(model_field, 'max_length', None)
if max_length is not None and isinstance(model_field, me_fields.StringField):
    kwargs['max_length'] = max_length

min_length = getattr(model_field, 'min_length', None)
if min_length is not None and isinstance(model_field, me_fields.StringField):
    kwargs['min_length'] = min_length

max_value = getattr(model_field, 'max_value', None)
if max_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES):
    kwargs['max_value'] = max_value

min_value = getattr(model_field, 'min_value', None)
if min_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES):
    kwargs['min_value'] = min_value

return kwargs
def get_field_kwargs(field_name, model_field)
Creating a default instance of a basic non-relational field.
2.243674
2.217426
1.011837
model_field, related_model = relation_info
kwargs = {}

if related_model and not issubclass(related_model, EmbeddedDocument):
    kwargs['queryset'] = related_model.objects

if model_field:
    if hasattr(model_field, 'verbose_name') and needs_label(model_field, field_name):
        kwargs['label'] = capfirst(model_field.verbose_name)
    if hasattr(model_field, 'help_text'):
        kwargs['help_text'] = model_field.help_text
    kwargs['required'] = model_field.required
    if model_field.null:
        kwargs['allow_null'] = True
    if getattr(model_field, 'unique', False):
        validator = UniqueValidator(queryset=related_model.objects)
        kwargs['validators'] = [validator]

return kwargs
def get_relation_kwargs(field_name, relation_info)
Creating a default instance of a flat relational field.
2.307058
2.229753
1.03467
kwargs = get_relation_kwargs(field_name, relation_info)
kwargs.pop('queryset')
kwargs.pop('required')
kwargs['read_only'] = True
return kwargs
def get_nested_relation_kwargs(field_name, relation_info)
Creating a default instance of a nested serializer.
3.257339
2.822084
1.154232
'''
Density is the fraction of present connections to possible connections.

Parameters
----------
CIJ : NxN np.ndarray
    directed weighted/binary connection matrix

Returns
-------
kden : float
    density
N : int
    number of vertices
k : int
    number of edges

Notes
-----
Assumes CIJ is directed and has no self-connections.
Weight information is discarded.
'''
n = len(CIJ)
k = np.size(np.where(CIJ.flatten()))
kden = k / (n * n - n)
return kden, n, k
def density_dir(CIJ)
Density is the fraction of present connections to possible connections.

Parameters
----------
CIJ : NxN np.ndarray
    directed weighted/binary connection matrix

Returns
-------
kden : float
    density
N : int
    number of vertices
k : int
    number of edges

Notes
-----
Assumes CIJ is directed and has no self-connections.
Weight information is discarded.
5.416613
1.774659
3.052199
'''
Density is the fraction of present connections to possible connections.

Parameters
----------
CIJ : NxN np.ndarray
    undirected (weighted/binary) connection matrix

Returns
-------
kden : float
    density
N : int
    number of vertices
k : int
    number of edges

Notes
-----
Assumes CIJ is undirected and has no self-connections.
Weight information is discarded.
'''
n = len(CIJ)
k = np.size(np.where(np.triu(CIJ).flatten()))
kden = k / ((n * n - n) / 2)
return kden, n, k
def density_und(CIJ)
Density is the fraction of present connections to possible connections.

Parameters
----------
CIJ : NxN np.ndarray
    undirected (weighted/binary) connection matrix

Returns
-------
kden : float
    density
N : int
    number of vertices
k : int
    number of edges

Notes
-----
Assumes CIJ is undirected and has no self-connections.
Weight information is discarded.
5.262102
1.787529
2.943785
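A tiny worked check of the undirected density formula above; a 3-node triangle has all 3 possible edges, so its density is 1.0:

import numpy as np

CIJ = np.array([[0, 1, 1],
                [1, 0, 1],
                [1, 1, 0]])
n = len(CIJ)
k = np.size(np.where(np.triu(CIJ).flatten()))  # 3 edges in the upper triangle
kden = k / ((n * n - n) / 2)                   # 3 / 3.0 == 1.0
print(kden)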
out = []
if self[name]:
    out += ['.. rubric:: %s' % name, '']
    prefix = getattr(self, '_name', '')

    if prefix:
        prefix = '~%s.' % prefix

    autosum = []
    others = []
    for param, param_type, desc in self[name]:
        param = param.strip()
        if not self._obj or hasattr(self._obj, param):
            autosum += ["   %s%s" % (prefix, param)]
        else:
            others.append((param, param_type, desc))

    if autosum:
        # GAEL: Toctree commented out below because it creates
        # hundreds of sphinx warnings
        # out += ['.. autosummary::', '   :toctree:', '']
        out += ['.. autosummary::', '']
        out += autosum

    if others:
        maxlen_0 = max([len(x[0]) for x in others])
        maxlen_1 = max([len(x[1]) for x in others])
        hdr = "=" * maxlen_0 + "  " + "=" * maxlen_1 + "  " + "=" * 10
        fmt = '%%%ds  %%%ds  ' % (maxlen_0, maxlen_1)
        n_indent = maxlen_0 + maxlen_1 + 4
        out += [hdr]
        for param, param_type, desc in others:
            out += [fmt % (param.strip(), param_type)]
            out += self._str_indent(desc, n_indent)
        out += [hdr]
    out += ['']
return out
def _str_member_list(self, name)
Generate a member listing, autosummary:: table where possible, and a table where not.
3.478632
3.376159
1.030352
'''
Node degree is the number of links connected to the node. The indegree
is the number of inward links and the outdegree is the number of
outward links.

Parameters
----------
CIJ : NxN np.ndarray
    directed binary/weighted connection matrix

Returns
-------
id : Nx1 np.ndarray
    node in-degree
od : Nx1 np.ndarray
    node out-degree
deg : Nx1 np.ndarray
    node degree (in-degree + out-degree)

Notes
-----
Inputs are assumed to be on the columns of the CIJ matrix.
Weight information is discarded.
'''
CIJ = binarize(CIJ, copy=True)  # ensure CIJ is binary
id = np.sum(CIJ, axis=0)  # indegree = column sum of CIJ
od = np.sum(CIJ, axis=1)  # outdegree = row sum of CIJ
deg = id + od  # degree = indegree + outdegree
return id, od, deg
def degrees_dir(CIJ)
Node degree is the number of links connected to the node. The indegree
is the number of inward links and the outdegree is the number of
outward links.

Parameters
----------
CIJ : NxN np.ndarray
    directed binary/weighted connection matrix

Returns
-------
id : Nx1 np.ndarray
    node in-degree
od : Nx1 np.ndarray
    node out-degree
deg : Nx1 np.ndarray
    node degree (in-degree + out-degree)

Notes
-----
Inputs are assumed to be on the columns of the CIJ matrix.
Weight information is discarded.
3.289757
1.519968
2.164359
'''
Node degree is the number of links connected to the node.

Parameters
----------
CIJ : NxN np.ndarray
    undirected binary/weighted connection matrix

Returns
-------
deg : Nx1 np.ndarray
    node degree

Notes
-----
Weight information is discarded.
'''
CIJ = binarize(CIJ, copy=True)  # ensure CIJ is binary
return np.sum(CIJ, axis=0)
def degrees_und(CIJ)
Node degree is the number of links connected to the node.

Parameters
----------
CIJ : NxN np.ndarray
    undirected binary/weighted connection matrix

Returns
-------
deg : Nx1 np.ndarray
    node degree

Notes
-----
Weight information is discarded.
4.70119
2.01598
2.331963
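A matching sanity check for degrees_und on the same triangle graph, with the binarization step replicated inline; every node has degree 2:

import numpy as np

CIJ = np.array([[0, 1, 1],
                [1, 0, 1],
                [1, 1, 0]])
deg = np.sum((CIJ != 0).astype(int), axis=0)  # binarize, then column-sum
print(deg)  # [2 2 2]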
'''
This function returns a matrix in which the value of each element (u,v)
corresponds to the number of nodes that have u outgoing connections and
v incoming connections.

Parameters
----------
CIJ : NxN np.ndarray
    directed binary/weighted connection matrix

Returns
-------
J : ZxZ np.ndarray
    joint degree distribution matrix
    (shifted by one, replicates matlab one-based-indexing)
J_od : int
    number of vertices with od>id
J_id : int
    number of vertices with id>od
J_bl : int
    number of vertices with id==od

Notes
-----
Weights are discarded.
'''
CIJ = binarize(CIJ, copy=True)  # ensure CIJ is binary
n = len(CIJ)
id = np.sum(CIJ, axis=0)  # indegree = column sum of CIJ
od = np.sum(CIJ, axis=1)  # outdegree = row sum of CIJ

# create the joint degree distribution matrix
# note: the matrix is shifted by one, to accommodate zero id and od in the
# first row/column
# upper triangular part of the matrix has vertices with od>id
# lower triangular part has vertices with id>od
# main diagonal has units with id=od
szJ = np.max((id, od)) + 1
J = np.zeros((szJ, szJ))

for i in range(n):
    J[id[i], od[i]] += 1

J_od = np.sum(np.triu(J, 1))
J_id = np.sum(np.tril(J, -1))
J_bl = np.sum(np.diag(J))
return J, J_od, J_id, J_bl
def jdegree(CIJ)
This function returns a matrix in which the value of each element (u,v)
corresponds to the number of nodes that have u outgoing connections and
v incoming connections.

Parameters
----------
CIJ : NxN np.ndarray
    directed binary/weighted connection matrix

Returns
-------
J : ZxZ np.ndarray
    joint degree distribution matrix
    (shifted by one, replicates matlab one-based-indexing)
J_od : int
    number of vertices with od>id
J_id : int
    number of vertices with id>od
J_bl : int
    number of vertices with id==od

Notes
-----
Weights are discarded.
4.081714
2.033333
2.0074
'''
Node strength is the sum of weights of links connected to the node. The
instrength is the sum of inward link weights and the outstrength is the
sum of outward link weights.

Parameters
----------
CIJ : NxN np.ndarray
    directed weighted connection matrix

Returns
-------
is : Nx1 np.ndarray
    node in-strength
os : Nx1 np.ndarray
    node out-strength
str : Nx1 np.ndarray
    node strength (in-strength + out-strength)

Notes
-----
Inputs are assumed to be on the columns of the CIJ matrix.
'''
istr = np.sum(CIJ, axis=0)
ostr = np.sum(CIJ, axis=1)
return istr + ostr
def strengths_dir(CIJ)
Node strength is the sum of weights of links connected to the node. The
instrength is the sum of inward link weights and the outstrength is the
sum of outward link weights.

Parameters
----------
CIJ : NxN np.ndarray
    directed weighted connection matrix

Returns
-------
is : Nx1 np.ndarray
    node in-strength
os : Nx1 np.ndarray
    node out-strength
str : Nx1 np.ndarray
    node strength (in-strength + out-strength)

Notes
-----
Inputs are assumed to be on the columns of the CIJ matrix.
3.672215
1.401278
2.620618
'''
Node strength is the sum of weights of links connected to the node.

Parameters
----------
W : NxN np.ndarray
    undirected connection matrix with positive and negative weights

Returns
-------
Spos : Nx1 np.ndarray
    nodal strength of positive weights
Sneg : Nx1 np.ndarray
    nodal strength of negative weights
vpos : float
    total positive weight
vneg : float
    total negative weight
'''
W = W.copy()
n = len(W)
np.fill_diagonal(W, 0)  # clear diagonal
Spos = np.sum(W * (W > 0), axis=0)  # positive strengths
Sneg = np.sum(W * (W < 0), axis=0)  # negative strengths
vpos = np.sum(W[W > 0])  # positive weight
vneg = np.sum(W[W < 0])  # negative weight
return Spos, Sneg, vpos, vneg
def strengths_und_sign(W)
Node strength is the sum of weights of links connected to the node.

Parameters
----------
W : NxN np.ndarray
    undirected connection matrix with positive and negative weights

Returns
-------
Spos : Nx1 np.ndarray
    nodal strength of positive weights
Sneg : Nx1 np.ndarray
    nodal strength of negative weights
vpos : float
    total positive weight
vneg : float
    total negative weight
2.613986
1.595419
1.638432
'''
This function determines the neighbors of two nodes that are linked by
an edge, and then computes their overlap. Connection matrix must be
binary and undirected. Entries of 'EC' that are 'inf' indicate that no
edge is present. Entries of 'EC' that are 0 denote "local bridges",
i.e. edges that link completely non-overlapping neighborhoods. Low
values of EC indicate edges that are "weak ties". If CIJ is weighted,
the weights are ignored.

Parameters
----------
CIJ : NxN np.ndarray
    undirected binary/weighted connection matrix

Returns
-------
EC : NxN np.ndarray
    edge neighborhood overlap matrix
ec : Kx1 np.ndarray
    edge neighborhood overlap per edge vector
degij : 2xK np.ndarray
    degrees of node pairs connected by each edge
'''
ik, jk = np.where(CIJ)
lel = len(CIJ[ik, jk])
n = len(CIJ)
deg = degrees_und(CIJ)

ec = np.zeros((lel,))
degij = np.zeros((2, lel))
for e in range(lel):
    neiik = np.setdiff1d(np.union1d(
        np.where(CIJ[ik[e], :]), np.where(CIJ[:, ik[e]])), (ik[e], jk[e]))
    neijk = np.setdiff1d(np.union1d(
        np.where(CIJ[jk[e], :]), np.where(CIJ[:, jk[e]])), (ik[e], jk[e]))
    ec[e] = len(np.intersect1d(neiik, neijk)) / \
        len(np.union1d(neiik, neijk))
    degij[:, e] = (deg[ik[e]], deg[jk[e]])

EC = np.tile(np.inf, (n, n))
EC[ik, jk] = ec
return EC, ec, degij
def edge_nei_overlap_bu(CIJ)
This function determines the neighbors of two nodes that are linked by
an edge, and then computes their overlap. Connection matrix must be
binary and undirected. Entries of 'EC' that are 'inf' indicate that no
edge is present. Entries of 'EC' that are 0 denote "local bridges",
i.e. edges that link completely non-overlapping neighborhoods. Low
values of EC indicate edges that are "weak ties". If CIJ is weighted,
the weights are ignored.

Parameters
----------
CIJ : NxN np.ndarray
    undirected binary/weighted connection matrix

Returns
-------
EC : NxN np.ndarray
    edge neighborhood overlap matrix
ec : Kx1 np.ndarray
    edge neighborhood overlap per edge vector
degij : 2xK np.ndarray
    degrees of node pairs connected by each edge
3.803211
1.635012
2.326106
'''
The m-th step generalized topological overlap measure (GTOM) quantifies
the extent to which a pair of nodes have similar m-th step neighbors.
Mth-step neighbors are nodes that are reachable by a path of at most
length m. This function computes the M x M generalized topological
overlap measure (GTOM) matrix for number of steps, numSteps.

Parameters
----------
adj : NxN np.ndarray
    connection matrix
nr_steps : int
    number of steps

Returns
-------
gt : NxN np.ndarray
    GTOM matrix

Notes
-----
When numSteps is equal to 1, GTOM is identical to the topological
overlap measure (TOM) from reference [2]. In that case the 'gt' matrix
records, for each pair of nodes, the fraction of neighbors the two
nodes share in common, where "neighbors" are one step removed. As
'numSteps' is increased, neighbors that are further out are considered.
Elements of 'gt' are bounded between 0 and 1. The 'gt' matrix can be
converted from a similarity to a distance matrix by taking 1-gt.
'''
bm = binarize(adj, copy=True)
bm_aux = bm.copy()
nr_nodes = len(adj)

if nr_steps > nr_nodes:
    print("Warning: nr_steps exceeded nr_nodes. Setting nr_steps=nr_nodes")
    nr_steps = nr_nodes  # actually clamp, as the warning promises

if nr_steps == 0:
    return bm
else:
    for steps in range(2, nr_steps):
        for i in range(nr_nodes):
            # neighbors of node i
            ng_col, = np.where(bm_aux[i, :] == 1)
            # neighbors of neighbors of node i
            nng_row, nng_col = np.where(bm_aux[ng_col, :] == 1)
            new_ng = np.setdiff1d(nng_col, (i,))

            # neighbors of neighbors of i become considered neighbors of i
            bm_aux[i, new_ng] = 1
            bm_aux[new_ng, i] = 1

    # numerator of GTOM formula
    numerator_mat = np.dot(bm_aux, bm_aux) + bm + np.eye(nr_nodes)

    # vector of node degrees
    bms = np.sum(bm_aux, axis=0)
    bms_r = np.tile(bms, (nr_nodes, 1))
    denominator_mat = -bm + np.where(bms_r > bms_r.T, bms_r, bms_r.T) + 1
    return numerator_mat / denominator_mat
def gtom(adj, nr_steps)
The m-th step generalized topological overlap measure (GTOM) quantifies
the extent to which a pair of nodes have similar m-th step neighbors.
Mth-step neighbors are nodes that are reachable by a path of at most
length m. This function computes the M x M generalized topological
overlap measure (GTOM) matrix for number of steps, numSteps.

Parameters
----------
adj : NxN np.ndarray
    connection matrix
nr_steps : int
    number of steps

Returns
-------
gt : NxN np.ndarray
    GTOM matrix

Notes
-----
When numSteps is equal to 1, GTOM is identical to the topological
overlap measure (TOM) from reference [2]. In that case the 'gt' matrix
records, for each pair of nodes, the fraction of neighbors the two
nodes share in common, where "neighbors" are one step removed. As
'numSteps' is increased, neighbors that are further out are considered.
Elements of 'gt' are bounded between 0 and 1. The 'gt' matrix can be
converted from a similarity to a distance matrix by taking 1-gt.
4.74937
2.012497
2.359939
'''
For any two nodes u and v, the matching index computes the amount of
overlap in the connection patterns of u and v. Self-connections and
u-v connections are ignored. The matching index is a symmetric
quantity, similar to a correlation or a dot product.

Parameters
----------
CIJ : NxN np.ndarray
    adjacency matrix

Returns
-------
Min : NxN np.ndarray
    matching index for incoming connections
Mout : NxN np.ndarray
    matching index for outgoing connections
Mall : NxN np.ndarray
    matching index for all connections

Notes
-----
Does not use self- or cross connections for comparison.
Does not use connections that are not present in BOTH u and v.
All output matrices are calculated for upper triangular only.
'''
n = len(CIJ)

Min = np.zeros((n, n))
Mout = np.zeros((n, n))
Mall = np.zeros((n, n))

# compare incoming connections
for i in range(n - 1):
    for j in range(i + 1, n):
        c1i = CIJ[:, i]
        c2i = CIJ[:, j]
        usei = np.logical_or(c1i, c2i)
        usei[i] = 0
        usei[j] = 0
        nconi = np.sum(c1i[usei]) + np.sum(c2i[usei])
        if not nconi:
            Min[i, j] = 0
        else:
            Min[i, j] = 2 * \
                np.sum(np.logical_and(c1i[usei], c2i[usei])) / nconi

        c1o = CIJ[i, :]
        c2o = CIJ[j, :]
        useo = np.logical_or(c1o, c2o)
        useo[i] = 0
        useo[j] = 0
        ncono = np.sum(c1o[useo]) + np.sum(c2o[useo])
        if not ncono:
            Mout[i, j] = 0
        else:
            Mout[i, j] = 2 * \
                np.sum(np.logical_and(c1o[useo], c2o[useo])) / ncono

        c1a = np.ravel((c1i, c1o))
        c2a = np.ravel((c2i, c2o))
        usea = np.logical_or(c1a, c2a)
        usea[i] = 0
        usea[i + n] = 0
        usea[j] = 0
        usea[j + n] = 0
        ncona = np.sum(c1a[usea]) + np.sum(c2a[usea])
        if not ncona:
            Mall[i, j] = 0
        else:
            Mall[i, j] = 2 * \
                np.sum(np.logical_and(c1a[usea], c2a[usea])) / ncona

Min = Min + Min.T
Mout = Mout + Mout.T
Mall = Mall + Mall.T

return Min, Mout, Mall
def matching_ind(CIJ)
For any two nodes u and v, the matching index computes the amount of
overlap in the connection patterns of u and v. Self-connections and
u-v connections are ignored. The matching index is a symmetric
quantity, similar to a correlation or a dot product.

Parameters
----------
CIJ : NxN np.ndarray
    adjacency matrix

Returns
-------
Min : NxN np.ndarray
    matching index for incoming connections
Mout : NxN np.ndarray
    matching index for outgoing connections
Mall : NxN np.ndarray
    matching index for all connections

Notes
-----
Does not use self- or cross connections for comparison.
Does not use connections that are not present in BOTH u and v.
All output matrices are calculated for upper triangular only.
2.003707
1.329492
1.507122
'''
M0 = MATCHING_IND_UND(CIJ) computes matching index for undirected graph
specified by adjacency matrix CIJ. Matching index is a measure of
similarity between two nodes' connectivity profiles (excluding their
mutual connection, should it exist).

Parameters
----------
CIJ : NxN np.ndarray
    undirected adjacency matrix

Returns
-------
M0 : NxN np.ndarray
    matching index matrix
'''
K = np.sum(CIJ0, axis=0)
n = len(CIJ0)
R = (K != 0)
N = np.sum(R)
xR, = np.where(R == 0)
CIJ = np.delete(np.delete(CIJ0, xR, axis=0), xR, axis=1)
I = np.logical_not(np.eye(N))
M = np.zeros((N, N))

for i in range(N):
    c1 = CIJ[i, :]
    use = np.logical_or(c1, CIJ)
    use[:, i] = 0
    use *= I

    ncon1 = c1 * use
    ncon2 = c1 * CIJ
    ncon = np.sum(ncon1 + ncon2, axis=1)
    # (a stray debug print of ncon was removed here)

    M[:, i] = 2 * np.sum(np.logical_and(ncon1, ncon2), axis=1) / ncon

M *= I
M[np.isnan(M)] = 0

M0 = np.zeros((n, n))
yR, = np.where(R)
M0[np.ix_(yR, yR)] = M
return M0
def matching_ind_und(CIJ0)
M0 = MATCHING_IND_UND(CIJ) computes matching index for undirected graph
specified by adjacency matrix CIJ. Matching index is a measure of
similarity between two nodes' connectivity profiles (excluding their
mutual connection, should it exist).

Parameters
----------
CIJ : NxN np.ndarray
    undirected adjacency matrix

Returns
-------
M0 : NxN np.ndarray
    matching index matrix
3.305867
2.367112
1.396582
'''
Calculates pairwise dice similarity for each vertex between two
matrices. Treats the matrices as binary and undirected.

Parameters
----------
A1 : NxN np.ndarray
    Matrix 1
A2 : NxN np.ndarray
    Matrix 2

Returns
-------
D : Nx1 np.ndarray
    dice similarity vector
'''
a1 = binarize(a1, copy=True)
a2 = binarize(a2, copy=True)  # ensure matrices are binary

n = len(a1)
np.fill_diagonal(a1, 0)
np.fill_diagonal(a2, 0)  # set diagonals to 0

d = np.zeros((n,))  # dice similarity

# calculate the common neighbors for each vertex
for i in range(n):
    d[i] = 2 * (np.sum(np.logical_and(a1[:, i], a2[:, i])) /
                (np.sum(a1[:, i]) + np.sum(a2[:, i])))

return d
def dice_pairwise_und(a1, a2)
Calculates pairwise dice similarity for each vertex between two
matrices. Treats the matrices as binary and undirected.

Parameters
----------
A1 : NxN np.ndarray
    Matrix 1
A2 : NxN np.ndarray
    Matrix 2

Returns
-------
D : Nx1 np.ndarray
    dice similarity vector
3.120005
1.9469
1.60255
'''
Returns the correlation coefficient between two flattened adjacency
matrices. Only the upper triangular part is used to avoid double
counting undirected matrices. Similarity metric for weighted matrices.

Parameters
----------
A1 : NxN np.ndarray
    undirected matrix 1
A2 : NxN np.ndarray
    undirected matrix 2

Returns
-------
r : float
    Correlation coefficient describing edgewise similarity of a1 and a2
'''
n = len(a1)
if len(a2) != n:
    raise BCTParamError("Cannot calculate flattened correlation on "
                        "matrices of different size")
triu_ix = np.where(np.triu(np.ones((n, n)), 1))

return np.corrcoef(a1[triu_ix].flat, a2[triu_ix].flat)[0][1]
def corr_flat_und(a1, a2)
Returns the correlation coefficient between two flattened adjacency
matrices. Only the upper triangular part is used to avoid double
counting undirected matrices. Similarity metric for weighted matrices.

Parameters
----------
A1 : NxN np.ndarray
    undirected matrix 1
A2 : NxN np.ndarray
    undirected matrix 2

Returns
-------
r : float
    Correlation coefficient describing edgewise similarity of a1 and a2
4.063087
1.945834
2.088096
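A self-contained sketch of the strictly-upper-triangle correlation used above, on two hand-made symmetric matrices:

import numpy as np

a1 = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=float)
a2 = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]], dtype=float)
n = len(a1)
triu_ix = np.where(np.triu(np.ones((n, n)), 1))  # strictly-upper indices
r = np.corrcoef(a1[triu_ix].flat, a2[triu_ix].flat)[0][1]
print(r)  # 0.5: upper triangles [1, 0, 1] vs [1, 0, 0]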
'''
Returns the correlation coefficient between two flattened adjacency
matrices. Similarity metric for weighted matrices.

Parameters
----------
A1 : NxN np.ndarray
    directed matrix 1
A2 : NxN np.ndarray
    directed matrix 2

Returns
-------
r : float
    Correlation coefficient describing edgewise similarity of a1 and a2
'''
n = len(a1)
if len(a2) != n:
    raise BCTParamError("Cannot calculate flattened correlation on "
                        "matrices of different size")
ix = np.logical_not(np.eye(n))

return np.corrcoef(a1[ix].flat, a2[ix].flat)[0][1]
def corr_flat_dir(a1, a2)
Returns the correlation coefficient between two flattened adjacency
matrices. Similarity metric for weighted matrices.

Parameters
----------
A1 : NxN np.ndarray
    directed matrix 1
A2 : NxN np.ndarray
    directed matrix 2

Returns
-------
r : float
    Correlation coefficient describing edgewise similarity of a1 and a2
4.360463
2.251961
1.936296
'''
(X,Y,INDSORT) = GRID_COMMUNITIES(C) takes a vector of community
assignments C and returns three output arguments for visualizing the
communities. The third is INDSORT, which is an ordering of the vertices
so that nodes with the same community assignment are next to one
another. The first two arguments are vectors that, when overlaid on the
adjacency matrix using the PLOT function, highlight the communities.

Parameters
----------
c : Nx1 np.ndarray
    community assignments

Returns
-------
bounds : list
    list containing the communities
indsort : np.ndarray
    indices

Notes
-----
Note: This function returns considerably different values than in
matlab due to differences between matplotlib and matlab. This function
has been designed to work with matplotlib, as in the following example:

    ci, _ = modularity_und(adj)
    bounds, ixes = grid_communities(ci)
    pylab.imshow(adj[np.ix_(ixes, ixes)], interpolation='none', cmap='BuGn')
    for b in bounds:
        pylab.axvline(x=b, color='red')
        pylab.axhline(y=b, color='red')

Note that I adapted the idea from the matlab function of the same name,
and have not tested the functionality extensively.
'''
c = c.copy()
nr_c = np.max(c)
ixes = np.argsort(c)
c = c[ixes]

bounds = []

for i in range(nr_c):
    ind = np.where(c == i + 1)
    if np.size(ind):
        mn = np.min(ind) - .5
        mx = np.max(ind) + .5
        bounds.extend([mn, mx])

bounds = np.unique(bounds)
return bounds, ixes
def grid_communities(c)
(X,Y,INDSORT) = GRID_COMMUNITIES(C) takes a vector of community
assignments C and returns three output arguments for visualizing the
communities. The third is INDSORT, which is an ordering of the vertices
so that nodes with the same community assignment are next to one
another. The first two arguments are vectors that, when overlaid on the
adjacency matrix using the PLOT function, highlight the communities.

Parameters
----------
c : Nx1 np.ndarray
    community assignments

Returns
-------
bounds : list
    list containing the communities
indsort : np.ndarray
    indices

Notes
-----
Note: This function returns considerably different values than in
matlab due to differences between matplotlib and matlab. This function
has been designed to work with matplotlib, as in the following example:

    ci, _ = modularity_und(adj)
    bounds, ixes = grid_communities(ci)
    pylab.imshow(adj[np.ix_(ixes, ixes)], interpolation='none', cmap='BuGn')
    for b in bounds:
        pylab.axvline(x=b, color='red')
        pylab.axhline(y=b, color='red')

Note that I adapted the idea from the matlab function of the same name,
and have not tested the functionality extensively.
5.789155
1.461806
3.960275
'''
This function reorders the connectivity matrix in order to place more
edges closer to the diagonal. This often helps in displaying community
structure, clusters, etc.

Parameters
----------
MAT : NxN np.ndarray
    connection matrix
H : int
    number of reordering attempts
cost : str
    'line' or 'circ' for shape of lattice (linear or ring lattice).
    Default is linear lattice.

Returns
-------
MATreordered : NxN np.ndarray
    reordered connection matrix
MATindices : Nx1 np.ndarray
    reordered indices
MATcost : float
    objective function cost of reordered matrix

Notes
-----
I'm not 100% sure how the algorithms between this and reorder_matrix
differ, but this code looks a ton sketchier and might have had some
minor bugs in it. Considering reorder_matrix() does the same thing
using a well vetted simulated annealing algorithm, just use that.
~rlaplant
'''
from scipy import linalg, stats
rng = get_rng(None)  # this signature has no seed parameter; rng was
                     # previously undefined, so use the global random state
m = m.copy()
n = len(m)
np.fill_diagonal(m, 0)

# generate cost function
if cost == 'line':
    profile = stats.norm.pdf(range(1, n + 1), 0, n / 2)[::-1]
elif cost == 'circ':
    profile = stats.norm.pdf(range(1, n + 1), n / 2, n / 4)[::-1]
else:
    raise BCTParamError('dfun must be line or circ')
costf = linalg.toeplitz(profile, r=profile)

lowcost = np.sum(costf * m)

# keep track of starting configuration
m_start = m.copy()
starta = np.arange(n)

# reorder
for h in range(H):
    a = np.arange(n)
    # choose two positions and flip them
    r1, r2 = rng.randint(n, size=(2,))
    a[r1] = r2
    a[r2] = r1
    costnew = np.sum((m[np.ix_(a, a)]) * costf)
    # if this reduced the overall cost
    if costnew < lowcost:
        m = m[np.ix_(a, a)]
        r2_swap = starta[r2]
        r1_swap = starta[r1]
        starta[r1] = r2_swap
        starta[r2] = r1_swap
        lowcost = costnew

M_reordered = m_start[np.ix_(starta, starta)]
m_indices = starta
cost = lowcost
return M_reordered, m_indices, cost
def reorderMAT(m, H=5000, cost='line')
This function reorders the connectivity matrix in order to place more
edges closer to the diagonal. This often helps in displaying community
structure, clusters, etc.

Parameters
----------
MAT : NxN np.ndarray
    connection matrix
H : int
    number of reordering attempts
cost : str
    'line' or 'circ' for shape of lattice (linear or ring lattice).
    Default is linear lattice.

Returns
-------
MATreordered : NxN np.ndarray
    reordered connection matrix
MATindices : Nx1 np.ndarray
    reordered indices
MATcost : float
    objective function cost of reordered matrix

Notes
-----
I'm not 100% sure how the algorithms between this and reorder_matrix
differ, but this code looks a ton sketchier and might have had some
minor bugs in it. Considering reorder_matrix() does the same thing
using a well vetted simulated annealing algorithm, just use that.
~rlaplant
4.798749
2.151283
2.230646
'''
This function writes a Pajek .net file from a numpy matrix

Parameters
----------
CIJ : NxN np.ndarray
    adjacency matrix
fname : str
    filename
directed : bool
    True if the network is directed and False otherwise. The data
    format may be required to know this for some reason so I am afraid
    to just use directed as the default value.
'''
n = np.size(CIJ, axis=0)
with open(fname, 'w') as fd:
    fd.write('*vertices %i \r' % n)
    for i in range(1, n + 1):
        fd.write('%i "%i" \r' % (i, i))
    if directed:
        fd.write('*arcs \r')
    else:
        fd.write('*edges \r')
    for i in range(n):
        for j in range(n):
            if CIJ[i, j] != 0:
                fd.write('%i %i %.6f \r' % (i + 1, j + 1, CIJ[i, j]))
def writetoPAJ(CIJ, fname, directed)
This function writes a Pajek .net file from a numpy matrix

Parameters
----------
CIJ : NxN np.ndarray
    adjacency matrix
fname : str
    filename
directed : bool
    True if the network is directed and False otherwise. The data
    format may be required to know this for some reason so I am afraid
    to just use directed as the default value.
2.885186
1.668153
1.729569
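A usage sketch for the Pajek writer above on a 2-node directed graph, with the file contents it produces:

import numpy as np

CIJ = np.array([[0.0, 0.5],
                [0.0, 0.0]])
writetoPAJ(CIJ, 'tiny.net', directed=True)
# tiny.net now contains ('\r'-terminated lines):
#   *vertices 2
#   1 "1"
#   2 "2"
#   *arcs
#   1 2 0.500000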
'''
This function generates a random, directed network with a specified
number of fully connected modules linked together by evenly distributed
remaining random connections.

Parameters
----------
N : int
    number of vertices (must be power of 2)
K : int
    number of edges
sz_cl : int
    size of clusters (must be power of 2)
seed : hashable, optional
    If None (default), use the np.random's global random state to
    generate random numbers. Otherwise, use a new np.random.RandomState
    instance seeded with the given value.

Returns
-------
CIJ : NxN np.ndarray
    connection matrix

Notes
-----
N must be a power of 2. A warning is generated if all modules contain
more edges than K. Cluster size is 2^sz_cl.
'''
rng = get_rng(seed)

# compute number of hierarchical levels and adjust cluster size
mx_lvl = int(np.floor(np.log2(n)))
sz_cl -= 1

# make a stupid little template
t = np.ones((2, 2)) * 2

# check n against the number of levels
Nlvl = 2**mx_lvl
if Nlvl != n:
    print("Warning: n must be a power of 2")
n = Nlvl

# create hierarchical template
for lvl in range(1, mx_lvl):
    s = 2**(lvl + 1)
    CIJ = np.ones((s, s))
    grp1 = range(int(s / 2))
    grp2 = range(int(s / 2), s)
    ix1 = np.add.outer(np.array(grp1) * s, grp1).flatten()
    ix2 = np.add.outer(np.array(grp2) * s, grp2).flatten()
    CIJ.flat[ix1] = t  # numpy indexing is teh sucks :(
    CIJ.flat[ix2] = t
    CIJ += 1
    t = CIJ.copy()

CIJ -= (np.ones((s, s)) + mx_lvl * np.eye(s))

# assign connection probabilities
CIJp = (CIJ >= (mx_lvl - sz_cl))

# determine nr of non-cluster connections left and their possible positions
rem_k = k - np.size(np.where(CIJp.flatten()))
if rem_k < 0:
    print("Warning: K is too small, output matrix contains clusters only")
    return CIJp

a, b = np.where(np.logical_not(CIJp + np.eye(n)))

# assign remK randomly distributed connections
rp = rng.permutation(len(a))
a = a[rp[:rem_k]]
b = b[rp[:rem_k]]
for ai, bi in zip(a, b):
    CIJp[ai, bi] = 1

return np.array(CIJp, dtype=int)
def makeevenCIJ(n, k, sz_cl, seed=None)
This function generates a random, directed network with a specified
number of fully connected modules linked together by evenly distributed
remaining random connections.

Parameters
----------
N : int
    number of vertices (must be power of 2)
K : int
    number of edges
sz_cl : int
    size of clusters (must be power of 2)
seed : hashable, optional
    If None (default), use the np.random's global random state to
    generate random numbers. Otherwise, use a new np.random.RandomState
    instance seeded with the given value.

Returns
-------
CIJ : NxN np.ndarray
    connection matrix

Notes
-----
N must be a power of 2. A warning is generated if all modules contain
more edges than K. Cluster size is 2^sz_cl.
5.002484
3.068448
1.630298
'''
This function generates a directed network with a hierarchical modular
organization. All modules are fully connected and connection density
decays as 1/(E^n), with n = index of hierarchical level.

Parameters
----------
mx_lvl : int
    number of hierarchical levels, N = 2^mx_lvl
E : int
    connection density fall off per level
sz_cl : int
    size of clusters (must be power of 2)
seed : hashable, optional
    If None (default), use the np.random's global random state to
    generate random numbers. Otherwise, use a new np.random.RandomState
    instance seeded with the given value.

Returns
-------
CIJ : NxN np.ndarray
    connection matrix
K : int
    number of connections present in output CIJ
'''
rng = get_rng(seed)

# make a stupid little template
t = np.ones((2, 2)) * 2

# compute N and cluster size
n = 2**mx_lvl
sz_cl -= 1

for lvl in range(1, mx_lvl):
    s = 2**(lvl + 1)
    CIJ = np.ones((s, s))
    grp1 = range(int(s / 2))
    grp2 = range(int(s / 2), s)
    ix1 = np.add.outer(np.array(grp1) * s, grp1).flatten()
    ix2 = np.add.outer(np.array(grp2) * s, grp2).flatten()
    CIJ.flat[ix1] = t  # numpy indexing is teh sucks :(
    CIJ.flat[ix2] = t
    CIJ += 1
    t = CIJ.copy()

CIJ -= (np.ones((s, s)) + mx_lvl * np.eye(s))

# assign connection probabilities
ee = mx_lvl - CIJ - sz_cl
ee = (ee > 0) * ee
prob = (1 / E**ee) * (np.ones((s, s)) - np.eye(s))
CIJ = (prob > rng.random_sample((n, n)))

# count connections
k = np.sum(CIJ)

return np.array(CIJ, dtype=int), k
def makefractalCIJ(mx_lvl, E, sz_cl, seed=None)
This function generates a directed network with a hierarchical modular
organization. All modules are fully connected and connection density
decays as 1/(E^n), with n = index of hierarchical level.

Parameters
----------
mx_lvl : int
    number of hierarchical levels, N = 2^mx_lvl
E : int
    connection density fall off per level
sz_cl : int
    size of clusters (must be power of 2)
seed : hashable, optional
    If None (default), use the np.random's global random state to
    generate random numbers. Otherwise, use a new np.random.RandomState
    instance seeded with the given value.

Returns
-------
CIJ : NxN np.ndarray
    connection matrix
K : int
    number of connections present in output CIJ
4.940172
2.511645
1.966907
'''
This function generates a directed random network with a specified
in-degree and out-degree sequence.

Parameters
----------
inv : Nx1 np.ndarray
    in-degree vector
outv : Nx1 np.ndarray
    out-degree vector
seed : hashable, optional
    If None (default), use the np.random's global random state to
    generate random numbers. Otherwise, use a new np.random.RandomState
    instance seeded with the given value.

Returns
-------
CIJ : NxN np.ndarray

Notes
-----
Necessary conditions include:
    length(in) = length(out) = n
    sum(in) = sum(out) = k
    in(i), out(i) < n-1
    in(i) + out(j) < n+2
    in(i) + out(i) < n

No connections are placed on the main diagonal

The algorithm used in this function is not, technically, guaranteed to
terminate. If a valid distribution of in and out degrees is provided,
this function will find it in bounded time with probability
1-(1/(2*(k^2))). This turns out to be a serious problem when computing
infinite degree matrices, but offers good performance otherwise.
'''
rng = get_rng(seed)
n = len(inv)
k = np.sum(inv)

in_inv = np.zeros((k,))
out_inv = np.zeros((k,))
i_in = 0
i_out = 0

for i in range(n):
    in_inv[i_in:i_in + inv[i]] = i
    out_inv[i_out:i_out + outv[i]] = i
    i_in += inv[i]
    i_out += outv[i]

CIJ = np.eye(n)
edges = np.array((out_inv, in_inv[rng.permutation(k)]))

# create CIJ and check for double edges and self connections
for i in range(k):
    if CIJ[edges[0, i], edges[1, i]]:
        tried = set()
        while True:
            if len(tried) == k:
                raise BCTParamError('Could not resolve the given '
                                    'in and out vectors')
            switch = rng.randint(k)
            while switch in tried:
                switch = rng.randint(k)
            if not (CIJ[edges[0, i], edges[1, switch]] or
                    CIJ[edges[0, switch], edges[1, i]]):
                CIJ[edges[0, switch], edges[1, switch]] = 0
                CIJ[edges[0, switch], edges[1, i]] = 1
                if switch < i:
                    CIJ[edges[0, switch], edges[1, switch]] = 0
                    CIJ[edges[0, switch], edges[1, i]] = 1
                t = edges[1, i]
                edges[1, i] = edges[1, switch]
                edges[1, switch] = t
                break
            tried.add(switch)
    else:
        CIJ[edges[0, i], edges[1, i]] = 1

CIJ -= np.eye(n)
return CIJ
def makerandCIJdegreesfixed(inv, outv, seed=None)
This function generates a directed random network with a specified
in-degree and out-degree sequence.

Parameters
----------
inv : Nx1 np.ndarray
    in-degree vector
outv : Nx1 np.ndarray
    out-degree vector
seed : hashable, optional
    If None (default), use the np.random's global random state to
    generate random numbers. Otherwise, use a new np.random.RandomState
    instance seeded with the given value.

Returns
-------
CIJ : NxN np.ndarray

Notes
-----
Necessary conditions include:
    length(in) = length(out) = n
    sum(in) = sum(out) = k
    in(i), out(i) < n-1
    in(i) + out(j) < n+2
    in(i) + out(i) < n

No connections are placed on the main diagonal

The algorithm used in this function is not, technically, guaranteed to
terminate. If a valid distribution of in and out degrees is provided,
this function will find it in bounded time with probability
1-(1/(2*(k^2))). This turns out to be a serious problem when computing
infinite degree matrices, but offers good performance otherwise.
3.436056
1.675727
2.050487
'''
This function generates a directed random network

Parameters
----------
N : int
    number of vertices
K : int
    number of edges
seed : hashable, optional
    If None (default), use the np.random's global random state to
    generate random numbers. Otherwise, use a new np.random.RandomState
    instance seeded with the given value.

Returns
-------
CIJ : NxN np.ndarray
    directed random connection matrix

Notes
-----
no connections are placed on the main diagonal.
'''
rng = get_rng(seed)
ix, = np.where(np.logical_not(np.eye(n)).flat)
rp = rng.permutation(np.size(ix))

CIJ = np.zeros((n, n))
CIJ.flat[ix[rp][:k]] = 1
return CIJ
def makerandCIJ_dir(n, k, seed=None)
This function generates a directed random network

Parameters
----------
N : int
    number of vertices
K : int
    number of edges
seed : hashable, optional
    If None (default), use the np.random's global random state to
    generate random numbers. Otherwise, use a new np.random.RandomState
    instance seeded with the given value.

Returns
-------
CIJ : NxN np.ndarray
    directed random connection matrix

Notes
-----
no connections are placed on the main diagonal.
5.304288
2.050092
2.587341
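A quick property check for the generator above: exactly k edges and no self-connections:

import numpy as np

CIJ = makerandCIJ_dir(10, 25, seed=42)
assert np.sum(CIJ) == 25           # exactly k edges placed
assert not np.any(np.diag(CIJ))    # empty main diagonal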
'''
This function generates a directed lattice network with toroidal
boundary conditions (i.e. with ring-like "wrapping around").

Parameters
----------
N : int
    number of vertices
K : int
    number of edges
seed : hashable, optional
    If None (default), use the np.random's global random state to
    generate random numbers. Otherwise, use a new np.random.RandomState
    instance seeded with the given value.

Returns
-------
CIJ : NxN np.ndarray
    connection matrix

Notes
-----
The lattice is made by placing connections as close as possible to the
main diagonal, with wrapping around. No connections are made on the
main diagonal. In/Outdegree is kept approx. constant at K/N.
'''
rng = get_rng(seed)

# initialize
CIJ = np.zeros((n, n))
CIJ1 = np.ones((n, n))
kk = 0
count = 0
seq = range(1, n)
seq2 = range(n - 1, 0, -1)

# fill in
while kk < k:
    count += 1
    dCIJ = np.triu(CIJ1, seq[count]) - np.triu(CIJ1, seq[count] + 1)
    dCIJ2 = np.triu(CIJ1, seq2[count]) - np.triu(CIJ1, seq2[count] + 1)
    dCIJ = dCIJ + dCIJ.T + dCIJ2 + dCIJ2.T
    CIJ += dCIJ
    kk = int(np.sum(CIJ))

# remove excess connections
overby = kk - k
if overby:
    i, j = np.where(dCIJ)
    rp = rng.permutation(np.size(i))
    for ii in range(overby):
        CIJ[i[rp[ii]], j[rp[ii]]] = 0

return CIJ
def makeringlatticeCIJ(n, k, seed=None)
This function generates a directed lattice network with toroidal boundary conditions (i.e. with ring-like "wrapping around"). Parameters ---------- N : int number of vertices K : int number of edges seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- CIJ : NxN np.ndarray connection matrix Notes ----- The lattice is made by placing connections as close as possible to the main diagonal, with wrapping around. No connections are made on the main diagonal. In/Outdegree is kept approx. constant at K/N.
4.559684
2.061609
2.211711
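A minimal sketch with the index fix above (numpy as np assumed); with k = 2*n the first pass fills exactly the nearest-neighbour ring in both directions:

CIJ = makeringlatticeCIJ(10, 20, seed=1)
assert CIJ.sum() == 20
assert CIJ[0, 1] == 1 and CIJ[0, 9] == 1  # ring wraps around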
''' This function generates a directed network with a Gaussian drop-off in edge density with increasing distance from the main diagonal. There are no toroidal boundary conditions (i.e. no ring-like "wrapping around"). Parameters ---------- N : int number of vertices K : int number of edges s : float standard deviation of toeplitz seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- CIJ : NxN np.ndarray connection matrix Notes ----- no connections are placed on the main diagonal. '''
rng = get_rng(seed)
from scipy import linalg, stats
pf = stats.norm.pdf(range(1, n), .5, s)
template = linalg.toeplitz(np.append((0,), pf), r=np.append((0,), pf))
template *= (k / np.sum(template))
CIJ = np.zeros((n, n))
itr = 0
while np.sum(CIJ) != k:
    CIJ = (rng.random_sample((n, n)) < template)
    itr += 1
    if itr > 10000:
        raise BCTParamError('Infinite loop was caught generating toeplitz '
                            'matrix. This means the matrix could not be resolved with the '
                            'specified parameters.')
return CIJ
def maketoeplitzCIJ(n, k, s, seed=None)
This function generates a directed network with a Gaussian drop-off in edge density with increasing distance from the main diagonal. There are no toroidal boundary conditions (i.e. no ring-like "wrapping around"). Parameters ---------- N : int number of vertices K : int number of edges s : float standard deviation of toeplitz seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- CIJ : NxN np.ndarray connection matrix Notes ----- no connections are placed on the main diagonal.
6.575495
2.454883
2.678538
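A minimal usage sketch (numpy as np assumed); the function resamples until exactly k edges land:

CIJ = maketoeplitzCIJ(50, 200, 5, seed=1)  # s = 5 sets the Gaussian drop-off
assert int(CIJ.sum()) == 200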
''' This function randomizes a directed network, while preserving the in- and out-degree distributions. In weighted networks, the function preserves the out-strength but not the in-strength distributions. Parameters ---------- W : NxN np.ndarray directed binary/weighted connection matrix itr : int rewiring parameter. Each edge is rewired approximately itr times. seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- R : NxN np.ndarray randomized network eff : int number of actual rewirings carried out '''
rng = get_rng(seed)
R = R.copy()
n = len(R)
i, j = np.where(R)
k = len(i)
itr *= k
max_attempts = np.round(n * k / (n * (n - 1)))
eff = 0
for it in range(int(itr)):
    att = 0
    while att <= max_attempts:  # while not rewired
        while True:
            e1 = rng.randint(k)
            e2 = rng.randint(k)
            while e1 == e2:
                e2 = rng.randint(k)
            a = i[e1]
            b = j[e1]
            c = i[e2]
            d = j[e2]
            if a != c and a != d and b != c and b != d:
                break  # all 4 vertices must be different
        # rewiring condition
        if not (R[a, d] or R[c, b]):
            R[a, d] = R[a, b]
            R[a, b] = 0
            R[c, b] = R[c, d]
            R[c, d] = 0
            j.setflags(write=True)
            # edge e1 is now a->d and edge e2 is c->b; only the targets
            # change, so only j needs updating
            j[e1] = d
            j[e2] = b  # reassign edge indices
            eff += 1
            break
        att += 1
return R, eff
def randmio_dir(R, itr, seed=None)
This function randomizes a directed network, while preserving the in- and out-degree distributions. In weighted networks, the function preserves the out-strength but not the in-strength distributions. Parameters ---------- W : NxN np.ndarray directed binary/weighted connection matrix itr : int rewiring parameter. Each edge is rewired approximately itr times. seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- R : NxN np.ndarray randomized network eff : int number of actual rewirings carried out
3.468455
1.902794
1.822823
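A minimal usage sketch, reusing the generator defined earlier in this collection (numpy as np assumed):

W = makerandCIJ_dir(20, 80, seed=1)
R, eff = randmio_dir(W, itr=5, seed=1)
assert np.all(R.sum(axis=0) == W.sum(axis=0))  # in-degrees preserved
assert np.all(R.sum(axis=1) == W.sum(axis=1))  # out-degrees preserved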
''' This function randomizes an undirected network, while preserving the degree distribution. The function does not preserve the strength distribution in weighted networks. Parameters ---------- W : NxN np.ndarray undirected binary/weighted connection matrix itr : int rewiring parameter. Each edge is rewired approximately itr times. seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- R : NxN np.ndarray randomized network eff : int number of actual rewirings carried out ''' if not np.all(R == R.T): raise BCTParamError("Input must be undirected") rng = get_rng(seed) R = R.copy() n = len(R) i, j = np.where(np.tril(R)) k = len(i) itr *= k # maximum number of rewiring attempts per iteration max_attempts = np.round(n * k / (n * (n - 1))) # actual number of successful rewirings eff = 0 for it in range(int(itr)): att = 0 while att <= max_attempts: # while not rewired while True: e1, e2 = rng.randint(k, size=(2,)) while e1 == e2: e2 = rng.randint(k) a = i[e1] b = j[e1] c = i[e2] d = j[e2] if a != c and a != d and b != c and b != d: break # all 4 vertices must be different if rng.random_sample() > .5: i.setflags(write=True) j.setflags(write=True) i[e2] = d j[e2] = c # flip edge c-d with 50% probability c = i[e2] d = j[e2] # to explore all potential rewirings # rewiring condition if not (R[a, d] or R[c, b]): R[a, d] = R[a, b] R[a, b] = 0 R[d, a] = R[b, a] R[b, a] = 0 R[c, b] = R[c, d] R[c, d] = 0 R[b, c] = R[d, c] R[d, c] = 0 j.setflags(write=True) j[e1] = d j[e2] = b # reassign edge indices eff += 1 break att += 1 return R, eff
def randmio_und(R, itr, seed=None)
This function randomizes an undirected network, while preserving the degree distribution. The function does not preserve the strength distribution in weighted networks. Parameters ---------- W : NxN np.ndarray undirected binary/weighted connection matrix itr : int rewiring parameter. Each edge is rewired approximately itr times. seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- R : NxN np.ndarray randomized network eff : int number of actual rewirings carried out
3.291239
2.198465
1.497062
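A minimal usage sketch (numpy as np assumed); the input is symmetrized first, since the function rejects directed matrices:

A = makerandCIJ_dir(20, 60, seed=1)
W = np.logical_or(A, A.T).astype(float)  # make an undirected matrix
R, eff = randmio_und(W, itr=5, seed=1)
assert np.all(R.sum(axis=1) == W.sum(axis=1))  # degree sequence preserved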
''' This function randomizes an undirected weighted network with positive and negative weights, while simultaneously preserving the degree distribution of positive and negative weights. The function does not preserve the strength distribution in weighted networks. Parameters ---------- W : NxN np.ndarray undirected binary/weighted connection matrix itr : int rewiring parameter. Each edge is rewired approximately itr times. seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- R : NxN np.ndarray randomized network eff : int number of actual rewirings carried out '''
rng = get_rng(seed)
R = R.copy()
n = len(R)
itr *= int(n * (n - 1) / 2)
max_attempts = int(np.round(n / 2))
eff = 0
for it in range(int(itr)):
    att = 0
    while att <= max_attempts:
        a, b, c, d = pick_four_unique_nodes_quickly(n, rng)
        r0_ab = R[a, b]
        r0_cd = R[c, d]
        r0_ad = R[a, d]
        r0_cb = R[c, b]
        # rewiring condition: only swap weights between sign-compatible
        # edge pairs, so positive and negative degrees are preserved
        if (np.sign(r0_ab) == np.sign(r0_cd) and
                np.sign(r0_ad) == np.sign(r0_cb) and
                np.sign(r0_ab) != np.sign(r0_ad)):
            R[a, d] = R[d, a] = r0_ab
            R[a, b] = R[b, a] = r0_ad
            R[c, b] = R[b, c] = r0_cd
            R[c, d] = R[d, c] = r0_cb
            eff += 1
            break
        att += 1
return R, eff
def randmio_und_signed(R, itr, seed=None)
This function randomizes an undirected weighted network with positive and negative weights, while simultaneously preserving the degree distribution of positive and negative weights. The function does not preserve the strength distribution in weighted networks. Parameters ---------- W : NxN np.ndarray undirected binary/weighted connection matrix itr : int rewiring parameter. Each edge is rewired approximately itr times. seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- R : NxN np.ndarray randomized network eff : int number of actual rewirings carried out
3.45406
1.989981
1.735725
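A minimal usage sketch on a dense signed matrix (numpy as np assumed; the input is illustrative):

rng = np.random.RandomState(1)
W = rng.randn(10, 10)
W = (W + W.T) / 2       # symmetrize
np.fill_diagonal(W, 0)
R, eff = randmio_und_signed(W, itr=2, seed=1)
# weights are swapped only between sign-compatible edge pairs, so the
# positive and negative degree distributions are preserved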
''' A = RANDOMIZE_GRAPH_PARTIAL_UND(A,B,MAXSWAP) takes adjacency matrices A and B and attempts to randomize matrix A by performing MAXSWAP rewirings. The rewirings will avoid any spots where matrix B is nonzero. Parameters ---------- A : NxN np.ndarray undirected adjacency matrix to randomize B : NxN np.ndarray mask; edges to avoid maxswap : int number of rewirings seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- A : NxN np.ndarray randomized matrix Notes ----- 1. Graph may become disconnected as a result of rewiring. Always important to check. 2. A can be weighted, though the weighted degree sequence will not be preserved. 3. A must be undirected. ''' rng = get_rng(seed) A = A.copy() i, j = np.where(np.triu(A, 1)) i.setflags(write=True) j.setflags(write=True) m = len(i) nswap = 0 while nswap < maxswap: while True: e1, e2 = rng.randint(m, size=(2,)) while e1 == e2: e2 = rng.randint(m) a = i[e1] b = j[e1] c = i[e2] d = j[e2] if a != c and a != d and b != c and b != d: break # all 4 vertices must be different if rng.random_sample() > .5: i[e2] = d j[e2] = c # flip edge c-d with 50% probability c = i[e2] d = j[e2] # to explore all potential rewirings # rewiring condition if not (A[a, d] or A[c, b] or B[a, d] or B[c, b]): # avoid specified ixes A[a, d] = A[a, b] A[a, b] = 0 A[d, a] = A[b, a] A[b, a] = 0 A[c, b] = A[c, d] A[c, d] = 0 A[b, c] = A[d, c] A[d, c] = 0 j[e1] = d j[e2] = b # reassign edge indices nswap += 1 return A
def randomize_graph_partial_und(A, B, maxswap, seed=None)
A = RANDOMIZE_GRAPH_PARTIAL_UND(A,B,MAXSWAP) takes adjacency matrices A and B and attempts to randomize matrix A by performing MAXSWAP rewirings. The rewirings will avoid any spots where matrix B is nonzero. Parameters ---------- A : NxN np.ndarray undirected adjacency matrix to randomize B : NxN np.ndarray mask; edges to avoid maxswap : int number of rewirings seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- A : NxN np.ndarray randomized matrix Notes ----- 1. Graph may become disconnected as a result of rewiring. Always important to check. 2. A can be weighted, though the weighted degree sequence will not be preserved. 3. A must be undirected.
3.51798
1.876487
1.874769
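A minimal usage sketch (numpy as np assumed). The B mask only prevents rewired edges from landing on the flagged entries; note also that the swap loop has no attempt cap, so very dense masks can make it run for a long time:

A = makerandCIJ_dir(20, 60, seed=1)
A = np.logical_or(A, A.T).astype(float)  # A must be undirected
B = np.zeros_like(A)
B[0, 1] = B[1, 0] = 1  # never place a rewired edge at (0, 1)
A2 = randomize_graph_partial_und(A, B, maxswap=50, seed=1)
assert np.all(A2 == A2.T)  # swaps keep the matrix undirected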
''' Generates synthetic networks with parameters provided and evaluates their energy function. The energy function is defined as in Betzel et al. 2016. Basically it takes the Kolmogorov-Smirnov statistics of 4 network measures; comparing the degree distributions, clustering coefficients, betweenness centrality, and Euclidean distances between connected regions. The energy is globally low if the synthetic network matches the target. Energy is defined as the maximum difference across the four statistics. '''
m = np.size(np.where(Atgt.flat)) // 2
n = len(Atgt)
xk = np.sum(Atgt, axis=1)
xc = clustering_coef_bu(Atgt)
xb = betweenness_bin(Atgt)
xe = D[np.triu(Atgt, 1) > 0]
B = generative_model(A, D, m, eta, gamma, model_type=model_type,
                     model_var=model_var, epsilon=epsilon, copy=True,
                     seed=seed)
# if len(eta) != len(gamma), an error is thrown within generative_model
nB = len(eta)
if nB == 1:
    B = np.reshape(B, np.append(np.shape(B), 1))
K = np.zeros((nB, 4))
def kstats(x, y):
    # sample both empirical CDFs on the pooled, sorted bin edges and
    # take the maximum absolute difference (the KS statistic)
    bin_edges = np.concatenate([[-np.inf],
                                np.sort(np.concatenate((x, y))),
                                [np.inf]])
    bin_x, _ = np.histogram(x, bin_edges)
    bin_y, _ = np.histogram(y, bin_edges)
    sum_x = np.cumsum(bin_x) / np.sum(bin_x)
    sum_y = np.cumsum(bin_y) / np.sum(bin_y)
    cdfsamp_x = sum_x[:-1]
    cdfsamp_y = sum_y[:-1]
    delta_cdf = np.abs(cdfsamp_x - cdfsamp_y)
    return np.max(delta_cdf)
for ib in range(nB):
    Bc = B[:, :, ib]
    yk = np.sum(Bc, axis=1)
    yc = clustering_coef_bu(Bc)
    yb = betweenness_bin(Bc)
    ye = D[np.triu(Bc, 1) > 0]
    K[ib, 0] = kstats(xk, yk)
    K[ib, 1] = kstats(xc, yc)
    K[ib, 2] = kstats(xb, yb)
    K[ib, 3] = kstats(xe, ye)
return np.max(K, axis=1)
def evaluate_generative_model(A, Atgt, D, eta, gamma=None, model_type='matching', model_var='powerlaw', epsilon=1e-6, seed=None)
Generates synthetic networks with parameters provided and evaluates their energy function. The energy function is defined as in Betzel et al. 2016. Basically it takes the Kolmogorov-Smirnov statistics of 4 network measures; comparing the degree distributions, clustering coefficients, betweenness centrality, and Euclidean distances between connected regions. The energy is globally low if the synthetic network matches the target. Energy is defined as the maximum difference across the four statistics.
3.6926
2.437168
1.515119
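The KS statistic at the heart of the energy function can be cross-checked in isolation; a hedged sketch, assuming scipy is available (scipy.stats.ks_2samp computes the same maximum CDF difference as kstats above):

from scipy import stats
rng = np.random.RandomState(1)
x = rng.rand(100)
y = rng.rand(100) + 0.1
res = stats.ks_2samp(x, y)
# res.statistic is the max |CDF_x - CDF_y|, matching kstats(x, y)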
''' Node betweenness centrality is the fraction of all shortest paths in the network that contain a given node. Nodes with high values of betweenness centrality participate in a large number of shortest paths. Parameters ---------- A : NxN np.ndarray binary directed/undirected connection matrix Returns ------- BC : Nx1 np.ndarray node betweenness centrality vector Notes ----- Betweenness centrality may be normalised to the range [0,1] as BC/[(N-1)(N-2)], where N is the number of nodes in the network. '''
G = np.array(G, dtype=float)  # force G to have float type so it can be
# compared to float np.inf
n = len(G)  # number of nodes
I = np.eye(n)  # identity matrix
d = 1  # path length
NPd = G.copy()  # number of paths of length |d|
NSPd = G.copy()  # number of shortest paths of length |d|
NSP = G.copy()  # number of shortest paths of any length
L = G.copy()  # length of shortest paths
NSP[np.where(I)] = 1
L[np.where(I)] = 1
# calculate NSP and L
while np.any(NSPd):
    d += 1
    NPd = np.dot(NPd, G)
    NSPd = NPd * (L == 0)
    NSP += NSPd
    L = L + d * (NSPd != 0)
L[L == 0] = np.inf  # L for disconnected vertices is inf
L[np.where(I)] = 0
NSP[NSP == 0] = 1  # NSP for disconnected vertices is 1
DP = np.zeros((n, n))  # vertex on vertex dependency
diam = d - 1
# calculate DP
for d in range(diam, 1, -1):
    DPd1 = np.dot(((L == d) * (1 + DP) / NSP), G.T) * \
        ((L == (d - 1)) * NSP)
    DP += DPd1
return np.sum(DP, axis=0)
def betweenness_bin(G)
Node betweenness centrality is the fraction of all shortest paths in the network that contain a given node. Nodes with high values of betweenness centrality participate in a large number of shortest paths. Parameters ---------- A : NxN np.ndarray binary directed/undirected connection matrix Returns ------- BC : Nx1 np.ndarray node betweenness centrality vector Notes ----- Betweenness centrality may be normalised to the range [0,1] as BC/[(N-1)(N-2)], where N is the number of nodes in the network.
4.312603
2.920793
1.476518
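A minimal usage sketch on a 3-node path graph (numpy as np assumed):

A = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]])
BC = betweenness_bin(A)  # [0., 2., 0.]: node 1 carries 0->2 and 2->0
n = len(A)
BC_norm = BC / ((n - 1) * (n - 2))  # optional [0, 1] normalization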
''' Node betweenness centrality is the fraction of all shortest paths in the network that contain a given node. Nodes with high values of betweenness centrality participate in a large number of shortest paths. Parameters ---------- L : NxN np.ndarray directed/undirected weighted connection matrix Returns ------- BC : Nx1 np.ndarray node betweenness centrality vector Notes ----- The input matrix must be a connection-length matrix, typically obtained via a mapping from weight to length. For instance, in a weighted correlation network higher correlations are more naturally interpreted as shorter distances and the input matrix should consequently be some inverse of the connectivity matrix. Betweenness centrality may be normalised to the range [0,1] as BC/[(N-1)(N-2)], where N is the number of nodes in the network. ''' n = len(G) BC = np.zeros((n,)) # vertex betweenness for u in range(n): D = np.tile(np.inf, (n,)) D[u] = 0 # distance from u NP = np.zeros((n,)) NP[u] = 1 # number of paths from u S = np.ones((n,), dtype=bool) # distance permanence P = np.zeros((n, n)) # predecessors Q = np.zeros((n,), dtype=int) # indices q = n - 1 # order of non-increasing distance G1 = G.copy() V = [u] while True: S[V] = 0 # distance u->V is now permanent G1[:, V] = 0 # no in-edges as already shortest for v in V: Q[q] = v q -= 1 W, = np.where(G1[v, :]) # neighbors of v for w in W: Duw = D[v] + G1[v, w] # path length to be tested if Duw < D[w]: # if new u->w shorter than old D[w] = Duw NP[w] = NP[v] # NP(u->w) = NP of new path P[w, :] = 0 P[w, v] = 1 # v is the only predecessor elif Duw == D[w]: # if new u->w equal to old NP[w] += NP[v] # NP(u->w) sum of old and new P[w, v] = 1 # v is also predecessor if D[S].size == 0: break # all nodes were reached if np.isinf(np.min(D[S])): # some nodes cannot be reached Q[:q + 1], = np.where(np.isinf(D)) # these are first in line break V, = np.where(D == np.min(D[S])) DP = np.zeros((n,)) for w in Q[:n - 1]: BC[w] += DP[w] for v in np.where(P[w, :])[0]: DP[v] += (1 + DP[w]) * NP[v] / NP[w] return BC
def betweenness_wei(G)
Node betweenness centrality is the fraction of all shortest paths in the network that contain a given node. Nodes with high values of betweenness centrality participate in a large number of shortest paths. Parameters ---------- L : NxN np.ndarray directed/undirected weighted connection matrix Returns ------- BC : Nx1 np.ndarray node betweenness centrality vector Notes ----- The input matrix must be a connection-length matrix, typically obtained via a mapping from weight to length. For instance, in a weighted correlation network higher correlations are more naturally interpreted as shorter distances and the input matrix should consequently be some inverse of the connectivity matrix. Betweenness centrality may be normalised to the range [0,1] as BC/[(N-1)(N-2)], where N is the number of nodes in the network.
4.294555
2.598385
1.652778
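A minimal usage sketch (numpy as np assumed), mapping weights to lengths first with the invert helper defined later in this collection:

W = np.array([[0., 2., 0.],
              [2., 0., 1.],
              [0., 1., 0.]])
L = invert(W)  # connection-length matrix: stronger weight, shorter length
BC = betweenness_wei(L)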
''' The Shannon-entropy based diversity coefficient measures the diversity of intermodular connections of individual nodes and ranges from 0 to 1. Parameters ---------- W : NxN np.ndarray undirected connection matrix with positive and negative weights ci : Nx1 np.ndarray community affiliation vector Returns ------- Hpos : Nx1 np.ndarray diversity coefficient based on positive connections Hneg : Nx1 np.ndarray diversity coefficient based on negative connections ''' n = len(W) # number of nodes _, ci = np.unique(ci, return_inverse=True) ci += 1 m = np.max(ci) # number of modules def entropy(w_): S = np.sum(w_, axis=1) # strength Snm = np.zeros((n, m)) # node-to-module degree for i in range(m): Snm[:, i] = np.sum(w_[:, ci == i + 1], axis=1) pnm = Snm / (np.tile(S, (m, 1)).T) pnm[np.isnan(pnm)] = 0 pnm[np.logical_not(pnm)] = 1 return -np.sum(pnm * np.log(pnm), axis=1) / np.log(m) #explicitly ignore compiler warning for division by zero with np.errstate(invalid='ignore'): Hpos = entropy(W * (W > 0)) Hneg = entropy(-W * (W < 0)) return Hpos, Hneg
def diversity_coef_sign(W, ci)
The Shannon-entropy based diversity coefficient measures the diversity of intermodular connections of individual nodes and ranges from 0 to 1. Parameters ---------- W : NxN np.ndarray undirected connection matrix with positive and negative weights ci : Nx1 np.ndarray community affiliation vector Returns ------- Hpos : Nx1 np.ndarray diversity coefficient based on positive connections Hneg : Nx1 np.ndarray diversity coefficient based on negative connections
3.357197
2.233328
1.503226
''' Edge betweenness centrality is the fraction of all shortest paths in the network that contain a given edge. Edges with high values of betweenness centrality participate in a large number of shortest paths. Parameters ---------- A : NxN np.ndarray binary directed/undirected connection matrix Returns ------- EBC : NxN np.ndarray edge betweenness centrality matrix BC : Nx1 np.ndarray node betweenness centrality vector Notes ----- Betweenness centrality may be normalised to the range [0,1] as BC/[(N-1)(N-2)], where N is the number of nodes in the network. ''' n = len(G) BC = np.zeros((n,)) # vertex betweenness EBC = np.zeros((n, n)) # edge betweenness for u in range(n): D = np.zeros((n,)) D[u] = 1 # distance from u NP = np.zeros((n,)) NP[u] = 1 # number of paths from u P = np.zeros((n, n)) # predecessors Q = np.zeros((n,), dtype=int) # indices q = n - 1 # order of non-increasing distance Gu = G.copy() V = np.array([u]) while V.size: Gu[:, V] = 0 # remove remaining in-edges for v in V: Q[q] = v q -= 1 W, = np.where(Gu[v, :]) # neighbors of V for w in W: if D[w]: NP[w] += NP[v] # NP(u->w) sum of old and new P[w, v] = 1 # v is a predecessor else: D[w] = 1 NP[w] = NP[v] # NP(u->v) = NP of new path P[w, v] = 1 # v is a predecessor V, = np.where(np.any(Gu[V, :], axis=0)) if np.any(np.logical_not(D)): # if some vertices unreachable Q[:q], = np.where(np.logical_not(D)) # ...these are first in line DP = np.zeros((n,)) # dependency for w in Q[:n - 1]: BC[w] += DP[w] for v in np.where(P[w, :])[0]: DPvw = (1 + DP[w]) * NP[v] / NP[w] DP[v] += DPvw EBC[v, w] += DPvw return EBC, BC
def edge_betweenness_bin(G)
Edge betweenness centrality is the fraction of all shortest paths in the network that contain a given edge. Edges with high values of betweenness centrality participate in a large number of shortest paths. Parameters ---------- A : NxN np.ndarray binary directed/undirected connection matrix Returns ------- EBC : NxN np.ndarray edge betweenness centrality matrix BC : Nx1 np.ndarray node betweenness centrality vector Notes ----- Betweenness centrality may be normalised to the range [0,1] as BC/[(N-1)(N-2)], where N is the number of nodes in the network.
3.697997
2.677111
1.381339
''' Eigenvector centrality is a self-referential measure of centrality: nodes have high eigenvector centrality if they connect to other nodes that have high eigenvector centrality. The eigenvector centrality of node i is equivalent to the ith element in the eigenvector corresponding to the largest eigenvalue of the adjacency matrix. Parameters ---------- CIJ : NxN np.ndarray binary/weighted undirected adjacency matrix Returns ------- v : Nx1 np.ndarray eigenvector associated with the largest eigenvalue of the matrix '''
from scipy import linalg
n = len(CIJ)
vals, vecs = linalg.eig(CIJ)
i = np.argmax(vals)
return np.abs(vecs[:, i])
def eigenvector_centrality_und(CIJ)
Eigenvector centrality is a self-referential measure of centrality: nodes have high eigenvector centrality if they connect to other nodes that have high eigenvector centrality. The eigenvector centrality of node i is equivalent to the ith element in the eigenvector corresponding to the largest eigenvalue of the adjacency matrix. Parameters ---------- CIJ : NxN np.ndarray binary/weighted undirected adjacency matrix Returns ------- v : Nx1 np.ndarray eigenvector associated with the largest eigenvalue of the matrix
4.175122
1.635952
2.552106
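A minimal usage sketch on a 4-node star graph (numpy as np assumed):

A = np.zeros((4, 4))
A[0, 1:] = A[1:, 0] = 1  # node 0 is the hub
v = eigenvector_centrality_und(A)
assert np.argmax(v) == 0  # the hub is the most central node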
''' Shortcuts are central edges which significantly reduce the characteristic path length in the network. Parameters ---------- CIJ : NxN np.ndarray binary directed connection matrix Returns ------- Erange : NxN np.ndarray range for each edge, i.e. the length of the shortest path from i to j for edge c(i,j) after the edge has been removed from the graph eta : float average range for the entire graph Eshort : NxN np.ndarray entries are ones for shortcut edges fs : float fraction of shortcuts in the graph Notes ----- Follows the treatment of 'shortcuts' by Duncan Watts '''
N = len(CIJ)
K = np.size(np.where(CIJ)[1])
Erange = np.zeros((N, N))
i, j = np.where(CIJ)
for c in range(len(i)):
    CIJcut = CIJ.copy()
    CIJcut[i[c], j[c]] = 0
    R, D = reachdist(CIJcut)
    Erange[i[c], j[c]] = D[i[c], j[c]]
# average range (ignore Inf)
eta = (np.sum(Erange[np.logical_and(Erange > 0, Erange < np.inf)]) /
       len(Erange[np.logical_and(Erange > 0, Erange < np.inf)]))
# Original entries of D are ones, thus entries of Erange
# must be two or greater.
# If Erange(i,j) > 2, then the edge is a shortcut.
# 'fs' is the fraction of shortcuts over the entire graph.
Eshort = Erange > 2
fs = np.sum(Eshort) / K  # count shortcut edges, not the np.where tuple
return Erange, eta, Eshort, fs
def erange(CIJ)
Shortcuts are central edges which significantly reduce the characteristic path length in the network. Parameters ---------- CIJ : NxN np.ndarray binary directed connection matrix Returns ------- Erange : NxN np.ndarray range for each edge, i.e. the length of the shortest path from i to j for edge c(i,j) after the edge has been removed from the graph eta : float average range for the entire graph Eshort : NxN np.ndarray entries are ones for shortcut edges fs : float fraction of shortcuts in the graph Notes ----- Follows the treatment of 'shortcuts' by Duncan Watts
5.408083
2.406634
2.247156
''' Computes the flow coefficient for each node and averaged over the network, as described in Honey et al. (2007) PNAS. The flow coefficient is similar to betweenness centrality, but works on a local neighborhood. It is mathematically related to the clustering coefficient (cc) at each node as, fc+cc <= 1. Parameters ---------- CIJ : NxN np.ndarray binary directed connection matrix Returns ------- fc : Nx1 np.ndarray flow coefficient for each node FC : float average flow coefficient over the network total_flo : Nx1 np.ndarray number of paths that "flow" across each central node '''
N = len(CIJ)
fc = np.zeros((N,))
total_flo = np.zeros((N,))
max_flo = np.zeros((N,))
# loop over nodes
for v in range(N):
    # find neighbors - note: both incoming and outgoing connections
    nb, = np.where(CIJ[v, :] + CIJ[:, v].T)
    fc[v] = 0
    if nb.size:  # node v has at least one neighbor
        CIJflo = -CIJ[np.ix_(nb, nb)]
        for i in range(len(nb)):
            for j in range(len(nb)):
                if CIJ[nb[i], v] and CIJ[v, nb[j]]:
                    CIJflo[i, j] += 1
        total_flo[v] = np.sum(
            (CIJflo == 1) * np.logical_not(np.eye(len(nb))))
        max_flo[v] = len(nb) * len(nb) - len(nb)
        fc[v] = total_flo[v] / max_flo[v]
fc[np.isnan(fc)] = 0
FC = np.mean(fc)
return fc, FC, total_flo
def flow_coef_bd(CIJ)
Computes the flow coefficient for each node and averaged over the network, as described in Honey et al. (2007) PNAS. The flow coefficient is similar to betweenness centrality, but works on a local neighborhood. It is mathematically related to the clustering coefficient (cc) at each node as, fc+cc <= 1. Parameters ---------- CIJ : NxN np.ndarray binary directed connection matrix Returns ------- fc : Nx1 np.ndarray flow coefficient for each node FC : float average flow coefficient over the network total_flo : Nx1 np.ndarray number of paths that "flow" across each central node
4.075832
2.053342
1.984975
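A minimal usage sketch (numpy as np assumed), reusing the generator defined earlier in this collection:

CIJ = makerandCIJ_dir(15, 60, seed=1)
fc, FC, total_flo = flow_coef_bd(CIJ)
assert np.all((fc >= 0) & (fc <= 1))  # fc is a fraction of realizable paths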
''' The gateway coefficient is a variant of participation coefficient. It is weighted by how critical the connections are to intermodular connectivity (e.g. if a node is the only connection between its module and another module, it will have a higher gateway coefficient, unlike participation coefficient). Parameters ---------- W : NxN np.ndarray undirected signed connection matrix ci : Nx1 np.ndarray community affiliation vector centrality_type : enum 'degree' - uses the weighted degree (i.e., node strength) 'betweenness' - uses the betweenness centrality Returns ------- Gpos : Nx1 np.ndarray gateway coefficient for positive weights Gneg : Nx1 np.ndarray gateway coefficient for negative weights Reference: Vargas ER, Wahl LM, Eur Phys J B (2014) 87:1-10 '''
_, ci = np.unique(ci, return_inverse=True)
ci += 1
n = len(W)
np.fill_diagonal(W, 0)
def gcoef(W):
    # strength
    s = np.sum(W, axis=1)
    # neighbor community affiliation
    Gc = np.inner((W != 0), np.diag(ci))
    # community specific neighbors
    Sc2 = np.zeros((n,))
    # extramodular weighting
    ksm = np.zeros((n,))
    # intramodular weighting
    centm = np.zeros((n,))
    if centrality_type == 'degree':
        cent = s.copy()
    elif centrality_type == 'betweenness':
        cent = betweenness_wei(invert(W))
    nr_modules = int(np.max(ci))
    for i in range(1, nr_modules + 1):
        ks = np.sum(W * (Gc == i), axis=1)
        Sc2 += ks ** 2
        for j in range(1, nr_modules + 1):
            # calculate extramodular weights
            ksm[ci == j] += ks[ci == j] / np.sum(ks[ci == j])
        # calculate intramodular weights
        centm[ci == i] = np.sum(cent[ci == i])
    centm = centm / max(centm)
    # calculate total weights
    gs = (1 - ksm * centm) ** 2
    Gw = 1 - Sc2 * gs / s ** 2
    Gw[np.where(np.isnan(Gw))] = 0
    Gw[np.where(np.logical_not(Gw))] = 0
    return Gw
G_pos = gcoef(W * (W > 0))
G_neg = gcoef(-W * (W < 0))
return G_pos, G_neg
def gateway_coef_sign(W, ci, centrality_type='degree')
The gateway coefficient is a variant of participation coefficient. It is weighted by how critical the connections are to intermodular connectivity (e.g. if a node is the only connection between its module and another module, it will have a higher gateway coefficient, unlike participation coefficient). Parameters ---------- W : NxN np.ndarray undirected signed connection matrix ci : Nx1 np.ndarray community affiliation vector centrality_type : enum 'degree' - uses the weighted degree (i.e., node strength) 'betweenness' - uses the betweenness centrality Returns ------- Gpos : Nx1 np.ndarray gateway coefficient for positive weights Gneg : Nx1 np.ndarray gateway coefficient for negative weights Reference: Vargas ER, Wahl LM, Eur Phys J B (2014) 87:1-10
4.683669
2.565411
1.8257
''' The k-core is the largest subgraph comprising nodes of degree at least k. The coreness of a node is k if the node belongs to the k-core but not to the (k+1)-core. This function computes k-coreness of all nodes for a given binary directed connection matrix. Parameters ---------- CIJ : NxN np.ndarray binary directed connection matrix Returns ------- coreness : Nx1 np.ndarray node coreness kn : Nx1 np.ndarray size of the k-core for each value of k '''
N = len(CIJ)
coreness = np.zeros((N,))
kn = np.zeros((N,))
for k in range(N):
    CIJkcore, kn[k] = kcore_bd(CIJ, k)
    ss = np.sum(CIJkcore, axis=0) > 0
    coreness[ss] = k
return coreness, kn
def kcoreness_centrality_bd(CIJ)
The k-core is the largest subgraph comprising nodes of degree at least k. The coreness of a node is k if the node belongs to the k-core but not to the (k+1)-core. This function computes k-coreness of all nodes for a given binary directed connection matrix. Parameters ---------- CIJ : NxN np.ndarray binary directed connection matrix Returns ------- coreness : Nx1 np.ndarray node coreness kn : Nx1 np.ndarray size of the k-core for each value of k
4.033648
1.834467
2.198812
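A minimal usage sketch (numpy as np assumed; the companion kcore_bd routine is assumed to be available):

CIJ = makerandCIJ_dir(20, 100, seed=1)
coreness, kn = kcoreness_centrality_bd(CIJ)
# coreness[i] is the largest k such that node i survives in the k-core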
''' The k-core is the largest subgraph comprising nodes of degree at least k. The coreness of a node is k if the node belongs to the k-core but not to the (k+1)-core. This function computes the coreness of all nodes for a given binary undirected connection matrix. Parameters ---------- CIJ : NxN np.ndarray binary undirected connection matrix Returns ------- coreness : Nx1 np.ndarray node coreness kn : Nx1 np.ndarray size of the k-core for each value of k '''
N = len(CIJ)
# determine if the network is undirected -- if not, compute coreness
# on the corresponding undirected network
CIJund = CIJ + CIJ.T
if np.any(CIJund > 1):
    CIJ = np.array(CIJund > 0, dtype=float)
coreness = np.zeros((N,))
kn = np.zeros((N,))
for k in range(N):
    CIJkcore, kn[k] = kcore_bu(CIJ, k)
    ss = np.sum(CIJkcore, axis=0) > 0
    coreness[ss] = k
return coreness, kn
def kcoreness_centrality_bu(CIJ)
The k-core is the largest subgraph comprising nodes of degree at least k. The coreness of a node is k if the node belongs to the k-core but not to the (k+1)-core. This function computes the coreness of all nodes for a given binary undirected connection matrix. Parameters ---------- CIJ : NxN np.ndarray binary undirected connection matrix Returns ------- coreness : Nx1 np.ndarray node coreness kn : Nx1 np.ndarray size of the k-core for each value of k
3.952565
2.216538
1.783215
''' The within-module degree z-score is a within-module version of degree centrality. Parameters ---------- W : NxN np.ndarray binary/weighted directed/undirected connection matrix ci : Nx1 array_like community affiliation vector flag : int Graph type. 0: undirected graph (default) 1: directed graph in degree 2: directed graph out degree 3: directed graph in and out degree Returns ------- Z : Nx1 np.ndarray within-module degree Z-score '''
_, ci = np.unique(ci, return_inverse=True)
ci += 1
if flag == 2:
    W = W.copy()
    W = W.T
elif flag == 3:
    W = W.copy()
    W = W + W.T
n = len(W)  # number of vertices
Z = np.zeros((n,))
for i in range(1, int(np.max(ci) + 1)):
    Koi = np.sum(W[np.ix_(ci == i, ci == i)], axis=1)
    Z[np.where(ci == i)] = (Koi - np.mean(Koi)) / np.std(Koi)
Z[np.where(np.isnan(Z))] = 0
return Z
def module_degree_zscore(W, ci, flag=0)
The within-module degree z-score is a within-module version of degree centrality. Parameters ---------- W : NxN np.ndarray binary/weighted directed/undirected connection matrix ci : Nx1 array_like community affiliation vector flag : int Graph type. 0: undirected graph (default) 1: directed graph in degree 2: directed graph out degree 3: directed graph in and out degree Returns ------- Z : Nx1 np.ndarray within-module degree Z-score
3.321903
1.83585
1.809463
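A minimal usage sketch (numpy as np assumed). Two disconnected triangles give every node the same within-module degree, so all z-scores are zero (NaNs from the zero variance are zeroed out):

W = np.kron(np.eye(2), 1 - np.eye(3))  # two disconnected triangles
ci = np.array([1, 1, 1, 2, 2, 2])
Z = module_degree_zscore(W, ci)
assert np.all(Z == 0)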
''' The PageRank centrality is a variant of eigenvector centrality. This function computes the PageRank centrality of each vertex in a graph. Formally, PageRank is defined as the stationary distribution achieved by instantiating a Markov chain on a graph. The PageRank centrality of a given vertex, then, is proportional to the number of steps (or amount of time) spent at that vertex as a result of such a process. The PageRank index gets modified by the addition of a damping factor, d. In terms of a Markov chain, the damping factor specifies the fraction of the time that a random walker will transition to one of its current state's neighbors. The remaining fraction of the time the walker is restarted at a random vertex. A common value for the damping factor is d = 0.85. Parameters ---------- A : NxN np.ndarray adjacency matrix d : float damping factor (see description) falff : Nx1 np.ndarray | None Initial page rank probability, non-negative values. Default value is None. If not specified, a naive bayesian prior is used. Returns ------- r : Nx1 np.ndarray vector of page rankings Notes ----- Note: The algorithm will work well for smaller matrices (number of nodes around 1000 or less) '''
from scipy import linalg
N = len(A)
if falff is None:
    norm_falff = np.ones((N,)) / N
else:
    norm_falff = falff / np.sum(falff)
deg = np.sum(A, axis=0)
deg[deg == 0] = 1
D1 = np.diag(1 / deg)
B = np.eye(N) - d * np.dot(A, D1)
b = (1 - d) * norm_falff
r = linalg.solve(B, b)
r /= np.sum(r)
return r
def pagerank_centrality(A, d, falff=None)
The PageRank centrality is a variant of eigenvector centrality. This function computes the PageRank centrality of each vertex in a graph. Formally, PageRank is defined as the stationary distribution achieved by instantiating a Markov chain on a graph. The PageRank centrality of a given vertex, then, is proportional to the number of steps (or amount of time) spent at that vertex as a result of such a process. The PageRank index gets modified by the addition of a damping factor, d. In terms of a Markov chain, the damping factor specifies the fraction of the time that a random walker will transition to one of its current state's neighbors. The remaining fraction of the time the walker is restarted at a random vertex. A common value for the damping factor is d = 0.85. Parameters ---------- A : NxN np.ndarray adjacency matrix d : float damping factor (see description) falff : Nx1 np.ndarray | None Initial page rank probability, non-negative values. Default value is None. If not specified, a naive bayesian prior is used. Returns ------- r : Nx1 np.ndarray vector of page rankings Notes ----- Note: The algorithm will work well for smaller matrices (number of nodes around 1000 or less)
5.255411
1.392561
3.773919
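A minimal usage sketch on a 3-node star (numpy as np assumed):

A = np.array([[0, 1, 1],
              [1, 0, 0],
              [1, 0, 0]], dtype=float)
r = pagerank_centrality(A, d=0.85)
assert np.isclose(np.sum(r), 1.0)  # rankings form a distribution
assert np.argmax(r) == 0           # the hub collects the most rank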
''' Participation coefficient is a measure of diversity of intermodular connections of individual nodes. Parameters ---------- W : NxN np.ndarray binary/weighted directed/undirected connection matrix ci : Nx1 np.ndarray community affiliation vector degree : str Flag to describe nature of graph 'undirected': For undirected graphs 'in': Uses the in-degree 'out': Uses the out-degree Returns ------- P : Nx1 np.ndarray participation coefficient ''' if degree == 'in': W = W.T _, ci = np.unique(ci, return_inverse=True) ci += 1 n = len(W) # number of vertices Ko = np.sum(W, axis=1) # (out) degree Gc = np.dot((W != 0), np.diag(ci)) # neighbor community affiliation Kc2 = np.zeros((n,)) # community-specific neighbors for i in range(1, int(np.max(ci)) + 1): Kc2 += np.square(np.sum(W * (Gc == i), axis=1)) P = np.ones((n,)) - Kc2 / np.square(Ko) # P=0 if for nodes with no (out) neighbors P[np.where(np.logical_not(Ko))] = 0 return P
def participation_coef(W, ci, degree='undirected')
Participation coefficient is a measure of diversity of intermodular connections of individual nodes. Parameters ---------- W : NxN np.ndarray binary/weighted directed/undirected connection matrix ci : Nx1 np.ndarray community affiliation vector degree : str Flag to describe nature of graph 'undirected': For undirected graphs 'in': Uses the in-degree 'out': Uses the out-degree Returns ------- P : Nx1 np.ndarray participation coefficient
4.562017
2.605152
1.751152
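A minimal usage sketch (numpy as np assumed). Nodes 0 and 2 bridge the two modules; nodes 1 and 3 connect within a single module:

W = np.array([[0, 1, 1, 0],
              [1, 0, 0, 0],
              [1, 0, 0, 1],
              [0, 0, 1, 0]], dtype=float)
ci = np.array([1, 1, 2, 2])
P = participation_coef(W, ci)  # [0.5, 0., 0.5, 0.]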
''' Participation coefficient is a measure of diversity of intermodular connections of individual nodes. Parameters ---------- W : NxN np.ndarray binary/weighted directed/undirected connection matrix, given as a scipy.sparse.csr matrix ci : Nx1 np.ndarray community affiliation vector degree : str Flag to describe nature of graph 'undirected': For undirected graphs 'in': Uses the in-degree 'out': Uses the out-degree Returns ------- P : Nx1 np.ndarray participation coefficient '''
if degree == 'in':
    W = W.T
_, ci = np.unique(ci, return_inverse=True)
ci += 1
n = W.shape[0]  # number of vertices
Ko = np.array(W.sum(axis=1)).flatten().astype(float)  # (out) degree
Gc = W.copy().astype('int16')
Gc[Gc != 0] = 1
Gc = Gc * np.diag(ci)  # neighbor community affiliation
P = np.zeros((n,))
for i in range(1, int(np.max(ci)) + 1):
    P = P + (np.array((W.multiply(Gc == i).astype(int)).sum(axis=1)).flatten() / Ko) ** 2
P = 1 - P
# P=0 for nodes with no (out) neighbors
P[np.where(np.logical_not(Ko))] = 0
return P
def participation_coef_sparse(W, ci, degree='undirected')
Participation coefficient is a measure of diversity of intermodular connections of individual nodes. Parameters ---------- W : NxN np.ndarray binary/weighted directed/undirected connection matrix, given as a scipy.sparse.csr matrix ci : Nx1 np.ndarray community affiliation vector degree : str Flag to describe nature of graph 'undirected': For undirected graphs 'in': Uses the in-degree 'out': Uses the out-degree Returns ------- P : Nx1 np.ndarray participation coefficient
4.792876
2.563323
1.86979
''' Participation coefficient is a measure of diversity of intermodular connections of individual nodes. Parameters ---------- W : NxN np.ndarray undirected connection matrix with positive and negative weights ci : Nx1 np.ndarray community affiliation vector Returns ------- Ppos : Nx1 np.ndarray participation coefficient from positive weights Pneg : Nx1 np.ndarray participation coefficient from negative weights ''' _, ci = np.unique(ci, return_inverse=True) ci += 1 n = len(W) # number of vertices def pcoef(W_): S = np.sum(W_, axis=1) # strength # neighbor community affil. Gc = np.dot(np.logical_not(W_ == 0), np.diag(ci)) Sc2 = np.zeros((n,)) for i in range(1, int(np.max(ci) + 1)): Sc2 += np.square(np.sum(W_ * (Gc == i), axis=1)) P = np.ones((n,)) - Sc2 / np.square(S) P[np.where(np.isnan(P))] = 0 P[np.where(np.logical_not(P))] = 0 # p_ind=0 if no (out)neighbors return P #explicitly ignore compiler warning for division by zero with np.errstate(invalid='ignore'): Ppos = pcoef(W * (W > 0)) Pneg = pcoef(-W * (W < 0)) return Ppos, Pneg
def participation_coef_sign(W, ci)
Participation coefficient is a measure of diversity of intermodular connections of individual nodes. Parameters ---------- W : NxN np.ndarray undirected connection matrix with positive and negative weights ci : Nx1 np.ndarray community affiliation vector Returns ------- Ppos : Nx1 np.ndarray participation coefficient from positive weights Pneg : Nx1 np.ndarray participation coefficient from negative weights
4.022243
2.924196
1.375504
''' The subgraph centrality of a node is a weighted sum of closed walks of different lengths in the network starting and ending at the node. This function returns a vector of subgraph centralities for each node of the network. Parameters ---------- CIJ : NxN np.ndarray binary adjacency matrix Returns ------- Cs : Nx1 np.ndarray subgraph centrality '''
from scipy import linalg
vals, vecs = linalg.eig(CIJ)  # compute eigendecomposition
# weight closed walks of every length via the exponential of the eigenvalues
Cs = np.real(np.dot(vecs * vecs, np.exp(vals)))
return Cs
def subgraph_centrality(CIJ)
The subgraph centrality of a node is a weighted sum of closed walks of different lengths in the network starting and ending at the node. This function returns a vector of subgraph centralities for each node of the network. Parameters ---------- CIJ : NxN np.ndarray binary adjacency matrix Returns ------- Cs : Nx1 np.ndarray subgraph centrality
5.457863
2.484603
2.196674
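A minimal usage sketch on a triangle (numpy as np assumed); symmetric nodes get identical centralities:

A = np.ones((3, 3)) - np.eye(3)  # triangle
Cs = subgraph_centrality(A)
assert np.allclose(Cs, Cs[0])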
''' Functional motifs are subsets of connection patterns embedded within anatomical motifs. Motif frequency is the frequency of occurrence of motifs around a node. Parameters ---------- A : NxN np.ndarray binary directed connection matrix Returns ------- F : 13xN np.ndarray motif frequency matrix f : 13x1 np.ndarray motif frequency vector (averaged over all nodes) '''
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
mot = io.loadmat(fname)
m3 = mot['m3']
id3 = mot['id3'].squeeze()
n3 = mot['n3'].squeeze()
n = len(A)  # number of vertices in A
f = np.zeros((13,))  # motif count for whole graph
F = np.zeros((13, n))  # motif frequency
A = binarize(A, copy=True)  # ensure A is binary
As = np.logical_or(A, A.T)  # symmetrized adjmat
for u in range(n - 2):
    # v1: neighbors of u (>u)
    V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
    for v1 in np.where(V1)[0]:
        # v2: neighbors of v1 (>u)
        V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
        V2[V1] = 0  # not already in V1
        # and all neighbors of u (>v1)
        V2 = np.logical_or(
            np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
        for v2 in np.where(V2)[0]:
            a = np.array((A[v1, u], A[v2, u], A[u, v1], A[v2, v1],
                          A[u, v2], A[v1, v2]))
            # find all contained isomorphs
            ix = (np.dot(m3, a) == n3)
            id = id3[ix] - 1
            # unique motif occurrences
            idu, jx = np.unique(id, return_index=True)
            jx = np.append((0,), jx + 1)
            mu = len(idu)  # number of unique motifs
            f2 = np.zeros((mu,))
            for h in range(mu):  # for each unique motif
                f2[h] = jx[h + 1] - jx[h]  # and frequencies
            # then add to the cumulative count and to the per-node
            # frequencies of each participating node
            f[idu] += f2
            F[idu, u] += f2
            F[idu, v1] += f2
            F[idu, v2] += f2
return f, F
def motif3funct_bin(A)
Functional motifs are subsets of connection patterns embedded within anatomical motifs. Motif frequency is the frequency of occurrence of motifs around a node. Parameters ---------- A : NxN np.ndarray binary directed connection matrix Returns ------- F : 13xN np.ndarray motif frequency matrix f : 13x1 np.ndarray motif frequency vector (averaged over all nodes)
3.824968
2.998439
1.275653
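A minimal usage sketch (numpy as np assumed); note this relies on the motif library .mat file referenced by motiflib shipping alongside the module:

A = makerandCIJ_dir(10, 30, seed=1)
f, F = motif3funct_bin(A)
# f has one entry per 3-node motif class; F distributes the counts per node
assert f.shape == (13,) and F.shape == (13, 10)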
''' Structural motifs are patterns of local connectivity. Motif frequency is the frequency of occurrence of motifs around a node. Parameters ---------- A : NxN np.ndarray binary directed connection matrix Returns ------- F : 13xN np.ndarray motif frequency matrix f : 13x1 np.ndarray motif frequency vector (averaged over all nodes) ''' from scipy import io import os fname = os.path.join(os.path.dirname(__file__), motiflib) mot = io.loadmat(fname) m3n = mot['m3n'] id3 = mot['id3'].squeeze() n = len(A) # number of vertices in A f = np.zeros((13,)) # motif count for whole graph F = np.zeros((13, n)) # motif frequency A = binarize(A, copy=True) # ensure A is binary As = np.logical_or(A, A.T) # symmetrized adjmat for u in range(n - 2): # v1: neighbors of u (>u) V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1]) for v1 in np.where(V1)[0]: # v2: neighbors of v1 (>u) V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1]) V2[V1] = 0 # not already in V1 # and all neighbors of u (>v1) V2 = np.logical_or( np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2) for v2 in np.where(V2)[0]: a = np.array((A[v1, u], A[v2, u], A[u, v1], A[v2, v1], A[u, v2], A[v1, v2])) s = np.uint32(np.sum(np.power(10, np.arange(5, -1, -1)) * a)) ix = id3[np.squeeze(s == m3n)] - 1 F[ix, u] += 1 F[ix, v1] += 1 F[ix, v2] += 1 f[ix] += 1 return f, F
def motif3struct_bin(A)
Structural motifs are patterns of local connectivity. Motif frequency is the frequency of occurrence of motifs around a node. Parameters ---------- A : NxN np.ndarray binary directed connection matrix Returns ------- F : 13xN np.ndarray motif frequency matrix f : 13x1 np.ndarray motif frequency vector (averaged over all nodes)
3.263084
2.592036
1.258888
''' Structural motifs are patterns of local connectivity. Motif frequency is the frequency of occurrence of motifs around a node. Parameters ---------- A : NxN np.ndarray binary directed connection matrix Returns ------- F : 199xN np.ndarray motif frequency matrix f : 199x1 np.ndarray motif frequency vector (averaged over all nodes) '''
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
mot = io.loadmat(fname)
m4n = mot['m4n']
id4 = mot['id4'].squeeze()
n = len(A)
f = np.zeros((199,))
F = np.zeros((199, n))  # frequency
A = binarize(A, copy=True)  # ensure A is binary
As = np.logical_or(A, A.T)  # symmetrized adjmat
for u in range(n - 3):
    # v1: neighbors of u (>u)
    V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
    for v1 in np.where(V1)[0]:
        V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
        V2[V1] = 0  # not already in V1
        # and all neighbors of u (>v1)
        V2 = np.logical_or(
            np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
        for v2 in np.where(V2)[0]:
            vz = np.max((v1, v2))  # vz: largest rank node
            # v3: all neighbors of v2 (>u)
            V3 = np.append(np.zeros((u,), dtype=int), As[v2, u + 1:n + 1])
            V3[V2] = 0  # not already in V1 and V2
            # and all neighbors of v1 (>v2)
            V3 = np.logical_or(
                np.append(np.zeros((v2,)), As[v1, v2 + 1:n + 1]), V3)
            V3[V1] = 0  # not already in V1
            # and all neighbors of u (>vz)
            V3 = np.logical_or(
                np.append(np.zeros((vz,)), As[u, vz + 1:n + 1]), V3)
            for v3 in np.where(V3)[0]:
                a = np.array((A[v1, u], A[v2, u], A[v3, u], A[u, v1],
                              A[v2, v1], A[v3, v1], A[u, v2], A[v1, v2],
                              A[v3, v2], A[u, v3], A[v1, v3], A[v2, v3]))
                s = np.uint64(
                    np.sum(np.power(10, np.arange(11, -1, -1)) * a))
                # motif ids are 1-indexed (MATLAB convention), so shift
                # by 1 as in motif3struct_bin above
                ix = id4[np.squeeze(s == m4n)] - 1
                F[ix, u] += 1
                F[ix, v1] += 1
                F[ix, v2] += 1
                F[ix, v3] += 1
                f[ix] += 1
return f, F
def motif4struct_bin(A)
Structural motifs are patterns of local connectivity. Motif frequency is the frequency of occurrence of motifs around a node. Parameters ---------- A : NxN np.ndarray binary directed connection matrix Returns ------- F : 199xN np.ndarray motif frequency matrix f : 199x1 np.ndarray motif frequency vector (averaged over all nodes)
2.585649
2.190686
1.180292
''' This function thresholds the connectivity matrix by absolute weight magnitude. All weights below the given threshold, and all weights on the main diagonal (self-self connections) are set to 0. If copy is not set, this function will *modify W in place.* Parameters ---------- W : np.ndarray weighted connectivity matrix thr : float absolute weight threshold copy : bool if True, returns a copy of the matrix. Otherwise, modifies the matrix in place. Default value=True. Returns ------- W : np.ndarray thresholded connectivity matrix ''' if copy: W = W.copy() np.fill_diagonal(W, 0) # clear diagonal W[W < thr] = 0 # apply threshold return W
def threshold_absolute(W, thr, copy=True)
This function thresholds the connectivity matrix by absolute weight magnitude. All weights below the given threshold, and all weights on the main diagonal (self-self connections) are set to 0. If copy is not set, this function will *modify W in place.* Parameters ---------- W : np.ndarray weighted connectivity matrix thr : float absolute weight threshold copy : bool if True, returns a copy of the matrix. Otherwise, modifies the matrix in place. Default value=True. Returns ------- W : np.ndarray thresholded connectivity matrix
4.005501
1.47222
2.720722
''' W_bin = weight_conversion(W, 'binarize'); W_nrm = weight_conversion(W, 'normalize'); L = weight_conversion(W, 'lengths'); This function may either binarize an input weighted connection matrix, normalize an input weighted connection matrix or convert an input weighted connection matrix to a weighted connection-length matrix. Binarization converts all present connection weights to 1. Normalization scales all weight magnitudes to the range [0,1] and should be done prior to computing some weighted measures, such as the weighted clustering coefficient. Conversion of connection weights to connection lengths is needed prior to computation of weighted distance-based measures, such as distance and betweenness centrality. In a weighted connection network, higher weights are naturally interpreted as shorter lengths. The connection-lengths matrix here is defined as the inverse of the connection-weights matrix. If copy is not set, this function will *modify W in place.* Parameters ---------- W : NxN np.ndarray weighted connectivity matrix wcm : str weight conversion command. 'binarize' : binarize weights 'normalize' : normalize weights 'lengths' : convert weights to lengths (invert matrix) copy : bool if True, returns a copy of the matrix. Otherwise, modifies the matrix in place. Default value=True. Returns ------- W : NxN np.ndarray connectivity matrix with specified changes Notes ----- This function is included for compatibility with BCT. But there are other functions binarize(), normalize() and invert() which are simpler to call directly. ''' if wcm == 'binarize': return binarize(W, copy) elif wcm == 'normalize': return normalize(W, copy) elif wcm == 'lengths': return invert(W, copy) else: raise NotImplementedError('Unknown weight conversion command.')
def weight_conversion(W, wcm, copy=True)
W_bin = weight_conversion(W, 'binarize'); W_nrm = weight_conversion(W, 'normalize'); L = weight_conversion(W, 'lengths'); This function may either binarize an input weighted connection matrix, normalize an input weighted connection matrix or convert an input weighted connection matrix to a weighted connection-length matrix. Binarization converts all present connection weights to 1. Normalization scales all weight magnitudes to the range [0,1] and should be done prior to computing some weighted measures, such as the weighted clustering coefficient. Conversion of connection weights to connection lengths is needed prior to computation of weighted distance-based measures, such as distance and betweenness centrality. In a weighted connection network, higher weights are naturally interpreted as shorter lengths. The connection-lengths matrix here is defined as the inverse of the connection-weights matrix. If copy is not set, this function will *modify W in place.* Parameters ---------- W : NxN np.ndarray weighted connectivity matrix wcm : str weight conversion command. 'binarize' : binarize weights 'normalize' : normalize weights 'lengths' : convert weights to lengths (invert matrix) copy : bool if True, returns a copy of the matrix. Otherwise, modifies the matrix in place. Default value=True. Returns ------- W : NxN np.ndarray connectivity matrix with specified changes Notes ----- This function is included for compatibility with BCT. But there are other functions binarize(), normalize() and invert() which are simpler to call directly.
5.161171
1.164079
4.433694
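A minimal usage sketch (numpy as np assumed), covering all three conversion commands:

W = np.array([[0., 2.], [4., 0.]])
W_bin = weight_conversion(W, 'binarize')   # [[0, 1], [1, 0]]
W_nrm = weight_conversion(W, 'normalize')  # [[0, 0.5], [1, 0]]
L = weight_conversion(W, 'lengths')        # [[0, 0.5], [0.25, 0]]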
''' Binarizes an input weighted connection matrix. If copy is not set, this function will *modify W in place.* Parameters ---------- W : NxN np.ndarray weighted connectivity matrix copy : bool if True, returns a copy of the matrix. Otherwise, modifies the matrix in place. Default value=True. Returns ------- W : NxN np.ndarray binary connectivity matrix ''' if copy: W = W.copy() W[W != 0] = 1 return W
def binarize(W, copy=True)
Binarizes an input weighted connection matrix. If copy is not set, this function will *modify W in place.* Parameters ---------- W : NxN np.ndarray weighted connectivity matrix copy : bool if True, returns a copy of the matrix. Otherwise, modifies the matrix in place. Default value=True. Returns ------- W : NxN np.ndarray binary connectivity matrix
3.793289
1.553915
2.441118
''' Normalizes an input weighted connection matrix. If copy is not set, this function will *modify W in place.* Parameters ---------- W : np.ndarray weighted connectivity matrix copy : bool if True, returns a copy of the matrix. Otherwise, modifies the matrix in place. Default value=True. Returns ------- W : np.ndarray normalized connectivity matrix ''' if copy: W = W.copy() W /= np.max(np.abs(W)) return W
def normalize(W, copy=True)
Normalizes an input weighted connection matrix. If copy is not set, this function will *modify W in place.* Parameters ---------- W : np.ndarray weighted connectivity matrix copy : bool if True, returns a copy of the matrix. Otherwise, modifies the matrix in place. Default value=True. Returns ------- W : np.ndarray normalized connectivity matrix
3.889712
1.58529
2.453628
''' Inverts elementwise the weights in an input connection matrix. In other words, converts the matrix of internode strengths to a matrix of internode distances. If copy is not set, this function will *modify W in place.* Parameters ---------- W : np.ndarray weighted connectivity matrix copy : bool if True, returns a copy of the matrix. Otherwise, modifies the matrix in place. Default value=True. Returns ------- W : np.ndarray inverted connectivity matrix '''
if copy:
    W = W.copy()
E = np.where(W)
W[E] = 1. / W[E]
return W
def invert(W, copy=True)
Inverts elementwise the weights in an input connection matrix. In other words, converts the matrix of internode strengths to a matrix of internode distances. If copy is not set, this function will *modify W in place.* Parameters ---------- W : np.ndarray weighted connectivity matrix copy : bool if True, returns a copy of the matrix. Otherwise, modifies the matrix in place. Default value=True. Returns ------- W : np.ndarray inverted connectivity matrix
4.837028
1.529413
3.162669
''' Fix a bunch of common problems. More specifically, remove Inf and NaN, ensure exact binariness and symmetry (i.e. remove floating point instability), and zero diagonal. Parameters ---------- W : np.ndarray weighted connectivity matrix copy : bool if True, returns a copy of the matrix. Otherwise, modifies the matrix in place. Default value=True. Returns ------- W : np.ndarray connectivity matrix with fixes applied '''
if copy:
    W = W.copy()
# zero diagonal
np.fill_diagonal(W, 0)
# remove np.inf and np.nan (index with boolean masks, not np.where tuples)
W[np.logical_or(np.isinf(W), np.isnan(W))] = 0
# ensure exact binariness
u = np.unique(W)
if np.all(np.logical_or(np.abs(u) < 1e-8, np.abs(u - 1) < 1e-8)):
    W = np.around(W, decimals=5)
# ensure exact symmetry
if np.allclose(W, W.T):
    W = np.around(W, decimals=5)
return W
def autofix(W, copy=True)
Fix a bunch of common problems. More specifically, remove Inf and NaN, ensure exact binariness and symmetry (i.e. remove floating point instability), and zero diagonal. Parameters ---------- W : np.ndarray weighted connectivity matrix copy : bool if True, returns a copy of the matrix. Otherwise, modifies the matrix in place. Default value=True. Returns ------- W : np.ndarray connectivity matrix with fixes applied
3.546703
1.7869
1.984836
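A minimal usage sketch (numpy as np assumed):

W = np.array([[1., 0.3, np.nan],
              [0.3, 1., np.inf],
              [np.nan, np.inf, 1.]])
W = autofix(W)
assert np.isfinite(W).all()     # NaN/Inf removed
assert not np.diag(W).any()     # diagonal zeroed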
''' Takes as input a set of vertex partitions CI of dimensions [vertex x partition]. Each column in CI contains the assignments of each vertex to a class/community/module. This function aggregates the partitions in CI into a square [vertex x vertex] agreement matrix D, whose elements indicate the number of times any two vertices were assigned to the same class. In the case that the number of nodes and partitions in CI is large (greater than ~1000 nodes or greater than ~1000 partitions), the script can be made faster by computing D in pieces. The optional input BUFFSZ determines the size of each piece. Trial and error has found that BUFFSZ ~ 150 works well. Parameters ---------- ci : NxM np.ndarray set of M (possibly degenerate) partitions of N nodes buffsz : int | None sets buffer size. If not specified, defaults to 1000 Returns ------- D : NxN np.ndarray agreement matrix ''' ci = np.array(ci) n_nodes, n_partitions = ci.shape if n_partitions <= buffsz: # Case 1: Use all partitions at once ind = dummyvar(ci) D = np.dot(ind, ind.T) else: # Case 2: Add together results from subsets of partitions a = np.arange(0, n_partitions, buffsz) b = np.arange(buffsz, n_partitions, buffsz) if len(a) != len(b): b = np.append(b, n_partitions) D = np.zeros((n_nodes, n_nodes)) for i, j in zip(a, b): y = ci[:, i:j] ind = dummyvar(y) D += np.dot(ind, ind.T) np.fill_diagonal(D, 0) return D
def agreement(ci, buffsz=1000)
Takes as input a set of vertex partitions CI of dimensions [vertex x partition]. Each column in CI contains the assignments of each vertex to a class/community/module. This function aggregates the partitions in CI into a square [vertex x vertex] agreement matrix D, whose elements indicate the number of times any two vertices were assigned to the same class. In the case that the number of nodes and partitions in CI is large (greater than ~1000 nodes or greater than ~1000 partitions), the script can be made faster by computing D in pieces. The optional input BUFFSZ determines the size of each piece. Trial and error has found that BUFFSZ ~ 150 works well. Parameters ---------- ci : NxM np.ndarray set of M (possibly degenerate) partitions of N nodes buffsz : int sets buffer size. Default value=1000 Returns ------- D : NxN np.ndarray agreement matrix
4.843616
1.665178
2.908767
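A small illustration (assuming agreement and its dummyvar helper are in scope, as in bctpy):

import numpy as np

# three partitions (columns) of four nodes (rows)
ci = np.array([[1, 1, 1],
               [1, 2, 1],
               [2, 2, 2],
               [2, 1, 2]])
D = agreement(ci)
# D[0, 1] == 2: nodes 0 and 1 share a module in two of the three partitions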
''' D = AGREEMENT_WEIGHTED(CI,WTS) is identical to AGREEMENT, with the exception that each partition's contribution is weighted according to the corresponding scalar value stored in the vector WTS. As an example, suppose CI contained partitions obtained using some heuristic for maximizing modularity. A possible choice for WTS might be the Q metric (Newman's modularity score). Such a choice would add more weight to higher modularity partitions. NOTE: Unlike AGREEMENT, this script does not have the input argument BUFFSZ. Parameters ---------- ci : MxN np.ndarray set of M (possibly degenerate) partitions of N nodes wts : Mx1 np.ndarray relative weight of each partition Returns ------- D : NxN np.ndarray weighted agreement matrix ''' ci = np.array(ci) m, n = ci.shape wts = np.array(wts) / np.sum(wts) D = np.zeros((n, n)) for i in range(m): d = dummyvar(ci[i, :].reshape(1, n)) D += np.dot(d, d.T) * wts[i] return D
def agreement_weighted(ci, wts)
D = AGREEMENT_WEIGHTED(CI,WTS) is identical to AGREEMENT, with the exception that each partition's contribution is weighted according to the corresponding scalar value stored in the vector WTS. As an example, suppose CI contained partitions obtained using some heuristic for maximizing modularity. A possible choice for WTS might be the Q metric (Newman's modularity score). Such a choice would add more weight to higher modularity partitions. NOTE: Unlike AGREEMENT, this script does not have the input argument BUFFSZ. Parameters ---------- ci : MxN np.ndarray set of M (possibly degenerate) partitions of N nodes wts : Mx1 np.ndarray relative weight of each partition Returns ------- D : NxN np.ndarray weighted agreement matrix
7.127846
1.605926
4.438464
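A sketch of the weighted variant; note the MxN orientation, the transpose of what agreement() expects (assumes agreement_weighted and dummyvar are in scope):

import numpy as np

ci = np.array([[1, 1, 2, 2],    # partitions are rows here
               [1, 2, 2, 1],
               [1, 1, 2, 2]])
wts = np.array([0.5, 0.25, 0.25])
D = agreement_weighted(ci, wts)
# nodes 0 and 1 co-occur in the partitions carrying weights 0.5 and 0.25,
# so their weighted agreement should come out to 0.75
# (wts is normalized to sum to 1 internally)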
''' The clustering coefficient is the fraction of triangles around a node (equiv. the fraction of a node's neighbors that are neighbors of each other). Parameters ---------- A : NxN np.ndarray binary directed connection matrix Returns ------- C : Nx1 np.ndarray clustering coefficient vector Notes ----- Methodological note: In directed graphs, 3 nodes generate up to 8 triangles (2*2*2 edges). The number of existing triangles is the main diagonal of S^3/2. The number of all (in or out) neighbour pairs is K(K-1)/2. Each neighbour pair may generate two triangles. "False pairs" are i<->j edge pairs (these do not generate triangles). The number of false pairs is the main diagonal of A^2. Thus the maximum possible number of triangles = (2 edges)*([ALL PAIRS] - [FALSE PAIRS]) = 2 * (K(K-1)/2 - diag(A^2)) = K(K-1) - 2(diag(A^2)) ''' S = A + A.T # symmetrized input graph K = np.sum(S, axis=1) # total degree (in+out) cyc3 = np.diag(np.dot(S, np.dot(S, S))) / 2 # number of 3-cycles K[np.where(cyc3 == 0)] = np.inf # if no 3-cycles exist, make C=0 # number of all possible 3 cycles CYC3 = K * (K - 1) - 2 * np.diag(np.dot(A, A)) C = cyc3 / CYC3 return C
def clustering_coef_bd(A)
The clustering coefficient is the fraction of triangles around a node (equiv. the fraction of a node's neighbors that are neighbors of each other). Parameters ---------- A : NxN np.ndarray binary directed connection matrix Returns ------- C : Nx1 np.ndarray clustering coefficient vector Notes ----- Methodological note: In directed graphs, 3 nodes generate up to 8 triangles (2*2*2 edges). The number of existing triangles is the main diagonal of S^3/2. The number of all (in or out) neighbour pairs is K(K-1)/2. Each neighbour pair may generate two triangles. "False pairs" are i<->j edge pairs (these do not generate triangles). The number of false pairs is the main diagonal of A^2. Thus the maximum possible number of triangles = (2 edges)*([ALL PAIRS] - [FALSE PAIRS]) = 2 * (K(K-1)/2 - diag(A^2)) = K(K-1) - 2(diag(A^2))
7.036688
1.793374
3.923715
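A worked case (assuming clustering_coef_bd is in scope): in a directed 3-cycle, each node's neighbour pair realizes one of its two possible directed triangles, so C == 0.5 everywhere.

import numpy as np

A = np.array([[0, 1, 0],
              [0, 0, 1],
              [1, 0, 0]])
C = clustering_coef_bd(A)   # array([0.5, 0.5, 0.5])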
''' The clustering coefficient is the fraction of triangles around a node (equiv. the fraction of a node's neighbors that are neighbors of each other). Parameters ---------- G : NxN np.ndarray binary undirected connection matrix Returns ------- C : Nx1 np.ndarray clustering coefficient vector ''' n = len(G) C = np.zeros((n,)) for u in range(n): V, = np.where(G[u, :]) k = len(V) if k >= 2: # degree must be at least 2 S = G[np.ix_(V, V)] C[u] = np.sum(S) / (k * k - k) return C
def clustering_coef_bu(G)
The clustering coefficient is the fraction of triangles around a node (equiv. the fraction of a node's neighbors that are neighbors of each other). Parameters ---------- G : NxN np.ndarray binary undirected connection matrix Returns ------- C : Nx1 np.ndarray clustering coefficient vector
4.052508
2.102351
1.927607
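A worked case (assuming clustering_coef_bu is in scope):

import numpy as np

G = np.array([[0, 1, 1, 1],
              [1, 0, 1, 0],
              [1, 1, 0, 0],
              [1, 0, 0, 0]])
C = clustering_coef_bu(G)
# node 0: neighbours {1, 2, 3}, only the (1, 2) pair is linked -> C[0] == 1/3
# nodes 1 and 2 sit in a closed triangle -> C == 1; node 3 has degree 1 -> C == 0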
''' The weighted clustering coefficient is the average "intensity" of triangles around a node. Parameters ---------- W : NxN np.ndarray weighted directed connection matrix Returns ------- C : Nx1 np.ndarray clustering coefficient vector Notes ----- Methodological note (also see clustering_coef_bd) The weighted modification is as follows: - The numerator: adjacency matrix is replaced with weights matrix ^ 1/3 - The denominator: no changes from the binary version The above reduces to symmetric and/or binary versions of the clustering coefficient for respective graphs. ''' A = np.logical_not(W == 0).astype(float) # adjacency matrix S = cuberoot(W) + cuberoot(W.T) # symmetrized weights matrix ^1/3 K = np.sum(A + A.T, axis=1) # total degree (in+out) cyc3 = np.diag(np.dot(S, np.dot(S, S))) / 2 # number of 3-cycles K[np.where(cyc3 == 0)] = np.inf # if no 3-cycles exist, make C=0 # number of all possible 3 cycles CYC3 = K * (K - 1) - 2 * np.diag(np.dot(A, A)) C = cyc3 / CYC3 # clustering coefficient return C
def clustering_coef_wd(W)
The weighted clustering coefficient is the average "intensity" of triangles around a node. Parameters ---------- W : NxN np.ndarray weighted directed connection matrix Returns ------- C : Nx1 np.ndarray clustering coefficient vector Notes ----- Methodological note (also see clustering_coef_bd) The weighted modification is as follows: - The numerator: adjacency matrix is replaced with weights matrix ^ 1/3 - The denominator: no changes from the binary version The above reduces to symmetric and/or binary versions of the clustering coefficient for respective graphs.
6.696708
2.536321
2.640323
''' The weighted clustering coefficient is the average "intensity" of triangles around a node. Parameters ---------- W : NxN np.ndarray weighted undirected connection matrix Returns ------- C : Nx1 np.ndarray clustering coefficient vector ''' K = np.array(np.sum(np.logical_not(W == 0), axis=1), dtype=float) ws = cuberoot(W) cyc3 = np.diag(np.dot(ws, np.dot(ws, ws))) K[np.where(cyc3 == 0)] = np.inf # if no 3-cycles exist, set C=0 C = cyc3 / (K * (K - 1)) return C
def clustering_coef_wu(W)
The weighted clustering coefficient is the average "intensity" of triangles around a node. Parameters ---------- W : NxN np.ndarray weighted undirected connection matrix Returns ------- C : Nx1 np.ndarray clustering coefficient vector
4.874584
2.961513
1.645978
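A worked case (assuming clustering_coef_wu and the cuberoot helper are in scope): for a single triangle of uniform weight, the triangle "intensity" equals the edge weight.

import numpy as np

W = 0.5 * (np.ones((3, 3)) - np.eye(3))   # triangle, all weights 0.5
C = clustering_coef_wu(W)                 # array([0.5, 0.5, 0.5])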
''' Returns the components of an undirected graph specified by the binary and undirected adjacency matrix adj. Components and their constituent nodes are assigned the same index and stored in the vector, comps. The vector, comp_sizes, contains the number of nodes belonging to each component. Parameters ---------- A : NxN np.ndarray binary undirected adjacency matrix no_depend : Any Does nothing, included for backwards compatibility Returns ------- comps : Nx1 np.ndarray vector of component assignments for each node comp_sizes : Mx1 np.ndarray vector of component sizes Notes ----- Note: disconnected nodes will appear as components with a component size of 1 Note: The identity of each component (i.e. its numerical value in the result) is not guaranteed to be identical to the value returned by the BCT MATLAB code, although the component topology is. Many thanks to Nick Cullen for providing this implementation ''' if not np.all(A == A.T): # ensure matrix is undirected raise BCTParamError('get_components can only be computed for undirected' ' matrices. If your matrix is noisy, correct it with np.around') A = binarize(A, copy=True) n = len(A) np.fill_diagonal(A, 1) edge_map = [{u,v} for u in range(n) for v in range(n) if A[u,v] == 1] union_sets = [] for item in edge_map: temp = [] for s in union_sets: if not s.isdisjoint(item): item = s.union(item) else: temp.append(s) temp.append(item) union_sets = temp comps = np.array([i+1 for v in range(n) for i in range(len(union_sets)) if v in union_sets[i]]) comp_sizes = np.array([len(s) for s in union_sets]) return comps, comp_sizes
def get_components(A, no_depend=False)
Returns the components of an undirected graph specified by the binary and undirected adjacency matrix adj. Components and their constituent nodes are assigned the same index and stored in the vector, comps. The vector, comp_sizes, contains the number of nodes belonging to each component. Parameters ---------- A : NxN np.ndarray binary undirected adjacency matrix no_depend : Any Does nothing, included for backwards compatibility Returns ------- comps : Nx1 np.ndarray vector of component assignments for each node comp_sizes : Mx1 np.ndarray vector of component sizes Notes ----- Note: disconnected nodes will appear as components with a component size of 1 Note: The identity of each component (i.e. its numerical value in the result) is not guaranteed to be identical to the value returned by the BCT MATLAB code, although the component topology is. Many thanks to Nick Cullen for providing this implementation
5.171756
1.983778
2.607024
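A quick sketch (assuming get_components and the binarize helper it calls are in scope):

import numpy as np

A = np.array([[0, 1, 0, 0],
              [1, 0, 0, 0],
              [0, 0, 0, 1],
              [0, 0, 1, 0]])
comps, comp_sizes = get_components(A)
# comp_sizes == [2, 2]; comps gives one label to nodes {0, 1} and
# another to nodes {2, 3} (the label values themselves may vary)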
''' Transitivity is the ratio of 'triangles to triplets' in the network. (A classical version of the clustering coefficient). Parameters ---------- A : NxN np.ndarray binary directed connection matrix Returns ------- T : float transitivity scalar Notes ----- Methodological note: In directed graphs, 3 nodes generate up to 8 triangles (2*2*2 edges). The number of existing triangles is the main diagonal of S^3/2. The number of all (in or out) neighbour pairs is K(K-1)/2. Each neighbour pair may generate two triangles. "False pairs" are i<->j edge pairs (these do not generate triangles). The number of false pairs is the main diagonal of A^2. Thus the maximum possible number of triangles = (2 edges)*([ALL PAIRS] - [FALSE PAIRS]) = 2 * (K(K-1)/2 - diag(A^2)) = K(K-1) - 2(diag(A^2)) ''' S = A + A.T # symmetrized input graph K = np.sum(S, axis=1) # total degree (in+out) cyc3 = np.diag(np.dot(S, np.dot(S, S))) / 2 # number of 3-cycles CYC3 = K * (K - 1) - 2 * np.diag(np.dot(A, A)) # number of all possible 3-cycles return np.sum(cyc3) / np.sum(CYC3)
def transitivity_bd(A)
Transitivity is the ratio of 'triangles to triplets' in the network. (A classical version of the clustering coefficient). Parameters ---------- A : NxN np.ndarray binary directed connection matrix Returns ------- T : float transitivity scalar Notes ----- Methodological note: In directed graphs, 3 nodes generate up to 8 triangles (2*2*2 edges). The number of existing triangles is the main diagonal of S^3/2. The number of all (in or out) neighbour pairs is K(K-1)/2. Each neighbour pair may generate two triangles. "False pairs" are i<->j edge pairs (these do not generate triangles). The number of false pairs is the main diagonal of A^2. Thus the maximum possible number of triangles = (2 edges)*([ALL PAIRS] - [FALSE PAIRS]) = 2 * (K(K-1)/2 - diag(A^2)) = K(K-1) - 2(diag(A^2))
7.731163
1.705841
4.532171
''' Transitivity is the ratio of 'triangles to triplets' in the network. (A classical version of the clustering coefficient). Parameters ---------- A : NxN np.ndarray binary undirected connection matrix Returns ------- T : float transitivity scalar ''' tri3 = np.trace(np.dot(A, np.dot(A, A))) tri2 = np.sum(np.dot(A, A)) - np.trace(np.dot(A, A)) return tri3 / tri2
def transitivity_bu(A)
Transitivity is the ratio of 'triangles to triplets' in the network. (A classical version of the clustering coefficient). Parameters ---------- A : NxN np.ndarray binary undirected connection matrix Returns ------- T : float transitivity scalar
4.89906
1.95035
2.511887
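A worked case (assuming transitivity_bu is in scope): one triangle plus a pendant edge gives 3 * (1 triangle) / (5 connected triples).

import numpy as np

A = np.array([[0, 1, 1, 0],
              [1, 0, 1, 0],
              [1, 1, 0, 1],
              [0, 0, 1, 0]])
T = transitivity_bu(A)   # 0.6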
''' Transitivity is the ratio of 'triangles to triplets' in the network. (A classical version of the clustering coefficient). Parameters ---------- W : NxN np.ndarray weighted undirected connection matrix Returns ------- T : float transitivity scalar ''' K = np.sum(np.logical_not(W == 0), axis=1) ws = cuberoot(W) cyc3 = np.diag(np.dot(ws, np.dot(ws, ws))) return np.sum(cyc3, axis=0) / np.sum(K * (K - 1), axis=0)
def transitivity_wu(W)
Transitivity is the ratio of 'triangles to triplets' in the network. (A classical version of the clustering coefficient). Parameters ---------- W : NxN np.ndarray weighted undirected connection matrix Returns ------- T : float transitivity scalar
5.743527
2.695906
2.130463
''' Convert from a community index vector to a 2D python list of modules. The list is a pure python list, not requiring numpy. Parameters ---------- ci : Nx1 np.ndarray the community index vector Returns ------- ls : listof(list) pure python list with lowest value zero-indexed (regardless of the indexing of ci) ''' if not np.size(ci): return ci # list is empty _, ci = np.unique(ci, return_inverse=True) ci += 1 nr_indices = int(max(ci)) ls = [] for c in range(nr_indices): ls.append([]) for i, x in enumerate(ci): ls[ci[i] - 1].append(i) return ls
def ci2ls(ci)
Convert from a community index vector to a 2D python list of modules. The list is a pure python list, not requiring numpy. Parameters ---------- ci : Nx1 np.ndarray the community index vector Returns ------- ls : listof(list) pure python list with lowest value zero-indexed (regardless of the indexing of ci)
6.065526
2.007501
3.021432
''' Convert from a 2D python list of modules to a community index vector. The list is a pure python list, not requiring numpy. Parameters ---------- ls : listof(list) pure python list with lowest value zero-indexed (regardless of value of zeroindexed parameter) zeroindexed : bool If True, ci uses zero-indexing (lowest value is 0). Defaults to False. Returns ------- ci : Nx1 np.ndarray community index vector ''' if ls is None or np.size(ls) == 0: return () # list is empty nr_indices = sum(map(len, ls)) ci = np.zeros((nr_indices,), dtype=int) z = int(not zeroindexed) for i, x in enumerate(ls): for j, y in enumerate(ls[i]): ci[ls[i][j]] = i + z return ci
def ls2ci(ls, zeroindexed=False)
Convert from a 2D python list of modules to a community index vector. The list is a pure python list, not requiring numpy. Parameters ---------- ls : listof(list) pure python list with lowest value zero-indexed (regardless of value of zeroindexed parameter) zeroindexed : bool If True, ci uses zero-indexing (lowest value is 0). Defaults to False. Returns ------- ci : Nx1 np.ndarray community index vector
6.060842
2.14356
2.827465
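A round-trip sketch for the two converters above (assumed in scope); ci2ls re-indexes module labels, so the recovered vector uses canonical one-based labels rather than the originals:

import numpy as np

ci = np.array([3, 3, 7, 7, 3])
ls = ci2ls(ci)     # [[0, 1, 4], [2, 3]]
ci2 = ls2ci(ls)    # array([1, 1, 2, 2, 1])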
out = np.squeeze(arr, *args, **kwargs) if np.ndim(out) == 0: out = out.reshape((1,)) return out
def _safe_squeeze(arr, *args, **kwargs)
numpy.squeeze will reduce a 1-item array down to a zero-dimensional "array", which is not necessarily desirable. This function does the squeeze operation, but ensures that there is at least 1 dimension in the output.
2.473917
2.765452
0.89458
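A sketch of the difference from plain numpy.squeeze:

import numpy as np

a = np.array([[5]])
np.squeeze(a).ndim        # 0 -- collapses to a zero-dimensional scalar
_safe_squeeze(a).shape    # (1,) -- still a proper one-element vector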
''' This function quantifies the distance between pairs of community partitions with information theoretic measures. Parameters ---------- cx : Nx1 np.ndarray community affiliation vector X cy : Nx1 np.ndarray community affiliation vector Y Returns ------- VIn : float normalized variation of information MIn : float normalized mutual information Notes ----- (Definitions: VIn = [H(X) + H(Y) - 2MI(X,Y)]/log(n) MIn = 2MI(X,Y)/[H(X)+H(Y)] where H is entropy, MI is mutual information and n is number of nodes) ''' n = np.size(cx) _, cx = np.unique(cx, return_inverse=True) _, cy = np.unique(cy, return_inverse=True) _, cxy = np.unique(cx + cy * 1j, return_inverse=True) cx += 1 cy += 1 cxy += 1 Px = np.histogram(cx, bins=np.max(cx))[0] / n Py = np.histogram(cy, bins=np.max(cy))[0] / n Pxy = np.histogram(cxy, bins=np.max(cxy))[0] / n Hx = -np.sum(Px * np.log(Px)) Hy = -np.sum(Py * np.log(Py)) Hxy = -np.sum(Pxy * np.log(Pxy)) Vin = (2 * Hxy - Hx - Hy) / np.log(n) Min = 2 * (Hx + Hy - Hxy) / (Hx + Hy) return Vin, Min
def partition_distance(cx, cy)
This function quantifies the distance between pairs of community partitions with information theoretic measures. Parameters ---------- cx : Nx1 np.ndarray community affiliation vector X cy : Nx1 np.ndarray community affiliation vector Y Returns ------- VIn : float normalized variation of information MIn : float normalized mutual information Notes ----- (Definitions: VIn = [H(X) + H(Y) - 2MI(X,Y)]/log(n) MIn = 2MI(X,Y)/[H(X)+H(Y)] where H is entropy, MI is mutual information and n is number of nodes)
2.618316
1.442066
1.81567
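A quick sketch (assuming partition_distance is in scope):

import numpy as np

cx = np.array([1, 1, 2, 2])
cy = np.array([1, 1, 2, 3])
VIn, MIn = partition_distance(cx, cy)
# identical partitions give VIn == 0 and MIn == 1; increasingly
# dissimilar partitions push VIn toward 1 and MIn toward 0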
''' The binary reachability matrix describes reachability between all pairs of nodes. An entry (u,v)=1 means that there exists a path from node u to node v; alternatively (u,v)=0. The distance matrix contains lengths of shortest paths between all pairs of nodes. An entry (u,v) represents the length of shortest path from node u to node v. The average shortest path length is the characteristic path length of the network. Parameters ---------- CIJ : NxN np.ndarray binary directed/undirected connection matrix Returns ------- R : NxN np.ndarray binary reachability matrix D : NxN np.ndarray distance matrix Notes ----- slower but less memory intensive than "reachdist.m". ''' n = len(CIJ) D = np.zeros((n, n)) for i in range(n): D[i, :], _ = breadth(CIJ, i) D[D == 0] = np.inf R = (D != np.inf) return R, D
def breadthdist(CIJ)
The binary reachability matrix describes reachability between all pairs of nodes. An entry (u,v)=1 means that there exists a path from node u to node v; alternatively (u,v)=0. The distance matrix contains lengths of shortest paths between all pairs of nodes. An entry (u,v) represents the length of shortest path from node u to node v. The average shortest path length is the characteristic path length of the network. Parameters ---------- CIJ : NxN np.ndarray binary directed/undirected connection matrix Returns ------- R : NxN np.ndarray binary reachability matrix D : NxN np.ndarray distance matrix Notes ----- slower but less memory intensive than "reachdist.m".
3.955448
1.44159
2.743809
''' Implementation of breadth-first search. Parameters ---------- CIJ : NxN np.ndarray binary directed/undirected connection matrix source : int source vertex Returns ------- distance : Nx1 np.ndarray vector of distances between source and ith vertex (0 for source) branch : Nx1 np.ndarray vertex that precedes i in the breadth-first search (-1 for source) Notes ----- Breadth-first search tree does not contain all paths (or all shortest paths), but allows the determination of at least one path with minimum distance. The entire graph is explored, starting from source vertex 'source'. ''' n = len(CIJ) # colors: white,gray,black white = 0 gray = 1 black = 2 color = np.zeros((n,)) distance = np.inf * np.ones((n,)) branch = np.zeros((n,)) # start on vertex source color[source] = gray distance[source] = 0 branch[source] = -1 Q = [source] # keep going until the entire graph is explored while Q: u = Q[0] ns, = np.where(CIJ[u, :]) for v in ns: # this allows the source distance itself to be recorded if distance[v] == 0: distance[v] = distance[u] + 1 if color[v] == white: color[v] = gray distance[v] = distance[u] + 1 branch[v] = u Q.append(v) Q = Q[1:] color[u] = black return distance, branch
def breadth(CIJ, source)
Implementation of breadth-first search. Parameters ---------- CIJ : NxN np.ndarray binary directed/undirected connection matrix source : int source vertex Returns ------- distance : Nx1 np.ndarray vector of distances between source and ith vertex (0 for source) branch : Nx1 np.ndarray vertex that precedes i in the breadth-first search (-1 for source) Notes ----- Breadth-first search tree does not contain all paths (or all shortest paths), but allows the determination of at least one path with minimum distance. The entire graph is explored, starting from source vertex 'source'.
3.548594
1.848516
1.919699
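A worked case on a directed chain (assuming breadth is in scope):

import numpy as np

CIJ = np.array([[0, 1, 0, 0],    # 0 -> 1 -> 2 -> 3
                [0, 0, 1, 0],
                [0, 0, 0, 1],
                [0, 0, 0, 0]])
distance, branch = breadth(CIJ, 0)
# distance == [0, 1, 2, 3]; branch == [-1, 0, 1, 2]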
''' The characteristic path length is the average shortest path length in the network. The global efficiency is the average inverse shortest path length in the network. Parameters ---------- D : NxN np.ndarray distance matrix include_diagonal : bool If True, include the weights on the diagonal. Default value is False. include_infinite : bool If True, include infinite distances in calculation. Default value is True. Returns ------- lambda : float characteristic path length efficiency : float global efficiency ecc : Nx1 np.ndarray eccentricity at each vertex radius : float radius of graph diameter : float diameter of graph Notes ----- The input distance matrix may be obtained with any of the distance functions, e.g. distance_bin, distance_wei. Characteristic path length is calculated as the global mean of the distance matrix D; the include_diagonal and include_infinite flags control whether diagonal and infinite entries contribute to that mean. ''' D = D.copy() if not include_diagonal: np.fill_diagonal(D, np.nan) if not include_infinite: D[np.isinf(D)] = np.nan Dv = D[np.logical_not(np.isnan(D))].ravel() # mean of finite entries of D[G] lambda_ = np.mean(Dv) # efficiency: mean of inverse entries of D[G] efficiency = np.mean(1 / Dv) # eccentricity for each vertex (ignore inf) ecc = np.array(np.ma.masked_where(np.isnan(D), D).max(axis=1)) # radius of graph radius = np.min(ecc) # but what about zeros? # diameter of graph diameter = np.max(ecc) return lambda_, efficiency, ecc, radius, diameter
def charpath(D, include_diagonal=False, include_infinite=True)
The characteristic path length is the average shortest path length in the network. The global efficiency is the average inverse shortest path length in the network. Parameters ---------- D : NxN np.ndarray distance matrix include_diagonal : bool If True, include the weights on the diagonal. Default value is False. include_infinite : bool If True, include infinite distances in calculation. Default value is True. Returns ------- lambda : float characteristic path length efficiency : float global efficiency ecc : Nx1 np.ndarray eccentricity at each vertex radius : float radius of graph diameter : float diameter of graph Notes ----- The input distance matrix may be obtained with any of the distance functions, e.g. distance_bin, distance_wei. Characteristic path length is calculated as the global mean of the distance matrix D; the include_diagonal and include_infinite flags control whether diagonal and infinite entries contribute to that mean.
4.422065
1.776863
2.488692
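A worked case (assuming charpath is in scope), using the distance matrix of a 3-node path graph 0 - 1 - 2:

import numpy as np

D = np.array([[0., 1., 2.],
              [1., 0., 1.],
              [2., 1., 0.]])
lambda_, efficiency, ecc, radius, diameter = charpath(D)
# lambda_ == 4/3 (mean of the six off-diagonal distances),
# efficiency == 5/6, ecc == [2, 1, 2], radius == 1, diameter == 2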
''' Cycles are paths which begin and end at the same node. Cycle probability for path length d is the fraction of all paths of length d-1 that may be extended to form cycles of length d. Parameters ---------- Pq : NxNxQ np.ndarray Path matrix with Pq[i,j,q] = number of paths from i to j of length q. Produced by findpaths() Returns ------- fcyc : Qx1 np.ndarray fraction of all paths that are cycles for each path length q pcyc : Qx1 np.ndarray probability that a non-cyclic path of length q-1 can be extended to form a cycle of length q for each path length q ''' # note: fcyc[1] must be zero, as there cannot be cycles of length 1 fcyc = np.zeros(np.size(Pq, axis=2)) for q in range(np.size(Pq, axis=2)): if np.sum(Pq[:, :, q]) > 0: fcyc[q] = np.sum(np.diag(Pq[:, :, q])) / np.sum(Pq[:, :, q]) else: fcyc[q] = 0 # note: pcyc[1] is not defined (set to zero) # note: pcyc[2] is equal to the fraction of reciprocal connections # note: there are no non-cyclic paths of length N and no cycles of len N+1 pcyc = np.zeros(np.size(Pq, axis=2)) for q in range(np.size(Pq, axis=2)): if np.sum(Pq[:, :, q - 1]) - np.sum(np.diag(Pq[:, :, q - 1])) > 0: pcyc[q] = (np.sum(np.diag(Pq[:, :, q])) / (np.sum(Pq[:, :, q - 1]) - np.sum(np.diag(Pq[:, :, q - 1])))) else: pcyc[q] = 0 return fcyc, pcyc
def cycprob(Pq)
Cycles are paths which begin and end at the same node. Cycle probability for path length d is the fraction of all paths of length d-1 that may be extended to form cycles of length d. Parameters ---------- Pq : NxNxQ np.ndarray Path matrix with Pq[i,j,q] = number of paths from i to j of length q. Produced by findpaths() Returns ------- fcyc : Qx1 np.ndarray fraction of all paths that are cycles for each path length q pcyc : Qx1 np.ndarray probability that a non-cyclic path of length q-1 can be extended to form a cycle of length q for each path length q
3.001348
1.78748
1.679094
''' The distance matrix contains lengths of shortest paths between all pairs of nodes. An entry (u,v) represents the length of shortest path from node u to node v. The average shortest path length is the characteristic path length of the network. Parameters ---------- G : NxN np.ndarray binary directed/undirected connection matrix Returns ------- D : NxN distance matrix Notes ----- Lengths between disconnected nodes are set to Inf. Lengths on the main diagonal are set to 0. Algorithm: Algebraic shortest paths. ''' G = binarize(G, copy=True) D = np.eye(len(G)) n = 1 nPATH = G.copy() # n path matrix L = (nPATH != 0) # shortest n-path matrix while np.any(L): D += n * L n += 1 nPATH = np.dot(nPATH, G) L = (nPATH != 0) * (D == 0) D[D == 0] = np.inf # disconnected nodes are assigned d=inf np.fill_diagonal(D, 0) return D
def distance_bin(G)
The distance matrix contains lengths of shortest paths between all pairs of nodes. An entry (u,v) represents the length of shortest path from node u to node v. The average shortest path length is the characteristic path length of the network. Parameters ---------- G : NxN np.ndarray binary directed/undirected connection matrix Returns ------- D : NxN distance matrix Notes ----- Lengths between disconnected nodes are set to Inf. Lengths on the main diagonal are set to 0. Algorithm: Algebraic shortest paths.
4.978838
2.326567
2.139993
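A quick check (assuming distance_bin and the binarize helper it calls are in scope), again on a 3-node path graph:

import numpy as np

G = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]])
D = distance_bin(G)
# D == [[0., 1., 2.],
#       [1., 0., 1.],
#       [2., 1., 0.]]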