code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
pass
def search(self, id_list: Iterable, negated_ids: Iterable, limit: Optional[int], taxon_filter: Optional, category_filter: Optional, method: Optional) -> SimResult
Given an input iterable of classes or individuals, resolves to target classes (phenotypes, GO terms, etc.) and provides a ranking of similar profiles
42,212.539063
6,695.5625
6.304555
self.merged_ontology.merge([ont]) syns = ont.all_synonyms(include_label=True) include_id = self._is_meaningful_ids() logging.info("Include IDs as synonyms: {}".format(include_id)) if include_id: for n in ont.nodes(): v = n # Get fragment if v.startswith('http'): v = re.sub('.*/','',v) v = re.sub('.*#','',v) syns.append(Synonym(n, val=v, pred='label')) logging.info("Indexing {} syns in {}".format(len(syns),ont)) logging.info("Distinct lexical values: {}".format(len(self.lmap.keys()))) for syn in syns: self.index_synonym(syn, ont) for nid in ont.nodes(): self.id_to_ontology_map[nid].append(ont)
def index_ontology(self, ont)
Adds an ontology to the index. This iterates through all labels and synonyms in the ontology, creating an index
5.915649
5.991909
0.987273
if not syn.val: if syn.pred == 'label': if not self._is_meaningful_ids(): if not ont.is_obsolete(syn.class_id): pass #logging.error('Use meaningful ids if label not present: {}'.format(syn)) else: logging.warning("Incomplete syn: {}".format(syn)) return if self.exclude_obsolete and ont.is_obsolete(syn.class_id): return syn.ontology = ont prefix,_ = ont.prefix_fragment(syn.class_id) v = syn.val caps_match = re.match('[A-Z]+',v) if caps_match: # if at least a third of the length is leading caps, assume abbreviation if caps_match.span()[1] >= len(v)/3: syn.is_abbreviation(True) # chebi 'synonyms' are often not real synonyms # https://github.com/ebi-chebi/ChEBI/issues/3294 if not re.match('.*[a-zA-Z]',v): if prefix != 'CHEBI': logging.warning('Ignoring suspicious synonym: {}'.format(syn)) return v = self._standardize_label(v) # TODO: do this once ahead of time wsmap = {} for w,s in self.wsmap.items(): wsmap[w] = s for ss in self._get_config_val(prefix,'synsets',[]): # TODO: weights wsmap[ss['synonym']] = ss['word'] nv = self._normalize_label(v, wsmap) self._index_synonym_val(syn, v) nweight = self._get_config_val(prefix, 'normalized_form_confidence', 0.8) if nweight > 0 and not syn.is_abbreviation(): if nv != v: nsyn = Synonym(syn.class_id, val=syn.val, pred=syn.pred, lextype=syn.lextype, ontology=ont, confidence=syn.confidence * nweight) self._index_synonym_val(nsyn, nv)
def index_synonym(self, syn, ont)
Index a synonym. Typically not called from outside this object; called by `index_ontology`
5.330287
5.288356
1.007929
toks = [] for tok in list(set(self.npattern.sub(' ', s).split(' '))): if tok in wsmap: tok=wsmap[tok] if tok != "": toks.append(tok) toks.sort() return " ".join(toks)
def _normalize_label(self, s, wsmap)
normalized form of a synonym
3.951418
3.684422
1.072466
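To make the normalization rule above concrete, here is a minimal standalone sketch; the token-splitting pattern and the word-substitution map are assumptions, not the library's configured defaults.

import re

# assumed token-splitting pattern; the library compiles its own self.npattern
npattern = re.compile(r'[\W_]+')

def normalize_label(s, wsmap):
    # split on non-word characters, de-duplicate, apply word substitutions,
    # sort tokens and rejoin -- mirroring _normalize_label above
    toks = []
    for tok in set(npattern.sub(' ', s).split(' ')):
        if tok in wsmap:
            tok = wsmap[tok]
        if tok != "":
            toks.append(tok)
    toks.sort()
    return " ".join(toks)

# hypothetical substitution map
print(normalize_label("Complex 1, mitochondrial", {'1': 'i'}))
# -> 'Complex i mitochondrial'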
logging.info("scoring xrefs by semantic similarity for {} nodes in {}".format(len(xg.nodes()), ont)) for (i,j,d) in xg.edges(data=True): pfx1 = self._id_to_ontology(i) pfx2 = self._id_to_ontology(j) ancs1 = self._blanket(i) ancs2 = self._blanket(j) s1,_,_ = self._sim(xg, ancs1, ancs2, pfx1, pfx2) s2,_,_ = self._sim(xg, ancs2, ancs1, pfx2, pfx1) s = 1 - ((1-s1) * (1-s2)) logging.debug("Score {} x {} = {} x {} = {} // {}".format(i,j,s1,s2,s, d)) xg[i][j][self.SIMSCORES] = (s1,s2) xg[i][j][self.SCORE] *= s
def score_xrefs_by_semsim(self, xg, ont=None)
Given an xref graph (see :ref:`get_xref_graph`), this will adjust scores based on the semantic similarity of matches.
3.231274
3.201993
1.009145
xancs1 = set() for a in ancs1: if a in xg: # TODO: restrict this to neighbors in single ontology for n in xg.neighbors(a): pfx = self._id_to_ontology(n) if pfx == pfx2: xancs1.add(n) logging.debug('SIM={}/{} ## {}'.format(len(xancs1.intersection(ancs2)), len(xancs1), xancs1.intersection(ancs2), xancs1)) n_shared = len(xancs1.intersection(ancs2)) n_total = len(xancs1) return (1+n_shared) / (1+n_total), n_shared, n_total
def _sim(self, xg, ancs1, ancs2, pfx1, pfx2)
Compare two lineages
3.759895
3.721941
1.010198
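As a worked instance of the scoring above (the counts are illustrative only): each direction is smoothed as (1 + shared) / (1 + total) in _sim, and score_xrefs_by_semsim then combines the two directions.

# forward direction: 3 of 4 cross-referenced ancestors are shared
s1 = (1 + 3) / (1 + 4)           # 0.8
# reverse direction: 1 of 3 shared
s2 = (1 + 1) / (1 + 3)           # 0.5
# combined score applied to the xref edge: 1 - (1 - s1)(1 - s2)
s = 1 - ((1 - s1) * (1 - s2))    # 0.9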
ont = self.merged_ontology for (i,j,d) in xg1.edges(data=True): ont_left = self._id_to_ontology(i) ont_right = self._id_to_ontology(j) unique_lr = True num_xrefs_left = 0 same_left = False if i in xg2: for j2 in xg2.neighbors(i): ont_right2 = self._id_to_ontology(j2) if ont_right2 == ont_right: unique_lr = False num_xrefs_left += 1 if j2 == j: same_left = True unique_rl = True num_xrefs_right = 0 same_right = False if j in xg2: for i2 in xg2.neighbors(j): ont_left2 = self._id_to_ontology(i2) if ont_left2 == ont_left: unique_rl = False num_xrefs_right += 1 if i2 == i: same_right = True (x,y) = d['idpair'] xg1[x][y]['left_novel'] = num_xrefs_left==0 xg1[x][y]['right_novel'] = num_xrefs_right==0 xg1[x][y]['left_consistent'] = same_left xg1[x][y]['right_consistent'] = same_right
def compare_to_xrefs(self, xg1, xg2)
Compares a base xref graph with another one
2.197963
2.19481
1.001436
logging.info("assigning best matches for {} nodes".format(len(xg.nodes()))) for i in xg.nodes(): xrefmap = self._neighborscores_by_ontology(xg, i) for (ontid,score_node_pairs) in xrefmap.items(): score_node_pairs.sort(reverse=True) (best_score,best_node) = score_node_pairs[0] logging.info("BEST for {}: {} in {} from {}".format(i, best_node, ontid, score_node_pairs)) edge = xg[i][best_node] dirn = self._dirn(edge, i, best_node) best_kwd = 'best_' + dirn if len(score_node_pairs) == 1 or score_node_pairs[0] > score_node_pairs[1]: edge[best_kwd] = 2 else: edge[best_kwd] = 1 for (score,j) in score_node_pairs: edge_ij = xg[i][j] dirn_ij = self._dirn(edge_ij, i, j) edge_ij['cpr_'+dirn_ij] = score / sum([s for s,_ in score_node_pairs]) for (i,j,edge) in xg.edges(data=True): # reciprocal score is set if (A) i is best for j, and (B) j is best for i rs = 0 if 'best_fwd' in edge and 'best_rev' in edge: rs = edge['best_fwd'] * edge['best_rev'] edge['reciprocal_score'] = rs edge['cpr'] = edge['cpr_fwd'] * edge['cpr_rev']
def assign_best_matches(self, xg)
For each node in the xref graph, tag best match edges
3.332081
3.179307
1.048053
SUBSTRING_WEIGHT = 0.2 WBEST = None sbest = None sxv = self._standardize_label(sx.val) sxp = self._id_to_ontology(sx.class_id) for sy in sys: syv = self._standardize_label(sy.val) syp = self._id_to_ontology(sy.class_id) W = None if sxv == syv: confidence = sx.confidence * sy.confidence if sx.is_abbreviation() or sy.is_abbreviation(): confidence *= self._get_config_val(sxp, 'abbreviation_confidence', 0.5) confidence *= self._get_config_val(syp, 'abbreviation_confidence', 0.5) W = scope_map[sx.scope()][sy.scope()] + logit(confidence/2) elif sxv in syv: W = np.array((-SUBSTRING_WEIGHT, SUBSTRING_WEIGHT, 0, 0)) elif syv in sxv: W = np.array((SUBSTRING_WEIGHT, -SUBSTRING_WEIGHT, 0, 0)) if W is not None: # The best match is determined by the highest magnitude weight if WBEST is None or max(abs(W)) > max(abs(WBEST)): WBEST = W sbest = sy return WBEST, sbest
def _best_match_syn(self, sx, sys, scope_map)
The best match is determined by the highest magnitude weight
3.538199
3.304231
1.070809
g = self.get_xref_graph() m = {} for n in g.neighbors(id): [prefix, local] = n.split(':') if prefix not in m: m[prefix] = [] m[prefix].append(n) return m
def grouped_mappings(self,id)
return all mappings for a node, grouped by ID prefix
3.878722
3.290825
1.178647
g = nx.DiGraph() for (x,y) in self.merged_ontology.get_graph().edges(): g.add_edge(x,y) for (x,y) in xg.edges(): g.add_edge(x,y) g.add_edge(y,x) return list(strongly_connected_components(g))
def cliques(self, xg)
Return all equivalence set cliques, assuming each edge in the xref graph is treated as equivalent, and all edges in ontology are subClassOf Arguments --------- xg : Graph an xref graph Returns ------- list of sets
2.982393
3.248019
0.918219
parser = argparse.ArgumentParser(description='Phenologs' , formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-r', '--resource1', type=str, required=False, help='Name of ontology1') parser.add_argument('-R', '--resource2', type=str, required=False, help='Name of ontology2') parser.add_argument('-T', '--taxon', type=str, default='NCBITaxon:10090', required=False, help='NCBITaxon ID') parser.add_argument('-s', '--search', type=str, default='', required=False, help='Search type. p=partial, r=regex') parser.add_argument('-b', '--background', type=str, default=None, required=False, help='Class to use for background') parser.add_argument('-p', '--pthreshold', type=float, default=0.05, required=False, help='P-value threshold') parser.add_argument('-v', '--verbosity', default=0, action='count', help='Increase output verbosity') parser.add_argument('ids',nargs='*') args = parser.parse_args() if args.verbosity >= 2: logging.basicConfig(level=logging.DEBUG) if args.verbosity == 1: logging.basicConfig(level=logging.INFO) logging.info("Welcome!") ofactory = OntologyFactory() afactory = AssociationSetFactory() handle = args.resource1 ont1 = ofactory.create(args.resource1) ont2 = ofactory.create(args.resource2) logging.info("onts: {} {}".format(ont1, ont2)) searchp = args.search category = 'gene' aset1 = afactory.create(ontology=ont1, subject_category=category, object_category='phenotype', taxon=args.taxon) aset2 = afactory.create(ontology=ont2, subject_category=category, object_category='function', taxon=args.taxon) bg_cls = None if args.background is not None: bg_ids = resolve(ont1,[args.background],searchp) if len(bg_ids) == 0: logging.error("Cannot resolve: '{}' using {} in {}".format(args.background, searchp, ont1)) sys.exit(1) elif len(bg_ids) > 1: logging.error("Multiple matches: '{}' using {} MATCHES={}".format(args.background, searchp,bg_ids)) sys.exit(1) else: [bg_cls] = bg_ids logging.info("Background: {}".format(bg_cls)) for id in resolve(ont1,args.ids,searchp): sample = aset1.query([id],[]) print("Gene set class:{} Gene set: {}".format(id, sample)) bg = None if bg_cls is not None: bg = aset1.query([bg_cls],[]) print("BACKGROUND SUBJECTS: {}".format(bg)) rs = aset2.enrichment_test(sample, bg, threshold=args.pthreshold, labels=True) print("RESULTS: {} < {}".format(len(rs), args.pthreshold)) for r in rs: print(str(r))
def main()
Phenologs
2.806808
2.738214
1.025051
rg = self.rdfgraph g = ontol.get_graph() typemap = {} inds = rg.subjects(RDF.type, OWL.NamedIndividual) for s in inds: for (s,p,o) in rg.triples((s,None,None)): s_id = id(s) p_id = id(p) g.add_node(s_id) if isinstance(o,URIRef): o_id = id(o) if p == RDF.type: if o != OWL.NamedIndividual: if s_id not in typemap: typemap[s_id] = [] typemap[s_id].append(o_id) else: g.add_edge(o_id,s_id,pred=p_id) # propagate label from type for s in typemap.keys(): g.nodes[s]['types'] = typemap[s] if self.tbox_ontology is not None: if 'label' not in g.nodes[s]: g.nodes[s]['label'] = ";".join([self.tbox_ontology.label(x) for x in typemap[s] if self.tbox_ontology.label(x) is not None])
def add_triples(self, ontol)
Adds triples to an ontology object. Currently assumes gocam/lego-style
2.78704
2.847586
0.978738
s = self.render(ontol, **args) if self.outfile is None: print(s) else: f = open(self.outfile, 'w') f.write(s) f.close()
def write(self, ontol, **args)
Write an `ontology` object
2.445529
2.580661
0.947637
subont = ontol.subontology(nodes, **args) return self.render(subont, **args)
def render_subgraph(self, ontol, nodes, **args)
Render an `ontology` object after inducing a subgraph
4.660803
4.369384
1.066696
subont = ontol.subontology(nodes, **args) self.write(subont, **args)
def write_subgraph(self, ontol, nodes, **args)
Write an `ontology` object after inducing a subgraph
5.168396
4.915358
1.051479
if r is None: return "." m = self.config.relsymbolmap if r in m: return m[r] return r
def render_relation(self, r, **args)
Render an object property
8.092246
8.020597
1.008933
if query_ids is None: query_ids = [] marker = "" if n in query_ids: marker = " * " label = ontol.label(n) s = None if label is not None: s = '{} ! {}{}'.format(n, label, marker) else: s = str(n) if self.config.show_text_definition: td = ontol.text_definition(n) if td: s += ' "{}"'.format(td.val) return s
def render_noderef(self, ontol, n, query_ids=None, **args)
Render a node object
3.985737
3.955012
1.007769
w = None if fmt == 'tree': w = AsciiTreeGraphRenderer() elif fmt == 'dot': w = DotGraphRenderer(image_format='dot') elif fmt == 'png': w = DotGraphRenderer(image_format='png') elif fmt == 'ndot': w = NativeDotGraphRenderer() elif fmt == 'obo': w = OboFormatGraphRenderer() elif fmt == 'obog': w = OboJsonGraphRenderer() else: w = SimpleListGraphRenderer() return w
def create(fmt)
Creates a GraphRenderer
3.70494
3.486897
1.062532
user_agent_array = ["{}/{}".format(name, version)] if modules: module_info_array = [] for m in modules: mod_name = m.__name__ mod_version = None if hasattr(m, 'get_version'): mod_version = m.get_version() else: mod_version = m.__version__ module_info_array.append("{}/{}".format(mod_name, mod_version)) if caller_name: module_info_array.append(caller_name) user_agent_array.append("({})".format('; '.join(module_info_array))) else: if caller_name: user_agent_array.append("({})".format(caller_name)) return ' '.join(user_agent_array)
def get_user_agent(name="ontobio", version=ontobio_version, modules=None, caller_name=None)
Create a User-Agent string
1.848008
1.866048
0.990332
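A usage sketch for the User-Agent builder above; the module stand-in and version strings are hypothetical, and only the attributes the function actually reads are provided.

from types import SimpleNamespace

# fake module: get_user_agent reads __name__ and __version__ (or get_version())
fake_requests = SimpleNamespace(__name__='requests', __version__='2.0.0')

# get_user_agent as defined above
ua = get_user_agent(name="ontobio", version="1.0.0",
                    modules=[fake_requests], caller_name="my-script")
print(ua)   # -> "ontobio/1.0.0 (requests/2.0.0; my-script)"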
raise NotImplementedError
def search(self, id_list: List, negated_classes: List, limit: Optional[int], method: Optional) -> List[SimResult]
Given an input list of classes or individuals, provides a ranking of similar profiles
661.373047
1,242.629272
0.532237
raise NotImplementedError
def compare(self, query_classes: List, reference_classes: List, method: Optional) -> SimResult
Given two lists of entities (classes, individuals), return their similarity
2,804.103027
483.470215
5.79995
return [ SimAlgorithm.PHENODIGM, SimAlgorithm.BAYES_NETWORK, SimAlgorithm.BAYES_VARIABLE, SimAlgorithm.NAIVE_BAYES_THREE_STATE, SimAlgorithm.NAIVE_BAYES_TWO_STATE, SimAlgorithm.NAIVE_BAYES_TWO_STATE_NO_BLANKET, SimAlgorithm.GRID, SimAlgorithm.GRID_NEGATED, SimAlgorithm.JACCARD, SimAlgorithm.MAX_INFORMATION ]
def matchers() -> List[SimAlgorithm]
Matchers in owlsim3. Could be dynamically retrieved from http://owlsim3.monarchinitiative.org/api/match/matchers
4.318072
4.086637
1.056632
logging.info("Slimming {} to {}".format(g,nodes)) # maps ancestor nodes to members of the focus node set they subsume mm = {} subnodes = set() for n in nodes: subnodes.add(n) ancs = nx.ancestors(g, n) ancs.add(n) for a in ancs: subnodes.add(a) if a not in mm: mm[a] = set() mm[a].add(n) # merge graph egraph = nx.MultiDiGraph() # TODO: ensure edge labels are preserved for a, aset in mm.items(): for p in g.predecessors(a): logging.info(" cmp {} -> {} // {} {}".format(len(aset),len(mm[p]), a, p)) if p in mm and len(aset) == len(mm[p]): egraph.add_edge(p, a) egraph.add_edge(a, p) logging.info("will merge {} <-> {} (members identical)".format(p,a)) nmap = {} leafmap = {} disposable = set() for cliq in nx.strongly_connected_components(egraph): leaders = set() leafs = set() for n in cliq: is_src = False if n in nodes: logging.info("Preserving: {} in {}".format(n,cliq)) leaders.add(n) is_src = True is_leaf = True for p in g.successors(n): if p in cliq: is_leaf = False if not(is_leaf or is_src): disposable.add(n) if is_leaf: logging.info("Clique leaf: {} in {}".format(n,cliq)) leafs.add(n) leader = None if len(leaders) > 1: logging.info("UHOH: {}".format(leaders)) if len(leaders) > 0: leader = list(leaders)[0] else: leader = list(leafs)[0] leafmap[n] = leafs subg = g.subgraph(subnodes) fg = remove_nodes(subg, disposable) return fg
def get_minimal_subgraph(g, nodes)
given a set of nodes, extract a subgraph that excludes non-informative nodes - i.e. those that are not MRCAs of pairs of existing nodes. Note: no property chain reasoning is performed. As a result, edge labels are lost.
3.569536
3.547169
1.006305
ofactory = OntologyFactory() ont2 = ofactory.create(args.resource2) afactory = AssociationSetFactory() aset2 = afactory.create(ontology=ont2, file=args.file2) # only test for genes (or other subjects of statements) in common common = set(aset.subjects).intersection(aset2.subjects) num_common = len(common) logging.info("Genes in common between two KBs: {}/{} = {}".format(len(aset.subjects), len(aset2.subjects), num_common)) if num_common < 2: logging.error("TOO FEW") return None for n in aset.ontology.nodes(): nl = ont.label(n, id_if_null=True) genes = aset.query([n]) num_genes = len(genes) if num_genes > 2: logging.info("BASE: {} {} num={}".format(n,nl, num_genes)) enr = aset2.enrichment_test(subjects=genes, background=aset2.subjects, labels=True) for r in enr: print("{:8.3g} {} {:20s} <-> {} {:20s}".format(r['p'],n,nl,r['c'],str(r['n'])))
def run_phenolog(ont, aset, args)
Like run_enrichment_test, but uses classes from a 2nd ontology/assocset to build the gene set.
5.438909
5.11209
1.063931
subjects = aset.query(args.query, args.negative) for s in subjects: print("{} {}".format(s, str(aset.label(s)))) if args.plot: import plotly.plotly as py import plotly.graph_objs as go tups = aset.query_associations(subjects=subjects) z, xaxis, yaxis = tuple_to_matrix(tups) spacechar = " " xaxis = mk_axis(xaxis, aset, args, spacechar=" ") yaxis = mk_axis(yaxis, aset, args, spacechar=" ") logging.info("PLOTTING: {} x {} = {}".format(xaxis, yaxis, z)) trace = go.Heatmap(z=z, x=xaxis, y=yaxis) data=[trace] py.plot(data, filename='labelled-heatmap')
def run_query(ont, aset, args)
Basic querying by positive/negative class lists
4.374124
4.251924
1.02874
if "header" not in association or association["header"] == False: # print(json.dumps(association, indent=4)) gpi_obj = { 'id': association["subject"]["id"], 'label': association["subject"]["label"], # db_object_symbol, 'full_name': association["subject"]["fullname"], # db_object_name, 'synonyms': association["subject"]["synonyms"], 'type': association["subject"]["type"], #db_object_type, 'parents': "", # GAF does not have this field, but it's optional in GPI 'xrefs': "", # GAF does not have this field, but it's optional in GPI 'taxon': { 'id': association["subject"]["taxon"]["id"] } } return Entity(gpi_obj) return None
def convert_association(self, association: Association) -> Entity
'id' is already `join`ed in both the Association and the Entity, so we don't have to worry about what that looks like. We assume it's correct.
4.183492
4.12396
1.014436
# trigger synonym cache self.all_synonyms() self.all_obsoletes() # default method - wrap get_graph srcg = self.get_graph() if prefix is not None: srcg = srcg.subgraph([n for n in srcg.nodes() if n.startswith(prefix+":")]) if relations is None: logger.info("No filtering on "+str(self)) return srcg logger.info("Filtering {} for {}".format(self, relations)) g = nx.MultiDiGraph() # TODO: copy full metadata logger.info("copying nodes") for (n,d) in srcg.nodes(data=True): g.add_node(n, **d) logger.info("copying edges") num_edges = 0 for (x,y,d) in srcg.edges(data=True): if d['pred'] in relations: num_edges += 1 g.add_edge(x,y,**d) logger.info("Filtered edges: {}".format(num_edges)) return g
def get_filtered_graph(self, relations=None, prefix=None)
Returns a networkx graph for the whole ontology, for a subset of relations Only implemented for eager methods. Implementation notes: currently this is not cached Arguments --------- - relations : list list of object property IDs, e.g. subClassOf, BFO:0000050. If empty, uses all. - prefix : String if specified, create a subgraph using only classes with this prefix, e.g. ENVO, PATO, GO Return ------ nx.MultiDiGraph A networkx MultiDiGraph object representing the filtered ontology
3.518935
3.347064
1.05135
if self.xref_graph is None: self.xref_graph = nx.MultiGraph() logger.info("Merging source: {} xrefs: {}".format(self, len(self.xref_graph.edges()))) for ont in ontologies: logger.info("Merging {} into {}".format(ont, self)) g = self.get_graph() srcg = ont.get_graph() for n in srcg.nodes(): g.add_node(n, **srcg.node[n]) for (o,s,m) in srcg.edges(data=True): g.add_edge(o,s,**m) if ont.xref_graph is not None: for (o,s,m) in ont.xref_graph.edges(data=True): self.xref_graph.add_edge(o,s,**m)
def merge(self, ontologies)
Merges specified ontology into current ontology
2.497086
2.465505
1.012809
g = None if nodes is not None: g = self.subgraph(nodes) else: g = self.get_graph() if minimal: from ontobio.slimmer import get_minimal_subgraph g = get_minimal_subgraph(g, nodes) ont = Ontology(graph=g, xref_graph=self.xref_graph) # TODO - add metadata if relations is not None: g = ont.get_filtered_graph(relations) ont = Ontology(graph=g, xref_graph=self.xref_graph) return ont
def subontology(self, nodes=None, minimal=False, relations=None)
Return a new ontology that is an extract of this one Arguments --------- - nodes: list list of node IDs to include in subontology. If None, all are used - relations: list list of relation IDs to include in subontology. If None, all are used
3.097418
3.342058
0.9268
if subset is not None: subset_nodes = self.extract_subset(subset) logger.info("Extracting subset: {} -> {}".format(subset, subset_nodes)) if subset_nodes is None or len(subset_nodes) == 0: raise ValueError("subset nodes is blank") subset_nodes = set(subset_nodes) logger.debug("SUBSET: {}".format(subset_nodes)) # Use a sub-ontology for mapping subont = self if relations is not None: subont = self.subontology(relations=relations) if not disable_checks: for r in subont.relations_used(): if r != 'subClassOf' and r != 'BFO:0000050' and r != 'subPropertyOf': raise ValueError("Not safe to propagate over a graph with edge type: {}".format(r)) m = {} for n in subont.nodes(): ancs = subont.ancestors(n, reflexive=True) ancs_in_subset = subset_nodes.intersection(ancs) m[n] = list(subont.filter_redundant(ancs_in_subset)) return m
def create_slim_mapping(self, subset=None, subset_nodes=None, relations=None, disable_checks=False)
Create a dictionary that maps all nodes in an ontology to a subset Arguments --------- ont : `Ontology` Complete ontology to be mapped. Assumed pre-filtered for relationship types subset : str Name of subset to map to, e.g. goslim_generic subset_nodes : list If no named subset provided, subset is passed in as list of node ids relations : list List of relations to filter on disable_checks: bool Unless this is set, an error is raised if the graph contains non-standard relations. The motivation here is that the ontology graph may include relations that it is inappropriate to propagate gene products over, e.g. transports, has-part Return ------ dict maps all nodes in ont to one or more non-redundant nodes in subset Raises ------ ValueError if the subset is empty
3.507418
3.426907
1.023494
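A hedged usage sketch for the slim-mapping routine above; the import path, ontology handle and subset name are assumptions, and the relation list simply follows the is_a/part_of restriction the checks enforce.

from ontobio.ontol_factory import OntologyFactory

ont = OntologyFactory().create('go')   # assumed handle
# map every class to its nearest goslim_generic ancestor(s),
# propagating only over subClassOf and part-of (BFO:0000050)
m = ont.create_slim_mapping(subset='goslim_generic',
                            relations=['subClassOf', 'BFO:0000050'])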
sids = set(ids) for id in ids: sids = sids.difference(self.ancestors(id, reflexive=False)) return sids
def filter_redundant(self, ids)
Return all non-redundant ids from a list
5.049852
4.78496
1.055359
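The set arithmetic above, restated on a toy example (IDs and ancestor sets are made up):

# toy ancestor table: node -> non-reflexive ancestors
ancestors = {'c': {'b', 'a'}, 'b': {'a'}, 'a': set()}

def filter_redundant(ids):
    # drop any id that is an ancestor of another id in the list
    sids = set(ids)
    for i in ids:
        sids = sids.difference(ancestors[i])
    return sids

print(filter_redundant(['a', 'b', 'c']))   # {'c'}: 'a' and 'b' are redundant ancestors of 'c'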
return [n for n in self.nodes() if subset in self.subsets(n, contract=contract)]
def extract_subset(self, subset, contract=True)
Return all nodes in a subset. We assume the oboInOwl encoding of subsets, and subset IDs are IRIs, or IRI fragments
5.231165
4.246105
1.231992
n = self.node(nid) subsets = [] meta = self._meta(nid) if 'subsets' in meta: subsets = meta['subsets'] else: subsets = [] if contract: subsets = [self._contract_subset(s) for s in subsets] return subsets
def subsets(self, nid, contract=True)
Retrieves subset ids for a class or ontology object
2.83455
2.69467
1.05191
pset = set() for n in self.nodes(): pfx = self.prefix(n) if pfx is not None: pset.add(pfx) return list(pset)
def prefixes(self)
list all prefixes used
3.101719
3.087265
1.004682
sep=':' if nid.startswith('http'): if '#' in nid: sep='#' else: sep='/' parts = nid.split(sep) frag = parts.pop() prefix = sep.join(parts) return prefix, frag
def prefix_fragment(self, nid)
Return prefix and fragment/localid for a node
4.371884
3.974502
1.099983
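A standalone restatement of the splitting rule above, with two illustrative identifiers:

def prefix_fragment(nid):
    # CURIEs split on ':'; IRIs split on '#' if present, else on '/'
    sep = ':'
    if nid.startswith('http'):
        sep = '#' if '#' in nid else '/'
    parts = nid.split(sep)
    frag = parts.pop()
    return sep.join(parts), frag

print(prefix_fragment('GO:0008150'))
# -> ('GO', '0008150')
print(prefix_fragment('http://purl.obolibrary.org/obo/GO_0008150'))
# -> ('http://purl.obolibrary.org/obo', 'GO_0008150')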
g = self.get_graph() types = set() for (x,y,d) in g.edges(data=True): types.add(d['pred']) return list(types)
def relations_used(self)
Return list of all relations used to connect edges
3.749807
3.409755
1.099729
if graph is None: graph = self.get_graph() preds = set() for _,ea in graph[obj][subj].items(): preds.add(ea['pred']) logger.debug('{}->{} = {}'.format(subj,obj,preds)) return preds
def child_parent_relations(self, subj, obj, graph=None)
Get all relationship type ids between a subject and a parent. Typically only one relation ID returned, but in some cases there may be more than one Arguments --------- subj: string Child (subject) id obj: string Parent (object) id Returns ------- list
4.613173
5.886655
0.783666
g = self.get_graph() if node in g: parents = list(g.predecessors(node)) if relations is None: return parents else: rset = set(relations) return [p for p in parents if len(self.child_parent_relations(node, p, graph=g).intersection(rset)) > 0 ] else: return []
def parents(self, node, relations=None)
Return all direct parents of specified node. Wraps networkx by default. Arguments --------- node: string identifier for node in ontology relations: list of strings list of relation (object property) IDs used to filter
3.317222
3.820512
0.868266
g = self.get_graph() if node in g: children = list(g.successors(node)) if relations is None: return children else: rset = set(relations) return [c for c in children if len(self.child_parent_relations(c, node, graph=g).intersection(rset)) > 0 ] else: return []
def children(self, node, relations=None)
Return all direct children of specified node. Wraps networkx by default. Arguments --------- node: string identifier for node in ontology relations: list of strings list of relation (object property) IDs used to filter
3.449433
4.119755
0.837291
if reflexive: ancs = self.ancestors(node, relations, reflexive=False) ancs.append(node) return ancs g = None if relations is None: g = self.get_graph() else: g = self.get_filtered_graph(relations) if node in g: return list(nx.ancestors(g, node)) else: return []
def ancestors(self, node, relations=None, reflexive=False)
Return all ancestors of specified node. The default implementation is to use networkx, but some implementations of the Ontology class may use a database or service backed implementation, for large graphs. Arguments --------- node : str identifier for node in ontology reflexive : bool if true, return query node in graph relations : list relation (object property) IDs used to filter Returns ------- list[str] ancestor node IDs
2.369538
2.563411
0.924369
if reflexive: decs = self.descendants(node, relations, reflexive=False) decs.append(node) return decs g = None if relations is None: g = self.get_graph() else: g = self.get_filtered_graph(relations) if node in g: return list(nx.descendants(g, node)) else: return []
def descendants(self, node, relations=None, reflexive=False)
Returns all descendants of specified node. The default implementation is to use networkx, but some implementations of the Ontology class may use a database or service backed implementation, for large graphs. Arguments --------- node : str identifier for node in ontology reflexive : bool if true, return query node in graph relations : list relation (object property) IDs used to filter Returns ------- list[str] descendant node IDs
2.403541
2.704986
0.888559
eg = nx.Graph() for (u,v,d) in self.get_graph().edges(data=True): if d['pred'] == 'equivalentTo': eg.add_edge(u,v) return eg
def equiv_graph(self)
Returns ------- graph bidirectional networkx graph of all equivalency relations
3.227355
3.122223
1.033672
g = self.get_filtered_graph(**args) nodes = set() for id in qids: # reflexive - always add self nodes.add(id) if down: nodes.update(nx.descendants(g, id)) if up: nodes.update(nx.ancestors(g, id)) return nodes
def traverse_nodes(self, qids, up=True, down=False, **args)
Traverse (optionally) up and (optionally) down from an input set of nodes Arguments --------- qids : list[str] list of seed node IDs to start from up : bool if True, include ancestors down : bool if True, include descendants relations : list[str] list of relations used to filter Return ------ list[str] nodes reachable from qids
3.305
4.160613
0.794354
g = self.get_filtered_graph(relations=relations, prefix=prefix) # note: we also eliminate any singletons, which includes obsolete classes roots = [n for n in g.nodes() if len(list(g.predecessors(n))) == 0 and len(list(g.successors(n))) > 0] return roots
def get_roots(self, relations=None, prefix=None)
Get all nodes that lack parents Arguments --------- relations : list[str] list of relations used to filter prefix : str E.g. GO. Exclude nodes that lack this prefix when testing parentage
4.360231
4.716956
0.924374
g = self.get_filtered_graph(relations) nodes = self.get_roots(relations=relations, **args) for i in range(level): logger.info(" ITERATING TO LEVEL: {} NODES: {}".format(i, nodes)) nodes = [c for n in nodes for c in g.successors(n)] logger.info(" FINAL: {}".format(nodes)) return nodes
def get_level(self, level, relations=None, **args)
Get all nodes at a particular level Arguments --------- relations : list[str] list of relations used to filter
4.764355
5.66046
0.84169
g = None if relations is None: g = self.get_graph() else: g = self.get_filtered_graph(relations) l = [] for n in g: l.append([n] + list(g.predecessors(n))) return l
def parent_index(self, relations=None)
Returns a mapping of nodes to all direct parents Arguments --------- relations : list[str] list of relations used to filter Returns: list list of lists [[CLASS_1, PARENT_1,1, ..., PARENT_1,N], [CLASS_2, PARENT_2,1, PARENT_2,2, ... ] ... ]
3.023564
2.793314
1.082429
tdefs = [] meta = self._meta(nid) if 'definition' in meta: obj = meta['definition'] return TextDefinition(nid, **obj) else: return None
def text_definition(self, nid)
Retrieves the text definition for a class or relation id Arguments --------- nid : str Node identifier for entity to be queried Returns ------- TextDefinition
6.152824
6.549927
0.939373
ldefs = self.all_logical_definitions if ldefs is not None: #print("TESTING: {} AGAINST LD: {}".format(nid, str(ldefs))) return [x for x in ldefs if x.class_id == nid] else: return []
def logical_definitions(self, nid)
Retrieves logical definitions for a class id Arguments --------- nid : str Node identifier for entity to be queried Returns ------- LogicalDefinition
5.59333
6.069438
0.921556
pcas = self.all_property_chain_axioms if pcas is not None: return [x for x in pcas if x.predicate_id == nid] else: return []
def get_property_chain_axioms(self, nid)
Retrieves property chain axioms for a relation (predicate) id Arguments --------- nid : str Node identifier for relation to be queried Returns ------- PropertyChainAxiom
3.673718
4.363501
0.84192
vs = self._get_basic_property_value(nid, 'IAO:0100001') if len(vs) > 1: msg = "replaced_by has multiple values: {}".format(vs) if strict: raise ValueError(msg) else: logger.error(msg) return vs
def replaced_by(self, nid, strict=True)
Returns value of 'replaced by' (IAO_0100001) property for obsolete nodes Arguments --------- nid : str Node identifier for entity to be queried strict: bool If true, raise error if cardinality>1. If false, return list if cardinality>1 Return ------ None if no value set, otherwise returns node id (or list if multiple values, see strict setting)
4.465152
3.818125
1.169462
n = self.node(nid) syns = [] if 'meta' in n: meta = n['meta'] if 'synonyms' in meta: for obj in meta['synonyms']: syns.append(Synonym(nid, **obj)) if include_label: syns.append(Synonym(nid, val=self.label(nid), pred='label')) return syns
def synonyms(self, nid, include_label=False)
Retrieves synonym objects for a class Arguments --------- nid : str Node identifier for entity to be queried include_label : bool If True, include label/names as Synonym objects Returns ------- list[Synonym] :class:`Synonym` objects
2.911422
3.182928
0.914699
g = self.get_graph() if meta is None: meta={} g.add_node(id, label=label, type=type, meta=meta)
def add_node(self, id, label=None, type='CLASS', meta=None)
Add a new node to the ontology
2.789484
2.910337
0.958475
xg = self.xref_graph for n in self.nodes(): if n in xg: self._add_meta_element(n, 'xrefs', [{'val':x} for x in xg.neighbors(n)])
def inline_xref_graph(self)
Copy contents of xref_graph to inlined meta object for each node
6.650482
5.023899
1.323769
g = self.get_graph() g.add_edge(pid, id, pred=relation)
def add_parent(self, id, pid, relation='subClassOf')
Add a new edge to the ontology
5.528952
6.142687
0.900087
# note: does not update meta object if self.xref_graph is None: self.xref_graph = nx.MultiGraph() self.xref_graph.add_edge(xref, id)
def add_xref(self, id, xref)
Adds an xref to the xref graph
5.065604
4.331867
1.169381
n = self.node(syn.class_id) if 'meta' not in n: n['meta'] = {} meta = n['meta'] if 'synonyms' not in meta: meta['synonyms'] = [] meta['synonyms'].append(syn.as_dict())
def add_synonym(self, syn)
Adds a synonym for a node
2.889572
2.738181
1.055289
n = self.node(id) if 'meta' not in n: n['meta'] = {} meta = n['meta'] if 'subsets' not in meta: meta['subsets'] = [] meta['subsets'].append(s)
def add_to_subset(self, id, s)
Adds a node to a subset
2.540085
2.361249
1.075738
syns = [] for n in self.nodes(): syns = syns + self.synonyms(n, include_label=include_label) return syns
def all_synonyms(self, include_label=False)
Retrieves all synonyms Arguments --------- include_label : bool If True, include label/names as Synonym objects Returns ------- list[Synonym] :class:`Synonym` objects
2.982641
3.895797
0.765605
g = self.get_graph() if nid in g: n = g.node[nid] if 'label' in n: return n['label'] else: if id_if_null: return nid else: return None else: if id_if_null: return nid else: return None
def label(self, nid, id_if_null=False)
Fetches label for a node Arguments --------- nid : str Node identifier for entity to be queried id_if_null : bool If True and node has no label return id as label Return ------ str
1.904844
2.116762
0.899886
if self.xref_graph is not None: xg = self.xref_graph if nid not in xg: return [] if bidirectional: return list(xg.neighbors(nid)) else: return [x for x in xg.neighbors(nid) if xg[nid][x][0]['source'] == nid] return []
def xrefs(self, nid, bidirectional=False)
Fetches xrefs for a node Arguments --------- nid : str Node identifier for entity to be queried bidirectional : bool If True, include nodes xreffed to nid Return ------ list[str]
3.008653
3.620811
0.830933
g = self.get_graph() r_ids = [] for n in names: logger.debug("Searching for {} syns={}".format(n,synonyms)) if len(n.split(":")) == 2: r_ids.append(n) else: matches = set([nid for nid in g.nodes() if self._is_match(self.label(nid), n, **args)]) if synonyms: logger.debug("Searching syns for {}".format(names)) for nid in g.nodes(): for s in self.synonyms(nid): if self._is_match(s.val, n, **args): matches.add(nid) r_ids += list(matches) return r_ids
def resolve_names(self, names, synonyms=False, **args)
returns a list of identifiers based on an input list of labels and identifiers. Arguments --------- names: list search terms. '%' treated as wildcard synonyms: bool if true, search on synonyms in addition to labels is_regex : bool if true, treats each name as a regular expression is_partial_match : bool if true, treats each name as a regular expression .*name.*
3.173098
3.130744
1.013528
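A hedged usage sketch of name resolution above; ont stands for an Ontology built elsewhere, and the search terms are placeholders.

# '%' acts as a wildcard; ID-shaped inputs (PREFIX:LOCAL) pass straight through
ids = ont.resolve_names(['%kinase activity%', 'GO:0016301'], synonyms=True)
for i in ids:
    print(i, ont.label(i))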
if args.graph.find('m') > -1: logging.info("SLIMMING") g = get_minimal_subgraph(g, query_ids) w = GraphRenderer.create(args.to) if args.outfile is not None: w.outfile = args.outfile logging.info("Writing subg from "+str(g)) w.write(g, query_ids=query_ids, container_predicates=args.container_properties)
def show_graph(g, nodes, query_ids, args)
Writes graph
7.658309
7.639106
1.002514
#assocs1 = search_associations_compact(object=id, subject_category=category, **args) #assocs2 = search_associations_compact(subject=id, object_category=category, **args) assocs1, facets1 = search_compact_wrap(object=id, subject_category=category, **args) assocs2, facets2 = search_compact_wrap(subject=id, object_category=category, **args) facets = facets1 if len(assocs2) > 0: facets = facets2 return assocs1 + assocs2, facets
def search_golr_wrap(id, category, **args)
performs searches in both directions
2.570276
2.502487
1.027089
parser = argparse.ArgumentParser( description='Command line interface to python-ontobio.golr library' , formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-o', '--outfile', type=str, required=False, help='Path to output file') parser.add_argument('-C', '--category', nargs=2, type=str, required=True, help='Category pair. E.g. disease gene') parser.add_argument('-s', '--species', type=str, required=True, help='NCBITaxon ID') parser.add_argument('-S', '--slim', nargs='*', type=str, required=False, help='Slim IDs') parser.add_argument('-L', '--limit', type=int, default=100000, required=False, help='Limit on number of rows') parser.add_argument('-u', '--url', type=str, required=False, help='Solr URL. E.g. http://localhost:8983/solr/golr') parser.add_argument('-v', '--verbosity', default=0, action='count', help='Increase output verbosity') parser.add_argument('ids',nargs='*') args = parser.parse_args() if args.verbosity >= 2: logging.basicConfig(level=logging.DEBUG) elif args.verbosity == 1: logging.basicConfig(level=logging.INFO) else: logging.basicConfig(level=logging.WARNING) logging.info("Welcome!") [subject_category, object_category] = args.category assocs = bulk_fetch(subject_category, object_category, args.species, rows=args.limit, slim=args.slim, url=args.url) for a in assocs: print("{}\t{}\t{}".format(a['subject'], a['relation'], ";".join(a['objects'])))
def main()
Wrapper for OGR
2.510064
2.552972
0.983193
if self.config.gpi_authority_path is not None: gpis = dict() parser = entityparser.GpiParser() with open(self.config.gpi_authority_path) as gpi_f: entities = parser.parse(file=gpi_f) for entity in entities: gpis[entity["id"]] = { "symbol": entity["label"], "name": entity["full_name"], "synonyms": entitywriter.stringify(entity["synonyms"]), "type": entity["type"] } return gpis # If there is no config file path, return None return None
def load_gpi(self, gpi_path)
Loads a GPI as a file from the `config.gpi_authority_path`
4.463796
3.718064
1.20057
file = self._ensure_file(file) ents = [] skipped = [] n_lines = 0 for line in file: n_lines += 1 if line.startswith("!"): if outfile is not None: outfile.write(line) continue line = line.strip("\n") if line == "": logging.warning("EMPTY LINE") continue parsed_line, new_ents = self.parse_line(line) if self._skipping_line(new_ents): # Skip if there were no ents logging.warning("SKIPPING: {}".format(line)) skipped.append(line) else: ents += new_ents if outfile is not None: outfile.write(parsed_line + "\n") self.report.skipped += len(skipped) self.report.n_lines += n_lines #self.report.n_associations += len(ents) logging.info("Parsed {} ents from {} lines. Skipped: {}". format(len(ents), n_lines, len(skipped))) file.close() return ents
def parse(self, file, outfile=None)
Parse a line-oriented entity file into a list of entity dict objects Note the returned list is of dict objects. TODO: These will later be specified using marshmallow and it should be possible to generate objects Arguments --------- file : file or string The file is parsed into entity objects. Can be a http URL, filename or `file-like-object`, for input assoc file outfile : file Optional output file in which processed lines are written. This a file or `file-like-object` Return ------ list Entities generated from the file
3.100772
3.219258
0.963195
vals = line.split("\t") if len(vals) < 7: self.report.error(line, assocparser.Report.WRONG_NUMBER_OF_COLUMNS, "") return line, [] if len(vals) < 10 and len(vals) >= 7: missing_columns = 10 - len(vals) vals += ["" for i in range(missing_columns)] [ db, db_object_id, db_object_symbol, db_object_name, db_object_synonym, db_object_type, taxon, parent_object_id, xrefs, properties ] = vals split_line = assocparser.SplitLine(line=line, values=vals, taxon=taxon) ## -- ## db + db_object_id. CARD=1 ## -- id = self._pair_to_id(db, db_object_id) if not self._validate_id(id, split_line, context=assocparser.Report): return line, [] ## -- ## db_object_synonym CARD=0..* ## -- synonyms = db_object_synonym.split("|") if db_object_synonym == "": synonyms = [] # TODO: DRY parents = parent_object_id.split("|") if parent_object_id == "": parents = [] else: parents = [self._normalize_id(x) for x in parents] for p in parents: self._validate_id(p, split_line, context=assocparser.Report) xref_ids = xrefs.split("|") if xrefs == "": xref_ids = [] obj = { 'id': id, 'label': db_object_symbol, 'full_name': db_object_name, 'synonyms': synonyms, 'type': db_object_type, 'parents': parents, 'xrefs': xref_ids, 'taxon': { 'id': self._taxon_id(taxon, split_line) } } return line, [obj]
def parse_line(self, line)
Parses a single line of a GPI. Return a tuple `(processed_line, entities)`. Typically there will be a single entity, but in some cases there may be none (invalid line) or multiple (disjunctive clause in annotation extensions) Note: most applications will only need to call this directly if they require fine-grained control of parsing. For most purposes, :method:`parse_file` can be used over the whole file Arguments --------- line : str A single tab-separated line from a GPI file
3.07428
3.213476
0.956683
file = self._ensure_file(file) obj = json.load(file) items = obj['data'] return [self.transform_item(item) for item in items]
def parse(self, file, outfile=None)
Parse a BGI (basic gene info) JSON file
5.282244
4.703675
1.123003
obj = { 'id': item['primaryId'], 'label': item['symbol'], 'full_name': item['name'], 'type': item['soTermId'], 'taxon': {'id': item['taxonId']}, } if 'synonyms' in item: obj['synonyms'] = item['synonyms'] if 'crossReferenceIds' in item: obj['xrefs'] = [self._normalize_id(x) for x in item['crossReferenceIds']] # TODO: synonyms # TODO: genomeLocations # TODO: geneLiteratureUrl return obj
def transform_item(self, item)
Transforms JSON object
4.273626
4.244704
1.006814
self.subjects = list(self.association_map.keys()) # ensure annotations unique for (subj,terms) in self.association_map.items(): self.association_map[subj] = list(set(self.association_map[subj])) logging.info("Indexing {} items".format(len(self.subjects))) n = 0 all_objs = set() for (subj,terms) in self.association_map.items(): ancs = self.termset_ancestors(terms) all_objs.update(ancs) self.subject_to_inferred_map[subj] = ancs n = n+1 if n<5: logging.info(" Indexed: {} -> {}".format(subj, ancs)) elif n == 6: logging.info("[TRUNCATING>5]....") self.objects = all_objs
def index(self)
Creates indexes based on inferred terms. You do not need to call this yourself; called on initialization
5.136681
4.761189
1.078865
if subj in self.subject_to_inferred_map: return self.subject_to_inferred_map[subj] if self.strict: raise UnknownSubjectException(subj) else: return set([])
def inferred_types(self, subj)
Returns: set of reflexive inferred types for a subject. E.g. if a gene is directly associated with terms A and B, and these terms have ancestors C, D and E then the set returned will be {A,B,C,D,E} Arguments --------- subj - ID string Returns: set of class IDs
3.703011
4.281562
0.864874
ancs = set() for term in terms: ancs = ancs.union(self.ontology.ancestors(term)) return ancs.union(set(terms))
def termset_ancestors(self, terms)
reflexive ancestors Arguments --------- terms - a set or list of class IDs Returns: set of class IDs
3.344618
4.844657
0.690373
if subjects is None: subjects = [] mset = set() if infer_subjects: for subj in subjects: mset.update(self.ontology.descendants(subj)) mset.update(set(subjects)) if include_xrefs: xset = set() for m in mset: xrefs = self.ontology.xrefs(m, bidirectional=True) if xrefs is not None: xset.update(xrefs) mset.update(xset) logging.debug("Matching subjects: {}".format(mset)) mset = mset.intersection(self.subjects) logging.debug("Matching subjects with anns: {}".format(mset)) amap = self.association_map results = [] for m in mset: if m in amap: for t in amap[m]: results.append( (m,t) ) return results
def query_associations(self, subjects=None, infer_subjects=True, include_xrefs=True)
Query for a set of associations. Note: only a minimal association model is stored, so all results are returned as (subject_id,class_id) tuples Arguments: subjects: list list of subjects (e.g. genes, diseases) used to query associations. Any association to one of these subjects or a descendant of these subjects (assuming infer_subjects=True) are returned. infer_subjects: boolean (default true) See above include_xrefs: boolean (default true) If true, then expand inferred subject set to include all xrefs of those subjects. Example: if a high level disease node (e.g. DOID:14330 Parkinson disease) is specified, then the default behavior (infer_subjects=True, include_xrefs=True) and the ontology includes DO, results will include associations from both descendant DOID classes, and all xrefs (e.g. OMIM)
2.612624
2.894818
0.902518
if terms is None: terms = [] matches_all = 'owl:Thing' in terms if negated_terms is None: negated_terms = [] termset = set(terms) negated_termset = set(negated_terms) matches = [] n_terms = len(termset) for subj in self.subjects: if matches_all or len(termset.intersection(self.inferred_types(subj))) == n_terms: if len(negated_termset.intersection(self.inferred_types(subj))) == 0: matches.append(subj) return matches
def query(self, terms=None, negated_terms=None)
Basic boolean query, using inference. Arguments: - terms: list list of class ids. Returns the set of subjects that have at least one inferred annotation to each of the specified classes. - negated_terms: list list of class ids. Filters the set of subjects so that there are no inferred annotations to any of the specified classes
2.597158
2.648514
0.980609
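A hedged usage sketch of the boolean query above; aset stands for an AssociationSet built elsewhere (e.g. via the AssociationSetFactory used in the main() entries above), and the class IDs are placeholders.

# subjects inferred to be annotated to both terms,
# excluding anything also annotated to the negated term
matches = aset.query(terms=['HP:0001250', 'HP:0001263'],
                     negated_terms=['HP:0000708'])
for s in matches:
    print(s, aset.label(s))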
if x_terms is None: x_terms = [] if y_terms is None: y_terms = [] xset = set(x_terms) yset = set(y_terms) zset = xset.union(yset) # first built map of gene->termClosure. # this could be calculated ahead of time for all g, # but this may be space-expensive. TODO: benchmark gmap={} for z in zset: gmap[z] = [] for subj in self.subjects: ancs = self.inferred_types(subj) for a in ancs.intersection(zset): gmap[a].append(subj) for z in zset: gmap[z] = set(gmap[z]) ilist = [] for x in x_terms: for y in y_terms: if not symmetric or x<y: shared = gmap[x].intersection(gmap[y]) union = gmap[x].union(gmap[y]) j = 0 if len(union)>0: j = len(shared) / len(union) ilist.append({'x':x,'y':y,'shared':shared, 'c':len(shared), 'j':j}) return ilist
def query_intersections(self, x_terms=None, y_terms=None, symmetric=False)
Query for intersections of terms in two lists Return a list of intersection result objects with keys: - x : term from x - y : term from y - c : count of intersection - j : jaccard score
3.507973
3.402812
1.030904
z = [ [0] * len(xterms) for i1 in range(len(yterms)) ] xmap = {} xi = 0 for x in xterms: xmap[x] = xi xi = xi+1 ymap = {} yi = 0 for y in yterms: ymap[y] = yi yi = yi+1 for i in ilist: z[ymap[i['y']]][xmap[i['x']]] = i['j'] logging.debug("Z={}".format(z)) return (z,xterms,yterms)
def intersectionlist_to_matrix(ilist, xterms, yterms)
WILL BE DEPRECATED. Replace with a method that returns a pandas DataFrame
2.547415
2.630835
0.968291
entries = [] selected_subjects = self.subjects if subjects is not None: selected_subjects = subjects for s in selected_subjects: vmap = {} for c in self.inferred_types(s): vmap[c] = 1 entries.append(vmap) logging.debug("Creating DataFrame") df = pd.DataFrame(entries, index=selected_subjects) if fillna: logging.debug("Performing fillna...") df = df.fillna(0) return df
def as_dataframe(self, fillna=True, subjects=None)
Return association set as pandas DataFrame Each row is a subject (e.g. gene) Each column is the inferred class used to describe the subject
3.293921
3.03858
1.084033
if self.ontology is not None: label = self.ontology.label(id) if label is not None: return label if self.subject_label_map is not None and id in self.subject_label_map: return self.subject_label_map[id] return None
def label(self, id)
return label for a subject id. Will make use of both the ontology and the association set
2.350705
2.044858
1.149569
return self.ontology.subontology(self.objects, minimal=minimal)
def subontology(self, minimal=False)
Generates a sub-ontology based on associations
8.375871
7.787278
1.075584
if object is None: if self.associations_by_subj is not None: return self.associations_by_subj[subject] else: return [] else: if self.associations_by_subj_obj is not None: return self.associations_by_subj_obj[(subject,object)] else: return []
def associations(self, subject, object=None)
Given a subject-object pair (e.g. gene id to ontology class id), return all association objects that match.
1.963999
1.855178
1.058658
a1 = self.inferred_types(s1) a2 = self.inferred_types(s2) num_union = len(a1.union(a2)) if num_union == 0: return 0.0 return len(a1.intersection(a2)) / num_union
def jaccard_similarity(self,s1,s2)
Calculate jaccard index of inferred associations of two subjects: |ancs(s1) ∩ ancs(s2)| / |ancs(s1) ∪ ancs(s2)|
2.373361
2.371986
1.00058
if x_subjects is None: x_subjects = [] if y_subjects is None: y_subjects = [] xset = set(x_subjects) yset = set(y_subjects) zset = xset.union(yset) # first built map of gene->termClosure. # this could be calculated ahead of time for all g, # but this may be space-expensive. TODO: benchmark gmap={} for z in zset: gmap[z] = self.inferred_types(z) ilist = [] for x in x_subjects: for y in y_subjects: if not symmetric or x<y: shared = gmap[x].intersection(gmap[y]) union = gmap[x].union(gmap[y]) j = 0 if len(union)>0: j = len(shared) / len(union) ilist.append({'x':x,'y':y,'shared':shared, 'c':len(shared), 'j':j}) return self.intersectionlist_to_matrix(ilist, x_subjects, y_subjects)
def similarity_matrix(self, x_subjects=None, y_subjects=None, symmetric=False)
Query for similarity matrix between groups of subjects Return a list of intersection result objects with keys: - x : term from x - y : term from y - c : count of intersection - j : jaccard score
3.98045
3.709104
1.073157
a1 = aset.inferred_types(s1) a2 = aset.inferred_types(s2) num_union = len(a1.union(a2)) if num_union == 0: return 0.0 return len(a1.intersection(a2)) / num_union
def jaccard_similarity(aset:AssociationSet, s1:str, s2:str) -> float
Calculate jaccard index of inferred associations of two subjects: |ancs(s1) ∩ ancs(s2)| / |ancs(s1) ∪ ancs(s2)|
2.342056
2.282511
1.026087
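A worked instance of the Jaccard formula used by both jaccard_similarity variants above (toy inferred-type sets):

a1 = {'HP:1', 'HP:2', 'HP:3'}           # inferred types of s1 (made-up IDs)
a2 = {'HP:2', 'HP:3', 'HP:4', 'HP:5'}   # inferred types of s2
j = len(a1 & a2) / len(a1 | a2)         # 2 shared / 5 in the union = 0.4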
digraph = networkx.MultiDiGraph() logging.info("Getting edges (may be cached)") for (s,p,o) in get_edges(ont): p = map_legacy_pred(p) if relations is None or p in relations: digraph.add_edge(o,s,pred=p) logging.info("Getting labels (may be cached)") for (n,label) in fetchall_labels(ont): digraph.add_node(n, **{'label':label}) return digraph
def get_digraph(ont, relations=None, writecache=False)
Creates a basic graph object corresponding to a remote ontology
3.754035
3.905437
0.961233
g = networkx.MultiGraph() for (c,x) in fetchall_xrefs(ont): g.add_edge(c,x,source=c) return g
def get_xref_graph(ont)
Creates a basic xref graph object corresponding to a remote ontology
6.066975
6.268081
0.967916
logging.info("QUERYING:"+ont) edges = [(c,SUBCLASS_OF, d) for (c,d) in fetchall_isa(ont)] edges += fetchall_svf(ont) edges += [(c,SUBPROPERTY_OF, d) for (c,d) in fetchall_subPropertyOf(ont)] if len(edges) == 0: logging.warn("No edges for {}".format(ont)) return edges
def get_edges(ont)
Fetches all basic edges from a remote ontology
5.249978
5.080956
1.033266
namedGraph = get_named_graph(ont) query = .format(s=searchterm, g=namedGraph) bindings = run_sparql(query) return [(r['c']['value'],r['l']['value']) for r in bindings]
def search(ont, searchterm)
Search for things using labels
5.010988
5.217962
0.960334
namedGraph = get_named_graph(ont) # note subsets have an unusual encoding query = .format(s=subset, g=namedGraph) bindings = run_sparql(query) return [(r['c']['value'],r['l']['value']) for r in bindings]
def get_terms_in_subset(ont, subset)
Find all nodes in a subset. We assume the oboInOwl encoding of subsets, and subset IDs are IRIs
9.029645
11.030549
0.818603
logging.info("Connecting to " + ontology.value + " SPARQL endpoint...") sparql = SPARQLWrapper(ontology.value) logging.info("Made wrapper: {}".format(sparql)) sparql.setQuery(q) sparql.setReturnFormat(JSON) logging.info("Query: {}".format(q)) results = sparql.query().convert() bindings = results['results']['bindings'] logging.info("Rows: {}".format(len(bindings))) for r in bindings: curiefy(r) return bindings
def run_sparql_on(q, ontology)
Run a SPARQL query (q) on a given Ontology (Enum EOntology)
2.862549
2.736723
1.045977
logging.info("fetching rdfs:labels for: "+ont) namedGraph = get_named_graph(ont) queryBody = querybody_label() query = .format(q=queryBody, g=namedGraph) bindings = run_sparql(query) rows = [(r['c']['value'], r['l']['value']) for r in bindings] return rows
def fetchall_labels(ont)
fetch all rdfs:label assertions for an ontology
6.050321
5.298755
1.141838
logging.info("fetching syns for: "+ont) namedGraph = get_named_graph(ont) queryBody = querybody_syns() query = .format(q=queryBody, g=namedGraph) bindings = run_sparql(query) rows = [(r['c']['value'], r['r']['value'], r['l']['value']) for r in bindings] return rows
def fetchall_syns(ont)
fetch all synonyms for an ontology
5.371167
5.210121
1.03091
logging.info("fetching text defs for: "+ont) namedGraph = get_named_graph(ont) query = .format(g=namedGraph) bindings = run_sparql(query) rows = [(r['c']['value'], r['d']['value']) for r in bindings] return rows
def fetchall_textdefs(ont)
fetch all text defs for an ontology
4.91133
4.706028
1.043625
iri = expand_uri(id, strict=False) query = .format(iri=iri) bindings = run_sparql(query) rows = [r['label']['value'] for r in bindings] return rows[0]
def anyont_fetch_label(id)
fetch the rdfs:label for a URI
5.471075
4.796504
1.140638
m = {} for id in ids: label = anyont_fetch_label(id) if label is not None: m[id] = label return m
def batch_fetch_labels(ids)
fetch all rdfs:label assertions for a set of CURIEs
5.279309
4.303631
1.22671
transformed = { } for key in data: if key in keysToSplit: transformed[key] = data[key]['value'].split(SEPARATOR) else: transformed[key] = data[key]['value'] return transformed
def transform(data, keysToSplit=[])
Transform a SPARQL json result by: 1) outputting only { key : value }, removing datatype 2) for some keys, transform them into array based on SEPARATOR
2.608219
2.352384
1.108755
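An illustrative input/output pair for transform above; SEPARATOR is not shown in the snippet, so '|' is assumed here.

binding = {'c':    {'type': 'uri',     'value': 'GO:0008150'},
           'syns': {'type': 'literal', 'value': 'process|biological process'}}

transform(binding, keysToSplit=['syns'])
# -> {'c': 'GO:0008150', 'syns': ['process', 'biological process']}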
transformed = [ ] for item in data: transformed.append(transform(item, keysToSplit)) return transformed
def transformArray(data, keysToSplit=[])
Transform a SPARQL json array based on the rules of transform
3.476674
3.68901
0.942441
mcls = None for (this_code,this_ref,cls) in self.mappings(): if str(this_code) == str(code): if this_ref == reference: return cls if this_ref is None: mcls = cls return mcls
def coderef_to_ecoclass(self, code, reference=None)
Map a GAF code to an ECO class Arguments --------- code : str GAF evidence code, e.g. ISS, IDA reference: str CURIE for a reference for the evidence instance. E.g. GO_REF:0000001. Optional - If provided can give a mapping to a more specific ECO class Return ------ str ECO class CURIE/ID
4.80073
6.090428
0.788242
code = '' ref = None for (code,ref,this_cls) in self.mappings(): if cls == this_cls: return code, ref return None, None
def ecoclass_to_coderef(self, cls)
Map an ECO class to a GAF code. This is the reciprocal of :ref:`coderef_to_ecoclass` Arguments --------- cls : str ECO class CURIE/ID Return ------ (str, str) code, reference tuple
6.978133
8.359939
0.834711
with open(file, 'rb') as FH: contents = FH.read() return hashlib.sha256(contents).hexdigest()
def get_checksum(file)
Get SHA256 hash from the contents of a given file
2.729284
2.521064
1.082592