code | signature | docstring | loss_without_docstring | loss_with_docstring | factor |
---|---|---|---|---|---|
string | string | string | float64 | float64 | float64 |
digraph = networkx.MultiDiGraph()
from rdflib.namespace import RDF
label_map = {}
for c in rg.subjects(RDF.type, OWL.Class):
cid = contract_uri_wrap(c)
logging.info("C={}".format(cid))
for lit in rg.objects(c, RDFS.label):
label_map[cid] = lit.value
digraph.add_node(cid, label=lit.value)
for s in rg.objects(c, RDFS.subClassOf):
# todo - blank nodes
sid = contract_uri_wrap(s)
digraph.add_edge(sid, cid, pred='subClassOf')
logging.info("G={}".format(digraph))
payload = {
'graph': digraph,
#'xref_graph': xref_graph,
#'graphdoc': obographdoc,
#'logical_definitions': logical_definitions
}
ont = Ontology(handle='wd', payload=payload)
return ont | def rdfgraph_to_ontol(rg) | Return an Ontology object from an rdflib graph object
Status: Incomplete | 4.374811 | 4.473859 | 0.977861 |
if entities is None:
entities = []
results = search_associations(subjects=entities,
subject_direct=True,
rows=0,
facet_fields=[M.IS_DEFINED_BY, M.SUBJECT_TAXON, M.SUBJECT_CATEGORY],
object_category=object_category,
facet_mincount=3, # TODO
facet_limit=-1,
json_facet={
'categories':{
'limit':-1,
'type': 'terms',
'field' : M.OBJECT_CLOSURE,
'facet' : {
'uniq_subject': "unique(subject)"
}
}
},
**kwargs)
buckets = results['facets']['categories']['buckets']
cmap = {}
for bucket in buckets:
if bucket['uniq_subject'] >= min_count:
cmap[bucket['val']] = bucket['uniq_subject']
return (cmap, results) | def get_counts(entities=None,
object_category=None,
min_count=1,
**kwargs) | given a set of entities (genes, diseases, etc), finds the number of entities associated with each descriptor in a given category.
The result is a tuple (cmap, results), where cmap is a dict of TERM:COUNT | 4.788345 | 5.056205 | 0.947024 |
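A minimal usage sketch of get_counts; the entity ids and category below are illustrative placeholders, not results from a real query:
```python
# cmap maps each descriptor term to the number of distinct input entities
# annotated to it (directly or via closure); ids here are hypothetical.
cmap, results = get_counts(entities=['NCBIGene:1017', 'NCBIGene:1019'],
                           object_category='phenotype',
                           min_count=2)
for term_id, n_subjects in cmap.items():
    print(term_id, n_subjects)
```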
if sample_entities is None:
sample_entities = []
(sample_counts, sample_results) = get_counts(entities=sample_entities,
object_category=object_category,
min_count=2,
**kwargs)
print(str(sample_counts))
sample_fcs = sample_results['facet_counts']
taxon_count_dict = sample_fcs[M.SUBJECT_TAXON]
taxon=None
for (t,tc) in taxon_count_dict.items():
# TODO - throw error if multiple taxa
taxon = t
if background_entities is None:
objects = list(sample_counts.keys())
print("OBJECTS="+str(objects))
background_entities = get_background(objects, taxon, object_category)
# TODO: consider caching
(bg_counts,_) = get_counts(entities=background_entities,
object_category=object_category,
**kwargs)
sample_n = len(sample_entities) # TODO - annotated only?
pop_n = len(background_entities)
# adapted from goatools
for (sample_termid,sample_count) in sample_counts.items():
pop_count = bg_counts[sample_termid]
# https://en.wikipedia.org/wiki/Fisher's_exact_test
# Cls NotCls
# study/sample [a, b]
# rest of ref [c, d]
#
a = sample_count
b = sample_n - sample_count
c = pop_count - sample_count
d = pop_n - pop_count - b
print("ABCD="+str((sample_termid,a,b,c,d,sample_n)))
_, p_uncorrected = sp.stats.fisher_exact( [[a, b], [c, d]])
print("P="+str(p_uncorrected)) | def find_enriched(sample_entities=None,
background_entities=None,
object_category=None,
**kwargs) | Given a sample set of sample_entities (e.g. overexpressed genes) and a background set (e.g. all genes assayed), and a category of descriptor (e.g. phenotype, function),
return enriched descriptors/classes | 4.355319 | 4.428957 | 0.983373 |
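A small worked sketch of the 2x2 contingency table built above; the counts are made up purely for illustration:
```python
import scipy.stats

sample_count, sample_n = 12, 40    # sample genes annotated to the term / sample size
pop_count, pop_n = 50, 1000        # background genes annotated to the term / background size

a = sample_count                   # in sample and annotated
b = sample_n - sample_count        # in sample, not annotated
c = pop_count - sample_count       # annotated, but only in the background
d = pop_n - pop_count - b          # neither annotated nor in the sample
_, p_uncorrected = scipy.stats.fisher_exact([[a, b], [c, d]])
print(p_uncorrected)
```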
results = search_associations(id=id, **kwargs)
assoc = results['associations'][0] if len(results['associations']) > 0 else {}
return assoc | def get_association(id, **kwargs) | Fetch an association object by ID | 3.187003 | 3.24258 | 0.98286 |
logging.info("CREATING_GOLR_QUERY {}".format(kwargs))
q = GolrAssociationQuery(**kwargs)
return q.exec() | def search_associations(**kwargs) | Fetch a set of association objects based on a query. | 10.516428 | 11.028956 | 0.953529 |
searchresult = search_associations(subject=subject,
fetch_objects=True,
rows=0,
object_category=object_category,
relation=relation,
**kwargs
)
objs = searchresult['objects']
return objs | def get_objects_for_subject(subject=None,
object_category=None,
relation=None,
**kwargs) | Convenience method: Given a subject (e.g. gene, disease, variant), return all associated objects (phenotypes, functions, interacting genes, etc) | 5.75712 | 5.38828 | 1.068452 |
searchresult = search_associations(object=object,
fetch_subjects=True,
rows=0,
subject_category=subject_category,
subject_taxon=subject_taxon,
relation=relation,
**kwargs
)
subjs = searchresult['subjects']
return subjs | def get_subjects_for_object(object=None,
subject_category=None,
subject_taxon=None,
relation=None,
**kwargs) | Convenience method: Given an object (e.g. ontology term like phenotype or GO; interacting gene; disease; pathway etc), return all associated subjects (genes, variants, pubs, etc) | 4.32998 | 4.305616 | 1.005659 |
logging.info("SLIM SUBJECTS:{} SLIM:{} CAT:{}".format(subjects,slim,kwargs.get('category')))
searchresult = search_associations(subjects=subjects,
slim=slim,
facet_fields=[],
**kwargs
)
pmap = {}
for a in searchresult['associations']:
subj = a['subject']['id']
slimmed_terms = a['slim']
#logging.info('SLIM: {} {}'.format(subj,slimmed_terms))
for t in slimmed_terms:
k = (subj,t)
if k not in pmap:
pmap[k] = []
pmap[k].append(a)
results = [ {'subject': subj, 'slim':t, 'assocs': assocs} for ((subj,t),assocs) in pmap.items()]
return results | def map2slim(subjects, slim, **kwargs) | Maps a set of subjects (e.g. genes) to a set of slims
Result is a list of unique subject-class pairs, with
a list of source associations | 3.757544 | 3.840617 | 0.97837 |
assert subject_category is not None
assert object_category is not None
time.sleep(1)
logging.info("Bulk query: {} {} {}".format(subject_category, object_category, taxon))
assocs = search_associations_compact(subject_category=subject_category,
object_category=object_category,
subject_taxon=taxon,
rows=rows,
iterate=True,
**kwargs)
logging.info("Rows retrieved: {}".format(len(assocs)))
if len(assocs) == 0:
logging.error("No associations returned for query: {} {} {}".format(subject_category, object_category, taxon))
return assocs | def bulk_fetch(subject_category, object_category, taxon, rows=MAX_ROWS, **kwargs) | Fetch associations for a species and pair of categories in bulk.
Arguments:
- subject_category: String (not None)
- object_category: String (not None)
- taxon: String
- rows: int
Additionally, any argument for search_associations can be passed | 2.676638 | 2.809132 | 0.952835 |
if facet_pivot_fields is None:
facet_pivot_fields = []
results = search_associations(rows=0,
facet_fields=[facet],
#facet_pivot_fields=facet_pivot_fields + [facet],
facet_pivot_fields=facet_pivot_fields,
**kwargs)
return results | def pivot_query(facet=None, facet_pivot_fields=None, **kwargs) | Pivot query | 3.121475 | 3.121576 | 0.999968 |
if facet_pivot_fields is None:
facet_pivot_fields = []
logging.info("Additional args: {}".format(kwargs))
fp = search_associations(rows=0,
facet_fields=[facet],
facet_pivot_fields=facet_pivot_fields,
**kwargs)['facet_pivot']
# we assume only one
results = list(fp.items())[0][1]
tups = []
xtype=None
ytype=None
xlabels=set()
ylabels=set()
for r in results:
logging.info("R={}".format(r))
xtype=r['field']
rv = r['value']
xlabels.add(rv)
for piv in r['pivot']:
ytype=piv['field']
pv = piv['value']
ylabels.add(pv)
tups.append( (rv,pv,piv['count']) )
z = [ [0] * len(xlabels) for i1 in range(len(ylabels)) ]
xlabels=list(xlabels)
ylabels=list(ylabels)
xmap = dict([x[::-1] for x in enumerate(xlabels)])
ymap = dict([x[::-1] for x in enumerate(ylabels)])
for t in tups:
z[ymap[t[1]]][xmap[t[0]]] = t[2]
m = {'xtype':xtype,
'ytype':ytype,
'xaxis':xlabels,
'yaxis':ylabels,
'z':z}
return m | def pivot_query_as_matrix(facet=None, facet_pivot_fields=None, **kwargs) | Pivot query | 2.869816 | 2.871862 | 0.999288 |
go_golr_url = "http://golr.geneontology.org/solr/"
go_solr = pysolr.Solr(go_golr_url, timeout=5)
go_solr.get_session().headers['User-Agent'] = get_user_agent(caller_name=__name__)
return search_associations(subject_category,
object_category,
relation,
subject,
solr=go_solr,
field_mapping=goassoc_fieldmap(),
**kwargs) | def search_associations_go(
subject_category=None,
object_category=None,
relation=None,
subject=None,
**kwargs) | Perform association search using the GO golr instance | 3.792314 | 3.592713 | 1.055557 |
results = search_associations(rows=0,
select_fields=[],
facet_field_limits = {
distinct_field : -1
},
facet_fields=[distinct_field],
**kwargs
)
# TODO: map field
return list(results['facet_counts'][distinct_field].keys()) | def select_distinct(distinct_field=None, **kwargs) | select distinct values for a given field for a given query | 9.542306 | 10.157862 | 0.939401 |
am = self.assocmodel
a1 = am.inferred_types(s1)
a2 = am.inferred_types(s2)
num_union = len(a1 | a2)
if num_union == 0:
return 0.0
return len(a1 & a2) / num_union | def pw_score_jaccard(self, s1 : ClassId, s2 : ClassId) -> SimScore | Calculate jaccard index of inferred associations of two subjects
|ancs(s1) /\ ancs(s2)|
---
|ancs(s1) \/ ancs(s2)| | 3.456918 | 3.294554 | 1.049282 |
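For illustration, the same Jaccard calculation on two made-up inferred-annotation sets:
```python
# Hypothetical inferred ancestor/annotation sets for two subjects.
a1 = {'GO:0008150', 'GO:0009987', 'GO:0006915'}
a2 = {'GO:0008150', 'GO:0009987', 'GO:0012501'}
sim = len(a1 & a2) / len(a1 | a2)   # 2 / 4 = 0.5
```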
df = self.assoc_df
slice1 = df.loc[s1].values
slice2 = df.loc[s2].values
return 1 - cosine(slice1, slice2) | def pw_score_cosine(self, s1 : ClassId, s2 : ClassId) -> SimScore | Cosine similarity of two subjects
Arguments
---------
s1 : str
class id
Return
------
number
A number between 0 and 1 | 3.970795 | 4.50141 | 0.882123 |
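A toy example of the same calculation, assuming each row of assoc_df is a 0/1 annotation vector:
```python
import numpy as np
from scipy.spatial.distance import cosine

slice1 = np.array([1, 0, 1, 1, 0])        # hypothetical annotation vector for s1
slice2 = np.array([1, 1, 1, 0, 0])        # hypothetical annotation vector for s2
similarity = 1 - cosine(slice1, slice2)   # 2 / 3 ~= 0.67
```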
logging.info("Calculating all class ICs")
df = self.assoc_df
freqs = df.sum(axis=0)
n_subjects, _ = df.shape
ics = freqs.apply(lambda x: -math.log(x / n_subjects)/math.log(2))
self.ics = ics
logging.info("DONE calculating all class ICs")
return ics | def calculate_all_information_content(self) -> pd.Series | Calculate the Information Content (IC) value of every class
Sets the internal icmap cache and returns an array
Return
------
Series
a pandas Series indexed by class id and with IC as value | 4.680089 | 4.041819 | 1.157917 |
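The IC formula above is -log2(frequency / n_subjects); a toy annotation matrix makes the numbers concrete (class ids are invented):
```python
import math
import pandas as pd

# rows = subjects, columns = classes; 1 = subject annotated to class
df = pd.DataFrame([[1, 1, 0],
                   [1, 0, 0],
                   [1, 1, 1]], columns=['GO:a', 'GO:b', 'GO:c'])
n_subjects, _ = df.shape
ics = df.sum(axis=0).apply(lambda x: -math.log(x / n_subjects) / math.log(2))
# GO:a -> 0.0 bits (annotated to every subject), GO:b -> ~0.58, GO:c -> ~1.58
```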
G = self.G
# reflexive ancestors
ancs1 = self._ancestors(c1) | {c1}
ancs2 = self._ancestors(c2) | {c2}
common_ancestors = ancs1 & ancs2
redundant = set()
for a in common_ancestors:
redundant = redundant | nx.ancestors(G, a)
return common_ancestors - redundant | def calculate_mrcas(self, c1 : ClassId, c2 : ClassId) -> Set[ClassId] | Calculate the MRCA for a class pair | 3.383085 | 3.253169 | 1.039935 |
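A toy DAG illustrating the filtering step: a common ancestor is dropped if it is itself a strict ancestor of another common ancestor. Edges point parent -> child, matching how the graph is built elsewhere in this codebase; node names are invented.
```python
import networkx as nx

G = nx.DiGraph([('root', 'mid'), ('mid', 'c1'), ('mid', 'c2'), ('root', 'c2')])
ancs1 = nx.ancestors(G, 'c1') | {'c1'}                           # {'root', 'mid', 'c1'}
ancs2 = nx.ancestors(G, 'c2') | {'c2'}                           # {'root', 'mid', 'c2'}
common = ancs1 & ancs2                                           # {'root', 'mid'}
redundant = set().union(*(nx.ancestors(G, a) for a in common))   # {'root'}
print(common - redundant)                                        # {'mid'}: the MRCA
```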
G = self.G
ics = self._information_content_frame()
classes = list(dfs.dfs_preorder_nodes(G))
#mica_df = pd.DataFrame(index=classes, columns=classes)
#mica_ic_df = pd.DataFrame(index=classes, columns=classes)
ncs = len(classes)
ic_grid = np.empty([ncs,ncs])
mica_arrs = []
logging.info('Calculating ICs for {} x {} classes'.format(ncs, ncs))
for c1i in range(0,ncs):
c1 = classes[c1i]
logging.debug('Calculating ICs for {}'.format(c1))
ancs1r = self._ancestors(c1) | {c1}
c2i = 0
mica_arr = []
for c2i in range(0,ncs):
c2 = classes[c2i]
# TODO: optimize; matrix is symmetrical
#if c1i > c2i:
# continue
ancs2r = self._ancestors(c2) | {c2}
common_ancs = ancs1r & ancs2r
if len(common_ancs) > 0:
ic_cas = ics.loc[common_ancs]
max_ic = float(ic_cas.max())
ic_micas = ic_cas[ic_cas >= max_ic]
micas = set(ic_micas.index)
mica_arr.append(micas)
#mica_grid[c1i, c2i] = micas
#mica_grid[c2i, c1i] = micas
ic_grid[c1i, c2i] = max_ic
#ic_grid[c2i, c1i] = max_ic
else:
ic_grid[c1i, c2i] = 0
mica_arr.append(set())
#ic_grid[c2i, c1i] = 0
# TODO: consider optimization step; blank out all anc pairs
mica_arrs.append(mica_arr)
logging.info('DONE Calculating ICs for {} x {} classes'.format(ncs, ncs))
#self.mica_df = pd.DataFrame(mica_grid, index=classes, columns=classes)
self.mica_df = pd.DataFrame(mica_arrs, index=classes, columns=classes)
self.mica_ic_df = pd.DataFrame(ic_grid, index=classes, columns=classes) | def calculate_all_micas(self) | Calculate the MICA (Most Informative Common Ancestor) of every class-pair | 2.537143 | 2.454915 | 1.033496 |
pairs = self.mica_ic_df.loc[cset1, cset2]
max0 = pairs.max(axis=0)
max1 = pairs.max(axis=1)
idxmax0 = pairs.idxmax(axis=0)
idxmax1 = pairs.idxmax(axis=1)
mean0 = max0.mean()
mean1 = max1.mean()
return (mean0+mean1)/2, mean0, mean1 | def pw_compare_class_sets(self, cset1: Set[ClassId], cset2: Set[ClassId]) -> Tuple[ICValue, ICValue, ICValue] | Compare two class profiles | 3.152204 | 3.027482 | 1.041197 |
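A toy MICA-IC sub-matrix showing the symmetric best-match average computed above (IC values are invented):
```python
import pandas as pd

pairs = pd.DataFrame([[3.0, 1.0],
                      [2.0, 4.0]],
                     index=['c1a', 'c1b'], columns=['c2a', 'c2b'])
mean0 = pairs.max(axis=0).mean()   # best match per column: (3.0 + 4.0) / 2 = 3.5
mean1 = pairs.max(axis=1).mean()   # best match per row:    (3.0 + 4.0) / 2 = 3.5
score = (mean0 + mean1) / 2        # 3.5
```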
raise NotImplementedError | def search(self,
id_list: Set,
negated_classes: Set,
method: Optional) -> List[SimResult] | def search(self, cset: Set[ClassId]):
slice = self.mica_ic_df.loc[cset]
am = self.assocmodel
for i in am.subjects:
pass # TODO | 790.639771 | 521.812866 | 1.515179 |
raise NotImplementedError | def compare(self,
query_classes: Set,
reference_classes: Set,
method: Optional) -> SimResult | Given two lists of entities (classes, individuals)
return their similarity | 2,542.900391 | 458.191376 | 5.549865 |
rdfgraph = rdflib.Graph()
if format is None:
if filename.endswith(".ttl"):
format='turtle'
elif filename.endswith(".rdf"):
format='xml'
rdfgraph.parse(filename, format=format)
return self.process_rdfgraph(rdfgraph) | def process_file(self,filename=None, format=None) | Parse a file into an ontology object, using rdflib | 2.792871 | 2.475906 | 1.12802 |
# TODO: ontology metadata
if ont is None:
ont = Ontology()
subjs = list(rg.subjects(RDF.type, SKOS.ConceptScheme))
if len(subjs) == 0:
logging.warning("No ConceptScheme")
else:
ont.id = self._uri2id(subjs[0])
subset_map = {}
for concept in rg.subjects(RDF.type, SKOS.Concept):
for s in self._get_schemes(rg, concept):
subset_map[self._uri2id(s)] = s
for concept in sorted(list(rg.subjects(RDF.type, SKOS.Concept))):
concept_uri = str(concept)
id=self._uri2id(concept)
logging.info("ADDING: {}".format(id))
ont.add_node(id, self._get_label(rg,concept))
for defn in rg.objects(concept, SKOS.definition):
if (defn.language == self.lang):
td = TextDefinition(id, escape_value(defn.value))
ont.add_text_definition(td)
for s in rg.objects(concept, SKOS.broader):
ont.add_parent(id, self._uri2id(s))
for s in rg.objects(concept, SKOS.related):
ont.add_parent(id, self._uri2id(s), self._uri2id(SKOS.related))
for m in rg.objects(concept, SKOS.exactMatch):
ont.add_xref(id, self._uri2id(m))
for m in rg.objects(concept, SKOS.altLabel):
syn = Synonym(id, val=self._uri2id(m))
ont.add_synonym(syn)
for s in self._get_schemes(rg,concept):
ont.add_to_subset(id, self._uri2id(s))
return ont | def process_rdfgraph(self, rg, ont=None) | Transform a skos terminology expressed in an rdf graph into an Ontology object
Arguments
---------
rg: rdflib.Graph
graph object
Returns
-------
Ontology | 2.662982 | 2.690495 | 0.989774 |
owlsim_url = url + 'searchByAttributeSet'
params = {
'a': profile,
'limit': limit,
'target': namespace_filter
}
return requests.get(owlsim_url, params=params, timeout=TIMEOUT).json() | def search_by_attribute_set(
url: str,
profile: Tuple[str],
limit: Optional[int] = 100,
namespace_filter: Optional[str]=None) -> Dict | Given a list of phenotypes, returns a ranked list of individuals
individuals can be filtered by namespace, eg MONDO, MGI, HGNC
:returns Dict with the structure: {
'unresolved' : [...]
'query_IRIs' : [...]
'results': {...}
}
:raises JSONDecodeError: If the response body does not contain valid json. | 5.16679 | 4.960226 | 1.041644 |
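A hypothetical call, assuming an owlsim2 endpoint at the placeholder URL below and HPO ids chosen only for illustration:
```python
results = search_by_attribute_set(
    url='https://owlsim.example.org/',       # placeholder endpoint
    profile=('HP:0000252', 'HP:0001250'),    # hypothetical phenotype profile
    limit=20,
    namespace_filter='MONDO'                 # restrict matches to disease ids
)
print(results.keys())   # per the docstring: unresolved, query_IRIs, results
```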
owlsim_url = url + 'compareAttributeSets'
params = {
'a': profile_a,
'b': profile_b,
}
return requests.get(owlsim_url, params=params, timeout=TIMEOUT).json() | def compare_attribute_sets(
url: str,
profile_a: Tuple[str],
profile_b: Tuple[str]) -> Dict | Given two phenotype profiles, returns their similarity
:returns Dict with the structure: {
'unresolved' : [...]
'query_IRIs' : [...]
'target_IRIs': [...]
'results': {...}
} | 4.01807 | 3.489137 | 1.151594 |
owlsim_url = url + 'getAttributeInformationProfile'
params = {
'a': profile,
'r': categories
}
return requests.get(owlsim_url, params=params, timeout=TIMEOUT).json() | def get_attribute_information_profile(
url: str,
profile: Optional[Tuple[str]]=None,
categories: Optional[Tuple[str]]=None) -> Dict | Get the information content for a list of phenotypes
and the annotation sufficiency simple and
categorical scores if categories are provided
Ref: https://zenodo.org/record/834091#.W8ZnCxhlCV4
Note that the simple score varies slightly from the pub in that
it uses max_max_ic instead of mean_max_ic
If no arguments are passed this function returns the
system (loaded cohort) stats
:raises JSONDecodeError: If the response body does not contain valid json. | 6.449217 | 7.075823 | 0.911444 |
scigraph = OntologyFactory().create('scigraph:ontology')
category_stats = {}
categories = [enum.value for enum in HpoUpperLevel]
sim_response = get_attribute_information_profile(url, categories=tuple(categories))
try:
global_stats = IcStatistic(
mean_mean_ic=float(sim_response['system_stats']['meanMeanIC']),
mean_sum_ic=float(sim_response['system_stats']['meanSumIC']),
mean_cls=float(sim_response['system_stats']['meanN']),
max_max_ic=float(sim_response['system_stats']['maxMaxIC']),
max_sum_ic=float(sim_response['system_stats']['maxSumIC']),
individual_count=int(sim_response['system_stats']['individuals']),
mean_max_ic=float(sim_response['system_stats']['meanMaxIC'])
)
for cat_stat in sim_response['categorical_scores']:
category_stats[cat_stat['id']] = IcStatistic(
mean_mean_ic=float(cat_stat['system_stats']['meanMeanIC']),
mean_sum_ic=float(cat_stat['system_stats']['meanSumIC']),
mean_cls=float(cat_stat['system_stats']['meanN']),
max_max_ic=float(cat_stat['system_stats']['maxMaxIC']),
max_sum_ic=float(cat_stat['system_stats']['maxSumIC']),
individual_count=int(cat_stat['system_stats']['individuals']),
mean_max_ic=float(cat_stat['system_stats']['meanMaxIC']),
descendants=scigraph.descendants(cat_stat['id'], relations=["subClassOf"])
)
except JSONDecodeError as json_exc:
raise JSONDecodeError(
"Cannot parse owlsim2 response: {}".format(json_exc.msg),
json_exc.doc,
json_exc.pos
)
return global_stats, category_stats | def get_owlsim_stats(url) -> Tuple[IcStatistic, Dict[str, IcStatistic]] | :return Tuple[IcStatistic, Dict[str, IcStatistic]]
:raises JSONDecodeError: If the response body does not contain valid json | 2.578211 | 2.574678 | 1.001372 |
return self.filtered_search(
id_list=id_list,
negated_classes=negated_classes,
limit=limit,
taxon_filter=None,
category_filter=None,
method=method
) | def search(
self,
id_list: List,
negated_classes: List,
limit: Optional[int] = 100,
method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM) -> SimResult | Owlsim2 search, calls search_by_attribute_set, and converts to SimResult object
:raises JSONDecodeError: If the owlsim response is not valid json. | 3.415732 | 3.390711 | 1.007379 |
if len(negated_classes) > 0:
logging.warning("Owlsim2 does not support negation, ignoring neg classes")
namespace_filter = self._get_namespace_filter(taxon_filter, category_filter)
owlsim_results = search_by_attribute_set(self.url, tuple(id_list), limit, namespace_filter)
return self._simsearch_to_simresult(owlsim_results, method) | def filtered_search(
self,
id_list: List,
negated_classes: List,
limit: Optional[int] = 100,
taxon_filter: Optional[int] = None,
category_filter: Optional[str] = None,
method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM) -> SimResult | Owlsim2 filtered search, resolves taxon and category to a namespace,
calls search_by_attribute_set, and converts to SimResult object | 6.134144 | 3.735488 | 1.642126 |
owlsim_results = compare_attribute_sets(self.url, tuple(reference_classes), tuple(query_classes))
return self._simcompare_to_simresult(owlsim_results, method) | def compare(self,
reference_classes: List,
query_classes: List,
method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM) -> SimResult | Owlsim2 compare, calls compare_attribute_sets, and converts to SimResult object
:return: SimResult object | 9.186742 | 5.448285 | 1.686171 |
return [
SimAlgorithm.PHENODIGM,
SimAlgorithm.JACCARD,
SimAlgorithm.SIM_GIC,
SimAlgorithm.RESNIK,
SimAlgorithm.SYMMETRIC_RESNIK
] | def matchers() -> List[SimAlgorithm] | Matchers in owlsim2 | 8.416718 | 8.545638 | 0.984914 |
sim_response = get_attribute_information_profile(self.url, tuple(profile))
profile_ic = {}
try:
for cls in sim_response['input']:
profile_ic[cls['id']] = cls['IC']
except JSONDecodeError as json_exc:
raise JSONDecodeError(
"Cannot parse owlsim2 response: {}".format(json_exc.msg),
json_exc.doc,
json_exc.pos
)
return profile_ic | def get_profile_ic(self, profile: List) -> Dict | Given a list of individuals, return their information content | 5.587141 | 5.313993 | 1.051402 |
sim_ids = get_nodes_from_ids(sim_resp['query_IRIs'])
sim_resp['results'] = OwlSim2Api._rank_results(sim_resp['results'], method)
# get id type map:
ids = [result['j']['id'] for result in sim_resp['results']]
id_type_map = get_id_type_map(ids)
matches = []
for result in sim_resp['results']:
matches.append(
SimMatch(
id=result['j']['id'],
label=result['j']['label'],
rank=result['rank'],
score=result[OwlSim2Api.method2key[method]],
type=id_type_map[result['j']['id']][0],
taxon=get_taxon(result['j']['id']),
significance="NaN",
pairwise_match=OwlSim2Api._make_pairwise_matches(result)
)
)
return SimResult(
query=SimQuery(
ids=sim_ids,
unresolved_ids=sim_resp['unresolved'],
target_ids=[[]]
),
matches=matches,
metadata=SimMetadata(
max_max_ic=self.statistics.max_max_ic
)
) | def _simsearch_to_simresult(self, sim_resp: Dict, method: SimAlgorithm) -> SimResult | Convert owlsim json to SimResult object
:param sim_resp: owlsim response from search_by_attribute_set()
:param method: SimAlgorithm
:return: SimResult object | 4.479536 | 4.392605 | 1.01979 |
pairwise_matches = []
for pairwise_match in result['matches']:
pairwise_matches.append(
PairwiseMatch(
reference=ICNode(**pairwise_match['a']),
match=ICNode(**pairwise_match['b']),
lcs=ICNode(**pairwise_match['lcs'])
)
)
return pairwise_matches | def _make_pairwise_matches(result: Dict) -> List[PairwiseMatch] | Make a list of match object from owlsim results
:param result: Single owlsim result
:return: List of SimMatch objects | 3.288114 | 3.565718 | 0.922146 |
# https://stackoverflow.com/a/73050
sorted_results = sorted(
results, reverse=True, key=lambda k: k[OwlSim2Api.method2key[method]]
)
if len(sorted_results) > 0:
rank = 1
previous_score = sorted_results[0][OwlSim2Api.method2key[method]]
for result in sorted_results:
if previous_score > result[OwlSim2Api.method2key[method]]:
rank += 1
result['rank'] = rank
previous_score = result[OwlSim2Api.method2key[method]]
return sorted_results | def _rank_results(results: List[Dict], method: SimAlgorithm) -> List[Dict] | Ranks results - for phenodigm, results are already ranked but ties need to be accounted for;
for other methods, results need to be reranked
:param results: Results from search_by_attribute_set()['results'] or
compare_attribute_sets()['results']
:param method: sim method used to rank results
:return: Sorted results list | 2.800009 | 2.835686 | 0.987419 |
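A small sketch of the dense ranking performed above: tied scores share a rank and the next distinct score increments it (scores are invented):
```python
results = [{'score': 0.9}, {'score': 0.7}, {'score': 0.9}]
sorted_results = sorted(results, reverse=True, key=lambda k: k['score'])
rank, previous_score = 1, sorted_results[0]['score']
for result in sorted_results:
    if previous_score > result['score']:
        rank += 1
    result['rank'] = rank
    previous_score = result['score']
# ranks assigned: 0.9 -> 1, 0.9 -> 1, 0.7 -> 2
```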
namespace_filter = None
taxon_category_default = {
10090: 'gene',
9606: 'disease',
7227: 'gene',
6239: 'gene',
7955: 'gene'
}
if category_filter is not None and taxon_filter is None:
raise ValueError("Must provide taxon filter along with category")
elif category_filter is None and taxon_filter is not None:
category_filter = taxon_category_default[taxon_filter]
else:
return namespace_filter
return OwlSim2Api.TAX_TO_NS[taxon_filter][category_filter.lower()] | def _get_namespace_filter(
taxon_filter: Optional[int]=None,
category_filter: Optional[str]=None) -> Union[None, str] | Given either a taxon and/or category, return the correct namespace
:raises ValueError: If category is provided without a taxon | 3.537073 | 3.223225 | 1.097371 |
if 'facet_fields' not in fcs:
return {}
ffs = fcs['facet_fields']
rs={}
for (facet, facetresults) in ffs.items():
if invert_subject_object:
for (k,v) in INVERT_FIELDS_MAP.items():
if facet == k:
facet = v
break
elif facet == v:
facet = k
break
pairs = {}
rs[facet] = pairs
for i in range(int(len(facetresults)/2)):
(fv,fc) = (facetresults[i*2],facetresults[i*2+1])
pairs[fv] = fc
return rs | def translate_facet_field(fcs, invert_subject_object = False) | Translates solr facet_fields results into something easier to manipulate
A solr facet field looks like this: [field1, count1, field2, count2, ..., fieldN, countN]
We translate this to a dict {f1: c1, ..., fn: cn}
This has slightly higher overhead for sending over the wire, but is easier to use | 3.322618 | 3.329014 | 0.998079 |
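A sketch of the flat-list-to-dict translation described above, on a made-up facet result:
```python
facetresults = ['ZFIN', 120, 'MGI', 75, 'HGNC', 40]   # [field1, count1, field2, count2, ...]
pairs = {facetresults[i * 2]: facetresults[i * 2 + 1]
         for i in range(len(facetresults) // 2)}
# {'ZFIN': 120, 'MGI': 75, 'HGNC': 40}
```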
return {
M.SUBJECT: 'bioentity',
M.SUBJECT_CLOSURE: 'bioentity',
## In the GO AmiGO instance, the type field is not correctly populated
## See above in the code for hack that restores this for planteome instance
## M.SUBJECT_CATEGORY: 'type',
M.SUBJECT_CATEGORY: None,
M.SUBJECT_LABEL: 'bioentity_label',
M.SUBJECT_TAXON: 'taxon',
M.SUBJECT_TAXON_LABEL: 'taxon_label',
M.SUBJECT_TAXON_CLOSURE: 'taxon_closure',
M.RELATION: 'qualifier',
M.OBJECT: 'annotation_class',
M.OBJECT_CLOSURE: REGULATES_CLOSURE if relationship_type == ACTS_UPSTREAM_OF_OR_WITHIN else ISA_PARTOF_CLOSURE,
M.OBJECT_LABEL: 'annotation_class_label',
M.OBJECT_TAXON: 'object_taxon',
M.OBJECT_TAXON_LABEL: 'object_taxon_label',
M.OBJECT_TAXON_CLOSURE: 'object_taxon_closure',
M.OBJECT_CATEGORY: None,
M.EVIDENCE_OBJECT_CLOSURE: 'evidence_subset_closure',
M.IS_DEFINED_BY: 'assigned_by'
} | def goassoc_fieldmap(relationship_type=ACTS_UPSTREAM_OF_OR_WITHIN) | Returns a mapping of canonical monarch fields to amigo-golr.
See: https://github.com/geneontology/amigo/blob/master/metadata/ann-config.yaml | 3.736805 | 3.594516 | 1.039585 |
if m is None:
return fn
if fn in m:
return m[fn]
else:
return fn | def map_field(fn, m) | Maps a field name, given a mapping file.
Returns input if fieldname is unmapped. | 4.22427 | 3.105083 | 1.360437 |
query = solrInstance.value + "select?q=*:*&fq=document_category:\"" + category.value + "\"&fq=id:\"" + id + "\"&fl=" + fields + "&wt=json&indent=on"
response = requests.get(query)
return response.json()['response']['docs'][0] | def run_solr_on(solrInstance, category, id, fields) | Return the result of a solr query on the given solrInstance (Enum ESOLR), for a certain document_category (ESOLRDoc) and id | 3.142931 | 2.748322 | 1.143582 |
if optionals is None:
optionals = ""
query = solrInstance.value + "select?q=" + q + "&qf=" + qf + "&fq=document_category:\"" + category.value + "\"&fl=" + fields + "&wt=json&indent=on" + optionals
# print("QUERY: ", query)
response = requests.get(query)
return response.json()['response']['docs'] | def run_solr_text_on(solrInstance, category, q, qf, fields, optionals) | Return the result of a solr query on the given solrInstance (Enum ESOLR), for a certain document_category (ESOLRDoc) and id | 3.259766 | 2.985462 | 1.09188 |
merged = []
for i in range(0, len(json[firstField])):
merged.append({ json[firstField][i] : json[secondField][i] })
return merged | def merge(json, firstField, secondField) | merge two fields of a json into an array of { firstField : secondField } | 2.776627 | 2.460368 | 1.128541 |
merged = []
for i in range(0, len(json[firstField])):
merged.append({ firstFieldLabel : json[firstField][i],
secondFieldLabel : json[secondField][i] })
return merged | def mergeWithLabels(json, firstField, firstFieldLabel, secondField, secondFieldLabel) | merge two fields of a json into an array of { firstFieldLabel : firstField, secondFieldLabel : secondField } | 2.426786 | 2.163764 | 1.121558 |
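The two merge helpers above on a toy json dict (field names invented):
```python
json_doc = {'terms': ['GO:0008150', 'GO:0009987'], 'counts': [10, 3]}
merge(json_doc, 'terms', 'counts')
# [{'GO:0008150': 10}, {'GO:0009987': 3}]
mergeWithLabels(json_doc, 'terms', 'term', 'counts', 'count')
# [{'term': 'GO:0008150', 'count': 10}, {'term': 'GO:0009987', 'count': 3}]
```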
params = self.solr_params()
logging.info("PARAMS=" + str(params))
results = self.solr.search(**params)
logging.info("Docs found: {}".format(results.hits))
return self._process_search_results(results) | def search(self) | Execute solr search query | 5.108134 | 4.378016 | 1.166769 |
self.facet = False
params = self.solr_params()
logging.info("PARAMS=" + str(params))
results = self.solr.search(**params)
logging.info("Docs found: {}".format(results.hits))
return self._process_autocomplete_results(results) | def autocomplete(self) | Execute solr autocomplete | 6.468285 | 5.626682 | 1.149573 |
# map go-golr fields to standard
for doc in results.docs:
if 'entity' in doc:
doc['id'] = doc['entity']
doc['label'] = doc['entity_label']
highlighting = {
doc['id']: self._process_highlight(results, doc)._asdict()
for doc in results.docs if results.highlighting
}
payload = SearchResults(
facet_counts=translate_facet_field(results.facets),
highlighting=highlighting,
docs=results.docs,
numFound=results.hits
)
logging.debug('Docs: {}'.format(len(results.docs)))
return payload | def _process_search_results(self,
results: pysolr.Results) -> SearchResults | Convert solr docs to biolink object
:param results: pysolr.Results
:return: model.GolrResults.SearchResults | 5.13575 | 5.163779 | 0.994572 |
# map go-golr fields to standard
for doc in results.docs:
if 'entity' in doc:
doc['id'] = doc['entity']
doc['label'] = doc['entity_label']
docs = []
for doc in results.docs:
if results.highlighting:
hl = self._process_highlight(results, doc)
else:
hl = Highlight(None, None, None)
# In some cases a node does not have a category
category = doc['category'] if 'category' in doc else []
doc['taxon'] = doc['taxon'] if 'taxon' in doc else ""
doc['taxon_label'] = doc['taxon_label'] if 'taxon_label' in doc else ""
doc = AutocompleteResult(
id=doc['id'],
label=doc['label'],
match=hl.match,
category=category,
taxon=doc['taxon'],
taxon_label=doc['taxon_label'],
highlight=hl.highlight,
has_highlight=hl.has_highlight
)
docs.append(doc)
payload = {
'docs': docs
}
logging.debug('Docs: {}'.format(len(results.docs)))
return payload | def _process_autocomplete_results(
self,
results: pysolr.Results) -> Dict[str, List[AutocompleteResult]] | Convert results to biolink autocomplete object
:param results: pysolr.Results
:return: {'docs': List[AutocompleteResult]} | 3.351672 | 3.230224 | 1.037598 |
len_dict = OrderedDict()
for hl in highlights:
# dummy tags to make it valid xml
dummy_xml = "<p>" + hl + "</p>"
try:
element_tree = ET.fromstring(dummy_xml)
hl_length = 0
for emph in element_tree.findall('em'):
hl_length += len(emph.text)
len_dict[hl] = hl_length
except ET.ParseError:
raise ET.ParseError
return max(len_dict, key=len_dict.get) | def _get_longest_hl(self, highlights) | Given a list of highlighted text, returns the
longest highlight
For example:
[
"<em>Muscle</em> <em>atrophy</em>, generalized",
"Generalized <em>muscle</em> degeneration",
"Diffuse skeletal <em>">muscle</em> wasting"
]
and returns:
<em>Muscle</em> <em>atrophy</em>, generalized
If there are multiple matches of the same length, returns
the top (arbitrary) highlight
:return: | 3.949328 | 3.99338 | 0.988969 |
# dummy tags to make it valid xml
dummy_xml = "<p>" + highlight + "</p>"
try:
element_tree = ET.fromstring(dummy_xml)
except ET.ParseError:
raise ET.ParseError
return "".join(list(element_tree.itertext())) | def _hl_as_string(self, highlight) | Given a solr string of highlighted text, returns the
str representations
For example:
"Foo <em>Muscle</em> bar <em>atrophy</em>, generalized"
Returns:
"Foo Muscle bar atrophy, generalized"
:return: str | 6.252608 | 6.339857 | 0.986238 |
params = self.set_lay_params()
logging.info("PARAMS="+str(params))
results = self.solr.search(**params)
logging.info("Docs found: {}".format(results.hits))
return self._process_layperson_results(results) | def autocomplete(self) | Execute solr query for autocomplete | 9.764201 | 8.045691 | 1.213594 |
payload = {
'results': []
}
for doc in results.docs:
hl = self._process_highlight(results, doc)
highlight = {
'id': doc['id'],
'highlight': hl.highlight,
'label': doc['label'],
'matched_synonym': hl.match
}
payload['results'].append(highlight)
logging.debug('Docs: {}'.format(len(results.docs)))
return payload | def _process_layperson_results(self, results) | Convert pysolr.Results to biolink object
:param results:
:return: | 4.940706 | 4.878573 | 1.012736 |
logging.info("Attempting category inference on id={}".format(id))
toks = id.split(":")
idspace = toks[0]
c = None
if idspace == 'DOID':
c='disease'
if c is not None:
logging.info("Inferred category: {} based on id={}".format(c, id))
return c | def infer_category(self, id) | heuristic to infer a category from an id, e.g. DOID:nnn --> disease | 5.071438 | 3.967789 | 1.278152 |
if id is not None:
for (k,v) in PREFIX_NORMALIZATION_MAP.items():
s = k+':'
if id.startswith(s):
return id.replace(s,v+':')
return id | def make_canonical_identifier(self,id) | E.g. MGI:MGI:nnnn --> MGI:nnnn | 4.291788 | 4.166883 | 1.029975 |
if fname not in d:
# TODO: consider adding arg for failure on null
return None
#lf = M.label_field(fname)
v = d[fname]
if not isinstance(v,list):
v = [v]
objs = [{'id': idval} for idval in v]
# todo - labels
return objs | def translate_objs(self,d,fname) | Translate a field whose value is expected to be a list | 10.788476 | 9.987783 | 1.080167 |
if fname not in d:
# TODO: consider adding arg for failure on null
return None
lf = M.label_field(fname)
id = d[fname]
id = self.make_canonical_identifier(id)
#if id.startswith('MGI:MGI:'):
# id = id.replace('MGI:MGI:','MGI:')
obj = {'id': id}
if id:
if self._use_amigo_schema(self.object_category):
iri = expand_uri(id)
else:
iri = expand_uri(id, [get_curie_map('{}/cypher/curies'.format(self.config.scigraph_data.url))])
obj['iri'] = iri
if lf in d:
obj['label'] = d[lf]
cf = fname + "_category"
if cf in d:
obj['category'] = [d[cf]]
if 'aspect' in d and id.startswith('GO:'):
obj['category'] = [ASPECT_MAP[d['aspect']]]
del d['aspect']
return obj | def translate_obj(self,d,fname) | Translate a field value from a solr document.
This includes special logic for when the field value
denotes an object, here we nest it | 6.100399 | 6.26014 | 0.974483 |
if field_mapping is not None:
self.map_doc(d, field_mapping)
subject = self.translate_obj(d, M.SUBJECT)
obj = self.translate_obj(d, M.OBJECT)
# TODO: use a more robust method; we need equivalence as separate field in solr
if map_identifiers is not None:
if M.SUBJECT_CLOSURE in d:
subject['id'] = self.map_id(subject, map_identifiers, d[M.SUBJECT_CLOSURE])
else:
logging.info("NO SUBJECT CLOSURE IN: "+str(d))
if M.SUBJECT_TAXON in d:
subject['taxon'] = self.translate_obj(d,M.SUBJECT_TAXON)
if M.OBJECT_TAXON in d:
obj['taxon'] = self.translate_obj(d, M.OBJECT_TAXON)
qualifiers = []
if M.RELATION in d and isinstance(d[M.RELATION],list):
# GO overloads qualifiers and relation
relation = None
for rel in d[M.RELATION]:
if rel.lower() == 'not':
qualifiers.append(rel)
else:
relation = rel
if relation is not None:
d[M.RELATION] = relation
else:
d[M.RELATION] = None
negated = 'not' in qualifiers
assoc = {'id':d.get(M.ID),
'subject': subject,
'object': obj,
'negated': negated,
'relation': self.translate_obj(d,M.RELATION),
'publications': self.translate_objs(d,M.SOURCE), # note 'source' is used in the golr schema
}
if self.invert_subject_object and assoc['relation'] is not None:
assoc['relation']['inverse'] = True
if len(qualifiers) > 0:
assoc['qualifiers'] = qualifiers
if M.OBJECT_CLOSURE in d:
assoc['object_closure'] = d.get(M.OBJECT_CLOSURE)
if M.IS_DEFINED_BY in d:
if isinstance(d[M.IS_DEFINED_BY],list):
assoc['provided_by'] = d[M.IS_DEFINED_BY]
else:
# hack for GO Golr instance
assoc['provided_by'] = [d[M.IS_DEFINED_BY]]
if M.EVIDENCE_OBJECT in d:
assoc['evidence'] = d[M.EVIDENCE_OBJECT]
assoc['types'] = [t for t in d[M.EVIDENCE_OBJECT] if t.startswith('ECO:')]
if self._use_amigo_schema(self.object_category):
for f in M.AMIGO_SPECIFIC_FIELDS:
if f in d:
assoc[f] = d[f]
# solr does not allow nested objects, so evidence graph is json-encoded
if M.EVIDENCE_GRAPH in d:
assoc[M.EVIDENCE_GRAPH] = json.loads(d[M.EVIDENCE_GRAPH])
return assoc | def translate_doc(self, d, field_mapping=None, map_identifiers=None, **kwargs) | Translate a solr document (i.e. a single result row) | 3.126755 | 3.080554 | 1.014998 |
for d in ds:
self.map_doc(d, {}, self.invert_subject_object)
return [self.translate_doc(d, **kwargs) for d in ds] | def translate_docs(self, ds, **kwargs) | Translate a set of solr results | 7.9879 | 8.002921 | 0.998123 |
amap = {}
logging.info("Translating docs to compact form. Slim={}".format(slim))
for d in ds:
self.map_doc(d, field_mapping, invert_subject_object=invert_subject_object)
subject = d[M.SUBJECT]
subject_label = d[M.SUBJECT_LABEL]
# TODO: use a more robust method; we need equivalence as separate field in solr
if map_identifiers is not None:
if M.SUBJECT_CLOSURE in d:
subject = self.map_id(subject, map_identifiers, d[M.SUBJECT_CLOSURE])
else:
logging.debug("NO SUBJECT CLOSURE IN: "+str(d))
rel = d.get(M.RELATION)
skip = False
# TODO
if rel == 'not' or rel == 'NOT':
skip = True
# this is a list in GO
if isinstance(rel,list):
if 'not' in rel or 'NOT' in rel:
skip = True
if len(rel) > 1:
logging.warning(">1 relation: {}".format(rel))
rel = ";".join(rel)
if skip:
logging.debug("Skipping: {}".format(d))
continue
subject = self.make_canonical_identifier(subject)
#if subject.startswith('MGI:MGI:'):
# subject = subject.replace('MGI:MGI:','MGI:')
k = (subject,rel)
if k not in amap:
amap[k] = {'subject':subject,
'subject_label':subject_label,
'relation':rel,
'objects': []}
if slim is not None and len(slim)>0:
mapped_objects = [x for x in d[M.OBJECT_CLOSURE] if x in slim]
logging.debug("Mapped objects: {}".format(mapped_objects))
amap[k]['objects'] += mapped_objects
else:
amap[k]['objects'].append(d[M.OBJECT])
for k in amap.keys():
amap[k]['objects'] = list(set(amap[k]['objects']))
return list(amap.values()) | def translate_docs_compact(self, ds, field_mapping=None, slim=None, map_identifiers=None, invert_subject_object=False, **kwargs) | Translate golr association documents to a compact representation | 3.09137 | 3.081394 | 1.003238 |
prefixc = prefix + ':'
ids = [eid for eid in closure_list if eid.startswith(prefixc)]
# TODO: add option to fail if no mapping, or if >1 mapping
if len(ids) == 0:
# default to input
return id
return ids[0] | def map_id(self,id, prefix, closure_list) | Map identifiers based on an equivalence closure list. | 6.744778 | 6.531178 | 1.032705 |
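For illustration, mapping a subject id onto its HGNC equivalent via a hypothetical closure list:
```python
closure_list = ['NCBIGene:1017', 'HGNC:1771', 'ENSEMBL:ENSG00000123374']   # made-up equivalents
prefix = 'HGNC'
ids = [eid for eid in closure_list if eid.startswith(prefix + ':')]
mapped = ids[0] if ids else 'NCBIGene:1017'   # fall back to the input id, as above
# 'HGNC:1771'
```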
meta = AssociationSetMetadata(subject_category=subject_category,
object_category=object_category,
taxon=taxon)
if file is not None:
return self.create_from_file(file=file,
fmt=fmt,
ontology=ontology,
meta=meta,
skim=skim)
logging.info("Fetching assocs from store")
assocs = bulk_fetch_cached(subject_category=subject_category,
object_category=object_category,
evidence=evidence,
taxon=taxon)
logging.info("Creating map for {} subjects".format(len(assocs)))
amap = {}
subject_label_map = {}
for a in assocs:
rel = a['relation']
subj = a['subject']
subject_label_map[subj] = a['subject_label']
amap[subj] = a['objects']
aset = AssociationSet(ontology=ontology,
meta=meta,
subject_label_map=subject_label_map,
association_map=amap)
return aset | def create(self, ontology=None,subject_category=None,object_category=None,evidence=None,taxon=None,relation=None, file=None, fmt=None, skim=True) | creates an AssociationSet
Currently, this uses an eager binding to an `ontobio.golr` instance. All compact associations for the particular combination
of parameters are fetched.
Arguments
---------
ontology: an `Ontology` object
subject_category: string representing category of subjects (e.g. gene, disease, variant)
object_category: string representing category of objects (e.g. function, phenotype, disease)
taxon: string holding NCBITaxon:nnnn ID | 3.044979 | 3.134846 | 0.971333 |
amap = {}
subject_label_map = {}
for a in tuples:
subj = a[0]
subject_label_map[subj] = a[1]
if subj not in amap:
amap[subj] = []
amap[subj].append(a[2])
aset = AssociationSet(subject_label_map=subject_label_map, association_map=amap, **args)
return aset | def create_from_tuples(self, tuples, **args) | Creates from a list of (subj,subj_name,obj) tuples | 2.863269 | 2.619595 | 1.09302 |
amap = defaultdict(list)
subject_label_map = {}
for a in assocs:
subj = a['subject']
subj_id = subj['id']
subj_label = subj['label']
subject_label_map[subj_id] = subj_label
if not a['negated']:
amap[subj_id].append(a['object']['id'])
aset = AssociationSet(subject_label_map=subject_label_map, association_map=amap, **args)
aset.associations_by_subj = defaultdict(list)
aset.associations_by_subj_obj = defaultdict(list)
for a in assocs:
sub_id = a['subject']['id']
obj_id = a['object']['id']
aset.associations_by_subj[sub_id].append(a)
aset.associations_by_subj_obj[(sub_id,obj_id)].append(a)
return aset | def create_from_assocs(self, assocs, **args) | Creates from a list of association objects | 2.183576 | 2.198201 | 0.993347 |
if fmt is not None and not fmt.startswith('.'):
fmt = '.{}'.format(fmt)
d = {
'.gaf' : GafParser,
'.gpad' : GpadParser,
'.hpoa' : HpoaParser,
}
if fmt is None:
filename = file if isinstance(file, str) else file.name
suffixes = pathlib.Path(filename).suffixes
iterator = (fn() for ext, fn in d.items() if ext in suffixes)
else:
iterator = (fn() for ext, fn in d.items() if ext == fmt)
try:
parser = next(iterator)
except StopIteration:
logging.error("Format not recognized: {}".format(fmt))
logging.info("Parsing {} with {}/{}".format(file, fmt, parser))
if skim:
results = parser.skim(file)
return self.create_from_tuples(results, **args)
else:
assocs = parser.parse(file, skipheader=True)
return self.create_from_assocs(assocs, **args) | def create_from_file(self, file=None, fmt='gaf', skim=True, **args) | Creates from a file. If fmt is set to None then the file suffixes will
be used to choose a parser.
Arguments
---------
file : str or file
input file or filename
fmt : str
name of format e.g. gaf | 2.936506 | 2.878464 | 1.020164 |
import requests
url = "http://snapshot.geneontology.org/annotations/{}.gaf.gz".format(group)
r = requests.get(url, stream=True, headers={'User-Agent': get_user_agent(modules=[requests], caller_name=__name__)})
p = GafParser()
results = p.skim(r.raw)
return self.create_from_tuples(results, **args) | def create_from_remote_file(self, group, snapshot=True, **args) | Creates from remote GAF | 5.28143 | 4.871509 | 1.084147 |
if args.slim.find('m') > -1:
logging.info("SLIMMING")
g = get_minimal_subgraph(g, query_ids)
w = GraphRenderer.create(args.to)
if args.showdefs:
w.config.show_text_definition = True
if args.render:
if 'd' in args.render:
logging.info("Showing text defs")
w.config.show_text_definition = True
if args.outfile is not None:
w.outfile = args.outfile
w.write(ont, query_ids=query_ids, container_predicates=args.container_properties) | def render(ont, query_ids, args) | Writes or displays graph | 6.184359 | 5.997313 | 1.031188 |
parser = argparse.ArgumentParser(
description='Command line interface to python-ontobio.golr library'
,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-A', '--associations', dest='associations', action='store_true', default=False,
help='Path to output file')
parser.add_argument('-s', '--settings', type=str,
help='Path to config file')
parser.add_argument('-o', '--outfile', type=str, required=False,
help='Path to output file')
parser.add_argument('-f', '--facets', type=str, required=False,
help='Facet fields: comma-delimited')
parser.add_argument('-q', '--fq', type=json.loads, default={}, required=False,
help='Facet query (solr fq) - should be json')
parser.add_argument('-Q', '--qargs', type=json.loads, default={}, required=False,
help='Query to be passed directly to python golr_associations query')
parser.add_argument('-l', '--legacy_solr', dest='legacy_solr', action='store_true', default=False,
help='Set for legacy solr schema (solr3 golr)')
parser.add_argument('-u', '--url', type=str, required=False,
help='Solr URL. E.g. http://localhost:8983/solr/golr')
parser.add_argument('-v', '--verbosity', default=0, action='count',
help='Increase output verbosity')
parser.add_argument('search', type=str,
help='Search terms')
args = parser.parse_args()
if args.verbosity >= 2:
logging.basicConfig(level=logging.DEBUG)
elif args.verbosity == 1:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
logging.info("Welcome!")
facets = []
if args.facets is not None:
facets = args.facets.split(",")
config = None
if args.settings is not None:
from ontobio.config import load_config
config = load_config(args.settings)
results = None
if args.associations:
q = None
if args.search != '%':
q = args.search
q = GolrAssociationQuery(q=q,
is_go=args.legacy_solr,
fq=args.fq,
facet_fields=facets,
url=args.url)
results = q.exec()
#print("RESULTS={}".format(results))
docs = results['associations']
print("RESULTS: {}".format(len(docs)))
for r in docs:
print(str(r))
else:
logging.info("FQ={}".format(args.fq))
q = GolrSearchQuery(args.search,
is_go=args.legacy_solr,
fq=args.fq,
facet_fields=facets,
url=args.url)
results = q.exec()
#print("RESULTS={}".format(results))
docs = results['docs']
print("RESULTS: {}".format(len(docs)))
for r in docs:
print(" {} '{}' {} // {}".format(r['id'],r['label'],r['score'], r['category']))
if len(facets) > 0:
#from collections import OrderedDict
fcs = results['facet_counts']
for f in facets:
d = fcs[f]
print(str(d))
print("## FACET: {}".format(f))
for k,v in sorted(d.items(), key=lambda t: -t[1]):
print(" {:5d}: {}".format(v,k)) | def main() | Wrapper for OGR | 2.6845 | 2.705157 | 0.992364 |
results = search_associations(subject=subject,
object_category=object_category,
select_fields=[],
facet_fields=[M.OBJECT_CLOSURE],
facet_limit=-1,
rows=0,
**kwargs)
return set(results['facet_counts'][M.OBJECT_CLOSURE].keys()) | def get_object_closure(subject, object_category=None, **kwargs) | Find all terms used to annotate subject plus ancestors | 5.045338 | 4.726793 | 1.067391 |
set1 = get_object_closure(subject1,
object_category=object_category,
**kwargs)
set2 = get_object_closure(subject2,
object_category=object_category,
**kwargs)
return len(set1.intersection(set2)), len(set1.union(set2)) | def subject_pair_overlap(subject1, subject2, object_category=None, **kwargs) | Jaccard similarity | 2.155009 | 2.203837 | 0.977844 |
i, u = subject_pair_overlap(subject1, subject2, **kwargs)
if i==0:
return 0.0
return i / u | def subject_pair_simj(subject1, subject2, **kwargs) | Jaccard similarity | 4.911732 | 4.583309 | 1.071656 |
if assoc.get("header", False):
return
subj = assoc['subject']
db, db_object_id = self._split_prefix(subj)
rel = assoc['relation']
qualifier = rel['id']
if assoc['negated']:
qualifier = 'NOT|' + qualifier
goid = assoc['object']['id']
ev = assoc['evidence']
evidence = self.ecomap.coderef_to_ecoclass(ev['type'])
withfrom = "|".join(ev['with_support_from'])
reference = "|".join(ev['has_supporting_reference'])
date = assoc['date']
assigned_by = assoc['provided_by']
annotation_properties = '' # TODO
interacting_taxon_id = assoc['interacting_taxon']
vals = [db,
db_object_id,
qualifier,
goid,
reference,
evidence,
withfrom,
interacting_taxon_id, # TODO
date,
assigned_by,
self._extension_expression(assoc['object_extensions']),
annotation_properties]
self._write_row(vals) | def write_assoc(self, assoc) | Write a single association to a line in the output file | 5.886081 | 5.713414 | 1.030221 |
# Handle comment 'associations'
if assoc.get("header", False):
# Skip incoming gaf-version headers, as we created the version above already
if re.match("![\s]*gaf.?version", assoc["line"]):
return
self._write(assoc["line"] + "\n")
return
subj = assoc['subject']
db, db_object_id = self._split_prefix(subj)
qualifier = "|".join(assoc["qualifiers"])
# qualifier is a |-separated string; filtering on None removes empty strings in case qualifiers is empty
if assoc['negated']:
qualifier = "|".join(list(filter(None, ["NOT", qualifier])))
goid = assoc['object']['id']
ev = assoc['evidence']
evidence = ev['type']
withfrom = "|".join(ev['with_support_from'])
reference = "|".join(ev['has_supporting_reference'])
date = assoc['date']
assigned_by = assoc['provided_by']
annotation_properties = '' # TODO
# if we have any subject extensions, list each one that has a "property" equal to "isoform", take the first one, and grab the "filler"
gene_product_isoform = [e for e in assoc["subject_extensions"] if e["property"] == "isoform"][0]["filler"] if len(assoc["subject_extensions"]) > 0 else ""
aspect = assoc['aspect']
interacting_taxon_id = assoc["interacting_taxon"]
taxon = self._full_taxon_field(self.normalize_taxon(subj['taxon']['id']), self.normalize_taxon(interacting_taxon_id))
extension_expression = self._extension_expression(assoc['object_extensions'])
vals = [db,
db_object_id,
subj.get('label'),
qualifier,
goid,
reference,
evidence,
withfrom,
aspect,
subj["fullname"],
"|".join(subj.get('synonyms',[])),
subj.get('type'),
taxon,
date,
assigned_by,
extension_expression,
gene_product_isoform]
self._write_row(vals) | def write_assoc(self, assoc) | Write a single association to a line in the output file | 5.957502 | 5.825386 | 1.022679 |
human_taxon = Node(
id='NCBITaxon:9606',
label='Homo sapiens'
)
return {
'MGI': Node(
id='NCBITaxon:10090',
label='Mus musculus'
),
'MONDO': human_taxon,
'OMIM': human_taxon,
'MONARCH': human_taxon,
'HGNC': human_taxon,
'FlyBase': Node(
id='NCBITaxon:7227',
label='Drosophila melanogaster'
),
'WormBase': Node(
id='NCBITaxon:6239',
label='Caenorhabditis elegans'
),
'ZFIN': Node(
id='NCBITaxon:7955',
label='Danio rerio'
)
} | def namespace_to_taxon() -> Dict[str, Node] | namespace to taxon mapping | 2.023603 | 2.021732 | 1.000925 |
scigraph = OntologyFactory().create('scigraph:data')
chunks = [id_list[i:i + 400] for i in range(0, len(list(id_list)), 400)]
for chunk in chunks:
params = {
'id': chunk,
'depth': 0
}
try:
result_graph = scigraph._neighbors_graph(**params)
for node in result_graph['nodes']:
yield node
except JSONDecodeError as exception:
# Assume json decode is due to an incorrect class ID
# Should we handle this?
raise ValueError(exception.doc) | def get_scigraph_nodes(id_list)-> Iterator[Dict] | Queries scigraph neighbors to get a list of nodes back
We use the scigraph neighbors function because ids can be sent in batch
which is faster than iteratively querying solr search
or the scigraph graph/id function
:return: json decoded result from scigraph_ontology._neighbors_graph
:raises ValueError: If id is not in scigraph | 5.257563 | 4.863532 | 1.081017 |
type_map = {}
filter_out_types = [
'cliqueLeader',
'Class',
'Node',
'Individual',
'quality',
'sequence feature'
]
for node in get_scigraph_nodes(id_list):
type_map[node['id']] = [typ.lower() for typ in node['meta']['types']
if typ not in filter_out_types]
return type_map | def get_id_type_map(id_list: Iterable[str]) -> Dict[str, List[str]] | Given a list of ids return their types
:param id_list: list of ids
:return: dictionary where the id is the key and the value is a list of types | 6.365052 | 6.921514 | 0.919604 |
node_list = []
for result in get_scigraph_nodes(id_list):
if 'lbl' in result:
label = result['lbl']
else:
label = None # Empty string or None?
node_list.append(Node(result['id'], label))
return node_list | def get_nodes_from_ids(id_list: Iterable[str]) -> List[Node] | Given a list of ids, return Node objects (id and label)
:param id_list: list of ids
:return: list of Node objects | 4.14834 | 4.843112 | 0.856544 |
taxon = None
namespace = id.split(":")[0]
if namespace in namespace_to_taxon():
taxon = namespace_to_taxon()[namespace]
return taxon | def get_taxon(id: str) -> Optional[Node] | get taxon for id
Currently via hardcoding, should replace when scigraph when
taxa are more universally annotated (having these as node
properties would also be more performant)
:param id: curie formatted id
:return: Node where id is the NCBITaxon curie and label is the scientific name | 4.062889 | 5.00545 | 0.811693 |
filter_out_types = [
'cliqueLeader',
'Class',
'Node',
'Individual',
'quality',
'sequence feature'
]
node = next(get_scigraph_nodes([id]))
if 'lbl' in node:
label = node['lbl']
else:
label = None # Empty string or None?
types = [typ.lower() for typ in node['meta']['types']
if typ not in filter_out_types]
return TypedNode(
id=node['id'],
label=label,
type=types[0],
taxon = get_taxon(id)
) | def typed_node_from_id(id: str) -> TypedNode | Get typed node from id
:param id: id as curie
:return: TypedNode object | 6.85899 | 6.572224 | 1.043633 |
curie_map = {}
response = requests.get(url)
if response.status_code == 200:
curie_map = response.json()
else:
curie_map = {}
return curie_map | def get_curie_map(url) | Get CURIE prefix map from SciGraph cypher/curies endpoint | 1.954912 | 1.872867 | 1.043807 |
return self.reporter.json(self.n_lines, self.n_assocs, self.skipped) | def to_report_json(self) | Generate a summary in json format | 23.10018 | 17.071882 | 1.353113 |
json = self.to_report_json()
# summary = json['summary']
s = "# Group: {group} - Dataset: {dataset}\n".format(group=json["group"], dataset=json["dataset"])
s += "\n## SUMMARY\n\n"
s += "This report generated on {}\n\n".format(datetime.date.today())
s += " * Associations: {}\n" . format(json["associations"])
s += " * Lines in file (incl headers): {}\n" . format(json["lines"])
s += " * Lines skipped: {}\n" . format(json["skipped_lines"])
# Header from GAF
s += "## Header From Original Association File\n\n"
s += "\n".join(["> {} ".format(head) for head in self.header])
## Table of Contents
s += "\n\n## Contents\n\n"
for rule, messages in sorted(json["messages"].items(), key=lambda t: t[0]):
any_suppress_tag_in_rule_metadata = any([tag in self.config.rule_metadata.get(rule, {}).get("tags", []) for tag in self.config.suppress_rule_reporting_tags])
# For each tag we say to suppress output for, check if it matches any tag in the rule. If any matches
if self.config.rule_metadata and any_suppress_tag_in_rule_metadata:
print("Skipping {rule_num} because the tag(s) '{tag}' are suppressed".format(rule_num=rule, tag=", ".join(self.config.suppress_rule_reporting_tags)))
continue
s += "[{rule}](#{rule})\n\n".format(rule=rule)
s += "\n## MESSAGES\n\n"
for (rule, messages) in sorted(json["messages"].items(), key=lambda t: t[0]):
any_suppress_tag_in_rule_metadata = any([tag in self.config.rule_metadata.get(rule, {}).get("tags", []) for tag in self.config.suppress_rule_reporting_tags])
# Skip if the rule metadata has "silent" as a tag
if self.config.rule_metadata and any_suppress_tag_in_rule_metadata:
# If there is a rule metadata, and the rule ID is in the config,
# get the list of tags if present and check for existence of "silent".
# If contained, continue to the next rule.
continue
s += "### {rule}\n\n".format(rule=rule)
if rule != "other" and self.config.rule_metadata:
s += "{title}\n\n".format(title=self.config.rule_metadata.get(rule, {}).get("title", ""))
s += "* total: {amount}\n".format(amount=len(messages))
if len(messages) > 0:
s += "#### Messages\n"
for message in messages:
obj = " ({})".format(message["obj"]) if message["obj"] else ""
s += "* {level} - {type}: {message}{obj} -- `{line}`\n".format(level=message["level"], type=message["type"], message=message["message"], line=message["line"], obj=obj)
# for g in json['groups']:
# s += " * {}: {}\n".format(g['level'], g['count'])
# s += "\n\n"
# for g in json['groups']:
# level = g['level']
# msgs = g['messages']
# if len(msgs) > 0:
# s += "### {}\n\n".format(level)
# for m in msgs:
# s += " * {}: obj:'{}' \"{}\" `{}`\n".format(m['type'],m['obj'],m['message'],m['line'])
return s | def to_markdown(self) | Generate a summary in markdown format | 3.209638 | 3.188005 | 1.006786 |
associations = self.association_generator(file, skipheader=skipheader, outfile=outfile)
a = list(associations)
return a | def parse(self, file, skipheader=False, outfile=None) | Parse a line-oriented association file into a list of association dict objects
Note the returned list is of dict objects. TODO: These will
later be specified using marshmallow and it should be possible
to generate objects
Arguments
---------
file : file or string
The file is parsed into association objects. Can be a http URL, filename or `file-like-object`, for input assoc file
outfile : file
Optional output file in which processed lines are written. This a file or `file-like-object`
Return
------
list
Associations generated from the file | 6.066866 | 6.621714 | 0.916208 |
file = self._ensure_file(file)
for line in file:
parsed_result = self.parse_line(line)
self.report.report_parsed_result(parsed_result, outfile, self.config.filtered_evidence_file, self.config.filter_out_evidence)
for association in parsed_result.associations:
# yield association if we don't care if it's a header or if it's definitely a real gaf line
if not skipheader or "header" not in association:
yield association
logging.info(self.report.short_summary())
file.close() | def association_generator(self, file, skipheader=False, outfile=None) -> Dict | Returns a generator that yields successive associations from file
Yields
------
association | 6.482161 | 6.624885 | 0.978456 |
if subset is not None:
logging.info("Creating mapping for subset: {}".format(subset))
class_map = ontology.create_slim_mapping(subset=subset, relations=relations)
if class_map is None:
raise ValueError("Neither class_map not subset is set")
col = self.ANNOTATION_CLASS_COLUMN
file = self._ensure_file(file)
tuples = []
for line in file:
if line.startswith("!"):
continue
vals = line.split("\t")
logging.info("LINE: {} VALS: {}".format(line, vals))
if len(vals) < col:
raise ValueError("Line: {} has too few cols, expect class id in col {}".format(line, col))
cid = vals[col]
if cid not in class_map or len(class_map[cid]) == 0:
self.report.error(line, Report.UNMAPPED_ID, cid)
continue
else:
for mcid in class_map[cid]:
vals[col] = mcid
line = "\t".join(vals)
if outfile is not None:
outfile.write(line)
else:
print(line) | def map_to_subset(self, file, outfile=None, ontology=None, subset=None, class_map=None, relations=None) | Map a file to a subset, writing out results
You can pass either a subset name (e.g. goslim_generic) or a dictionary with ready-made mappings
Arguments
---------
file: file
Name or file object for input assoc file
outfile: file
Name or file object for output (mapped) assoc file; writes to stdout if not set
subset: str
Optional name of subset to map to, e.g. goslim_generic
class_map: dict
Mapping between asserted class ids and ids to map to. Many to many
ontology: `Ontology`
Ontology to extract subset from | 3.382523 | 3.433395 | 0.985183 |
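A sketch of mapping an annotation file onto a GO slim; the parser class, file names, and the remote fetch performed by OntologyFactory are assumptions for illustration.

from ontobio import OntologyFactory
from ontobio.io.gafparser import GafParser    # assumed concrete parser subclass

ont = OntologyFactory().create("go")          # load GO so the slim mapping can be built
p = GafParser()
with open("mgi.goslim_generic.gaf", "w") as out:
    # Each annotation class is replaced by its goslim_generic ancestor(s);
    # unmapped ids are reported and their lines skipped
    p.map_to_subset("mgi.gaf", outfile=out, ontology=ont, subset="goslim_generic")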
parser = argparse.ArgumentParser(
        description='Command line interface to python-ontobio.golr library',
        formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-o', '--outfile', type=str, required=False,
help='Path to output file')
parser.add_argument('-f', '--facet', type=str, required=True,
help='Facet field to query')
parser.add_argument('-q', '--fq', type=json.loads, default={}, required=False,
help='Facet query (solr fq) - should be json')
parser.add_argument('-Q', '--qargs', type=json.loads, default={}, required=False,
help='Query to be passed directly to python golr_associations query')
parser.add_argument('-P', '--search', nargs='*', type=str, required=False,
                        help='Search fields. E.g. subject_category, object_category, relation')
parser.add_argument('-u', '--url', type=str, required=False,
help='Solr URL. E.g. http://localhost:8983/solr/golr')
parser.add_argument('-v', '--verbosity', default=0, action='count',
help='Increase output verbosity')
args = parser.parse_args()
if args.verbosity >= 2:
logging.basicConfig(level=logging.DEBUG)
elif args.verbosity == 1:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
logging.info("Welcome!")
r = search_query_as_matrix(facet=args.facet,
fq=args.fq,
facet_search_fields=args.search,
url=args.url,
**args.qargs)
print(str(r))
trace = go.Heatmap(z=r['z'],
x=r['xaxis'],
y=r['yaxis'])
data=[trace]
py.plot(data, filename='search-heatmap') | def main() | Wrapper for OGR | 3.15307 | 3.191636 | 0.987917 |
if session.config is None:
path = session.default_config_path
if os.path.isfile(path):
logging.info("LOADING FROM: {}".format(path))
session.config = load_config(path)
else:
session.config = Config()
logging.info("using default session: {}, path does not exist: {}".format(session, path))
else:
logging.info("Using pre-loaded object: {}".format(session.config))
return session.config | def get_config() | Return configuration for current session.
    When called for the first time, this will create a config object, using
    the default load path to locate the config yaml | 3.891671 | 3.753497 | 1.036812 |
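A short sketch of the session-level configuration helpers; the module path and the solr_assocs attribute are assumptions inferred from the surrounding rows.

from ontobio.config import get_config, set_config   # assumed module location

cfg = get_config()                      # loads the default YAML on first call, then caches it
print(cfg.solr_assocs)                  # e.g. the configured Solr assocs endpoint
set_config("my-ontobio-config.yaml")    # hypothetical path; replaces the session config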
logging.info("LOADING FROM: {}".format(path))
session.config = load_config(path)
return session.config | def set_config(path) | Set configuration for current session. | 7.179733 | 7.346335 | 0.977322 |
url = self.endpoint_url(self.solr_search)
if use_amigo:
url = self.endpoint_url(self.amigo_solr_search)
return url | def get_solr_search_url(self, use_amigo=False) | Return solr URL to be used for lexical entity searches
A solr search URL is used to search entities/concepts based on a limited set of parameters.
Arguments
---------
use_amigo : bool
If true, get the URL for the GO/AmiGO instance of GOlr. This is typically used for category='function' queries | 2.809652 | 3.884622 | 0.723276 |
url = self.endpoint_url(self.solr_assocs)
if use_amigo:
url = self.endpoint_url(self.amigo_solr_assocs)
return url | def get_solr_assocs_url(self, use_amigo=False) | Return solr URL to be used for assocation (enhanced triple) queries
        A solr assocs URL is used to query triple-patterns in Solr, i.e. subject-relation-object
There are two possible schemas: Monarch and AmiGO. The AmiGO schema is used for
querying the GO and Planteome Golr instances | 2.633345 | 3.243351 | 0.811921 |
gaf_urls = [ (data, data["source"]) for data in group_metadata["datasets"] if data["type"] == "gaf" and data["dataset"] not in exclusions ]
# List of dataset metadata to gaf download url
click.echo("Found {}".format(", ".join( [ kv[0]["dataset"] for kv in gaf_urls ] )))
downloaded_paths = []
for dataset_metadata, gaf_url in gaf_urls:
dataset = dataset_metadata["dataset"]
# Local target download path setup - path and then directories
path = download_a_dataset_source(group_metadata["id"], dataset_metadata, target_dir, gaf_url, base_download_url=base_download_url)
if dataset_metadata["compression"] == "gzip":
# Unzip any downloaded file that has gzip, strip of the gzip extension
unzipped = os.path.splitext(path)[0]
unzip(path, unzipped)
path = unzipped
else:
# otherwise file is coming in uncompressed. But we want to make sure
# to zip up the original source also
zipup(path)
downloaded_paths.append((dataset_metadata, path))
return downloaded_paths | def download_source_gafs(group_metadata, target_dir, exclusions=[], base_download_url=None) | This looks at a group metadata dictionary and downloads each GAF source that is not in the exclusions list.
    For each downloaded file, keep track of its path. If the file is gzipped, it is unzipped here.
This function returns a list of tuples of the dataset dictionary mapped to the downloaded source path. | 5.474878 | 5.258955 | 1.041058 |
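A hedged sketch of calling the downloader above (assuming it is importable); the metadata keys mirror the ones read in the code ("id", "datasets", "dataset", "type", "source", "compression"), while the URL and paths are made up for illustration.

group_metadata = {
    "id": "mgi",
    "datasets": [
        {"dataset": "mgi", "type": "gaf", "compression": "gzip",
         "source": "http://example.org/annotations/mgi.gaf.gz"},   # illustrative URL
    ],
}
# Downloads each non-excluded GAF source; gzip files are unzipped in place
paths = download_source_gafs(group_metadata, "target/groups/mgi", exclusions=[])
for dataset_metadata, local_path in paths:
    print(dataset_metadata["dataset"], "->", local_path)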
if categories is None:
categories = [enum.value for enum in HpoUpperLevel]
ic_map = self.ic_store.get_profile_ic(profile + negated_classes)
# Simple score is the weighted average of the present and
# explicitly stated negative/absent phenotypes
#
# Note that we're deviating from the publication
# to match the reference java implementation where
# mean_max_ic is replaced with max_max_ic:
# https://github.com/owlcollab/owltools/blob/452b4a/
# OWLTools-Sim/src/main/java/owltools/sim2/AbstractOwlSim.java#L1038
simple_score = self._get_simple_score(
profile, negated_classes, self.ic_store.statistics.mean_mean_ic,
self.ic_store.statistics.max_max_ic, self.ic_store.statistics.mean_sum_ic,
negation_weight, ic_map
)
categorical_score = self._get_categorical_score(
profile, negated_classes, categories,
negation_weight, ic_map
)
scaled_score = self._get_scaled_score(
simple_score, categorical_score, category_weight)
return AnnotationSufficiency(
simple_score=simple_score,
scaled_score=scaled_score,
categorical_score=categorical_score
) | def get_annotation_sufficiency(
self,
profile: List[str],
negated_classes: List[str],
categories: Optional[List] = None,
negation_weight: Optional[float] = .25,
category_weight: Optional[float] = .5) -> AnnotationSufficiency | Given a list of individuals, return the simple, scaled, and categorical scores | 4.544084 | 4.275937 | 1.062711 |
if ic_map is None:
ic_map = self.ic_store.get_profile_ic(profile + negated_classes)
pos_map = {cls: ic for cls, ic in ic_map.items() if cls in profile}
neg_map = {cls: ic for cls, ic in ic_map.items() if cls in negated_classes}
mean_ic = mean(pos_map.values()) if len(profile) > 0 else 0
max_ic = max(pos_map.values()) if len(profile) > 0 else 0
sum_ic = sum(pos_map.values()) if len(profile) > 0 else 0
if len(negated_classes) > 0:
weighted_ic = [ic * negation_weight for ic in neg_map.values()]
mean_ic = max([np.average([mean_ic, mean(neg_map.values())],
weights=[1, negation_weight]),
mean_ic])
max_ic = max([max_ic] + weighted_ic)
sum_ic = sum_ic + sum(weighted_ic)
return mean([
min([mean_ic / bg_mean_pic, 1.0]),
min([max_ic / bg_mean_max_pic, 1.0]),
min([sum_ic / bg_mean_sum_pic, 1.0])
]) | def _get_simple_score(self,
profile: List[str],
negated_classes: List[str],
bg_mean_pic: float,
bg_mean_max_pic: float,
bg_mean_sum_pic: float,
negation_weight: Optional[float] = .25,
ic_map: Optional[Dict[str, float]] = None) -> float | Simple score is the average of the relative
mean ic, max ic, and sum ic (relative to global stats)
:param ic_map: dictionary of class - information content mappings
:param bg_mean_pic: the average of the average IC in
the background profile annotations
:param bg_mean_max_pic: max IC annotated to the background set of profiles
:param bg_mean_sum_pic: Average of the profile sum IC in background set
    :param negation_weight: weight applied to the information content of negated classes
:return: simple score (float) | 1.931862 | 1.956863 | 0.987224 |
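A self-contained toy re-implementation of the scoring arithmetic above, useful for sanity-checking the formula; all numeric inputs are made up.

from statistics import mean

def simple_score_sketch(pos_ic, neg_ic, bg_mean, bg_max, bg_sum, w=0.25):
    # mean/max/sum IC of the positive profile
    mean_ic = mean(pos_ic) if pos_ic else 0
    max_ic = max(pos_ic) if pos_ic else 0
    sum_ic = sum(pos_ic)
    if neg_ic:
        weighted = [ic * w for ic in neg_ic]
        # weighted average of positive and negated means, never lower than the positive mean
        mean_ic = max((mean_ic + mean(neg_ic) * w) / (1 + w), mean_ic)
        max_ic = max([max_ic] + weighted)
        sum_ic += sum(weighted)
    # average of the three background-relative ratios, each capped at 1.0
    return mean([min(mean_ic / bg_mean, 1.0),
                 min(max_ic / bg_max, 1.0),
                 min(sum_ic / bg_sum, 1.0)])

print(simple_score_sketch([8.2, 6.5, 9.1], [4.0], bg_mean=7.0, bg_max=12.0, bg_sum=40.0))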
return np.average(
[simple_score, categorical_score], weights=[1, category_weight]
) | def _get_scaled_score(
simple_score: float,
categorical_score: float,
category_weight: Optional[float] = .5) -> float | Scaled score is the weighted average of the simple score and
categorical score | 4.767496 | 4.067319 | 1.172147 |
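The weighted average above has a simple closed form; a tiny sketch with made-up scores.

def scaled_score(simple_score, categorical_score, category_weight=0.5):
    # closed form of np.average([simple, categorical], weights=[1, w])
    return (simple_score + category_weight * categorical_score) / (1 + category_weight)

print(scaled_score(0.8, 0.6))   # -> 0.7333...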
if ic_map is None:
ic_map = self.ic_store.get_profile_ic(profile + negated_classes)
scores = []
for cat in categories:
if cat not in self.ic_store.category_statistics:
raise ValueError("statistics for {} not indexed".format(cat))
pos_profile = [cls for cls in profile
if cls in self.ic_store.category_statistics[cat].descendants]
neg_profile = [cls for cls in negated_classes
if cls in self.ic_store.category_statistics[cat].descendants]
# Note that we're deviating from the publication
# to match the reference java implementation where
# mean_max_ic is replaced by max_max_ic
scores.append(self._get_simple_score(
pos_profile, neg_profile,
self.ic_store.category_statistics[cat].mean_mean_ic,
self.ic_store.category_statistics[cat].max_max_ic,
self.ic_store.category_statistics[cat].mean_sum_ic,
negation_weight, ic_map
))
return mean(scores) | def _get_categorical_score(
self,
profile: List,
negated_classes: List,
categories: List,
negation_weight: Optional[float] = 1,
ic_map: Optional[Dict[str, float]] = None) -> float | The average of the simple scores across a list of categories | 3.161736 | 3.022929 | 1.045918 |
if negated_ids is None: negated_ids = []
if method not in self.sim_api.matchers():
raise NotImplementedError("Sim method not implemented "
"in {}".format(str(self.sim_api)))
# Determine if entity is a phenotype or individual containing
# a pheno profile (gene, disease, case, etc)
pheno_list = PhenoSimEngine._resolve_nodes_to_phenotypes(id_list)
if taxon_filter is not None or category_filter is not None:
if not isinstance(self.sim_api, FilteredSearchable):
raise NotImplementedError("filtered search not implemented "
"in {}".format(str(self.sim_api)))
search_result = self.sim_api.filtered_search(
pheno_list, negated_ids, limit, taxon_filter, category_filter, method
)
else:
search_result = self.sim_api.search(pheno_list, negated_ids, limit, method)
return search_result | def search(
self,
id_list: List[str],
negated_ids: Optional[List] = None,
limit: Optional[int] = 100,
taxon_filter: Optional[int]= None,
category_filter: Optional[str]= None,
method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM
) -> SimResult | Execute a search using sim_api, resolving non-phenotype ids to
phenotype lists then adding them to the profile (eg genes, diseases)
:raises NotImplementedError:
- If sim method or filters are not supported | 3.596495 | 3.350421 | 1.073446 |
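A hedged usage sketch for the search API above; the backend class, module paths, identifiers, and result attributes are assumptions, not guaranteed by this row.

from ontobio.sim.api.owlsim2 import OwlSim2Api           # assumed sim backend
from ontobio.sim.phenosim_engine import PhenoSimEngine   # assumed module path

engine = PhenoSimEngine(OwlSim2Api())
# Mixed profile: a phenotype plus a gene whose phenotypes get resolved via has_phenotype
results = engine.search(["HP:0001250", "MGI:88276"], limit=20, taxon_filter=9606)
for match in results.matches[:5]:
    print(match.rank, match.id, match.label)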
if method not in self.sim_api.matchers():
raise NotImplementedError("Sim method not implemented "
"in {}".format(str(self.sim_api)))
is_first_result = True
comparisons = None
reference_phenos = PhenoSimEngine._resolve_nodes_to_phenotypes(reference_ids)
for query_profile in query_profiles:
query_phenos = PhenoSimEngine._resolve_nodes_to_phenotypes(query_profile)
sim_result = self.sim_api.compare(reference_phenos, query_phenos, method)
if len(query_profile) > 1:
id = " + ".join(query_profile)
sim_result.matches[0].id = id
sim_result.matches[0].label = id
else:
node = typed_node_from_id(query_profile[0])
sim_result.matches[0].id = node.id
sim_result.matches[0].label = node.label
sim_result.matches[0].type = node.type
sim_result.matches[0].taxon = node.taxon
if is_first_result:
comparisons = sim_result
is_first_result = False
else:
comparisons.matches.append(sim_result.matches[0])
comparisons.query.target_ids.append(sim_result.query.target_ids[0])
if len(reference_ids) == 1:
comparisons.query.reference = typed_node_from_id(reference_ids[0])
else:
reference_id = " + ".join(reference_ids)
comparisons.query.reference = TypedNode(
id=reference_id,
label=reference_id,
type='unknown'
)
return comparisons | def compare(self,
reference_ids: List,
query_profiles: List[List],
method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM) -> SimResult | Execute one or more comparisons using sim_api
:param reference_ids: a list of phenotypes or ids that comprise
one or more phenotypes
:param query_profiles: a list of lists of phenotypes or ids
that comprise one or more phenotypes
:param method: comparison method
:return: SimResult object
:raises NotImplementedError: If sim method or filters are not supported | 2.536028 | 2.424379 | 1.046053 |
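A hedged sketch of comparing one reference profile against several query profiles; engine construction and attribute names follow the same assumptions as the search sketch above.

from ontobio.sim.api.owlsim2 import OwlSim2Api           # assumed sim backend
from ontobio.sim.phenosim_engine import PhenoSimEngine   # assumed module path

engine = PhenoSimEngine(OwlSim2Api())
reference = ["HP:0001250", "HP:0001263"]         # reference phenotype profile
queries = [["HP:0001249"], ["MONDO:0007739"]]    # one phenotype list, one disease id
result = engine.compare(reference, queries)
for match in result.matches:
    print(match.id, match.label, match.score)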
pheno_list = []
node_types = get_id_type_map(id_list)
for node in id_list:
if 'phenotype' in node_types[node]:
pheno_list.append(node)
else:
phenotypes = get_objects_for_subject(
subject=node, object_category='phenotype', relation='RO:0002200'
)
pheno_list = pheno_list + phenotypes
return pheno_list | def _resolve_nodes_to_phenotypes(id_list: List[str]) -> List[str] | Given a list of ids of unknown type, determine which ids
        are phenotypes; if an id is not a phenotype, check whether
        it is associated with one or more phenotypes via
the 'has_phenotype' relation
:param id_list: list of ids of any type (curies as strings)
:return: list of phenotypes (curies as strings) | 3.292482 | 3.199154 | 1.029173 |
db, db_object_id = self._split_prefix(entity)
taxon = normalize_taxon(entity["taxon"]["id"])
vals = [
db,
db_object_id,
entity.get('label'),
entity.get('full_name'),
entity.get('synonyms'),
entity.get('type'),
taxon,
entity.get('parents'),
entity.get('xrefs'),
entity.get('properties')
]
self._write_row(vals) | def write_entity(self, entity) | Write a single entity to a line in the output file | 4.378431 | 3.954102 | 1.107314 |
pass | def search(self,
id_list: Iterable,
negated_classes: Iterable,
limit: Optional[int],
method: Optional) -> List[SimResult] | Given an input list of classes, searches for similar lists of classes
and provides a ranked list of matches | 23,094.4375 | 11,415.916016 | 2.023003 |
pass | def filtered_search(self,
id_list: Iterable,
negated_classes: Iterable,
limit: Optional[int],
taxon_filter: Optional,
category_filter: Optional,
method: Optional) -> SimResult | Given an input iterable of classes or individuals,
provides a ranking of similar profiles | 46,685.46875 | 13,824.976563 | 3.376893 |
pass | def compare(self,
reference_ids: Iterable,
query_profiles: Iterable[Iterable],
method: Optional) -> SimResult | Given two lists of entities (classes, individuals),
resolves them to some type (phenotypes, go terms, etc) and
returns their similarity | 77,436.351563 | 42,704.917969 | 1.813289 |