code : string
signature : string
docstring : string
loss_without_docstring : float64
loss_with_docstring : float64
factor : float64
if handle is None:
    self.test = self.test + 1
    logging.info("T: " + str(self.test))
    global default_ontology
    if default_ontology is None:
        logging.info("Creating new instance of default ontology")
        default_ontology = create_ontology(default_ontology_handle)
    logging.info("Using default_ontology")
    return default_ontology
return create_ontology(handle, **args)
def create(self, handle=None, handle_type=None, **args)
Creates an ontology based on a handle. Handle is one of the following:
- `FILENAME.json` : creates an ontology from an obographs json file
- `obo:ONTID` : E.g. obo:pato - creates an ontology from obolibrary PURL (requires owltools)
- `ONTID` : E.g. 'pato' - creates an ontology from a remote SPARQL query

Arguments
---------
handle : str
    specifies how to retrieve the ontology info
4.872796
4.754678
1.024842
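The snippet above memoizes a module-level default ontology. A minimal, self-contained sketch of that pattern follows; every name in it is a stand-in, not the real ontobio API:

import logging

default_ontology = None
DEFAULT_HANDLE = 'pato'  # hypothetical default handle

def create_ontology(handle, **args):
    # stand-in for the real constructor
    return {'handle': handle}

def create(handle=None, **args):
    global default_ontology
    if handle is None:
        if default_ontology is None:
            logging.info("Creating new instance of default ontology")
            default_ontology = create_ontology(DEFAULT_HANDLE)
        return default_ontology
    return create_ontology(handle, **args)

assert create() is create()              # the default is built once and reused
assert create('go') == {'handle': 'go'}  # explicit handles bypass the cache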
logging.info("fetching xrefs for: "+prefix) query = .format(p=prefixmap[prefix]) bindings = run_sparql(query) rows = [(r['c']['value'], r['x']['value']) for r in bindings] return rows
def fetchall_triples_xrefs(prefix)
fetch all xrefs for a prefix, e.g. CHEBI
6.173755
6.047436
1.020888
# the SPARQL query template (a triple-quoted string) is elided in the source
query = """...""".format(s=s, p=prefixmap[p])
bindings = run_sparql(query)
rows = [r['x']['value'] for r in bindings]
return rows
def fetchall_sp(s,p)
fetch all triples for a property
7.394237
6.256487
1.181851
qobj = qmap[n]
chain = qobj['chain']
prefix = qobj['prefix']
whr = chain2where(chain + [prefix], 'x')
# the SPARQL query template (a triple-quoted string) is elided in the source
query = """...""".format(s=s, w=whr)
bindings = run_sparql(query)
rows = [ensure_prefixed(r['x']['value'], prefix) for r in bindings]
return rows
def canned_query(n,s)
uses canned query
8.336591
8.558735
0.974045
if id is None or id == 0 or id >= 10000000:
    return "other"
return "gorule-{:0>7}".format(id)
def _rule_id(self, id: int) -> str
Convert an integer into a gorule key id.
7.843746
4.392059
1.785892
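The gorule key above is plain str.format zero-padding; a runnable illustration of the same logic, with the class wrapper dropped:

def rule_id(id):
    # mirrors _rule_id above
    if id is None or id == 0 or id >= 10000000:
        return "other"
    return "gorule-{:0>7}".format(id)

assert rule_id(6) == "gorule-0000006"   # padded to seven digits
assert rule_id(None) == "other"         # no rule: catch-all bucket
assert rule_id(10000000) == "other"     # beyond the seven-digit id range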
rule_id = self._rule_id(rule)
if rule_id not in self.messages:
    self.messages[rule_id] = []
if len(self.messages[rule_id]) < self._rule_message_cap:
    self.messages[rule_id].append(message)
def message(self, message: Message, rule: Optional[int]) -> None
Add a message to the appropriate list of messages. If `rule` refers to a valid id range for a go rule, the message is entered in a list keyed by the full gorule-{id}. Otherwise, if `rule` is None, or outside the id range, then we put this in the catch-all "other" keyed list of messages.
2.765953
2.602761
1.0627
f = open(obographfile, 'r')
jsonstr = f.read()
f.close()
return convert_json_object(json.loads(jsonstr), **args)
def convert_json_file(obographfile, **args)
Return a networkx MultiDiGraph of the ontologies serialized as a json string
2.666079
2.394297
1.113512
digraph = networkx.MultiDiGraph()
xref_graph = networkx.MultiGraph()
logical_definitions = []
property_chain_axioms = []
context = obographdoc.get('@context', {})
logging.info("CONTEXT: {}".format(context))
mapper = OboJsonMapper(digraph=digraph, context=context)
ogs = obographdoc['graphs']
base_og = ogs[0]
for og in ogs:
    # TODO: refactor this
    mapper.add_obograph_digraph(og, xref_graph=xref_graph,
                                logical_definitions=logical_definitions,
                                property_chain_axioms=property_chain_axioms,
                                **args)
return {
    'id': base_og.get('id'),
    'meta': base_og.get('meta'),
    'graph': mapper.digraph,
    'xref_graph': xref_graph,
    'graphdoc': obographdoc,
    'logical_definitions': logical_definitions,
    'property_chain_axioms': property_chain_axioms
}
def convert_json_object(obographdoc, **args)
Return a networkx MultiDiGraph of the ontologies serialized as a json object
3.465186
3.39136
1.021769
digraph = self.digraph
logging.info("NODES: {}".format(len(og['nodes'])))
# if client passes an xref_graph we must parse metadata
if xref_graph is not None:
    parse_meta = True
for n in og['nodes']:
    is_obsolete = 'is_obsolete' in n and n['is_obsolete'] == 'true'
    if is_obsolete:
        continue
    if node_type is not None and ('type' not in n or n['type'] != node_type):
        continue
    id = self.contract_uri(n['id'])
    digraph.add_node(id, **n)
    if 'lbl' in n:
        digraph.node[id]['label'] = n['lbl']
    if parse_meta and 'meta' in n:
        if n['meta'] is None:
            n['meta'] = {}
        meta = self.transform_meta(n['meta'])
        if xref_graph is not None and 'xrefs' in meta:
            for x in meta['xrefs']:
                xref_graph.add_edge(self.contract_uri(x['val']), id, source=id)
logging.info("EDGES: {}".format(len(og['edges'])))
for e in og['edges']:
    sub = self.contract_uri(e['sub'])
    obj = self.contract_uri(e['obj'])
    pred = self.contract_uri(e['pred'])
    pred = map_legacy_pred(pred)
    if pred == 'is_a':
        pred = 'subClassOf'
    if predicates is None or pred in predicates:
        digraph.add_edge(obj, sub, pred=pred)
if 'equivalentNodesSets' in og:
    nslist = og['equivalentNodesSets']
    logging.info("CLIQUES: {}".format(len(nslist)))
    for ns in nslist:
        equivNodeIds = ns['nodeIds']
        for i in ns['nodeIds']:
            ix = self.contract_uri(i)
            for j in ns['nodeIds']:
                if i != j:
                    jx = self.contract_uri(j)
                    digraph.add_edge(ix, jx, pred='equivalentTo')
if logical_definitions is not None and 'logicalDefinitionAxioms' in og:
    for a in og['logicalDefinitionAxioms']:
        ld = LogicalDefinition(self.contract_uri(a['definedClassId']),
                               [self.contract_uri(x) for x in a['genusIds']],
                               [(self.contract_uri(x['propertyId']),
                                 self.contract_uri(x['fillerId']))
                                for x in a['restrictions'] if x is not None])
        logical_definitions.append(ld)
if property_chain_axioms is not None and 'propertyChainAxioms' in og:
    for a in og['propertyChainAxioms']:
        pca = PropertyChainAxiom(predicate_id=self.contract_uri(a['predicateId']),
                                 chain_predicate_ids=[self.contract_uri(x)
                                                      for x in a['chainPredicateIds']])
        property_chain_axioms.append(pca)
def add_obograph_digraph(self, og, node_type=None, predicates=None, xref_graph=None, logical_definitions=None, property_chain_axioms=None, parse_meta=True, **args)
Converts a single obograph to Digraph edges and adds to an existing networkx DiGraph
2.377914
2.372889
1.002118
if name.startswith('.'):
    remainder = name.lstrip('.')
    dot_count = (len(name) - len(remainder))
    prefix = '../' * (dot_count - 1)
else:
    remainder = name
    dot_count = 0
    prefix = ''
filename = prefix + os.path.join(*remainder.split('.'))
return (filename, dot_count)
def convert_to_path(name)
Converts ".module" to "./module", "..module" to "../module", etc.
3.484795
3.187679
1.093208
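A standalone check of the dot handling above (body copied verbatim; the expected strings assume a POSIX path separator):

import os

def convert_to_path(name):
    if name.startswith('.'):
        remainder = name.lstrip('.')
        dot_count = (len(name) - len(remainder))
        prefix = '../' * (dot_count - 1)
    else:
        remainder = name
        dot_count = 0
        prefix = ''
    filename = prefix + os.path.join(*remainder.split('.'))
    return (filename, dot_count)

assert convert_to_path('a.b') == (os.path.join('a', 'b'), 0)
assert convert_to_path('.module') == ('module', 1)       # current package
assert convert_to_path('..module') == ('../module', 2)   # one level up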
filename, _ = os.path.splitext(filename)
for f in fspath:
    short_name = f.relative_path(filename)
    if short_name:
        # The module name for __init__.py files is the directory.
        if short_name.endswith(os.path.sep + "__init__"):
            short_name = short_name[:short_name.rfind(os.path.sep)]
        return short_name.replace(os.path.sep, '.')
# We have not found filename relative to anywhere in pythonpath.
return ''
def infer_module_name(filename, fspath)
Convert a python filename to a module relative to pythonpath.
3.892742
3.449288
1.128564
path = package.split('.') if package else []
name = relative_name.lstrip('.')
ndots = len(relative_name) - len(name)
if ndots > len(path):
    return relative_name
absolute_path = path[:len(path) + 1 - ndots]
if name:
    absolute_path.append(name)
return '.'.join(absolute_path)
def get_absolute_name(package, relative_name)
Joins a package name and a relative name.

Args:
    package: A dotted name, e.g. foo.bar.baz
    relative_name: A dotted name with possibly some leading dots, e.g. ..x.y

Returns:
    The relative name appended to the parent's package, after going up one
    level for each leading dot. e.g. foo.bar.baz + ..hello.world -> foo.hello.world
    The unchanged relative_name if it does not start with a dot or has too
    many leading dots.
3.053875
3.221512
0.947963
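Worked examples for the dot arithmetic above (body copied verbatim):

def get_absolute_name(package, relative_name):
    path = package.split('.') if package else []
    name = relative_name.lstrip('.')
    ndots = len(relative_name) - len(name)
    if ndots > len(path):
        return relative_name
    absolute_path = path[:len(path) + 1 - ndots]
    if name:
        absolute_path.append(name)
    return '.'.join(absolute_path)

assert get_absolute_name('foo.bar', '.baz') == 'foo.bar.baz'  # one dot: same package
assert get_absolute_name('foo.bar', '..baz') == 'foo.baz'     # each extra dot goes up a level
assert get_absolute_name('foo', '...baz') == '...baz'         # too many dots: unchanged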
name = item.name
# The last part in `from a.b.c import d` might be a symbol rather than a
# module, so we try a.b.c and a.b.c.d as names.
short_name = None
if item.is_from and not item.is_star:
    if '.' in name.lstrip('.'):
        # The name is something like `a.b.c`, so strip off `.c`.
        rindex = name.rfind('.')
    else:
        # The name is something like `..c`, so strip off just `c`.
        rindex = name.rfind('.') + 1
    short_name = name[:rindex]
if import_finder.is_builtin(name):
    filename = name + '.so'
    return Builtin(filename, name)
filename, level = convert_to_path(name)
if level:
    # This is a relative import; we need to resolve the filename
    # relative to the importing file path.
    filename = os.path.normpath(
        os.path.join(self.current_directory, filename))
files = [(name, filename)]
if short_name:
    short_filename = os.path.dirname(filename)
    files.append((short_name, short_filename))
for module_name, path in files:
    for fs in self.fs_path:
        f = self._find_file(fs, path)
        if not f or f == self.current_module.path:
            # We cannot import a file from itself.
            continue
        if item.is_relative():
            package_name = self.current_module.package_name
            if package_name is None:
                # Relative import in non-package
                raise ImportException(name)
            module_name = get_absolute_name(package_name, module_name)
        if isinstance(self.current_module, System):
            return System(f, module_name)
        return Local(f, module_name, fs)
# If the module isn't found in the explicit pythonpath, see if python
# itself resolved it.
if item.source:
    prefix, ext = os.path.splitext(item.source)
    mod_name = name
    # We need to check for importing a symbol here too.
    if short_name:
        mod = prefix.replace(os.path.sep, '.')
        mod = utils.strip_suffix(mod, '.__init__')
        if not mod.endswith(name) and mod.endswith(short_name):
            mod_name = short_name
    if ext == '.pyc':
        pyfile = prefix + '.py'
        if os.path.exists(pyfile):
            return System(pyfile, mod_name)
    elif not ext:
        pyfile = os.path.join(prefix, "__init__.py")
        if os.path.exists(pyfile):
            return System(pyfile, mod_name)
    return System(item.source, mod_name)
raise ImportException(name)
def resolve_import(self, item)
Simulate how Python resolves imports.

Returns the filename of the source file Python would load when processing
a statement like 'import name' in the module we're currently under.

Args:
    item: An instance of ImportItem

Returns:
    A filename

Raises:
    ImportException: If the module doesn't exist.
3.857768
3.882016
0.993754
for import_item in import_items:
    try:
        yield self.resolve_import(import_item)
    except ImportException as err:
        logging.info('unknown module %s', err.module_name)
def resolve_all(self, import_items)
Resolves a list of imports. Yields filenames.
4.403421
4.631368
0.950782
path = fs.Path()
for p in pythonpath.split(os.pathsep):
    path.add_path(utils.expand_path(p), 'os')
return path
def path_from_pythonpath(pythonpath)
Create an fs.Path object from a pythonpath string.
5.703391
3.991122
1.42902
f = import_graph.provenance[node]
if isinstance(f, resolve.Direct):
    out = '+ ' + f.short_path
elif isinstance(f, resolve.Local):
    out = ' ' + f.short_path
elif isinstance(f, resolve.System):
    out = ':: ' + f.short_path
elif isinstance(f, resolve.Builtin):
    out = '(%s)' % f.module_name
else:
    out = '%r' % node
return ' ' * indent + out
def format_file_node(import_graph, node, indent)
Prettyprint nodes based on their provenance.
4.133217
3.830985
1.078891
if isinstance(node, graph.NodeSet):
    ind = ' ' * indent
    out = [ind + 'cycle {'] + [
        format_file_node(import_graph, n, indent + 1)
        for n in node.nodes
    ] + [ind + '}']
    return '\n'.join(out)
else:
    return format_file_node(import_graph, node, indent)
def format_node(import_graph, node, indent)
Helper function for print_tree
3.428869
3.426753
1.000618
for i in range(len(parts), 0, -1):
    prefix = '.'.join(parts[0:i])
    if prefix in sys.modules:
        return i, sys.modules[prefix]
return 0, None
def _find_package(parts)
Helper function for _resolve_import_versioned.
2.45053
2.376033
1.031353
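The helper above walks sys.modules from the longest dotted prefix down; a self-contained demonstration:

import sys
import os.path  # guarantees 'os' and 'os.path' are present in sys.modules

def find_package(parts):
    # same loop as _find_package above, without the leading underscore
    for i in range(len(parts), 0, -1):
        prefix = '.'.join(parts[0:i])
        if prefix in sys.modules:
            return i, sys.modules[prefix]
    return 0, None

i, mod = find_package(['os', 'path', 'sep'])
assert i == 2 and mod is os.path           # longest importable prefix is 'os.path'
assert find_package(['no', 'such', 'pkg']) == (0, None)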
if name in sys.modules:
    return getattr(sys.modules[name], '__file__', name + '.so')
return _resolve_import_versioned(name)
def _resolve_import(name)
Helper function for resolve_import.
4.444864
4.503388
0.987005
# Don't try to resolve relative imports or builtins here; they will be
# handled by resolve.Resolver
if name.startswith('.') or is_builtin(name):
    return None
ret = _resolve_import(name)
if ret is None and is_from and not is_star:
    package, _ = name.rsplit('.', 1)
    ret = _resolve_import(package)
return ret
def resolve_import(name, is_from, is_star)
Use python to resolve an import.

Args:
    name: The fully qualified module name.

Returns:
    The path to the module source file or None.
4.278028
4.930861
0.867603
with open(filename, "rb") as f:
    src = f.read()
finder = ImportFinder()
finder.visit(ast.parse(src, filename=filename))
imports = []
for i in finder.imports:
    name, _, is_from, is_star = i
    imports.append(i + (resolve_import(name, is_from, is_star),))
return imports
def get_imports(filename)
Get all the imports in a file. Each import is a tuple of: (name, alias, is_from, is_star, source_file)
3.138661
2.917265
1.075892
assert not self.final, 'Trying to mutate a final graph.'
self.add_source_file(filename)
resolved, unresolved = self.get_file_deps(filename)
self.graph.add_node(filename)
for f in resolved:
    self.graph.add_node(f)
    self.graph.add_edge(filename, f)
for imp in unresolved:
    self.broken_deps[filename].add(imp)
def add_file(self, filename)
Add a file and all its immediate dependencies to the graph.
4.034274
3.50911
1.149657
return (f not in self.graph.nodes and
        f not in seen and
        (not trim or
         not isinstance(self.provenance[f],
                        (resolve.Builtin, resolve.System))))
def follow_file(self, f, seen, trim)
Whether to recurse into a file's dependencies.
12.546376
11.092388
1.13108
assert not self.final, 'Trying to mutate a final graph.'
self.add_source_file(filename)
queue = collections.deque([filename])
seen = set()
while queue:
    filename = queue.popleft()
    self.graph.add_node(filename)
    try:
        deps, broken = self.get_file_deps(filename)
    except parsepy.ParseError:
        # Python couldn't parse `filename`. If we're sure that it is a
        # Python file, we mark it as unreadable and keep the node in the
        # graph so importlab's callers can do their own syntax error
        # handling if desired.
        if filename.endswith('.py'):
            self.unreadable_files.add(filename)
        else:
            self.graph.remove_node(filename)
        continue
    for f in broken:
        self.broken_deps[filename].add(f)
    for f in deps:
        if self.follow_file(f, seen, trim):
            queue.append(f)
        seen.add(f)
        self.graph.add_node(f)
        self.graph.add_edge(filename, f)
def add_file_recursive(self, filename, trim=False)
Add a file and all its recursive dependencies to the graph.

Args:
    filename: The name of the file.
    trim: Whether to trim the dependencies of builtin and system files.
4.535195
4.365145
1.038956
assert not self.final, 'Trying to mutate a final graph.'
self.graph.add_node(scc)
edges = list(self.graph.edges)
for k, v in edges:
    if k not in scc and v in scc:
        self.graph.remove_edge(k, v)
        self.graph.add_edge(k, scc)
    elif k in scc and v not in scc:
        self.graph.remove_edge(k, v)
        self.graph.add_edge(scc, v)
for node in scc.nodes:
    self.graph.remove_node(node)
def shrink_to_node(self, scc)
Shrink a strongly connected component into a node.
2.303132
2.260607
1.018811
assert not self.final, 'Trying to mutate a final graph.'
# Replace each strongly connected component with a single node `NodeSet`
for scc in sorted(nx.kosaraju_strongly_connected_components(self.graph),
                  key=len, reverse=True):
    if len(scc) == 1:
        break
    self.shrink_to_node(NodeSet(scc))
self.final = True
def build(self)
Finalise the graph, after adding all input files to it.
5.728058
5.270692
1.086775
assert self.final, 'Call build() before using the graph.'
out = []
for node in nx.topological_sort(self.graph):
    if isinstance(node, NodeSet):
        out.append(node.nodes)
    else:
        # add a one-element list for uniformity
        out.append([node])
return list(reversed(out))
def sorted_source_files(self)
Returns a list of targets in topologically sorted order.
6.596622
5.333027
1.236938
assert self.final, 'Call build() before using the graph.'
out = []
for node in nx.topological_sort(self.graph):
    deps = [v for k, v in self.graph.out_edges([node])]
    out.append((node, deps))
return out
def deps_list(self)
Returns a list of (target, dependencies).
5.004105
4.029256
1.241943
assert self.final, 'Call build() before using the graph.'
out = set()
for v in self.broken_deps.values():
    out |= v
return out
def get_all_unresolved(self)
Returns a set of all unresolved imports.
13.020421
9.23886
1.40931
import_graph = cls(env)
for filename in filenames:
    import_graph.add_file_recursive(os.path.abspath(filename), trim)
import_graph.build()
return import_graph
def create(cls, env, filenames, trim=False)
Create and return a final graph.

Args:
    env: An environment.Environment object
    filenames: A list of filenames
    trim: Whether to trim the dependencies of builtin and system files.

Returns:
    An immutable ImportGraph with the recursive dependencies of all the
    files in filenames
3.607372
3.912945
0.921907
module_name = resolve.infer_module_name(filename, self.path)
return resolve.Direct(filename, module_name)
def get_source_file_provenance(self, filename)
Infer the module name if possible.
13.694551
8.199118
1.670247
# We should only call this on an actual directory; callers should do the
# validation.
assert os.path.isdir(path)
out = []
# glob would be faster (see PEP471) but python glob doesn't do **/*
for root, _, files in os.walk(path):
    out += [os.path.join(root, f) for f in files if f.endswith(extension)]
return out
def collect_files(path, extension)
Collect all the files with extension in a directory tree.
6.580794
6.578059
1.000416
out = []
for f in expand_paths(filenames, cwd):
    if os.path.isdir(f):
        # If we have a directory, collect all the .py files within it.
        out += collect_files(f, ".py")
    else:
        if f.endswith(".py"):
            out.append(f)
return sorted(set(out))
def expand_source_files(filenames, cwd=None)
Expand a list of filenames passed in as sources.

This is a helper function for handling command line arguments that specify
a list of source files and directories. Any directories in filenames will
be scanned recursively for .py files. Any files that do not end with ".py"
will be dropped.

Args:
    filenames: A list of filenames to process.
    cwd: An optional working directory to expand relative paths

Returns:
    A list of sorted full paths to .py files
3.152145
3.46179
0.910554
if string.endswith(suffix):
    return string[:-(len(suffix))]
return string
def strip_suffix(string, suffix)
Remove a suffix from a string if it exists.
3.031928
2.954224
1.026303
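strip_suffix in action (body copied verbatim); it strips one trailing occurrence and leaves non-matching strings alone:

def strip_suffix(string, suffix):
    if string.endswith(suffix):
        return string[:-(len(suffix))]
    return string

assert strip_suffix('pkg.__init__', '.__init__') == 'pkg'
assert strip_suffix('module.py', '.txt') == 'module.py'  # unchanged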
path = os.path.join(self.path, filename)
makedirs(path)
return path
def create_directory(self, filename)
Create a subdirectory in the temporary directory.
3.242367
2.992923
1.083345
filedir, filename = os.path.split(filename)
if filedir:
    self.create_directory(filedir)
path = os.path.join(self.path, filedir, filename)
data = indented_data
if isinstance(data, bytes) and not isinstance(data, str):
    # This is binary data rather than text.
    mode = 'wb'
else:
    mode = 'w'
    if data:
        # only dedent text; dedenting bytes would fail
        data = textwrap.dedent(data)
with open(path, mode) as fi:
    if data:
        fi.write(data)
return path
def create_file(self, filename, indented_data=None)
Create a file in the temporary directory. Dedents the contents.
2.707582
2.640657
1.025344
msg = cls.objects.create(thread=thread, sender=user, content=content)
thread.userthread_set.exclude(user=user).update(deleted=False, unread=True)
thread.userthread_set.filter(user=user).update(deleted=False, unread=False)
message_sent.send(sender=cls, message=msg, thread=thread, reply=True)
return msg
def new_reply(cls, thread, user, content)
Create a new reply for an existing Thread. Mark thread as unread for all other participants, and mark thread as read by replier.
2.822922
2.821547
1.000487
thread = Thread.objects.create(subject=subject)
for user in to_users:
    thread.userthread_set.create(user=user, deleted=False, unread=True)
thread.userthread_set.create(user=from_user, deleted=True, unread=False)
msg = cls.objects.create(thread=thread, sender=from_user, content=content)
message_sent.send(sender=cls, message=msg, thread=thread, reply=False)
return msg
def new_message(cls, from_user, to_users, subject, content)
Create a new Message and Thread. Mark thread as unread for all recipients, and mark thread as read and deleted from inbox by creator.
2.393849
2.288689
1.045948
return bool(thread.userthread_set.filter(user=user, unread=True))
def unread(thread, user)
Check whether there are any unread messages for a particular thread for a user.
5.475139
5.462823
1.002254
text = cleanup(text)
return self.sent_detector.tokenize(text.strip())
def split(self, text)
Splits text and returns a list of the resulting sentences.
7.732079
6.410455
1.206167
with open(path) as f:
    s = f.read().strip()
return s
def str_from_file(path)
Return file contents as string.
3.0391
3.052328
0.995666
def canonical(xml_file):
    # poor man's canonicalization, since we don't want to install
    # external packages just for unittesting
    s = et.tostring(et.parse(xml_file).getroot()).decode("UTF-8")
    s = re.sub("[\n|\t]*", "", s)
    s = re.sub("\s+", " ", s)
    s = "".join(sorted(s)).strip()
    return s

return canonical(xml_file1) == canonical(xml_file2)
def xml_equal(xml_file1, xml_file2)
Parse xml and convert to a canonical string representation so we don't have to worry about semantically meaningless differences
4.271099
4.186839
1.020125
for root, dirs, files in os.walk(dir_path):
    file_list = [os.path.join(root, f) for f in files]
    if recursive:
        for dir in dirs:
            dir = os.path.join(root, dir)
            file_list.extend(list_files(dir, recursive=True))
    return file_list
def list_files(dir_path, recursive=True)
Return a list of files in dir_path.
1.860212
1.816105
1.024286
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
logger = log.get_global_console_logger()
logger.info("Processing files in {}.".format(input_dir))
input_file_names = os.listdir(input_dir)
for input_file_name in input_file_names:
    logger.info("Processing {}.".format(input_file_name))
    input_file = os.path.join(input_dir, input_file_name)
    with codecs.open(input_file, "r", encoding="UTF-8") as f:
        input_string = f.read()
    output_string = function(input_string)
    output_file = os.path.join(output_dir, input_file_name)
    with codecs.open(output_file, "w", encoding="UTF-8") as f:
        f.write(output_string)
logger.info("Saved processed files to {}.".format(output_dir))
def process(input_dir, output_dir, function)
Apply function to all files in input_dir and save the resulting output files in output_dir.
1.685025
1.674681
1.006176
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
sent_split_to_string = lambda s: "\n".join(ss.split(s))
process_func = partial(
    DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
def split_sentences(self)
ROUGE requires texts split into sentences. In case the texts are not already split, this method can be used.
6.055977
5.342402
1.133568
sentences = text.split("\n")
sent_elems = [
    "<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
    "{text}</a>".format(i=i, text=sent)
    for i, sent in enumerate(sentences, start=1)]
# the HTML page template (a triple-quoted string) is elided in the source
html = """...""".format(title=title, elems="\n".join(sent_elems))
return html
def convert_text_to_rouge_format(text, title="dummy title")
Convert a text to a format ROUGE understands. The text is assumed to
contain one sentence per line.

text: The text to convert, containing one sentence per line.
title: Optional title for the text. The title will appear in the converted
    file, but doesn't seem to have any other relevance.

Returns: The converted text as string.
4.333836
4.511573
0.960604
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
    match = system_filename_pattern.match(system_filename)
    if match:
        id = match.groups(0)[0]
        model_filenames = Rouge155.__get_model_filenames_for_id(
            id, model_dir, model_filename_pattern)
        system_models_tuples.append(
            (system_filename, sorted(model_filenames)))
if not system_models_tuples:
    raise Exception(
        "Did not find any files matching the pattern {} "
        "in the system summaries directory {}.".format(
            system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
    f.write('<ROUGE-EVAL version="1.55">')
    for task_id, (system_filename, model_filenames) in enumerate(
            system_models_tuples, start=1):
        eval_string = Rouge155.__get_eval_string(
            task_id, system_id,
            system_dir, system_filename,
            model_dir, model_filenames)
        f.write(eval_string)
    f.write("</ROUGE-EVAL>")
def write_config_static(system_dir, system_filename_pattern, model_dir, model_filename_pattern, config_file_path, system_id=None)
Write the ROUGE configuration file, which is basically a list of system
summary files and their corresponding model summary files.

pyrouge uses regular expressions to automatically find the matching model
summary files for a given system summary file (cf. docstrings for
system_filename_pattern and model_filename_pattern).

system_dir: Path of directory containing system summaries.
system_filename_pattern: Regex string for matching system summary filenames.
model_dir: Path of directory containing model summaries.
model_filename_pattern: Regex string for matching model summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will appear in the ROUGE output.
2.442808
2.249361
1.086001
if not system_id:
    system_id = 1
if (not config_file_path) or (not self._config_dir):
    self._config_dir = mkdtemp()
    config_filename = "rouge_conf.xml"
else:
    config_dir, config_filename = os.path.split(config_file_path)
    verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
    self._system_dir, self._system_filename_pattern,
    self._model_dir, self._model_filename_pattern,
    self._config_file, system_id)
self.log.info(
    "Written ROUGE configuration to {}".format(self._config_file))
def write_config(self, config_file_path=None, system_id=None)
Write the ROUGE configuration file, which is basically a list of system
summary files and their matching model summary files.

This is a non-static version of write_config_file_static().

config_file_path: Path of the configuration file.
system_id: Optional system ID string which will appear in the ROUGE output.
3.135431
2.689493
1.165807
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = None
if hasattr(self, "_home_dir") and self._home_dir:
    env = {'ROUGE_EVAL_HOME': self._home_dir}
self.log.info(
    "Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def evaluate(self, system_id=1, rouge_args=None)
Run ROUGE to evaluate the system summaries in system_dir against the
model summaries in model_dir. The summaries are assumed to be in the
one-sentence-per-line HTML format ROUGE understands.

system_id: Optional system ID which will be printed in ROUGE's output.

Returns: Rouge output as string.
3.349646
3.420857
0.979183
if split_sentences:
    self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def convert_and_evaluate(self, system_id=1, split_sentences=False, rouge_args=None)
Convert plain text summaries to ROUGE format and run ROUGE to evaluate the
system summaries in system_dir against the model summaries in model_dir.
Optionally split texts into sentences in case they aren't already.

This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().

split_sentences: Optional argument specifying if sentences should be split.
system_id: Optional system ID which will be printed in ROUGE's output.

Returns: ROUGE output as string.
3.572869
4.173868
0.856009
# 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
    r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
    r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
    match = pattern.match(line)
    if match:
        sys_id, rouge_type, measure, result, conf_begin, conf_end = \
            match.groups()
        measure = {
            'Average_R': 'recall',
            'Average_P': 'precision',
            'Average_F': 'f_score'
        }[measure]
        rouge_type = rouge_type.lower().replace("-", '_')
        key = "{}_{}".format(rouge_type, measure)
        results[key] = float(result)
        results["{}_cb".format(key)] = float(conf_begin)
        results["{}_ce".format(key)] = float(conf_end)
return results
def output_to_dict(self, output)
Convert the ROUGE output into python dictionary for further processing.
2.936593
2.844992
1.032197
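A self-contained check of the parsing regex above, fed one line in the format of ROUGE's console output:

import re

pattern = re.compile(
    r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
    r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
line = "1 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)"
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
    pattern.match(line).groups()
key = "{}_{}".format(rouge_type.lower().replace("-", "_"),
                     {'Average_R': 'recall',
                      'Average_P': 'precision',
                      'Average_F': 'f_score'}[measure])
assert key == "rouge_1_recall" and float(result) == 0.02632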
if not home_dir:
    self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
    self._home_dir = home_dir
    self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
    raise Exception(
        "ROUGE binary not found at {}. Please set the "
        "correct path by running pyrouge_set_rouge_path "
        "/path/to/rouge/home.".format(self._bin_path))
def __set_rouge_dir(self, home_dir=None)
Verify presence of ROUGE-1.5.5.pl and data folder, and set those paths.
2.780128
2.638497
1.053679
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
    id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
    id=chr(65 + i), name=name)
    for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
# the <EVAL> block template (a triple-quoted string) is elided in the source
eval_string = """...""".format(
    task_id=task_id,
    model_root=model_dir, model_elems=model_elems,
    peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __get_eval_string(task_id, system_id, system_dir, system_filename, model_dir, model_filenames)
ROUGE can evaluate several system summaries for a given text against several model summaries, i.e. there is an m-to-n relation between system and model summaries. The system summaries are listed in the <PEERS> tag and the model summaries in the <MODELS> tag. pyrouge currently only supports one system summary per text, i.e. it assumes a 1-to-n relation between system and model summaries.
2.754999
2.754328
1.000244
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
    "Processing summaries. Saving system files to {} and "
    "model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __process_summaries(self, process_func)
Helper method that applies process_func to the files in the system and model folders and saves the resulting files to new system and model folders.
2.002578
1.768185
1.132561
if self.args:
    options = self.args.split()
elif rouge_args:
    options = rouge_args.split()
else:
    options = [
        '-e', self._data_dir,
        '-c', 95,
        '-2', '-1', '-U',
        '-r', 1000,
        '-n', 4,
        '-w', 1.2,
        '-a',
    ]
    options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __get_options(self, rouge_args=None)
Get supplied command line arguments for ROUGE or use default ones.
3.113053
3.006952
1.035285
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)

def fget(self):
    return getattr(self, private_name)

def fset(self, path):
    verify_dir(path, dir_name)
    setattr(self, private_name, path)

p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __create_dir_property(self, dir_name, docstring)
Generate getter and setter for a directory property.
2.021057
1.996664
1.012217
directories = [
    ("home", "The ROUGE home directory."),
    ("data", "The path of the ROUGE 'data' directory."),
    ("system", "Path of the directory containing system summaries."),
    ("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
    self.__create_dir_property(dirname, docstring)
def __set_dir_properties(self)
Automatically generate the properties for directories.
5.003407
4.625911
1.081605
if not rouge_args:
    return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
    cleaned_args = match.group(1)
    return cleaned_args
else:
    return rouge_args
def __clean_rouge_args(self, rouge_args)
Remove enclosing quotation marks, if any.
3.309592
2.637716
1.254719
'''
This function is called to check if a username password
combination is valid.
'''
return username == self.queryname and password == self.querypw
def check_auth(self, username, password)
This function is called to check if a username password combination is valid.
10.720917
6.376776
1.681244
'''
Decorator to prompt for user name and password. Useful for data dumps,
etc. that you don't want to be public.
'''
@wraps(func)
def decorated(*args, **kwargs):
    ''' Wrapper '''
    auth = request.authorization
    if not auth or not self.check_auth(auth.username, auth.password):
        return self.authenticate()
    return func(*args, **kwargs)
return decorated
def requires_auth(self, func)
Decorator to prompt for user name and password. Useful for data dumps, etc. that you don't want to be public.
3.930492
1.985937
1.979162
app.logger.error(
    "%s (%s) %s",
    exception.value, exception.errornum, str(dict(request.args)))
return exception.error_page(
    request, CONFIG.get('HIT Configuration', 'contact_email_on_error'))
def handle_exp_error(exception)
Handle errors by sending an error page.
15.223503
14.152508
1.075675
cutofftime = datetime.timedelta(minutes=-CONFIG.getint('Server Parameters',
                                                       'cutoff_time'))
starttime = datetime.datetime.now() + cutofftime
try:
    conditions = json.load(open(os.path.join(app.root_path,
                                             'conditions.json')))
    numconds = len(conditions.keys())
    numcounts = 1
except IOError as e:
    numconds = CONFIG.getint('Task Parameters', 'num_conds')
    numcounts = CONFIG.getint('Task Parameters', 'num_counters')
participants = Participant.query.\
    filter(Participant.codeversion ==
           CONFIG.get('Task Parameters', 'experiment_code_version')).\
    filter(Participant.mode == mode).\
    filter(or_(Participant.status == COMPLETED,
               Participant.status == CREDITED,
               Participant.status == SUBMITTED,
               Participant.status == BONUSED,
               Participant.beginhit > starttime)).all()
counts = Counter()
for cond in range(numconds):
    for counter in range(numcounts):
        counts[(cond, counter)] = 0
for participant in participants:
    condcount = (participant.cond, participant.counterbalance)
    if condcount in counts:
        counts[condcount] += 1
mincount = min(counts.values())
minima = [hsh for hsh, count in counts.iteritems() if count == mincount]
chosen = choice(minima)
#conds += [ 0 for _ in range(1000) ]
#conds += [ 1 for _ in range(1000) ]
app.logger.info("given %(a)s chose %(b)s" % {'a': counts, 'b': chosen})
return chosen
def get_random_condcount(mode)
HITs can be in one of three states:
- jobs that are finished
- jobs that are started but not finished
- jobs that are never going to finish (user decided not to do it)

Our count should be based on the first two, so we count any tasks finished
or any tasks not finished that were started in the last cutoff_time
minutes, as specified in the cutoff_time variable in the config file.

Returns a tuple: (cond, condition)
4.055813
4.175467
0.971344
''' Check worker status route '''
if 'workerId' not in request.args:
    resp = {"status": "bad request"}
    return jsonify(**resp)
else:
    worker_id = request.args['workerId']
    assignment_id = request.args['assignmentId']
    allow_repeats = CONFIG.getboolean('HIT Configuration', 'allow_repeats')
    if allow_repeats:
        # if you allow repeats focus on current worker/assignment combo
        try:
            part = Participant.query.\
                filter(Participant.workerid == worker_id).\
                filter(Participant.assignmentid == assignment_id).one()
            status = part.status
        except exc.SQLAlchemyError:
            status = NOT_ACCEPTED
    else:
        # if you disallow repeats search for highest status of anything
        # by this worker
        try:
            matches = Participant.query.\
                filter(Participant.workerid == worker_id).all()
            numrecs = len(matches)
            if numrecs == 0:
                # this should be caught by exception, but just to be safe
                status = NOT_ACCEPTED
            else:
                status = max([record.status for record in matches])
        except exc.SQLAlchemyError:
            status = NOT_ACCEPTED
    resp = {"status": status}
    return jsonify(**resp)
def check_worker_status()
Check worker status route
3.846359
3.761859
1.022462
if not ('hitId' in request.args and 'assignmentId' in request.args and
        'workerId' in request.args):
    raise ExperimentError('hit_assign_worker_id_not_set_in_consent')
hit_id = request.args['hitId']
assignment_id = request.args['assignmentId']
worker_id = request.args['workerId']
mode = request.args['mode']
with open('templates/consent.html', 'r') as temp_file:
    consent_string = temp_file.read()
consent_string = insert_mode(consent_string, mode)
return render_template_string(
    consent_string,
    hitid=hit_id,
    assignmentid=assignment_id,
    workerid=worker_id
)
def give_consent()
Serves up the consent in the popup window.
2.993507
3.002133
0.997127
''' Get ad via HIT id '''
username = CONFIG.get('psiTurk Access', 'psiturk_access_key_id')
password = CONFIG.get('psiTurk Access', 'psiturk_secret_access_id')
try:
    req = requests.get('https://api.psiturk.org/api/ad/lookup/' + hit_id,
                       auth=(username, password))
except:
    raise ExperimentError('api_server_not_reachable')
else:
    if req.status_code == 200:
        return req.json()['ad_id']
    else:
        return "error"
def get_ad_via_hitid(hit_id)
Get ad via HIT id
3.890538
3.825028
1.017127
app.logger.info("Accessing /inexp") if not 'uniqueId' in request.form: raise ExperimentError('improper_inputs') unique_id = request.form['uniqueId'] try: user = Participant.query.\ filter(Participant.uniqueid == unique_id).one() user.status = STARTED user.beginexp = datetime.datetime.now() db_session.add(user) db_session.commit() resp = {"status": "success"} except exc.SQLAlchemyError: app.logger.error("DB error: Unique user not found.") resp = {"status": "error, uniqueId not found"} return jsonify(**resp)
def enterexp()
AJAX listener that listens for a signal from the user's script when they leave the instructions and enter the real experiment. After the server receives this signal, it will no longer allow them to re-access the experiment applet (meaning they can't do part of the experiment and refresh to start over).
4.991427
5.009579
0.996377
app.logger.info("GET /sync route with id: %s" % uid) try: user = Participant.query.\ filter(Participant.uniqueid == uid).\ one() except exc.SQLAlchemyError: app.logger.error("DB error: Unique user not found.") try: resp = json.loads(user.datastring) except: resp = { "condition": user.cond, "counterbalance": user.counterbalance, "assignmentId": user.assignmentid, "workerId": user.workerid, "hitId": user.hitid, "bonus": user.bonus } return jsonify(**resp)
def load(uid=None)
Load experiment data, which should be a JSON object and will be stored after converting to string.
5.340919
5.270075
1.013443
app.logger.info("PUT /sync route with id: %s" % uid) try: user = Participant.query.\ filter(Participant.uniqueid == uid).\ one() except exc.SQLAlchemyError: app.logger.error("DB error: Unique user not found.") if hasattr(request, 'json'): user.datastring = request.data.decode('utf-8').encode( 'ascii', 'xmlcharrefreplace' ) db_session.add(user) db_session.commit() try: data = json.loads(user.datastring) except: data = {} trial = data.get("currenttrial", None) app.logger.info("saved data for %s (current trial: %s)", uid, trial) resp = {"status": "user data saved"} return jsonify(**resp)
def update(uid=None)
Save experiment data, which should be a JSON object and will be stored after converting to string.
4.855656
4.65485
1.043139
unique_id = request.form['uniqueId']
if unique_id[:5] == "debug":
    debug_mode = True
else:
    debug_mode = False
if debug_mode:
    resp = {"status": "didn't mark as quitter since this is debugging"}
    return jsonify(**resp)
else:
    try:
        unique_id = request.form['uniqueId']
        app.logger.info("Marking quitter %s" % unique_id)
        user = Participant.query.\
            filter(Participant.uniqueid == unique_id).\
            one()
        user.status = QUITEARLY
        db_session.add(user)
        db_session.commit()
    except exc.SQLAlchemyError:
        raise ExperimentError('tried_to_quit')
    else:
        resp = {"status": "marked as quitter"}
        return jsonify(**resp)
def quitter()
Mark quitter as such.
4.270875
4.002667
1.067007
''' Debugging route for complete. '''
if not 'uniqueId' in request.args:
    raise ExperimentError('improper_inputs')
else:
    unique_id = request.args['uniqueId']
    mode = request.args['mode']
    try:
        user = Participant.query.\
            filter(Participant.uniqueid == unique_id).one()
        user.status = COMPLETED
        user.endhit = datetime.datetime.now()
        db_session.add(user)
        db_session.commit()
    except:
        raise ExperimentError('error_setting_worker_complete')
    else:
        if (mode == 'sandbox' or mode == 'live'):
            # send them back to mturk.
            return render_template('closepopup.html')
        else:
            return render_template('complete.html')
def debug_complete()
Debugging route for complete.
5.607259
5.4106
1.036347
''' Complete worker. '''
if not 'uniqueId' in request.args:
    resp = {"status": "bad request"}
    return jsonify(**resp)
else:
    unique_id = request.args['uniqueId']
    app.logger.info("Completed experiment %s" % unique_id)
    try:
        user = Participant.query.\
            filter(Participant.uniqueid == unique_id).one()
        user.status = COMPLETED
        user.endhit = datetime.datetime.now()
        db_session.add(user)
        db_session.commit()
        status = "success"
    except exc.SQLAlchemyError:
        status = "database error"
    resp = {"status": status}
    return jsonify(**resp)
def worker_complete()
Complete worker.
3.824991
3.929164
0.973487
''' Submit worker '''
if not 'uniqueId' in request.args:
    resp = {"status": "bad request"}
    return jsonify(**resp)
else:
    unique_id = request.args['uniqueId']
    app.logger.info("Submitted experiment for %s" % unique_id)
    try:
        user = Participant.query.\
            filter(Participant.uniqueid == unique_id).one()
        user.status = SUBMITTED
        db_session.add(user)
        db_session.commit()
        status = "success"
    except exc.SQLAlchemyError:
        status = "database error"
    resp = {"status": status}
    return jsonify(**resp)
def worker_submitted()
Submit worker
3.656674
3.79627
0.963228
''' Insert mode '''
page_html = page_html.decode("utf-8")
match_found = False
matches = re.finditer('workerId={{ workerid }}', page_html)
match = None
for match in matches:
    match_found = True
if match_found:
    new_html = page_html[:match.end()] + "&mode=" + mode + \
        page_html[match.end():]
    return new_html
else:
    raise ExperimentError("insert_mode_failed")
def insert_mode(page_html, mode)
Insert mode
4.014198
4.369855
0.918611
if foldername is None and pagename is None:
    raise ExperimentError('page_not_found')
if foldername is None and pagename is not None:
    return render_template(pagename)
else:
    return render_template(foldername + "/" + pagename)
def regularpage(foldername=None, pagename=None)
Route not found by the other routes above. May point to a static template.
3.245892
2.877594
1.127988
''' Run web server '''
host = "0.0.0.0"
port = CONFIG.getint('Server Parameters', 'port')
print "Serving on ", "http://" + host + ":" + str(port)
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.jinja_env.auto_reload = True
app.run(debug=True, host=host, port=port)
def run_webserver()
Run web server
3.153517
3.447351
0.914765
''' Generate random id numbers '''
return ''.join(random.choice(chars) for x in range(size))
def random_id_generator(self, size=6, chars=string.ascii_uppercase + string.digits)
Generate random id numbers
5.578237
4.224983
1.320298
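A standalone version of the generator above; note that for anything security-sensitive, Python's secrets module would be the better choice than random:

import random
import string

def random_id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for _ in range(size))

uid = random_id_generator()
assert len(uid) == 6
assert all(c in string.ascii_uppercase + string.digits for c in uid)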
" Adds DB-logged worker bonus to worker list data " try: unique_id = '{}:{}'.format(worker_dict['workerId'], worker_dict['assignmentId']) worker = Participant.query.filter( Participant.uniqueid == unique_id).one() worker_dict['bonus'] = worker.bonus except sa.exc.InvalidRequestError: # assignment is found on mturk but not in local database. worker_dict['bonus'] = 'N/A' return worker_dict
def add_bonus(worker_dict)
Adds DB-logged worker bonus to worker list data
7.033907
5.164859
1.361878
''' Status, if set, can be one of `Submitted`, `Approved`, or `Rejected` '''
if assignment_ids:
    workers = [self.get_worker(assignment_id)
               for assignment_id in assignment_ids]
else:
    workers = self.amt_services.get_workers(assignment_status=status,
                                            chosen_hits=chosen_hits)
if workers is False:
    raise Exception('*** failed to get workers')
if not all_studies:
    my_hitids = self._get_my_hitids()
    workers = [worker for worker in workers
               if worker['hitId'] in my_hitids]
workers = [self.add_bonus(worker) for worker in workers]
return workers
def get_workers(self, status=None, chosen_hits=None, assignment_ids=None, all_studies=False)
Status, if set, can be one of `Submitted`, `Approved`, or `Rejected`
3.783853
3.08914
1.224889
''' Approve worker '''
assignment_id = worker['assignmentId']
init_db()
found_worker = False
parts = Participant.query.\
    filter(Participant.assignmentid == assignment_id).\
    filter(Participant.status.in_([3, 4])).\
    all()
# Iterate through all the people who completed this assignment.
# This should be one person, and it should match the person who
# submitted the HIT, but that doesn't always hold.
status_report = ''
for part in parts:
    if part.workerid == worker['workerId']:
        found_worker = True
        success = self.amt_services.approve_worker(assignment_id)
        if success:
            part.status = 5
            db_session.add(part)
            db_session.commit()
            status_report = 'approved worker {} for assignment {}'.format(
                part.workerid, assignment_id)
        else:
            error_msg = '*** failed to approve worker {} for assignment {}'.format(
                part.workerid, assignment_id)
            raise Exception(error_msg)
    else:
        status_report = 'found unexpected worker {} for assignment {}'.format(
            part.workerid, assignment_id)
if not found_worker:
    # approve workers not found in DB if the assignment id has been specified
    if force:
        success = self.amt_services.approve_worker(assignment_id)
        if success:
            _status_report = 'approved worker {} for assignment {} but not found in DB'.format(
                worker['workerId'], assignment_id)
            status_report = '\n'.join([status_report, _status_report])
        else:
            error_msg = '*** failed to approve worker {} for assignment {}'.format(
                worker['workerId'], assignment_id)
            raise Exception(error_msg)
    # otherwise don't approve, and print warning
    else:
        _status_report = ('worker {} not found in DB for assignment {}. '
                          'Not automatically approved. Use --force to '
                          'approve anyway.').format(
                              worker['workerId'], assignment_id)
        if status_report:
            status_report = '\n'.join([status_report, _status_report])
        else:
            status_report = _status_report
return status_report
def approve_worker(self, worker, force=False)
Approve worker
2.955466
3.001349
0.984712
''' Reject worker '''
if chosen_hit:
    workers = self.amt_services.get_workers("Submitted")
    assignment_ids = [worker['assignmentId'] for worker in workers
                      if worker['hitId'] == chosen_hit]
    print 'rejecting workers for HIT', chosen_hit
for assignment_id in assignment_ids:
    success = self.amt_services.reject_worker(assignment_id)
    if success:
        print 'rejected', assignment_id
    else:
        print '*** failed to reject', assignment_id
def worker_reject(self, chosen_hit, assignment_ids = None)
Reject worker
4.04216
4.084657
0.989596
''' Unreject worker '''
if chosen_hit:
    workers = self.amt_services.get_workers("Rejected")
    assignment_ids = [worker['assignmentId'] for worker in workers
                      if worker['hitId'] == chosen_hit]
for assignment_id in assignment_ids:
    success = self.amt_services.unreject_worker(assignment_id)
    if success:
        print 'unrejected %s' % (assignment_id)
    else:
        print '*** failed to unreject', assignment_id
def worker_unreject(self, chosen_hit, assignment_ids = None)
Unreject worker
3.789508
3.840164
0.986809
''' Bonus worker '''
if self.config.has_option('Shell Parameters', 'bonus_message'):
    reason = self.config.get('Shell Parameters', 'bonus_message')
while not reason:
    user_input = raw_input("Type the reason for the bonus. Workers "
                           "will see this message: ")
    reason = user_input
# Bonus already-bonused workers if the user explicitly lists their
# assignment IDs
override_status = True
if chosen_hit:
    override_status = False
    workers = self.amt_services.get_workers("Approved", chosen_hit)
    if not workers:
        print "No approved workers for HIT", chosen_hit
        return
    print 'bonusing workers for HIT', chosen_hit
elif len(assignment_ids) == 1:
    workers = [self.amt_services.get_worker(assignment_ids[0])]
    if not workers:
        print "No submissions found for requested assignment ID"
        return
else:
    workers = self.amt_services.get_workers("Approved")
    if not workers:
        print "No approved workers found."
        return
    workers = [worker for worker in workers
               if worker['assignmentId'] in assignment_ids]
for worker in workers:
    assignment_id = worker['assignmentId']
    try:
        init_db()
        part = Participant.query.\
            filter(Participant.assignmentid == assignment_id).\
            filter(Participant.workerid == worker['workerId']).\
            filter(Participant.endhit != None).\
            one()
        if auto:
            amount = part.bonus
        status = part.status
        if amount <= 0:
            print "bonus amount <=$0, no bonus given for assignment", assignment_id
        elif status == 7 and not override_status:
            print "bonus already awarded for assignment", assignment_id
        else:
            success = self.amt_services.bonus_worker(assignment_id,
                                                     amount, reason)
            if success:
                print "gave bonus of $" + str(amount) + " for assignment " + \
                    assignment_id
                part.status = 7
                db_session.add(part)
                db_session.commit()
                db_session.remove()
            else:
                print "*** failed to bonus assignment", assignment_id
    except Exception as e:
        print e
        print "*** failed to bonus assignment", assignment_id
def worker_bonus(self, chosen_hit, auto, amount, reason='', assignment_ids=None)
Bonus worker
3.887203
3.885945
1.000324
assert type(hit_id) is list
assert type(hit_id[0]) is str
if self.amt_services.extend_hit(hit_id[0], assignments, minutes):
    print "HIT extended."
def hit_extend(self, hit_id, assignments, minutes)
Add additional worker assignments or minutes to a HIT.

Args:
    hit_id: A list containing one hit_id string.
    assignments: Variable <int> for number of assignments to add.
    minutes: Variable <int> for number of minutes to add.

Returns:
    A side effect of this function is that the state of a HIT changes
    on AMT servers.

Raises:
6.458902
5.41147
1.193558
''' Delete HIT. '''
if all_hits:
    hits_data = self.amt_services.get_all_hits()
    hit_ids = [hit.options['hitid'] for hit in hits_data
               if hit.options['status'] == "Reviewable"]
for hit in hit_ids:
    # Check that the HIT is reviewable
    status = self.amt_services.get_hit_status(hit)
    if not status:
        print "*** Error getting hit status"
        return
    if self.amt_services.get_hit_status(hit) != "Reviewable":
        print("*** This hit is not 'Reviewable' and so can not be "
              "deleted")
        return
    else:
        success = self.amt_services.delete_hit(hit)
        # self.web_services.delete_ad(hit)  # also delete the ad
        if success:
            if self.sandbox:
                print "deleting sandbox HIT", hit
            else:
                print "deleting live HIT", hit
def hit_delete(self, all_hits, hit_ids=None)
Delete HIT.
3.911801
3.894635
1.004408
''' Expire all HITs. '''
if all_hits:
    hits_data = self.get_active_hits()
    hit_ids = [hit.options['hitid'] for hit in hits_data]
for hit in hit_ids:
    success = self.amt_services.expire_hit(hit)
    if success:
        if self.sandbox:
            print "expiring sandbox HIT", hit
        else:
            print "expiring live HIT", hit
def hit_expire(self, all_hits, hit_ids=None)
Expire all HITs.
4.69211
4.732307
0.991506
''' Create a HIT '''
if self.sandbox:
    mode = 'sandbox'
else:
    mode = 'live'
server_loc = str(self.config.get('Server Parameters', 'host'))
use_psiturk_ad_server = self.config.getboolean('Shell Parameters',
                                               'use_psiturk_ad_server')
if use_psiturk_ad_server:
    if not self.web_services.check_credentials():
        error_msg = '\n'.join([
            '*****************************',
            ' Sorry, your psiTurk Credentials are invalid.\n ',
            ' You cannot create ads and hits until you enter valid credentials in ',
            ' the \'psiTurk Access\' section of ~/.psiturkconfig. You can obtain your',
            ' credentials or sign up at https://www.psiturk.org/login.\n'])
        raise Exception(error_msg)
if not self.amt_services.verify_aws_login():
    error_msg = '\n'.join([
        '*****************************',
        ' Sorry, your AWS Credentials are invalid.\n ',
        ' You cannot create ads and hits until you enter valid credentials in ',
        ' the \'AWS Access\' section of ~/.psiturkconfig. You can obtain your ',
        ' credentials via the Amazon AMT requester website.\n'])
    raise Exception(error_msg)
ad_id = None
if use_psiturk_ad_server:
    ad_id = self.create_psiturk_ad()
    create_failed = False
    fail_msg = None
    if ad_id is not False:
        ad_location = self.web_services.get_ad_url(ad_id, int(self.sandbox))
        hit_config = self.generate_hit_config(ad_location, numWorkers,
                                              reward, duration)
        hit_id = self.amt_services.create_hit(hit_config)
        if hit_id is not False:
            if not self.web_services.set_ad_hitid(ad_id, hit_id,
                                                  int(self.sandbox)):
                create_failed = True
                fail_msg = " Unable to update Ad on http://ad.psiturk.org to point at HIT."
        else:
            create_failed = True
            fail_msg = " Unable to create HIT on Amazon Mechanical Turk."
    else:
        create_failed = True
        fail_msg = " Unable to create Ad on http://ad.psiturk.org."
else:
    # not using psiturk ad server
    ad_location = "{}?mode={}".format(
        self.config.get('Shell Parameters', 'ad_location'), mode)
    hit_config = self.generate_hit_config(ad_location, numWorkers,
                                          reward, duration)
    create_failed = False
    hit_id = self.amt_services.create_hit(hit_config)
    if hit_id is False:
        create_failed = True
        fail_msg = " Unable to create HIT on Amazon Mechanical Turk."
if create_failed:
    print '\n'.join([
        '*****************************',
        ' Sorry, there was an error creating hit and registering ad.'])
    if fail_msg is None:
        fail_msg = ''
    raise Exception(fail_msg)
return (hit_id, ad_id)
def hit_create(self, numWorkers, reward, duration)
Create a HIT
2.905843
2.920906
0.994843
''' List AWS DB regions '''
regions = self.db_services.list_regions()
if regions != []:
    print "Available AWS regions:"
for reg in regions:
    print '\t' + reg,
    if reg == self.db_services.get_region():
        print "(currently selected)"
    else:
        print ''
def db_aws_list_regions(self)
List AWS DB regions
5.136947
4.877742
1.05314
''' Set AWS region '''
# interactive = False  # Not used
if region_name is None:
    # interactive = True  # Not used
    self.db_aws_list_regions()
    allowed_regions = self.db_services.list_regions()
    region_name = "NONSENSE WORD1234"
    tries = 0
    while region_name not in allowed_regions:
        if tries == 0:
            region_name = raw_input('Enter the name of the region you '
                                    'would like to use: ')
        else:
            print("*** The region name (%s) you entered is not allowed, "
                  "please choose from the list printed above (use type 'db "
                  "aws_list_regions'." % region_name)
            region_name = raw_input('Enter the name of the region you '
                                    'would like to use: ')
        tries += 1
        if tries > 5:
            print("*** Error, region you are requesting not available. "
                  "No changes made to regions.")
            return
self.db_services.set_region(region_name)
print "Region updated to ", region_name
self.config.set('AWS Access', 'aws_region', region_name, True)
if self.server.is_server_running() == 'yes':
    self.server_restart()
def db_aws_set_region(self, region_name)
Set AWS region
4.548834
4.652537
0.977711
''' List AWS DB instances '''
instances = self.db_services.get_db_instances()
if not instances:
    print("There are no DB instances associated with your AWS account "
          "in region " + self.db_services.get_region())
else:
    print("Here are the current DB instances associated with your AWS "
          "account in region " + self.db_services.get_region())
    for dbinst in instances:
        print '\t' + '-' * 20
        print "\tInstance ID: " + dbinst.id
        print "\tStatus: " + dbinst.status
def db_aws_list_instances(self)
List AWS DB instances
3.394183
3.344731
1.014785
''' Delete AWS DB instance '''
interactive = False
if instance_id is None:
    interactive = True
instances = self.db_services.get_db_instances()
instance_list = [dbinst.id for dbinst in instances]
if interactive:
    valid = False
    if len(instances) == 0:
        print("There are no instances you can delete currently. Use "
              "`db aws_create_instance` to make one.")
        return
    print "Here are the available instances you can delete:"
    for inst in instances:
        print "\t ", inst.id, "(", inst.status, ")"
    while not valid:
        instance_id = raw_input('Enter the instance identity you would '
                                'like to delete: ')
        res = self.db_services.validate_instance_id(instance_id)
        if res is True:
            valid = True
        else:
            print(res + " Try again, instance name not valid. Check "
                  "for typos.")
        if instance_id in instance_list:
            valid = True
        else:
            valid = False
            print("Try again, instance not present in this account. "
                  "Try again checking for typos.")
else:
    res = self.db_services.validate_instance_id(instance_id)
    if res is not True:
        print("*** Error, instance name either not valid. Try again "
              "checking for typos.")
        return
    if instance_id not in instance_list:
        print("*** Error, This instance not present in this account. "
              "Try again checking for typos. Run `db aws_list_instances` to "
              "see valid list.")
        return
user_input = raw_input(
    "Deleting an instance will erase all your data associated with the "
    "database in that instance. Really quit? y or n:"
)
if user_input == 'y':
    res = self.db_services.delete_db_instance(instance_id)
    if res:
        print("AWS RDS database instance %s deleted. Run `db "
              "aws_list_instances` for current status." % instance_id)
    else:
        print("*** Error deleting database instance %s. "
              "It maybe because it is still being created, deleted, or is "
              "being backed up. Run `db aws_list_instances` for current "
              "status." % instance_id)
else:
    return
def db_aws_delete_instance(self, instance_id)
Delete AWS DB instance
3.413344
3.403531
1.002883
'''init method

Takes our custom options from self.options and creates a config dict
which specifies custom settings.
'''
cfg = {}
for k, v in self.options.items():
    if k.lower() in self.cfg.settings and v is not None:
        cfg[k.lower()] = v
return cfg
def init(self, *args)
init method. Takes our custom options from self.options and creates a config dict which specifies custom settings.
7.282998
2.627804
2.771515
''' Setup example '''
if os.path.exists(EXAMPLE_TARGET):
    print "Error, `psiturk-example` directory already exists. Please " \
        "remove it then re-run the command."
else:
    print "Creating new folder `psiturk-example` in the current working " \
        "directory"
    os.mkdir(EXAMPLE_TARGET)
    print "Copying", EXAMPLE_DIR, "to", EXAMPLE_TARGET
    dir_util.copy_tree(EXAMPLE_DIR, EXAMPLE_TARGET)
    # change to target directory
    print "Creating default configuration file (config.txt)"
    file_util.copy_file(DEFAULT_CONFIG_FILE, CONFIG_TARGET)
    os.chdir(EXAMPLE_TARGET)
    os.rename('custom.py.txt', 'custom.py')
    if not os.path.exists(GLOBAL_CONFIG_FILE):
        print "The following config file does not exist:\n{}" \
            "\nCreating default config file at that " \
            "location.".format(GLOBAL_CONFIG_FILE)
        file_util.copy_file(DEFAULT_GLOBAL_CONFIG_FILE, GLOBAL_CONFIG_FILE)
def setup_example()
Setup example
4.006063
4.067605
0.98487
awaiting_service = Wait_For_State(
    lambda: not is_port_available(ip, port), function)
awaiting_service.start()
return awaiting_service
def wait_until_online(function, ip, port)
Uses Wait_For_State to wait for the server to come online, then runs the given function.
7.359302
4.921748
1.495262
''' Figure out how we were invoked '''
invoked_as = os.path.basename(sys.argv[0])
if invoked_as == "psiturk":
    launch_shell()
elif invoked_as == "psiturk-server":
    launch_server()
elif invoked_as == "psiturk-shell":
    launch_shell()
elif invoked_as == "psiturk-setup-example":
    setup_example()
elif invoked_as == "psiturk-install":
    install_from_exchange()
def process()
Figure out how we were invoked
3.409848
3.155927
1.080458
''' Install from experiment exchange. '''
parser = argparse.ArgumentParser(
    description='Download experiment from the psiturk.org experiment '
                'exchange (http://psiturk.org/ee).'
)
parser.add_argument(
    'exp_id', metavar='exp_id', type=str,
    help='the id number of the experiment in the exchange'
)
args = parser.parse_args()
exp_exch = ExperimentExchangeServices()
exp_exch.download_experiment(args.exp_id)
def install_from_exchange()
Install from experiment exchange.
4.118574
4.1035
1.003673
''' Add commands for testing, etc. '''
parser = argparse.ArgumentParser(
    description='Creates a simple default project (stroop) in the current '
                'directory with the necessary psiTurk files.'
)
# Optional flags
parser.add_argument(
    '-v', '--version', help='Print version number.', action="store_true"
)
args = parser.parse_args()
# If requested version just print and quit
if args.version:
    print version_number
else:
    import psiturk.setup_example as se
    se.setup_example()
def setup_example()
Add commands for testing, etc.
7.710655
6.831781
1.128645
''' Add commands for testing, etc. '''
parser = argparse.ArgumentParser(
    description='Launch psiTurk experiment webserver process on the '
                'host/port defined in config.txt.'
)
# Optional flags
parser.add_argument(
    '-v', '--version', help='Print version number.', action="store_true"
)
args = parser.parse_args()
# If requested version just print and quit
if args.version:
    print version_number
else:
    import psiturk.experiment_server as es
    es.launch()
def launch_server()
Add commands for testing, etc.
7.268907
6.149009
1.182126
''' Add commands for testing, etc. '''
parser = argparse.ArgumentParser(
    description='Launch the psiTurk interactive shell.'
)
# Optional flags
parser.add_argument(
    '-v', '--version', help='Print version number.', action="store_true"
)
parser.add_argument(
    '-c', '--cabinmode', help='Launch psiturk in cabin (offline) mode',
    action="store_true"
)
script_group = parser.add_mutually_exclusive_group()
script_group.add_argument(
    '-s', '--script', help='Run commands from a script file'
)
script_group.add_argument(
    '-e', '--execute', help='Execute one command specified on the command line'
)
args, unknownargs = parser.parse_known_args()
# If requested version just print and quit
if args.version:
    print version_number
else:
    import psiturk.psiturk_shell as ps
    if args.script:
        ps.run(cabinmode=args.cabinmode, script=args.script, quiet=True)
    elif args.execute or unknownargs:
        if unknownargs:
            execute = ' '.join(unknownargs)
        else:
            execute = args.execute
        ps.run(cabinmode=args.cabinmode, execute=execute, quiet=True)
    else:
        ps.run(cabinmode=args.cabinmode)
def launch_shell()
Add commands for testing, etc.
3.101558
2.908036
1.066548
def get_my_ip():
    if 'OPENSHIFT_SECRET_TOKEN' in os.environ:
        my_ip = os.environ['OPENSHIFT_APP_DNS']
    else:
        my_ip = json.load(urllib2.urlopen(
            'http://httpbin.org/ip'
        ))['origin'].split(',')[0]
    return my_ip
Asks an external server what your ip appears to be (useful if running from behind a NAT/wifi router). Of course, the incoming port to the router must be forwarded correctly.
null
null
null
def colorize(target, color, use_escape=True):
    ''' Colorize target string. Set use_escape to false when text will not be
    interpreted by readline, such as in intro message.'''
    def escape(code):
        ''' Escape character '''
        return '\001%s\002' % code
    if color == 'purple':
        color_code = '\033[95m'
    elif color == 'cyan':
        color_code = '\033[96m'
    elif color == 'darkcyan':
        color_code = '\033[36m'
    elif color == 'blue':
        color_code = '\033[93m'  # note: same code as 'yellow' in the original
    elif color == 'green':
        color_code = '\033[92m'
    elif color == 'yellow':
        color_code = '\033[93m'
    elif color == 'red':
        color_code = '\033[91m'
    elif color == 'white':
        color_code = '\033[37m'
    elif color == 'bold':
        color_code = '\033[1m'
    elif color == 'underline':
        color_code = '\033[4m'
    else:
        color_code = ''
    if use_escape:
        return escape(color_code) + target + escape('\033[0m')
    else:
        return color_code + target + '\033[m'
Colorize target string. Set use_escape to false when text will not be interpreted by readline, such as in intro message.
null
null
null
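A usage note for colorize above: when the string is printed directly (e.g., a banner rather than a readline prompt), pass use_escape=False so the raw ANSI codes are emitted. Shown in the codebase's own Python 2 print syntax:

print colorize('psiTurk', 'green', use_escape=False)
print colorize('*** warning ***', 'red', use_escape=False)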