code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
# print ("-" + line + "-") if line == '': line = 'None' if self.is_lower: line = line.lower() if line == "user ": # for slurm which has "user" and "user " line = "userid" for convert in self.change: line = line.replace(convert[0], convert[1]) if self.is_strip: line = line.strip() return line.strip(' ')
def clean(self, line)
:param line: cleans the string :return:
5.924534
6.041411
0.980654
header = self.lines[0] self.lines = self.lines[1:] self.headers = \ [self.clean(h) for h in header.split(self.seperator)] if self.is_strip: self.headers = self.headers[1:-1] return self.headers
def _get_headers(self)
assumes comments have been stripped with extract :return:
4.804458
4.214108
1.140089
d = tempfile.mkdtemp(*args, **kwargs) try: yield d finally: shutil.rmtree(d)
def tempdir(*args, **kwargs)
A contextmanager to work in an auto-removed temporary directory Arguments are passed through to tempfile.mkdtemp example: >>> with tempdir() as path: ... pass
2.671066
3.083612
0.866213
sleeptime_ms = 500 while True: if fn(): return True else: print('Sleeping {} ms'.format(sleeptime_ms)) time.sleep(sleeptime_ms / 1000.0) sleeptime_ms *= 2 if sleeptime_ms / 1000.0 > sleeptime_s_max: return False
def exponential_backoff(fn, sleeptime_s_max=30 * 60)
Calls `fn` until it returns True, with an exponentially increasing wait time between calls
2.277949
2.151385
1.058829
p = pattern.replace("*", ".*") test = re.compile(p) result = [] for l in lines: if test.search(l): result.append(l) return result
def search(lines, pattern)
return all lines that match the pattern #TODO: we need an example :param lines: :param pattern: :return:
2.79051
2.860748
0.975448
try: # for line in file # if line matches pattern: # return line return next((L for L in open(filename) if L.find(pattern) >= 0)) except StopIteration: return ''
def grep(pattern, filename)
Very simple grep that returns the first matching line in a file. String matching only, does not do REs as currently implemented.
5.117185
4.886104
1.047294
result = os.path.expandvars(os.path.expanduser(text)) # template = Template(text) # result = template.substitute(os.environ) if result.startswith("."): result = result.replace(".", os.getcwd(), 1) return result
def path_expand(text)
returns a string with expanded variable. :param text: the path to be expanded, which can include ~ and environment $ variables :param text: string
3.220556
3.845129
0.837568
# if isinstance(data, basestring): if isinstance(data, str): return str(data) elif isinstance(data, collectionsAbc.Mapping): return dict(map(convert_from_unicode, data.items())) elif isinstance(data, collectionsAbc.Iterable): return type(data)(map(convert_from_unicode, data)) else: return data
def convert_from_unicode(data)
converts unicode data to a string :param data: the data to convert :return:
2.110463
2.194583
0.961669
# http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input""" choices = 'Y/n' if default.lower() in ('y', 'yes') else 'y/N' if tries is None: choice = input("%s (%s) " % (message, choices)) values = ('y', 'yes', '') if default == 'y' else ('y', 'yes') return True if choice.strip().lower() in values else False else: while tries > 0: choice = input( "%s (%s) (%s)" % (message, choices, "'q' to discard")) choice = choice.strip().lower() if choice in ['y', 'yes']: return True elif choice in ['n', 'no', 'q']: return False else: print("Invalid input...") tries -= 1
def yn_choice(message, default='y', tries=None)
asks for a yes/no question. :param tries: the number of tries :param message: the message containing the question :param default: the default answer
2.528167
2.508583
1.007807
output = "" if debug: output = "\n" output += "# " + 70 * c + "\n" if label is not None: output += "# " + label + "\n" output += "# " + 70 * c + "\n" if txt is not None: for line in txt.split("\n"): output += "# " + line + "\n" output += "# " + 70 * c + "\n" if color is None: color = "BLUE" Console.cprint(color, "", output)
def banner(txt=None, c="#", debug=True, label=None, color=None)
prints a banner of the form with a frame of # around the txt:: ############################ # txt ############################ :param color: prints in the given color :param label: adds a label :param debug: prints only if debug is true :param txt: a text message to be printed :type txt: string :param c: the character used instead of c :type c: character
2.618369
2.769132
0.945556
line = "" if debug: line += "\n" line += "# " + str(70 * c) if txt is not None: line += "# " + txt line += "# " + str(70 * c) return line
def str_banner(txt=None, c="#", debug=True)
prints a banner of the form with a frame of # around the txt:: ############################ # txt ############################ :param debug: return "" if not in debug :type debug: boolean :param txt: a text message to be printed :type txt: string :param c: the character used instead of c :type c: character
3.990343
4.470442
0.892606
frame = inspect.getouterframes(inspect.currentframe()) filename = frame[1][1].replace(os.getcwd(), "") line = frame[1][2] - 1 method = frame[1][3] msg = "{}\n# {} {} {}".format(txt, method, filename, line) print() banner(msg, c=c)
def HEADING(txt=None, c="#")
Prints a message to stdout with #### surrounding it. This is useful for nosetests to better distinguish them. :param c: uses the given char to wrap the header :param txt: a text message to be printed :type txt: string
4.398634
4.725827
0.930765
location = path_expand(filename) n = 0 found = True backup = None while found: n += 1 backup = "{0}.bak.{1}".format(location, n) found = os.path.isfile(backup) return backup
def backup_name(filename)
:param filename: given a filename creates a backup name of the form filename.bak.1. If the filename already exists the number will be increased as much as needed so the file does not exist in the given location. The filename can consists a path and is expanded with ~ and environment variables. :type filename: string :rtype: string
3.705971
3.600321
1.029345
version_filename = Path( "{classname}/{filename}".format(classname=class_name, filename=filename)) with open(version_filename, "r") as f: content = f.read() if content != '__version__ = "{0}"'.format(version): banner("Updating version to {0}".format(version)) with open(version_filename, "w") as text_file: text_file.write('__version__ = "{0:s}"'.format(version))
def auto_create_version(class_name, version, filename="__init__.py")
creates a version number in the __init__.py file. it can be accessed with __version__ :param class_name: :param version: :param filename: :return:
2.880091
2.982601
0.965631
with open(path_expand(filename), 'r') as f: content = f.read() return content
def readfile(filename)
returns the content of a file :param filename: the filename :return:
4.164052
5.662739
0.735342
with open(path_expand(filename), 'w') as outfile: outfile.write(content)
def writefile(filename, content)
writes the content into the file :param filename: the filename :param content: the content :return:
4.395842
6.494404
0.676866
lletters = "abcdefghijklmnopqrstuvwxyz" uletters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # This doesn't guarantee both lower and upper cases will show up alphabet = lletters + uletters digit = "0123456789" mypw = "" def _random_character(texts): return texts[random.randrange(len(texts))] if not lower: alphabet = uletters elif not upper: alphabet = lletters for i in range(length): # last half length will be filled with numbers if number and i >= int(length / 2): mypw = mypw + _random_character(digit) else: mypw = mypw + _random_character(alphabet) return mypw
def generate_password(length=8, lower=True, upper=True, number=True)
generates a simple password. We should not really use this in production. :param length: the length of the password :param lower: True of lower case characters are allowed :param upper: True if upper case characters are allowed :param number: True if numbers are allowed :return:
3.567205
3.535004
1.009109
file_contains_tabs = False with open(filename) as f: lines = f.read().split("\n") line_no = 1 for line in lines: if "\t" in line: file_contains_tabs = True location = [ i for i in range(len(line)) if line.startswith('\t', i)] if verbose: Console.error("Tab found in line {} and column(s) {}" .format(line_no, str(location).replace("[", "").replace( "]", "")), traceflag=False) line_no += 1 return file_contains_tabs
def check_file_for_tabs(filename, verbose=True)
identifies if the file contains tabs and returns True if it does. It also prints the location of the lines and columns. If verbose is set to False, the location is not printed. :param verbose: if true prints information about issues :param filename: the filename :rtype: True if there are tabs in the file
3.211359
3.203642
1.002409
# noinspection PyClassHasNoInit class OrderedLoader(Loader): pass def construct_mapping(loader, node): loader.flatten_mapping(node) return object_pairs_hook(loader.construct_pairs(node)) OrderedLoader.add_constructor( yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping) return yaml.load(stream, OrderedLoader)
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict)
Loads an ordered dict into a yaml while preserving the order :param stream: the name of the stream :param Loader: the yaml loader (such as yaml.SafeLoader) :param object_pairs_hook: the ordered dict
1.736719
2.086541
0.832344
location = filename if location is not None: location = path_expand(location) if not os.path.exists(location) and not check: return None if check and os.path.exists(location): # test for tab in yaml file if check_file_for_tabs(location): log.error("The file {0} contains tabs. yaml " "Files are not allowed to contain tabs".format(location)) sys.exit() result = None try: if osreplace: result = open(location, 'r').read() t = Template(result) result = t.substitute(os.environ) # data = yaml.safe_load(result) data = ordered_load(result, yaml.SafeLoader) else: f = open(location, "r") # data = yaml.safe_load(f) data = ordered_load(result, yaml.SafeLoader) f.close() return data except Exception as e: log.error( "The file {0} fails with a yaml read error".format(filename)) Error.traceback(e) sys.exit() else: log.error("The file {0} does not exist.".format(filename)) if exit: sys.exit() return None
def read_yaml_config(filename, check=True, osreplace=True, exit=True)
reads in a yaml file from the specified filename. If check is set to true the code will fail if the file does not exist. However if it is set to false and the file does not exist, None is returned. :param exit: if true is exist with sys exit :param osreplace: if true replaces environment variables from the OS :param filename: the file name :param check: if True fails if the file does not exist, if False and the file does not exist return will be None
3.289561
3.36469
0.977671
for key, value in data_structure.items(): print("\n%s%s:" % (' ' * attribute_indent * indent, str(key)), end=' ') if isinstance(value, OrderedDict): custom_print(value, indent + 1) elif isinstance(value, dict): custom_print(value, indent + 1) else: print("%s" % (str(value)), end=' ')
def custom_print(data_structure, indent)
prints a given data structure such as a dict or ordered dict at a given indentation level :param data_structure: :param indent: :return:
2.324497
2.397475
0.96956
if isinstance(o, OrderedDict): return "{" + ",\n ".join([self.encode(k) + ":" + self.encode(v, depth + 1) for (k, v) in o.items()]) + "}\n" else: return simplejson.JSONEncoder.encode(self, o)
def encode(self, o, depth=0)
encode the json object at given depth :param o: the object :param depth: the depth :return: the json encoding
2.783068
2.913888
0.955105
for v in ["filename", "location", "prefix"]: if "meta" not in self: self["meta"] = {} self["meta"][v] = self[v] del self[v]
def _update_meta(self)
internal function to define the metadata regarding filename, location, and prefix.
4.610983
3.189882
1.445503
self._set_filename(filename) if os.path.isfile(self['location']): # d = OrderedDict(read_yaml_config(self['location'], check=True)) d = read_yaml_config(self['location'], check=True) with open(self['location']) as myfile: document = myfile.read() x = yaml.load(document, Loader=yaml.FullLoader) try: self.update(d) except: print("ERROR: can not find", self["location"]) sys.exit() else: print( "Error while reading and updating the configuration file {:}".format( filename))
def load(self, filename)
Loads the yaml file with the given filename. :param filename: the name of the yaml file
4.581124
4.667973
0.981395
try: log.error("Filename: {0}".format(self['meta']['location'])) except: log.error("Filename: {0}".format(self['location'])) log.error("Key '{0}' does not exist".format('.'.join(keys))) indent = "" last_index = len(keys) - 1 for i, k in enumerate(keys): if i == last_index: log.error(indent + k + ": <- this value is missing") else: log.error(indent + k + ":") indent += " "
def error_keys_not_found(self, keys)
Check if the requested keys are found in the dict. :param keys: keys to be looked for
3.666515
3.91009
0.937706
return ordered_dump(OrderedDict(self), Dumper=yaml.SafeDumper, default_flow_style=False)
def yaml(self)
returns the yaml output of the dict.
5.242913
4.4271
1.184277
if keys is None: return self if "." in keys[0]: keys = keys[0].split('.') element = self for v in keys: try: element = element[v] except KeyError: self.error_keys_not_found(keys) # sys.exit() return element
def get(self, *keys)
returns the dict of the information as read from the yaml file. To access the file safely, you can use the keys in the order of the access. Example: get("provisioner","policy") will return the value of config["provisioner"]["policy"] from the yaml file if it does not exists an error will be printing that the value does not exists. Alternatively you can use the . notation e.g. get("provisioner.policy")
4.056227
4.301588
0.94296
element = self if keys is None: return self if '.' in keys[0]: keys = keys[0].split(".") nested_str = ''.join(["['{0}']".format(x) for x in keys]) # Safely evaluate an expression to see if it is one of the Python # literal structures: strings, numbers, tuples, lists, dicts, booleans, # and None. Quoted string will be used if it is none of these types. try: ast.literal_eval(str(value)) converted = str(value) except ValueError: converted = "'" + str(value) + "'" exec("self" + nested_str + "=" + converted) return element
def set(self, value, *keys)
Sets the dict of the information as read from the yaml file. To access the file safely, you can use the keys in the order of the access. Example: set("{'project':{'fg82':[i0-i10]}}", "provisioner","policy") will set the value of config["provisioner"]["policy"] in the yaml file if it does not exists an error will be printing that the value does not exists. Alternatively you can use the . notation e.g. set("{'project':{'fg82':[i0-i10]}}", "provisioner.policy")
5.269274
5.106217
1.031933
if self['meta']['prefix'] is None: k = keys else: k = self['meta']['prefix'] + "." + keys return self.get(k)
def attribute(self, keys)
TODO: document this method :param keys:
4.904025
5.825177
0.841867
flat = flatten(table, sep=sep) return Printer.write(flat, sort_keys=sort_keys, order=order, header=header, output=output)
def flatwrite(cls, table, order=None, header=None, output="table", sort_keys=True, show_none="", sep="." )
writes the information given in the table :param table: the table of values :param order: the order of the columns :param header: the header for the columns :param output: the format (default is table, values are raw, csv, json, yaml, dict :param sort_keys: if true the table is sorted :param show_none: passed along to the list or dict printer :param sep: uses sep as the separator for csv printer :return:
4.25196
6.427778
0.661498
if output == "raw": return table elif table is None: return None elif type(table) in [dict, dotdict]: return cls.dict(table, order=order, header=header, output=output, sort_keys=sort_keys, show_none=show_none) elif type(table) == list: return cls.list(table, order=order, header=header, output=output, sort_keys=sort_keys, show_none=show_none) else: Console.error("unkown type {0}".format(type(table)))
def write(cls, table, order=None, header=None, output="table", sort_keys=True, show_none="" )
writes the information given in the table :param table: the table of values :param order: the order of the columns :param header: the header for the columns :param output: the format (default is table, values are raw, csv, json, yaml, dict :param sort_keys: if true the table is sorted :param show_none: passed along to the list or dict printer :return:
2.351774
2.328771
1.009877
d = {} count = 0 for entry in l: name = str(count) d[name] = entry count += 1 return cls.dict(d, order=order, header=header, sort_keys=sort_keys, output=output, show_none=show_none)
def list(cls, l, order=None, header=None, output="table", sort_keys=True, show_none="" )
:param l: l is a list not a dict :param order: :param header: :param output: :param sort_keys: :param show_none: :return:
2.500415
2.625317
0.952424
if output == "table": if d == {}: return None else: return cls.dict_table(d, order=order, header=header, sort_keys=sort_keys) elif output == "csv": return cls.csv(d, order=order, header=header, sort_keys=sort_keys) elif output == "json": return json.dumps(d, sort_keys=sort_keys, indent=4) elif output == "yaml": return yaml.dump(convert_from_unicode(d), default_flow_style=False) elif output == "dict": return d else: return "UNKOWN FORMAT. Please use table, csv, json, yaml, dict."
def dict(cls, d, order=None, header=None, output="table", sort_keys=True, show_none="")
TODO :param d: A a dict with dicts of the same type. :type d: dict :param order:The order in which the columns are printed. The order is specified by the key names of the dict. :type order: :param header: The Header of each of the columns :type header: list or tuple of field names :param output: type of output (table, csv, json, yaml or dict) :type output: string :param sort_keys: :type sort_keys: bool :param show_none: prints None if True for None values otherwise "" :type show_none: bool :return:
2.253848
2.211755
1.019031
first_element = list(d)[0] def _keys(): return list(d[first_element]) # noinspection PyBroadException def _get(element, key): try: tmp = str(d[element][key]) except: tmp = ' ' return tmp if d is None or d == {}: return None if order is None: order = _keys() if header is None and order is not None: header = order elif header is None: header = _keys() table = "" content = [] for attribute in order: content.append(attribute) table = table + ",".join([str(e) for e in content]) + "\n" for job in d: content = [] for attribute in order: try: content.append(d[job][attribute]) except: content.append("None") table = table + ",".join([str(e) for e in content]) + "\n" return table
def csv(cls, d, order=None, header=None, sort_keys=True)
prints a table in csv format :param d: A a dict with dicts of the same type. :type d: dict :param order:The order in which the columns are printed. The order is specified by the key names of the dict. :type order: :param header: The Header of each of the columns :type header: list or tuple of field names :param sort_keys: TODO: not yet implemented :type sort_keys: bool :return: a string representing the table in csv format
2.789816
2.777371
1.004481
def _keys(): all_keys = [] for e in d: keys = d[e].keys() all_keys.extend(keys) return list(set(all_keys)) # noinspection PyBroadException def _get(item, key): try: tmp = str(d[item][key]) if tmp == "None": tmp = show_none except: tmp = ' ' return tmp if d is None or d == {}: return None if order is None: order = _keys() if header is None and order is not None: header = order elif header is None: header = _keys() x = PrettyTable(header) x.max_width = max_width if sort_keys: if type(sort_keys) is str: sorted_list = sorted(d, key=lambda x: d[x][sort_keys]) elif type(sort_keys) == tuple: sorted_list = sorted(d, key=lambda x: tuple( [d[x][sort_key] for sort_key in sort_keys])) else: sorted_list = d else: sorted_list = d for element in sorted_list: values = [] for key in order: values.append(_get(element, key)) x.add_row(values) x.align = "l" return x
def dict_table(cls, d, order=None, header=None, sort_keys=True, show_none="", max_width=40)
prints a pretty table from an dict of dicts :param d: A a dict with dicts of the same type. Each key will be a column :param order: The order in which the columns are printed. The order is specified by the key names of the dict. :param header: The Header of each of the columns :type header: A list of string :param sort_keys: Key(s) of the dict to be used for sorting. This specify the column(s) in the table for sorting. :type sort_keys: string or a tuple of string (for sorting with multiple columns) :param show_none: prints None if True for None values otherwise "" :type show_none: bool :param max_width: maximum width for a cell :type max_width: int
2.20965
2.133852
1.035521
if header is None: header = ["Attribute", "Value"] if output == "table": x = PrettyTable(header) if order is not None: sorted_list = order else: sorted_list = list(d) if sort_keys: sorted_list = sorted(d) for key in sorted_list: if type(d[key]) == dict: values = d[key] x.add_row([key, "+"]) for e in values: x.add_row([" -", "{}: {}".format(e, values[e])]) elif type(d[key]) == list: values = list(d[key]) x.add_row([key, "+"]) for e in values: x.add_row([" -", e]) else: x.add_row([key, d[key] or ""]) x.align = "l" return x else: return cls.dict({output: d}, output=output)
def attribute(cls, d, header=None, order=None, sort_keys=True, output="table")
prints a attribute/key value table :param d: A a dict with dicts of the same type. Each key will be a column :param order: The order in which the columns are printed. The order is specified by the key names of the dict. :param header: The Header of each of the columns :type header: A list of string :param sort_keys: Key(s) of the dict to be used for sorting. This specify the column(s) in the table for sorting. :type sort_keys: string or a tuple of string (for sorting with multiple columns) :param output: the output format table, csv, dict, json
2.317348
2.413861
0.960017
def dict_from_list(l): d = dict([(idx, item) for idx, item in enumerate(l)]) return d if output == 'table': x = PrettyTable(["Index", "Host"]) for (idx, item) in enumerate(l): x.add_row([idx, item]) x.align = "l" x.align["Index"] = "r" return x elif output == 'csv': return ",".join(l) elif output == 'dict': d = dict_from_list(l) return d elif output == 'json': d = dict_from_list(l) result = json.dumps(d, indent=4) return result elif output == 'yaml': d = dict_from_list(l) result = yaml.dump(d, default_flow_style=False) return result elif output == 'txt': return "\n".join(l)
def print_list(cls, l, output='table')
prints a list :param l: the list :param output: the output, default is a table :return:
2.015519
2.064374
0.976334
# header header = list(d) x = PrettyTable(labels) if order is None: order = header for key in order: value = d[key] if type(value) == list: x.add_row([key, value[0]]) for element in value[1:]: x.add_row(["", element]) elif type(value) == dict: value_keys = list(value) first_key = value_keys[0] rest_keys = value_keys[1:] x.add_row( [key, "{0} : {1}".format(first_key, value[first_key])]) for element in rest_keys: x.add_row(["", "{0} : {1}".format(element, value[element])]) else: x.add_row([key, value]) x.align = "l" return x
def row_table(cls, d, order=None, labels=None)
prints a pretty table from data in the dict. :param d: A dict to be printed :param order: The order in which the columns are printed. The order is specified by the key names of the dict. :param labels: The array of labels for the column
2.087914
2.147658
0.972182
# TODO: why is there a tmpdir? with tempdir() as workdir: key = os.path.join(workdir, 'key.pub') with open(key, 'w') as fd: fd.write(pubkey) cmd = [ 'ssh-keygen', '-l', '-f', key, ] p = Subprocess(cmd) output = p.stdout.strip() bits, fingerprint, _ = output.split(' ', 2) return fingerprint
def get_fingerprint_from_public_key(pubkey)
Generate the fingerprint of a public key :param str pubkey: the value of the public key :returns: fingerprint :rtype: str
3.503131
3.403259
1.029346
auth = cls() with open(path) as fd: for pubkey in itertools.imap(str.strip, fd): # skip empty lines if not pubkey: continue auth.add(pubkey) return auth
def load(cls, path)
load the keys from a path :param path: the filename (path) in which we find the keys :return:
4.816525
5.137063
0.937603
f = get_fingerprint_from_public_key(pubkey) if f not in self._keys: self._order[len(self._keys)] = f self._keys[f] = pubkey
def add(self, pubkey)
add a public key. :param pubkey: the filename to the public key :return:
4.295472
4.630311
0.927685
format, created = Format.objects.get_or_create(name='newman_thumb', defaults={ 'max_width': 100, 'max_height': 100, 'flexible_height': False, 'stretch': False, 'nocrop': True, }) if created: format.sites = Site.objects.all() info = obj.get_formated_photo(format) return '<a href="%(href)s"><img src="%(src)s"></a>' % { 'href': '%s/' % obj.pk, 'src': info['url'] }
def thumb(self, obj)
Generates html and thumbnails for admin site.
4.776693
4.714796
1.013128
if not as_string: startmessage = '\nStarting analysis of %s\n' % pdbfile.split('/')[-1] else: startmessage = "Starting analysis from stdin.\n" write_message(startmessage) write_message('='*len(startmessage)+'\n') mol = PDBComplex() mol.output_path = outpath mol.load_pdb(pdbfile, as_string=as_string) # #@todo Offers possibility for filter function from command line (by ligand chain, position, hetid) for ligand in mol.ligands: mol.characterize_complex(ligand) create_folder_if_not_exists(outpath) # Generate the report files streport = StructureReport(mol, outputprefix=outputprefix) config.MAXTHREADS = min(config.MAXTHREADS, len(mol.interaction_sets)) ###################################### # PyMOL Visualization (parallelized) # ###################################### if config.PYMOL or config.PICS: try: from plip.modules.visualize import visualize_in_pymol except ImportError: from modules.visualize import visualize_in_pymol complexes = [VisualizerData(mol, site) for site in sorted(mol.interaction_sets) if not len(mol.interaction_sets[site].interacting_res) == 0] if config.MAXTHREADS > 1: write_message('\nGenerating visualizations in parallel on %i cores ...' % config.MAXTHREADS) parfn = parallel_fn(visualize_in_pymol) parfn(complexes, processes=config.MAXTHREADS) else: [visualize_in_pymol(plcomplex) for plcomplex in complexes] if config.XML: # Generate report in xml format streport.write_xml(as_string=config.STDOUT) if config.TXT: # Generate report in txt (rst) format streport.write_txt(as_string=config.STDOUT)
def process_pdb(pdbfile, outpath, as_string=False, outputprefix='report')
Analysis of a single PDB file. Can generate textual reports XML, PyMOL session files and images as output.
5.201052
5.084146
1.022994
try: if len(inputpdbid) != 4 or extract_pdbid(inputpdbid.lower()) == 'UnknownProtein': sysexit(3, 'Invalid PDB ID (Wrong format)\n') pdbfile, pdbid = fetch_pdb(inputpdbid.lower()) pdbpath = tilde_expansion('%s/%s.pdb' % (config.BASEPATH.rstrip('/'), pdbid)) create_folder_if_not_exists(config.BASEPATH) with open(pdbpath, 'w') as g: g.write(pdbfile) write_message('file downloaded as %s\n\n' % pdbpath) return pdbpath, pdbid except ValueError: # Invalid PDB ID, cannot fetch from RCBS server sysexit(3, 'Invalid PDB ID (Entry does not exist)\n')
def download_structure(inputpdbid)
Given a PDB ID, downloads the corresponding PDB structure. Checks for validity of ID and handles error while downloading. Returns the path of the downloaded file.
5.3578
5.31273
1.008483
unique = list(set(slist)) difference = len(slist) - len(unique) if difference == 1: write_message("Removed one duplicate entry from input list.\n") if difference > 1: write_message("Removed %i duplicate entries from input list.\n" % difference) return unique
def remove_duplicates(slist)
Checks input lists for duplicates and returns a list with unique entries
3.000167
3.092954
0.970001
pdbid, pdbpath = None, None # #@todo For multiprocessing, implement better stacktracing for errors # Print title and version title = "* Protein-Ligand Interaction Profiler v%s *" % __version__ write_message('\n' + '*' * len(title) + '\n') write_message(title) write_message('\n' + '*' * len(title) + '\n\n') outputprefix = config.OUTPUTFILENAME if inputstructs is not None: # Process PDB file(s) num_structures = len(inputstructs) inputstructs = remove_duplicates(inputstructs) read_from_stdin = False for inputstruct in inputstructs: if inputstruct == '-': inputstruct = sys.stdin.read() read_from_stdin = True if config.RAWSTRING: if sys.version_info < (3,): inputstruct = bytes(inputstruct).decode('unicode_escape') else: inputstruct = bytes(inputstruct, 'utf8').decode('unicode_escape') else: if os.path.getsize(inputstruct) == 0: sysexit(2, 'Empty PDB file\n') # Exit if input file is empty if num_structures > 1: basename = inputstruct.split('.')[-2].split('/')[-1] config.OUTPATH = '/'.join([config.BASEPATH, basename]) outputprefix = 'report' process_pdb(inputstruct, config.OUTPATH, as_string=read_from_stdin, outputprefix=outputprefix) else: # Try to fetch the current PDB structure(s) directly from the RCBS server num_pdbids = len(inputpdbids) inputpdbids = remove_duplicates(inputpdbids) for inputpdbid in inputpdbids: pdbpath, pdbid = download_structure(inputpdbid) if num_pdbids > 1: config.OUTPATH = '/'.join([config.BASEPATH, pdbid[1:3].upper(), pdbid.upper()]) outputprefix = 'report' process_pdb(pdbpath, config.OUTPATH, outputprefix=outputprefix) if (pdbid is not None or inputstructs is not None) and config.BASEPATH is not None: if config.BASEPATH in ['.', './']: write_message('\nFinished analysis. Find the result files in the working directory.\n\n') else: write_message('\nFinished analysis. Find the result files in %s\n\n' % config.BASEPATH)
def main(inputstructs, inputpdbids)
Main function. Calls functions for processing, report generation and visualization.
3.679825
3.602409
1.02149
data = self.cleaned_data photo = data['photo'] if ( (data['crop_left'] > photo.width) or (data['crop_top'] > photo.height) or ((data['crop_left'] + data['crop_width']) > photo.width) or ((data['crop_top'] + data['crop_height']) > photo.height) ): # raise forms.ValidationError, ugettext("The specified crop coordinates do not fit into the source photo.") raise ValidationError(ugettext("The specified crop coordinates do not fit into the source photo.")) return data
def clean(self)
Validation function that checks the dimensions of the crop whether it fits into the original and the format.
2.656643
2.423647
1.096134
data = self.cleaned_data formats = Format.objects.filter(name=data['name']) if self.instance: formats = formats.exclude(pk=self.instance.pk) exists_sites = [] for f in formats: for s in f.sites.all(): if s in data['sites']: exists_sites.append(s.__unicode__()) if len(exists_sites): raise ValidationError(ugettext("Format with this name exists for site(s): %s" % ", ".join(exists_sites))) return data
def clean(self)
Check format name uniqueness for sites :return: cleaned_data
3.205411
2.755124
1.163436
"Used in admin image 'crop tool'." try: photo = get_cached_object(Photo, pk=photo) format = get_cached_object(Format, pk=format) content = { 'error': False, 'image':settings.MEDIA_URL + photo.image, 'width':photo.width, 'height': photo.height, 'format_width':format.max_width, 'format_height':format.max_height } except (Photo.DoesNotExist, Format.DoesNotExist): content = {'error':True} return HttpResponse(simplejson.dumps(content))
def format_photo_json(self, request, photo, format)
Used in admin image 'crop tool'.
3.935493
3.114506
1.263601
self.update_model_dict() self.rc("background solid white") self.rc("setattr g display 0") # Hide all pseudobonds self.rc("~display #%i & :/isHet & ~:%s" % (self.model_dict[self.plipname], self.hetid))
def set_initial_representations(self)
Set the initial representations
33.350643
32.418709
1.028747
dct = {} models = self.chimera.openModels for md in models.list(): dct[md.name] = md.id self.model_dict = dct
def update_model_dict(self)
Updates the model dictionary
6.006602
5.730928
1.048103
atm_by_snum = {} for atom in self.model.atoms: atm_by_snum[atom.serialNumber] = atom return atm_by_snum
def atom_by_serialnumber(self)
Provides a dictionary mapping serial numbers to their atom objects.
3.183443
2.824841
1.126946
grp = self.getPseudoBondGroup("Hydrophobic Interactions-%i" % self.tid, associateWith=[self.model]) grp.lineType = self.chimera.Dash grp.lineWidth = 3 grp.color = self.colorbyname('gray') for i in self.plcomplex.hydrophobic_contacts.pairs_ids: self.bs_res_ids.append(i[0])
def show_hydrophobic(self)
Visualizes hydrophobic contacts.
17.34882
15.519883
1.117845
grp = self.getPseudoBondGroup("Hydrogen Bonds-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 for i in self.plcomplex.hbonds.ldon_id: b = grp.newPseudoBond(self.atoms[i[0]], self.atoms[i[1]]) b.color = self.colorbyname('blue') self.bs_res_ids.append(i[0]) for i in self.plcomplex.hbonds.pdon_id: b = grp.newPseudoBond(self.atoms[i[0]], self.atoms[i[1]]) b.color = self.colorbyname('blue') self.bs_res_ids.append(i[1])
def show_hbonds(self)
Visualizes hydrogen bonds.
4.80586
4.640824
1.035562
grp = self.getPseudoBondGroup("HalogenBonds-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 for i in self.plcomplex.halogen_bonds: b = grp.newPseudoBond(self.atoms[i[0]], self.atoms[i[1]]) b.color = self.colorbyname('turquoise') self.bs_res_ids.append(i.acc_id)
def show_halogen(self)
Visualizes halogen bonds.
11.907072
10.673741
1.115548
grp = self.getPseudoBondGroup("pi-Stacking-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 grp.lineType = self.chimera.Dash for i, stack in enumerate(self.plcomplex.pistacking): m = self.model r = m.newResidue("pseudoatoms", " ", 1, " ") centroid_prot = m.newAtom("CENTROID", self.chimera.Element("CENTROID")) x, y, z = stack.proteinring_center centroid_prot.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(centroid_prot) centroid_lig = m.newAtom("CENTROID", self.chimera.Element("CENTROID")) x, y, z = stack.ligandring_center centroid_lig.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(centroid_lig) b = grp.newPseudoBond(centroid_lig, centroid_prot) b.color = self.colorbyname('forest green') self.bs_res_ids += stack.proteinring_atoms
def show_stacking(self)
Visualizes pi-stacking interactions.
6.129849
5.711775
1.073195
grp = self.getPseudoBondGroup("Cation-Pi-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 grp.lineType = self.chimera.Dash for i, cat in enumerate(self.plcomplex.pication): m = self.model r = m.newResidue("pseudoatoms", " ", 1, " ") chargecenter = m.newAtom("CHARGE", self.chimera.Element("CHARGE")) x, y, z = cat.charge_center chargecenter.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(chargecenter) centroid = m.newAtom("CENTROID", self.chimera.Element("CENTROID")) x, y, z = cat.ring_center centroid.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(centroid) b = grp.newPseudoBond(centroid, chargecenter) b.color = self.colorbyname('orange') if cat.protcharged: self.bs_res_ids += cat.charge_atoms else: self.bs_res_ids += cat.ring_atoms
def show_cationpi(self)
Visualizes cation-pi interactions
6.188074
5.977813
1.035174
# Salt Bridges grp = self.getPseudoBondGroup("Salt Bridges-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 grp.lineType = self.chimera.Dash for i, sbridge in enumerate(self.plcomplex.saltbridges): m = self.model r = m.newResidue("pseudoatoms", " ", 1, " ") chargecenter1 = m.newAtom("CHARGE", self.chimera.Element("CHARGE")) x, y, z = sbridge.positive_center chargecenter1.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(chargecenter1) chargecenter2 = m.newAtom("CHARGE", self.chimera.Element("CHARGE")) x, y, z = sbridge.negative_center chargecenter2.setCoord(self.chimera.Coord(x, y, z)) r.addAtom(chargecenter2) b = grp.newPseudoBond(chargecenter1, chargecenter2) b.color = self.colorbyname('yellow') if sbridge.protispos: self.bs_res_ids += sbridge.positive_atoms else: self.bs_res_ids += sbridge.negative_atoms
def show_sbridges(self)
Visualizes salt bridges.
5.163919
4.981814
1.036554
grp = self.getPseudoBondGroup("Water Bridges-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 for i, wbridge in enumerate(self.plcomplex.waterbridges): c = grp.newPseudoBond(self.atoms[wbridge.water_id], self.atoms[wbridge.acc_id]) c.color = self.colorbyname('cornflower blue') self.water_ids.append(wbridge.water_id) b = grp.newPseudoBond(self.atoms[wbridge.don_id], self.atoms[wbridge.water_id]) b.color = self.colorbyname('cornflower blue') self.water_ids.append(wbridge.water_id) if wbridge.protisdon: self.bs_res_ids.append(wbridge.don_id) else: self.bs_res_ids.append(wbridge.acc_id)
def show_wbridges(self)
Visualizes water bridges
5.144268
4.949739
1.039301
grp = self.getPseudoBondGroup("Metal Coordination-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 for i, metal in enumerate(self.plcomplex.metal_complexes): c = grp.newPseudoBond(self.atoms[metal.metal_id], self.atoms[metal.target_id]) c.color = self.colorbyname('magenta') if metal.location == 'water': self.water_ids.append(metal.target_id) if metal.location.startswith('protein'): self.bs_res_ids.append(metal.target_id)
def show_metal(self)
Visualizes metal coordination.
10.076994
9.411375
1.070725
if not len(self.water_ids) == 0: # Hide all non-interacting water molecules water_selection = [] for wid in self.water_ids: water_selection.append('serialNumber=%i' % wid) self.rc("~display :HOH") self.rc("display :@/%s" % " or ".join(water_selection)) # Show all interacting binding site residues self.rc("~display #%i & ~:/isHet" % self.model_dict[self.plipname]) self.rc("display :%s" % ",".join([str(self.atoms[bsid].residue.id) for bsid in self.bs_res_ids])) self.rc("color lightblue :HOH")
def cleanup(self)
Clean up the visualization.
9.041387
8.811095
1.026137
self.rc("center #%i & :%s" % (self.model_dict[self.plipname], self.hetid))
def zoom_to_ligand(self)
Centers the view on the ligand and its binding site residues.
42.846928
39.701969
1.079214
self.rc("setattr a color gray @CENTROID") self.rc("setattr a radius 0.3 @CENTROID") self.rc("represent sphere @CENTROID") self.rc("setattr a color orange @CHARGE") self.rc("setattr a radius 0.4 @CHARGE") self.rc("represent sphere @CHARGE") self.rc("display :pseudoatoms")
def refinements(self)
Details for the visualization.
7.839466
7.829103
1.001324
"Write your forwards methods here." if not db.dry_run: for pl in orm['core.Placement'].objects.all(): pl.listing_set.update(publishable=pl.publishable) publishable = pl.publishable publishable.publish_from = pl.publish_from publishable.static = pl.static publishable.publish_to = pl.publish_to publishable.save(force_update=True)
def forwards(self, orm)
Write your forwards methods here.
5.697133
5.416038
1.0519
if hasattr(self.image, '_getexif'): self.rotate_exif() crop_box = self.crop_to_ratio() self.resize() return self.image, crop_box
def format(self)
Crop and resize the supplied image. Return the image and the crop_box used. If the input format is JPEG and in EXIF there is information about rotation, use it and rotate resulting image.
8.692067
4.074081
2.133504
f = self.fmt if f.flexible_height and f.flexible_max_height: flexw, flexh = self.fw, f.flexible_max_height flex_ratio = float(flexw) / flexh if abs(flex_ratio - self.image_ratio) < abs(self.format_ratio - self.image_ratio): self.fh = flexh self.format_ratio = flex_ratio
def set_format(self)
Check if the format has a flexible height, if so check if the ratio of the flexible format is closer to the actual ratio of the image. If so use that instead of the default values (f.max_width, f.max_height).
5.141628
3.440133
1.494601
# check if the flexible height option is active and applies self.set_format() if self.fmt.nocrop: # cropping not allowed return if self.crop_box: # crop coordinates passed in explicitely return self.crop_box iw, ih = self.image.size if iw <= self.fw and ih <= self.fh: # image fits in the target format, no need to crop return if self.image_ratio < self.format_ratio: # image taller than format diff = ih - (iw * self.fh / self.fw) return (0, diff // 2 , iw, ih - diff // 2) elif self.image_ratio > self.format_ratio: # image wider than format diff = iw - (ih * self.fw / self.fh) return (diff // 2, 0, iw - diff // 2, ih) else: # same ratio as format return
def get_crop_box(self)
Get coordinates of the rectangle defining the new image boundaries. It takes into acount any specific wishes from the model (explicitely passed in crop_box), the desired format and it's options (flexible_height, nocrop) and mainly it's ratio. After dimensions of the format were specified (see set_format), crop the image to the same ratio.
3.787252
3.319593
1.140878
if not self.important_box: return crop_box # shortcuts ib = self.important_box cl, ct, cr, cb = crop_box iw, ih = self.image.size # compute the move of crop center onto important center move_horiz = (ib[0] + ib[2]) // 2 - (cl + cr) // 2 move_verti = (ib[1] + ib[3]) // 2 - (ct + cb) // 2 # make sure we don't get out of the image # ... horizontaly if move_horiz > 0: move_horiz = min(iw - cr, move_horiz) else: move_horiz = max(-cl, move_horiz) # .. and verticaly if move_verti > 0: move_verti = min(ih - cb, move_verti) else: move_verti = max(-ct, move_verti) # move the crop_box return (cl + move_horiz, ct + move_verti, cr + move_horiz, cb + move_verti)
def center_important_part(self, crop_box)
If important_box was specified, make sure it lies inside the crop box.
2.31687
2.210384
1.048175
" Get crop coordinates and perform the crop if we get any. " crop_box = self.get_crop_box() if not crop_box: return crop_box = self.center_important_part(crop_box) iw, ih = self.image.size # see if we want to crop something from outside of the image out_of_photo = min(crop_box[0], crop_box[1]) < 0 or crop_box[2] > iw or crop_box[3] > ih # check whether there's transparent information in the image transparent = self.image.mode in ('RGBA', 'LA') if photos_settings.DEFAULT_BG_COLOR != 'black' and out_of_photo and not transparent: # if we do, just crop the image to the portion that will be visible updated_crop_box = ( max(0, crop_box[0]), max(0, crop_box[1]), min(iw, crop_box[2]), min(ih, crop_box[3]), ) cropped = self.image.crop(updated_crop_box) # create new image of the proper size and color self.image = Image.new('RGB', (crop_box[2] - crop_box[0], crop_box[3] - crop_box[1]), photos_settings.DEFAULT_BG_COLOR) # and paste the cropped part into it's proper position self.image.paste(cropped, (abs(min(crop_box[0], 0)), abs(min(crop_box[1], 0)))) else: # crop normally if not the case self.image = self.image.crop(crop_box) return crop_box
def crop_to_ratio(self)
Get crop coordinates and perform the crop if we get any.
3.432797
3.117042
1.1013
f = self.fmt iw, ih = self.image.size if not f.stretch and iw <= self.fw and ih <= self.fh: return if self.image_ratio == self.format_ratio: # same ratio, just resize return (self.fw, self.fh) elif self.image_ratio < self.format_ratio: # image taller than format return (self.fh * iw / ih, self.fh) else: # self.image_ratio > self.format_ratio # image wider than format return (self.fw, self.fw * ih / iw)
def get_resized_size(self)
Get target size for the stretched or shirnked image to fit within the target dimensions. Do not stretch images if not format.stretch. Note that this method is designed to operate on already cropped image.
3.143265
2.887981
1.088395
resized_size = self.get_resized_size() if not resized_size: return self.image = self.image.resize(resized_size, Image.ANTIALIAS)
def resize(self)
Get target size for a cropped image and do the resizing if we got anything usable.
3.194054
2.755544
1.159137
exif = self.image._getexif() or {} rotation = exif.get(TAGS['Orientation'], 1) rotations = { 6: -90, 3: -180, 8: -270, } if rotation not in rotations: return self.image = self.image.rotate(rotations[rotation])
def rotate_exif(self)
Rotate image via exif information. Only 90, 180 and 270 rotations are supported.
2.836003
2.689578
1.054442
idlist = list(set(idlist)) # Remove duplicates if not selection_exists: cmd.select(selname, 'None') # Empty selection first idchunks = [idlist[i:i+chunksize] for i in range(0, len(idlist), chunksize)] for idchunk in idchunks: cmd.select(selname, '%s or (id %s)' % (selname, '+'.join(map(str, idchunk)))) if restrict is not None: cmd.select(selname, '%s and %s' % (selname, restrict))
def select_by_ids(selname, idlist, selection_exists=False, chunksize=20, restrict=None)
Selection with a large number of ids concatenated into a selection list can cause buffer overflow in PyMOL. This function takes a selection name and and list of IDs (list of integers) as input and makes a careful step-by-step selection (packages of 20 by default)
2.319483
2.151895
1.077879
try: ct = CONTENT_TYPE_MAPPING[ct_name] except KeyError: for model in models.get_models(): if ct_name == slugify(model._meta.verbose_name_plural): ct = ContentType.objects.get_for_model(model) CONTENT_TYPE_MAPPING[ct_name] = ct break else: raise Http404 return ct
def get_content_type(ct_name)
A helper function that returns ContentType object based on its slugified verbose_name_plural. Results of this function is cached to improve performance. :Parameters: - `ct_name`: Slugified verbose_name_plural of the target model. :Exceptions: - `Http404`: if no matching ContentType is found
2.246647
2.22804
1.008351
def category_templates(category, incomplete_template, params): paths = [] parts = category.path.split('/') for i in reversed(range(1, len(parts) + 1)): params.update({'pth': '/'.join(parts[:i])}) paths.append(incomplete_template % params) return paths FULL = 'page/category/%(pth)s/content_type/%(app_label)s.%(model_label)s/%(slug)s/%(name)s' FULL_NO_SLUG = 'page/category/%(pth)s/content_type/%(app_label)s.%(model_label)s/%(name)s' BY_CATEGORY = 'page/category/%(pth)s/%(name)s' BY_CONTENT_TYPE = 'page/content_type/%(app_label)s.%(model_label)s/%(name)s' templates = [] params = {'name': name} if app_label and model_label: params.update({'app_label': app_label, 'model_label': model_label}) if slug: params.update({'slug': slug}) if category: if app_label and model_label: if slug: templates += category_templates(category, FULL, params) templates += category_templates(category, FULL_NO_SLUG, params) templates += category_templates(category, BY_CATEGORY, params) if app_label and model_label: templates.append(BY_CONTENT_TYPE % params) templates.append('page/%(name)s' % params) return templates
def get_templates(name, slug=None, category=None, app_label=None, model_label=None)
Returns templates in following format and order: * ``'page/category/%s/content_type/%s.%s/%s/%s' % (<CATEGORY_PART>, app_label, model_label, slug, name)`` * ``'page/category/%s/content_type/%s.%s/%s' % (<CATEGORY_PART>, app_label, model_label, name)`` * ``'page/category/%s/%s' % (<CATEGORY_PART>, name)`` * ``'page/content_type/%s.%s/%s' % (app_label, model_label, name)`` * ``'page/%s' % name`` Where ``<CATEGORY_PART>`` is derived from ``path`` attribute by these rules: * When **no** parent exists (this is therfore root category) ``<CATEGORY_PART> = path`` * When exactly **one** parent exists: ``<CATEGORY_PART> = path`` * When multiple parent exist (category nestedN is deep in the tree):: <CATEGORY_PART> = ( 'nested1/nested2/../nestedN/', 'nested1/nested2/../nestedN-1/', ... 'nested1' ) Examples. Three categories exist having slugs **ROOT**, **NESTED1**, **NESTED2** where **NESTED2**'s parent is **NESTED1**.:: ROOT \ NESTED1 \ NESTED2 * For **ROOT**, ``<CATEGORY_PART>`` is only one - "ROOT". * For **NESTED1**, ``<CATEGORY_PART>`` is only one - "NESTED1". * For **NESTED2**, ``<CATEGORY_PART>`` has two elements: "NESTED1/NESTED2" and "NESTED1".
2.153973
1.969241
1.093809
slug = publishable.slug category = publishable.category app_label = publishable.content_type.app_label model_label = publishable.content_type.model return get_templates(name, slug, category, app_label, model_label)
def get_templates_from_publishable(name, publishable)
Returns the same template list as `get_templates` but gets values from `Publishable` instance.
2.627758
2.411276
1.089779
t_list = [] if name: t_list.append('page/export/%s.html' % name) t_list.append('page/export/banner.html') try: cat = Category.objects.get_by_tree_path('') except Category.DoesNotExist: raise Http404() listing = Listing.objects.get_listing(count=count, category=cat) return render( request, t_list, { 'category' : cat, 'listing' : listing }, content_type=content_type )
def export(request, count, name='', content_type=None)
Export banners. :Parameters: - `count`: number of objects to pass into the template - `name`: name of the template ( page/export/banner.html is default ) - `models`: list of Model classes to include
3.834966
3.639838
1.053609
" Extract parameters for `get_templates` from the context. " if not template_name: template_name = self.template_name kw = {} if 'object' in context: o = context['object'] kw['slug'] = o.slug if context.get('content_type', False): ct = context['content_type'] kw['app_label'] = ct.app_label kw['model_label'] = ct.model return get_templates(template_name, category=context['category'], **kw)
def get_templates(self, context, template_name=None)
Extract parameters for `get_templates` from the context.
3.635131
2.964647
1.22616
" Return ARCHIVE_ENTRY_YEAR from settings (if exists) or year of the newest object in category " year = getattr(settings, 'ARCHIVE_ENTRY_YEAR', None) if not year: n = now() try: year = Listing.objects.filter( category__site__id=settings.SITE_ID, category__tree_path__startswith=category.tree_path, publish_from__lte=n ).values('publish_from')[0]['publish_from'].year except: year = n.year return year
def _archive_entry_year(self, category)
Return ARCHIVE_ENTRY_YEAR from settings (if exists) or year of the newest object in category
5.065922
3.338864
1.517259
if not self.width or not self.height: self.width, self.height = self.image.width, self.image.height # prefill the slug with the ID, it requires double save if not self.id: img = self.image # store dummy values first... w, h = self.width, self.height self.image = '' self.width, self.height = w, h self.slug = '' super(Photo, self).save(force_insert=True) # ... so that we can generate the slug self.slug = str(self.id) + '-' + slugify(self.title) # truncate slug in order to fit in an ImageField and/or paths in Redirects self.slug = self.slug[:64] # .. tha will be used in the image's upload_to function self.image = img # and the image will be saved properly super(Photo, self).save(force_update=True) else: try: old = Photo.objects.get(pk=self.pk) force_update = True # delete formatedphotos if new image was uploaded if old.image != self.image: for f_photo in self.formatedphoto_set.all(): f_photo.delete() except Photo.DoesNotExist: # somebody is just trying to create new model with given PK force_update = False super(Photo, self).save(force_update=force_update)
def save(self, **kwargs)
Overrides models.Model.save. - Generates slug. - Saves image file.
4.998115
4.848816
1.030791
if photos_settings.DEBUG: return self.get_placeholder_img() out = { 'blank': True, 'width': self.max_width, 'height': self.max_height, 'url': photos_settings.EMPTY_IMAGE_SITE_PREFIX + 'img/empty/%s.png' % (self.name), } return out
def get_blank_img(self)
Return fake ``FormatedPhoto`` object to be used in templates when an error occurs in image generation.
5.408048
5.153828
1.049326
pars = { 'width': self.max_width, 'height': self.max_height } out = { 'placeholder': True, 'width': self.max_width, 'height': self.max_height, 'url': photos_settings.DEBUG_PLACEHOLDER_PROVIDER_TEMPLATE % pars } return out
def get_placeholder_img(self)
Returns fake ``FormatedPhoto`` object grabbed from image placeholder generator service for the purpose of debugging when images are not available but we still want to see something.
4.490426
3.769634
1.19121
if self.id: for f_photo in self.formatedphoto_set.all(): f_photo.delete() super(Format, self).save(**kwargs)
def save(self, **kwargs)
Overrides models.Model.save. - Delete formatted photos if format save and not now created (because of possible changes)
7.180316
3.657134
1.963373
stretched_photo, crop_box = self._generate_img() # set crop_box to (0,0,0,0) if photo not cropped if not crop_box: crop_box = 0, 0, 0, 0 self.crop_left, self.crop_top, right, bottom = crop_box self.crop_width = right - self.crop_left self.crop_height = bottom - self.crop_top self.width, self.height = stretched_photo.size f = StringIO() imgf = (self.photo._get_image().format or Image.EXTENSION[path.splitext(self.photo.image.name)[1]]) stretched_photo.save(f, format=imgf, quality=self.format.resample_quality) f.seek(0) self.image.save(self.file(), ContentFile(f.read()), save)
def generate(self, save=True)
Generates photo file in current format. If ``save`` is ``True``, file is saved too.
3.860421
3.760967
1.026444
self.remove_file() if not self.image: self.generate(save=False) else: self.image.name = self.file() super(FormatedPhoto, self).save(**kwargs)
def save(self, **kwargs)
Overrides models.Model.save - Removes old file from the FS - Generates new file.
6.985986
6.166306
1.132929
if photos_settings.FORMATED_PHOTO_FILENAME is not None: return photos_settings.FORMATED_PHOTO_FILENAME(self) source_file = path.split(self.photo.image.name) return path.join(source_file[0], str(self.format.id) + '-' + source_file[1])
def file(self)
Method returns formated photo path - derived from format.id and source Photo filename
5.575099
3.409494
1.635169
''' get category from template variable or from tree_path ''' cat = template_var.resolve(context) if isinstance(cat, basestring): cat = Category.objects.get_by_tree_path(cat) return cat
def _get_category_from_pars_var(template_var, context)
get category from template variable or from tree_path
5.394299
3.020342
1.785989
bits = token.split_contents() nodelist = parser.parse(('end' + bits[0],)) parser.delete_first_token() return _parse_position_tag(bits, nodelist)
def position(parser, token)
Render a given position for category. If some position is not defined for first category, position from its parent category is used unless nofallback is specified. Syntax:: {% position POSITION_NAME for CATEGORY [nofallback] %}{% endposition %} {% position POSITION_NAME for CATEGORY using BOX_TYPE [nofallback] %}{% endposition %} Example usage:: {% position top_left for category %}{% endposition %}
2.740235
4.5036
0.608454
bits = list(token.split_contents()) end_tag = 'end' + bits[0] nofallback = False if bits[-1] == 'nofallback': nofallback = True bits.pop() if len(bits) >= 4 and bits[-2] == 'for': category = template.Variable(bits.pop()) pos_names = bits[1:-1] else: raise TemplateSyntaxError('Invalid syntax: {% ifposition POSITION_NAME ... for CATEGORY [nofallback] %}') nodelist_true = parser.parse(('else', end_tag)) token = parser.next_token() if token.contents == 'else': nodelist_false = parser.parse((end_tag,)) parser.delete_first_token() else: nodelist_false = template.NodeList() return IfPositionNode(category, pos_names, nofallback, nodelist_true, nodelist_false)
def ifposition(parser, token)
Syntax:: {% ifposition POSITION_NAME ... for CATEGORY [nofallback] %} {% else %} {% endifposition %}
2.476175
2.016989
1.227659
if not config.INTRA: return pairings filtered1_pairings = [p for p in pairings if (p.resnr, p.reschain) != (p.resnr_l, p.reschain_l)] already_considered = [] filtered2_pairings = [] for contact in filtered1_pairings: try: dist = 'D{}'.format(round(contact.distance, 2)) except AttributeError: try: dist = 'D{}'.format(round(contact.distance_ah, 2)) except AttributeError: dist = 'D{}'.format(round(contact.distance_aw, 2)) res1, res2 = ''.join([str(contact.resnr), contact.reschain]), ''.join( [str(contact.resnr_l), contact.reschain_l]) data = {res1, res2, dist} if data not in already_considered: filtered2_pairings.append(contact) already_considered.append(data) return filtered2_pairings
def filter_contacts(pairings)
Filter interactions by two criteria: 1. No interactions between the same residue (important for intra mode). 2. No duplicate interactions (A with B and B with A, also important for intra mode).
3.087933
2.885758
1.07006
data = namedtuple('hydroph_interaction', 'bsatom bsatom_orig_idx ligatom ligatom_orig_idx ' 'distance restype resnr reschain restype_l, resnr_l, reschain_l') pairings = [] for a, b in itertools.product(atom_set_a, atom_set_b): if a.orig_idx == b.orig_idx: continue e = euclidean3d(a.atom.coords, b.atom.coords) if not config.MIN_DIST < e < config.HYDROPH_DIST_MAX: continue restype, resnr, reschain = whichrestype(a.atom), whichresnumber(a.atom), whichchain(a.atom) restype_l, resnr_l, reschain_l = whichrestype(b.orig_atom), whichresnumber(b.orig_atom), whichchain(b.orig_atom) contact = data(bsatom=a.atom, bsatom_orig_idx=a.orig_idx, ligatom=b.atom, ligatom_orig_idx=b.orig_idx, distance=e, restype=restype, resnr=resnr, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l) pairings.append(contact) return filter_contacts(pairings)
def hydrophobic_interactions(atom_set_a, atom_set_b)
Detection of hydrophobic pliprofiler between atom_set_a (binding site) and atom_set_b (ligand). Definition: All pairs of qualified carbon atoms within a distance of HYDROPH_DIST_MAX
2.676915
2.596105
1.031128
data = namedtuple('hbond', 'a a_orig_idx d d_orig_idx h distance_ah distance_ad angle type protisdon resnr ' 'restype reschain resnr_l restype_l reschain_l sidechain atype dtype') pairings = [] for acc, don in itertools.product(acceptors, donor_pairs): if not typ == 'strong': continue # Regular (strong) hydrogen bonds dist_ah = euclidean3d(acc.a.coords, don.h.coords) dist_ad = euclidean3d(acc.a.coords, don.d.coords) if not config.MIN_DIST < dist_ad < config.HBOND_DIST_MAX: continue vec1, vec2 = vector(don.h.coords, don.d.coords), vector(don.h.coords, acc.a.coords) v = vecangle(vec1, vec2) if not v > config.HBOND_DON_ANGLE_MIN: continue protatom = don.d.OBAtom if protisdon else acc.a.OBAtom ligatom = don.d.OBAtom if not protisdon else acc.a.OBAtom is_sidechain_hbond = protatom.GetResidue().GetAtomProperty(protatom, 8) # Check if sidechain atom resnr = whichresnumber(don.d) if protisdon else whichresnumber(acc.a) resnr_l = whichresnumber(acc.a_orig_atom) if protisdon else whichresnumber(don.d_orig_atom) restype = whichrestype(don.d) if protisdon else whichrestype(acc.a) restype_l = whichrestype(acc.a_orig_atom) if protisdon else whichrestype(don.d_orig_atom) reschain = whichchain(don.d) if protisdon else whichchain(acc.a) rechain_l = whichchain(acc.a_orig_atom) if protisdon else whichchain(don.d_orig_atom) # Next line prevents H-Bonds within amino acids in intermolecular interactions if config.INTRA is not None and whichresnumber(don.d) == whichresnumber(acc.a): continue # Next line prevents backbone-backbone H-Bonds if config.INTRA is not None and protatom.GetResidue().GetAtomProperty(protatom, 8) and ligatom.GetResidue().GetAtomProperty(ligatom, 8): continue contact = data(a=acc.a, a_orig_idx=acc.a_orig_idx, d=don.d, d_orig_idx=don.d_orig_idx, h=don.h, distance_ah=dist_ah, distance_ad=dist_ad, angle=v, type=typ, protisdon=protisdon, resnr=resnr, restype=restype, reschain=reschain, resnr_l=resnr_l, restype_l=restype_l, reschain_l=rechain_l, 
sidechain=is_sidechain_hbond, atype=acc.a.type, dtype=don.d.type) pairings.append(contact) return filter_contacts(pairings)
def hbonds(acceptors, donor_pairs, protisdon, typ)
Detection of hydrogen bonds between sets of acceptors and donor pairs. Definition: All pairs of hydrogen bond acceptor and donors with donor hydrogens and acceptor showing a distance within HBOND DIST MIN and HBOND DIST MAX and donor angles above HBOND_DON_ANGLE_MIN
2.857415
2.768798
1.032006
data = namedtuple( 'pistack', 'proteinring ligandring distance angle offset type restype resnr reschain restype_l resnr_l reschain_l') pairings = [] for r, l in itertools.product(rings_bs, rings_lig): # DISTANCE AND RING ANGLE CALCULATION d = euclidean3d(r.center, l.center) b = vecangle(r.normal, l.normal) a = min(b, 180 - b if not 180 - b < 0 else b) # Smallest of two angles, depending on direction of normal # RING CENTER OFFSET CALCULATION (project each ring center into the other ring) proj1 = projection(l.normal, l.center, r.center) proj2 = projection(r.normal, r.center, l.center) offset = min(euclidean3d(proj1, l.center), euclidean3d(proj2, r.center)) # RECEPTOR DATA resnr, restype, reschain = whichresnumber(r.atoms[0]), whichrestype(r.atoms[0]), whichchain(r.atoms[0]) resnr_l, restype_l, reschain_l = whichresnumber(l.orig_atoms[0]), whichrestype( l.orig_atoms[0]), whichchain(l.orig_atoms[0]) # SELECTION BY DISTANCE, ANGLE AND OFFSET passed = False if not config.MIN_DIST < d < config.PISTACK_DIST_MAX: continue if 0 < a < config.PISTACK_ANG_DEV and offset < config.PISTACK_OFFSET_MAX: ptype = 'P' passed = True if 90 - config.PISTACK_ANG_DEV < a < 90 + config.PISTACK_ANG_DEV and offset < config.PISTACK_OFFSET_MAX: ptype = 'T' passed = True if passed: contact = data(proteinring=r, ligandring=l, distance=d, angle=a, offset=offset, type=ptype, resnr=resnr, restype=restype, reschain=reschain, resnr_l=resnr_l, restype_l=restype_l, reschain_l=reschain_l) pairings.append(contact) return filter_contacts(pairings)
def pistacking(rings_bs, rings_lig)
Return all pi-stackings between the given aromatic ring systems in receptor and ligand.
3.098796
3.086699
1.003919
def pication(rings, pos_charged, protcharged):
    """Return all pi-cation interactions between aromatic rings and positively charged groups.

    For tertiary (and quaternary) amines on the ligand, an additional angle
    between the ring normal and the amine plane normal is checked so that
    interactions going 'through' the ligand are rejected.

    :param rings: aromatic ring descriptors (provide .center, .normal, .atoms, .orig_atoms)
    :param pos_charged: positively charged group descriptors
    :param protcharged: True if the charged group belongs to the protein
    :return: filtered list of pication namedtuple contacts
    """
    data = namedtuple('pication',
                      'ring charge distance offset type restype resnr reschain '
                      'restype_l resnr_l reschain_l protcharged')
    contacts = []
    if len(rings) == 0 or len(pos_charged) == 0:
        return contacts
    for ring in rings:
        ring_center = ring.center
        for group in pos_charged:
            dist = euclidean3d(ring_center, group.center)
            # Project the charge center onto the ring plane; the offset is its
            # distance from the ring center within that plane.
            proj = projection(ring.normal, ring.center, group.center)
            offset = euclidean3d(proj, ring.center)
            if not config.MIN_DIST < dist < config.PICATION_DIST_MAX or not offset < config.PISTACK_OFFSET_MAX:
                continue
            if type(group).__name__ == 'lcharge' and group.fgroup == 'tertamine':
                # Special case: ligand tertiary amine. Check an additional angle,
                # otherwise we might report a pi-cation interaction 'through' the ligand.
                neighbors = [nb for nb in OBAtomAtomIter(group.atoms[0].OBAtom)]
                neighbor_coords = [(nb.x(), nb.y(), nb.z()) for nb in neighbors]
                amine_normal = np.cross(vector(neighbor_coords[0], neighbor_coords[1]),
                                        vector(neighbor_coords[2], neighbor_coords[0]))
                angle = vecangle(ring.normal, amine_normal)
                # Take the smaller of the two possible angles (normal direction is arbitrary)
                angle = min(angle, 180 - angle if not 180 - angle < 0 else angle)
                if not angle > 30.0:
                    resnr, restype = whichresnumber(ring.atoms[0]), whichrestype(ring.atoms[0])
                    reschain = whichchain(ring.atoms[0])
                    resnr_l, restype_l = whichresnumber(group.orig_atoms[0]), whichrestype(group.orig_atoms[0])
                    reschain_l = whichchain(group.orig_atoms[0])
                    contacts.append(data(ring=ring, charge=group, distance=dist, offset=offset,
                                         type='regular', restype=restype, resnr=resnr,
                                         reschain=reschain, restype_l=restype_l, resnr_l=resnr_l,
                                         reschain_l=reschain_l, protcharged=protcharged))
                break
            # Residue info comes from the protein side, ligand info from the other side
            resnr = whichresnumber(group.atoms[0]) if protcharged else whichresnumber(ring.atoms[0])
            resnr_l = whichresnumber(ring.orig_atoms[0]) if protcharged else whichresnumber(group.orig_atoms[0])
            restype = whichrestype(group.atoms[0]) if protcharged else whichrestype(ring.atoms[0])
            restype_l = whichrestype(ring.orig_atoms[0]) if protcharged else whichrestype(group.orig_atoms[0])
            reschain = whichchain(group.atoms[0]) if protcharged else whichchain(ring.atoms[0])
            reschain_l = whichchain(ring.orig_atoms[0]) if protcharged else whichchain(group.orig_atoms[0])
            contacts.append(data(ring=ring, charge=group, distance=dist, offset=offset, type='regular',
                                 restype=restype, resnr=resnr, reschain=reschain,
                                 restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l,
                                 protcharged=protcharged))
    return filter_contacts(contacts)
2.874685
2.751164
1.044898
def saltbridge(poscenter, negcenter, protispos):
    """Detect all salt bridges (contacts between centers of positive and negative charge).

    :param poscenter: positive charge center descriptors
    :param negcenter: negative charge center descriptors
    :param protispos: True if the positive charge sits on the protein side
    :return: filtered list of saltbridge namedtuple contacts
    """
    data = namedtuple('saltbridge',
                      'positive negative distance protispos resnr restype reschain '
                      'resnr_l restype_l reschain_l')
    pairings = []
    for pos, neg in itertools.product(poscenter, negcenter):
        dist = euclidean3d(pos.center, neg.center)
        if not config.MIN_DIST < dist < config.SALTBRIDGE_DIST_MAX:
            continue
        # Residue data from the protein-side center, ligand data from the other side
        if protispos:
            prot, lig = pos, neg
        else:
            prot, lig = neg, pos
        lig_atom = lig.orig_atoms[0]
        pairings.append(data(positive=pos, negative=neg, distance=dist, protispos=protispos,
                             resnr=prot.resnr, restype=prot.restype, reschain=prot.reschain,
                             resnr_l=whichresnumber(lig_atom), restype_l=whichrestype(lig_atom),
                             reschain_l=whichchain(lig_atom)))
    return filter_contacts(pairings)
2.486015
2.448573
1.015292
def halogen(acceptor, donor):
    """Detect all halogen bonds of the type Y-O...X-C.

    :param acceptor: halogen-bond acceptor groups (with .o, .y atoms)
    :param donor: halogen-bond donor groups (with .x halogen and .c carbon atoms)
    :return: filtered list of halogenbond namedtuple contacts
    """
    data = namedtuple('halogenbond', 'acc acc_orig_idx don don_orig_idx distance don_angle acc_angle restype '
                      'resnr reschain restype_l resnr_l reschain_l donortype acctype sidechain')
    pairings = []
    for acc, don in itertools.product(acceptor, donor):
        dist = euclidean3d(acc.o.coords, don.x.coords)
        if not config.MIN_DIST < dist < config.HALOGEN_DIST_MAX:
            continue
        # Angle at the acceptor oxygen (Y-O...X) and at the halogen (O...X-C)
        acc_angle = vecangle(vector(acc.o.coords, acc.y.coords),
                             vector(acc.o.coords, don.x.coords))
        don_angle = vecangle(vector(don.x.coords, acc.o.coords),
                             vector(don.x.coords, don.c.coords))
        # Check whether the acceptor oxygen is a sidechain atom
        is_sidechain_hal = acc.o.OBAtom.GetResidue().GetAtomProperty(acc.o.OBAtom, 8)
        if not config.HALOGEN_ACC_ANGLE - config.HALOGEN_ANGLE_DEV < acc_angle \
                < config.HALOGEN_ACC_ANGLE + config.HALOGEN_ANGLE_DEV:
            continue
        if not config.HALOGEN_DON_ANGLE - config.HALOGEN_ANGLE_DEV < don_angle \
                < config.HALOGEN_DON_ANGLE + config.HALOGEN_ANGLE_DEV:
            continue
        pairings.append(data(acc=acc, acc_orig_idx=acc.o_orig_idx, don=don, don_orig_idx=don.x_orig_idx,
                             distance=dist, don_angle=don_angle, acc_angle=acc_angle,
                             restype=whichrestype(acc.o), resnr=whichresnumber(acc.o),
                             reschain=whichchain(acc.o),
                             restype_l=whichrestype(don.orig_x), resnr_l=whichresnumber(don.orig_x),
                             reschain_l=whichchain(don.orig_x),
                             donortype=don.x.OBAtom.GetType(), acctype=acc.o.type,
                             sidechain=is_sidechain_hal))
    return filter_contacts(pairings)
2.836025
2.795705
1.014422
def getdata(self, tree, location, force_string=False):
    """Get XML text data from a specific element and coerce its type.

    :param tree: XML element supporting xpath queries
    :param location: xpath expression addressing the element
    :param force_string: if True, skip type coercion and return the raw string
    :return: None if nothing was found; otherwise bool for 'True'/'False',
        int or float when parseable, else the raw string
    """
    hits = tree.xpath('%s/text()' % location)
    if not hits:
        return None
    raw = hits[0]
    if force_string:
        return raw
    if raw == 'True':
        return True
    if raw == 'False':
        return False
    # Try numeric coercion: int first, then float; fall back to the string
    for caster in (int, float):
        try:
            return caster(raw)
        except ValueError:
            continue
    return raw
2.253089
2.179292
1.033863
def getcoordinates(self, tree, location):
    """Get 3D coordinates from a specific element in PLIP XML.

    :param tree: XML element supporting xpath queries
    :param location: element name whose child texts hold the coordinates
    :return: tuple of floats (one per coordinate component)
    """
    values = tree.xpath('.//%s/*/text()' % location)
    return tuple(map(float, values))
7.631668
6.449932
1.183217
def get_atom_mapping(self):
    """Parse the ligand atom mapping between SMILES and PDB numbering.

    Reads the 'mappings/smiles_to_pdb' text (format "s1:p1,s2:p2,...") from
    self.bindingsite and stores both directions in self.mappings; both entries
    are None when no mapping is present.
    """
    raw = self.bindingsite.xpath('mappings/smiles_to_pdb/text()')
    if raw == []:
        self.mappings = {'smiles_to_pdb': None, 'pdb_to_smiles': None}
        return
    forward = {}
    for pair in raw[0].split(','):
        smiles_idx, pdb_idx = pair.split(':')
        forward[int(smiles_idx)] = int(pdb_idx)
    self.mappings = {'smiles_to_pdb': forward,
                     'pdb_to_smiles': {pdb: smi for smi, pdb in forward.items()}}
2.777321
2.680603
1.036081
def get_counts(self):
    """Count the interaction types and backbone hydrogen bonding in a binding site.

    :return: dict with one count per interaction type, backbone and
        non-backbone hydrogen bond counts, and a grand total.
    """
    # Hydrogen bonds involving backbone atoms (sidechain flag unset)
    backbone_hbonds = sum(1 for hbond in self.hbonds if not hbond.sidechain)
    counts = {
        'hydrophobics': len(self.hydrophobics),
        'hbonds': len(self.hbonds),
        'wbridges': len(self.wbridges),
        'sbridges': len(self.sbridges),
        'pistacks': len(self.pi_stacks),
        'pications': len(self.pi_cations),
        'halogens': len(self.halogens),
        'metal': len(self.metal_complexes),
        'hbond_back': backbone_hbonds,
        'hbond_nonback': len(self.hbonds) - backbone_hbonds,
    }
    # Total over the primary interaction types (hbond_back/nonback are
    # already included via 'hbonds')
    counts['total'] = sum(counts[key] for key in
                          ('hydrophobics', 'hbonds', 'wbridges', 'sbridges',
                           'pistacks', 'pications', 'halogens', 'metal'))
    return counts
2.588573
2.426456
1.066812