code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if self.tagSet:
    return Set.tagMap.fget(self)
else:
    return self.componentType.tagMapUnique
def tagMap(self)
Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping ASN.1 tags to ASN.1 objects contained within callee.
12.424328
12.532681
0.991354
if self._currentIdx is None:
    raise error.PyAsn1Error('Component not chosen')
else:
    c = self._componentValues[self._currentIdx]
    if innerFlag and isinstance(c, Choice):
        return c.getComponent(innerFlag)
    else:
        return c
def getComponent(self, innerFlag=False)
Return currently assigned component of the |ASN.1| object. Returns ------- : :py:class:`~pyasn1.type.base.PyAsn1Item` a PyASN1 object
4.370122
4.110163
1.063248
if self._currentIdx is None:
    raise error.PyAsn1Error('Component not chosen')
else:
    if innerFlag:
        c = self._componentValues[self._currentIdx]
        if isinstance(c, Choice):
            return c.getName(innerFlag)
    return self.componentType.getNameByPosition(self._currentIdx)
def getName(self, innerFlag=False)
Return the name of currently assigned component of the |ASN.1| object. Returns ------- : :py:class:`str` |ASN.1| component name
4.731122
4.671808
1.012696
if self._currentIdx is None:
    return False

componentValue = self._componentValues[self._currentIdx]

return componentValue is not noValue and componentValue.isValue
def isValue(self)
Indicate that |ASN.1| object represents ASN.1 value. If *isValue* is `False` then this object represents just ASN.1 schema. If *isValue* is `True` then, in addition to its ASN.1 schema features, this object can also be used like a Python built-in object (e.g. `int`, `str`, `dict` etc.). Returns ------- : :class:`bool` :class:`False` if object represents just ASN.1 schema. :class:`True` if object represents ASN.1 schema and can be used as a normal value. Note ---- There is an important distinction between PyASN1 schema and value objects. The PyASN1 schema objects can only participate in ASN.1 schema-related operations (e.g. defining or testing the structure of the data). Most obvious uses of ASN.1 schema is to guide serialisation codecs whilst encoding/decoding serialised ASN.1 contents. The PyASN1 value objects can **additionally** participate in many operations involving regular Python objects (e.g. arithmetic, comprehension etc).
8.279787
13.955416
0.593303
try:
    return self._tagMap
except AttributeError:
    self._tagMap = tagmap.TagMap(
        {self.tagSet: self},
        {eoo.endOfOctets.tagSet: eoo.endOfOctets},
        self
    )
    return self._tagMap
def tagMap(self)
Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping ASN.1 tags to ASN.1 objects contained within callee.
5.066679
3.724877
1.360227
path = [(node.x, node.y)]
while node.parent:
    node = node.parent
    path.append((node.x, node.y))
path.reverse()
return path
def backtrace(node)
Backtrace according to the parent records and return the path. (including both start and end nodes)
2.091632
2.009398
1.040925
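A minimal sketch of backtrace() in action, assuming nodes expose x, y and parent attributes; the _Node class here is a hypothetical stand-in for the library's node type.

# three chained nodes: a <- b <- c
class _Node:  # hypothetical stand-in for the library's Node class
    def __init__(self, x, y, parent=None):
        self.x, self.y, self.parent = x, y, parent

a = _Node(0, 0)
b = _Node(1, 0, parent=a)
c = _Node(1, 1, parent=b)
print(backtrace(c))  # expected: [(0, 0), (1, 0), (1, 1)]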
path_a = backtrace(node_a)
path_b = backtrace(node_b)
path_b.reverse()
return path_a + path_b
def bi_backtrace(node_a, node_b)
Backtrace from start and end node, returns the path for bi-directional A* (including both start and end nodes)
2.314172
2.293479
1.009023
'''
Given the start and end coordinates, return all the coordinates lying
on the line formed by these coordinates, based on Bresenham's algorithm.
http://en.wikipedia.org/wiki/Bresenham's_line_algorithm#Simplification
'''
line = []
x0, y0 = coords_a
x1, y1 = coords_b
dx = abs(x1 - x0)
dy = abs(y1 - y0)
sx = 1 if x0 < x1 else -1
sy = 1 if y0 < y1 else -1
err = dx - dy

while True:
    line += [[x0, y0]]
    if x0 == x1 and y0 == y1:
        break

    e2 = err * 2
    if e2 > -dy:
        err = err - dy
        x0 = x0 + sx
    if e2 < dx:
        err = err + dx
        y0 = y0 + sy

return line
def bresenham(coords_a, coords_b)
Given the start and end coordinates, return all the coordinates lying on the line formed by these coordinates, based on Bresenham's algorithm. http://en.wikipedia.org/wiki/Bresenham's_line_algorithm#Simplification
1.816785
1.378484
1.317958
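A quick sketch of bresenham(); tracing the algorithm above for this start/end pair should give the coordinates shown in the comment.

# rasterise the segment from (0, 0) to (3, 2)
print(bresenham((0, 0), (3, 2)))
# expected: [[0, 0], [1, 1], [2, 1], [3, 2]]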
'''
Given a compressed path, return a new path that has all the
segments in it interpolated.
'''
expanded = []
if len(path) < 2:
    return expanded
for i in range(len(path) - 1):
    expanded += bresenham(path[i], path[i + 1])
expanded += [path[:-1]]
return expanded
def expand_path(path)
Given a compressed path, return a new path that has all the segments in it interpolated.
5.716147
3.372077
1.695141
if node_b.x - node_a.x == 0 or node_b.y - node_a.y == 0:
    # direct neighbor - distance is 1
    ng = 1
else:
    # not a direct neighbor - diagonal movement
    ng = SQRT2

# weight for weighted algorithms
if self.weighted:
    ng *= node_b.weight

return node_a.g + ng
def calc_cost(self, node_a, node_b)
get the distance between current node and the neighbor (cost)
5.39079
4.920439
1.095591
if not heuristic:
    heuristic = self.heuristic
return heuristic(
    abs(node_a.x - node_b.x),
    abs(node_a.y - node_b.y))
def apply_heuristic(self, node_a, node_b, heuristic=None)
helper function to apply heuristic
2.389348
2.31184
1.033527
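apply_heuristic() passes the absolute x and y offsets to a heuristic callable. Two common grid heuristics written against that (dx, dy) signature are sketched below; the function names are illustrative, and SQRT2 is assumed to be math.sqrt(2) as in calc_cost above.

import math

SQRT2 = math.sqrt(2)

def manhattan(dx, dy):
    # axis-aligned movement only
    return dx + dy

def octile(dx, dy):
    # diagonal movement allowed at cost sqrt(2)
    f = SQRT2 - 1
    return f * dx + dy if dx < dy else f * dy + dx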
'''
find neighbors, same for Dijkstra, A*, Bi-A*, IDA*
'''
if not diagonal_movement:
    diagonal_movement = self.diagonal_movement
return grid.neighbors(node, diagonal_movement=diagonal_movement)
def find_neighbors(self, grid, node, diagonal_movement=None)
find neighbors of a node; same for Dijkstra, A*, Bi-A*, IDA*
6.294263
2.573151
2.446131
if self.runs >= self.max_runs:
    raise ExecutionRunsException(
        '{} run into barrier of {} iterations without '
        'finding the destination'.format(
            self.__class__.__name__, self.max_runs))

if time.time() - self.start_time >= self.time_limit:
    raise ExecutionTimeException(
        '{} took longer than {} seconds, aborting!'.format(
            self.__class__.__name__, self.time_limit))
def keep_running(self)
check whether we have run into the time or iteration constraints; raises ExecutionRunsException or ExecutionTimeException if a constraint is exceeded
5.00321
4.520209
1.106854
'''
we check if the given node is part of the path by calculating its
cost and add or remove it from our path
:param node: the node we like to test
    (the neighbor in A* or jump-node in JumpPointSearch)
:param parent: the parent node (the current node we like to test)
:param end: the end point to calculate the cost of the path
:param open_list: the list that keeps track of our current path
:param open_value: needed if we like to set the open list to something
    else than True (used for bi-directional algorithms)
'''
# calculate cost from current node (parent) to the next node (neighbor)
ng = self.calc_cost(parent, node)

if not node.opened or ng < node.g:
    node.g = ng
    node.h = node.h or \
        self.apply_heuristic(node, end) * self.weight
    # f is the estimated total cost from start to goal
    node.f = node.g + node.h
    node.parent = parent

    if not node.opened:
        heapq.heappush(open_list, node)
        node.opened = open_value
    else:
        # the node can be reached with smaller cost.
        # Since its f value has been updated, we have to
        # update its position in the open list
        open_list.remove(node)
        heapq.heappush(open_list, node)
def process_node(self, node, parent, end, open_list, open_value=True)
we check if the given node is part of the path by calculating its cost and add or remove it from our path :param node: the node we like to test (the neighbor in A* or jump-node in JumpPointSearch) :param parent: the parent node (the current node we like to test) :param end: the end point to calculate the cost of the path :param open_list: the list that keeps track of our current path :param open_value: needed if we like to set the open list to something else than True (used for bi-directional algorithms)
5.149457
2.219295
2.320312
self.start_time = time.time()  # execution time limitation
self.runs = 0  # count number of iterations

start.opened = True

open_list = [start]

while len(open_list) > 0:
    self.runs += 1
    self.keep_running()

    path = self.check_neighbors(start, end, grid, open_list)
    if path:
        return path, self.runs

# failed to find path
return [], self.runs
def find_path(self, start, end, grid)
find a path from start to end node on grid by iterating over all neighbors of a node (see check_neighbors) :param start: start node :param end: end node :param grid: grid that stores all possible steps/tiles as 2D-list :return:
4.603104
4.665949
0.986531
self.start_time = time.time()  # execution time limitation
self.runs = 0  # count number of iterations

start_open_list = [start]
start.g = 0
start.f = 0
start.opened = BY_START

end_open_list = [end]
end.g = 0
end.f = 0
end.opened = BY_END

while len(start_open_list) > 0 and len(end_open_list) > 0:
    self.runs += 1
    self.keep_running()
    path = self.check_neighbors(start, end, grid, start_open_list,
                                open_value=BY_START, backtrace_by=BY_END)
    if path:
        return path, self.runs

    self.runs += 1
    self.keep_running()
    path = self.check_neighbors(end, start, grid, end_open_list,
                                open_value=BY_END, backtrace_by=BY_START)
    if path:
        return path, self.runs

# failed to find path
return [], self.runs
def find_path(self, start, end, grid)
find a path from start to end node on grid using the A* algorithm :param start: start node :param end: end node :param grid: grid that stores all possible steps/tiles as 2D-list :return:
2.738265
2.77754
0.98586
# cost from this node to the goal
self.h = 0.0

# cost from the start node to this node
self.g = 0.0

# distance from start to this point (f = g + h)
self.f = 0.0

self.opened = 0
self.closed = False

# used for backtracking to the start point
self.parent = None

# used for recursion tracking of IDA*
self.retain_count = 0

# used for IDA* and Jump-Point-Search
self.tested = False
def cleanup(self)
reset all calculated values, fresh start for pathfinding
7.001456
6.191681
1.130784
nodes = []
use_matrix = (isinstance(matrix, (tuple, list))) or \
    (USE_NUMPY and isinstance(matrix, np.ndarray) and matrix.size > 0)

for y in range(height):
    nodes.append([])
    for x in range(width):
        # 1, '1', True will be walkable while others will be obstacles
        # if inverse is False, otherwise it changes
        weight = int(matrix[y][x]) if use_matrix else 1
        walkable = weight <= 0 if inverse else weight >= 1

        nodes[y].append(Node(x=x, y=y, walkable=walkable, weight=weight))
return nodes
def build_nodes(width, height, matrix=None, inverse=False)
create nodes according to grid size. If a matrix is given it will be used to determine what nodes are walkable. :rtype : list
4.421399
4.524248
0.977267
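A small sketch of build_nodes() with an explicit matrix, where 0 marks an obstacle and positive values are walkable (matching the weight >= 1 rule in the code above).

# hypothetical 2 x 3 map: top-right and centre-bottom cells are blocked
nodes = build_nodes(width=3, height=2, matrix=[[1, 1, 0],
                                               [1, 0, 1]])
assert nodes[0][2].walkable is False
assert nodes[1][0].walkable is True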
return 0 <= x < self.width and 0 <= y < self.height
def inside(self, x, y)
check, if field position is inside map :param x: x pos :param y: y pos :return:
3.125722
6.798526
0.459765
return self.inside(x, y) and self.nodes[y][x].walkable
def walkable(self, x, y)
check, if the tile is inside grid and if it is set as walkable
5.485169
5.041224
1.088063
x = node.x
y = node.y
neighbors = []
s0 = d0 = s1 = d1 = s2 = d2 = s3 = d3 = False

# ↑
if self.walkable(x, y - 1):
    neighbors.append(self.nodes[y - 1][x])
    s0 = True
# →
if self.walkable(x + 1, y):
    neighbors.append(self.nodes[y][x + 1])
    s1 = True
# ↓
if self.walkable(x, y + 1):
    neighbors.append(self.nodes[y + 1][x])
    s2 = True
# ←
if self.walkable(x - 1, y):
    neighbors.append(self.nodes[y][x - 1])
    s3 = True

if diagonal_movement == DiagonalMovement.never:
    return neighbors

if diagonal_movement == DiagonalMovement.only_when_no_obstacle:
    d0 = s3 and s0
    d1 = s0 and s1
    d2 = s1 and s2
    d3 = s2 and s3
elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:
    d0 = s3 or s0
    d1 = s0 or s1
    d2 = s1 or s2
    d3 = s2 or s3
elif diagonal_movement == DiagonalMovement.always:
    d0 = d1 = d2 = d3 = True

# ↖
if d0 and self.walkable(x - 1, y - 1):
    neighbors.append(self.nodes[y - 1][x - 1])
# ↗
if d1 and self.walkable(x + 1, y - 1):
    neighbors.append(self.nodes[y - 1][x + 1])
# ↘
if d2 and self.walkable(x + 1, y + 1):
    neighbors.append(self.nodes[y + 1][x + 1])
# ↙
if d3 and self.walkable(x - 1, y + 1):
    neighbors.append(self.nodes[y + 1][x - 1])

return neighbors
def neighbors(self, node, diagonal_movement=DiagonalMovement.never)
get all neighbors of one node :param node: node
1.4205
1.425993
0.996148
data = ''
if border:
    data = '+{}+'.format('-' * len(self.nodes[0]))
for y in range(len(self.nodes)):
    line = ''
    for x in range(len(self.nodes[y])):
        node = self.nodes[y][x]
        if node == start:
            line += start_chr
        elif node == end:
            line += end_chr
        elif path and ((node.x, node.y) in path or node in path):
            line += path_chr
        elif node.walkable:
            # empty field
            weight = str(node.weight) if node.weight < 10 else '+'
            line += weight if show_weight else empty_chr
        else:
            line += block_chr  # blocked field
    if border:
        line = '|' + line + '|'
    if data:
        data += '\n'
    data += line
if border:
    data += '\n+{}+'.format('-' * len(self.nodes[0]))
return data
def grid_str(self, path=None, start=None, end=None, border=True, start_chr='s', end_chr='e', path_chr='x', empty_chr=' ', block_chr='#', show_weight=False)
create a printable string from the grid using ASCII characters :param path: list of nodes that show the path :param start: start node :param end: end node :param border: create a border around the grid :param start_chr: character for the start (default "s") :param end_chr: character for the destination (default "e") :param path_chr: character to show the path (default "x") :param empty_chr: character for empty fields (default " ") :param block_chr: character for blocking elements (default "#") :param show_weight: instead of empty_chr show the cost of each empty field (shows a + if the value of weight is > 10) :return:
2.413416
2.315018
1.042504
# pop node with minimum 'f' value
node = heapq.nsmallest(1, open_list)[0]
open_list.remove(node)
node.closed = True

# if reached the end position, construct the path and return it
# (ignored for bi-directional a*, there we look for a neighbor that is
# part of the oncoming path)
if not backtrace_by and node == end:
    return backtrace(end)

# get neighbors of the current node
neighbors = self.find_neighbors(grid, node)
for neighbor in neighbors:
    if neighbor.closed:
        # already visited last minimum f value
        continue
    if backtrace_by and neighbor.opened == backtrace_by:
        # found the oncoming path
        if backtrace_by == BY_END:
            return bi_backtrace(node, neighbor)
        else:
            return bi_backtrace(neighbor, node)

    # check if the neighbor has not been inspected yet, or
    # can be reached with smaller cost from the current node
    self.process_node(neighbor, node, end, open_list, open_value)

# the end has not been reached (yet) keep the find_path loop running
return None
def check_neighbors(self, start, end, grid, open_list, open_value=True, backtrace_by=None)
find next path segment based on given node (or return path if we found the end)
5.238799
5.186945
1.009997
start.g = 0
start.f = 0
return super(AStarFinder, self).find_path(start, end, grid)
def find_path(self, start, end, grid)
find a path from start to end node on grid using the A* algorithm :param start: start node :param end: end node :param grid: grid that stores all possible steps/tiles as 2D-list :return:
4.165177
4.986845
0.835233
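These pathfinding records appear to come from the python-pathfinding package. Under that assumption, a typical end-to-end use of Grid and AStarFinder would look roughly like the sketch below (import paths as published by that package).

from pathfinding.core.diagonal_movement import DiagonalMovement
from pathfinding.core.grid import Grid
from pathfinding.finder.a_star import AStarFinder

matrix = [[1, 1, 1],
          [1, 0, 1],
          [1, 1, 1]]
grid = Grid(matrix=matrix)
start = grid.node(0, 0)
end = grid.node(2, 2)

finder = AStarFinder(diagonal_movement=DiagonalMovement.always)
path, runs = finder.find_path(start, end, grid)
print('operations:', runs, 'path length:', len(path))
print(grid.grid_str(path=path, start=start, end=end))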
if 'Windows' == platform.system():
    # Possible scenarios here
    #   1. Run from source, DLLs are in pyzbar directory
    #       cdll.LoadLibrary() imports DLLs in repo root directory
    #   2. Wheel install into CPython installation
    #       cdll.LoadLibrary() imports DLLs in package directory
    #   3. Wheel install into virtualenv
    #       cdll.LoadLibrary() imports DLLs in package directory
    #   4. Frozen
    #       cdll.LoadLibrary() imports DLLs alongside executable
    fname, dependencies = _windows_fnames()

    def load_objects(directory):
        # Load dependencies before loading libzbar dll
        deps = [
            cdll.LoadLibrary(str(directory.joinpath(dep)))
            for dep in dependencies
        ]
        libzbar = cdll.LoadLibrary(str(directory.joinpath(fname)))
        return deps, libzbar

    try:
        dependencies, libzbar = load_objects(Path(''))
    except OSError:
        dependencies, libzbar = load_objects(Path(__file__).parent)
else:
    # Assume a shared library on the path
    path = find_library('zbar')
    if not path:
        raise ImportError('Unable to find zbar shared library')
    libzbar = cdll.LoadLibrary(path)
    dependencies = []

return libzbar, dependencies
def load()
Loads the libzbar shared library and its dependencies.
4.827124
4.564956
1.057431
global LIBZBAR
global EXTERNAL_DEPENDENCIES

if not LIBZBAR:
    libzbar, dependencies = zbar_library.load()
    LIBZBAR = libzbar
    EXTERNAL_DEPENDENCIES = [LIBZBAR] + dependencies

return LIBZBAR
def load_libzbar()
Loads the zbar shared library and its dependencies. Populates the globals LIBZBAR and EXTERNAL_DEPENDENCIES.
4.121302
3.316157
1.242794
prototype = CFUNCTYPE(restype, *args)
return prototype((fname, load_libzbar()))
def zbar_function(fname, restype, *args)
Returns a foreign function exported by `zbar`. Args: fname (:obj:`str`): Name of the exported function as string. restype (:obj:): Return type - one of the `ctypes` primitive C data types. *args: Arguments - a sequence of `ctypes` primitive C data types. Returns: ctypes.CFunctionType: A wrapper around the function.
7.956183
20.24975
0.392903
x_values = list(map(itemgetter(0), locations))
x_min, x_max = min(x_values), max(x_values)
y_values = list(map(itemgetter(1), locations))
y_min, y_max = min(y_values), max(y_values)
return Rect(x_min, y_min, x_max - x_min, y_max - y_min)
def bounding_box(locations)
Computes the bounding box of an iterable of (x, y) coordinates. Args: locations: iterable of (x, y) tuples. Returns: `Rect`: Coordinates of the bounding box.
1.632531
1.673715
0.975394
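A short usage sketch of bounding_box(); Rect is assumed to be a (left, top, width, height) namedtuple defined elsewhere in the same module.

print(bounding_box([(1, 2), (4, 6), (3, 3)]))
# expected (assuming Rect fields left/top/width/height): Rect(left=1, top=2, width=3, height=4)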
def is_not_clockwise(p0, p1, p2):
    return 0 <= (
        (p1[0] - p0[0]) * (p2[1] - p0[1]) -
        (p1[1] - p0[1]) * (p2[0] - p0[0])
    )

def go(points_):
    res = []
    for p in points_:
        while 1 < len(res) and is_not_clockwise(res[-2], res[-1], p):
            res.pop()
        res.append(p)
    # The last point in each list is the first point in the other list
    res.pop()
    return res

# Discard duplicates and sort by x then y
points = sorted(set(points))

# Algorithm needs at least two points
hull = (
    points if len(points) < 2
    else chain(go(points), go(reversed(points)))
)

return list(map(Point._make, hull))
def convex_hull(points)
Computes the convex hull of an iterable of (x, y) coordinates. Args: points: iterable of (x, y) tuples. Returns: `list`: instances of `Point` - vertices of the convex hull in counter-clockwise order, starting from the vertex with the lexicographically smallest coordinates. Andrew's monotone chain algorithm. O(n log n) complexity. https://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain
2.998483
2.961879
1.012359
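A short usage sketch of convex_hull(); Point is assumed to be an (x, y) namedtuple defined elsewhere in the same module.

pts = [(0, 0), (2, 0), (2, 2), (0, 2), (1, 1)]
hull = convex_hull(pts)
print(hull)
# expected: the four corners of the square; the interior point (1, 1) is discarded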
symbol = zbar_image_first_symbol(image)
while symbol:
    yield symbol
    symbol = zbar_symbol_next(symbol)
def _symbols_for_image(image)
Generator of symbols. Args: image: `zbar_image` Yields: POINTER(zbar_symbol): Symbol
5.704738
5.385296
1.059317
for symbol in symbols:
    data = string_at(zbar_symbol_get_data(symbol))
    # The 'type' int is a value in the ZBarSymbol enumeration
    symbol_type = ZBarSymbol(symbol.contents.type).name
    polygon = convex_hull(
        (
            zbar_symbol_get_loc_x(symbol, index),
            zbar_symbol_get_loc_y(symbol, index)
        )
        for index in _RANGEFN(zbar_symbol_get_loc_size(symbol))
    )

    yield Decoded(
        data=data,
        type=symbol_type,
        rect=bounding_box(polygon),
        polygon=polygon
    )
def _decode_symbols(symbols)
Generator of decoded symbol information. Args: symbols: iterable of instances of `POINTER(zbar_symbol)` Yields: Decoded: decoded symbol
6.045151
5.034648
1.20071
# Test for PIL.Image and numpy.ndarray without requiring that cv2 or PIL
# are installed.
if 'PIL.' in str(type(image)):
    if 'L' != image.mode:
        image = image.convert('L')
    pixels = image.tobytes()
    width, height = image.size
elif 'numpy.ndarray' in str(type(image)):
    if 3 == len(image.shape):
        # Take just the first channel
        image = image[:, :, 0]
    if 'uint8' != str(image.dtype):
        image = image.astype('uint8')
    try:
        pixels = image.tobytes()
    except AttributeError:
        # `numpy.ndarray.tobytes()` introduced in `numpy` 1.9.0 - use the
        # older `tostring` method.
        pixels = image.tostring()
    height, width = image.shape[:2]
else:
    # image should be a tuple (pixels, width, height)
    pixels, width, height = image

    # Check dimensions
    if 0 != len(pixels) % (width * height):
        raise PyZbarError(
            (
                'Inconsistent dimensions: image data of {0} bytes is not '
                'divisible by (width x height = {1})'
            ).format(len(pixels), (width * height))
        )

# Compute bits-per-pixel
bpp = 8 * len(pixels) // (width * height)
if 8 != bpp:
    raise PyZbarError(
        'Unsupported bits-per-pixel [{0}]. Only [8] is supported.'.format(bpp)
    )

return pixels, width, height
def _pixel_data(image)
Returns (pixels, width, height) Returns: :obj: `tuple` (pixels, width, height)
3.143419
3.063676
1.026029
pixels, width, height = _pixel_data(image)

results = []
with _image_scanner() as scanner:
    if symbols:
        # Disable all but the symbols of interest
        disable = set(ZBarSymbol).difference(symbols)
        for symbol in disable:
            zbar_image_scanner_set_config(
                scanner, symbol, ZBarConfig.CFG_ENABLE, 0
            )
        # I think it likely that zbar will detect all symbol types by
        # default, in which case enabling the types of interest is
        # redundant but it seems sensible to be over-cautious and enable
        # them.
        for symbol in symbols:
            zbar_image_scanner_set_config(
                scanner, symbol, ZBarConfig.CFG_ENABLE, 1
            )
    with _image() as img:
        zbar_image_set_format(img, _FOURCC['L800'])
        zbar_image_set_size(img, width, height)
        zbar_image_set_data(img, cast(pixels, c_void_p), len(pixels), None)
        decoded = zbar_scan_image(scanner, img)
        if decoded < 0:
            raise PyZbarError('Unsupported image format')
        else:
            results.extend(_decode_symbols(_symbols_for_image(img)))

return results
def decode(image, symbols=None)
Decodes barcodes in `image`. Args: image: `numpy.ndarray`, `PIL.Image` or tuple (pixels, width, height) symbols: iter(ZBarSymbol) the symbol types to decode; if `None`, uses `zbar`'s default behaviour, which is to decode all symbol types. Returns: :obj:`list` of :obj:`Decoded`: The values decoded from barcodes.
4.831805
4.955468
0.975045
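Assuming these records come from pyzbar, a typical call into the decode() entry point above might look like this (the image file name is hypothetical).

from PIL import Image
from pyzbar.pyzbar import decode, ZBarSymbol

results = decode(Image.open('barcode.png'),      # hypothetical file name
                 symbols=[ZBarSymbol.QRCODE])    # restrict to QR codes
for result in results:
    print(result.type, result.data)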
output = ""
i18 = getattr(settings, 'USE_I18N', False)
if i18:
    template = "admin/language_selector.html"
    context['i18n_is_set'] = True
    try:
        output = render_to_string(template, context)
    except:
        pass
return output
def language_selector(context)
displays a language selector dropdown in the admin, based on Django "LANGUAGES" context. requires: * USE_I18N = True / settings.py * LANGUAGES specified / settings.py (otherwise all Django locales will be displayed) * "set_language" url configured (see https://docs.djangoproject.com/en/dev/topics/i18n/translation/#the-set-language-redirect-view)
3.279875
3.088983
1.061798
try:
    template = app['app_label'] + template
    text = render_to_string(template, context)
except:
    text = app['name']
return text
def render_app_name(context, app, template="/admin_app_name.html")
Render the application name using the default template name. If it cannot find a template matching the given path, it falls back to the application name.
4.142612
4.143261
0.999843
try:
    text = app['app_label']
except KeyError:
    text = fallback
except TypeError:
    text = app
return text
def render_app_label(context, app, fallback="")
Render the application label.
4.39467
3.953311
1.111643
try:
    template = app['app_label'] + template
    text = render_to_string(template, context)
except:
    text = fallback
return text
def render_app_description(context, app, fallback="", template="/admin_app_description.html")
Render the application description using the default template name. If it cannot find a template matching the given path, it falls back to the fallback argument.
3.819505
4.096736
0.932329
if CUSTOM_FIELD_RENDERER:
    mod, cls = CUSTOM_FIELD_RENDERER.rsplit(".", 1)
    field_renderer = getattr(import_module(mod), cls)
    if field_renderer:
        return field_renderer(field, **kwargs).render()
return field
def custom_field_rendering(context, field, *args, **kwargs)
Wrapper for rendering the field via an external renderer
3.144628
3.045334
1.032605
if self._is_reader:
    assert self._filenames is not None
    return self._filenames
else:
    return self.data_producer.filenames
def filenames(self)
list of file names the data is originally being read from. Returns ------- names : list of str list of file names at the beginning of the input chain.
6.482656
7.323347
0.885204
if self.data_producer is None:
    return []

res = []
ds = self.data_producer
while not ds.is_reader:
    res.append(ds)
    ds = ds.data_producer
res.append(ds)
res = res[::-1]
return res
def _data_flow_chain(self)
Get a list of all elements in the data flow graph. The first element is the original source, the next one reads from the prior and so on and so forth. Returns ------- list: list of data sources
3.845717
3.603694
1.06716
if not IteratorState.is_uniform_stride(stride):
    n = len(np.unique(stride[:, 0]))
else:
    n = self.ntraj
return n
def number_of_trajectories(self, stride=None)
r""" Returns the number of trajectories. Parameters ---------- stride: None (default) or np.ndarray Returns ------- int : number of trajectories
13.075029
13.3582
0.978802
if itraj >= self.ntraj:
    raise IndexError("given index (%s) exceeds number of data sets (%s)."
                     " Zero based indexing!" % (itraj, self.ntraj))
if not IteratorState.is_uniform_stride(stride):
    selection = stride[stride[:, 0] == itraj][:, 0]
    return 0 if itraj not in selection else len(selection)
else:
    res = max((self._lengths[itraj] - skip - 1) // int(stride) + 1, 0)
    return res
def trajectory_length(self, itraj, stride=1, skip=0)
r"""Returns the length of trajectory of the requested index. Parameters ---------- itraj : int trajectory index stride : int return value is the number of frames in the trajectory when running through it with a step size of `stride`. skip: int or None skip n frames. Returns ------- int : length of trajectory
6.749254
6.834966
0.98746
if chunksize != 0:
    chunksize = float(chunksize)
    chunks = int(sum((ceil(l / chunksize)
                      for l in self.trajectory_lengths(stride=stride, skip=skip))))
else:
    chunks = self.number_of_trajectories(stride)
return chunks
def n_chunks(self, chunksize, stride=1, skip=0)
how many chunks an iterator of this source will output, starting (e.g. after calling reset()) Parameters ---------- chunksize stride skip
4.323742
5.076293
0.851752
n = self.ntraj
if not IteratorState.is_uniform_stride(stride):
    return np.fromiter((self.trajectory_length(itraj, stride)
                        for itraj in range(n)), dtype=int, count=n)
else:
    return np.fromiter((self.trajectory_length(itraj, stride, skip)
                        for itraj in range(n)), dtype=int, count=n)
def trajectory_lengths(self, stride=1, skip=0)
r""" Returns the length of each trajectory. Parameters ---------- stride : int return value is the number of frames of the trajectories when running through them with a step size of `stride`. skip : int skip parameter Returns ------- array(dtype=int) : containing length of each trajectory
3.869714
4.231728
0.914453
if not IteratorState.is_uniform_stride(stride):
    return len(stride)
return sum(self.trajectory_lengths(stride=stride, skip=skip))
def n_frames_total(self, stride=1, skip=0)
r"""Returns total number of frames. Parameters ---------- stride : int return value is the number of frames in trajectories when running through them with a step size of `stride`. skip : int, default=0 skip the first initial n frames per trajectory. Returns ------- n_frames_total : int total number of frames.
15.03886
17.964901
0.837125
if isinstance(dimensions, int):
    ndim = 1
    dimensions = slice(dimensions, dimensions + 1)
elif isinstance(dimensions, (list, np.ndarray, tuple, slice)):
    if hasattr(dimensions, 'ndim') and dimensions.ndim > 1:
        raise ValueError('dimension indices can\'t have more than one dimension')
    ndim = len(np.zeros(self.ndim)[dimensions])
else:
    raise ValueError('unsupported type (%s) of "dimensions"' % type(dimensions))

assert ndim > 0, "ndim was zero in %s" % self.__class__.__name__

if chunk is None:
    chunk = self.chunksize

# create iterator
if self.in_memory and not self._mapping_to_mem_active:
    from pyemma.coordinates.data.data_in_memory import DataInMemory
    assert self._Y is not None
    it = DataInMemory(self._Y)._create_iterator(skip=skip, chunk=chunk,
                                                stride=stride, return_trajindex=True)
else:
    it = self._create_iterator(skip=skip, chunk=chunk, stride=stride,
                               return_trajindex=True)

with it:
    # allocate memory
    try:
        from pyemma import config
        if config.coordinates_check_output:
            trajs = [np.full((l, ndim), np.nan, dtype=self.output_type())
                     for l in it.trajectory_lengths()]
        else:
            # TODO: avoid having a copy here, if Y is already filled
            trajs = [np.empty((l, ndim), dtype=self.output_type())
                     for l in it.trajectory_lengths()]
    except MemoryError:
        self.logger.exception("Could not allocate enough memory to map all data."
                              " Consider using a larger stride.")
        return

    if self._logger_is_active(self._loglevel_DEBUG):
        self.logger.debug("get_output(): dimensions=%s" % str(dimensions))
        self.logger.debug("get_output(): created output trajs with shapes: %s"
                          % [x.shape for x in trajs])
        self.logger.debug("nchunks :%s, chunksize=%s" % (it.n_chunks, it.chunksize))

    # fetch data
    from pyemma._base.progress import ProgressReporter
    pg = ProgressReporter()
    pg.register(it.n_chunks, description='getting output of %s' % self.__class__.__name__)
    with pg.context(), it:
        for itraj, chunk in it:
            i = slice(it.pos, it.pos + len(chunk))
            assert i.stop - i.start > 0
            trajs[itraj][i, :] = chunk[:, dimensions]
            pg.update(1)

if config.coordinates_check_output:
    for i, t in enumerate(trajs):
        finite = self._chunk_finite(t)
        if not np.all(finite):
            # determine position
            frames = np.where(np.logical_not(finite))
            if not len(frames):
                raise RuntimeError('nothing got assigned for traj {}'.format(i))
            raise RuntimeError('unassigned sections in traj {i} in range [{frames}]'.format(frames=frames, i=i))

return trajs
def get_output(self, dimensions=slice(0, None), stride=1, skip=0, chunk=None)
Maps all input data of this transformer and returns it as an array or list of arrays Parameters ---------- dimensions : list-like of indexes or slice, default=all indices of dimensions you like to keep. stride : int, default=1 only take every n'th frame. skip : int, default=0 initially skip n frames of each file. chunk: int, default=None How many frames to process at once. If not given obtain the chunk size from the source. Returns ------- output : list of ndarray(T_i, d) the mapped data, where T is the number of time steps of the input data, or if stride > 1, floor(T_in / stride). d is the output dimension of this transformer. If the input consists of a list of trajectories, Y will also be a corresponding list of trajectories
4.36121
4.376737
0.996452
import os
if not filename:
    assert hasattr(self, 'filenames')
    # raise RuntimeError("could not determine filenames")
    filenames = []
    for f in self.filenames:
        base, _ = os.path.splitext(f)
        filenames.append(base + extension)
elif isinstance(filename, str):
    filename = filename.replace('{stride}', str(stride))
    filenames = [filename.replace('{itraj}', str(itraj))
                 for itraj in range(self.number_of_trajectories())]
else:
    raise TypeError("filename should be str or None")
self.logger.debug("write_to_csv, filenames=%s" % filenames)

# check files before starting to write
import errno
for f in filenames:
    try:
        st = os.stat(f)
        raise OSError(errno.EEXIST)
    except OSError as e:
        if e.errno == errno.EEXIST:
            if overwrite:
                continue
        elif e.errno == errno.ENOENT:
            continue
        raise

f = None
from pyemma._base.progress import ProgressReporter
pg = ProgressReporter()
it = self.iterator(stride, chunk=chunksize, return_trajindex=False)
pg.register(it.n_chunks, "saving to csv")
with it, pg.context():
    oldtraj = -1
    for X in it:
        if oldtraj != it.current_trajindex:
            if f is not None:
                f.close()
            fn = filenames[it.current_trajindex]
            self.logger.debug("opening file %s for writing csv." % fn)
            f = open(fn, 'wb')
            oldtraj = it.current_trajindex
        np.savetxt(f, X, **kw)
        f.flush()
        pg.update(1, 0)
if f is not None:
    f.close()
def write_to_csv(self, filename=None, extension='.dat', overwrite=False, stride=1, chunksize=None, **kw)
write all data to csv with numpy.savetxt Parameters ---------- filename : str, optional filename string, which may contain placeholders {itraj} and {stride}: * itraj will be replaced by trajectory index * stride is stride argument of this method If filename is not given, an attempt is made to obtain the filenames from the data source of this iterator. extension : str, optional, default='.dat' filename extension of created files overwrite : bool, optional, default=False shall existing files be overwritten? If a file exists, this method will raise. stride : int omit every n'th frame chunksize: int, default=None how many frames to process at once kw : dict, optional named arguments passed into numpy.savetxt (header, separator etc.) Example ------- Assume you want to save features calculated by some FeatureReader to ASCII: >>> import numpy as np, pyemma >>> import os >>> from pyemma.util.files import TemporaryDirectory >>> from pyemma.util.contexts import settings >>> data = [np.random.random((10,3))] * 3 >>> reader = pyemma.coordinates.source(data) >>> filename = "distances_{itraj}.dat" >>> with TemporaryDirectory() as td, settings(show_progress_bars=False): ... out = os.path.join(td, filename) ... reader.write_to_csv(out, header='', delimiter=';') ... print(sorted(os.listdir(td))) ['distances_0.dat', 'distances_1.dat', 'distances_2.dat']
3.607023
3.50865
1.028037
assert not self.uniform_stride, "requested random access indices, but is in uniform stride mode"
if traj in self.traj_keys:
    return self.ra_indices_for_traj_dict[traj]
else:
    return np.array([])
def ra_indices_for_traj(self, traj)
Gives the indices for a trajectory file index (without changing the order within the trajectory itself). :param traj: a trajectory file index :return: a Nx1 - np.array of the indices corresponding to the trajectory index
7.312238
7.694477
0.950323
return self._data_source.n_chunks(self.chunksize, stride=self.stride, skip=self.skip)
def n_chunks(self)
rough estimate of how many chunks will be processed
7.659755
6.792891
1.127613
from functools import wraps

@wraps(datasource_method)
def wrapper(self, itraj):
    # itraj already selected, we're done.
    if itraj == self._selected_itraj:
        return
    datasource_method(self, itraj)
    self._itraj = self._selected_itraj = itraj

return wrapper
def _select_file_guard(datasource_method)
in case we call _select_file multiple times with the same value, we do not want to reopen file handles.
3.782432
3.591245
1.053237
if value != self._selected_itraj:
    self.state.itraj = value
    # TODO: this side effect is unexpected.
    self.state.t = 0
def _itraj(self, value)
Reader-internal property that tracks the upcoming trajectory index. Should not be used within iterator loop. Parameters ---------- value : int The upcoming trajectory index.
9.15722
10.302544
0.888831
try:
    return self._name
except AttributeError:
    self._name = "%s.%s[%i]" % (self.__module__,
                                self.__class__.__name__,
                                next(Loggable.__ids))
    return self._name
def name(self)
The name of this instance
4.681006
4.467106
1.047883
# fetch model parameters
if hasattr(cls, 'set_model_params'):
    # introspect the constructor arguments to find the model parameters
    # to represent
    args, varargs, kw, default = getargspec_no_self(cls.set_model_params)
    if varargs is not None:
        raise RuntimeError("PyEMMA models should always specify their parameters in the signature"
                           " of their set_model_params (no varargs)."
                           " %s doesn't follow this convention." % (cls,))
    return args
else:
    # No parameters known
    return []
def _get_model_param_names(cls)
r"""Get parameter names for the model
5.572628
5.537081
1.00642
for key, value in params.items():
    if not hasattr(self, key):
        setattr(self, key, value)  # set parameter for the first time.
    elif getattr(self, key) is None:
        setattr(self, key, value)  # update because this parameter is still None.
    elif value is not None:
        setattr(self, key, value)
def update_model_params(self, **params)
r"""Update given model parameters if they are set to specific values
3.751834
3.699182
1.014234
out = dict()
for key in self._get_model_param_names():
    # We need deprecation warnings to always be on in order to
    # catch deprecated param values.
    # This is set in utils/__init__.py but it gets overwritten
    # when running under python3 somehow.
    from pyemma.util.exceptions import PyEMMA_DeprecationWarning
    warnings.simplefilter("always", DeprecationWarning)
    warnings.simplefilter("always", PyEMMA_DeprecationWarning)
    try:
        with warnings.catch_warnings(record=True) as w:
            value = getattr(self, key, None)
            if len(w) and w[0].category in (DeprecationWarning, PyEMMA_DeprecationWarning):
                # if the parameter is deprecated, don't show it
                continue
    finally:
        warnings.filters.pop(0)
        warnings.filters.pop(0)

    # XXX: should we rather test if instance of estimator?
    if deep and hasattr(value, 'get_params'):
        deep_items = list(value.get_params().items())
        out.update((key + '__' + k, val) for k, val in deep_items)
    out[key] = value
return out
def get_model_params(self, deep=True)
r"""Get parameters for this model. Parameters ---------- deep: boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values.
2.864831
2.465394
1.162018
self._check_samples_available()
# TODO: can we use np.fromiter here? We would need the same shape of every member for this!
return [call_member(M, f, *args, **kwargs) for M in self.samples]
def sample_f(self, f, *args, **kwargs)
r"""Evaluates method f for all samples Calls f(\*args, \*\*kwargs) on all samples. Parameters ---------- f : method reference or name (str) Model method to be evaluated for each model sample args : arguments Non-keyword arguments to be passed to the method in each call kwargs : keyword-arguments Keyword arguments to be passed to the method in each call Returns ------- vals : list list of results of the method calls
20.405672
25.129276
0.812028
vals = self.sample_f(f, *args, **kwargs)
return _np.mean(vals, axis=0)
def sample_mean(self, f, *args, **kwargs)
r"""Sample mean of numerical method f over all samples Calls f(\*args, \*\*kwargs) on all samples and computes the mean. f must return a numerical value or an ndarray. Parameters ---------- f : method reference or name (str) Model method to be evaluated for each model sample args : arguments Non-keyword arguments to be passed to the method in each call kwargs : keyword-arguments Keyword arguments to be passed to the method in each call Returns ------- mean : float or ndarray mean value or mean array
5.783718
8.908569
0.649231
vals = self.sample_f(f, *args, **kwargs)
return _np.std(vals, axis=0)
def sample_std(self, f, *args, **kwargs)
r"""Sample standard deviation of numerical method f over all samples Calls f(\*args, \*\*kwargs) on all samples and computes the standard deviation. f must return a numerical value or an ndarray. Parameters ---------- f : method reference or name (str) Model method to be evaluated for each model sample args : arguments Non-keyword arguments to be passed to the method in each call kwargs : keyword-arguments Keyword arguments to be passed to the method in each call Returns ------- std : float or ndarray standard deviation or array of standard deviations
6.346447
9.825319
0.645928
vals = self.sample_f(f, *args, **kwargs)
return confidence_interval(vals, conf=self.conf)
def sample_conf(self, f, *args, **kwargs)
r"""Sample confidence interval of numerical method f over all samples Calls f(\*args, \*\*kwargs) on all samples and computes the confidence interval. Size of confidence interval is given in the construction of the SampledModel. f must return a numerical value or an ndarray. Parameters ---------- f : method reference or name (str) Model method to be evaluated for each model sample args : arguments Non-keyword arguments to be passed to the method in each call kwargs : keyword-arguments Keyword arguments to be passed to the method in each call Returns ------- L : float or ndarray lower value or array of confidence interval R : float or ndarray upper value or array of confidence interval
7.552243
12.014211
0.628609
all_labels = []
for f in self.active_features:
    all_labels += f.describe()
return all_labels
def describe(self)
Returns a list of strings, one for each feature selected, with human-readable descriptions of the features. Returns ------- labels : list of str An ordered list of strings, one for each feature selected, with human-readable descriptions of the features.
6.82656
5.136455
1.329041
if exclude_symmetry_related:
    exclusions = []

    exclusions.append("mass < 2")
    exclusions.append("(resname == VAL and name == CG)")
    exclusions.append("(resname == LEU and name == CD)")
    exclusions.append("(resname == PHE and name == CD) or (resname == PHE and name == CE)")
    exclusions.append("(resname == TYR and name == CD) or (resname == TYR and name == CE)")
    exclusions.append("(resname == GLU and name == OD1) or (resname == GLU and name == OD2)")
    exclusions.append("(resname == ASP and name == OG1) or (resname == ASP and name == OG2)")
    exclusions.append("(resname == HIS and name == ND1) or (resname == HIS and name == NE2)")
    exclusions.append("(resname == ARG and name == NH1) or (resname == ARG and name == NH2)")

    exclusion_string = ' or '.join(exclusions)
    selection_string = 'not (' + exclusion_string + ')'

    return self.topology.select(selection_string)
else:
    return self.topology.select("mass >= 2")
def select_Heavy(self, exclude_symmetry_related=False)
Returns the indexes of all heavy atoms (Mass >= 2), optionally excluding symmetry-related heavy atoms. Parameters ---------- exclude_symmetry_related : boolean, default=False if True, exclude symmetry-related heavy atoms. Returns ------- indexes : ndarray((n), dtype=int) array with selected atom indexes
2.05199
2.017084
1.017305
assert isinstance(excluded_neighbors, int)

p = []
for i in range(len(sel)):
    for j in range(i + 1, len(sel)):
        # get ordered pair
        I = sel[i]
        J = sel[j]
        if (I > J):
            I = sel[j]
            J = sel[i]
        # exclude 1 and 2 neighbors
        if (J > I + excluded_neighbors):
            p.append([I, J])
return np.array(p)
def pairs(sel, excluded_neighbors=0)
Creates all pairs between indexes. Will exclude closest neighbors up to :py:obj:`excluded_neighbors` The self-pair (i,i) is always excluded Parameters ---------- sel : ndarray((n), dtype=int) array with selected atom indexes excluded_neighbors: int, default = 0 number of neighbors that will be excluded when creating the pairs Returns ------- sel : ndarray((m,2), dtype=int) m x 2 array with all pair indexes between different atoms that are at least :obj:`excluded_neighbors` indexes apart, i.e. if i is the index of an atom, the pairs [i,i-2], [i,i-1], [i,i], [i,i+1], [i,i+2], will not be in :py:obj:`sel` (n=excluded_neighbors) if :py:obj:`excluded_neighbors` = 2. Moreover, the list is non-redundant,i.e. if [i,j] is in sel, then [j,i] is not.
3.078712
2.925647
1.052318
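A small sketch of the pairs() helper above: with one excluded neighbor, adjacent indices are skipped.

import numpy as np

print(pairs(np.array([0, 1, 2, 3]), excluded_neighbors=1))
# expected: [[0 2]
#            [0 3]
#            [1 3]]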
pair_inds = np.array(pair_inds).astype(dtype=np.int, casting='safe')

if pair_inds.ndim != 2:
    raise ValueError("pair indices has to be a matrix.")

if pair_inds.shape[1] != pair_n:
    raise ValueError("pair indices shape has to be (x, %i)." % pair_n)

if pair_inds.max() > self.topology.n_atoms:
    raise ValueError("index out of bounds: %i."
                     " Maximum atom index available: %i"
                     % (pair_inds.max(), self.topology.n_atoms))

return pair_inds
def _check_indices(self, pair_inds, pair_n=2)
ensure pairs are valid (shapes, all atom indices available?, etc.)
2.920023
2.657551
1.098765
self.add_selection(list(range(self.topology.n_atoms)), reference=reference, atom_indices=atom_indices, ref_atom_indices=ref_atom_indices)
def add_all(self, reference=None, atom_indices=None, ref_atom_indices=None)
Adds all atom coordinates to the feature list. The coordinates are flattened as follows: [x1, y1, z1, x2, y2, z2, ...] Parameters ---------- reference: mdtraj.Trajectory or None, default=None if given, all data is being aligned to the given reference with Trajectory.superpose atom_indices : array_like, or None The indices of the atoms to superpose. If not supplied, all atoms will be used. ref_atom_indices : array_like, or None Use these atoms on the reference structure. If not supplied, the same atom indices will be used for this trajectory and the reference one.
2.766915
3.560427
0.77713
from .misc import SelectionFeature, AlignFeature
if reference is None:
    f = SelectionFeature(self.topology, indexes)
else:
    if not isinstance(reference, mdtraj.Trajectory):
        raise ValueError('reference is not a mdtraj.Trajectory object, but {}'.format(reference))
    f = AlignFeature(reference=reference, indexes=indexes,
                     atom_indices=atom_indices, ref_atom_indices=ref_atom_indices)
self.__add_feature(f)
def add_selection(self, indexes, reference=None, atom_indices=None, ref_atom_indices=None)
Adds the coordinates of the selected atom indexes to the feature list. The coordinates of the selection [1, 2, ...] are flattened as follows: [x1, y1, z1, x2, y2, z2, ...] Parameters ---------- indexes : ndarray((n), dtype=int) array with selected atom indexes reference: mdtraj.Trajectory or None, default=None if given, all data is being aligned to the given reference with Trajectory.superpose atom_indices : array_like, or None The indices of the atoms to superpose. If not supplied, all atoms will be used. ref_atom_indices : array_like, or None Use these atoms on the reference structure. If not supplied, the same atom indices will be used for this trajectory and the reference one.
3.631353
3.446637
1.053593
from .distances import DistanceFeature

atom_pairs = _parse_pairwise_input(
    indices, indices2, self.logger, fname='add_distances()')

atom_pairs = self._check_indices(atom_pairs)
f = DistanceFeature(self.topology, atom_pairs, periodic=periodic)
self.__add_feature(f)
def add_distances(self, indices, periodic=True, indices2=None)
r""" Adds the distances between atoms to the feature list. Parameters ---------- indices : can be of two types: ndarray((n, 2), dtype=int): n x 2 array with the pairs of atoms between which the distances shall be computed iterable of integers (either list or ndarray(n, dtype=int)): indices (not pairs of indices) of the atoms between which the distances shall be computed. periodic : optional, boolean, default is True If periodic is True and the trajectory contains unitcell information, distances will be computed under the minimum image convention. indices2: iterable of integers (either list or ndarray(n, dtype=int)), optional: Only has effect if :py:obj:`indices` is an iterable of integers. Instead of the above behaviour, only the distances between the atoms in :py:obj:`indices` and :py:obj:`indices2` will be computed. .. note:: When using the iterable of integers input, :py:obj:`indices` and :py:obj:`indices2` will be sorted numerically and made unique before converting them to a pairlist. Please look carefully at the output of :py:func:`describe()` to see what features exactly have been added.
7.835314
7.138121
1.097672
# Atom indices for CAs
at_idxs_ca = self.select_Ca()
# Residue indices for residues containing CAs
res_idxs_ca = [self.topology.atom(ca).residue.index for ca in at_idxs_ca]
# Pairs of those residues, with possibility to exclude neighbors
res_idxs_ca_pairs = self.pairs(res_idxs_ca, excluded_neighbors=excluded_neighbors)
# Mapping back pairs of residue indices to pairs of CA indices
distance_indexes = []
for ri, rj in res_idxs_ca_pairs:
    distance_indexes.append([self.topology.residue(ri).atom('CA').index,
                             self.topology.residue(rj).atom('CA').index])
distance_indexes = np.array(distance_indexes)

self.add_distances(distance_indexes, periodic=periodic)
def add_distances_ca(self, periodic=True, excluded_neighbors=2)
Adds the distances between all Ca's to the feature list. Parameters ---------- periodic : boolean, default is True Use the minimum image convention when computing distances excluded_neighbors : int, default is 2 Number of exclusions when compiling the list of pairs. Two CA-atoms are considered neighbors if they belong to adjacent residues.
3.742072
3.659132
1.022667
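Assuming these featurizer records come from PyEMMA, a typical use of add_distances_ca might look like the sketch below (the topology file name is hypothetical).

import pyemma

feat = pyemma.coordinates.featurizer('protein.pdb')   # hypothetical topology file
feat.add_distances_ca(periodic=True, excluded_neighbors=2)
print(feat.dimension(), 'CA-CA distance features')
print(feat.describe()[:3])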
from .distances import InverseDistanceFeature
atom_pairs = _parse_pairwise_input(
    indices, indices2, self.logger, fname='add_inverse_distances()')
atom_pairs = self._check_indices(atom_pairs)
f = InverseDistanceFeature(self.topology, atom_pairs, periodic=periodic)
self.__add_feature(f)
def add_inverse_distances(self, indices, periodic=True, indices2=None)
Adds the inverse distances between atoms to the feature list. Parameters ---------- indices : can be of two types: ndarray((n, 2), dtype=int): n x 2 array with the pairs of atoms between which the inverse distances shall be computed iterable of integers (either list or ndarray(n, dtype=int)): indices (not pairs of indices) of the atoms between which the inverse distances shall be computed. periodic : optional, boolean, default is True If periodic is True and the trajectory contains unitcell information, distances will be computed under the minimum image convention. indices2: iterable of integers (either list or ndarray(n, dtype=int)), optional: Only has effect if :py:obj:`indices` is an iterable of integers. Instead of the above behaviour, only the inverse distances between the atoms in :py:obj:`indices` and :py:obj:`indices2` will be computed. .. note:: When using the *iterable of integers* input, :py:obj:`indices` and :py:obj:`indices2` will be sorted numerically and made unique before converting them to a pairlist. Please look carefully at the output of :py:func:`describe()` to see what features exactly have been added.
6.217185
6.597256
0.94239
from .distances import ContactFeature
atom_pairs = _parse_pairwise_input(
    indices, indices2, self.logger, fname='add_contacts()')
atom_pairs = self._check_indices(atom_pairs)
f = ContactFeature(self.topology, atom_pairs, threshold, periodic, count_contacts)
self.__add_feature(f)
def add_contacts(self, indices, indices2=None, threshold=0.3, periodic=True, count_contacts=False)
r""" Adds the contacts to the feature list. Parameters ---------- indices : can be of two types: ndarray((n, 2), dtype=int): n x 2 array with the pairs of atoms between which the contacts shall be computed iterable of integers (either list or ndarray(n, dtype=int)): indices (not pairs of indices) of the atoms between which the contacts shall be computed. indices2: iterable of integers (either list or ndarray(n, dtype=int)), optional: Only has effect if :py:obj:`indices` is an iterable of integers. Instead of the above behaviour, only the contacts between the atoms in :py:obj:`indices` and :py:obj:`indices2` will be computed. threshold : float, optional, default = .3 distances below this threshold (in nm) will result in a feature 1.0, distances above will result in 0.0. The default is set to .3 nm (3 Angstrom) periodic : boolean, default True use the minimum image convention if unitcell information is available count_contacts : boolean, default False If set to true, this feature will return the number of formed contacts (and not feature values with either 1.0 or 0) The output of this feature will be of shape (Nt,1), and not (Nt, nr_of_contacts) .. note:: When using the *iterable of integers* input, :py:obj:`indices` and :py:obj:`indices2` will be sorted numerically and made unique before converting them to a pairlist. Please look carefully at the output of :py:func:`describe()` to see what features exactly have been added.
7.540759
7.855439
0.959941
from .distances import ResidueMinDistanceFeature
if scheme != 'ca' and is_string(residue_pairs):
    if residue_pairs == 'all':
        self.logger.warning("Using all residue pairs with schemes like closest or closest-heavy is "
                            "very time consuming. Consider reducing the residue pairs")

f = ResidueMinDistanceFeature(self.topology, residue_pairs, scheme, ignore_nonprotein, threshold, periodic)
self.__add_feature(f)
def add_residue_mindist(self, residue_pairs='all', scheme='closest-heavy', ignore_nonprotein=True, threshold=None, periodic=True)
r""" Adds the minimum distance between residues to the feature list. See below how the minimum distance can be defined. If the topology generated out of :py:obj:`topfile` contains information on periodic boundary conditions, the minimum image convention will be used when computing distances. Parameters ---------- residue_pairs : can be of two types: 'all' Computes distances between all pairs of residues excluding first and second neighbors ndarray((n, 2), dtype=int): n x 2 array with the pairs of residues for which distances will be computed scheme : 'ca', 'closest', 'closest-heavy', default is closest-heavy Within a residue, determines the sub-group atoms that will be considered when computing distances ignore_nonprotein : boolean, default True Ignore residues that are not of protein type (e.g. water molecules, post-translational modifications etc) threshold : float, optional, default is None distances below this threshold (in nm) will result in a feature 1.0, distances above will result in 0.0. If left to None, the numerical value will be returned periodic : bool, optional, default = True If `periodic` is True and the trajectory contains unitcell information, we will treat dihedrals that cross periodic images using the minimum image convention. .. note:: Using :py:obj:`scheme` = 'closest' or 'closest-heavy' with :py:obj:`residue pairs` = 'all' will compute nearly all interatomic distances, for every frame, before extracting the closest pairs. This can be very time consuming. Those schemes are intended to be used with a subset of residues chosen via :py:obj:`residue_pairs`.
6.516649
6.109779
1.066593
from .misc import GroupCOMFeature
f = GroupCOMFeature(self.topology, group_definitions, ref_geom=ref_geom,
                    image_molecules=image_molecules, mass_weighted=mass_weighted)
self.__add_feature(f)
def add_group_COM(self, group_definitions, ref_geom=None, image_molecules=False, mass_weighted=True,)
r""" Adds the centers of mass (COM) in cartesian coordinates of a group or groups of atoms. If these group definitions coincide directly with residues, use :obj:`add_residue_COM` instead. No periodic boundaries are taken into account. Parameters ---------- group_definitions : iterable of integers List of the groups of atom indices for which the COM will be computed. The atoms are zero-indexed. ref_geom : :obj:`mdtraj.Trajectory`, default is None The coordinates can be centered to a reference geometry before computing the COM. image_molecules : boolean, default is False The method traj.image_molecules will be called before computing averages. The method tries to correct for molecules broken across periodic boundary conditions, but can be time consuming. See http://mdtraj.org/latest/api/generated/mdtraj.Trajectory.html#mdtraj.Trajectory.image_molecules for more details mass_weighted : boolean, default is True Set to False if you want the geometric center and not the COM .. note:: Centering (with :obj:`ref_geom`) and imaging (:obj:`image_molecules=True`) the trajectories can sometimes be time consuming. Consider doing that to your trajectory-files prior to the featurization.
5.315385
5.687413
0.934588
from .misc import ResidueCOMFeature
from pyemma.coordinates.data.featurization.util import _atoms_in_residues

assert scheme in ['all', 'backbone', 'sidechain']

residue_atoms = _atoms_in_residues(self.topology, residue_indices,
                                   subset_of_atom_idxs=self.topology.select(scheme),
                                   MDlogger=self.logger)

f = ResidueCOMFeature(self.topology, np.asarray(residue_indices), residue_atoms, scheme,
                      ref_geom=ref_geom, image_molecules=image_molecules,
                      mass_weighted=mass_weighted)

self.__add_feature(f)
def add_residue_COM(self, residue_indices, scheme='all', ref_geom=None, image_molecules=False, mass_weighted=True,)
r""" Adds a per-residue center of mass (COM) in cartesian coordinates. No periodic boundaries are taken into account. Parameters ---------- residue_indices : iterable of integers The residue indices for which the COM will be computed. These are always zero-indexed that **are not necessarily** the residue sequence record of the topology (resSeq). resSeq indices start at least at 1 but can depend on the topology. See http://mdtraj.org/latest/atom_selection.html for more details. scheme : str, default is 'all' What atoms contribute to the COM computation. The supported keywords are: 'all', 'backbone', 'sidechain' . If the scheme yields no atoms for some residue, the selection falls back to 'all' for that residue. ref_geom : obj:`mdtraj.Trajectory`, default is None The coordinates can be centered to a reference geometry before computing the COM. image_molecules : boolean, default is False The method traj.image_molecules will be called before computing averages. The method tries to correct for molecules broken across periodic boundary conditions, but can be time consuming. See http://mdtraj.org/latest/api/generated/mdtraj.Trajectory.html#mdtraj.Trajectory.image_molecules for more details mass_weighted : boolean, default is True Set to False if you want the geometric center and not the COM .. note:: Centering (with :obj:`ref_geom`) and imaging (:obj:`image_molecules=True`) the trajectories can sometimes be time consuming. Consider doing that to your trajectory-files prior to the featurization.
4.905323
4.481821
1.094493
r from .distances import GroupMinDistanceFeature # Some thorough input checking and reformatting group_definitions, group_pairs, distance_list, group_identifiers = \ _parse_groupwise_input(group_definitions, group_pairs, self.logger, 'add_group_mindist') distance_list = self._check_indices(distance_list) f = GroupMinDistanceFeature(self.topology, group_definitions, group_pairs, distance_list, group_identifiers, threshold, periodic) self.__add_feature(f)
def add_group_mindist(self, group_definitions, group_pairs='all', threshold=None, periodic=True)
r""" Adds the minimum distance between groups of atoms to the feature list. If the groups of atoms are identical to residues, use :py:obj:`add_residue_mindist <pyemma.coordinates.data.featurizer.MDFeaturizer.add_residue_mindist>`. Parameters ---------- group_definitions : list of 1D-arrays/iterables containing the group definitions via atom indices. If there is only one group_definition, it is assumed the minimum distance within this group (excluding the self-distance) is wanted. In this case, :py:obj:`group_pairs` is ignored. group_pairs : Can be of two types: 'all' Computes minimum distances between all pairs of groups contained in the group definitions ndarray((n, 2), dtype=int): n x 2 array with the pairs of groups for which the minimum distances will be computed. threshold : float, optional, default is None distances below this threshold (in nm) will result in a feature 1.0, distances above will result in 0.0. If left to None, the numerical value will be returned periodic : bool, optional, default = True If `periodic` is True and the trajectory contains unitcell information, we will treat dihedrals that cross periodic images using the minimum image convention.
6.183005
6.666205
0.927515
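A short usage sketch for add_group_mindist, assuming a hypothetical topology file and two illustrative atom groups; with a threshold the feature becomes a binary contact.

import numpy as np
import pyemma.coordinates as coor

feat = coor.featurizer('protein.pdb')            # hypothetical topology file
group_a = np.arange(0, 10)                       # atoms of a first region (illustrative)
group_b = np.arange(50, 60)                      # atoms of a second region (illustrative)
feat.add_group_mindist([group_a, group_b], group_pairs='all')
# feat.add_group_mindist([group_a, group_b], threshold=0.5)  # 1.0 below 0.5 nm, 0.0 above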
from .angles import AngleFeature indexes = self._check_indices(indexes, pair_n=3) f = AngleFeature(self.topology, indexes, deg=deg, cossin=cossin, periodic=periodic) self.__add_feature(f)
def add_angles(self, indexes, deg=False, cossin=False, periodic=True)
Adds the list of angles to the feature list Parameters ---------- indexes : np.ndarray, shape=(num_triplets, 3), dtype=int an array with triplets of atom indices deg : bool, optional, default = False If False (default), angles will be computed in radians. If True, angles will be computed in degrees. cossin : bool, optional, default = False If True, each angle will be returned as a pair of (sin(x), cos(x)). This is useful if you calculate the mean (e.g. TICA/PCA, clustering) in that space. periodic : bool, optional, default = True If `periodic` is True and the trajectory contains unitcell information, we will treat angles that cross periodic images using the minimum image convention.
5.736354
6.420563
0.893435
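A brief sketch of add_angles with cossin=True; the atom triplet and topology file name are illustrative assumptions.

import numpy as np
import pyemma.coordinates as coor

feat = coor.featurizer('protein.pdb')      # hypothetical topology file
triplets = np.array([[0, 1, 2]])           # one angle spanned by atoms 0-1-2
feat.add_angles(triplets, cossin=True)     # yields (sin, cos) per angle, i.e. 2 features here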
from .angles import DihedralFeature indexes = self._check_indices(indexes, pair_n=4) f = DihedralFeature(self.topology, indexes, deg=deg, cossin=cossin, periodic=periodic) self.__add_feature(f)
def add_dihedrals(self, indexes, deg=False, cossin=False, periodic=True)
Adds the list of dihedrals to the feature list Parameters ---------- indexes : np.ndarray, shape=(num_quadruplets, 4), dtype=int an array with quadruplets of atom indices deg : bool, optional, default = False If False (default), angles will be computed in radians. If True, angles will be computed in degrees. cossin : bool, optional, default = False If True, each angle will be returned as a pair of (sin(x), cos(x)). This is useful if you calculate the mean (e.g. TICA/PCA, clustering) in that space. periodic : bool, optional, default = True If `periodic` is True and the trajectory contains unitcell information, we will treat dihedrals that cross periodic images using the minimum image convention.
5.013339
6.149021
0.815307
from .angles import BackboneTorsionFeature f = BackboneTorsionFeature( self.topology, selstr=selstr, deg=deg, cossin=cossin, periodic=periodic) self.__add_feature(f)
def add_backbone_torsions(self, selstr=None, deg=False, cossin=False, periodic=True)
Adds all backbone phi/psi angles or the ones specified in :obj:`selstr` to the feature list. Parameters ---------- selstr : str, optional, default = None selection string restricting the atom selection used to compute a specific set of backbone angles. If None (default), all phi/psi angles found in the topology will be computed deg : bool, optional, default = False If False (default), angles will be computed in radians. If True, angles will be computed in degrees. cossin : bool, optional, default = False If True, each angle will be returned as a pair of (sin(x), cos(x)). This is useful if you calculate the mean (e.g. TICA/PCA, clustering) in that space. periodic : bool, optional, default = True If `periodic` is True and the trajectory contains unitcell information, we will treat dihedrals that cross periodic images using the minimum image convention.
3.705222
4.56845
0.811046
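A brief sketch of add_backbone_torsions; the selection string follows mdtraj selection syntax and the residue range is an assumption.

import pyemma.coordinates as coor

feat = coor.featurizer('protein.pdb')                    # hypothetical topology file
feat.add_backbone_torsions(cossin=True, periodic=False)  # all phi/psi angles as (sin, cos) pairs
# restricted variant (mdtraj selection syntax, residue range assumed):
# feat.add_backbone_torsions(selstr='resid 10 to 30', cossin=True)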
from .angles import SideChainTorsions f = SideChainTorsions( self.topology, selstr=selstr, deg=deg, cossin=cossin, periodic=periodic, which=['chi1']) self.__add_feature(f)
def add_chi1_torsions(self, selstr="", deg=False, cossin=False, periodic=True)
Adds all chi1 angles or the ones specified in :obj:`selstr` to the feature list. Parameters ---------- selstr : str, optional, default = "" selection string restricting the atom selection used to compute a specific set of chi1 angles. If "" (default), all chi1 angles found in the topology will be computed deg : bool, optional, default = False If False (default), angles will be computed in radians. If True, angles will be computed in degrees. cossin : bool, optional, default = False If True, each angle will be returned as a pair of (sin(x), cos(x)). This is useful if you calculate the mean (e.g. TICA/PCA, clustering) in that space. periodic : bool, optional, default = True If `periodic` is True and the trajectory contains unitcell information, we will treat dihedrals that cross periodic images using the minimum image convention.
5.030628
5.70496
0.881799
if feature.dimension <= 0: raise ValueError("Dimension has to be positive. " "Please override dimension attribute in feature!") if not hasattr(feature, 'transform'): raise ValueError("no 'transform' method in given feature") elif not callable(getattr(feature, 'transform')): raise ValueError("'transform' attribute exists but is not a method") self.__add_feature(feature)
def add_custom_feature(self, feature)
Adds a custom feature to the feature list. Parameters ---------- feature : object an object with an interface like CustomFeature (transform, describe methods)
5.900913
6.267822
0.941462
r from .misc import MinRmsdFeature f = MinRmsdFeature(ref, ref_frame=ref_frame, atom_indices=atom_indices, topology=self.topology, precentered=precentered) self.__add_feature(f)
def add_minrmsd_to_ref(self, ref, ref_frame=0, atom_indices=None, precentered=False)
r""" Adds the minimum root-mean-square-deviation (minrmsd) with respect to a reference structure to the feature list. Parameters ---------- ref: Reference structure for computing the minrmsd. Can be of two types: 1. :py:obj:`mdtraj.Trajectory` object 2. filename for mdtraj to load. In this case, only the :py:obj:`ref_frame` of that file will be used. ref_frame: integer, default=0 Reference frame of the filename specified in :py:obj:`ref`. This parameter has no effect if :py:obj:`ref` is not a filename. atom_indices: array_like, default=None Atoms that will be used for: 1. aligning the target and reference geometries. 2. computing rmsd after the alignment. If left to None, all atoms of :py:obj:`ref` will be used. precentered: bool, default=False Use this boolean at your own risk to let mdtraj know that the target conformations are already centered at the origin, i.e., their (uniformly weighted) center of mass lies at the origin. This will speed up the computation of the rmsd.
4.331598
4.835542
0.895783
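A usage sketch for add_minrmsd_to_ref, assuming the reference structure lives in the same hypothetical PDB file and that alignment and rmsd should use C-alpha atoms only.

import mdtraj
import pyemma.coordinates as coor

feat = coor.featurizer('protein.pdb')              # hypothetical topology file
ref = mdtraj.load('protein.pdb')                   # reference geometry (assumed file)
ca = feat.topology.select('name CA')               # align and compute rmsd on C-alpha atoms
feat.add_minrmsd_to_ref(ref, ref_frame=0, atom_indices=ca)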
description = kwargs.pop('description', None) f = CustomFeature(func, dim=dim, description=description, fun_args=args, fun_kwargs=kwargs) self.add_custom_feature(f)
def add_custom_func(self, func, dim, *args, **kwargs)
Adds a user-defined function to extract features. Parameters ---------- func : function a user-defined function, which accepts an mdtraj.Trajectory object as its first parameter and as many optional and named arguments as desired. It has to return a numpy.ndarray with ndim=2. dim : int output dimension of :py:obj:`func` description: str or None a message for the describe feature list. args : any number of positional arguments these have to be in the same order as :py:obj:`func` expects them kwargs : dictionary named arguments passed to func Notes ----- You can describe the output of your function element-wise by passing a list of strings with the same length as the output dimension. Alternatively, a single-element list or str will be expanded to match the output dimension.
4.166166
4.03796
1.03175
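A sketch of a user-defined feature: the per-frame radius of gyration via mdtraj.compute_rg, reshaped to the required (T, 1) output. The topology file name is an assumption.

import mdtraj
import pyemma.coordinates as coor

def radius_of_gyration(traj):
    # must return a 2d array with one row per frame
    return mdtraj.compute_rg(traj).reshape(-1, 1)

feat = coor.featurizer('protein.pdb')                     # hypothetical topology file
feat.add_custom_func(radius_of_gyration, dim=1, description=['Rg'])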
custom_feats = [f for f in self.active_features if isinstance(f, CustomFeature)] for f in custom_feats: self.active_features.remove(f)
def remove_all_custom_funcs(self)
Remove all instances of CustomFeature from the active feature list.
3.72518
2.301425
1.61864
dim = sum(f.dimension for f in self.active_features) return dim
def dimension(self)
current dimension due to selected features Returns ------- dim : int total dimension due to all selected features
10.949175
8.672123
1.262571
# if there are no features selected, return given trajectory if not self.active_features: self.add_selection(np.arange(self.topology.n_atoms)) warnings.warn("You have not selected any features. Returning plain coordinates.") # otherwise build feature vector. feature_vec = [] # TODO: consider parallel evaluation computation here, this effort is # only worth it, if computation time dominates memory transfers for f in self.active_features: # perform sanity checks for custom feature input if isinstance(f, CustomFeature): # NOTE: casting=safe raises in numpy>=1.9 vec = f.transform(traj).astype(np.float32, casting='safe') if vec.shape[0] == 0: vec = np.empty((0, f.dimension)) if not isinstance(vec, np.ndarray): raise ValueError('Your custom feature %s did not return' ' a numpy.ndarray!' % str(f.describe())) if not vec.ndim == 2: raise ValueError('Your custom feature %s did not return' ' a 2d array. Shape was %s' % (str(f.describe()), str(vec.shape))) if not vec.shape[0] == traj.xyz.shape[0]: raise ValueError('Your custom feature %s did not return' ' as many frames as it received!' 'Input was %i, output was %i' % (str(f.describe()), traj.xyz.shape[0], vec.shape[0])) else: vec = f.transform(traj).astype(np.float32) feature_vec.append(vec) if len(feature_vec) > 1: res = np.hstack(feature_vec) else: res = feature_vec[0] return res
def transform(self, traj)
Maps an mdtraj Trajectory object to the selected output features Parameters ---------- traj : mdtraj Trajectory Trajectory object used as an input Returns ------- out : ndarray((T, n), dtype=float32) Output features: For each of T time steps in the given trajectory, a vector with all n output features selected.
3.929763
3.983519
0.986505
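A sketch applying the featurizer's transform directly to a trajectory held in memory; the file names are assumptions.

import mdtraj
import pyemma.coordinates as coor

feat = coor.featurizer('protein.pdb')                  # hypothetical topology file
feat.add_backbone_torsions(cossin=True)
traj = mdtraj.load('traj.xtc', top='protein.pdb')      # hypothetical trajectory file
X = feat.transform(traj)                               # ndarray (n_frames, feat.dimension), float32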
# Get the number of simulations: Q = len(dtrajs) # Get the number of states in the active set: if active_set is not None: N = active_set.size else: N = N_full # Build up a matrix of count matrices for each simulation. Size is Q*N^2: traj_ind = [] state1 = [] state2 = [] q = 0 for traj in dtrajs: traj_ind.append(q*np.ones(traj[:-lag].size)) state1.append(traj[:-lag]) state2.append(traj[lag:]) q += 1 traj_inds = np.concatenate(traj_ind) pairs = N_full * np.concatenate(state1) + np.concatenate(state2) data = np.ones(pairs.size) Ct_traj = scipy.sparse.coo_matrix((data, (traj_inds, pairs)), shape=(Q, N_full*N_full)) Ct_traj = Ct_traj.tocsr() # Perform re-sampling: svals = np.zeros((nbs, N)) for s in range(nbs): # Choose selection: sel = np.random.choice(Q, Q, replace=True) # Compute count matrix for selection: Ct_sel = Ct_traj[sel, :].sum(axis=0) Ct_sel = np.asarray(Ct_sel).reshape((N_full, N_full)) if active_set is not None: from pyemma.util.linalg import submatrix Ct_sel = submatrix(Ct_sel, active_set) svals[s, :] = scl.svdvals(Ct_sel) # Compute mean and uncertainties: smean = np.mean(svals, axis=0) sdev = np.std(svals, axis=0) return smean, sdev
def bootstrapping_dtrajs(dtrajs, lag, N_full, nbs=10000, active_set=None)
Perform trajectory based re-sampling. Parameters ---------- dtrajs : list of discrete trajectories lag : int lag time N_full : int Number of states in discrete trajectories. nbs : int, optional Number of bootstrapping samples active_set : ndarray Indices of active set, all count matrices will be restricted to active set. Returns ------- smean : ndarray(N,) mean values of singular values sdev : ndarray(N,) standard deviations of singular values
2.927485
2.746864
1.065755
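A toy sketch of the trajectory bootstrap above; the import location of bootstrapping_dtrajs is assumed to be the module documented here, so only the call is shown.

import numpy as np

# assuming: from <module documented here> import bootstrapping_dtrajs
dtrajs = [np.random.randint(0, 4, size=1000) for _ in range(3)]   # three toy 4-state trajectories
smean, sdev = bootstrapping_dtrajs(dtrajs, lag=10, N_full=4, nbs=100)
# smean[i] +/- sdev[i] is a trajectory-bootstrap estimate of the i-th singular value of the count matrix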
# Get the number of states: N = Ct.shape[0] # Get the number of transition pairs: T = Ct.sum() # Reshape and normalize the count matrix: p = Ct.toarray() p = np.reshape(p, (N*N,)).astype(np.float64) p = p / T # Perform the bootstrapping: svals = np.zeros((nbs, N)) for s in range(nbs): # Draw sample: sel = np.random.multinomial(T, p) # Compute the count-matrix: sC = np.reshape(sel, (N, N)) # Compute singular values: svals[s, :] = scl.svdvals(sC) # Compute mean and uncertainties: smean = np.mean(svals, axis=0) sdev = np.std(svals, axis=0) return smean, sdev
def bootstrapping_count_matrix(Ct, nbs=10000)
Perform bootstrapping on the count matrix to estimate uncertainties for its singular values. Parameters ---------- Ct : csr-matrix count matrix of the data. nbs : int, optional the number of re-samplings to be drawn from the count matrix Returns ------- smean : ndarray(N,) mean values of singular values sdev : ndarray(N,) standard deviations of singular values
3.207072
2.964455
1.081842
# List all transition triples: rows = [] cols = [] states = [] for dtraj in dtrajs: if dtraj.size > 2*lag: rows.append(dtraj[0:-2*lag]) states.append(dtraj[lag:-lag]) cols.append(dtraj[2*lag:]) row = np.concatenate(rows) col = np.concatenate(cols) state = np.concatenate(states) data = np.ones(row.size) # Transform the rows and cols into a single list with N**2 possible values: pair = N * row + col # Estimate sparse matrix: C2t = scipy.sparse.coo_matrix((data, (pair, state)), shape=(N*N, N)) return C2t.tocsc()
def twostep_count_matrix(dtrajs, lag, N)
Compute all two-step count matrices from discrete trajectories. Parameters ---------- dtrajs : list of discrete trajectories lag : int the lag time for count matrix estimation N : int the number of states in the discrete trajectories. Returns ------- C2t : sparse csc-matrix (N*N, N) two-step count matrices for all intermediate states, stored in flattened form: column n holds the vectorized (N, N) matrix of transition counts i -> n -> j.
4.216394
4.139594
1.018553
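Because the two-step counts are stored column-wise in flattened form, recovering the (N, N) slab for one intermediate state is a reshape; a toy sketch (import location assumed):

import numpy as np

# assuming: from <module documented here> import twostep_count_matrix
dtrajs = [np.random.randint(0, 3, size=500)]                  # one toy 3-state trajectory
N, lag = 3, 5
C2t = twostep_count_matrix(dtrajs, lag, N)                    # sparse, shape (N*N, N)
n = 1
C2t_n = np.asarray(C2t[:, n].todense()).reshape(N, N)         # entry [i, j] counts transitions i -> n -> j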
import msmtools.estimation as me # Decompose count matrix by SVD: if lcc is not None: Ct_svd = me.largest_connected_submatrix(Ct, lcc=lcc) N1 = Ct.shape[0] else: Ct_svd = Ct V, s, W = scl.svd(Ct_svd, full_matrices=False) # Make rank decision: if rank_ind is None: rank_ind = (s >= np.finfo(float).eps) V = V[:, rank_ind] s = s[rank_ind] W = W[rank_ind, :].T # Compute transformations: F1 = np.dot(V, np.diag(s**-0.5)) F2 = np.dot(W, np.diag(s**-0.5)) # Apply the transformations to C2t: N = Ct_svd.shape[0] M = F1.shape[1] Xi = np.zeros((M, N, M)) for n in range(N): if lcc is not None: C2t_n = C2t[:, lcc[n]] C2t_n = _reshape_sparse(C2t_n, (N1, N1)) C2t_n = me.largest_connected_submatrix(C2t_n, lcc=lcc) else: C2t_n = C2t[:, n] C2t_n = _reshape_sparse(C2t_n, (N, N)) Xi[:, n, :] = np.dot(F1.T, C2t_n.dot(F2)) # Compute sigma: c = np.sum(Ct_svd, axis=1) sigma = np.dot(F1.T, c) # Compute eigenvalues: Xi_S = np.sum(Xi, axis=1) l, R = scl.eig(Xi_S.T) # Restrict eigenvalues to reasonable range: ind = np.where(np.logical_and(np.abs(l) <= (1+tol_one), np.real(l) >= 0.0))[0] l = l[ind] R = R[:, ind] # Sort and extract omega l, R = _sort_by_norm(l, R) omega = np.real(R[:, 0]) omega = omega / np.dot(omega, sigma) return Xi, omega, sigma, l
def oom_components(Ct, C2t, rank_ind=None, lcc=None, tol_one=1e-2)
Compute OOM components and eigenvalues from count matrices: Parameters ---------- Ct : ndarray(N, N) count matrix from data C2t : sparse csc-matrix (N*N, N) two-step count matrix from data for all states, columns enumerate intermediate steps. rank_ind : ndarray(N, dtype=bool), optional, default=None indicates which singular values are accepted. By default, all non- zero singular values are accepted. lcc : ndarray(N,), optional, default=None largest connected set of the count-matrix. Two step count matrix will be reduced to this set. tol_one : float, optional, default=1e-2 keep eigenvalues of absolute value less or equal 1+tol_one. Returns ------- Xi : ndarray(M, N, M) matrix of set-observable operators omega: ndarray(M,) information state vector of OOM sigma : ndarray(M,) evaluator of OOM l : ndarray(M,) eigenvalues from OOM
2.812691
2.606171
1.079243
import msmtools.estimation as me # Compute equilibrium transition matrix: Ct_Eq = np.einsum('j,jkl,lmn,n->km', omega, Xi, Xi, sigma) # Remove negative entries: Ct_Eq[Ct_Eq < 0.0] = 0.0 # Compute transition matrix after symmetrization: pi_r = np.sum(Ct_Eq, axis=1) if reversible: pi_c = np.sum(Ct_Eq, axis=0) pi_sym = pi_r + pi_c # Avoid zero row-sums. States with zero row-sums will be eliminated by active set update. ind0 = np.where(pi_sym == 0.0)[0] pi_sym[ind0] = 1.0 Tt_Eq = (Ct_Eq + Ct_Eq.T) / pi_sym[:, None] else: # Avoid zero row-sums. States with zero row-sums will be eliminated by active set update. ind0 = np.where(pi_r == 0.0)[0] pi_r[ind0] = 1.0 Tt_Eq = Ct_Eq / pi_r[:, None] # Perform active set update: lcc = me.largest_connected_set(Tt_Eq) Tt_Eq = me.largest_connected_submatrix(Tt_Eq, lcc=lcc) if return_lcc: return Tt_Eq, lcc else: return Tt_Eq
def equilibrium_transition_matrix(Xi, omega, sigma, reversible=True, return_lcc=True)
Compute equilibrium transition matrix from OOM components: Parameters ---------- Xi : ndarray(M, N, M) matrix of set-observable operators omega: ndarray(M,) information state vector of OOM sigma : ndarray(M,) evaluator of OOM reversible : bool, optional, default=True symmetrize corrected count matrix in order to obtain a reversible transition matrix. return_lcc: bool, optional, default=True return indices of largest connected set. Returns ------- Tt_Eq : ndarray(N, N) equilibrium transition matrix lcc : ndarray(M,) the largest connected set of the transition matrix.
2.801619
2.600765
1.077229
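The corrected count matrix assembled above is the OOM expression :math:`C^{\mathrm{eq}}_{km} = \sum_{j,l,n} \omega_j \Xi_{jkl} \Xi_{lmn} \sigma_n`. A brief sketch of the call, assuming Xi, omega and sigma come from oom_components and that the function is importable from the module documented here:

import numpy as np

# assuming: from <module documented here> import equilibrium_transition_matrix
Ct_eq = np.einsum('j,jkl,lmn,n->km', omega, Xi, Xi, sigma)    # the equilibrium-corrected count matrix itself
Tt_eq, lcc = equilibrium_transition_matrix(Xi, omega, sigma, reversible=True, return_lcc=True)
# Tt_eq is row-stochastic on the largest connected set; lcc maps its rows back to the original states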
r from pyemma.coordinates.data.featurization.featurizer import MDFeaturizer return MDFeaturizer(topfile)
def featurizer(topfile)
r""" Featurizer to select features from MD data. Parameters ---------- topfile : str or mdtraj.Topology instance path to topology file (e.g pdb file) or a mdtraj.Topology object Returns ------- feat : :class:`Featurizer <pyemma.coordinates.data.featurization.featurizer.MDFeaturizer>` Examples -------- Create a featurizer and add backbone torsion angles to active features. Then use it in :func:`source` >>> import pyemma.coordinates # doctest: +SKIP >>> feat = pyemma.coordinates.featurizer('my_protein.pdb') # doctest: +SKIP >>> feat.add_backbone_torsions() # doctest: +SKIP >>> reader = pyemma.coordinates.source(["my_traj01.xtc", "my_traj02.xtc"], features=feat) # doctest: +SKIP or >>> traj = mdtraj.load('my_protein.pdb') # # doctest: +SKIP >>> feat = pyemma.coordinates.featurizer(traj.topology) # doctest: +SKIP .. autoclass:: pyemma.coordinates.data.featurization.featurizer.MDFeaturizer :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.coordinates.data.featurization.featurizer.MDFeaturizer :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.coordinates.data.featurization.featurizer.MDFeaturizer :attributes:
7.235846
4.348847
1.663854
r from pyemma.coordinates.data.sources_merger import SourcesMerger return SourcesMerger(sources, chunk=chunksize)
def combine_sources(sources, chunksize=None)
r""" Combines multiple data sources to stream from. The given source objects (readers and transformers, eg. TICA) are concatenated in dimension axis during iteration. This can be used to couple arbitrary features in order to pass them to an Estimator expecting only one source, which is usually the case. All the parameters for iterator creation are passed to the actual sources, to ensure consistent behaviour. Parameters ---------- sources : list, tuple list of DataSources (Readers, StreamingTransformers etc.) to combine for streaming access. chunksize: int, default=None Number of data frames to process at once. Choose a higher value here, to optimize thread usage and gain processing speed. If None is passed, use the default value of the underlying reader/data source. Choose zero to disable chunking at all. Notes ----- This is currently only implemented for matching lengths trajectories. Returns ------- merger : :class:`SourcesMerger <pyemma.coordinates.data.sources_merger.SourcesMerger>`
10.390482
6.48841
1.601391
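A sketch combining two featurized readers over the same (assumed) trajectory file, so that both feature sets stream side by side:

import pyemma.coordinates as coor

feat_a = coor.featurizer('protein.pdb')                 # hypothetical topology file
feat_a.add_backbone_torsions()
feat_b = coor.featurizer('protein.pdb')
feat_b.add_chi1_torsions()
src_a = coor.source(['traj01.xtc'], features=feat_a)    # hypothetical trajectory file
src_b = coor.source(['traj01.xtc'], features=feat_b)
merged = coor.combine_sources([src_a, src_b])           # dimension = feat_a.dimension + feat_b.dimension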
r from pyemma.coordinates.pipelines import Pipeline if not isinstance(stages, list): stages = [stages] p = Pipeline(stages, param_stride=stride, chunksize=chunksize) if run: p.parametrize() return p
def pipeline(stages, run=True, stride=1, chunksize=None)
r""" Data analysis pipeline. Constructs a data analysis :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` and parametrizes it (unless prevented). If this function takes too long, consider loading data in memory. Alternatively if the data is to large to be loaded into memory make use of the stride parameter. Parameters ---------- stages : data input or list of pipeline stages If given a single pipeline stage this must be a data input constructed by :py:func:`source`. If a list of pipelining stages are given, the first stage must be a data input constructed by :py:func:`source`. run : bool, optional, default = True If True, the pipeline will be parametrized immediately with the given stages. If only an input stage is given, the run flag has no effect at this time. True also means that the pipeline will be immediately re-parametrized when further stages are added to it. *Attention* True means this function may take a long time to compute. If False, the pipeline will be passive, i.e. it will not do any computations before you call parametrize() stride : int, optional, default = 1 If set to 1, all input data will be used throughout the pipeline to parametrize its stages. Note that this could cause the parametrization step to be very slow for large data sets. Since molecular dynamics data is usually correlated at short timescales, it is often sufficient to parametrize the pipeline at a longer stride. See also stride option in the output functions of the pipeline. chunksize: int, default=None Number of data frames to process at once. Choose a higher value here, to optimize thread usage and gain processing speed. If None is passed, use the default value of the underlying reader/data source. Choose zero to disable chunking at all. Returns ------- pipe : :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` A pipeline object that is able to conduct big data analysis with limited memory in streaming mode. Examples -------- >>> import numpy as np >>> from pyemma.coordinates import source, tica, assign_to_centers, pipeline Create some random data and cluster centers: >>> data = np.random.random((1000, 3)) >>> centers = data[np.random.choice(1000, 10)] >>> reader = source(data) Define a TICA transformation with lag time 10: >>> tica_obj = tica(lag=10) Assign any input to given centers: >>> assign = assign_to_centers(centers=centers) >>> pipe = pipeline([reader, tica_obj, assign]) >>> pipe.parametrize() .. autoclass:: pyemma.coordinates.pipelines.Pipeline :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.coordinates.pipelines.Pipeline :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.coordinates.pipelines.Pipeline :attributes:
5.319258
5.021873
1.059218
r from pyemma.coordinates.clustering.kmeans import KmeansClustering from pyemma.coordinates.pipelines import Discretizer if cluster is None: _logger.warning('You did not specify a cluster algorithm.' ' Defaulting to kmeans(k=100)') cluster = KmeansClustering(n_clusters=100) disc = Discretizer(reader, transform, cluster, param_stride=stride, chunksize=chunksize) if run: disc.parametrize() return disc
def discretizer(reader, transform=None, cluster=None, run=True, stride=1, chunksize=None)
r""" Specialized pipeline: From trajectories to clustering. Constructs a pipeline that consists of three stages: 1. an input stage (mandatory) 2. a transformer stage (optional) 3. a clustering stage (mandatory) This function is identical to calling :func:`pipeline` with the three stages, it is only meant as a guidance for the (probably) most common usage cases of a pipeline. Parameters ---------- reader : instance of :class:`pyemma.coordinates.data.reader.ChunkedReader` The reader instance provides access to the data. If you are working with MD data, you most likely want to use a FeatureReader. transform : instance of :class: `pyemma.coordinates.Transformer` an optional transform like PCA/TICA etc. cluster : instance of :class: `pyemma.coordinates.AbstractClustering` clustering Transformer (optional) a cluster algorithm to assign transformed data to discrete states. stride : int, optional, default = 1 If set to 1, all input data will be used throughout the pipeline to parametrize its stages. Note that this could cause the parametrization step to be very slow for large data sets. Since molecular dynamics data is usually correlated at short timescales, it is often sufficient to parametrize the pipeline at a longer stride. See also stride option in the output functions of the pipeline. chunksize: int, default=None Number of data frames to process at once. Choose a higher value here, to optimize thread usage and gain processing speed. If None is passed, use the default value of the underlying reader/data source. Choose zero to disable chunking at all. Returns ------- pipe : a :class:`Pipeline <pyemma.coordinates.pipelines.Discretizer>` object A pipeline object that is able to streamline data analysis of large amounts of input data with limited memory in streaming mode. Examples -------- Construct a discretizer pipeline processing all data with a PCA transformation and cluster the principal components with uniform time clustering: >>> from pyemma.coordinates import source, pca, cluster_regspace, discretizer >>> from pyemma.datasets import get_bpti_test_data >>> from pyemma.util.contexts import settings >>> reader = source(get_bpti_test_data()['trajs'], top=get_bpti_test_data()['top']) >>> transform = pca(dim=2) >>> cluster = cluster_regspace(dmin=0.1) Create the discretizer, access the the discrete trajectories and save them to files: >>> with settings(show_progress_bars=False): ... disc = discretizer(reader, transform, cluster) ... disc.dtrajs # doctest: +ELLIPSIS [array([... This will store the discrete trajectory to "traj01.dtraj": >>> from pyemma.util.files import TemporaryDirectory >>> import os >>> with TemporaryDirectory('dtrajs') as tmpdir: ... disc.save_dtrajs(output_dir=tmpdir) ... sorted(os.listdir(tmpdir)) ['bpti_001-033.dtraj', 'bpti_034-066.dtraj', 'bpti_067-100.dtraj'] .. autoclass:: pyemma.coordinates.pipelines.Pipeline :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.coordinates.pipelines.Pipeline :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.coordinates.pipelines.Pipeline :attributes:
4.491028
4.638268
0.968255
r from pyemma.coordinates.clustering.kmeans import MiniBatchKmeansClustering res = MiniBatchKmeansClustering(n_clusters=k, max_iter=max_iter, metric=metric, init_strategy=init_strategy, batch_size=batch_size, n_jobs=n_jobs, skip=skip, clustercenters=clustercenters) from pyemma.util.reflection import get_default_args cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_mini_batch_kmeans)['chunksize'], **kwargs) if data is not None: res.estimate(data, chunksize=cs) else: res.chunksize = chunksize return res
def cluster_mini_batch_kmeans(data=None, k=100, max_iter=10, batch_size=0.2, metric='euclidean', init_strategy='kmeans++', n_jobs=None, chunksize=None, skip=0, clustercenters=None, **kwargs)
r"""k-means clustering with mini-batch strategy Mini-batch k-means is an approximation to k-means which picks a randomly selected subset of data points to be updated in each iteration. Usually much faster than k-means but will likely deliver a less optimal result. Returns ------- kmeans_mini : a :class:`MiniBatchKmeansClustering <pyemma.coordinates.clustering.MiniBatchKmeansClustering>` clustering object Object for mini-batch kmeans clustering. It holds discrete trajectories and cluster center information. See also -------- :func:`kmeans <pyemma.coordinates.kmeans>` : for full k-means clustering .. autoclass:: pyemma.coordinates.clustering.kmeans.MiniBatchKmeansClustering :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.coordinates.clustering.kmeans.MiniBatchKmeansClustering :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.coordinates.clustering.kmeans.MiniBatchKmeansClustering :attributes: References ---------- .. [1] http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
3.093055
3.454566
0.895353
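A sketch of mini-batch k-means on in-memory toy data; attribute names follow the clustering entries in this document and the signature above.

import numpy as np
import pyemma.coordinates as coor

data = np.random.randn(10000, 2)
km = coor.cluster_mini_batch_kmeans(data, k=50, max_iter=10, batch_size=0.2)  # 20% of the data per iteration
dtrajs = km.dtrajs              # list with one discrete trajectory of length 10000
centers = km.clustercenters     # ndarray (50, 2); attribute name assumed to match the signature above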
r from pyemma.coordinates.clustering.uniform_time import UniformTimeClustering res = UniformTimeClustering(k, metric=metric, n_jobs=n_jobs, skip=skip, stride=stride) from pyemma.util.reflection import get_default_args cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_uniform_time)['chunksize'], **kwargs) if data is not None: res.estimate(data, chunksize=cs) else: res.chunksize = cs return res
def cluster_uniform_time(data=None, k=None, stride=1, metric='euclidean', n_jobs=None, chunksize=None, skip=0, **kwargs)
r"""Uniform time clustering If given data, performs a clustering that selects data points uniformly in time and then assigns the data using a Voronoi discretization. Returns a :class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>` object that can be used to extract the discretized data sequences, or to assign other data points to the same partition. If data is not given, an empty :class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>` will be created that still needs to be parametrized, e.g. in a :func:`pipeline`. Parameters ---------- data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created by source function input data, if available in memory k : int the number of cluster centers. When not specified (None), min(sqrt(N), 5000) is chosen as default value, where N denotes the number of data points stride : int, optional, default = 1 If set to 1, all input data will be used for estimation. Note that this could cause this calculation to be very slow for large data sets. Since molecular dynamics data is usually correlated at short timescales, it is often sufficient to estimate transformations at a longer stride. Note that the stride option in the get_output() function of the returned object is independent, so you can parametrize at a long stride, and still map all frames through the transformer. metric : str metric to use during clustering ('euclidean', 'minRMSD') n_jobs : int or None, default None Number of threads to use during assignment of the data. If None, all available CPUs will be used. chunksize: int, default=None Number of data frames to process at once. Choose a higher value here, to optimize thread usage and gain processing speed. If None is passed, use the default value of the underlying reader/data source. Choose zero to disable chunking at all. skip : int, default=0 skip the first initial n frames per trajectory. Returns ------- uniformTime : a :class:`UniformTimeClustering <pyemma.coordinates.clustering.UniformTimeClustering>` clustering object Object for uniform time clustering. It holds discrete trajectories and cluster center information. .. autoclass:: pyemma.coordinates.clustering.uniform_time.UniformTimeClustering :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.coordinates.clustering.uniform_time.UniformTimeClustering :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.coordinates.clustering.uniform_time.UniformTimeClustering :attributes:
4.253924
4.695833
0.905894
r if centers is None: raise ValueError('You have to provide centers in form of a filename' ' or NumPy array or a reader created by source function') from pyemma.coordinates.clustering.assign import AssignCenters res = AssignCenters(centers, metric=metric, n_jobs=n_jobs, skip=skip, stride=stride) from pyemma.util.reflection import get_default_args cs = _check_old_chunksize_arg(chunksize, get_default_args(assign_to_centers)['chunksize'], **kwargs) if data is not None: res.estimate(data, chunksize=cs) if return_dtrajs: return res.dtrajs else: res.chunksize = cs return res
def assign_to_centers(data=None, centers=None, stride=1, return_dtrajs=True, metric='euclidean', n_jobs=None, chunksize=None, skip=0, **kwargs)
r"""Assigns data to the nearest cluster centers Creates a Voronoi partition with the given cluster centers. If given trajectories as data, this function will by default discretize the trajectories and return discrete trajectories of corresponding lengths. Otherwise, an assignment object will be returned that can be used to assign data later or can serve as a pipeline stage. Parameters ---------- data : ndarray or list of arrays or reader created by source function data to be assigned centers : path to file or ndarray or a reader created by source function cluster centers to use in assignment of data stride : int, optional, default = 1 assign only every n'th frame to the centers. Usually you want to assign all the data and only use a stride during calculation the centers. return_dtrajs : bool, optional, default = True If True, it will return the discretized trajectories obtained from assigning the coordinates in the data input. This will only have effect if data is given. When data is not given or return_dtrajs is False, the :class:'AssignCenters <_AssignCenters>' object will be returned. metric : str metric to use during clustering ('euclidean', 'minRMSD') n_jobs : int or None, default None Number of threads to use during assignment of the data. If None, all available CPUs will be used. chunksize: int, default=None Number of data frames to process at once. Choose a higher value here, to optimize thread usage and gain processing speed. If None is passed, use the default value of the underlying reader/data source. Choose zero to disable chunking at all. Returns ------- assignment : list of integer arrays or an :class:`AssignCenters <pyemma.coordinates.clustering.AssignCenters>` object assigned data Examples -------- Load data to assign to clusters from 'my_data.csv' by using the cluster centers from file 'my_centers.csv' >>> import numpy as np Generate some random data and choose 10 random centers: >>> data = np.random.random((100, 3)) >>> cluster_centers = data[np.random.randint(0, 99, size=10)] >>> dtrajs = assign_to_centers(data, cluster_centers) >>> print(dtrajs) # doctest: +ELLIPSIS [array([... .. autoclass:: pyemma.coordinates.clustering.assign.AssignCenters :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.coordinates.clustering.assign.AssignCenters :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.coordinates.clustering.assign.AssignCenters :attributes:
5.425477
4.916599
1.103502
disc = np.zeros(100, dtype=int) divides = np.concatenate([divides, [100]]) for i in range(len(divides)-1): disc[divides[i]:divides[i+1]] = i+1 return disc[self.dtraj_T100K_dt10]
def dtraj_T100K_dt10_n(self, divides)
100K frames trajectory at timestep 10, arbitrary n-state discretization.
2.446596
2.433502
1.005381
from msmtools.generation import generate_traj return generate_traj(self._P, N, start=start, stop=stop, dt=dt)
def generate_traj(self, N, start=None, stop=None, dt=1)
Generates a random trajectory of length N with time step dt
5.335963
5.459531
0.977367
from msmtools.generation import generate_trajs return generate_trajs(self._P, M, N, start=start, stop=stop, dt=dt)
def generate_trajs(self, M, N, start=None, stop=None, dt=1)
Generates M random trajectories of length N each with time step dt
4.525696
4.525112
1.000129
# norms evnorms = _np.abs(evals) # sort I = _np.argsort(evnorms)[::-1] # permute evals2 = evals[I] evecs2 = evecs[:, I] # done return evals2, evecs2
def sort_by_norm(evals, evecs)
Sorts the eigenvalues and eigenvectors by descending norm of the eigenvalues Parameters ---------- evals: ndarray(n) eigenvalues evecs: ndarray(n,n) eigenvectors in a column matrix Returns ------- (evals, evecs) : ndarray(m), ndarray(n,m) the sorted eigenvalues and eigenvectors
3.250965
3.61049
0.900422
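A minimal sketch of sort_by_norm on a small diagonal matrix (import location assumed):

import numpy as np

# assuming: from <module documented here> import sort_by_norm
A = np.diag([0.1, 0.9, 0.5])
evals, evecs = np.linalg.eig(A)
evals, evecs = sort_by_norm(evals, evecs)   # evals is now [0.9, 0.5, 0.1]; eigenvector columns are reordered to match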
# check input assert _np.allclose(W.T, W), 'W is not a symmetric matrix' if method.lower() == 'qr': from .eig_qr.eig_qr import eig_qr s, V = eig_qr(W) # compute the Eigenvalues of C0 using Schur factorization elif method.lower() == 'schur': from scipy.linalg import schur S, V = schur(W) s = _np.diag(S) else: raise ValueError('method not implemented: ' + method) s, V = sort_by_norm(s, V) # sort them # determine the cutoff. We know that C0 is an spd matrix, # so we select the truncation threshold such that everything that is negative vanishes evmin = _np.min(s) if evmin < 0: epsilon = max(epsilon, -evmin + 1e-16) # determine effective rank m and perform low-rank approximations. evnorms = _np.abs(s) n = _np.shape(evnorms)[0] m = n - _np.searchsorted(evnorms[::-1], epsilon) if m == 0: raise _ZeroRankError('All eigenvalues are smaller than %g, rank reduction would discard all dimensions.'%epsilon) Vm = V[:, 0:m] sm = s[0:m] if canonical_signs: # enforce canonical eigenvector signs for j in range(m): jj = _np.argmax(_np.abs(Vm[:, j])) Vm[:, j] *= _np.sign(Vm[jj, j]) return sm, Vm
def spd_eig(W, epsilon=1e-10, method='QR', canonical_signs=False)
Rank-reduced eigenvalue decomposition of a symmetric positive definite matrix. Removes all negligible eigenvalues. Parameters ---------- W : ndarray((n, n), dtype=float) Symmetric positive-definite (spd) matrix. epsilon : float Truncation parameter. Eigenvalues with norms smaller than this cutoff will be removed. method : str Method used to perform the eigenvalue decomposition of :math:`W`. Options are: * 'QR': QR-based robust eigenvalue decomposition of W * 'schur': Schur decomposition of W canonical_signs : boolean, default = False Fix signs in V, s.t. the largest element (in absolute value) of every column of V is positive. Returns ------- s : ndarray(k) k non-negligible eigenvalues, sorted by descending norms V : ndarray(n, k) k leading eigenvectors
4.761008
4.566274
1.042646
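A sketch of spd_eig on a rank-deficient symmetric matrix, so the truncation actually removes an eigenvalue (import location assumed):

import numpy as np

# assuming: from <module documented here> import spd_eig
X = np.random.randn(100, 3)
W = np.zeros((4, 4))
W[:3, :3] = X.T @ X / 100.0                   # spd 3x3 block padded with a zero row/column -> rank 3
s, V = spd_eig(W, epsilon=1e-10, method='QR')
# s holds the 3 non-negligible eigenvalues; the (near-)zero one was truncated, V has matching columns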