code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
---|---|---|---|---|---|
myarr = np.array(arr)
if myarr.ndim == 1:
return list(range(len(myarr)))
elif myarr.ndim == 2:
return tuple(itertools.product(list(range(myarr.shape[0])),
list(range(myarr.shape[1]))))
else:
raise NotImplementedError('Only supporting arrays of dimension 1 and 2 as yet.') | def _indexes(arr) | Returns the list of all indexes of the given array.
Currently works for one and two-dimensional arrays | 2.880217 | 2.879566 | 1.000226 |
if arr.ndim == 2 and types.is_int(indexes):
return arr[:, indexes]
elif arr.ndim == 3 and len(indexes) == 2:
return arr[:, indexes[0], indexes[1]]
else:
raise NotImplementedError('Only supporting arrays of dimension 2 and 3 as yet.') | def _column(arr, indexes) | Returns a column with given indexes from a deep array
For example, if the array is a matrix and indexes is a single int, will
return arr[:,indexes]. If the array is an order 3 tensor and indexes is a
pair of ints, will return arr[:,indexes[0],indexes[1]], etc. | 3.547207 | 3.40229 | 1.042594 |
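A quick, self-contained illustration of the slicing that `_column` performs on an order-3 array (plain numpy, not the pyemma helper itself):

```python
import numpy as np

arr = np.arange(24).reshape(4, 3, 2)  # e.g. 4 frames, each carrying a (3, 2) block of values
col = arr[:, 1, 0]                    # what _column(arr, (1, 0)) returns for ndim == 3
print(col)                            # [ 2  8 14 20]
```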
r
if conf < 0 or conf > 1:
raise ValueError('Not a meaningful confidence level: '+str(conf))
try:
data = types.ensure_ndarray(data, kind='numeric')
except:
# if 1D array of arrays try to fuse it
if isinstance(data, np.ndarray) and np.ndim(data) == 1:
newshape = tuple([len(data)] + list(data[0].shape))
newdata = np.zeros(newshape)
for i in range(len(data)):
newdata[i, :] = data[i]
data = newdata
types.assert_array(data, kind='numeric')
if np.ndim(data) == 1:
m, lower, upper = _confidence_interval_1d(data, conf)
return lower, upper
else:
I = _indexes(data[0])
lower = np.zeros(data[0].shape)
upper = np.zeros(data[0].shape)
for i in I:
col = _column(data, i)
m, lower[i], upper[i] = _confidence_interval_1d(col, conf)
# return
return lower, upper | def confidence_interval(data, conf=0.95) | r""" Computes element-wise confidence intervals from a sample of ndarrays
Given a sample of arbitrarily shaped ndarrays, computes element-wise
confidence intervals
Parameters
----------
data : array-like of dimension 1 to 3
array of numbers or arrays. The first index is used as the sample
index, the remaining indexes are specific to the array of interest
conf : float, optional, default = 0.95
confidence interval
Returns
-------
lower : ndarray(shape)
element-wise lower bounds
upper : ndarray(shape)
element-wise upper bounds | 3.231281 | 3.294691 | 0.980754 |
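A hypothetical usage sketch for the element-wise confidence intervals; the import path `pyemma.util.statistics` is assumed from context and may differ:

```python
import numpy as np
from pyemma.util.statistics import confidence_interval  # assumed location of this function

sample = np.random.rand(100, 5, 3)                    # 100 samples, each an array of shape (5, 3)
lower, upper = confidence_interval(sample, conf=0.95)
assert lower.shape == (5, 3) and upper.shape == (5, 3)
```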
N = 0
for x in X:
if len(x) > N:
N = len(x)
return N | def _maxlength(X) | Returns the maximum length of signal trajectories X | 2.426736 | 2.414571 | 1.005038 |
# check input
assert np.ndim(X[0]) == 1, 'Data must be 1-dimensional'
N = _maxlength(X) # length
# mean-free data
xflat = np.concatenate(X)
Xmean = np.mean(xflat)
X0 = [x-Xmean for x in X]
# moments
x2m = np.mean(xflat ** 2)
# integrate damped autocorrelation
corrsum = 0.0
for lag in range(N):
acf = 0.0
n = 0.0
for x in X0:
Nx = len(x) # length of this trajectory
if (Nx > lag): # only use trajectories that are long enough
acf += np.sum(x[0:Nx-lag] * x[lag:Nx])
n += float(Nx-lag)
acf /= n
if acf <= 0 and truncate_acf: # zero autocorrelation. Exit
break
elif lag > 0: # start integrating at lag 1 (the effect of lag 0 is contained in the 0.5 below)
corrsum += acf * (1.0 - (float(lag)/float(N)))
# compute damped correlation time
corrtime = 0.5 + corrsum / x2m
# return statistical inefficiency
return 1.0 / (2 * corrtime) | def statistical_inefficiency(X, truncate_acf=True) | Estimates the statistical inefficiency from univariate time series X
The statistical inefficiency [1]_ is a measure of the correlatedness of samples in a signal.
Given a signal :math:`{x_t}` with :math:`N` samples and statistical inefficiency :math:`I \in (0,1]`, there are
only :math:`I \cdot N` effective or uncorrelated samples in the signal. This means that :math:`I \cdot N` should
be used in order to compute statistical uncertainties. See [2]_ for a review.
The statistical inefficiency is computed as :math:`I = (2 \tau)^{-1}` using the damped autocorrelation time
.. math:: \tau = \frac{1}{2} + \sum_{k=1}^{N} A(k) \left(1 - \frac{k}{N}\right)
where
.. math:: A(k) = \frac{\langle x_t x_{t+k} \rangle_t - \langle x \rangle_t^2}{\mathrm{var}(x)}
is the autocorrelation function of the signal :math:`{x_t}`, which is computed either for a single or multiple
trajectories.
Parameters
----------
X : float array or list of float arrays
Univariate time series (single or multiple trajectories)
truncate_acf : bool, optional, default=True
When the normalized autocorrelation function passes through 0, it is truncated in order to avoid integrating
random noise
References
----------
.. [1] Anderson, T. W.: The Statistical Analysis of Time Series (Wiley, New York, 1971)
.. [2] Janke, W: Statistical Analysis of Simulations: Data Correlations and Error Estimation
Quantum Simulations of Complex Many-Body Systems: From Theory to Algorithms, Lecture Notes,
J. Grotendorst, D. Marx, A. Muramatsu (Eds.), John von Neumann Institute for Computing, Juelich
NIC Series 10, pp. 423-445, 2002. | 4.640713 | 4.485591 | 1.034582 |
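A minimal, self-contained sketch (not pyemma's implementation) of the damped-autocorrelation estimate :math:`I = (2\tau)^{-1}` for a single trajectory, checked on a synthetic AR(1) signal:

```python
import numpy as np

def stat_ineff(x, truncate_acf=True):
    x = np.asarray(x, dtype=float)
    x = x - x.mean()                          # mean-free signal
    N = len(x)
    var = np.mean(x ** 2)
    corrsum = 0.0
    for lag in range(1, N):
        acf = np.mean(x[:N - lag] * x[lag:])  # unnormalized autocorrelation at this lag
        if truncate_acf and acf <= 0:         # stop integrating once the ACF crosses zero
            break
        corrsum += acf * (1.0 - lag / N)      # damped integration
    tau = 0.5 + corrsum / var
    return 1.0 / (2.0 * tau)

rng = np.random.default_rng(42)
x = np.zeros(10000)
for t in range(1, len(x)):
    x[t] = 0.9 * x[t - 1] + rng.normal()      # AR(1), strongly correlated samples
print(stat_ineff(x))                          # roughly (1 - 0.9) / (1 + 0.9) ~ 0.05
```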
if not self.filename:
return None
from pyemma.util.files import mkdir_p
hash_value_long = int(key, 16)
# bin hash to one of 10 different databases
# TODO: make a configuration parameter out of this number
db_name = str(hash_value_long)[-1] + '.db'
directory = os.path.dirname(self.filename) + os.path.sep + 'traj_info_usage'
mkdir_p(directory)
return os.path.join(directory, db_name) | def _database_from_key(self, key) | gets the database name for the given key. Should ensure a uniform spread
of keys over the databases in order to minimize waiting times. Since the
database has to be locked for updates and multiple processes want to write,
each process has to wait until the lock has been released.
By default the LRU databases will be stored in a sub directory "traj_info_usage"
lying next to the main database.
:param key: hash of the TrajInfo instance
:return: str, database path | 7.613529 | 5.782474 | 1.316656 |
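A small sketch of the binning idea used above: the last decimal digit of the hash's integer value selects one of ten usage databases. The digest below is made up for illustration:

```python
key = "7d793037a0760186574b0282f2f435e7"  # hypothetical hex digest of a TrajInfo instance
db_name = str(int(key, 16))[-1] + '.db'   # one of '0.db' ... '9.db'
print(db_name)
```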
db_name = self._database_from_key(hash_value)
if not db_name:
db_name=':memory:'
def _update():
import sqlite3
try:
with sqlite3.connect(db_name, timeout=self.lru_timeout) as conn:
conn.execute('CREATE TABLE IF NOT EXISTS usage '
'(hash VARCHAR(32), last_read FLOAT)')
conn.commit()
cur = conn.execute('select * from usage where hash=?', (hash_value,))
row = cur.fetchone()
if not row:
conn.execute("insert into usage(hash, last_read) values(?, ?)", (hash_value, time.time()))
else:
conn.execute("update usage set last_read=? where hash=?", (time.time(), hash_value))
conn.commit()
except sqlite3.OperationalError:
# if there are many jobs to write to same database at same time, the timeout could be hit
logger.debug('could not update LRU info for db %s', db_name)
# this could lead to another (rare) race condition during cleaning...
#import threading
#threading.Thread(target=_update).start()
_update() | def _update_time_stamp(self, hash_value) | Timestamps are stored distributed over several LRU databases.
The timestamp is a time.time() snapshot (float), i.e. seconds since the epoch. | 3.776768 | 3.598637 | 1.0495 |
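A self-contained sketch of the insert-or-update pattern used above, run against an in-memory SQLite database:

```python
import sqlite3
import time

hash_value = "deadbeef"  # made-up hash value
with sqlite3.connect(':memory:') as conn:
    conn.execute('CREATE TABLE IF NOT EXISTS usage (hash VARCHAR(32), last_read FLOAT)')
    row = conn.execute('select * from usage where hash=?', (hash_value,)).fetchone()
    if not row:  # first access: insert a fresh timestamp
        conn.execute('insert into usage(hash, last_read) values(?, ?)', (hash_value, time.time()))
    else:        # subsequent access: refresh the timestamp
        conn.execute('update usage set last_read=? where hash=?', (time.time(), hash_value))
    conn.commit()
```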
# delete the n % oldest entries in the database
import sqlite3
num_delete = int(self.num_entries / 100.0 * n)
logger.debug("removing %i entries from db" % num_delete)
lru_dbs = self._database.execute("select hash, lru_db from traj_info").fetchall()
lru_dbs.sort(key=itemgetter(1))
hashs_by_db = {}
age_by_hash = []
for k, v in itertools.groupby(lru_dbs, key=itemgetter(1)):
hashs_by_db[k] = list(x[0] for x in v)
# debug: distribution
len_by_db = {os.path.basename(db): len(hashs_by_db[db]) for db in hashs_by_db.keys()}
logger.debug("distribution of lru: %s", str(len_by_db))
### end dbg
# collect timestamps from databases
for db in hashs_by_db.keys():
with sqlite3.connect(db, timeout=self.lru_timeout) as conn:
rows = conn.execute("select hash, last_read from usage").fetchall()
for r in rows:
age_by_hash.append((r[0], float(r[1]), db))
# sort by age
age_by_hash.sort(key=itemgetter(1))
if len(age_by_hash)>=2:
assert age_by_hash[-1][1] >= age_by_hash[-2][1]
ids = map(itemgetter(0), age_by_hash[:num_delete])
ids = tuple(map(str, ids))
sql_compatible_ids = SqliteDB._format_tuple_for_sql(ids)
with self._database as c:
c.execute("DELETE FROM traj_info WHERE hash in (%s)" % sql_compatible_ids)
# iterate over all LRU databases and delete those ids, we've just deleted from the main db.
# Do this within the same execution block of the main database, because we do not want the entry to be deleted,
# in case of a subsequent failure.
age_by_hash.sort(key=itemgetter(2))
for db, values in itertools.groupby(age_by_hash, key=itemgetter(2)):
values = tuple(v[0] for v in values)
with sqlite3.connect(db, timeout=self.lru_timeout) as conn:
stmnt = "DELETE FROM usage WHERE hash IN (%s)" \
% SqliteDB._format_tuple_for_sql(values)
curr = conn.execute(stmnt)
assert curr.rowcount == len(values), curr.rowcount | def _clean(self, n) | obtain n% oldest entries by looking into the usage databases. Then these entries
are deleted first from the traj_info db and afterwards from the associated LRU dbs.
:param n: delete n% entries in traj_info db [and associated LRU (usage) dbs]. | 3.959072 | 3.576398 | 1.107 |
return super(TRAM, self).estimate(X, **params) | def estimate(self, X, **params) | Parameters
----------
X : tuple of (ttrajs, dtrajs, btrajs)
Simulation trajectories. ttrajs contain the indices of the thermodynamic state, dtrajs
contains the indices of the configurational states and btrajs contain the biases.
ttrajs : list of numpy.ndarray(X_i, dtype=int)
Every element is a trajectory (time series). ttrajs[i][t] is the index of the
thermodynamic state visited in trajectory i at time step t.
dtrajs : list of numpy.ndarray(X_i, dtype=int)
dtrajs[i][t] is the index of the configurational state (Markov state) visited in
trajectory i at time step t.
btrajs : list of numpy.ndarray((X_i, T), dtype=numpy.float64)
For every simulation frame seen in trajectory i and time step t, btrajs[i][t,k] is the
bias energy of that frame evaluated in the k'th thermodynamic state (i.e. at the k'th
Umbrella/Hamiltonian/temperature). | 8.808737 | 12.63728 | 0.697044 |
r
# TODO: check that we are estimated...
return _tram.log_likelihood_lower_bound(
self.log_lagrangian_mult, self.biased_conf_energies,
self.count_matrices, self.btrajs, self.dtrajs, self.state_counts,
None, None, None, None, None) | def log_likelihood(self) | r"""
Returns the value of the log-likelihood of the converged TRAM estimate. | 17.977228 | 13.862996 | 1.296778 |
r
assert self.therm_energies is not None, \
'MEMM has to be estimate()\'d before pointwise free energies can be calculated.'
if therm_state is not None:
assert therm_state < self.nthermo
mu = [_np.zeros(d.shape[0], dtype=_np.float64) for d in self.dtrajs+self.equilibrium_dtrajs]
if self.equilibrium is None:
_tram.get_pointwise_unbiased_free_energies(
therm_state,
self.log_lagrangian_mult, self.biased_conf_energies,
self.therm_energies, self.count_matrices,
self.btrajs, self.dtrajs,
self.state_counts, None, None, mu)
else:
_trammbar.get_pointwise_unbiased_free_energies(
therm_state,
self.log_lagrangian_mult, self.biased_conf_energies,
self.therm_energies, self.count_matrices,
self.btrajs+self.equilibrium_btrajs, self.dtrajs+self.equilibrium_dtrajs,
self.state_counts, None, None, mu,
equilibrium_therm_state_counts=self.equilibrium_state_counts.sum(axis=1).astype(_np.intc),
overcounting_factor=1.0/self.lag)
return mu | def pointwise_free_energies(self, therm_state=None) | r"""
Computes the pointwise free energies :math:`-\log(\mu^k(x))` for all points x.
:math:`\mu^k(x)` is the optimal estimate of the Boltzmann distribution
of the k'th ensemble defined on the set of all samples.
Parameters
----------
therm_state : int or None, default=None
Selects the thermodynamic state k for which to compute the
pointwise free energies.
None selects the "unbiased" state which is defined by having
zero bias energy.
Returns
-------
mu_k : list of numpy.ndarray(X_i, dtype=numpy.float64)
list of the same layout as dtrajs (or ttrajs). mu_k[i][t]
contains the pointwise free energy of the frame seen in
trajectory i and time step t.
Frames that are not in the connected sets get assigned an
infinite pointwise free energy. | 4.260817 | 3.996864 | 1.06604 |
return self._is_random_accessible and \
not isinstance(self.ra_itraj_cuboid, NotImplementedRandomAccessStrategy) and \
not isinstance(self.ra_linear, NotImplementedRandomAccessStrategy) and \
not isinstance(self.ra_itraj_jagged, NotImplementedRandomAccessStrategy) and \
not isinstance(self.ra_itraj_linear, NotImplementedRandomAccessStrategy) | def is_random_accessible(self) | Check if self._is_random_accessible is set to true and if all the random access strategies are implemented.
Returns
-------
bool : Returns True if random accessible via strategies and False otherwise. | 5.588647 | 4.834115 | 1.156085 |
'''Computes the N-dimensional histogram of the transformed data.
Parameters
----------
transform : pyemma.coordinates.transform.Transformer object
transform that provides the input data
dimensions : tuple of indices
indices of the dimensions you want to examine
nbins : tuple of ints
number of bins along each dimension
Returns
-------
counts : (bins[0],bins[1],...) ndarray of ints
counts compatible with pyplot.pcolormesh and pyplot.bar
edges : list of (bins[i]) ndarrays
bin edges compatible with pyplot.pcolormesh and pyplot.bar,
see below.
Examples
--------
>>> import matplotlib.pyplot as plt # doctest: +SKIP
Only for ipython notebook
>> %matplotlib inline # doctest: +SKIP
>>> counts, edges=histogram(transform, dimensions=(0,1), nbins=(20, 30)) # doctest: +SKIP
>>> plt.pcolormesh(edges[0], edges[1], counts.T) # doctest: +SKIP
>>> counts, edges=histogram(transform, dimensions=(1,), nbins=(50,)) # doctest: +SKIP
>>> plt.bar(edges[0][:-1], counts, width=edges[0][1:]-edges[0][:-1]) # doctest: +SKIP
'''
maximum = np.ones(len(dimensions)) * (-np.inf)
minimum = np.ones(len(dimensions)) * np.inf
# compute min and max
for _, chunk in transform:
maximum = np.max(
np.vstack((
maximum,
np.max(chunk[:, dimensions], axis=0))),
axis=0)
minimum = np.min(
np.vstack((
minimum,
np.min(chunk[:, dimensions], axis=0))),
axis=0)
# define bins
bins = [np.linspace(m, M, num=n)
for m, M, n in zip(minimum, maximum, nbins)]
res = np.zeros(np.array(nbins) - 1)
# compute actual histogram
for _, chunk in transform:
part, _ = np.histogramdd(chunk[:, dimensions], bins=bins)
res += part
return res, bins | def histogram(transform, dimensions, nbins) | Computes the N-dimensional histogram of the transformed data.
Parameters
----------
transform : pyemma.coordinates.transform.Transformer object
transform that provides the input data
dimensions : tuple of indices
indices of the dimensions you want to examine
nbins : tuple of ints
number of bins along each dimension
Returns
-------
counts : (bins[0],bins[1],...) ndarray of ints
counts compatible with pyplot.pcolormesh and pyplot.bar
edges : list of (bins[i]) ndarrays
bin edges compatible with pyplot.pcolormesh and pyplot.bar,
see below.
Examples
--------
>>> import matplotlib.pyplot as plt # doctest: +SKIP
Only for ipython notebook
>> %matplotlib inline # doctest: +SKIP
>>> counts, edges=histogram(transform, dimensions=(0,1), nbins=(20, 30)) # doctest: +SKIP
>>> plt.pcolormesh(edges[0], edges[1], counts.T) # doctest: +SKIP
>>> counts, edges=histogram(transform, dimensions=(1,), nbins=(50,)) # doctest: +SKIP
>>> plt.bar(edges[0][:-1], counts, width=edges[0][1:]-edges[0][:-1]) # doctest: +SKIP | 3.000589 | 1.54345 | 1.94408 |
if not config.check_version:
class _dummy:
def start(self): pass
return _dummy()
import json
import platform
import os
from distutils.version import LooseVersion as parse
from contextlib import closing
import threading
import uuid
import sys
if 'pytest' in sys.modules or os.getenv('CI', False):
testing = True
def _impl():
import warnings
from urllib.request import Request, urlopen
try:
r = Request('http://emma-project.org/versions.json',
headers={'User-Agent': 'PyEMMA-{emma_version}-Py-{python_version}-{platform}-{addr}'
.format(emma_version=current, python_version=platform.python_version(),
platform=platform.platform(terse=True), addr=uuid.getnode())} if not testing else {})
with closing(urlopen(r, timeout=30)) as response:
payload = str(response.read(), encoding='ascii')
versions = json.loads(payload)
latest_json = tuple(filter(lambda x: x['latest'], versions))[0]['version']
latest = parse(latest_json)
if parse(current) < latest:
warnings.warn("You are not using the latest release of PyEMMA."
" Latest is {latest}, you have {current}."
.format(latest=latest, current=current), category=UserWarning)
if sys.version_info[0] < 3:
warnings.warn("Python 2.7 usage is deprecated. "
"Future versions of PyEMMA will not support it. "
"Please upgrade your Python installation.", category=UserWarning)
except Exception:
import logging
logging.getLogger('pyemma').debug("error during version check", exc_info=True)
return threading.Thread(target=_impl) | def _version_check(current, testing=False) | checks latest version online from http://emma-project.org.
Can be disabled by setting config.check_version = False.
>>> from unittest.mock import patch
>>> import warnings, pyemma
>>> with warnings.catch_warnings(record=True) as cw, patch('pyemma.version', '0.1'):
... warnings.simplefilter('always', UserWarning)
... v = pyemma.version
... t = pyemma._version_check(v, testing=True)
... t.start()
... t.join()
... assert cw, "no warning captured"
... assert "latest release" in str(cw[0].message), "wrong msg" | 3.633722 | 3.314691 | 1.096248 |
if not filename:
filename = self.default_config_file
files = self._cfgs_to_read()
# insert last, so it will override all values,
# which have already been set in previous files.
files.insert(-1, filename)
try:
config = self.__read_cfg(files)
except ReadConfigException as e:
print(Config._format_msg('config.load("{file}") failed with {error}'.format(file=filename, error=e)))
else:
self._conf_values = config
# notice user?
if self.show_config_notification and not self.cfg_dir:
print(Config._format_msg("no configuration directory set or usable."
" Falling back to defaults.")) | def load(self, filename=None) | load runtime configuration from given filename.
If filename is None, try to read the default file from the
default location. | 8.188965 | 8.095915 | 1.011493 |
if not filename:
filename = self.DEFAULT_CONFIG_FILE_NAME
else:
filename = str(filename)
# try to extract the path from filename and use it as cfg_dir
head, tail = os.path.split(filename)
if head:
self._cfg_dir = head
# we search for .cfg files in cfg_dir, so make sure it contains the proper extension.
base, ext = os.path.splitext(tail)
if ext != ".cfg":
filename += ".cfg"
# if we have no cfg dir, try to create it first. Return if it failed.
if not self.cfg_dir or not os.path.isdir(self.cfg_dir) or not os.access(self.cfg_dir, os.W_OK):
try:
self.cfg_dir = self.DEFAULT_CONFIG_DIR
except ConfigDirectoryException as cde:
print(Config._format_msg('Could not create configuration directory "{dir}"! config.save() failed.'
' Please set a writeable location with config.cfg_dir = val. Error was {exc}'
.format(dir=self.cfg_dir, exc=cde)))
return
filename = os.path.join(self.cfg_dir, filename)
try:
with open(filename, 'w') as fh:
self._conf_values.write(fh)
except IOError as ioe:
print(Config._format_msg("Save failed with error %s" % ioe)) | def save(self, filename=None) | Saves the runtime configuration to disk.
Parameters
----------
filename: str or None, default=None
writeable path to configuration filename.
If None, use default location and filename. | 4.222887 | 4.251883 | 0.99318 |
import os.path as p
import pyemma
return p.join(pyemma.__path__[0], Config.DEFAULT_CONFIG_FILE_NAME) | def default_config_file(self) | default config file living in PyEMMA package | 5.85488 | 3.84176 | 1.52401 |
import os.path as p
import pyemma
return p.join(pyemma.__path__[0], Config.DEFAULT_LOGGING_FILE_NAME) | def default_logging_file(self) | default logging configuration | 6.551111 | 6.211342 | 1.054701 |
if not os.path.exists(pyemma_cfg_dir):
try:
mkdir_p(pyemma_cfg_dir)
except NotADirectoryError: # on Python 3
raise ConfigDirectoryException("pyemma cfg dir (%s) is not a directory" % pyemma_cfg_dir)
except EnvironmentError:
raise ConfigDirectoryException("could not create configuration directory '%s'" % pyemma_cfg_dir)
if not os.path.isdir(pyemma_cfg_dir):
raise ConfigDirectoryException("%s is no valid directory" % pyemma_cfg_dir)
if not os.access(pyemma_cfg_dir, os.W_OK):
raise ConfigDirectoryException("%s is not writeable" % pyemma_cfg_dir)
# give user the default cfg file, if its not there
self.__copy_default_files_to_cfg_dir(pyemma_cfg_dir)
self._cfg_dir = pyemma_cfg_dir
if self.show_config_notification:
stars = '*' * 80
print(stars, '\n',
'Changed PyEMMAs config directory to "{dir}".\n'
'To make this change permanent, export the environment variable'
' "PYEMMA_CFG_DIR" \nto point to this location. Eg. edit your .bashrc file!'
.format(dir=pyemma_cfg_dir), '\n', stars, sep='') | def cfg_dir(self, pyemma_cfg_dir) | Sets PyEMMAs configuration directory.
Also creates it with some default files, if it does not exist. | 3.234183 | 3.121443 | 1.036118 |
cfg = self._conf_values.get('pyemma', 'logging_config')
if cfg == 'DEFAULT':
cfg = os.path.join(self.cfg_dir, Config.DEFAULT_LOGGING_FILE_NAME)
return cfg | def logging_config(self) | currently used logging configuration file. Can not be changed during runtime. | 6.394603 | 5.615376 | 1.138767 |
# use these files to extend/overwrite the conf_values.
# Last read file always overwrites existing values!
cfg = Config.DEFAULT_CONFIG_FILE_NAME
filenames = [
self.default_config_file,
cfg, # conf_values in current directory
os.path.join(os.path.expanduser('~' + os.path.sep), cfg), # config in user dir
'.pyemma.cfg',
]
# look for user defined files
if self.cfg_dir:
from glob import glob
filenames.extend(glob(self.cfg_dir + os.path.sep + "*.cfg"))
return filenames | def _cfgs_to_read(self) | reads config files from various locations to build final config. | 7.598351 | 7.332644 | 1.036236 |
fragment_indices = []
for idx, cumlen in enumerate(self._cumulative_lengths):
cumlen_prev = self._cumulative_lengths[idx - 1] if idx > 0 else 0
fragment_indices.append([np.argwhere(
np.logical_and(self.ra_indices >= cumlen_prev, self.ra_indices < cumlen)
)])
return fragment_indices | def __get_ra_index_indices(self) | Returns a list containing indices of the ra_index array, which correspond to the separate trajectory fragments,
i.e., ra_indices[fragment_indices[itraj]] are the ra indices for itraj (plus some offset by
cumulative length) | 3.613947 | 3.015137 | 1.198601 |
overlap = stride * ((traj_len - skip - 1) // stride + 1) - traj_len + skip
return overlap | def _calculate_new_overlap(stride, traj_len, skip) | Given two trajectories T_1 and T_2, this function calculates for the first trajectory an overlap, i.e.,
a skip parameter for T_2 such that the trajectory fragments T_1 and T_2 appear as one under the given stride.
Idea for deriving the formula: It is
K = ((traj_len - skip - 1) // stride + 1) = #(data points in trajectory of length (traj_len - skip)).
Therefore, the first point's position that is not contained in T_1 anymore is given by
pos = skip + s * K.
Thus the needed skip of T_2 such that the same stride parameter makes T_1 and T_2 "look as one" is
overlap = pos - traj_len.
:param stride: the (global) stride parameter
:param traj_len: length of T_1
:param skip: skip of T_1
:return: skip of T_2 | 4.550787 | 5.289504 | 0.860343 |
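A worked check of the overlap formula: using the computed value as the skip of the second fragment, striding over the two fragments separately visits exactly the frames that striding over their concatenation would:

```python
def new_overlap(stride, traj_len, skip):
    return stride * ((traj_len - skip - 1) // stride + 1) - traj_len + skip

stride, skip = 3, 1
len1, len2 = 10, 8
expected = list(range(len1 + len2))[skip::stride]       # stride over the concatenation

part1 = list(range(len1))[skip::stride]                 # stride over T_1
skip2 = new_overlap(stride, len1, skip)                 # skip for T_2 from the formula
part2 = [len1 + i for i in range(len2)][skip2::stride]  # stride over T_2 with that skip
assert part1 + part2 == expected                        # the two fragments "look as one"
```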
'''Command line options.'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
parser = ArgumentParser()
parser.add_argument('-u', '--url', dest='url', required=True, help="base url (has to contain versions json)")
parser.add_argument('-o', '--output', dest='output')
parser.add_argument('-a', '--add_version', dest='version')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')
parser.add_argument('-l', '--latest-version', dest='latest', action='store_true')
args = parser.parse_args()
URL = args.url
# get dict
versions = json.load(urlopen(URL + '/versions.json'))
# add new version
if args.version:
versions.append(make_version_dict(URL, args.version))
# create Version objects to compare them
version_objs = [parse(s['version']) for s in versions]
# unify and sort
version_objs = set(version_objs)
version_objs = sorted(list(version_objs))
versions = [make_version_dict(URL, str(v)) for v in version_objs if v != 'devel']
# last element should be the highest version
versions[-1]['latest'] = True
versions.append(make_version_dict(URL, 'devel', '', False))
if args.verbose:
print("new versions json:")
json.dump(versions, sys.stdout, indent=1)
print()
if args.latest:
print(find_latest(versions)['version'])
return 0
if args.output:
with open(args.output, 'w') as v:
json.dump(versions, v, indent=1)
v.flush() | def main(argv=None) | Command line options. | 2.931106 | 2.933722 | 0.999108 |
if TrajectoryInfoCache._instance is None:
# if we do not have a configuration directory yet, we do not want to store the cache on disk
if not config.cfg_dir:
filename = None
else:
filename = os.path.join(config.cfg_dir, "traj_info.sqlite3")
TrajectoryInfoCache._instance = TrajectoryInfoCache(filename)
return TrajectoryInfoCache._instance | def instance() | :returns the TrajectoryInfoCache singleton instance | 5.266457 | 3.996515 | 1.317762 |
r
return assert_allclose_np(actual, desired, rtol=rtol, atol=atol,
err_msg=err_msg, verbose=verbose) | def assert_allclose(actual, desired, rtol=1.e-5, atol=1.e-8,
err_msg='', verbose=True) | r"""wrapper for numpy.testing.allclose with default tolerances of
numpy.allclose. Needed since the testing method uses different default tolerances. | 3.251642 | 4.200879 | 0.774039 |
data = [(atom.serial, atom.name, atom.element.symbol,
atom.residue.resSeq, atom.residue.name,
atom.residue.chain.index, atom.segment_id) for atom in top.atoms]
atoms = np.array(data,
dtype=[("serial", 'i4'), ("name", 'S4'), ("element", 'S3'),
("resSeq", 'i4'), ("resName", 'S4'), ("chainID", 'i4'), ("segmentID", 'S4')])
bonds = np.fromiter(((a.index, b.index) for (a, b) in top.bonds), dtype='i4,i4', count=top.n_bonds)
return atoms, bonds | def topology_to_numpy(top) | Convert this topology into numpy arrays
Returns
-------
atoms : np.ndarray dtype=[("serial", 'i4'), ("name", 'S4'), ("element", 'S3'),
("resSeq", 'i4'), ("resName",'S4'), ("chainID", 'i4'), ("segmentID", 'S4')]
The atoms in the topology, represented as a structured array.
bonds : np.ndarray
The bonds in this topology, represented as an n_bonds x 2 array
of the indices of the atoms involved in each bond. | 2.36836 | 1.996249 | 1.186405 |
if bonds is None:
bonds = np.zeros((0, 2))
for col in ["name", "element", "resSeq",
"resName", "chainID", "serial"]:
if col not in atoms.dtype.names:
raise ValueError('dataframe must have column %s' % col)
if "segmentID" not in atoms.dtype.names:
atoms["segmentID"] = ""
from mdtraj.core.topology import Atom
from mdtraj.core import element as elem
out = mdtraj.Topology()
# TODO: allow for h5py data sets here, is there a way to check generic ndarray interface?
#if not isinstance(bonds, np.ndarray):
# raise TypeError('bonds must be an instance of numpy.ndarray. '
# 'You supplied a %s' % type(bonds))
out._atoms = [None for _ in range(len(atoms))]
N = np.arange(0, len(atoms))
for ci in np.unique(atoms['chainID']):
chain_atoms = atoms[atoms['chainID'] == ci]
subN = N[atoms['chainID'] == ci]
c = out.add_chain()
for ri in np.unique(chain_atoms['resSeq']):
residue_atoms = chain_atoms[chain_atoms['resSeq'] == ri]
mask = subN[chain_atoms['resSeq'] == ri]
indices = N[mask]
rnames = residue_atoms['resName']
residue_name = np.array(rnames)[0]
segids = residue_atoms['segmentID']
segment_id = np.array(segids)[0]
if not np.all(rnames == residue_name):
raise ValueError('All of the atoms with residue index %d '
'do not share the same residue name' % ri)
r = out.add_residue(residue_name.decode('ascii'), c, ri, segment_id.decode('ascii'))
for ix, atom in enumerate(residue_atoms):
e = atom['element'].decode('ascii')
a = Atom(atom['name'].decode('ascii'), elem.get_by_symbol(e),
int(indices[ix]), r, serial=atom['serial'])
out._atoms[indices[ix]] = a
r._atoms.append(a)
for ai1, ai2 in bonds:
out.add_bond(out.atom(ai1), out.atom(ai2))
out._numAtoms = out.n_atoms
return out | def topology_from_numpy(atoms, bonds=None) | Create a mdtraj topology from numpy arrays
Parameters
----------
atoms : np.ndarray
The atoms in the topology, represented as a data frame. This data
frame should have columns "serial" (atom index), "name" (atom name),
"element" (atom's element), "resSeq" (index of the residue)
"resName" (name of the residue), "chainID" (index of the chain),
and optionally "segmentID", following the same conventions
as wwPDB 3.0 format.
bonds : np.ndarray, shape=(n_bonds, 2), dtype=int, optional
The bonds in the topology, represented as an n_bonds x 2 array
of the indices of the atoms involved in each bond. Specifying
bonds here is optional. To create standard protein bonds, you can
use `create_standard_bonds` to "fill in" the bonds on your newly
created Topology object
See Also
--------
create_standard_bonds | 3.043821 | 2.960037 | 1.028305 |
with open(filename, "r") as f:
lines=f.read()
dtraj=np.fromstring(lines, dtype=int, sep="\n")
return dtraj | def read_discrete_trajectory(filename) | Read discrete trajectory from ascii file.
The ascii file containing a single column with integer entries is
read into an array of integers.
Parameters
----------
filename : str
The filename of the discrete state trajectory file.
The filename can either contain the full or the
relative path to the file.
Returns
-------
dtraj : (M, ) ndarray
Discrete state trajectory. | 3.47603 | 3.430975 | 1.013132 |
r
dtraj=np.asarray(dtraj)
with open(filename, 'w') as f:
dtraj.tofile(f, sep='\n', format='%d') | def write_discrete_trajectory(filename, dtraj) | r"""Write discrete trajectory to ascii file.
The discrete trajectory is written to a
single column ascii file with integer entries
Parameters
----------
filename : str
The filename of the discrete state trajectory file.
The filename can either contain the full or the
relative path to the file.
dtraj : array-like
Discrete state trajectory. | 3.558721 | 4.923258 | 0.722839 |
r
dtraj=np.asarray(dtraj)
np.save(filename, dtraj) | def save_discrete_trajectory(filename, dtraj) | r"""Write discrete trajectory to binary file.
The discrete trajectory is stored as ndarray of integers
in numpy .npy format.
Parameters
----------
filename : str
The filename of the discrete state trajectory file.
The filename can either contain the full or the
relative path to the file.
dtraj : array-like
Discrete state trajectory. | 5.995241 | 7.531191 | 0.796055 |
r
# format input
dtrajs = _ensure_dtraj_list(dtrajs)
# make bincounts for each input trajectory
nmax = 0
bcs = []
for dtraj in dtrajs:
if ignore_negative:
dtraj = dtraj[np.where(dtraj >= 0)]
bc = np.bincount(dtraj)
nmax = max(nmax, bc.shape[0])
bcs.append(bc)
# construct total bincount
res = np.zeros(nmax, dtype=int)
# add up individual bincounts
for i, bc in enumerate(bcs):
res[:bc.shape[0]] += bc
return res | def count_states(dtrajs, ignore_negative=False) | r"""returns a histogram count
Parameters
----------
dtrajs : array_like or list of array_like
Discretized trajectory or list of discretized trajectories
ignore_negative : bool, default=False
Ignore negative elements. By default, a negative element will cause an
exception
Returns
-------
count : ndarray((n), dtype=int)
the number of occurrences of each state. n=max+1 where max is the largest state index found. | 3.129639 | 3.388032 | 0.923734 |
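The per-trajectory bincounts are simply summed into one histogram; a compact numpy restatement of that idea (not the pyemma API call itself):

```python
import numpy as np

dtrajs = [np.array([0, 0, 1, 2]), np.array([2, 2, 3])]
nmax = max(int(d.max()) for d in dtrajs)
counts = np.zeros(nmax + 1, dtype=int)
for d in dtrajs:
    bc = np.bincount(d)            # histogram of one trajectory
    counts[:bc.shape[0]] += bc     # accumulate into the total histogram
print(counts)                      # [2 1 3 1]
```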
r
dtrajs = _ensure_dtraj_list(dtrajs)
if only_used:
# only states with counts > 0 wanted. Make a bincount and count nonzeros
bc = count_states(dtrajs)
return np.count_nonzero(bc)
else:
# all states wanted, included nonpopulated ones. return max + 1
imax = 0
for dtraj in dtrajs:
imax = max(imax, np.max(dtraj))
return imax+1 | def number_of_states(dtrajs, only_used = False) | r"""returns the number of states in the given trajectories.
Parameters
----------
dtraj : array_like or list of array_like
Discretized trajectory or list of discretized trajectories
only_used = False : boolean
If False, will return max+1, where max is the largest index used.
If True, will return the number of states that occur at least once. | 5.726165 | 6.876435 | 0.832723 |
N = len(sequence)
res = np.zeros((N,2), dtype=int)
for t in range(N):
s = sequence[t]
i = np.random.randint(indexes[s].shape[0])
res[t,:] = indexes[s][i,:]
return res | def sample_indexes_by_sequence(indexes, sequence) | Samples trajectory/time indexes according to the given sequence of states
Parameters
----------
indexes : list of ndarray( (N_i, 2) )
For each state, all trajectory and time indexes where this state occurs.
Each matrix has a number of rows equal to the number of occurrences of the corresponding state,
with rows consisting of a tuple (i, t), where i is the index of the trajectory and t is the time index
within the trajectory.
sequence : array of integers
A sequence of discrete states. For each state, a trajectory/time index will be sampled at which dtrajs
have an occurrence of this state
Returns
-------
indexes : ndarray( (N, 2) )
The sampled index sequence.
Index array with a number of rows equal to N=len(sequence), with rows consisting of a tuple (i, t),
where i is the index of the trajectory and t is the time index within the trajectory. | 2.841765 | 3.236538 | 0.878026 |
if fake:
yield
return
oldstdchannel = dest_file = None
try:
oldstdchannel = os.dup(stdchannel.fileno())
dest_file = open(dest_filename, 'w')
os.dup2(dest_file.fileno(), stdchannel.fileno())
yield
finally:
if oldstdchannel is not None:
os.dup2(oldstdchannel, stdchannel.fileno())
if dest_file is not None:
dest_file.close() | def stdchannel_redirected(stdchannel, dest_filename, fake=False) | A context manager to temporarily redirect stdout or stderr
e.g.:
with stdchannel_redirected(sys.stderr, os.devnull):
if compiler.has_function('clock_gettime', libraries=['rt']):
libraries.append('rt') | 1.903476 | 1.952085 | 0.975099 |
with TemporaryDirectory() as tmpdir, \
stdchannel_redirected(sys.stderr, os.devnull), \
stdchannel_redirected(sys.stdout, os.devnull):
f = tempfile.mktemp(suffix='.cpp', dir=tmpdir)
with open(f, 'w') as fh:
fh.write('int main (int argc, char **argv) { return 0; }')
try:
compiler.compile([f], extra_postargs=[flagname], output_dir=tmpdir)
except setuptools.distutils.errors.CompileError:
return False
return True | def has_flag(compiler, flagname) | Return a boolean indicating whether a flag name is supported on
the specified compiler. | 2.233648 | 2.350173 | 0.950418 |
if self.dim > -1:
return self.dim
d = None
if self.dim != -1 and not self._estimated: # fixed parametrization
d = self.dim
elif self._estimated: # parametrization finished. Dimension is known
dim = len(self.eigenvalues)
if self.var_cutoff < 1.0: # if subspace_variance, reduce the output dimension if needed
dim = min(dim, np.searchsorted(self.cumvar, self.var_cutoff) + 1)
d = dim
elif self.var_cutoff == 1.0: # We only know that all dimensions are wanted, so return input dim
d = self.data_producer.dimension()
else: # We know nothing. Give up
raise RuntimeError('Requested dimension, but the dimension depends on the cumulative variance and the '
'transformer has not yet been estimated. Call estimate() before.')
return d | def dimension(self) | output dimension | 7.278498 | 6.992815 | 1.040854 |
r
X_meanfree = X - self.mean
Y = np.dot(X_meanfree, self.eigenvectors[:, 0:self.dimension()])
return Y.astype(self.output_type()) | def _transform_array(self, X) | r"""Projects the data onto the dominant independent components.
Parameters
----------
X : ndarray(n, m)
the input data
Returns
-------
Y : ndarray(n,)
the projected data | 9.339293 | 11.280114 | 0.827943 |
r
return -self.lag / np.log(np.abs(self.eigenvalues)) | def timescales(self) | r"""Implied timescales of the TICA transformation
For each :math:`i`-th eigenvalue, this returns
.. math::
t_i = -\frac{\tau}{\log(|\lambda_i|)}
where :math:`\tau` is the :py:obj:`lag` of the TICA object and :math:`\lambda_i` is the `i`-th
:py:obj:`eigenvalue <eigenvalues>` of the TICA object.
Returns
-------
timescales: 1D np.array
numpy array with the implied timescales. In principle, one should expect as many timescales as
input coordinates were available. However, fewer eigenvalues will be returned if the TICA matrices
were not full rank or :py:obj:`var_cutoff` was parsed | 27.571701 | 14.830606 | 1.859108 |
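A worked instance of the implied-timescale formula :math:`t_i = -\tau / \log(|\lambda_i|)` with made-up eigenvalues:

```python
import numpy as np

lag = 10                                   # lag time tau, in trajectory steps
eigenvalues = np.array([0.99, 0.9, 0.5])   # hypothetical TICA eigenvalues
print(-lag / np.log(np.abs(eigenvalues)))  # approx. [995.0, 94.9, 14.4] steps
```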
r
feature_sigma = np.sqrt(np.diag(self.cov))
return np.dot(self.cov, self.eigenvectors[:, : self.dimension()]) / feature_sigma[:, np.newaxis] | def feature_TIC_correlation(self) | r"""Instantaneous correlation matrix between mean-free input features and TICs
Denoting the input features as :math:`X_i` and the TICs as :math:`\theta_j`, the instantaneous, linear correlation
between them can be written as
.. math::
\mathbf{Corr}(X_i - \mu_i, \mathbf{\theta}_j) = \frac{1}{\sigma_{X_i - \mu_i}}\sum_l \sigma_{(X_i - \mu_i)(X_l - \mu_l)} \mathbf{U}_{li}
The matrix :math:`\mathbf{U}` is the matrix containing, as column vectors, the eigenvectors of the TICA
generalized eigenvalue problem.
Returns
-------
feature_TIC_correlation : ndarray(n,m)
correlation matrix between input features and TICs. There is a row for each feature and a column
for each TIC. | 6.785981 | 7.724016 | 0.878556 |
from pyemma._ext.variational.solvers.direct import spd_inv_sqrt
# reweight operator to empirical distribution
C0t_re = mdot(C00_train, K)
# symmetrized operator and SVD
K_sym = mdot(spd_inv_sqrt(C00_train), C0t_re, spd_inv_sqrt(Ctt_train))
U, S, Vt = np.linalg.svd(K_sym, compute_uv=True, full_matrices=False)
# projects back to singular functions of K
U = mdot(spd_inv_sqrt(C00_train), U)
Vt = mdot(Vt,spd_inv_sqrt(Ctt_train))
return U, S, Vt.T | def _svd_sym_koopman(K, C00_train, Ctt_train) | Computes the SVD of the symmetrized Koopman operator in the empirical distribution. | 4.845223 | 4.615211 | 1.049838 |
from pyemma._ext.variational.solvers.direct import spd_inv_sqrt
# SVD of symmetrized operator in empirical distribution
U, S, V = _svd_sym_koopman(K, C00_train, Ctt_train)
if k is not None:
U = U[:, :k]
# S = S[:k][:, :k]
V = V[:, :k]
A = spd_inv_sqrt(mdot(U.T, C00_test, U))
B = mdot(U.T, C0t_test, V)
C = spd_inv_sqrt(mdot(V.T, Ctt_test, V))
# compute trace norm (nuclear norm), equal to the sum of singular values
score = np.linalg.norm(mdot(A, B, C), ord='nuc')
return score | def vamp_1_score(K, C00_train, C0t_train, Ctt_train, C00_test, C0t_test, Ctt_test, k=None) | Computes the VAMP-1 score of a kinetic model.
Ranks the kinetic model described by the estimation of covariances C00, C0t and Ctt,
defined by:
:math:`C_{0t}^{train} = E_t[x_t x_{t+\tau}^T]`
:math:`C_{tt}^{train} = E_t[x_{t+\tau} x_{t+\tau}^T]`
These model covariances might have been subject to symmetrization or reweighting,
depending on the type of model used.
The covariances C00, C0t and Ctt of the test data are direct empirical estimates.
The score is computed from the singular vectors U and V using the test data
with covariances C00, C0t, Ctt. U and V should come from the SVD of the symmetrized
transition matrix or Koopman matrix:
:math:`(C00^{train})^{-(1/2)} C0t^{train} (Ctt^{train})^{-(1/2)} = U S V.T`
Parameters:
-----------
K : ndarray(n, k)
left singular vectors of the symmetrized transition matrix or Koopman matrix
C00_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{00}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_train : ndarray(n, n)
time-lagged covariance matrix of the training data, defined by
:math:`C_{0t}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{tt}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
C00_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{00}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_test : ndarray(n, n)
time-lagged covariance matrix of the test data, defined by
:math:`C_{0t}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{tt}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
k : int
number of slow processes to consider in the score
Returns:
--------
vamp1 : float
VAMP-1 score | 4.620828 | 4.681976 | 0.98694 |
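The final step above relies on the trace norm being the sum of singular values; a quick numpy check of that identity on a random matrix:

```python
import numpy as np

M = np.random.rand(4, 4)
s = np.linalg.svd(M, compute_uv=False)                     # singular values of M
assert np.isclose(np.linalg.norm(M, ord='nuc'), s.sum())   # nuclear norm == sum of singular values
```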
# SVD of symmetrized operator in empirical distribution
U, s, V = _svd_sym_koopman(K, C00_train, Ctt_train)
if k is not None:
U = U[:, :k]
S = np.diag(s[:k])
V = V[:, :k]
score = np.trace(2.0 * mdot(V, S, U.T, C0t_test) - mdot(V, S, U.T, C00_test, U, S, V.T, Ctt_test))
return score | def vamp_e_score(K, C00_train, C0t_train, Ctt_train, C00_test, C0t_test, Ctt_test, k=None) | Computes the VAMP-E score of a kinetic model.
Ranks the kinetic model described by the estimation of covariances C00, C0t and Ctt,
defined by:
:math:`C_{0t}^{train} = E_t[x_t x_{t+\tau}^T]`
:math:`C_{tt}^{train} = E_t[x_{t+\tau} x_{t+\tau}^T]`
These model covariances might have been subject to symmetrization or reweighting,
depending on the type of model used.
The covariances C00, C0t and Ctt of the test data are direct empirical estimates.
The score is computed from the singular vectors U and V using the test data
with covariances C00, C0t, Ctt. U and V should come from the SVD of the symmetrized
transition matrix or Koopman matrix:
:math:`(C00^{train})^{-(1/2)} C0t^{train} (Ctt^{train})^{-(1/2)} = U S V.T`
Parameters:
-----------
K : ndarray(n, k)
left singular vectors of the symmetrized transition matrix or Koopman matrix
C00_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{00}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_train : ndarray(n, n)
time-lagged covariance matrix of the training data, defined by
:math:`C_{0t}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{tt}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
C00_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{00}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_test : ndarray(n, n)
time-lagged covariance matrix of the test data, defined by
:math:`C_{0t}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{tt}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
k : int
number of slow processes to consider in the score
Returns:
--------
vampE : float
VAMP-E score | 3.783521 | 3.746537 | 1.009872 |
import inspect
public_undocumented_members = {name: func for name, func in inspect.getmembers(cls)
if not name.startswith('_') and func.__doc__ is None}
for name, func in public_undocumented_members.items():
for parent in cls.__mro__[1:]:
parfunc = getattr(parent, name, None)
if parfunc and getattr(parfunc, '__doc__', None):
if isinstance(func, property):
# copy property, since its doc attribute is read-only
new_prop = property(fget=func.fget, fset=func.fset,
fdel=func.fdel, doc=parfunc.__doc__)
setattr(cls, name, new_prop)
else:
if hasattr(func, '__func__'): # handle instancemethods
func.__func__.__doc__ = parfunc.__doc__
else:
func.__doc__ = parfunc.__doc__
break
return cls | def fix_docs(cls) | copies docstrings of derived attributes (methods, properties, attrs) from parent classes. | 2.604378 | 2.478634 | 1.050732 |
original_methods = aliased_class.__dict__.copy()
original_methods_set = set(original_methods)
for name, method in original_methods.items():
aliases = None
if isinstance(method, property) and hasattr(method.fget, '_aliases'):
aliases = method.fget._aliases
elif hasattr(method, '_aliases'):
aliases = method._aliases
if aliases:
# Add the aliases for 'method', but don't override any
# previously-defined attribute of 'aliased_class'
for alias in aliases - original_methods_set:
setattr(aliased_class, alias, method)
return aliased_class | def aliased(aliased_class) | Decorator function that *must* be used in combination with @alias
decorator. This class will make the magic happen!
@aliased classes will have their aliased method (via @alias) actually
aliased.
This method simply iterates over the member attributes of 'aliased_class'
seeking for those which have an '_aliases' attribute and then defines new
members in the class using those aliases as mere pointer functions to the
original ones.
Usage:
>>> @aliased
... class MyClass(object):
... @alias('coolMethod', 'myKinkyMethod')
... def boring_method(self):
... pass
...
... @property
... @alias('my_prop_alias')
... def my_prop(self):
... return "hi"
>>> i = MyClass()
>>> i.coolMethod() # equivalent to i.myKinkyMethod() and i.boring_method()
>>> i.my_prop == i.my_prop_alias
True | 2.757224 | 3.04681 | 0.904954 |
def wrap(f):
globals_ = f.__globals__
for name in names:
globals_[name] = f
if '__all__' in globals_ and name not in globals_['__all__']:
globals_['__all__'].append(name)
return f
return wrap | def shortcut(*names) | Add a shortcut (alias) to a decorated function, but not to class methods!
Use aliased/alias decorators for class members!
Calling the shortcut (alias) will call the decorated function. The shortcut name will be appended
to the module's __all__ variable and the shortcut function will inherit the function's docstring
Examples
--------
In some module you have defined a function
>>> @shortcut('is_tmatrix') # doctest: +SKIP
>>> def is_transition_matrix(args): # doctest: +SKIP
... pass # doctest: +SKIP
Now you are able to call the function under its short name
>>> is_tmatrix(args) # doctest: +SKIP | 2.564869 | 3.750031 | 0.683959 |
try:
caller_stack = stack()[omit_top_frames:]
while len(caller_stack) > 0:
frame = caller_stack.pop(0)
filename = frame[1]
# skip callee frames if they are other decorators or this file(func)
if '<decorator' in filename or __file__ in filename:
continue
else:
break
lineno = frame[2]
# avoid cyclic references!
del caller_stack, frame
except OSError: # eg. os.getcwd() fails in conda-test, since cwd gets deleted.
filename = 'unknown'
lineno = -1
return filename, lineno | def get_culprit(omit_top_frames=1) | get the filename and line number calling this.
Parameters
----------
omit_top_frames: int, default=1
omit n frames from top of stack stack. Purpose is to get the real
culprit and not intermediate functions on the stack.
Returns
-------
(filename: str, lineno: int)
filename and line number of the culprit. | 7.049681 | 6.67684 | 1.055841 |
def _deprecated(func, *args, **kw):
filename, lineno = get_culprit()
user_msg = 'Call to deprecated function "%s". Called from %s line %i. %s' \
% (func.__name__, filename, lineno, msg)
warnings.warn_explicit(
user_msg,
category=PyEMMA_DeprecationWarning,
filename=filename,
lineno=lineno
)
return func(*args, **kw)
# add deprecation notice to func docstring:
if len(optional_message) == 1 and callable(optional_message[0]):
# this is the function itself, decorate!
msg = ""
return decorate(optional_message[0], _deprecated)
else:
# actually got a message (or empty parenthesis)
msg = optional_message[0] if len(optional_message) > 0 else ""
return decorator(_deprecated) | def deprecated(*optional_message) | This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
Parameters
----------
*optional_message : str
an optional user level hint which should indicate which feature to use otherwise. | 4.385829 | 4.524325 | 0.969389 |
r
from pyemma.thermo.extensions.util import (logsumexp as _logsumexp, logsumexp_pair as _logsumexp_pair)
nmax = int(_np.max([dtraj.max() for dtraj in dtrajs]))
if nstates is None:
nstates = nmax + 1
elif nstates < nmax + 1:
raise ValueError("nstates is smaller than the number of observed microstates")
nthermo = bias_sequences[0].shape[1]
bias_matrix = -_np.ones(shape=(nthermo, nstates), dtype=_np.float64) * _np.inf
counts = _np.zeros(shape=(nstates,), dtype=_np.intc)
for s in range(len(bias_sequences)):
for i in range(nstates):
idx = (dtrajs[s] == i)
nidx = idx.sum()
if nidx == 0:
continue
counts[i] += nidx
selected_bias_sequence = bias_sequences[s][idx, :]
for k in range(nthermo):
bias_matrix[k, i] = _logsumexp_pair(
bias_matrix[k, i],
_logsumexp(
_np.ascontiguousarray(-selected_bias_sequence[:, k]),
inplace=False))
idx = counts.nonzero()
log_counts = _np.log(counts[idx])
bias_matrix *= -1.0
bias_matrix[:, idx] += log_counts[_np.newaxis, :]
return bias_matrix | def get_averaged_bias_matrix(bias_sequences, dtrajs, nstates=None) | r"""
Computes a bias matrix via an exponential average of the observed frame wise bias energies.
Parameters
----------
bias_sequences : list of numpy.ndarray(T_i, num_therm_states)
A single reduced bias energy trajectory or a list of reduced bias energy trajectories.
For every simulation frame seen in trajectory i and time step t, btrajs[i][t, k] is the
reduced bias energy of that frame evaluated in the k'th thermodynamic state (i.e. at
the k'th Umbrella/Hamiltonian/temperature)
dtrajs : list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are indexes
in 0,...,num_conf_states-1 enumerating the num_conf_states Markov states or the bins the
trajectory is in at any time.
nstates : int, optional, default=None
Number of configuration states.
Returns
-------
bias_matrix : numpy.ndarray(shape=(num_therm_states, num_conf_states)) object
bias_energies_full[j, i] is the bias energy in units of kT for each discrete state i
at thermodynamic state j. | 2.984479 | 3.036556 | 0.98285 |
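The log-count correction above amounts to an exponential average, :math:`b_{ki} = -\log\big(\tfrac{1}{N_i}\sum_t e^{-b_t}\big)`; a quick check of that identity using scipy's logsumexp in place of the cython helper (scipy is an assumption here):

```python
import numpy as np
from scipy.special import logsumexp  # stands in for the cython logsumexp used above

b = np.array([1.0, 2.5, 0.3])                  # reduced bias energies of the frames of one state
avg = -(logsumexp(-b) - np.log(len(b)))        # numerically stable form of the exponential average
assert np.isclose(avg, -np.log(np.mean(np.exp(-b))))
```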
r
ttrajs, umbrella_centers, force_constants, unbiased_state = _get_umbrella_sampling_parameters(
us_trajs, us_centers, us_force_constants, md_trajs=md_trajs, kT=kT)
if md_trajs is None:
md_trajs = []
if width is None:
width = _np.zeros(shape=(umbrella_centers.shape[1],), dtype=_np.float64)
else:
width = _np.asarray(
list(map(lambda w: w if w is not None and w > 0.0 else 0.0, width)),
dtype=_np.float64)
if width.shape[0] != umbrella_centers.shape[1]:
raise ValueError('Mismatching number of width components.')
btrajs = _get_umbrella_bias_sequences(
us_trajs + md_trajs, umbrella_centers, force_constants, width)
return ttrajs, btrajs, umbrella_centers, force_constants, unbiased_state | def get_umbrella_sampling_data(
us_trajs, us_centers, us_force_constants, md_trajs=None, kT=None, width=None) | r"""
Wraps umbrella sampling data or a mix of umbrella sampling and direct molecular dynamics.
Parameters
----------
us_trajs : list of N arrays, each of shape (T_i, d)
List of arrays, each having T_i rows, one for each time step, and d columns where d is the
dimension in which umbrella sampling was applied. Often d=1, and thus us_trajs will
be a list of 1d-arrays.
us_centers : array-like of size N
List or array of N center positions. Each position must be a d-dimensional vector. For 1d
umbrella sampling, one can simply pass a list of centers, e.g. [-5.0, -4.0, -3.0, ... ].
us_force_constants : float or array-like of float
The force constants used in the umbrellas, unit-less (e.g. kT per length unit). If different
force constants were used for different umbrellas, a list or array of N force constants
can be given. For multidimensional umbrella sampling, the force matrix must be used.
md_trajs : list of M arrays, each of shape (T_i, d), optional, default=None
Unbiased molecular dynamics simulations. Format like umbrella_trajs.
kT : float (optional)
Use this attribute if the supplied force constants are NOT unit-less.
width : array-like of float, optional, default=None
Specify periodicity for individual us_traj dimensions. Each positive entry will make the
corresponding feature periodic and use the given value as width. None/zero values will be
treated as non-periodic.
Returns
-------
ttrajs : list of N+M int arrays, each of shape (T_i,)
The integers are indexes in 0,...,K-1 enumerating the thermodynamic states the trajectories
are in at any time.
btrajs : list of N+M float arrays, each of shape (T_i, K)
The floats are the reduced bias energies for each thermodynamic state and configuration.
umbrella_centers : float array of shape (K, d)
The individual umbrella centers labelled accordingly to ttrajs.
force_constants : float array of shape (K, d, d)
The individual force matrices labelled accordingly to ttrajs.
unbiased_state : int or None
Index of the unbiased thermodynamic state (if present). | 2.737632 | 2.438393 | 1.12272 |
r
ttrajs, temperatures = _get_multi_temperature_parameters(temp_trajs)
if reference_temperature is None:
reference_temperature = temperatures.min()
else:
assert isinstance(reference_temperature, (int, float)), \
'reference_temperature must be numeric'
assert reference_temperature > 0.0, 'reference_temperature must be positive'
btrajs = _get_multi_temperature_bias_sequences(
energy_trajs, temp_trajs, temperatures, reference_temperature, energy_unit, temp_unit)
if reference_temperature in temperatures:
unbiased_state = _np.where(temperatures == reference_temperature)[0]
try:
unbiased_state = unbiased_state[0]
except IndexError:
unbiased_state = None
else:
unbiased_state = None
return ttrajs, btrajs, temperatures, unbiased_state | def get_multi_temperature_data(
energy_trajs, temp_trajs, energy_unit, temp_unit, reference_temperature=None) | r"""
Wraps data from multi-temperature molecular dynamics.
Parameters
----------
energy_trajs : list of N arrays, each of shape (T_i,)
List of arrays, each having T_i rows, one for each time step, containing the potential
energies time series in units of kT, kcal/mol or kJ/mol.
temp_trajs : list of N int arrays, each of shape (T_i,)
List of arrays, each having T_i rows, one for each time step, containing the heat bath
temperature time series (at which temperatures the frames were created) in units of K or C.
Alternatively, these trajectories may contain kT values instead of temperatures.
energy_unit: str, optional, default='kcal/mol'
The physical unit used for energies. Current options: kcal/mol, kJ/mol, kT.
temp_unit : str, optional, default='K'
The physical unit used for the temperature. Current options: K, C, kT
reference_temperature : float or None, optional, default=None
Reference temperature against which the bias energies are computed. If not given, the lowest
temperature or kT value is used. If given, this parameter must have the same unit as the
temp_trajs.
Returns
-------
ttrajs : list of N+M int arrays, each of shape (T_i,)
The integers are indexes in 0,...,K-1 enumerating the thermodynamic states the trajectories
are in at any time.
btrajs : list of N+M float arrays, each of shape (T_i, K)
The floats are the reduced bias energies for each thermodynamic state and configuration.
temperatures : float array of length K
The individual temperatures labelled accordingly to ttrajs.
unbiased_state : int or None
Index of the unbiased thermodynamic state (if present). | 2.662403 | 2.320606 | 1.147288 |
r
if unbiased_state is None:
return
for memm in memm_list:
assert 0 <= unbiased_state < len(memm.models), "invalid state: " + str(unbiased_state)
memm._unbiased_state = unbiased_state | def assign_unbiased_state_label(memm_list, unbiased_state) | r"""
Sets the msm label for the given list of estimated MEMM objects.
Parameters
----------
memm_list : list of estimated MEMM objects
The MEMM objects which shall have the msm label set.
unbiased_state : int or None
Index of the unbiased thermodynamic state (if present). | 3.249383 | 3.687364 | 0.881221 |
if x is None:
return True
if isinstance(x, numbers.Number):
return x == 0.0
if isinstance(x, np.ndarray):
return np.all(x == 0)
return False | def _is_zero(x) | Returns True if x is numerically 0 or an array with 0's. | 2.205777 | 1.884025 | 1.170779 |
if sparse_mode.lower() == 'sparse':
min_const_col_number = 0 # enforce sparsity. A single constant column will lead to sparse treatment
elif sparse_mode.lower() == 'dense':
min_const_col_number = X.shape[1] + 1 # never use sparsity
else:
if remove_mean and not modify_data: # in this case we have to copy the data anyway, and can be permissive
min_const_col_number = max(0.1 * X.shape[1], 50)
else:
# This is a rough heuristic to choose a minimum column number for which sparsity may pay off.
# This heuristic is good for large number of samples, i.e. it may be inadequate for small matrices X.
if X.shape[1] < 250:
min_const_col_number = X.shape[1] - 0.25 * X.shape[1]
elif X.shape[1] < 1000:
min_const_col_number = X.shape[1] - (0.5 * X.shape[1] - 100)
else:
min_const_col_number = X.shape[1] - (0.8 * X.shape[1] - 400)
# ensure we have an integer again.
min_const_col_number = int(min_const_col_number)
if X.shape[1] > min_const_col_number:
mask = covartools.variable_cols(X, tol=sparse_tol, min_constant=min_const_col_number) # bool vector
nconst = len(np.where(~mask)[0])
if nconst > min_const_col_number:
xconst = X[0, ~mask]
X = X[:, mask] # sparsify
else:
xconst = None
mask = None
else:
xconst = None
mask = None
return X, mask, xconst | def _sparsify(X, remove_mean=False, modify_data=False, sparse_mode='auto', sparse_tol=0.0) | Determines the sparsity of X and returns a selected sub-matrix
Only conducts sparsification if the number of constant columns is at least
max(a N - b, min_const_col_number),
Parameters
----------
X : ndarray
data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
Returns
-------
X0 : ndarray (view of X)
Either X itself (if not sufficiently sparse), or a sliced view of X,
containing only the variable columns
mask : ndarray(N, dtype=bool) or None
Bool selection array that indicates which columns of X were selected for
X0, i.e. X0 = X[:, mask]. mask is None if no sparse selection was made.
xconst : ndarray(N)
Constant column values that are outside the sparse selection, i.e.
X[i, ~mask] = xconst for any row i. xconst=None if no sparse selection was made. | 3.695658 | 3.268835 | 1.130574 |
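As a rough illustration of the sparsification step above, here is a minimal numpy-only sketch of how constant columns can be detected and split off; the real implementation delegates the detection to covartools.variable_cols, so the simple tolerance test below is an assumption, not the library routine:
import numpy as np
X = np.array([[1.0, 2.0, 5.0],
              [1.0, 3.0, 5.0],
              [1.0, 4.0, 5.0]])
sparse_tol = 0.0
mask = np.any(np.abs(X - X[0, :]) > sparse_tol, axis=0)  # True for variable columns
X0 = X[:, mask]        # variable part, analogous to the sparsified X returned above
xconst = X[0, ~mask]   # values of the constant columns
assert np.array_equal(mask, [False, True, False])
assert np.allclose(xconst, [1.0, 5.0])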
r
# determine type
dtype = np.float64 # default: convert to float64 in order to avoid cancellation errors
if X.dtype.kind == 'b' and X.shape[0] < 2**23 and not remove_mean:
dtype = np.float32 # convert to float32 if we can represent all numbers
# copy/convert if needed
if X.dtype not in (np.float64, dtype): # leave as float64 (conversion is expensive), otherwise convert to dtype
X = X.astype(dtype, order='C')
if const is not None:
const = const.astype(dtype, order='C')
elif copy:
X = X.copy(order='C')
if const is not None:
const = const.copy(order='C')
return X, const | def _copy_convert(X, const=None, remove_mean=False, copy=True) | r""" Makes a copy or converts the data type if needed
Copies the data and converts the data type if unsuitable for covariance
calculation. The standard data type for covariance computations is
float64, because the double precision (but not single precision) is
usually sufficient to compute the long sums involved in covariance
matrix computations. Integer types are avoided even if the data is integer,
because the BLAS matrix multiplication is very fast with floats, but very
slow with integers. If X is of boolean type (0/1), the standard data type
is float32, because this will be sufficient to represent numbers up to 2^23
without rounding error, which is usually sufficient, as the
largest element in np.dot(X.T, X) can then be T, the number of data points.
Parameters
----------
remove_mean : bool
If True, will enforce float64 even if the input is boolean
copy : bool
If True, enforces a copy even if the data type doesn't require it.
Return
------
X : ndarray
copy or reference to X if no copy was needed.
const : ndarray or None
copy or reference to const if no copy was needed. | 3.60809 | 3.304078 | 1.092011 |
r
T = X.shape[0]
# Check if weights are given:
if weights is not None:
X = weights[:, None] * X
if Y is not None:
Y = weights[:, None] * Y
# compute raw sums on variable data
sx_raw = X.sum(axis=0) # this is the mean before subtracting it.
sy_raw = 0
if Y is not None:
sy_raw = Y.sum(axis=0)
# expand raw sums to full data
if xmask is not None:
if weights is not None:
sx_raw = _sum_sparse(sx_raw, xmask, xconst, weights.sum())
else:
sx_raw = _sum_sparse(sx_raw, xmask, xconst, T)
if ymask is not None:
if weights is not None:
sy_raw = _sum_sparse(sy_raw, ymask, yconst, weights.sum())
else:
sy_raw = _sum_sparse(sy_raw, ymask, yconst, T)
# compute effective sums and centered sums
if Y is not None and symmetric:
sx = sx_raw + sy_raw
sy = sx
if weights is not None:
w = 2*np.sum(weights)
else:
w = 2 * T
else:
sx = sx_raw
sy = sy_raw
if weights is not None:
w = np.sum(weights)
else:
w = T
sx_raw_centered = sx_raw.copy()
if Y is not None:
sy_raw_centered = sy_raw.copy()
# center mean.
if remove_mean:
if Y is not None and symmetric:
sx_raw_centered -= 0.5 * sx
sy_raw_centered -= 0.5 * sy
else:
sx_raw_centered = np.zeros(sx.size)
if Y is not None:
sy_raw_centered = np.zeros(sy.size)
# return
if Y is not None:
return w, sx, sx_raw_centered, sy, sy_raw_centered
else:
return w, sx, sx_raw_centered | def _sum(X, xmask=None, xconst=None, Y=None, ymask=None, yconst=None, symmetric=False, remove_mean=False,
weights=None) | r""" Computes the column sums and centered column sums.
If symmetric = False, the sums will be determined as
.. math:
sx &=& \sum_t x_t
sy &=& \sum_t y_t
If symmetric, the effective sums will be determined as
.. math:
sx = sy = \sum_t (x_t + y_t)
Returns
-------
w : float
statistical weight of sx, sy
sx : ndarray
effective row sum of X (including symmetrization if requested)
sx_raw_centered : ndarray
centered raw row sum of X
optional returns (only if Y is given):
sy : ndarray
effective row sum of Y (including symmetrization if requested)
sy_raw_centered : ndarray
centered raw row sum of Y | 2.327313 | 2.13795 | 1.088572 |
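A tiny numeric illustration of the symmetric branch above (no weights), showing the effective weight and the symmetrized column sum; this simply restates the code on concrete data:
import numpy as np
X = np.array([[1.0, 0.0], [2.0, 1.0]])   # T = 2 frames
Y = np.array([[0.0, 1.0], [1.0, 3.0]])
sx_raw, sy_raw = X.sum(axis=0), Y.sum(axis=0)
w = 2 * X.shape[0]                        # statistical weight 2T in the symmetric case
sx = sx_raw + sy_raw                      # effective symmetrized column sum
assert w == 4 and np.allclose(sx, [4.0, 5.0])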
xmean = s / float(w)
if mask is None:
X = np.subtract(X, xmean, out=X if inplace else None)
else:
X = np.subtract(X, xmean[mask], out=X if inplace else None)
const = np.subtract(const, xmean[~mask], out=const if inplace else None)
return X, const | def _center(X, w, s, mask=None, const=None, inplace=True) | Centers the data.
Parameters
----------
w : float
statistical weight of s
inplace : bool
center in place
Returns
-------
X : ndarray
centered data (same array as the input if inplace=True)
const : ndarray or None
constant column values after centering (unchanged if mask is None) | 2.72741 | 3.932558 | 0.693546 |
a = np.where(mask)[0]
b = column_selection[np.in1d(column_selection, a)]
return np.searchsorted(a, b) | def _filter_variable_indices(mask, column_selection) | Returns column indices restricted to the variable columns as determined by the given mask.
Parameters
----------
mask : ndarray(N, dtype=bool)
Array indicating the variable columns.
column_selection : ndarray(k, dtype=int)
Column indices to be filtered and mapped.
Returns
-------
ix : ndarray(l, dtype=int)
Column indices restricted to the variable columns, mapped to the correct index range. | 3.576747 | 3.718704 | 0.961826 |
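A worked example of the index mapping on concrete data (the same three lines as the function body):
import numpy as np
mask = np.array([True, False, True, True, False])   # columns 0, 2, 3 are variable
column_selection = np.array([0, 1, 3])              # requested full-matrix columns
a = np.where(mask)[0]                               # variable column indices: [0, 2, 3]
b = column_selection[np.in1d(column_selection, a)]  # drop column 1 (constant): [0, 3]
ix = np.searchsorted(a, b)                          # positions within the variable part
assert np.array_equal(ix, [0, 2])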
if weights is not None:
if diag_only:
return np.sum(weights[:, None] * X * Y, axis=0)
else:
return np.dot((weights[:, None] * X).T, Y)
else:
if diag_only:
return np.sum(X * Y, axis=0)
else:
return np.dot(X.T, Y) | def _M2_dense(X, Y, weights=None, diag_only=False) | 2nd moment matrix using dense matrix computations.
This function is encapsulated such that we can make easy modifications of the basic algorithms | 1.727301 | 1.972899 | 0.875514 |
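For clarity, a short demonstration of the two code paths (full matrix versus diagonal only) with weights; the diagonal of the full weighted product coincides with the diag_only result:
import numpy as np
X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
Y = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
weights = np.array([1.0, 0.5, 2.0])
M2 = np.dot((weights[:, None] * X).T, Y)           # full weighted second moment
diag = np.sum(weights[:, None] * X * Y, axis=0)    # diag_only path
assert np.allclose(np.diag(M2), diag)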
r
C = np.zeros((len(mask_X), len(mask_Y)))
# Block 11
C[np.ix_(mask_X, mask_Y)] = _M2_dense(Xvar, Yvar, weights=weights)
# other blocks
xsum_is_0 = _is_zero(xvarsum)
ysum_is_0 = _is_zero(yvarsum)
xconst_is_0 = _is_zero(xconst)
yconst_is_0 = _is_zero(yconst)
# TODO: maybe we don't need the checking here, if we do the decision in the higher-level function M2
# TODO: if not zero, we could still exploit the zeros in const and compute (and write!) this outer product
# TODO: only to a sub-matrix
# Block 12 and 21
if weights is not None:
wsum = np.sum(weights)
xvarsum = np.sum(weights[:, None] * Xvar, axis=0)
yvarsum = np.sum(weights[:, None] * Yvar, axis=0)
else:
wsum = Xvar.shape[0]
if not (xsum_is_0 or yconst_is_0) or not (ysum_is_0 or xconst_is_0):
C[np.ix_(mask_X, ~mask_Y)] = np.outer(xvarsum, yconst)
C[np.ix_(~mask_X, mask_Y)] = np.outer(xconst, yvarsum)
# Block 22
if not (xconst_is_0 or yconst_is_0):
C[np.ix_(~mask_X, ~mask_Y)] = np.outer(wsum*xconst, yconst)
return C | def _M2_const(Xvar, mask_X, xvarsum, xconst, Yvar, mask_Y, yvarsum, yconst, weights=None) | r""" Computes the unnormalized covariance matrix between X and Y, exploiting constant input columns
Computes the unnormalized covariance matrix :math:`C = X^\top Y`
(for symmetric=False) or :math:`C = \frac{1}{2} (X^\top Y + Y^\top X)`
(for symmetric=True). Suppose the data matrices can be column-permuted
to have the form
.. math:
X &=& (X_{\mathrm{var}}, X_{\mathrm{const}})
Y &=& (Y_{\mathrm{var}}, Y_{\mathrm{const}})
with rows:
.. math:
x_t &=& (x_{\mathrm{var},t}, x_{\mathrm{const}})
y_t &=& (y_{\mathrm{var},t}, y_{\mathrm{const}})
where :math:`x_{\mathrm{const}},\:y_{\mathrm{const}}` are constant vectors.
The resulting matrix has the general form:
.. math:
C &=& \begin{pmatrix} X_{\mathrm{var}}^\top Y_{\mathrm{var}} & x_{sum} y_{\mathrm{const}}^\top \\ x_{\mathrm{const}} y_{sum}^\top & T x_{\mathrm{const}} y_{\mathrm{const}}^\top \end{pmatrix}
where :math:`x_{sum} = \sum_t x_{\mathrm{var},t}` and
:math:`y_{sum} = \sum_t y_{\mathrm{var},t}`.
Parameters
----------
Xvar : ndarray (T, m)
Part of the data matrix X with :math:`m \le M` variable columns.
mask_X : ndarray (M)
Boolean array of size M of the full columns. False for constant column,
True for variable column in X.
xvarsum : ndarray (m)
Column sum of variable part of data matrix X
xconst : ndarray (M-m)
Values of the constant part of data matrix X
Yvar : ndarray (T, n)
Part of the data matrix Y with :math:`n \le N` variable columns.
mask_Y : ndarray (N)
Boolean array of size N of the full columns. False for constant column,
True for variable column in Y.
yvarsum : ndarray (n)
Column sum of variable part of data matrix Y
yconst : ndarray (N-n)
Values of the constant part of data matrix Y
weights : None or ndarray (N)
weights for all time steps.
Returns
-------
C : ndarray (M, N)
Unnormalized covariance matrix. | 2.997052 | 3.132902 | 0.956638 |
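The block structure described in the docstring can be checked on toy data; this sketch assembles X^T Y from the variable blocks and outer products exactly as the function does (unweighted case, made-up matrices):
import numpy as np
T = 6
rng = np.random.RandomState(0)
Xvar, Yvar = rng.rand(T, 2), rng.rand(T, 2)
xconst, yconst = np.array([3.0]), np.array([-1.0])
X = np.hstack([Xvar, np.tile(xconst, (T, 1))])   # one constant column appended
Y = np.hstack([Yvar, np.tile(yconst, (T, 1))])
C = np.zeros((3, 3))
C[:2, :2] = Xvar.T @ Yvar                        # block 11
C[:2, 2:] = np.outer(Xvar.sum(axis=0), yconst)   # block 12
C[2:, :2] = np.outer(xconst, Yvar.sum(axis=0))   # block 21
C[2:, 2:] = T * np.outer(xconst, yconst)         # block 22
assert np.allclose(C, X.T @ Y)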
C = np.zeros((len(mask_X), len(mask_Y)))
C[np.ix_(mask_X, mask_Y)] = _M2_dense(Xvar, Yvar, weights=weights)
return C | def _M2_sparse(Xvar, mask_X, Yvar, mask_Y, weights=None) | 2nd moment matrix exploiting zero input columns | 2.151654 | 2.198464 | 0.978708 |
assert len(mask_X) == len(mask_Y), 'X and Y need to have equal sizes for symmetrization'
if column_selection is None:
mask_Xk = mask_X
mask_Yk = mask_Y
Xvark = Xvar
Yvark = Yvar
else:
mask_Xk = mask_X[column_selection]
mask_Yk = mask_Y[column_selection]
Xvark = Xvar[:, _filter_variable_indices(mask_X, column_selection)]
Yvark = Yvar[:, _filter_variable_indices(mask_Y, column_selection)]
Cxxyy = np.zeros((len(mask_X), len(mask_Yk)))
Cxxyy[np.ix_(mask_X, mask_Xk)] = _M2_dense(Xvar, Xvark, weights=weights)
Cxxyy[np.ix_(mask_Y, mask_Yk)] += _M2_dense(Yvar, Yvark, weights=weights)
Cxyyx = np.zeros((len(mask_X), len(mask_Yk)))
Cxy = _M2_dense(Xvar, Yvark, weights=weights)
Cyx = _M2_dense(Yvar, Xvark, weights=weights)
Cxyyx[np.ix_(mask_X, mask_Yk)] = Cxy
Cxyyx[np.ix_(mask_Y, mask_Xk)] += Cyx
return Cxxyy, Cxyyx | def _M2_sparse_sym(Xvar, mask_X, Yvar, mask_Y, weights=None, column_selection=None) | 2nd self-symmetric moment matrix exploiting zero input columns
Computes X'X + Y'Y and X'Y + Y'X | 1.908651 | 1.932704 | 0.987555 |
if mask_X is None and mask_Y is None:
return _M2_dense(Xvar, Yvar, weights=weights, diag_only=diag_only)
else:
# Check if one of the masks is not None, modify it and also adjust the constant columns:
if mask_X is None:
mask_X = np.ones(Xvar.shape[1], dtype=bool)
xconst = np.ones(0, dtype=float)
if mask_Y is None:
mask_Y = np.ones(Yvar.shape[1], dtype=bool)
yconst = np.ones(0, dtype=float)
if _is_zero(xsum) and _is_zero(ysum) or _is_zero(xconst) and _is_zero(yconst):
return _M2_sparse(Xvar, mask_X, Yvar, mask_Y, weights=weights)
else:
return _M2_const(Xvar, mask_X, xsum[mask_X], xconst, Yvar, mask_Y, ysum[mask_Y], yconst, weights=weights) | def _M2(Xvar, Yvar, mask_X=None, mask_Y=None, xsum=0, xconst=0, ysum=0, yconst=0, weights=None, diag_only=False) | direct (nonsymmetric) second moment matrix. Decide if we need dense, sparse, const | 2.286806 | 2.222301 | 1.029026 |
if mask_X is None and mask_Y is None:
if column_selection is None:
Xvark = Xvar
Yvark = Yvar
else:
Xvark = Xvar[:, column_selection]
Yvark = Yvar[:, column_selection]
Cxxyy = _M2_dense(Xvar, Xvark, weights=weights, diag_only=diag_only) \
+ _M2_dense(Yvar, Yvark, weights=weights, diag_only=diag_only)
Cxy = _M2_dense(Xvar, Yvark, weights=weights, diag_only=diag_only)
Cyx = _M2_dense(Yvar, Xvark, weights=weights, diag_only=diag_only)
Cxyyx = Cxy + Cyx
else:
# Check if one of the masks is not None, modify it and also adjust the constant columns:
if mask_X is None:
mask_X = np.ones(Xvar.shape[1], dtype=bool)
xconst = np.ones(0, dtype=float)
if mask_Y is None:
mask_Y = np.ones(Yvar.shape[1], dtype=bool)
yconst = np.ones(0, dtype=float)
if _is_zero(xsum) and _is_zero(ysum) or _is_zero(xconst) and _is_zero(yconst):
Cxxyy, Cxyyx = _M2_sparse_sym(Xvar, mask_X, Yvar, mask_Y, weights=weights, column_selection=column_selection)
else:
xvarsum = xsum[mask_X] # to variable part
yvarsum = ysum[mask_Y] # to variable part
if column_selection is None:
Xvark = Xvar
mask_Xk = mask_X
xkvarsum = xvarsum
xkconst = xconst
Yvark = Yvar
mask_Yk = mask_Y
ykvarsum = yvarsum
ykconst = yconst
else:
Xvark = Xvar[:, _filter_variable_indices(mask_X, column_selection)]
mask_Xk = mask_X[column_selection]
xksum = xsum[column_selection]
xkvarsum = xksum[mask_Xk]
xkconst = xconst[_filter_variable_indices(~mask_X, column_selection)]
Yvark = Yvar[:, _filter_variable_indices(mask_Y, column_selection)]
mask_Yk = mask_Y[column_selection]
yksum = ysum[column_selection]
ykvarsum = yksum[mask_Yk]
ykconst = yconst[_filter_variable_indices(~mask_Y, column_selection)]
Cxxyy = _M2_const(Xvar, mask_X, xvarsum, xconst, Xvark, mask_Xk, xkvarsum, xkconst, weights=weights) \
+ _M2_const(Yvar, mask_Y, yvarsum, yconst, Yvark, mask_Yk, ykvarsum, ykconst, weights=weights)
Cxy = _M2_const(Xvar, mask_X, xvarsum, xconst, Yvark, mask_Yk, ykvarsum, ykconst, weights=weights)
Cyx = _M2_const(Yvar, mask_Y, yvarsum, yconst, Xvark, mask_Xk, xkvarsum, xkconst, weights=weights)
Cxyyx = Cxy + Cyx
return Cxxyy, Cxyyx | def _M2_symmetric(Xvar, Yvar, mask_X=None, mask_Y=None, xsum=0, xconst=0, ysum=0, yconst=0, weights=None,
column_selection=None, diag_only=False) | symmetric second moment matrices. Decide if we need dense, sparse, const | 1.746409 | 1.734558 | 1.006832 |
w, s, M = moments_XX(X, remove_mean=remove_mean, weights=weights, modify_data=modify_data,
sparse_mode=sparse_mode, sparse_tol=sparse_tol)
return M / float(w) | def covar(X, remove_mean=False, modify_data=False, weights=None, sparse_mode='auto', sparse_tol=0.0) | Computes the covariance matrix of X
Computes
.. math:
C_XX &=& X^\top X
while exploiting zero or constant columns in the data matrix.
WARNING: Directly use moments_XX if you can. This function does an additional
constant-matrix multiplication and does not return the mean.
Parameters
----------
X : ndarray (T, M)
Data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
weights : None or ndarray(T, )
weights assigned to each trajectory point of X. If None, all data points have weight one.
If ndarray, each data point is assigned a separate weight.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
sparse_tol: float
Threshold for considering column to be zero in order to save computing
effort when the data is sparse or almost sparse.
If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
is not given) of the covariance matrix will be set to zero. If Y is
given and max(abs(Y[:, i])) < sparse_tol, then column i of the
covariance matrix will be set to zero.
Returns
-------
C_XX : ndarray (M, M)
Covariance matrix of X
See also
--------
moments_XX | 3.714095 | 4.851028 | 0.76563 |
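A hedged usage sketch of covar, the function defined directly above (import it from wherever this module is installed). With remove_mean=True and no weights the result should coincide with the biased sample covariance, assuming moments_XX normalizes by the number of frames without a Bessel correction:
import numpy as np
# `covar` is the function shown above; it is assumed to be in scope here
np.random.seed(1)
X = np.random.rand(1000, 3)
C = covar(X, remove_mean=True)
C_ref = np.cov(X, rowvar=False, bias=True)   # same normalization: divide by T, no Bessel correction
assert np.allclose(C, C_ref)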
w, sx, sy, Mxx, Mxy = moments_XXXY(X, Y, remove_mean=remove_mean, modify_data=modify_data, weights=weights,
symmetrize=symmetrize, sparse_mode=sparse_mode, sparse_tol=sparse_tol)
return Mxx / float(w), Mxy / float(w) | def covars(X, Y, remove_mean=False, modify_data=False, symmetrize=False, weights=None, sparse_mode='auto',
sparse_tol=0.0) | Computes the covariance and cross-covariance matrix of X and Y
If symmetrize is False, computes
.. math:
C_XX &=& X^\top X
C_XY &=& X^\top Y
If symmetrize is True, computes
.. math:
C_XX &=& \frac{1}{2} (X^\top X + Y^\top Y)
C_XY &=& \frac{1}{2} (X^\top Y + Y^\top X)
while exploiting zero or constant columns in the data matrix.
WARNING: Directly use moments_XXXY if you can. This function does an additional
constant-matrix multiplication and does not return the mean.
Parameters
----------
X : ndarray (T, M)
Data matrix
Y : ndarray (T, N)
Second data matrix
remove_mean : bool
True: remove column mean from the data, False: don't remove mean.
modify_data : bool
If remove_mean=True, the mean will be removed in the data matrix X,
without creating an independent copy. This option is faster but might
lead to surprises because your input array is changed.
symmetrize : bool
Computes symmetrized means and moments (see above)
weights : None or ndarray(T, )
weights assigned to each trajectory point of X. If None, all data points have weight one.
If ndarray, each data point is assigned a separate weight.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
sparse_tol: float
Threshold for considering column to be zero in order to save computing
effort when the data is sparse or almost sparse.
If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
is not given) of the covariance matrix will be set to zero. If Y is
given and max(abs(Y[:, i])) < sparse_tol, then column i of the
covariance matrix will be set to zero.
Returns
-------
C_XX : ndarray (M, M)
Covariance matrix of X
C_XY : ndarray (M, N)
Covariance matrix of XY
See also
--------
moments_XXXY | 2.890494 | 3.561651 | 0.81156 |
if not D:
import yaml
args = config.logging_config
default = False
if args.upper() == 'DEFAULT':
default = True
src = config.default_logging_file
else:
src = args
# first try to read configured file
try:
with open(src) as f:
D = yaml.load(f)
except EnvironmentError as ee:
# fall back to default
if not default:
try:
with open(config.default_logging_file) as f2:
D = yaml.load(f2)
except EnvironmentError as ee2:
raise LoggingConfigurationError('Could not read either the configured or the '
'default logging configuration!\n%s' % ee2)
else:
raise LoggingConfigurationError('could not handle default logging '
'configuration file\n%s' % ee)
if D is None:
raise LoggingConfigurationError('Empty logging config! Try using default config by'
' setting logging_conf=DEFAULT in pyemma.cfg')
assert D
# this has not been set in PyEMMA version prior 2.0.2+
D.setdefault('version', 1)
# if the user has not explicitly disabled other loggers, we (contrary to Pythons
# default value) do not want to override them.
D.setdefault('disable_existing_loggers', False)
# configure using the dict
try:
dictConfig(D)
except ValueError as ve:
# issue with file handler?
if 'files' in str(ve) and 'rotating_files' in D['handlers']:
print("cfg dir", config.cfg_dir)
new_file = os.path.join(config.cfg_dir, 'pyemma.log')
warnings.warn("set logfile to %s, because there was"
" an error writing to the desired one" % new_file)
D['handlers']['rotating_files']['filename'] = new_file
else:
raise
dictConfig(D)
# get log file name of pyemmas root logger
logger = logging.getLogger('pyemma')
log_files = [getattr(h, 'baseFilename', None) for h in logger.handlers]
import atexit
@atexit.register
def clean_empty_log_files():
# gracefully shutdown logging system
logging.shutdown()
for f in log_files:
if f is not None and os.path.exists(f):
try:
if os.stat(f).st_size == 0:
os.remove(f)
except OSError as o:
print("during removal of empty logfiles there was a problem: ", o) | def setup_logging(config, D=None) | set up the logging system with the configured (in pyemma.cfg) logging config (logging.yml)
@param config: instance of pyemma.config module (wrapper) | 4.581655 | 4.497977 | 1.018603 |
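For orientation, a minimal sketch of the kind of dictionary the YAML file resolves to before being handed to dictConfig; the formatter, handler, and logger names here are illustrative, not the shipped pyemma defaults:
from logging.config import dictConfig
D = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {'plain': {'format': '%(asctime)s %(name)s %(levelname)s: %(message)s'}},
    'handlers': {'console': {'class': 'logging.StreamHandler', 'formatter': 'plain'}},
    'loggers': {'pyemma': {'handlers': ['console'], 'level': 'INFO'}},
}
dictConfig(D)   # the same call the function above performs with the parsed YAML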
import mdtraj
assert isinstance(self, mdtraj.Trajectory), type(self)
if not isinstance(value, mdtraj.Trajectory):
raise TypeError("value to assign is of incorrect type(%s). Should be mdtraj.Trajectory" % type(value))
idx = np.index_exp[idx]
frames, atoms = None, None
if isinstance(idx, (list, tuple)):
if len(idx) == 1:
frames, atoms = idx[0], slice(None, None, None)
if len(idx) == 2:
frames, atoms = idx[0], idx[1]
if len(idx) >= 3 or len(idx) == 0:
raise IndexError("invalid slice by %s" % idx)
self.xyz[frames, atoms] = value.xyz
self._time[frames] = value.time
self.unitcell_lengths[frames] = value.unitcell_lengths
self.unitcell_angles[frames] = value.unitcell_angles | def trajectory_set_item(self, idx, value) | :param self: mdtraj.Trajectory
:param idx: possible slices over frames,
:param value:
:return: | 2.722671 | 2.503459 | 1.087564 |
sig = inspect.signature(func)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
varkw = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
varkw = varkw[0] if varkw else None
defaults = [
p.default for p in sig.parameters.values()
if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
p.default is not p.empty)
] or None
if args[0] == 'self':
args.pop(0)
return ArgSpec(args, varargs, varkw, defaults) | def getargspec_no_self(func) | inspect.getargspec replacement using inspect.signature.
inspect.getargspec is deprecated in python 3. This is a replacement
based on the (new in python 3.3) `inspect.signature`.
Parameters
----------
func : callable
A callable to inspect
Returns
-------
argspec : ArgSpec(args, varargs, varkw, defaults)
This is similar to the result of inspect.getargspec(func) under
python 2.x.
NOTE: if the first argument of `func` is self, it is *not*, I repeat
*not* included in argspec.args.
This is done for consistency between inspect.getargspec() under
python 2.x, and inspect.signature() under python 3.x. | 1.573451 | 1.571708 | 1.001109 |
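A quick check on a toy method, assuming getargspec_no_self from above is in scope; ArgSpec is assumed to be the usual namedtuple defined next to this helper (that definition is not shown in this table):
from collections import namedtuple
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')  # assumed definition

class Estimator:
    def fit(self, X, lag=1, *args, **kwargs):
        pass

spec = getargspec_no_self(Estimator.fit)
assert spec.args == ['X', 'lag'] and spec.varargs == 'args'
assert spec.keywords == 'kwargs' and spec.defaults == [1]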
# get function name
if not isinstance(f, str):
fname = f.__func__.__name__
else:
fname = f
# get the method ref
method = getattr(obj, fname)
# handle cases
if inspect.ismethod(method):
return method(*args, **kwargs)
# attribute or property
return method | def call_member(obj, f, *args, **kwargs) | Calls the specified method, property or attribute of the given object
Parameters
----------
obj : object
The object that will be used
f : str or function
Name of or reference to method, property or attribute
failfast : bool
If True, will raise an exception when trying a method that doesn't exist. If False, will simply return None
in that case | 4.201854 | 4.308732 | 0.975195 |
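A toy illustration of the dispatch for a bound method versus a property, assuming call_member from above is in scope:
class Model:
    def score(self, x):
        return 2 * x
    @property
    def n_states(self):
        return 3

m = Model()
assert call_member(m, 'score', 5) == 10    # method given by name: looked up and called
assert call_member(m, m.score, 5) == 10    # method given by reference: name extracted, then called
assert call_member(m, 'n_states') == 3     # property: the value is returned directly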
args, varargs, keywords, defaults = getargspec_no_self(func)
return dict(zip(args[-len(defaults):], defaults)) | def get_default_args(func) | returns a dictionary of arg_name:default_values for the input function | 2.894202 | 2.961257 | 0.977356 |
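Example output, assuming get_default_args and getargspec_no_self from above are in scope (note the helper presumes the function has at least one default argument):
def cluster(data, k=8, max_iter=100, tol=1e-5):
    pass

assert get_default_args(cluster) == {'k': 8, 'max_iter': 100, 'tol': 1e-5}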
dtrajs_new = []
for dtraj in dtrajs:
if len(dtraj) <= lag:
continue
if shift is None:
s = np.random.randint(min(lag, dtraj.size-lag))
else:
s = shift
if sliding:
if s > 0:
dtrajs_new.append(dtraj[0:lag+s])
for t0 in range(s, dtraj.size-lag, lag):
dtrajs_new.append(dtraj[t0:t0+2*lag])
else:
for t0 in range(s, dtraj.size-lag, lag):
dtrajs_new.append(dtraj[t0:t0+lag+1])
return dtrajs_new | def blocksplit_dtrajs(dtrajs, lag=1, sliding=True, shift=None) | Splits the discrete trajectories into approximately uncorrelated fragments
Will split trajectories into fragments of lengths lag or longer. These fragments
are overlapping in order to conserve the transition counts at given lag.
If sliding=True, the resulting trajectories will lead to exactly the same count
matrix as when counted from dtrajs. If sliding=False (sampling at lag), the
count matrices are only equal when also setting shift=0.
Parameters
----------
dtrajs : list of ndarray(int)
Discrete trajectories
lag : int
Lag time at which counting will be done. If sh
sliding : bool
True for splitting trajectories for sliding count, False if lag-sampling will be applied
shift : None or int
Start of first full tau-window. If None, shift will be randomly generated | 1.966901 | 2.114579 | 0.930162 |
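A deterministic example (shift=0 avoids the random offset), assuming blocksplit_dtrajs from above is in scope; with sliding counting the fragments have length 2*lag, overlap by lag, and the last one is clipped at the trajectory end:
import numpy as np
dtraj = np.arange(10)
blocks = blocksplit_dtrajs([dtraj], lag=3, sliding=True, shift=0)
assert [list(b) for b in blocks] == [[0, 1, 2, 3, 4, 5],
                                     [3, 4, 5, 6, 7, 8],
                                     [6, 7, 8, 9]]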
if len(dtrajs) == 1:
raise ValueError('Only have a single trajectory. Cannot be split into train and test set')
I0 = np.random.choice(len(dtrajs), int(len(dtrajs)/2), replace=False)
I1 = np.array(list(set(list(np.arange(len(dtrajs)))) - set(list(I0))))
dtrajs_train = [dtrajs[i] for i in I0]
dtrajs_test = [dtrajs[i] for i in I1]
return dtrajs_train, dtrajs_test | def cvsplit_dtrajs(dtrajs) | Splits the trajectories into a training and test set with approximately equal number of trajectories
Parameters
----------
dtrajs : list of ndarray(int)
Discrete trajectories | 2.046312 | 2.12954 | 0.960917 |
old_state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(old_state) | def numpy_random_seed(seed=42) | sets the random seed of numpy within the context.
Example
-------
>>> import numpy as np
>>> with numpy_random_seed(seed=0):
... np.random.randint(1000)
684 | 1.813926 | 2.923895 | 0.62038 |
old_state = random.getstate()
random.seed(seed)
try:
yield
finally:
random.setstate(old_state) | def random_seed(seed=42) | sets the random seed of Python within the context.
Example
-------
>>> import random
>>> with random_seed(seed=0):
... random.randint(0, 1000) # doctest: +SKIP
864 | 2.367522 | 3.398164 | 0.696706 |
from pyemma import config
old_settings = {}
try:
# remember old setting, set new one. May raise ValueError, if invalid setting is given.
for k, v in kwargs.items():
old_settings[k] = getattr(config, k)
setattr(config, k, v)
yield
finally:
# restore old settings
for k, v in old_settings.items():
setattr(config, k, v) | def settings(**kwargs) | apply given PyEMMA config values temporarily within the given context. | 3.640124 | 2.922138 | 1.245706 |
res = []
assignment = self.metastable_assignment
for i in range(self.m):
res.append(np.where(assignment == i)[0])
return res | def metastable_sets(self) | Crisp clustering using PCCA. This is only recommended for visualization purposes. You *cannot* compute any
actual quantity of the coarse-grained kinetics without employing the fuzzy memberships!
Returns
-------
A list of length equal to metastable states. Each element is an array with microstate indexes contained in it | 4.740209 | 3.856747 | 1.229069 |
_warn(
'scatter_contour is deprecated; use plot_contour instead'
' and manually add a scatter plot on top.',
DeprecationWarning)
ax = contour(
x, y, z, ncontours=ncontours, colorbar=colorbar,
fig=fig, ax=ax, cmap=cmap)
# scatter points
ax.scatter(x, y, marker='o', c='b', s=5)
# show or save
if outfile is not None:
ax.get_figure().savefig(outfile)
return ax | def scatter_contour(
x, y, z, ncontours=50, colorbar=True, fig=None,
ax=None, cmap=None, outfile=None) | Contour plot on scattered data (x,y,z) and
plots the positions of the points (x,y) on top.
Parameters
----------
x : ndarray(T)
x-coordinates
y : ndarray(T)
y-coordinates
z : ndarray(T)
z-coordinates
ncontours : int, optional, default=50
number of contour levels
fig : matplotlib Figure object, optional, default=None
the figure to plot into. When set to None the default
Figure object will be used
ax : matplotlib Axes object, optional, default=None
the axes to plot to. When set to None the default Axes
object will be used.
cmap : matplotlib colormap, optional, default=None
the color map to use. None will use pylab.cm.jet.
outfile : str, optional, default=None
output file to write the figure to. When not given,
the plot will be displayed
Returns
-------
ax : Axes object containing the plot | 3.754243 | 4.651862 | 0.807041 |
z, xedge, yedge = _np.histogram2d(
xall, yall, bins=nbins, weights=weights)
x = 0.5 * (xedge[:-1] + xedge[1:])
y = 0.5 * (yedge[:-1] + yedge[1:])
if avoid_zero_count:
z = _np.maximum(z, _np.min(z[z.nonzero()]))
return x, y, z.T | def get_histogram(
xall, yall, nbins=100,
weights=None, avoid_zero_count=False) | Compute a two-dimensional histogram.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
nbins : int, optional, default=100
Number of histogram bins used in each dimension.
weights : ndarray(T), optional, default=None
Sample weights; by default all samples have the same weight.
avoid_zero_count : bool, optional, default=False
Avoid zero counts by lifting all histogram elements to the
minimum value before computing the free energy. If False,
zero histogram counts would yield infinity in the free energy.
Returns
-------
x : ndarray(nbins,)
The bins' x-axis bin centers.
y : ndarray(nbins,)
The bins' y-axis bin centers.
z : ndarray(nbins, nbins)
Histogram counts in meshgrid format. | 1.978362 | 2.471346 | 0.80052 |
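Typical usage on scattered samples, assuming get_histogram from above is in scope; with avoid_zero_count=True every bin ends up with a strictly positive count:
import numpy as np
np.random.seed(0)
xall, yall = np.random.randn(10000), np.random.randn(10000)
x, y, z = get_histogram(xall, yall, nbins=50, avoid_zero_count=True)
assert z.shape == (50, 50) and z.min() > 0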
from scipy.interpolate import griddata
x, y = _np.meshgrid(
_np.linspace(xall.min(), xall.max(), nbins),
_np.linspace(yall.min(), yall.max(), nbins),
indexing='ij')
z = griddata(
_np.hstack([xall[:,None], yall[:,None]]),
zall, (x, y), method=method)
return x, y, z | def get_grid_data(xall, yall, zall, nbins=100, method='nearest') | Interpolate unstructured two-dimensional data.
Parameters
----------
xall : ndarray(T)
Sample x-coordinates.
yall : ndarray(T)
Sample y-coordinates.
zall : ndarray(T)
Sample z-coordinates.
nbins : int, optional, default=100
Number of histogram bins used in x/y-dimensions.
method : str, optional, default='nearest'
Assignment method; scipy.interpolate.griddata supports the
methods 'nearest', 'linear', and 'cubic'.
Returns
-------
x : ndarray(nbins, nbins)
The bins' x-coordinates in meshgrid format.
y : ndarray(nbins, nbins)
The bins' y-coordinates in meshgrid format.
z : ndarray(nbins, nbins)
Interpolated z-data in meshgrid format. | 1.875537 | 2.008142 | 0.933967 |
pi = _to_density(z)
free_energy = _np.inf * _np.ones(shape=z.shape)
nonzero = pi.nonzero()
free_energy[nonzero] = -_np.log(pi[nonzero])
if minener_zero:
free_energy[nonzero] -= _np.min(free_energy[nonzero])
return free_energy | def _to_free_energy(z, minener_zero=False) | Compute free energies from histogram counts.
Parameters
----------
z : ndarray(T)
Histogram counts.
minener_zero : boolean, optional, default=False
Shifts the energy minimum to zero.
Returns
-------
free_energy : ndarray(T)
The free energy values in units of kT. | 2.764176 | 3.314395 | 0.833991 |
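A compact sketch of the counts-to-free-energy conversion; _to_density is not shown in this table, so its assumed behaviour (normalization to unit mass) is inlined here:
import numpy as np
z = np.array([[0., 2., 6.],
              [2., 8., 2.]])                         # toy histogram counts
pi = z / z.sum()                                     # assumed behaviour of _to_density
free_energy = np.full(z.shape, np.inf)
nonzero = pi.nonzero()
free_energy[nonzero] = -np.log(pi[nonzero])
free_energy[nonzero] -= free_energy[nonzero].min()   # minener_zero=True
assert free_energy[1, 1] == 0.0                      # the most populated bin sits at zero
assert np.isinf(free_energy[0, 0])                   # empty bins stay at +inf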
allowed_keys = [
'corner_mask', 'alpha', 'locator', 'extend', 'xunits',
'yunits', 'antialiased', 'nchunk', 'hatches', 'zorder']
ignored = [key for key in kwargs.keys() if key not in allowed_keys]
for key in ignored:
_warn(
'{}={} is not an allowed optional parameter and will'
' be ignored'.format(key, kwargs[key]))
kwargs.pop(key, None)
return kwargs | def _prune_kwargs(kwargs) | Remove non-allowed keys from a kwargs dictionary.
Parameters
----------
kwargs : dict
Named parameters to prune. | 5.822411 | 6.14963 | 0.94679 |
traj = None
for ff in file_list:
if traj is None:
traj = md.load(ff, top=top)
else:
traj = traj.join(md.load(ff, top=top))
return traj | def single_traj_from_n_files(file_list, top) | Creates a single trajectory object from a list of files | 1.985289 | 2.159769 | 0.919213 |
# The list of copied attributes can be extended here with time
# Or perhaps ask the mdtraj guys to implement something similar?
stop = start+origin.n_frames
target.xyz[start:stop] = origin.xyz
target.unitcell_lengths[start:stop] = origin.unitcell_lengths
target.unitcell_angles[start:stop] = origin.unitcell_angles
target.time[start:stop] = origin.time
return target | def copy_traj_attributes(target, origin, start) | Inserts certain attributes of origin into target
:param target: target trajectory object
:param origin: origin trajectory object
:param start: :py:obj:`origin` attributes will be inserted in :py:obj:`target` starting at this index
:return: target: the md trajectory with the attributes of :py:obj:`origin` inserted | 4.626668 | 4.357095 | 1.06187 |
r
if not isinstance(e, Iterable):
raise TypeError("given element {} is not iterable in terms of "
"PyEMMAs coordinate pipeline.".format(e))
# only if we have more than one element
if not e.is_reader and len(self._chain) >= 1:
data_producer = self._chain[-1]
# avoid calling the setter of StreamingTransformer.data_producer, since this
# triggers a re-parametrization even on readers (where it makes not sense)
e._data_producer = data_producer
e.chunksize = self.chunksize
self._chain.append(e) | def add_element(self, e) | r""" Appends a pipeline stage.
Appends the given element to the end of the current chain. | 12.089456 | 11.292476 | 1.070576 |
r
if index > len(self._chain):
raise IndexError("tried to access element %i, but chain has only %i"
" elements" % (index, len(self._chain)))
if type(index) is not int:
raise ValueError(
"index is not a integer but '%s'" % str(type(index)))
# if e is already in chain, we're finished
if self._chain[index] is e:
return
# remove current index and its data producer
replaced = self._chain.pop(index)
if not replaced.is_reader:
replaced.data_producer = None
self._chain.insert(index, e)
if index == 0:
e.data_producer = e
else:
# rewire data_producers
e.data_producer = self._chain[index - 1]
# if e has a successive element, need to set data_producer
try:
successor = self._chain[index + 1]
successor.data_producer = e
except IndexError:
pass
# set data_producer for predecessor of e
# self._chain[max(0, index - 1)].data_producer = self._chain[index]
# since data producer of element after insertion changed, reset its status
# TODO: make parameterized a property?
self._chain[index]._estimated = False
return replaced | def set_element(self, index, e) | r""" Replaces a pipeline stage.
Replace an element in chain and return replaced element. | 4.770247 | 4.652707 | 1.025263 |
r
for element in self._chain:
if not element.is_reader and not element._estimated:
element.estimate(element.data_producer, stride=self.param_stride, chunksize=self.chunksize)
self._estimated = True | def parametrize(self) | r"""
Reads all data and discretizes it into discrete trajectories. | 14.930443 | 14.869351 | 1.004109 |
r
result = self._estimated
for el in self._chain:
if not el.is_reader:
result &= el._estimated
return result | def _is_estimated(self) | r"""
Iterates through the pipeline elements and checks if every element is parametrized. | 15.441478 | 11.919847 | 1.295443 |
if not self._estimated:
self.logger.info("not yet parametrized, running now.")
self.parametrize()
return self._chain[-1].dtrajs | def dtrajs(self) | get discrete trajectories | 15.168825 | 12.585473 | 1.205265 |
r
clustering = self._chain[-1]
reader = self._chain[0]
from pyemma.coordinates.clustering.interface import AbstractClustering
assert isinstance(clustering, AbstractClustering)
trajfiles = None
if isinstance(reader, FeatureReader):
trajfiles = reader.filenames
clustering.save_dtrajs(
trajfiles, prefix, output_dir, output_format, extension) | def save_dtrajs(self, prefix='', output_dir='.',
output_format='ascii', extension='.dtraj') | r"""Saves calculated discrete trajectories. Filenames are taken from
given reader. If data comes from memory dtrajs are written to a default
filename.
Parameters
----------
prefix : str
prepend prefix to filenames.
output_dir : str (optional)
save files to this directory. Defaults to current working directory.
output_format : str
if format is 'ascii' dtrajs will be written as csv files, otherwise
they will be written as NumPy .npy files.
extension : str
file extension to append (eg. '.itraj') | 6.366404 | 6.922686 | 0.919644 |
count = 0
for a in modifications:
if _debug:
assert a[0] in ('set', 'mv', 'map', 'rm', 'transform')
logger.debug("processing rule: %s", str(a))
if len(a) == 3:
operation, name, value = a
if operation == 'set':
state[name] = value
count += 1
elif operation == 'mv':
try:
arg = state.pop(name)
state[value] = arg
count += 1
except KeyError:
raise ClassVersionException("the previous version didn't "
"store an attribute named '{}'".format(a[1]))
elif operation == 'map':
func = value
if hasattr(func, '__func__'):
func = func.__func__
assert callable(func)
state[name] = func(state[name])
count += 1
elif len(a) == 2:
operation, value = a
if operation == 'rm':
state.pop(value, None)
count += 1
elif operation == 'transform':
assert callable(value)
value(state)
count += 1
assert count == len(modifications), 'was not able to process all modifications on state' | def apply(modifications, state) | applies modifications to given state
Parameters
----------
modifications: list of tuples
created by this class.list method.
state: dict
state dictionary | 3.410899 | 3.391048 | 1.005854 |
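A small illustration of the rule format on a plain state dict, assuming the apply function above is in scope; the rule tuples below are made up for demonstration and are not taken from any real pyemma class:
state = {'clustercenters': [[0.0], [1.0]], 'old_name': 42, 'obsolete': 'x'}
modifications = [
    ('set', 'n_clusters', 2),                # add a new entry
    ('mv', 'old_name', 'new_name'),          # rename a key
    ('map', 'n_clusters', lambda v: v * 1),  # transform a value in place
    ('rm', 'obsolete'),                      # drop an entry
]
apply(modifications, state)                  # the rule interpreter defined above
assert state == {'clustercenters': [[0.0], [1.0]], 'n_clusters': 2, 'new_name': 42}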
r
from pyemma._base.serialization.h5file import H5File
try:
with H5File(file_name=file_name, mode='a') as f:
f.add_serializable(model_name, obj=self, overwrite=overwrite, save_streaming_chain=save_streaming_chain)
except Exception as e:
msg = ('During saving the object {obj}, '
'the following error occurred: {error}'.format(obj=self, error=e))
if isinstance(self, Loggable):
self.logger.exception(msg)
else:
logger.exception(msg)
raise | def save(self, file_name, model_name='default', overwrite=False, save_streaming_chain=False) | r""" saves the current state of this object to given file and name.
Parameters
-----------
file_name: str
path to desired output file
model_name: str, default='default'
creates a group named 'model_name' in the given file, which will contain all of the data.
If the name already exists, and overwrite is False (default) will raise a RuntimeError.
overwrite: bool, default=False
Should overwrite existing model names?
save_streaming_chain : boolean, default=False
if True, the data_producer(s) of this object will also be saved in the given file.
Examples
--------
>>> import pyemma, numpy as np
>>> from pyemma.util.contexts import named_temporary_file
>>> m = pyemma.msm.MSM(P=np.array([[0.1, 0.9], [0.9, 0.1]]))
>>> with named_temporary_file() as file: # doctest: +SKIP
... m.save(file, 'simple') # doctest: +SKIP
... inst_restored = pyemma.load(file, 'simple') # doctest: +SKIP
>>> np.testing.assert_equal(m.P, inst_restored.P) # doctest: +SKIP | 4.124007 | 4.849933 | 0.850322 |
from .h5file import H5File
with H5File(file_name, model_name=model_name, mode='r') as f:
return f.model | def load(cls, file_name, model_name='default') | Loads a previously saved PyEMMA object from disk.
Parameters
----------
file_name : str or file like object (has to provide read method).
The file like object tried to be read for a serialized object.
model_name: str, default='default'
if multiple models are contained in the file, these can be accessed by
their name. Use :func:`pyemma.list_models` to get a representation of all stored models.
Returns
-------
obj : the de-serialized object | 3.877098 | 4.837276 | 0.801504 |
for field in SerializableMixIn._get_serialize_fields(klass):
# only try to get fields, we actually have.
if hasattr(self, field):
if _debug and field in state:
logger.debug('field "%s" already in state!', field)
state[field] = getattr(self, field)
return state | def _get_state_of_serializeable_fields(self, klass, state) | :return a dictionary {k:v} for k in self.serialize_fields and v=getattr(self, k) | 7.29115 | 6.79051 | 1.073726 |
# klass may have been renamed, so we have to look this up in the class rename registry.
names = [_importable_name(klass)]
# lookup old names, handled by current klass.
from .util import class_rename_registry
names.extend(class_rename_registry.old_handled_by(klass))
for n in names:
try:
return state['class_tree_versions'][n]
except KeyError:
continue
# if we did not find a suitable version number return infinity.
if _debug:
logger.debug('unable to obtain a __serialize_version for class %s', klass)
return float('inf') | def _get_version_for_class_from_state(state, klass) | retrieves the version of the current klass from the state mapping from old locations to new ones. | 9.076488 | 8.519446 | 1.065385 |
if _debug:
logger.debug("restoring state for class %s", klass)
for field in SerializableMixIn._get_serialize_fields(klass):
if field in state:
# ensure we can set attributes. Log culprits.
try:
setattr(self, field, state.get(field))
except AttributeError:
logger.debug('field: %s', field, exc_info=True)
else:
if _debug:
logger.debug("skipped %s, because it is not contained in state", field) | def _set_state_from_serializeable_fields_and_state(self, state, klass) | set only fields from state, which are present in klass.__serialize_fields | 5.829688 | 5.374619 | 1.08467 |
return tuple(filter(lambda c:
SerializableMixIn._get_version(c, require=False) or
(SerializableMixIn._get_serialize_fields(c) or
SerializableMixIn._get_interpolation_map(c)),
self.__class__.__mro__)) | def _get_classes_to_inspect(self) | gets classes self derives from which
1. have custom fields: __serialize_fields
2. provide a modifications map | 10.477015 | 7.821851 | 1.339455 |
from pyemma.coordinates import source
self._estimate(source(X), partial_fit=True)
self._estimated = True
return self | def partial_fit(self, X) | incrementally update the estimates
Parameters
----------
X: array, list of arrays, PyEMMA reader
input data. | 14.463393 | 13.416581 | 1.078024 |
self._check_estimated()
return self._rc.cov_XX(bessel=self.bessel) | def C00_(self) | Instantaneous covariance matrix | 41.166298 | 31.981474 | 1.287192 |
self._check_estimated()
return self._rc.cov_XY(bessel=self.bessel) | def C0t_(self) | Time-lagged covariance matrix | 47.255703 | 33.77177 | 1.399266 |