code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---|
default_normcase = getattr(pattern, 'normcase', self.module.normcase)
normcase = normcase or default_normcase
name = normcase(self.name)
pattern = normcase(pattern)
return fnmatch.fnmatchcase(name, pattern)
|
def fnmatch(self, pattern, normcase=None)
|
Return ``True`` if `self.name` matches the given `pattern`.
`pattern` - A filename pattern with wildcards,
for example ``'*.py'``. If the pattern contains a `normcase`
attribute, it is applied to the name and the pattern prior to comparison.
`normcase` - (optional) A function used to normalize the pattern and
filename before matching. Defaults to the pattern's `normcase`
attribute if present, otherwise to ``self.module.normcase``
(normally :func:`os.path.normcase`).
.. seealso:: :func:`fnmatch.fnmatch`
| 3.060301 | 3.325897 | 0.920143 |
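A minimal usage sketch (assuming the ``path.Path`` class this method belongs to):
>>> from path import Path
>>> Path('setup.py').fnmatch('*.py')
True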
cls = self._next_class
return [cls(s) for s in glob.glob(self / pattern)]
|
def glob(self, pattern)
|
Return a list of Path objects that match the pattern.
`pattern` - a path relative to this directory, with wildcards.
For example, ``Path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their :file:`bin` directories.
.. seealso:: :func:`glob.glob`
.. note:: Glob is **not** recursive, even when using ``**``.
To do recursive globbing see :func:`walk`,
:func:`walkdirs` or :func:`walkfiles`.
| 9.422841 | 14.352796 | 0.656516 |
with self.open(*args, **kwargs) as f:
for chunk in iter(lambda: f.read(size) or None, None):
yield chunk
|
def chunks(self, size, *args, **kwargs)
|
Returns a generator yielding chunks of the file, so it can
be read piece by piece with a simple for loop.
Any argument you pass after `size` will be passed to :meth:`open`.
:example:
>>> hash = hashlib.md5()
>>> for chunk in Path("CHANGES.rst").chunks(8192, mode='rb'):
... hash.update(chunk)
This will read the file by chunks of 8192 bytes.
| 3.096494 | 4.537555 | 0.682415 |
if append:
mode = 'ab'
else:
mode = 'wb'
with self.open(mode) as f:
f.write(bytes)
|
def write_bytes(self, bytes, append=False)
|
Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call ``p.write_bytes(bytes, append=True)`` to append instead.
| 2.378758 | 2.385145 | 0.997322 |
with self.open(mode='r', encoding=encoding, errors=errors) as f:
return U_NEWLINE.sub('\n', f.read())
|
def text(self, encoding=None, errors='strict')
|
r""" Open this file, read it in, return the content as a string.
All newline sequences are converted to ``'\n'``. Keyword arguments
will be passed to :meth:`open`.
.. seealso:: :meth:`lines`
| 6.448951 | 8.945302 | 0.720932 |
return self.text(encoding, errors).splitlines(retain)
|
def lines(self, encoding=None, errors='strict', retain=True)
|
r""" Open this file, read all lines, return them in a list.
Optional arguments:
`encoding` - The Unicode encoding (or character set) of
the file. The default is ``None``, meaning use the
platform's default (locale-preferred) text encoding.
`errors` - How to handle Unicode errors; see the ``errors``
argument of :func:`open` for the options. Default is ``'strict'``.
`retain` - If ``True``, retain newline characters; but all newline
character combinations (``'\r'``, ``'\n'``, ``'\r\n'``) are
translated to ``'\n'``. If ``False``, newline characters are
stripped off. Default is ``True``.
.. seealso:: :meth:`text`
| 13.886086 | 21.870743 | 0.634916 |
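An illustrative round-trip (hypothetical file name) showing the newline normalization described above; ``write_bytes`` is the method documented earlier in this table:
>>> Path('tmp.txt').write_bytes(b'a\r\nb\n')
>>> Path('tmp.txt').lines()
['a\n', 'b\n']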
with self.open('ab' if append else 'wb') as f:
for line in lines:
isUnicode = isinstance(line, str)
if linesep is not None:
pattern = U_NL_END if isUnicode else NL_END
line = pattern.sub('', line) + linesep
if isUnicode:
line = line.encode(
encoding or sys.getdefaultencoding(), errors)
f.write(line)
|
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False)
|
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See `linesep` below.
`lines` - A list of strings.
`encoding` - A Unicode encoding to use. This applies only if
`lines` contains any Unicode strings.
`errors` - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
`linesep` - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending (``'\r'``, ``'\n'``, ``'\r\n'``,
``u'\x85'``, ``u'\r\x85'``, ``u'\u2028'``), that will
be stripped off and this will be used instead. The
default is ``os.linesep``, which is platform-dependent
(``'\r\n'`` on Windows, ``'\n'`` on Unix, etc.).
Specify ``None`` to write the lines as-is, like
:meth:`file.writelines`.
Use the keyword argument ``append=True`` to append lines to the
file. The default is to overwrite the file.
.. warning ::
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the `encoding=` parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
| 4.24346 | 4.555377 | 0.931528 |
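For illustration (hypothetical file name), the default behaviour replaces whatever line ending each input line carries with `linesep`; ``bytes()`` here is path.py's binary-read helper (``read_bytes()`` in newer releases):
>>> Path('out.txt').write_lines(['a\r\n', 'b'], linesep='\n')
>>> Path('out.txt').bytes()
b'a\nb\n'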
m = hashlib.new(hash_name)
for chunk in self.chunks(8192, mode="rb"):
m.update(chunk)
return m
|
def _hash(self, hash_name)
|
Returns a hash object for the file at the current path.
`hash_name` should be a hash algo name (such as ``'md5'``
or ``'sha1'``) that's available in the :mod:`hashlib` module.
| 3.737727 | 3.54959 | 1.053002 |
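A quick sketch of how this private helper is used (hypothetical file name):
>>> digest = Path('CHANGES.rst')._hash('md5').hexdigest()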
if not hasattr(self.module, 'samefile'):
other = Path(other).realpath().normpath().normcase()
return self.realpath().normpath().normcase() == other
return self.module.samefile(self, other)
|
def samefile(self, other)
|
.. seealso:: :func:`os.path.samefile`
| 4.178071 | 3.851081 | 1.084909 |
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + '\\' + account
|
def __get_owner_windows(self)
|
Return the name of the owner of this file or directory. Follow
symbolic links.
Return a name of the form ``r'DOMAIN\\User Name'``; may be a group.
.. seealso:: :attr:`owner`
| 3.481206 | 3.991841 | 0.87208 |
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
|
def __get_owner_unix(self)
|
Return the name of the owner of this file or directory. Follow
symbolic links.
.. seealso:: :attr:`owner`
| 4.692205 | 5.171494 | 0.907321 |
if isinstance(mode, str):
mask = _multi_permission_mask(mode)
mode = mask(self.stat().st_mode)
os.chmod(self, mode)
return self
|
def chmod(self, mode)
|
Set the mode. May be the new mode (os.chmod behavior) or a `symbolic
mode <http://en.wikipedia.org/wiki/Chmod#Symbolic_modes>`_.
.. seealso:: :func:`os.chmod`
| 5.693782 | 6.846197 | 0.831671 |
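Both call styles accepted by the code above (hypothetical file name):
>>> p = Path('script.sh').chmod(0o755)   # numeric mode, as with os.chmod
>>> p = p.chmod('u+x')                   # symbolic mode string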
if hasattr(os, 'chown'):
if 'pwd' in globals() and isinstance(uid, str):
uid = pwd.getpwnam(uid).pw_uid
if 'grp' in globals() and isinstance(gid, str):
gid = grp.getgrnam(gid).gr_gid
os.chown(self, uid, gid)
else:
msg = "Ownership not available on this platform."
raise NotImplementedError(msg)
return self
|
def chown(self, uid=-1, gid=-1)
|
Change the owner and group, by name (resolved via the ``pwd``/``grp`` modules) or by numeric uid/gid.
.. seealso:: :func:`os.chown`
| 2.4876 | 2.61359 | 0.951794 |
os.rename(self, new)
return self._next_class(new)
|
def rename(self, new)
|
.. seealso:: :func:`os.rename`
| 14.375381 | 12.398713 | 1.159425 |
os.renames(self, new)
return self._next_class(new)
|
def renames(self, new)
|
.. seealso:: :func:`os.renames`
| 13.556458 | 10.835893 | 1.25107 |
with contextlib.suppress(FileExistsError):
self.mkdir(mode)
return self
|
def mkdir_p(self, mode=0o777)
|
Like :meth:`mkdir`, but does not raise an exception if the
directory already exists.
| 5.04991 | 4.824485 | 1.046725 |
with contextlib.suppress(FileExistsError):
self.makedirs(mode)
return self
|
def makedirs_p(self, mode=0o777)
|
Like :meth:`makedirs`, but does not raise an exception if the
directory already exists.
| 5.094604 | 4.922205 | 1.035025 |
suppressed = FileNotFoundError, FileExistsError, DirectoryNotEmpty
with contextlib.suppress(suppressed):
with DirectoryNotEmpty.translate():
self.rmdir()
return self
|
def rmdir_p(self)
|
Like :meth:`rmdir`, but does not raise an exception if the
directory is not empty or does not exist.
| 13.657041 | 11.063432 | 1.234431 |
with contextlib.suppress(FileExistsError, DirectoryNotEmpty):
with DirectoryNotEmpty.translate():
self.removedirs()
return self
|
def removedirs_p(self)
|
Like :meth:`removedirs`, but does not raise an exception if the
directory is not empty or does not exist.
| 13.618075 | 11.707793 | 1.163163 |
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0o666)
os.close(fd)
os.utime(self, None)
return self
|
def touch(self)
|
Set the access/modified times of this file to the current time.
Create the file if it does not exist.
| 2.459563 | 2.169063 | 1.133929 |
dst = self._next_class(dst)
dst.makedirs_p()
if update:
warnings.warn(
"Update is deprecated; "
"use copy_function=only_newer(shutil.copy2)",
DeprecationWarning,
stacklevel=2,
)
copy_function = only_newer(copy_function)
sources = self.listdir()
_ignored = ignore(self, [item.name for item in sources])
def ignored(item):
return item.name in _ignored
for source in itertools.filterfalse(ignored, sources):
dest = dst / source.name
if symlinks and source.islink():
target = source.readlink()
target.symlink(dest)
elif source.isdir():
source.merge_tree(
dest,
symlinks=symlinks,
update=update,
copy_function=copy_function,
ignore=ignore,
)
else:
copy_function(source, dest)
self.copystat(dst)
|
def merge_tree(
self, dst, symlinks=False,
*,
update=False,
copy_function=shutil.copy2,
ignore=lambda dir, contents: [])
|
Copy entire contents of self to dst, overwriting existing
contents in dst with those in self.
Pass ``symlinks=True`` to copy symbolic links as links.
Accepts a ``copy_function``, similar to copytree.
To avoid overwriting newer files, supply a copy function
wrapped in ``only_newer``. For example::
src.merge_tree(dst, copy_function=only_newer(shutil.copy2))
| 2.831203 | 2.871249 | 0.986053 |
import io
if set(mode).intersection('wa+'):
raise ValueError('Only read-only file modes can be used')
# move existing file to backup, create new file with same permissions
# borrowed extensively from the fileinput module
backup_fn = self + (backup_extension or os.extsep + 'bak')
try:
os.unlink(backup_fn)
except os.error:
pass
os.rename(self, backup_fn)
readable = io.open(
backup_fn, mode, buffering=buffering,
encoding=encoding, errors=errors, newline=newline,
)
try:
perm = os.fstat(readable.fileno()).st_mode
except OSError:
writable = open(
self, 'w' + mode.replace('r', ''),
buffering=buffering, encoding=encoding, errors=errors,
newline=newline,
)
else:
os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
if hasattr(os, 'O_BINARY'):
os_mode |= os.O_BINARY
fd = os.open(self, os_mode, perm)
writable = io.open(
fd, "w" + mode.replace('r', ''),
buffering=buffering, encoding=encoding, errors=errors,
newline=newline,
)
try:
if hasattr(os, 'chmod'):
os.chmod(self, perm)
except OSError:
pass
try:
yield readable, writable
except Exception:
# move backup back
readable.close()
writable.close()
try:
os.unlink(self)
except os.error:
pass
os.rename(backup_fn, self)
raise
else:
readable.close()
writable.close()
finally:
try:
os.unlink(backup_fn)
except os.error:
pass
|
def in_place(
self, mode='r', buffering=-1, encoding=None, errors=None,
newline=None, backup_extension=None,
)
|
A context in which a file may be re-written in-place with
new content.
Yields a tuple of :samp:`({readable}, {writable})` file
objects, where `writable` replaces `readable`.
If an exception occurs, the old file is restored, removing the
written data.
Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only
read-only modes are allowed. A :exc:`ValueError` is raised
on invalid modes.
For example, to add line numbers to a file::
p = Path(filename)
assert p.isfile()
with p.in_place() as (reader, writer):
for number, line in enumerate(reader, 1):
writer.write('{0:3}: '.format(number))
writer.write(line)
Thereafter, the file at `filename` will have line numbers in it.
| 2.545183 | 2.408268 | 1.056852 |
prop_name = '{scope}_{class_}_dir'.format(**locals())
value = getattr(self.wrapper, prop_name)
MultiPath = Multi.for_class(self.path_class)
return MultiPath.detect(value)
|
def get_dir(self, scope, class_)
|
Return the named directory from the appdirs wrapper, with the
result wrapped in self.path_class.
| 9.954431 | 8.208616 | 1.212681 |
return next(
class_
for class_ in cls.__mro__
if not issubclass(class_, Multi)
)
|
def _next_class(cls)
|
Multi-subclasses should use the parent class
| 6.261824 | 4.427125 | 1.414422 |
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
s = s.replace('\'', "'")
return s
|
def escape(s, quote=True)
|
Replace the special characters "&", "<" and ">" with HTML-safe sequences.
If the optional flag `quote` is true (the default), the quotation mark
characters, both double quote (") and single quote ('), are also
translated.
| 1.604297 | 1.620358 | 0.990088 |
if token.content.startswith('$$'):
return self.render_raw_text(token)
return '${}$'.format(self.render_raw_text(token))
|
def render_math(self, token)
|
Ensure Math tokens are all enclosed in two dollar signs.
| 3.857963 | 3.422787 | 1.127141 |
with renderer() as renderer:
return renderer.render(Document(iterable))
|
def markdown(iterable, renderer=HTMLRenderer)
|
Output HTML with default settings.
Enables inline and block-level HTML tags.
| 8.058025 | 9.227479 | 0.873264 |
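A typical call (illustrative; ``'README.md'`` is a hypothetical file):
>>> import mistletoe
>>> with open('README.md', 'r') as fin:
...     html = mistletoe.markdown(fin)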
node = {}
# Python 3.6 uses [ordered dicts] [1].
# Put in 'type' entry first to make the final tree format somewhat
# similar to [MDAST] [2].
#
# [1]: https://docs.python.org/3/whatsnew/3.6.html
# [2]: https://github.com/syntax-tree/mdast
node['type'] = token.__class__.__name__
node.update({key: token.__dict__[key] for key in token.__dict__})
if 'header' in node:
node['header'] = get_ast(node['header'])
if 'children' in node:
node['children'] = [get_ast(child) for child in node['children']]
return node
|
def get_ast(token)
|
Recursively unrolls token attributes into dictionaries (token.children
into lists).
Returns:
a dictionary of token's attributes.
| 3.837146 | 3.967348 | 0.967182 |
lines = FileWrapper(iterable)
parse_buffer = ParseBuffer()
line = lines.peek()
while line is not None:
for token_type in token_types:
if token_type.start(line):
result = token_type.read(lines)
if result is not None:
parse_buffer.append((token_type, result))
break
else: # unmatched newlines
next(lines)
parse_buffer.loose = True
line = lines.peek()
return parse_buffer
|
def tokenize_block(iterable, token_types)
|
Returns a list of pairs (token_type, read_result).
Footnotes are parsed here, but span-level parsing has not
started yet.
| 3.556809 | 3.615641 | 0.983728 |
tokens = []
for token_type, result in parse_buffer:
token = token_type(result)
if token is not None:
tokens.append(token)
return tokens
|
def make_tokens(parse_buffer)
|
Takes a list of pairs (token_type, read_result) and
applies token_type(read_result).
Footnotes are already parsed before this point,
and span-level parsing is started here.
| 2.754689 | 2.361387 | 1.166555 |
try:
with open(filename, 'r') as fin:
rendered = mistletoe.markdown(fin, renderer)
print(rendered, end='')
except OSError:
sys.exit('Cannot open file "{}".'.format(filename))
|
def convert_file(filename, renderer)
|
Parse a Markdown file and dump the output to stdout.
| 3.727717 | 3.212982 | 1.160205 |
_import_readline()
_print_heading(renderer)
contents = []
more = False
while True:
try:
prompt, more = ('... ', True) if more else ('>>> ', True)
contents.append(input(prompt) + '\n')
except EOFError:
print('\n' + mistletoe.markdown(contents, renderer), end='')
more = False
contents = []
except KeyboardInterrupt:
print('\nExiting.')
break
|
def interactive(renderer)
|
Parse user input, dump to stdout, rinse and repeat.
Python REPL style.
| 5.080054 | 5.057237 | 1.004512 |
from mistletoe.block_token import List
def get_indent(level):
if self.omit_title:
level -= 1
return ' ' * 4 * (level - 1)
def build_list_item(heading):
level, content = heading
template = '{indent}- {content}\n'
return template.format(indent=get_indent(level), content=content)
return List([build_list_item(heading) for heading in self._headings])
|
def toc(self)
|
Returns table of contents as a block_token.List instance.
| 4.165408 | 3.496042 | 1.191464 |
rendered = super().render_heading(token)
content = self.parse_rendered_heading(rendered)
if not (self.omit_title and token.level == 1
or token.level > self.depth
or any(cond(content) for cond in self.filter_conds)):
self._headings.append((token.level, content))
return rendered
|
def render_heading(self, token)
|
Overrides super().render_heading; stores rendered heading first,
then returns it.
| 5.303463 | 4.961745 | 1.068871 |
match_obj = cls.pattern.match(line)
if match_obj is None:
return None # no valid leader
leader = match_obj.group(1)
content = match_obj.group(0).replace(leader+'\t', leader+' ', 1)
# reassign prepend and leader
prepend = len(content)
if prepend == len(line.rstrip('\n')):
prepend = match_obj.end(1) + 1
else:
spaces = match_obj.group(2)
if spaces.startswith('\t'):
spaces = spaces.replace('\t', ' ', 1)
spaces = spaces.replace('\t', ' ')
n_spaces = len(spaces)
if n_spaces > 4:
prepend = match_obj.end(1) + 1
return prepend, leader
|
def parse_marker(cls, line)
|
Returns a pair (prepend, leader) iff the line has a valid leader.
| 4.122799 | 3.292499 | 1.252179 |
rendered = [self.render(child) for child in token.children]
return ''.join(rendered)
|
def render_inner(self, token)
|
Recursively renders child tokens. Joins the rendered
strings with no space in between.
If newlines / spaces are needed between tokens, add them
in their respective templates, or override this function
in the renderer subclass, so that whitespace won't seem to
appear magically for anyone reading your program.
Arguments:
token: a branch node that has a ``children`` attribute.
| 4.518612 | 5.217847 | 0.865992 |
# Check if string
if not isinstance(signal, str):
signal = list(signal)
signal = np.array(signal)
# Create a frequency data
data_set = list(set(signal))
freq_list = []
for entry in data_set:
counter = 0.
for i in signal:
if i == entry:
counter += 1
freq_list.append(float(counter) / len(signal))
# Shannon entropy
shannon_entropy = 0.0
for freq in freq_list:
shannon_entropy += freq * np.log2(freq)
shannon_entropy = -shannon_entropy
return(shannon_entropy)
|
def complexity_entropy_shannon(signal)
|
Computes the Shannon entropy. Copied from the `pyEntropy <https://github.com/nikdon/pyEntropy>`_ repo by tjugo.
Parameters
----------
signal : list or array
List or array of values.
Returns
----------
shannon_entropy : float
The Shannon Entropy as float value.
Example
----------
>>> import neurokit as nk
>>>
>>> signal = np.sin(np.log(np.random.sample(666)))
>>> shannon_entropy = nk.complexity_entropy_shannon(signal)
Notes
----------
*Details*
- **Shannon entropy**: a measure of the unpredictability of the signal's states or, equivalently, of its average information content.
*Authors*
- tjugo (https://github.com/nikdon)
*Dependencies*
- numpy
*See Also*
- pyEntropy package: https://github.com/nikdon/pyEntropy
References
-----------
- None
| 2.768691 | 3.128997 | 0.884849 |
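The loop above computes the standard estimate H = -sum_i p_i * log2(p_i) over the empirical symbol frequencies. A vectorized equivalent (an illustrative sketch, not part of the library's API):
import numpy as np

def shannon_entropy_np(signal):
    # Empirical frequency of each distinct value (or character, for strings)
    _, counts = np.unique(np.asarray(list(signal)), return_counts=True)
    freqs = counts / counts.sum()
    # H = -sum(p * log2(p))
    return -np.sum(freqs * np.log2(freqs))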
signal = np.array(signal)
L = []
x = []
N = signal.size
km_idxs = np.triu_indices(k_max - 1)
km_idxs = k_max - np.flipud(np.column_stack(km_idxs)) -1
km_idxs[:,1] -= 1
for k in range(1, k_max):
Lk = 0
for m in range(0, k):
#we pregenerate all idxs
idxs = np.arange(1,int(np.floor((N-m)/k)))
Lmk = np.sum(np.abs(signal[m+idxs*k] - signal[m+k*(idxs-1)]))
Lmk = (Lmk*(N - 1)/(((N - m)/ k)* k)) / k
Lk += Lmk
if Lk != 0:
L.append(np.log(Lk/(m+1)))
x.append([np.log(1.0/ k), 1])
(p, r1, r2, s)=np.linalg.lstsq(x, L)
fd_higushi = p[0]
return (fd_higushi)
|
def complexity_fd_higushi(signal, k_max)
|
Computes Higuchi Fractal Dimension of a signal. Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by Quentin Geissmann.
Parameters
----------
signal : list or array
List or array of values.
k_max : int
The maximal value of k. The point at which the FD plateaus is considered a saturation point, and that kmax value should be selected (Gómez, 2009). Some studies use a value of 8 or 16 for ECG signals and others 48 for MEG.
Returns
----------
fd_higushi : float
The Higuchi Fractal Dimension as a float value.
Example
----------
>>> import neurokit as nk
>>>
>>> signal = np.sin(np.log(np.random.sample(666)))
>>> fd_higushi = nk.complexity_fd_higushi(signal, 8)
Notes
----------
*Details*
- **Higuchi Fractal Dimension**: Higuchi proposed in 1988 an efficient algorithm for measuring the FD of discrete time sequences. As the reconstruction of the attractor phase space is not necessary, this algorithm is simpler and faster than D2 and other classical measures derived from chaos theory. FD can be used to quantify the complexity and self-similarity of a signal. HFD has already been used to analyse the complexity of brain recordings and other biological signals.
*Authors*
- Quentin Geissmann (https://github.com/qgeissmann)
*Dependencies*
- numpy
*See Also*
- pyrem package: https://github.com/gilestrolab/pyrem
References
-----------
- Accardo, A., Affinito, M., Carrozzi, M., & Bouquet, F. (1997). Use of the fractal dimension for the analysis of electroencephalographic time series. Biological cybernetics, 77(5), 339-350.
- Gómez, C., Mediavilla, Á., Hornero, R., Abásolo, D., & Fernández, A. (2009). Use of the Higuchi's fractal dimension for the analysis of MEG recordings from Alzheimer's disease patients. Medical engineering & physics, 31(3), 306-313.
| 4.840194 | 5.079452 | 0.952897 |
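For reference, the quantity accumulated in the inner loop is the standard Higuchi curve length (which the code above follows up to minor indexing details):
L_m(k) = (1/k) * [ sum_i |x(m + i*k) - x(m + (i-1)*k)| ] * (N - 1) / (floor((N - m)/k) * k),
averaged over m for each k; the fractal dimension is then estimated as the slope of the regression of log(L(k)) on log(1/k).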
psd = np.abs(np.fft.rfft(signal))**2
psd /= np.sum(psd) # psd as a pdf (normalised to one)
if bands is None:
power_per_band= psd[psd>0]
else:
freqs = np.fft.rfftfreq(signal.size, 1/float(sampling_rate))
bands = np.asarray(bands)
freq_limits_low = np.concatenate([[0.0],bands])
freq_limits_up = np.concatenate([bands, [np.Inf]])
power_per_band = [np.sum(psd[np.bitwise_and(freqs >= low, freqs<up)])
for low,up in zip(freq_limits_low, freq_limits_up)]
power_per_band= np.array(power_per_band)[np.array(power_per_band) > 0]
spectral = - np.sum(power_per_band * np.log2(power_per_band))
return(spectral)
|
def complexity_entropy_spectral(signal, sampling_rate, bands=None)
|
Computes Spectral Entropy of a signal. Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by Quentin Geissmann. The power spectrum is computed through fft. Then, it is normalised and assimilated to a probability density function.
Parameters
----------
signal : list or array
List or array of values.
sampling_rate : int
Sampling rate (samples/second).
bands : list or array
A list of numbers delimiting the bins of the frequency bands. If None the entropy is computed over the whole range of the DFT (from 0 to `f_s/2`).
Returns
----------
spectral_entropy : float
The spectral entropy as float value.
Example
----------
>>> import neurokit as nk
>>>
>>> signal = np.sin(np.log(np.random.sample(666)))
>>> spectral_entropy = nk.complexity_entropy_spectral(signal, 1000)
Notes
----------
*Details*
- **Spectral Entropy**: Entropy for different frequency bands.
*Authors*
- Quentin Geissmann (https://github.com/qgeissmann)
*Dependencies*
- numpy
*See Also*
- pyrem package: https://github.com/gilestrolab/pyrem
| 2.639214 | 2.885669 | 0.914594 |
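In other words (matching the code above): with P_f = |X_f|^2 / sum_g |X_g|^2 the power spectrum normalized to a probability distribution, the spectral entropy is S = -sum_f P_f * log2(P_f), computed either over individual FFT bins or over the frequency bands delimited by `bands`.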
mat = _embed_seq(signal, tau, emb_dim)
W = np.linalg.svd(mat, compute_uv = False)
W /= sum(W) # normalize singular values
entropy_svd = -1*sum(W * np.log2(W))
return(entropy_svd)
|
def complexity_entropy_svd(signal, tau=1, emb_dim=2)
|
Computes the Singular Value Decomposition (SVD) entropy of a signal. Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by Quentin Geissmann.
Parameters
----------
signal : list or array
List or array of values.
tau : int
The delay
emb_dim : int
The embedding dimension (*m*, the length of vectors to compare).
Returns
----------
entropy_svd : float
The SVD entropy as float value.
Example
----------
>>> import neurokit as nk
>>>
>>> signal = np.sin(np.log(np.random.sample(666)))
>>> entropy_svd = nk.complexity_entropy_svd(signal, 1, 2)
Notes
----------
*Details*
- **SVD Entropy**: Indicator of how many vectors are needed for an adequate explanation of the data set. Measures feature-richness in the sense that the higher the entropy of the set of SVD weights, the more orthogonal vectors are required to adequately explain it.
*Authors*
- Quentin Geissmann (https://github.com/qgeissmann)
*Dependencies*
- numpy
*See Also*
- pyrem package: https://github.com/gilestrolab/pyrem
| 3.989843 | 4.903371 | 0.813694 |
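Concretely (matching the code above): with sigma_1, ..., sigma_M the singular values of the delay-embedding matrix, normalized so that sum_i sigma_i = 1, the SVD entropy is H_SVD = -sum_i sigma_i * log2(sigma_i).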
diff = np.diff(signal)
# x[i] * x[i-1] for i in t0 -> tmax
prod = diff[1:-1] * diff[0:-2]
# Number of sign changes in derivative of the signal
N_delta = np.sum(prod < 0)
n = len(signal)
fd_petrosian = np.log(n)/(np.log(n)+np.log(n/(n+0.4*N_delta)))
return(fd_petrosian)
|
def complexity_fd_petrosian(signal)
|
Computes the Petrosian Fractal Dimension of a signal. Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by Quentin Geissmann.
Parameters
----------
signal : list or array
List or array of values.
Returns
----------
fd_petrosian : float
The Petrosian FD as float value.
Example
----------
>>> import neurokit as nk
>>>
>>> signal = np.sin(np.log(np.random.sample(666)))
>>> fd_petrosian = nk.complexity_fd_petrosian(signal)
Notes
----------
*Details*
- **Petrosian Fractal Dimension**: Provide a fast computation of the FD of a signal by translating the series into a binary sequence.
*Authors*
- Quentin Geissmann (https://github.com/qgeissmann)
*Dependencies*
- numpy
*See Also*
- pyrem package: https://github.com/gilestrolab/pyrem
| 5.2371 | 6.330368 | 0.827298 |
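Concretely (matching the code above): with n the signal length and N_delta the number of sign changes in the signal's first difference, PFD = log(n) / ( log(n) + log( n / (n + 0.4 * N_delta) ) ); because this is a ratio of logarithms, the result does not depend on the logarithm base.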
mat = _embed_seq(signal, tau, emb_dim)
W = np.linalg.svd(mat, compute_uv = False)
W /= sum(W) # normalize singular values
FI_v = (W[1:] - W[:-1]) **2 / W[:-1]
fisher_info = np.sum(FI_v)
return(fisher_info)
|
def complexity_fisher_info(signal, tau=1, emb_dim=2)
|
Computes the Fisher information of a signal. Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by Quentin Geissmann.
Parameters
----------
signal : list or array
List or array of values.
tau : int
The delay
emb_dim : int
The embedding dimension (*m*, the length of vectors to compare).
Returns
----------
fisher_info : float
The Fisher information as float value.
Example
----------
>>> import neurokit as nk
>>>
>>> signal = np.sin(np.log(np.random.sample(666)))
>>> fisher_info = nk.complexity_fisher_info(signal, 1, 2)
Notes
----------
*Details*
- **Fisher Information**: A way of measuring the amount of information that an observable random variable X carries about an unknown parameter θ of a distribution that models X. Formally, it is the variance of the score, or the expected value of the observed information.
*Authors*
- Quentin Geissmann (https://github.com/qgeissmann)
*Dependencies*
- numpy
*See Also*
- pyrem package: https://github.com/gilestrolab/pyrem
| 4.767083 | 6.040404 | 0.789199 |
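Concretely (matching the code above): with sigma_1, ..., sigma_M the normalized singular values of the delay-embedding matrix (sum_i sigma_i = 1), the Fisher information is FI = sum_i (sigma_{i+1} - sigma_i)^2 / sigma_i.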
ntf = data.shape[0]
gfp_curve = np.zeros((ntf, ))
if gflp_method == 'GFPL2':
for i in range(ntf):
x = data[i,:]
gfp_curve[i] = np.sqrt(np.sum((x - x.mean())**2 / len(x) ))
elif gflp_method == 'GFPL1':
for i in range(ntf):
x = data[i,:]
gfp_curve[i] = np.sum(np.abs(x - x.mean())) / len(x)
if peak_method == "wavelet":
gfp_peaks = np.asarray(scipy.signal.find_peaks_cwt(gfp_curve, np.arange(1, 10))) #we would expect a peak at about each 50 ms
else:
gfp_peaks = scipy.signal.argrelmax(gfp_curve)[0]
if smoothing == 'hamming':
gfp_curve = scipy.signal.convolve(gfp_curve, scipy.signal.hamming(smoothing_window) )
elif smoothing == 'hanning':
gfp_curve = scipy.signal.convolve(gfp_curve, scipy.signal.hanning(smoothing_window) )
else:
pass
# Normalize
if normalize is True:
for i in range(len(data)):
data[i,:] = data[i,:]/gfp_curve[i]
return (data, gfp_curve, gfp_peaks)
|
def eeg_gfp_peaks(data, gflp_method='GFPL1', smoothing=False, smoothing_window=100, peak_method="wavelet", normalize=False)
|
The Global Field Power (GFP) is a scalar measure of the strength of the scalp potential field and is calculated as the standard deviation of all electrodes at a given time point (Lehmann and Skrandies, 1980; Michel et al., 1993; Murray et al., 2008; Brunet et al., 2011). Between two GFP troughs, the strength of the potential field varies but the topography remains generally stable. The local maxima of the GFP are thus the best representative of a given microstate in terms of signal-to-noise ratio (Pascual-Marqui et al., 1995), corresponding to moments of high global neuronal synchronization (Skrandies, 2007).
Parameters
----------
data : ndarray
Array containing values for all time frames and channels.
Dimension: number of time frames x number of channels.
gflp_method : str
`GFPL1` : use the L1-norm to compute the GFP.
`GFPL2` : use the L2-norm to compute the GFP.
smoothing : str or bool
`hamming` : use a hamming window to smooth the GFP curve.
`hanning` : use a hanning window to smooth the GFP curve.
smoothing_window : int
Length of the smoothing window, about 100.
peak_method : str
"relative" or "wavelet".
normalize : bool
If True, normalize each time frame by the GFP curve.
Returns
----------
(data, gfp_curve, gfp_peaks) : tuple
The (possibly normalized) data, the GFP curve and the indices of the GFP peaks.
| 2.207358 | 2.171219 | 1.016644 |
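Concretely (matching the code above): at each time frame t over K channels, GFPL2(t) = sqrt( sum_k (x_k(t) - mean(x(t)))^2 / K ) and GFPL1(t) = sum_k |x_k(t) - mean(x(t))| / K; the local maxima of this curve are taken as microstate candidates.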
# Load data if necessary
# if isinstance(raws, str):
# raws = load_object(filename=raws)
# Initialize empty dict
gfp = {}
for participant in raws:
gfp[participant] = {}
for run in raws[participant]:
# Generate empty dic
gfp[participant][run] = {}
# Assign raw object to raw
raw = raws[participant][run].copy()
# Check if MEG or EEG data
if True in set(["MEG" in ch for ch in raw.info["ch_names"]]):
meg = True
eeg = False
else:
meg = False
eeg = True
# Save ECG channel
try:
gfp[participant][run]["ecg"] = np.array(raw.copy().pick_types(meg=False, eeg=False, ecg=True).to_data_frame())
except ValueError:
gfp[participant][run]["ecg"] = np.nan
# Select appropriate channels
data = raw.copy().pick_types(meg=meg, eeg=eeg)
gfp[participant][run]["data_info"] = data.info
gfp[participant][run]["data_freq"] = data.info["sfreq"]
gfp[participant][run]["run_duration"] = len(data) / data.info["sfreq"]
# Convert to numpy array
data = np.array(data.to_data_frame())
# find GFP peaks
data, gfp_curve, gfp_peaks = eeg_gfp_peaks(data,
gflp_method=gflp_method,
smoothing=smoothing,
smoothing_window=100,
peak_method="wavelet",
normalize=normalize)
# Store them
gfp[participant][run]["microstates_times"] = gfp_peaks
# Select brain state at peaks
data_peaks = data[gfp_peaks]
# Store the data and scale parameters
if scale is True:
gfp[participant][run]["data"] = z_score(data_peaks)
else:
gfp[participant][run]["data"] = data_peaks
gfp[participant][run]["data_scale"] = scale
gfp[participant][run]["data_normalize"] = normalize
gfp[participant][run]["data_smoothing"] = smoothing
return(gfp)
|
def eeg_gfp(raws, gflp_method="GFPL1", scale=True, normalize=True, smoothing=None)
|
Run the GFP analysis.
| 2.952041 | 2.963998 | 0.995966 |
# Create training set
training_set = data.copy()
if verbose is True:
print("- Initializing the clustering algorithm...")
if clustering_method == "kmeans":
algorithm = sklearn.cluster.KMeans(init='k-means++', n_clusters=n_microstates, n_init=n_init, n_jobs=n_jobs)
elif clustering_method == "spectral":
algorithm = sklearn.cluster.SpectralClustering(n_clusters=n_microstates, n_init=n_init, n_jobs=n_jobs)
elif clustering_method == "agglom":
algorithm = sklearn.cluster.AgglomerativeClustering(n_clusters=n_microstates, linkage="complete")
elif clustering_method == "dbscan":
algorithm = sklearn.cluster.DBSCAN(min_samples=100)
elif clustering_method == "affinity":
algorithm = sklearn.cluster.AffinityPropagation(damping=0.5)
else:
print("NeuroKit Error: eeg_microstates(): clustering_method must be 'kmeans', 'spectral', 'dbscan', 'affinity' or 'agglom'")
refitting = 0 # Initialize the number of refittings
good_fit_achieved = False
while good_fit_achieved is False:
good_fit_achieved = True
if verbose is True:
print("- Fitting the classifier...")
# Fit the algorithm
algorithm.fit(training_set)
if verbose is True:
print("- Clustering back the initial data...")
# Predict the more likely cluster for each observation
predicted = algorithm.fit_predict(training_set)
if verbose is True:
print("- Check for abnormalities...")
# Check for abnormalities and prune the training set until none found
occurences = dict(collections.Counter(predicted))
masks = [np.array([True]*len(training_set))]
for microstate in occurences:
# check whether the frequency of a microstate is below the rejection threshold
if occurences[microstate] < len(data)*occurence_rejection_treshold:
good_fit_achieved = False
refitting += 1 # Increment the refitting
print("NeuroKit Warning: eeg_microstates(): detected some outliers: refitting the classifier (n=" + str(refitting) + ").")
masks.append(predicted!=microstate)
mask = np.all(masks, axis=0)
training_set = training_set[mask]
return(algorithm)
|
def eeg_microstates_clustering(data, n_microstates=4, clustering_method="kmeans", n_jobs=1, n_init=25, occurence_rejection_treshold=0.05, max_refitting=5, verbose=True)
|
Fit the clustering algorithm.
| 2.666384 | 2.672182 | 0.99783 |
for participant in results:
for run in results[participant]:
if verbose is True:
print("- " + participant)
# Frequencies of each microstate
occurences = dict(collections.Counter(results[participant][run]["microstates"]))
# Compute complexity measures of the microstate sequence
if nonlinearity is True:
results[participant][run]["nonlinearity"] = complexity(results[participant][run]["microstates"])
# ECG coherence
# results[participant][run]["ecg"]
# statsmodels.tsa.stattools.grangercausalitytests([])
results[participant][run]["parameters"] = {}
# Compute parameters for each microstates:
for microstate in set(method["microstates"]):
results[participant][run]["parameters"][microstate] = {}
try:
# Coverage
results[participant][run]["parameters"][microstate]["coverage"] = occurences[microstate]/len(results[participant][run]["microstates"])
# Duration
uniques = find_following_duplicates(results[participant][run]["microstates"])
uniques_times = results[participant][run]["microstates_times"][np.where(uniques)]
uniques_ms = results[participant][run]["microstates"][np.where(uniques)]
times = uniques_times[np.array(np.where(uniques_ms==microstate))]
times_1 = np.take(uniques_times, np.array(np.where(uniques_ms==microstate)) + 1, mode='clip')
results[participant][run]["parameters"][microstate]["duration"] = np.mean(times_1 - times)/results[participant][run]["data_sfreq"]*1000
# Occurence
results[participant][run]["parameters"][microstate]["occurence"] = occurences[microstate] / results[participant][run]["run_duration"]
except KeyError:
results[participant][run]["parameters"][microstate]["coverage"] = 0
results[participant][run]["parameters"][microstate]["duration"] = np.nan
results[participant][run]["parameters"][microstate]["occurence"] = 0
return(results)
|
def eeg_microstates_features(results, method, ecg=True, nonlinearity=True, verbose=True)
|
Compute statistics and features for/of the microstates.
| 2.879456 | 2.879443 | 1.000004 |
# Generate and store figures
figures = []
names = []
# Check if microstates metrics available
try:
microstates = method["microstates_good_fit"]
except KeyError:
microstates = method["microstates"]
# Create individual plot for each microstate
for microstate in set(microstates):
if microstate != "Bad":
values = np.mean(method["data"][np.where(microstates == microstate)], axis=0)
values = np.array(values, ndmin=2).T
evoked = mne.EvokedArray(values, method["raw.info_example"], 0)
fig = evoked.plot_topomap(times=0, title=microstate, size=6, contours=contours, time_format="", show=plot, colorbar=colorbar, show_names=show_sensors_name, sensors=show_sensors_position)
figures.append(fig)
# Save separate figures
name = path + "microstate_%s_%s%s%s_%s%i_%s%s" %(microstate, method["data_scale"], method["data_normalize"], method["data_smoothing"], method["feature_reduction_method"], method["n_features"], method["clustering_method"], extension)
fig.savefig(name, dpi=dpi)
names.append(name)
# Save Combined plot
if save is True:
# Combine all plots
image_template = PIL.Image.open(names[0])
X, Y = image_template.size
image_template.close()
combined = PIL.Image.new('RGB', (int(X*len(set(microstates))/2), int( Y*len(set(microstates))/2)))
fig = 0
for x in np.arange(0, len(set(microstates))/2*int(X), int(X)):
for y in np.arange(0, len(set(microstates))/2*int(Y), int(Y)):
try:
newfig = PIL.Image.open(names[fig])
combined.paste(newfig, (int(x), int(y)))
newfig.close()
except:
pass
fig += 1
#combined.show()
combined_name = path + "microstates_%s%s%s_%s%i_%s%s" %(method["data_scale"], method["data_normalize"], method["data_smoothing"], method["feature_reduction_method"], method["n_features"], method["clustering_method"], extension)
combined.save(combined_name)
# Delete separate plots if needed
if separate is False or save is False:
for name in names:
os.remove(name)
return(figures)
|
def eeg_microstates_plot(method, path="", extension=".png", show_sensors_position=False, show_sensors_name=False, plot=True, save=True, dpi=150, contours=0, colorbar=False, separate=False)
|
Plot the microstates.
| 3.157339 | 3.16104 | 0.998829 |
microstates = list(method['microstates'])
for index, microstate in enumerate(method['microstates']):
if microstate in list(reverse_microstates.keys()):
microstates[index] = reverse_microstates[microstate]
method["data"][index] = -1*method["data"][index]
if microstate in list(microstates_labels.keys()):
microstates[index] = microstates_labels[microstate]
method['microstates'] = np.array(microstates)
return(results, method)
|
def eeg_microstates_relabel(method, results, microstates_labels, reverse_microstates=None)
|
Relabel the microstates.
| 2.546372 | 2.512145 | 1.013625 |
# Create wavelet dataframes
oneSecond =
halfSecond =
# Compute wavelets
cA_n, cD_3, cD_2, cD_1 = pywt.wavedec(eda, 'Haar', level=3) #3 = 1Hz, 2 = 2Hz, 1=4Hz
# Wavelet 1 second window
N = int(len(eda)/sampling_rate)
coeff1 = np.max(abs(np.reshape(cD_1[0:4*N],(N,4))), axis=1)
coeff2 = np.max(abs(np.reshape(cD_2[0:2*N],(N,2))), axis=1)
coeff3 = abs(cD_3[0:N])
wave1Second = pd.DataFrame({'OneSecond_feature1':coeff1,'OneSecond_feature2':coeff2,'OneSecond_feature3':coeff3})
wave1Second.index = oneSecond[:len(wave1Second)]
# Wavelet Half second window
N = int(np.floor((len(data)/8.0)*2))
coeff1 = np.max(abs(np.reshape(cD_1[0:2*N],(N,2))),axis=1)
coeff2 = abs(cD_2[0:N])
waveHalfSecond = pd.DataFrame({'HalfSecond_feature1':coeff1,'HalfSecond_feature2':coeff2})
waveHalfSecond.index = halfSecond[:len(waveHalfSecond)]
return wave1Second,waveHalfSecond
|
def getWaveletData(eda)
|
This function computes the wavelet coefficients
INPUT:
data: DataFrame, index is a list of timestamps at 8Hz, columns include EDA, filtered_eda
OUTPUT:
wave1Second: DataFrame, index is a list of timestamps at 1Hz, columns include OneSecond_feature1, OneSecond_feature2, OneSecond_feature3
waveHalfSecond: DataFrame, index is a list of timestamps at 2Hz, columns include HalfSecond_feature1, HalfSecond_feature2
| 2.922885 | 1.985086 | 1.472422 |
# The "Daubechies" wavelet is a rough approximation to a real, single, cardiac cycle
cardiac = scipy.signal.wavelets.daub(10)
# Add the gap after the pqrst when the heart is resting.
cardiac = np.concatenate([cardiac, np.zeros(10)])
# Calculate the number of beats in the capture time period
num_heart_beats = int(duration * bpm / 60)
# Concatenate together the number of heart beats needed
ecg = np.tile(cardiac , num_heart_beats)
# Add random (gaussian distributed) noise
noise = np.random.normal(0, noise, len(ecg))
ecg = noise + ecg
# Resample
ecg = scipy.signal.resample(ecg, sampling_rate*duration)
return(ecg)
|
def ecg_simulate(duration=10, sampling_rate=1000, bpm=60, noise=0.01)
|
Simulates an ECG signal.
Parameters
----------
duration : int
Desired recording length.
sampling_rate : int
Desired sampling rate.
bpm : int
Desired simulated heart rate.
noise : float
Desired noise level.
Returns
----------
ecg : array
The simulated ECG signal as a numpy array.
Example
----------
>>> import neurokit as nk
>>> import pandas as pd
>>>
>>> ecg = nk.ecg_simulate(duration=10, bpm=60, sampling_rate=1000, noise=0.01)
>>> pd.Series(ecg).plot()
Notes
----------
*Authors*
- `Diarmaid O Cualain <https://github.com/diarmaidocualain>`_
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
- scipy.signal
References
-----------
| 5.615109 | 6.815954 | 0.823818 |
processed_rsp = {"df": pd.DataFrame({"RSP_Raw": np.array(rsp)})}
biosppy_rsp = dict(biosppy.signals.resp.resp(rsp, sampling_rate=sampling_rate, show=False))
processed_rsp["df"]["RSP_Filtered"] = biosppy_rsp["filtered"]
# RSP Rate
# ============
rsp_rate = biosppy_rsp["resp_rate"]*60 # Get RSP rate value (in cycles per minute)
rsp_times = biosppy_rsp["resp_rate_ts"] # the time (in sec) of each rsp rate value
rsp_times = np.round(rsp_times*sampling_rate).astype(int) # Convert to timepoints
try:
rsp_rate = interpolate(rsp_rate, rsp_times, sampling_rate) # Interpolation using 3rd order spline
processed_rsp["df"]["RSP_Rate"] = rsp_rate
except TypeError:
print("NeuroKit Warning: rsp_process(): Sequence too short to compute respiratory rate.")
processed_rsp["df"]["RSP_Rate"] = np.nan
# RSP Cycles
# ===========================
rsp_cycles = rsp_find_cycles(biosppy_rsp["filtered"])
processed_rsp["df"]["RSP_Inspiration"] = rsp_cycles["RSP_Inspiration"]
processed_rsp["RSP"] = {}
processed_rsp["RSP"]["Cycles_Onsets"] = rsp_cycles["RSP_Cycles_Onsets"]
processed_rsp["RSP"]["Expiration_Onsets"] = rsp_cycles["RSP_Expiration_Onsets"]
processed_rsp["RSP"]["Cycles_Length"] = rsp_cycles["RSP_Cycles_Length"]/sampling_rate
# RSP Variability
# ===========================
rsp_diff = processed_rsp["RSP"]["Cycles_Length"]
processed_rsp["RSP"]["Respiratory_Variability"] = {}
processed_rsp["RSP"]["Respiratory_Variability"]["RSPV_SD"] = np.std(rsp_diff)
processed_rsp["RSP"]["Respiratory_Variability"]["RSPV_RMSSD"] = np.sqrt(np.mean(rsp_diff ** 2))
processed_rsp["RSP"]["Respiratory_Variability"]["RSPV_RMSSD_Log"] = np.log(processed_rsp["RSP"]["Respiratory_Variability"]["RSPV_RMSSD"])
return(processed_rsp)
|
def rsp_process(rsp, sampling_rate=1000)
|
Automated processing of RSP signals.
Parameters
----------
rsp : list or array
Respiratory (RSP) signal array.
sampling_rate : int
Sampling rate (samples/second).
Returns
----------
processed_rsp : dict
Dict containing processed RSP features.
Contains the RSP raw signal, the filtered signal, the respiratory cycles onsets, and respiratory phases (inspirations and expirations).
Example
----------
>>> import neurokit as nk
>>>
>>> processed_rsp = nk.rsp_process(rsp_signal)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- biosppy
- numpy
- pandas
*See Also*
- BioSPPY: https://github.com/PIA-Group/BioSPPy
| 2.873058 | 2.842538 | 1.010737 |
# Compute gradient (sort of derivative)
gradient = np.gradient(signal)
# Find zero-crossings
zeros, = biosppy.tools.zero_cross(signal=gradient, detrend=True)
# Find respiratory phases
phases_indices = []
for i in zeros:
if gradient[i+1] > gradient[i-1]:
phases_indices.append("Inspiration")
else:
phases_indices.append("Expiration")
# Select cycles (inspiration) and expiration onsets
inspiration_onsets = []
expiration_onsets = []
for index, onset in enumerate(zeros):
if phases_indices[index] == "Inspiration":
inspiration_onsets.append(onset)
if phases_indices[index] == "Expiration":
expiration_onsets.append(onset)
# Create a continuous inspiration signal
# ---------------------------------------
# Find initial phase
if phases_indices[0] == "Inspiration":
phase = "Expiration"
else:
phase = "Inspiration"
inspiration = []
phase_counter = 0
for i, value in enumerate(signal):
if i == zeros[phase_counter]:
phase = phases_indices[phase_counter]
if phase_counter < len(zeros)-1:
phase_counter += 1
inspiration.append(phase)
# Find last phase
if phases_indices[len(phases_indices)-1] == "Inspiration":
last_phase = "Expiration"
else:
last_phase = "Inspiration"
inspiration = np.array(inspiration)
inspiration[max(zeros):] = last_phase
# Convert to binary
inspiration[inspiration == "Inspiration"] = 1
inspiration[inspiration == "Expiration"] = 0
inspiration = pd.to_numeric(inspiration)
cycles_length = np.diff(inspiration_onsets)
rsp_cycles = {"RSP_Inspiration": inspiration,
"RSP_Expiration_Onsets": expiration_onsets,
"RSP_Cycles_Onsets": inspiration_onsets,
"RSP_Cycles_Length": cycles_length}
return(rsp_cycles)
|
def rsp_find_cycles(signal)
|
Find respiratory cycle onsets, durations and phases.
Parameters
----------
signal : list or array
Respiratory (RSP) signal (preferably filtered).
Returns
----------
rsp_cycles : dict
RSP cycles features.
Example
----------
>>> import neurokit as nk
>>> rsp_cycles = nk.rsp_find_cycles(signal)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- biosppy
*See Also*
- BioSPPY: https://github.com/PIA-Group/BioSPPy
| 2.802252 | 2.747124 | 1.020067 |
if channel_name is None:
if isinstance(channel, pd.core.series.Series):
if channel.name is not None:
channel_name = channel.name
else:
channel_name = "Added_Channel"
else:
channel_name = "Added_Channel"
# Compute the distance between the two signals
diff = sync_index_channel - sync_index_eeg
if diff > 0:
channel = list(channel)[diff:len(channel)]
channel = channel + [np.nan]*diff
if diff < 0:
channel = [np.nan]*diff + list(channel)
channel = list(channel)[0:len(channel)]
# Adjust to raw size
if len(channel) < len(raw):
channel = list(channel) + [np.nan]*(len(raw)-len(channel))
else:
channel = list(channel)[0:len(raw)] # Crop to fit the raw data
info = mne.create_info([channel_name], raw.info["sfreq"], ch_types=channel_type)
channel = mne.io.RawArray([channel], info)
raw.add_channels([channel], force_update_info=True)
return(raw)
|
def eeg_add_channel(raw, channel, sync_index_eeg=0, sync_index_channel=0, channel_type=None, channel_name=None)
|
Add a channel to an mne Raw m/eeg object. It synchronizes the channel with the EEG data according to the given indices and then adds it.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
channel : list or numpy.array
The channel to be added.
sync_index_eeg : int or list
An index, in the raw data, by which to align the two inputs.
sync_index_channel : int or list
An index, in the channel to add, by which to align the two inputs.
channel_type : str
Channel type. Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc', 'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' or 'hbo'.
Returns
----------
raw : mne.io.Raw
Raw data in FIF format.
Example
----------
>>> import neurokit as nk
>>> event_index_in_eeg = 42
>>> event_index_in_ecg = 666
>>> raw = nk.eeg_add_channel(raw, ecg, sync_index_eeg=event_index_in_eeg, sync_index_channel=event_index_in_ecg, channel_type="ecg")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- mne
*See Also*
- mne: http://martinos.org/mne/dev/index.html
| 2.571243 | 2.917006 | 0.881466 |
if isinstance(channel_names, list) is False:
channel_names = [channel_names]
channels, time_index = raw.copy().pick_channels(channel_names)[:]
if len(channel_names) > 1:
channels = pd.DataFrame(channels.T, columns=channel_names)
else:
channels = pd.Series(channels[0])
channels.name = channel_names[0]
return(channels)
|
def eeg_select_channels(raw, channel_names)
|
Select one or several channels by name and return them in a dataframe.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
channel_names : str or list
Channel's name(s).
Returns
----------
channels : pd.DataFrame
Channel.
Example
----------
>>> import neurokit as nk
>>> channels = nk.eeg_select_channels(raw, "TP7")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- mne
*See Also*
- mne package: http://martinos.org/mne/dev/index.html
| 2.802296 | 3.269725 | 0.857043 |
# Get all channel names
eeg = eeg.copy().pick_types(meg=False, eeg=True)
channel_list = eeg.ch_names
# Include
if include == "all":
electrodes = channel_list
elif isinstance(include, str):
electrodes = [s for s in channel_list if include in s]
elif isinstance(include, list):
electrodes = []
for i in include:
electrodes += [s for s in channel_list if i in s]
else:
print("NeuroKit Warning: eeg_select_electrodes(): 'include' parameter must be 'all', str or list.")
# Exclude
if exclude is not None:
if isinstance(exclude, str):
to_remove = [s for s in channel_list if exclude in s]
electrodes = [s for s in electrodes if s not in to_remove]
elif isinstance(exclude, list):
to_remove = []
for i in exclude:
to_remove += [s for s in channel_list if i in s]
electrodes = [s for s in electrodes if s not in to_remove]
else:
print("NeuroKit Warning: eeg_select_electrodes(): 'exclude' parameter must be None, str or list.")
# Laterality
if hemisphere != "both":
if hemisphere.lower() == "left" or hemisphere.lower() == "l":
hemi = [s for s in electrodes if len(re.findall(r'\d+', s)) > 0 and int(re.findall(r'\d+', s)[0])%2 > 0]
elif hemisphere.lower() == "right" or hemisphere.lower() == "r":
hemi = [s for s in electrodes if len(re.findall(r'\d+', s)) > 0 and int(re.findall(r'\d+', s)[0])%2 == 0]
else:
print("NeuroKit Warning: eeg_select_electrodes(): 'hemisphere' parameter must be 'both', 'left' or 'right'. Returning both.")
if central is True:
hemi += [s for s in electrodes if 'z' in s]
electrodes = hemi
return(electrodes)
|
def eeg_select_electrodes(eeg, include="all", exclude=None, hemisphere="both", central=True)
|
Return the electrode/sensor names of the selected region (according to a 10-20 EEG montage).
Parameters
----------
eeg : mne.Raw or mne.Epochs
EEG data.
include : str ot list
Sensor area to include.
exclude : str or list or None
Sensor area to exclude.
hemisphere : str
Select both hemispheres? "both", "left" or "right".
central : bool
Select the central line.
Returns
----------
electrodes : list
List of electrodes/sensors corresponding to the selected area.
Example
----------
>>> import neurokit as nk
>>> nk.eeg_select_electrodes(eeg, include="F", exclude="C")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
| 1.649186 | 1.664476 | 0.990814 |
event_id = {}
if conditions is None:
conditions = ["Event"] * len(onsets)
# Sanity check
if len(conditions) != len(onsets):
print("NeuroKit Warning: eeg_create_events(): conditions parameter of different length than onsets. Aborting.")
return()
event_names = list(set(conditions))
# event_index = [1, 2, 3, 4, 5, 32, 64, 128]
event_index = list(range(len(event_names)))
for i in enumerate(event_names):
conditions = [event_index[i[0]] if x==i[1] else x for x in conditions]
event_id[i[1]] = event_index[i[0]]
events = np.array([onsets, [0]*len(onsets), conditions]).T
return(events, event_id)
|
def eeg_create_mne_events(onsets, conditions=None)
|
Create MNE compatible events.
Parameters
----------
onsets : list or array
Events onsets.
conditions : list
A list of equal length containing the stimuli types/conditions.
Returns
----------
(events, event_id) : tuple
MNE-formated events and a dictionary with event's names.
Example
----------
>>> import neurokit as nk
>>> events, event_id = nk.eeg_create_mne_events(events_onset, conditions)
Authors
----------
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
| 3.056284 | 3.471577 | 0.880373 |
# Extract the events_channel from raw if needed
if isinstance(events_channel, str):
try:
events_channel = eeg_select_channels(raw, events_channel)
except:
print("NeuroKit error: eeg_add_events(): Wrong events_channel name provided.")
# Find event onsets
events = find_events(events_channel, treshold=treshold, cut=cut, time_index=time_index, number=number, after=after, before=before, min_duration=min_duration)
# Create mne compatible events
events, event_id = eeg_create_mne_events(events["onsets"], conditions)
# Add them
raw.add_events(events)
return(raw, events, event_id)
|
def eeg_add_events(raw, events_channel, conditions=None, treshold="auto", cut="higher", time_index=None, number="all", after=0, before=None, min_duration=1)
|
Find events on a channel, convert them into an MNE compatible format, and add them to the raw data.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
events_channel : str or array
Name of the trigger channel if in the raw, or array of equal length if externally supplied.
conditions : list
List containing the stimuli types/conditions.
treshold : float
The treshold value by which to select the events. If "auto", takes the value between the max and the min.
cut : str
"higher" or "lower", define the events as above or under the treshold. For photosensors, a white screen corresponds usually to higher values. Therefore, if your events were signalled by a black colour, events values would be the lower ones, and you should set the cut to "lower".
time_index : array or None
Add a corresponding datetime index; if provided, an additional array with the onsets as datetimes is returned.
number : str or int
How many events should it select.
after : int
If number different than "all", then at what time should it start selecting the events.
before : int
If number different than "all", before what time should it select the events.
min_duration : int
The minimum duration of an event (in timepoints).
Returns
----------
(raw, events, event_id) : tuple
The raw file with events, the mne-formatted events and event_id.
Example
----------
>>> import neurokit as nk
>>>
>>> raw, events, event_id = nk.eeg_add_events(raw, events_channel, conditions)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- pandas
*See Also*
- mne: http://martinos.org/mne/dev/index.html
References
-----------
- None
| 2.982738 | 3.11047 | 0.958935 |
if conditions is None:
# Get event_id
conditions = {}
for participant, epochs in all_epochs.items():
conditions.update(epochs.event_id)
all_evokeds = {}
for participant, epochs in all_epochs.items():
evokeds = {}
for cond in conditions:
try:
evokeds[cond] = epochs[cond].average()
except KeyError:
pass
all_evokeds[participant] = evokeds
return(all_evokeds)
|
def eeg_to_all_evokeds(all_epochs, conditions=None)
|
Convert all_epochs to all_evokeds.
DOCS INCOMPLETE :(
| 2.227897 | 2.25433 | 0.988274 |
if isinstance(eeg, mne.Epochs):
data = {}
if index is None:
index = range(len(eeg))
for epoch_index, epoch in zip(index, eeg.get_data()):
epoch = pd.DataFrame(epoch.T)
epoch.columns = eeg.ch_names
epoch.index = eeg.times
selection = eeg_select_electrodes(eeg, include=include, exclude=exclude, hemisphere=hemisphere, central=central)
data[epoch_index] = epoch[selection]
else: # it might be a Raw object
data = eeg.get_data().T
data = pd.DataFrame(data)
data.columns = eeg.ch_names
data.index = eeg.times
return(data)
|
def eeg_to_df(eeg, index=None, include="all", exclude=None, hemisphere="both", central=True)
|
Convert mne Raw or Epochs object to dataframe or dict of dataframes.
DOCS INCOMPLETE :(
| 2.420004 | 2.328477 | 1.039308 |
# Sanity check
if isinstance(scores, dict):
if labels is None:
labels = list(scores.keys())
try:
scores = [scores[key] for key in labels]
except KeyError:
print("NeuroKit Error: plot_polarbar(): labels and scores keys not matching. Recheck them.")
# Parameters
if colors == "default":
if len(scores) < 9:
colors = ["#f44336", "#9C27B0", "#3F51B5","#03A9F4", "#009688", "#8BC34A", "#FFEB3B", "#FF9800", "#795548"]
else:
colors = None
if labels is None:
labels = range(len(scores))
N = len(scores)
theta = np.linspace(0.0, -2 * np.pi, N, endpoint=False)
width = 2 * np.pi / N
# Main
plot = plt.figure(figsize=fig_size)
layer1 = plot.add_subplot(111, projection="polar")
bars1 = layer1.bar(theta+np.pi/len(scores), scores, width=width, bottom=0.0)
layer1.yaxis.set_ticks(range(11))
layer1.yaxis.set_ticklabels([])
layer1.xaxis.set_ticks(theta+np.pi/len(scores))
layer1.xaxis.set_ticklabels(labels, fontsize=labels_size)
for index, bar in enumerate(bars1):
if colors is not None:
bar.set_facecolor(colors[index])
bar.set_alpha(1)
# Layer 2
if distribution_means is not None and distribution_sds is not None:
# Sanity check
if isinstance(distribution_means, int):
distribution_means = [distribution_means]*N
if isinstance(distribution_sds, int):
distribution_sds = [distribution_sds]*N
# TODO: add conversion if those parameters are dicts
bottoms, tops = normal_range(np.array(distribution_means), np.array(distribution_sds), treshold=treshold)
tops = tops - bottoms
layer2 = plot.add_subplot(111, polar=True)
bars2 = layer2.bar(theta, tops, width=width, bottom=bottoms, linewidth=0)
layer2.xaxis.set_ticks(theta+np.pi/len(scores))
layer2.xaxis.set_ticklabels(labels, fontsize=labels_size)
for index, bar in enumerate(bars2):
bar.set_facecolor("#607D8B")
bar.set_alpha(0.3)
return(plot)
|
def plot_polarbar(scores, labels=None, labels_size=15, colors="default", distribution_means=None, distribution_sds=None, treshold=1.28, fig_size=(15, 15))
|
Polar bar chart.
Parameters
----------
scores : list or dict
Scores to plot.
labels : list
List of labels to be used for ticks.
labels_size : int
Label's size.
colors : list or str
List of colors or "default".
distribution_means : int or list
List of means to add a range ribbon.
distribution_sds : int or list
List of SDs to add a range ribbon.
treshold : float
Limits of the range ribbon (in terms of standard deviations from the mean).
fig_size : tuple
Figure size.
Returns
----------
plot : matplotlib figure
The figure.
Example
----------
>>> import neurokit as nk
>>> fig = nk.plot_polarbar(scores=[1, 2, 3, 4, 5], labels=["A", "B", "C", "D", "E"], distribution_means=3, distribution_sds=1)
>>> fig.show()
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- matplotlib
- numpy
| 2.340759 | 2.320334 | 1.008803 |
if method == "PCA":
feature_red_method = sklearn.decomposition.PCA(n_components=n_features)
data_processed = feature_red_method.fit_transform(data)
elif method == "agglom":
feature_red_method = sklearn.cluster.FeatureAgglomeration(n_clusters=n_features)
data_processed = feature_red_method.fit_transform(data)
elif method == "ica":
feature_red_method = sklearn.decomposition.FastICA(n_components=n_features)
data_processed = feature_red_method.fit_transform(data)
elif method == "kernelPCA":
feature_red_method = sklearn.decomposition.KernelPCA(n_components=n_features, kernel='linear')
data_processed = feature_red_method.fit_transform(data)
elif method == "kernelPCA":
feature_red_method = sklearn.decomposition.KernelPCA(n_components=n_features, kernel='linear')
data_processed = feature_red_method.fit_transform(data)
elif method == "sparsePCA":
feature_red_method = sklearn.decomposition.SparsePCA(n_components=n_features)
data_processed = feature_red_method.fit_transform(data)
elif method == "incrementalPCA":
feature_red_method = sklearn.decomposition.IncrementalPCA(n_components=n_features)
data_processed = feature_red_method.fit_transform(data)
elif method == "nmf":
if np.min(data) < 0:
data -= np.min(data)
feature_red_method = sklearn.decomposition.NMF(n_components=n_features)
data_processed = feature_red_method.fit_transform(data)
else:
feature_red_method = None
data_processed = data.copy()
return(data_processed)
|
def feature_reduction(data, method, n_features)
|
Feature reduction.
Parameters
----------
NA
Returns
----------
NA
Example
----------
NA
Authors
----------
Dominique Makowski
Dependencies
----------
- sklearn
| 1.440405 | 1.472418 | 0.978258 |
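A minimal sketch of calling feature_reduction above on made-up data (the random matrix and the unqualified call are assumptions for illustration; the function may only be used internally by the package):
>>> import numpy as np
>>> data = np.random.RandomState(42).normal(size=(100, 10))
>>> reduced = feature_reduction(data, method="PCA", n_features=3)
>>> reduced.shape
(100, 3)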
if treshold == "auto":
treshold = (np.max(np.array(signal)) - np.min(np.array(signal)))/2
signal = list(signal)
binary_signal = []
for i in range(len(signal)):
if cut == "higher":
if signal[i] > treshold:
binary_signal.append(1)
else:
binary_signal.append(0)
else:
if signal[i] < treshold:
binary_signal.append(1)
else:
binary_signal.append(0)
return(binary_signal)
|
def binarize_signal(signal, treshold="auto", cut="higher")
|
Binarize a channel based on a continuous channel.
Parameters
----------
signal : array or list
The signal channel.
treshold : float
The threshold value used to binarize the signal. If "auto", half of the signal's range ((max - min) / 2) is used.
cut : str
"higher" or "lower": whether events are defined as values above or below the threshold. For photosensors, a white screen usually corresponds to higher values; if your events were signalled by a black colour, the event values would be the lower ones and you should set cut to "lower".
Returns
----------
list
binary_signal
Example
----------
>>> import neurokit as nk
>>> binary_signal = nk.binarize_signal(signal, treshold=4)
Authors
----------
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
Dependencies
----------
None
| 1.550149 | 1.79863 | 0.86185 |
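A small worked example of binarize_signal on a made-up trace; values above the threshold of 2.5 become 1:
>>> import neurokit as nk
>>> signal = [0, 0, 5, 5, 5, 0, 0, 5, 5, 0]
>>> nk.binarize_signal(signal, treshold=2.5)
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0]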
events_channel = binarize_signal(events_channel, treshold=treshold, cut=cut)
events = {"onsets":[], "durations":[]}
if time_index is not None:
events["onsets_time"] = []
index = 0
for key, g in (groupby(events_channel)):
duration = len(list(g))
if key == 1:
events["onsets"].append(index)
events["durations"].append(duration)
if time_index is not None:
events["onsets_time"].append(time_index[index])
index += duration
return(events)
|
def localize_events(events_channel, treshold="auto", cut="higher", time_index=None)
|
Find the onsets of all events based on a continuous signal.
Parameters
----------
events_channel : array or list
The trigger channel.
treshold : float
The threshold value used to select the events. If "auto", half of the signal's range ((max - min) / 2) is used.
cut : str
"higher" or "lower": whether events are defined as values above or below the threshold. For photosensors, a white screen usually corresponds to higher values; if your events were signalled by a black colour, the event values would be the lower ones and you should set cut to "lower".
time_index : array or list
Add a corresponding datetime index; an additional array with the onsets as datetimes will be returned.
Returns
----------
dict
dict containing the onsets, the duration and the time index if provided.
Example
----------
>>> import neurokit as nk
>>> events = nk.localize_events(events_channel)
Authors
----------
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
Dependencies
----------
None
| 2.838361 | 3.405151 | 0.833549 |
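A worked example of localize_events on a made-up trace; two periods above the threshold give two onsets and durations:
>>> import neurokit as nk
>>> events_channel = [0, 0, 5, 5, 5, 0, 5, 5, 0]
>>> nk.localize_events(events_channel, treshold=2.5)
{'onsets': [2, 6], 'durations': [3, 2]}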
events = localize_events(events_channel, treshold=treshold, cut=cut, time_index=time_index)
# Warning when no events detected
if len(events["onsets"]) == 0:
print("NeuroKit warning: find_events(): No events found. Check your events_channel or adjust trehsold.")
return()
# Remove less than duration
toremove = []
for event in range(len(events["onsets"])):
if events["durations"][event] < min_duration:
toremove.append(False)
else:
toremove.append(True)
events["onsets"] = np.array(events["onsets"])[np.array(toremove)]
events["durations"] = np.array(events["durations"])[np.array(toremove)]
if time_index is not None:
events["onsets_time"] = np.array(events["onsets_time"])[np.array(toremove)]
# Before and after
if isinstance(number, int):
after_times = []
after_onsets = []
after_length = []
before_times = []
before_onsets = []
before_length = []
if after is not None:
if "onsets_time" not in events or len(events["onsets_time"]) == 0:
events["onsets_time"] = np.array(events["onsets"])
else:
events["onsets_time"] = np.array(events["onsets_time"])
after_onsets = list(np.array(events["onsets"])[events["onsets_time"]>after])[:number]
after_times = list(np.array(events["onsets_time"])[events["onsets_time"]>after])[:number]
after_length = list(np.array(events["durations"])[events["onsets_time"]>after])[:number]
if before is not None:
if "onsets_time" not in events or len(events["onsets_time"]) == 0:
events["onsets_time"] = np.array(events["onsets"])
else:
events["onsets_time"] = np.array(events["onsets_time"])
before_onsets = list(np.array(events["onsets"])[events["onsets_time"]<before])[:number]
before_times = list(np.array(events["onsets_time"])[events["onsets_time"]<before])[:number]
before_length = list(np.array(events["durations"])[events["onsets_time"]<before])[:number]
events["onsets"] = before_onsets + after_onsets
events["onsets_time"] = before_times + after_times
events["durations"] = before_length + after_length
return(events)
|
def find_events(events_channel, treshold="auto", cut="higher", time_index=None, number="all", after=0, before=None, min_duration=1)
|
Find and select events based on a continuous signal.
Parameters
----------
events_channel : array or list
The trigger channel.
treshold : float
The threshold value used to select the events. If "auto", half of the signal's range ((max - min) / 2) is used.
cut : str
"higher" or "lower": whether events are defined as values above or below the threshold. For photosensors, a white screen usually corresponds to higher values; if your events were signalled by a black colour, the event values would be the lower ones and you should set cut to "lower".
time_index : array or list
Add a corresponding datetime index; an additional array with the onsets as datetimes will be returned.
number : str or int
How many events to select ("all" or an integer).
after : int
If number is not "all", the time from which events should start being selected.
before : int
If number is not "all", the time before which events should be selected.
min_duration : int
The minimum duration of an event (in timepoints).
Returns
----------
events : dict
Dict containing events onsets and durations.
Example
----------
>>> import neurokit as nk
>>> events = nk.find_events(events_channel)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
| 1.754806 | 1.796226 | 0.976941 |
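A worked sketch of find_events on made-up data; with min_duration=2 the one-sample event is dropped (onsets and durations are returned as numpy arrays):
>>> import neurokit as nk
>>> events_channel = [0, 0, 5, 5, 5, 0, 5, 0, 0, 5, 5, 0]
>>> events = nk.find_events(events_channel, treshold=2.5, min_duration=2)
>>> events["onsets"], events["durations"]
(array([2, 9]), array([3, 2]))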
df = pd.DataFrame(signal)
ax = df.plot()
def plotOnSignal(x, color, marker=None):
if marker is None:
plt.axvline(x=x, color=color)
else:
plt.plot(x, signal[x], marker, color=color)
events_onsets = np.array(events_onsets)
try:
len(events_onsets[0])
for index, dim in enumerate(events_onsets):
for event in dim:
plotOnSignal(x=event,
color=color[index] if isinstance(color, list) else color,
marker=marker[index] if isinstance(marker, list) else marker)
except TypeError:
for event in events_onsets:
plotOnSignal(x=event,
color=color[0] if isinstance(color, list) else color,
marker=marker[0] if isinstance(marker, list) else marker)
return ax
|
def plot_events_in_signal(signal, events_onsets, color="red", marker=None)
|
Plot events in signal.
Parameters
----------
signal : array or DataFrame
Signal array (can be a dataframe with many signals).
events_onsets : list or ndarray
Events location.
color : str or list
Marker color(s).
marker : marker or list of markers (for possible marker values, see: https://matplotlib.org/api/markers_api.html)
Marker type.
Example
----------
>>> import neurokit as nk
>>> bio = nk.bio_process(ecg=signal, sampling_rate=1000)
>>> events_onsets = bio["ECG"]["R_Peaks"]
>>> plot_events_in_signal(bio["df"]["ECG_Filtered"], events_onsets)
>>> plot_events_in_signal(bio["df"]["ECG_Filtered"], events_onsets, color="red", marker="o")
>>> plot_events_in_signal(bio["df"]["ECG_Filtered"], [bio["ECG"]["P_Waves"], bio["ECG"]["R_Peaks"]], color=["blue", "red"], marker=["d","o"])
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
- `Renatosc <https://github.com/renatosc/>`_
*Dependencies*
- matplotlib
- pandas
| 2.306681 | 2.55929 | 0.901297 |
# Processing
# ===========
if method == "slow":
# Compute gradient (sort of derivative)
gradient = np.gradient(signal)
# Smoothing
size = int(0.1 * sampling_rate)
smooth, _ = biosppy.tools.smoother(signal=gradient, kernel='bartlett', size=size, mirror=True)
# Find zero-crossings
zeros, = biosppy.tools.zero_cross(signal=smooth, detrend=True)
# Separate onsets and peaks
onsets = []
peaks = []
for i in zeros:
if smooth[i+1] > smooth[i-1]:
onsets.append(i)
else:
peaks.append(i)
peaks = np.array(peaks)
onsets = np.array(onsets)
else:
# find extrema
peaks, _ = biosppy.tools.find_extrema(signal=signal, mode='max')
onsets, _ = biosppy.tools.find_extrema(signal=signal, mode='min')
# Keep only pairs
peaks = peaks[peaks > onsets[0]]
onsets = onsets[onsets < peaks[-1]]
# Artifact Treatment
# ====================
# Compute rising times
risingtimes = peaks-onsets
risingtimes = risingtimes/sampling_rate*1000
peaks = peaks[risingtimes > 100]
onsets = onsets[risingtimes > 100]
# Compute amplitudes
amplitudes = signal[peaks]-signal[onsets]
# Remove low amplitude variations
mask = amplitudes > np.std(signal)*treshold
peaks = peaks[mask]
onsets = onsets[mask]
amplitudes = amplitudes[mask]
# Recovery moments
recoveries = []
for x, peak in enumerate(peaks):
try:
window = signal[peak:onsets[x+1]]
except IndexError:
window = signal[peak:]
recovery_amp = signal[peak]-amplitudes[x]/2
try:
smaller = find_closest_in_list(recovery_amp, window, "smaller")
recovery_pos = peak + list(window).index(smaller)
recoveries.append(recovery_pos)
except ValueError:
recoveries.append(np.nan)
recoveries = np.array(recoveries)
return(onsets, peaks, amplitudes, recoveries)
|
def eda_scr(signal, sampling_rate=1000, treshold=0.1, method="fast")
|
Skin-Conductance Responses extraction algorithm.
Parameters
----------
signal : list or array
EDA signal array.
sampling_rate : int
Sampling rate (samples/second).
treshold : float
SCR minimum threshold (in terms of the signal's standard deviation).
method : str
"fast" or "slow". Either use a gradient-based approach or a local extrema one.
Returns
----------
onsets, peaks, amplitudes, recoveries : lists
SCRs features.
Example
----------
>>> import neurokit as nk
>>>
>>> onsets, peaks, amplitudes, recoveries = nk.eda_scr(eda_signal)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- biosppy
- numpy
- pandas
*See Also*
- BioSPPy: https://github.com/PIA-Group/BioSPPy
References
-----------
- Kim, K. H., Bang, S. W., & Kim, S. R. (2004). Emotion recognition system using short-term monitoring of physiological signals. Medical and biological engineering and computing, 42(3), 419-427.
- Gamboa, H. (2008). Multi-Modal Behavioral Biometrics Based on HCI and Electrophysiology (Doctoral dissertation, PhD thesis, Universidade Técnica de Lisboa, Instituto Superior Técnico).
| 3.081832 | 2.925802 | 1.053329 |
data = eeg_to_df(eeg, index=index, include=include, exclude=exclude, hemisphere=hemisphere, central=central)
# if data was Raw, make as if it was an Epoch so the following routine is only written once
if isinstance(data, dict) is False:
data = {0: data}
# Create time windows
if isinstance(times, tuple):
times = list(times)
if isinstance(times, list):
if isinstance(times[0], list) is False:
times = [times]
else:
times = [[0, None]]
# Deal with names
if isinstance(names, str):
prefix = [names] * len(times)
if len(times) > 1:
for time_index, time_window in enumerate(times):
prefix[time_index] = prefix[time_index] + "_%.2f_%.2f" %(time_window[0], time_window[1])
else:
prefix = names
# Iterate
complexity_all = pd.DataFrame()
for time_index, time_window in enumerate(times):
if len(times) > 1 and verbose is True:
print("Computing complexity features... window " + str(time_window) + "/" + str(len(times)))
complexity_features = {}
# Compute complexity for each channel for each epoch
index = 0
for epoch_index, epoch in data.items():
if len(times) == 1 and verbose is True:
print("Computing complexity features... " + str(round(index/len(data.items())*100, 2)) + "%")
index +=1
df = epoch[time_window[0]:time_window[1]].copy()
complexity_features[epoch_index] = {}
for channel in df:
signal = df[channel].values
features = complexity(signal, sampling_rate=sampling_rate, shannon=shannon, sampen=sampen, multiscale=multiscale, spectral=spectral, svd=svd, correlation=correlation, higushi=higushi, petrosian=petrosian, fisher=fisher, hurst=hurst, dfa=dfa, lyap_r=lyap_r, lyap_e=lyap_e)
for key, feature in features.items():
if key in complexity_features[epoch_index].keys():
complexity_features[epoch_index][key].append(feature)
else:
complexity_features[epoch_index][key] = [feature]
for epoch_index, epoch in complexity_features.items():
for feature in epoch:
complexity_features[epoch_index][feature] = pd.Series(complexity_features[epoch_index][feature]).mean()
# Convert to dataframe
complexity_features = pd.DataFrame.from_dict(complexity_features, orient="index")
complexity_features.columns = [prefix[time_index] + "_" + s for s in complexity_features.columns]
complexity_all = pd.concat([complexity_all, complexity_features], axis=1)
return(complexity_all)
|
def eeg_complexity(eeg, sampling_rate, times=None, index=None, include="all", exclude=None, hemisphere="both", central=True, verbose=True, shannon=True, sampen=True, multiscale=True, spectral=True, svd=True, correlation=True, higushi=True, petrosian=True, fisher=True, hurst=True, dfa=True, lyap_r=False, lyap_e=False, names="Complexity")
|
Compute complexity indices of epochs or raw object.
DOCS INCOMPLETE :(
| 2.295137 | 2.301192 | 0.997369 |
# Convert to array if list
if isinstance(nbeats_real, list):
nbeats_real = np.array(nbeats_real)
nbeats_reported = np.array(nbeats_reported)
# Compute accuracy
accuracy = 1 - (abs(nbeats_real-nbeats_reported))/((nbeats_real+nbeats_reported)/2)
return(accuracy)
|
def compute_interoceptive_accuracy(nbeats_real, nbeats_reported)
|
Computes interoceptive accuracy according to Garfinkel et al. (2015).
Parameters
----------
nbeats_real : int or list
Real number of heartbeats.
nbeats_reported : int or list
Reported number of heartbeats.
Returns
----------
accuracy : float or list
Objective accuracy in detecting internal bodily sensations. It is the central construct underpinning other interoceptive measures (Garfinkel et al., 2015).
Example
----------
>>> import neurokit as nk
>>>
>>> nk.compute_interoceptive_accuracy(5, 3)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
References
-----------
- Garfinkel, S. N., Seth, A. K., Barrett, A. B., Suzuki, K., & Critchley, H. D. (2015). Knowing your own heart: distinguishing interoceptive accuracy from interoceptive awareness. Biological psychology, 104, 65-74.
| 2.408398 | 2.796996 | 0.861066 |
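The formula above is accuracy = 1 - |real - reported| / ((real + reported) / 2); for the docstring example this gives 1 - 2 / 4 = 0.5:
>>> import neurokit as nk
>>> nk.compute_interoceptive_accuracy(5, 3)
0.5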
if value != "stop":
self.X = pd.concat([self.X, pd.DataFrame({"Signal":[value]})])
self.y = np.array(list(self.y) + [response])
if len(set(list(self.y))) > 1:
self.model = self.fit_model(self.X , self.y)
|
def add_response(self, response, value)
|
Add response to staircase.
Parameters
----------
response : int or bool
0 or 1.
value : int or float
Signal corresponding to response.
| 4.473969 | 4.404713 | 1.015723 |
t = (builtin_time.clock()-self.clock)*1000
if reset is True:
self.reset()
return(t)
|
def get(self, reset=True)
|
Get time since last initialisation / reset.
Parameters
----------
reset = bool, optional
Should the clock be reset after returning time?
Returns
----------
float
Time passed in milliseconds.
Example
----------
>>> import neurokit as nk
>>> time_passed_since_neurobox_loading = nk.time.get()
>>> nk.time.reset()
>>> time_passed_since_reset = nk.time.get()
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- time
| 9.253175 | 13.77929 | 0.671528 |
# Convert ints to arrays if needed
if isinstance(duration, list) or isinstance(duration, np.ndarray):
duration = np.array(duration)
else:
duration = np.array([duration]*len(events_onsets))
if isinstance(onset, list) or isinstance(onset, np.ndarray):
onset = np.array(onset)
else:
onset = np.array([onset]*len(events_onsets))
if isinstance(data, list) or isinstance(data, np.ndarray) or isinstance(data, pd.Series):
data = pd.DataFrame({"Signal": list(data)})
# Store durations
duration_in_s = duration.copy()
onset_in_s = onset.copy()
# Convert to timepoints
duration = duration*sampling_rate
onset = onset*sampling_rate
# Create the index
if index is None:
index = list(range(len(events_onsets)))
else:
if len(list(set(index))) != len(index):
print("NeuroKit Warning: create_epochs(): events_names does not contain uniques names, replacing them by numbers.")
index = list(range(len(events_onsets)))
else:
index = list(index)
# Create epochs
epochs = {}
for event, event_onset in enumerate(events_onsets):
epoch_onset = int(event_onset + onset[event])
epoch_end = int(event_onset+duration[event]+1)
epoch = data[epoch_onset:epoch_end].copy()
epoch.index = np.linspace(start=onset_in_s[event], stop=duration_in_s[event], num=len(epoch), endpoint=True)
relative_time = np.linspace(start=onset[event], stop=duration[event], num=len(epoch), endpoint=True).astype(int).tolist()
absolute_time = np.linspace(start=epoch_onset, stop=epoch_end, num=len(epoch), endpoint=True).astype(int).tolist()
epoch["Epoch_Relative_Time"] = relative_time
epoch["Epoch_Absolute_Time"] = absolute_time
epochs[index[event]] = epoch
return(epochs)
|
def create_epochs(data, events_onsets, sampling_rate=1000, duration=1, onset=0, index=None)
|
Epoching a dataframe.
Parameters
----------
data : pandas.DataFrame
Data*time.
events_onsets : list
A list of event onsets indices.
sampling_rate : int
Sampling rate (samples/second).
duration : int or list
Duration(s) of each epoch(s) (in seconds).
onset : int
Epoch onset(s) relative to events_onsets (in seconds).
index : list
Events names, in order, that will be used as index. Must contain unique names. If not provided, they will be replaced by event numbers.
Returns
----------
epochs : dict
dict containing all epochs.
Example
----------
>>> import neurokit as nk
>>> epochs = nk.create_epochs(data, events_onsets)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- numpy
| 2.287446 | 2.220774 | 1.030022 |
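A self-contained sketch of create_epochs on synthetic data (the random signal is an assumption for illustration); each epoch spans `duration` seconds plus the end sample:
>>> import numpy as np
>>> import pandas as pd
>>> import neurokit as nk
>>> data = pd.DataFrame({"Signal": np.random.RandomState(0).normal(size=5000)})
>>> epochs = nk.create_epochs(data, events_onsets=[1000, 3000], sampling_rate=1000, duration=1)
>>> list(epochs.keys())
[0, 1]
>>> len(epochs[0])
1001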
# values=RRis.copy()
# value_times=beats_times.copy()
# Preprocessing
initial_index = value_times[0]
value_times = np.array(value_times) - initial_index
# fit a 3rd degree spline on the data.
spline = scipy.interpolate.splrep(x=value_times, y=values, k=3, s=0) # s=0 guarantees that it will pass through ALL the given points
x = np.arange(0, value_times[-1], 1)
# Get the values indexed per time
signal = scipy.interpolate.splev(x=x, tck=spline, der=0)
# Transform to series
signal = pd.Series(signal)
signal.index = np.array(np.arange(initial_index, initial_index+len(signal), 1))
return(signal)
|
def interpolate(values, value_times, sampling_rate=1000)
|
3rd order spline interpolation.
Parameters
----------
values : dataframe
Values.
value_times : list
Time indices of values.
sampling_rate : int
Sampling rate (samples/second).
Returns
----------
signal : pd.Series
An array containing the values indexed by time.
Example
----------
>>> import neurokit as nk
>>> signal = interpolate([800, 900, 700, 500], [1000, 2000, 3000, 4000], sampling_rate=1000)
>>> pd.Series(signal).plot()
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- scipy
- pandas
| 4.659616 | 4.613018 | 1.010101 |
derivative = np.gradient(signal, 2)
peaks = np.where(np.diff(np.sign(derivative)))[0]
return(peaks)
|
def find_peaks(signal)
|
Locate peaks based on derivative.
Parameters
----------
signal : list or array
Signal.
Returns
----------
peaks : array
An array containing the peak indices.
Example
----------
>>> signal = np.sin(np.arange(0, np.pi*10, 0.05))
>>> peaks = nk.find_peaks(signal)
>>> nk.plot_events_in_signal(signal, peaks)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- scipy
- pandas
| 3.213131 | 5.256818 | 0.611231 |
freqs = list(freqs)
freqs_names = []
for freq in freqs:
if freq < 1:
freqs_names.append("UltraLow")
elif freq <= 3:
freqs_names.append("Delta")
elif freq <= 7:
freqs_names.append("Theta")
elif freq <= 9:
freqs_names.append("Alpha1/Mu")
elif freq <= 12:
freqs_names.append("Alpha2/Mu")
elif freq <= 13:
freqs_names.append("Beta1/Mu")
elif freq <= 17:
freqs_names.append("Beta1")
elif freq <= 30:
freqs_names.append("Beta2")
elif freq <= 40:
freqs_names.append("Gamma1")
elif freq <= 50:
freqs_names.append("Gamma2")
else:
freqs_names.append("UltraHigh")
return(freqs_names)
|
def eeg_name_frequencies(freqs)
|
Name frequencies according to standard classifications.
Parameters
----------
freqs : list or numpy.array
list of floats containing frequencies to classify.
Returns
----------
freqs_names : list
Named frequencies
Example
----------
>>> import neurokit as nk
>>>
>>> nk.eeg_name_frequencies([0.5, 1.5, 3, 5, 7, 15])
Notes
----------
*Details*
- Delta: 1-3Hz
- Theta: 4-7Hz
- Alpha1: 8-9Hz
- Alpha2: 10-12Hz
- Beta1: 13-17Hz
- Beta2: 18-30Hz
- Gamma1: 31-40Hz
- Gamma2: 41-50Hz
- Mu: 8-13Hz
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
References
------------
- None
| 1.839735 | 1.621692 | 1.134454 |
picks = mne.pick_types(raw.info, include=eeg_select_electrodes(include=sensors_include, exclude=sensors_exclude), exclude="bads")
if method == "multitaper":
psds, freqs = mne.time_frequency.psd_multitaper(raw,
fmin=fmin,
fmax=fmax,
low_bias=True,
proj=proj,
picks=picks)
else:
psds, freqs = mne.time_frequency.psd_welch(raw,
fmin=fmin,
fmax=fmax,
proj=proj,
picks=picks)
tf = pd.DataFrame(psds)
tf.columns = eeg_name_frequencies(freqs)
tf = tf.mean(axis=0)
mean_psd = {}
for freq in ["UltraLow", "Delta", "Theta", "Alpha", "Alpha1", "Alpha2", "Mu", "Beta", "Beta1", "Beta2", "Gamma", "Gamma1", "Gamma2", "UltraHigh"]:
mean_psd[freq] = tf[[freq in s for s in tf.index]].mean()
mean_psd = pd.DataFrame.from_dict(mean_psd, orient="index").T
return(mean_psd)
|
def eeg_psd(raw, sensors_include="all", sensors_exclude=None, fmin=0.016, fmax=60, method="multitaper", proj=False)
|
Compute Power-Spectral Density (PSD).
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
sensors_include : str
Sensor area to include. See :func:`neurokit.eeg_select_sensors()`.
sensors_exclude : str
Sensor area to exclude. See :func:`neurokit.eeg_select_sensors()`.
fmin : float
Min frequency of interest.
fmax : float
Max frequency of interest.
method : str
"multitaper" or "welch".
proj : bool
Add projectors.
Returns
----------
mean_psd : pandas.DataFrame
Averaged PSDs.
Example
----------
>>> import neurokit as nk
Notes
----------
*Details*
- Delta: 1-3Hz
- Theta: 4-7Hz
- Alpha1: 8-9Hz
- Alpha2: 10-12Hz
- Beta1: 13-17Hz
- Beta2: 18-30Hz
- Gamma1: 31-40Hz
- Gamma2: 41-50Hz
- Mu: 8-13Hz
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
References
------------
- None
| 2.397727 | 2.454231 | 0.976977 |
if bands == "all" or bands == "All":
bands = ["Delta", "Theta", "Alpha", "Beta", "Gamma", "Mu"]
if "Alpha" in bands:
bands.remove("Alpha")
bands += ["Alpha1", "Alpha2"]
if "Beta" in bands:
bands.remove("Beta")
bands += ["Beta1", "Beta2"]
if "Gamma" in bands:
bands.remove("Gamma")
bands += ["Gamma1", "Gamma2"]
frequencies = {}
for band in bands:
if band == "Delta":
frequencies[band] = np.arange(1, 3+0.1, step)
if band == "Theta":
frequencies[band] = np.arange(4, 7+0.1, step)
if band == "Alpha1":
frequencies[band] = np.arange(8, 9+0.1, step)
if band == "Alpha2":
frequencies[band] = np.arange(10, 12+0.1, step)
if band == "Beta1":
frequencies[band] = np.arange(13, 17+0.1, step)
if band == "Beta2":
frequencies[band] = np.arange(18, 30+0.1, step)
if band == "Gamma1":
frequencies[band] = np.arange(31, 40+0.1, step)
if band == "Gamma2":
frequencies[band] = np.arange(41, 50+0.1, step)
if band == "Mu":
frequencies[band] = np.arange(8, 13+0.1, step)
return(frequencies)
|
def eeg_create_frequency_bands(bands="all", step=1)
|
Delta: 1-3Hz
Theta: 4-7Hz
Alpha1: 8-9Hz
Alpha2: 10-12Hz
Beta1: 13-17Hz
Beta2: 18-30Hz
Gamma1: 31-40Hz
Gamma2: 41-50Hz
Mu: 8-13Hz
| 1.492875 | 1.27191 | 1.173727 |
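A hedged usage sketch, assuming the function is importable at the package level; each requested band maps to an array of frequencies at the given step:
>>> bands = eeg_create_frequency_bands(bands=["Delta", "Theta"], step=1)
>>> bands["Delta"]
array([1., 2., 3.])
>>> bands["Theta"]
array([4., 5., 6., 7.])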
median = np.median(var)
mad = np.median(np.abs(var - median))
mad = mad*constant
return(mad)
|
def mad(var, constant=1)
|
Median Absolute Deviation: a "robust" version of standard deviation.
Parameters
----------
var : list or ndarray
Value array.
constant : float
Scale factor. Use 1.4826 for results similar to default R.
Returns
----------
mad : float
The MAD.
Example
----------
>>> import neurokit as nk
>>> hrv = nk.mad([2, 8, 7, 5, 4, 12, 5, 1])
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
References
-----------
- https://en.wikipedia.org/wiki/Median_absolute_deviation
| 2.778168 | 5.134145 | 0.541116 |
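Worked out for the docstring example: the median of [2, 8, 7, 5, 4, 12, 5, 1] is 5, the absolute deviations are [3, 3, 2, 0, 1, 7, 0, 4], and their median is 2.5 (scaled by 1.4826 this would give about 3.71):
>>> import neurokit as nk
>>> nk.mad([2, 8, 7, 5, 4, 12, 5, 1])
2.5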
df = pd.DataFrame(raw_scores)
mean = df.mean(axis=0) if center else 0
sd = df.std(axis=0) if scale else 1
z_scores = (df - mean)/sd
return(z_scores)
|
def z_score(raw_scores, center=True, scale=True)
|
Transform an array, serie or list into Z scores (scaled and centered scores).
Parameters
----------
raw_scores : list, ndarray or pandas.Series
The raw scores to normalize.
center : bool
Center the array (mean = 0).
scale : bool
Scale the array (SD = 1).
Returns
----------
z_scores : pandas.DataFrame
The normalized scores.
Example
----------
>>> import neurokit as nk
>>>
>>> nk.z_score([3, 1, 2, 4, 6])
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- pandas
| 2.533976 | 3.839158 | 0.660034 |
outliers = []
mean = np.mean(data)
std = np.std(data)
for i in data:
if abs(i - mean)/std < treshold:
outliers.append(False)
else:
outliers.append(True)
outliers = np.array(outliers)
return (outliers)
|
def find_outliers(data, treshold=2.58)
|
Identify outliers (abnormal values) based on the standard deviation.
Parameters
----------
data : list or ndarray
Data array
treshold : float
Maximum deviation (in terms of standard deviations). Rule of thumb for a Gaussian distribution: 2.58 = rejecting 1%, 2.33 = rejecting 2%, 1.96 = rejecting 5% and 1.28 = rejecting 10%.
Returns
----------
outliers : ndarray
A list of True/False with True being the outliers.
Example
----------
>>> import neurokit as nk
>>> outliers = nk.find_outliers([1, 2, 1, 5, 666, 4, 1 ,3, 5])
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
| 2.025037 | 2.613572 | 0.774816 |
bottom = mean - sd*treshold
top = mean + sd*treshold
return(bottom, top)
|
def normal_range(mean, sd, treshold=1.28)
|
Returns the bottom and top limits of a normal distribution portion based on a threshold.
Parameters
----------
mean : float
Mean of the distribution.
sd : float
Standard deviation of the distribution.
treshold : float
Maximum deviation (in terms of standard deviations). Rule of thumb for a Gaussian distribution: 2.58 = keeping 99%, 2.33 = keeping 98%, 1.96 = keeping 95% and 1.28 = keeping 90%.
Returns
----------
(bottom, top) : tuple
Lower and higher range.
Example
----------
>>> import neurokit as nk
>>> bottom, top = nk.normal_range(mean=100, sd=15, treshold=2)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
| 3.66599 | 7.137011 | 0.513659 |
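Worked out: with mean=100, sd=15 and treshold=2 the range is 100 minus and plus 30:
>>> import neurokit as nk
>>> nk.normal_range(mean=100, sd=15, treshold=2)
(70, 130)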
array = array[:]
uniques = []
for i in range(len(array)):
if i == 0:
uniques.append(True)
else:
if array[i] == array[i-1]:
uniques.append(False)
else:
uniques.append(True)
return(uniques)
|
def find_following_duplicates(array)
|
Find duplicates that immediately follow one another (consecutive duplicates).
Parameters
----------
array : list or ndarray
A list containing duplicates.
Returns
----------
uniques : list
A list containing True for each unique and False for following duplicates.
Example
----------
>>> import neurokit as nk
>>> mylist = ["a","a","b","a","a","a","c","c","b","b"]
>>> uniques = nk.find_following_duplicates(mylist)
>>> indices = np.where(uniques) # Find indices of uniques
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
| 2.296646 | 2.940961 | 0.780917 |
if direction == "both":
closest = min(array, key=lambda x:abs(x-number))
if direction == "smaller":
if strictly is True:
closest = max(x for x in array if x < number)
else:
closest = max(x for x in array if x <= number)
if direction == "greater":
if strictly is True:
closest = min(filter(lambda x: x > number, array))
else:
closest = min(filter(lambda x: x >= number, array))
return(closest)
|
def find_closest_in_list(number, array, direction="both", strictly=False)
|
Find the closest number in the array from x.
Parameters
----------
number : float
The number.
array : list
The list to look in.
direction : str
"both" for smaller or greater, "greater" for only greater numbers and "smaller" for the closest smaller.
strictly : bool
False for stricly superior or inferior or True for including equal.
Returns
----------
closest : int
The closest number in the array.
Example
----------
>>> import neurokit as nk
>>> nk.find_closest_in_list(1.8, [3, 5, 6, 1, 2])
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
| 2.003633 | 2.544549 | 0.787422 |
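A short worked example clarifying the direction parameter (2 is the overall closest value to 1.8, while 1 is the closest smaller value):
>>> import neurokit as nk
>>> nk.find_closest_in_list(1.8, [3, 5, 6, 1, 2])
2
>>> nk.find_closest_in_list(1.8, [3, 5, 6, 1, 2], direction="smaller")
1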
emg = np.asarray(emg)
tkeo = np.copy(emg)
# Teager–Kaiser Energy operator
tkeo[1:-1] = emg[1:-1]*emg[1:-1] - emg[:-2]*emg[2:]
# correct the data in the extremities
tkeo[0], tkeo[-1] = tkeo[1], tkeo[-2]
return(tkeo)
|
def emg_tkeo(emg)
|
Calculates the Teager–Kaiser Energy operator.
Parameters
----------
emg : array
raw EMG signal.
Returns
-------
tkeo : 1D array_like
signal processed by the Teager–Kaiser Energy operator.
Notes
-----
*Authors*
- Marcos Duarte
*See Also*
See this notebook [1]_.
References
----------
.. [1] https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb
| 3.566987 | 2.918266 | 1.222296 |
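The operator computes x[n]*x[n] - x[n-1]*x[n+1] for interior samples and copies the neighbouring values at the edges; a small worked check, assuming the function is importable:
>>> import numpy as np
>>> emg_tkeo(np.array([0., 1., 2., 1., 0.]))
array([1., 1., 3., 1., 1.])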
emg = emg_tkeo(emg)
if np.size(freqs) == 2:
# band-pass filter
b, a = scipy.signal.butter(2, np.array(freqs)/(sampling_rate/2.), btype = 'bandpass')
emg = scipy.signal.filtfilt(b, a, emg)
if np.size(lfreq) == 1:
# full-wave rectification
envelope = abs(emg)
# low-pass Butterworth filter
b, a = scipy.signal.butter(2, np.array(lfreq)/(sampling_rate/2.), btype = 'low')
envelope = scipy.signal.filtfilt(b, a, envelope)
return (envelope)
|
def emg_linear_envelope(emg, sampling_rate=1000, freqs=[10, 400], lfreq=4)
|
r"""Calculate the linear envelope of a signal.
Parameters
----------
emg : array
raw EMG signal.
sampling_rate : int
Sampling rate (samples/second).
freqs : list [fc_h, fc_l], optional
cutoff frequencies for the band-pass filter (in Hz).
lfreq : number, optional
cutoff frequency for the low-pass filter (in Hz).
Returns
-------
envelope : array
linear envelope of the signal.
Notes
-----
*Authors*
- Marcos Duarte
*See Also*
See this notebook [1]_.
References
----------
.. [1] https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb
| 2.881984 | 2.963137 | 0.972612 |
n_above = n_above*sampling_rate
n_below = n_below*sampling_rate
envelope = np.atleast_1d(envelope).astype('float64')
# deal with NaN's (by definition, NaN's are not greater than threshold)
envelope[np.isnan(envelope)] = -np.inf
# indices of data greater than or equal to threshold
inds = np.nonzero(envelope >= threshold)[0]
if inds.size:
# initial and final indexes of continuous data
inds = np.vstack((inds[np.diff(np.hstack((-np.inf, inds))) > n_below+1], \
inds[np.diff(np.hstack((inds, np.inf))) > n_below+1])).T
# indexes of continuous data longer than or equal to n_above
inds = inds[inds[:, 1]-inds[:, 0] >= n_above-1, :]
if not inds.size:
inds = np.array([]) # standardize inds shape
inds = np.array(inds)
activation = np.array([0]*len(envelope))
for i in inds:
activation[i[0]:i[1]] = 1
return (activation)
|
def emg_find_activation(envelope, sampling_rate=1000, threshold=0, n_above=0.25, n_below=1)
|
Detects onset in data based on amplitude threshold.
Parameters
----------
envelope : array
Linear envelope of EMG signal.
sampling_rate : int
Sampling rate (samples/second).
threshold : float
minimum amplitude of `x` to detect.
n_above : float
minimum continuous time (in s) greater than or equal to `threshold` to detect (but see the parameter `n_below`).
n_below : float
minimum time (in s) below `threshold` that will be ignored in the detection of `x` >= `threshold`.
Returns
-------
activation : array
With 1 when muscle activated and 0 when not.
Notes
-----
You might have to tune the parameters according to the signal-to-noise
characteristic of the data.
See this IPython Notebook [1]_.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectOnset.ipynb
| 3.535918 | 3.683752 | 0.959869 |
rpeaks, = biosppy.ecg.hamilton_segmenter(np.array(signal), sampling_rate=sampling_rate)
rpeaks, = biosppy.ecg.correct_rpeaks(signal=np.array(signal), rpeaks=rpeaks, sampling_rate=sampling_rate, tol=0.05)
return(rpeaks)
|
def ecg_find_peaks(signal, sampling_rate=1000)
|
Find R peaks indices on the ECG channel.
Parameters
----------
signal : list or ndarray
ECG signal (preferably filtered).
sampling_rate : int
Sampling rate (samples/second).
Returns
----------
rpeaks : list
List of R-peaks location indices.
Example
----------
>>> import neurokit as nk
>>> Rpeaks = nk.ecg_find_peaks(signal)
Notes
----------
*Authors*
- the BioSPPy dev team (https://github.com/PIA-Group/BioSPPy)
*Dependencies*
- biosppy
*See Also*
- BioSPPy: https://github.com/PIA-Group/BioSPPy
| 3.404642 | 3.985469 | 0.854264 |
waves = np.array([""]*len(ecg))
waves[rpeaks] = "R"
waves[t_waves_ends] = "T"
systole = [0]
current = 0
for index, value in enumerate(waves[1:]):
if waves[index-1] == "R":
current = 1
if waves[index-1] == "T":
current = 0
systole.append(current)
return(systole)
|
def ecg_systole(ecg, rpeaks, t_waves_ends)
|
Returns the localization of systoles and diastoles.
Parameters
----------
ecg : list or ndarray
ECG signal (preferably filtered).
rpeaks : list or ndarray
R peaks localization.
t_waves_ends : list or ndarray
T waves localization.
Returns
----------
systole : ndarray
Array indicating where systole (1) and diastole (0).
Example
----------
>>> import neurokit as nk
>>> systole = nk.ecg_systole(ecg, rpeaks, t_waves_ends)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Details*
- **Systole/Diastole**: One prominent channel of body and brain communication is that conveyed by baroreceptors, pressure and stretch-sensitive receptors within the heart and surrounding arteries. Within each cardiac cycle, bursts of baroreceptor afferent activity encoding the strength and timing of each heartbeat are carried via the vagus and glossopharyngeal nerve afferents to the nucleus of the solitary tract. This is the principal route that communicates to the brain the dynamic state of the heart, enabling the representation of cardiovascular arousal within viscerosensory brain regions, and influence ascending neuromodulator systems implicated in emotional and motivational behaviour. Because arterial baroreceptors are activated by the arterial pulse pressure wave, their phasic discharge is maximal during and immediately after the cardiac systole, that is, when the blood is ejected from the heart, and minimal during cardiac diastole, that is, between heartbeats (Azevedo, 2017).
References
-----------
- Azevedo, R. T., Garfinkel, S. N., Critchley, H. D., & Tsakiris, M. (2017). Cardiac afferent activity modulates the expression of racial stereotypes. Nature communications, 8.
- Edwards, L., Ring, C., McIntyre, D., & Carroll, D. (2001). Modulation of the human nociceptive flexion reflex across the cardiac cycle. Psychophysiology, 38(4), 712-718.
- Gray, M. A., Rylander, K., Harrison, N. A., Wallin, B. G., & Critchley, H. D. (2009). Following one's heart: cardiac rhythms gate central initiation of sympathetic reflexes. Journal of Neuroscience, 29(6), 1817-1825.
| 2.863225 | 3.473675 | 0.824264 |
window_size = int(window_size*sampling_rate)
lowpass = scipy.signal.butter(1, hfreq/(sampling_rate/2.0), 'low')
highpass = scipy.signal.butter(1, lfreq/(sampling_rate/2.0), 'high')
# TODO: Could use an actual bandpass filter
ecg_low = scipy.signal.filtfilt(*lowpass, x=ecg)
ecg_band = scipy.signal.filtfilt(*highpass, x=ecg_low)
# Square (=signal power) of the first difference of the signal
decg = np.diff(ecg_band)
decg_power = decg**2
# Robust threshold and normalizator estimation
thresholds = []
max_powers = []
for i in range(int(len(decg_power)/window_size)):
sample = slice(i*window_size, (i+1)*window_size)
d = decg_power[sample]
thresholds.append(0.5*np.std(d))
max_powers.append(np.max(d))
threshold = 0.5*np.std(decg_power)
threshold = np.median(thresholds)
max_power = np.median(max_powers)
decg_power[decg_power < threshold] = 0
decg_power = decg_power/max_power
decg_power[decg_power > 1.0] = 1.0
square_decg_power = decg_power**2
# shannon_energy = -square_decg_power*np.log(square_decg_power) # This errors
# shannon_energy[np.where(np.isfinite(shannon_energy) == False)] = 0.0
shannon_energy = -square_decg_power*np.log(square_decg_power.clip(min=1e-6))
shannon_energy[np.where(shannon_energy <= 0)] = 0.0
mean_window_len = int(sampling_rate*0.125+1)
lp_energy = np.convolve(shannon_energy, [1.0/mean_window_len]*mean_window_len, mode='same')
#lp_energy = scipy.signal.filtfilt(*lowpass2, x=shannon_energy)
lp_energy = scipy.ndimage.gaussian_filter1d(lp_energy, sampling_rate/8.0)
lp_energy_diff = np.diff(lp_energy)
rpeaks = (lp_energy_diff[:-1] > 0) & (lp_energy_diff[1:] < 0)
rpeaks = np.flatnonzero(rpeaks)
rpeaks -= 1
return(rpeaks)
|
def segmenter_pekkanen(ecg, sampling_rate, window_size=5.0, lfreq=5.0, hfreq=15.0)
|
ECG R peak detection based on `Kathirvel et al. (2011) <http://link.springer.com/article/10.1007/s13239-011-0065-3/fulltext.html>`_ with some tweaks (mainly robust estimation of the rectified-signal cutoff threshold).
Parameters
----------
ecg : list or ndarray
ECG signal array.
sampling_rate : int
Sampling rate (samples/second).
window_size : float
Ransac window size.
lfreq : float
Low frequency of the band pass filter.
hfreq : float
High frequency of the band pass filter.
Returns
----------
rpeaks : ndarray
R peaks location.
Example
----------
>>> import neurokit as nk
>>> rpeaks = nk.segmenter_pekkanen(ecg_signal, 1000)
*Authors*
- `Jami Pekkanen <https://github.com/jampekka>`_
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- scipy
- numpy
*See Also*
- rpeakdetect: https://github.com/tru-hy/rpeakdetect
| 2.565333 | 2.671913 | 0.960111 |
erp = {}
data = eeg_to_df(eeg, index=index, include=include, exclude=exclude, hemisphere=hemisphere, central=central)
for epoch_index, epoch in data.items():
# Segment according to window
if isinstance(times, list):
if isinstance(times[0], list):
values = {}
for window_index, window in enumerate(times):
df = epoch[window[0]:window[1]]
value = df.mean().mean()
values[window_index] = value
erp[epoch_index] = values
else:
df = epoch[times[0]:times[1]]
value = df.mean().mean()
erp[epoch_index] = [value]
elif isinstance(times, tuple):
values = {}
for window_index, window in enumerate(times):
df = epoch[window[0]:window[1]]
if method == "max":
value = df.mean().max()
elif method == "min":
value = df.mean().min()
else:
value = df.mean().mean()
values[window_index] = value
erp[epoch_index] = values
else:
df = epoch[0:]
value = df.mean().mean()
erp[epoch_index] = [value]
# Convert to dataframe
erp = pd.DataFrame.from_dict(erp, orient="index")
if isinstance(names, str):
names = [names]
if len(names) == len(erp.columns):
erp.columns = names
return(erp)
|
def eeg_erp(eeg, times=None, index=None, include="all", exclude=None, hemisphere="both", central=True, verbose=True, names="ERP", method="mean")
|
DOCS INCOMPLETE :(
| 1.929082 | 1.926727 | 1.001222 |
# Preserve original
all_epochs_current = all_epochs.copy()
# Filter using Savitzky-Golay polynomial method
if (filter_hfreq is not None) and (isinstance(filter_hfreq, int)):
for participant, epochs in all_epochs_current.items():
all_epochs_current[participant] = epochs.savgol_filter(filter_hfreq, copy=True)
# Crop
if isinstance(times, list) and len(times) == 2:
for participant, epochs in all_epochs_current.items():
all_epochs_current[participant] = epochs.copy().crop(times[0], times[1])
# Transform to evokeds
all_evokeds = eeg_to_all_evokeds(all_epochs_current, conditions=conditions)
data = {}
for participant, epochs in all_evokeds.items():
for condition, epoch in epochs.items():
data[condition] = []
for participant, epochs in all_evokeds.items():
for condition, epoch in epochs.items():
data[condition].append(epoch)
conditions = list(data.keys())
# Line styles
if isinstance(linestyle, str):
linestyle = [linestyle] * len(conditions)
elif isinstance(linestyle, list) and len(linestyle) >= len(conditions):
pass
elif isinstance(linestyle, dict) and len(linestyle.keys()) >= len(conditions):
linestyle = [linestyle[cond] for cond in conditions]
else:
print("NeuroKit Warning: plot_eeg_erp(): linestyle must be either a str, a list or a dict.")
# Colors
if isinstance(colors, str):
colors = {condition: colors for condition in conditions}
elif isinstance(colors, list) and len(colors) >= len(conditions):
colors= {condition: colors[index] for index, condition in enumerate(conditions)}
elif isinstance(colors, dict) and len(colors.keys()) >= len(conditions):
pass
elif colors is None:
pass
else:
print("NeuroKit Warning: plot_eeg_erp(): colors must be either a str, a list, a dict or None.")
# Modify styles
styles = {}
for index, condition in enumerate(conditions):
styles[condition] = {"linewidth": linewidth, "linestyle": linestyle[index]}
# Select electrodes
picks = mne.pick_types(epoch.info, eeg=True, selection=eeg_select_electrodes(epoch, include=include, exclude=exclude, hemisphere=hemisphere, central=central))
# Plot
try:
plot = mne.viz.plot_compare_evokeds(data, picks=picks, colors=colors, styles=styles, title=name, gfp=gfp, ci=ci, invert_y=invert_y, ci_alpha=ci_alpha)
except TypeError:
print("NeuroKit Warning: plot_eeg_erp(): You're using a version of mne that does not support ci_alpha or ci_method parameters. Leaving defaults.")
plot = mne.viz.plot_compare_evokeds(data, picks=picks, colors=colors, styles=styles, title=name, gfp=gfp, ci=ci, invert_y=invert_y)
return(plot)
|
def plot_eeg_erp(all_epochs, conditions=None, times=None, include="all", exclude=None, hemisphere="both", central=True, name=None, colors=None, gfp=False, ci=0.95, ci_alpha=0.333, invert_y=False, linewidth=1, linestyle="-", filter_hfreq=None)
|
DOCS INCOMPLETE :(
| 2.206293 | 2.199164 | 1.003242 |
all_evokeds = eeg_to_all_evokeds(all_epochs)
data = {}
for participant, epochs in all_evokeds.items():
for cond, epoch in epochs.items():
data[cond] = []
for participant, epochs in all_evokeds.items():
for cond, epoch in epochs.items():
data[cond].append(epoch)
if colors is not None:
color_list = []
else:
color_list = None
evokeds = []
for condition, evoked in data.items():
grand_average = mne.grand_average(evoked)
grand_average.comment = condition
evokeds += [grand_average]
if colors is not None:
color_list.append(colors[condition])
plot = mne.viz.plot_evoked_topo(evokeds, background_color="w", color=color_list)
return(plot)
|
def plot_eeg_erp_topo(all_epochs, colors=None)
|
Plot a topographic butterfly plot of the grand-averaged evoked responses (one trace per condition).
DOCS INCOMPLETE :(
| 2.57373 | 2.553671 | 1.007855 |
if compress is True:
with gzip.open(path + filename + "." + extension, 'wb') as name:
pickle.dump(obj, name, protocol=compatibility)
else:
with open(path + filename + "." + extension, 'wb') as name:
pickle.dump(obj, name, protocol=compatibility)
|
def save_nk_object(obj, filename="file", path="", extension="nk", compress=False, compatibility=-1)
|
Save whatever python object to a pickled file.
Parameters
----------
obj : object
Any Python object (list, dict, ...).
filename : str
File's name.
path : str
File's path.
extension : str
File's extension. Default "nk" but can be whatever.
compress : bool
Enable compression using gzip.
compatibility : int
See :func:`pickle.dump`.
Example
----------
>>> import neurokit as nk
>>> obj = [1, 2]
>>> nk.save_nk_object(obj, filename="myobject")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- pickle
- gzip
| 1.91628 | 2.296419 | 0.834464 |
filename = path + filename
try:
with open(filename, 'rb') as name:
file = pickle.load(name)
except pickle.UnpicklingError:
with gzip.open(filename, 'rb') as name:
file = pickle.load(name)
except ModuleNotFoundError: # In case you're trying to unpickle a dataframe made with pandas < 0.17
try:
file = pd.read_pickle(filename)
except:
pass
return(file)
|
def read_nk_object(filename, path="")
|
Read a pickled file.
Parameters
----------
filename : str
Full file's name (with extension).
path : str
File's path.
Example
----------
>>> import neurokit as nk
>>> obj = [1, 2]
>>> nk.save_nk_object(obj, filename="myobject")
>>> loaded_obj = nk.read_nk_object("myobject.nk")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- pickle
- gzip
| 3.70378 | 4.568792 | 0.810669 |
if platform.system() == 'Windows':
return(os.path.getctime(path))
else:
stat = os.stat(path)
try:
return(stat.st_birthtime)
except AttributeError:
print("Neuropsydia error: get_creation_date(): We're probably on Linux. No easy way to get creation dates here, so we'll settle for when its content was last modified.")
return(stat.st_mtime)
|
def find_creation_date(path)
|
Try to get the date that a file was created, falling back to when it was last modified if that's not possible.
Parameters
----------
path : str
File's path.
Returns
----------
creation_date : str
Time of file creation.
Example
----------
>>> import neurokit as nk
>>> import datetime
>>>
>>> creation_date = nk.find_creation_date(file)
>>> creation_date = datetime.datetime.fromtimestamp(creation_date)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
- Mark Amery
*Dependencies*
- platform
- os
*See Also*
- http://stackoverflow.com/a/39501288/1709587
| 3.797024 | 3.628842 | 1.046346 |
from . import __version__
d = OrderedDict()
d['Version'] = '%s' % __version__
for key, val in PyVisaLibrary.get_session_classes().items():
key_name = '%s %s' % (key[0].name.upper(), key[1])
try:
d[key_name] = getattr(val, 'session_issue').split('\n')
except AttributeError:
d[key_name] = 'Available ' + val.get_low_level_info()
return d
|
def get_debug_info()
|
Return a list of lines with backend info.
| 5.355957 | 5.166027 | 1.036765 |
session = None
while session is None or session in self.sessions:
session = random.randint(1000000, 9999999)
self.sessions[session] = obj
return session
|
def _register(self, obj)
|
Creates a random but unique session handle for a session object,
registers it in the sessions dictionary and returns the value.
:param obj: a session object.
:return: session handle
:rtype: int
| 3.349382 | 2.740734 | 1.222075 |
logger.debug('%s%s -> %r',
func.__name__, _args_to_str(arguments), ret_value,
extra=self._logging_extra)
try:
ret_value = StatusCode(ret_value)
except ValueError:
pass
self._last_status = ret_value
# The first argument of almost all registered visa functions is a session.
# We store the error code per session
session = None
if func.__name__ not in ('viFindNext', ):
try:
session = arguments[0]
except KeyError:
raise Exception('Function %r does not seem to be a valid '
'visa function (len args %d)' % (func, len(arguments)))
# Functions that use the first parameter to get a session value.
if func.__name__ in ('viOpenDefaultRM', ):
# noinspection PyProtectedMember
session = session._obj.value
if isinstance(session, integer_types):
self._last_status_in_session[session] = ret_value
else:
# Functions that might or might have a session in the first argument.
if func.__name__ not in ('viClose', 'viGetAttribute', 'viSetAttribute', 'viStatusDesc'):
raise Exception('Function %r does not seem to be a valid '
'visa function (type args[0] %r)' % (func, type(session)))
if ret_value < 0:
raise errors.VisaIOError(ret_value)
if ret_value in self.issue_warning_on:
if session and ret_value not in self._ignore_warning_in_session[session]:
warnings.warn(errors.VisaIOWarning(ret_value), stacklevel=2)
return ret_value
|
def _return_handler(self, ret_value, func, arguments)
|
Check return values for errors and warnings.
TODO: THIS IS JUST COPIED PASTED FROM NIVisaLibrary.
Needs to be adapted.
| 4.210368 | 3.999854 | 1.05263 |
try:
sess = self.sessions[session]
except KeyError:
return constants.StatusCode.error_invalid_object
return sess.clear()
|
def clear(self, session)
|
Clears a device.
Corresponds to viClear function of the VISA library.
:param session: Unique logical identifier to a session.
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
| 9.11718 | 6.331506 | 1.43997 |
try:
return self.sessions[session].gpib_command(command_byte)
except KeyError:
return constants.StatusCode.error_invalid_object
|
def gpib_command(self, session, command_byte)
|
Write GPIB command byte on the bus.
Corresponds to viGpibCommand function of the VISA library.
See: https://linux-gpib.sourceforge.io/doc_html/gpib-protocol.html#REFERENCE-COMMAND-BYTES
:param command_byte: command byte to send
:type command_byte: int, must be in [0, 255]
:return: return value of the library call
:rtype: :class:`pyvisa.constants.StatusCode`
| 6.69687 | 6.352597 | 1.054194 |
try:
return self.sessions[session].assert_trigger(protocol)
except KeyError:
return constants.StatusCode.error_invalid_object
|
def assert_trigger(self, session, protocol)
|
Asserts software or hardware trigger.
Corresponds to viAssertTrigger function of the VISA library.
:param session: Unique logical identifier to a session.
:param protocol: Trigger protocol to use during assertion. (Constants.PROT*)
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
| 10.127504 | 6.620013 | 1.529831 |