index (int64, 0-731k) | package (string, lengths 2-98, ⌀) | name (string, lengths 1-76) | docstring (string, lengths 0-281k, ⌀) | code (string, lengths 4-1.07M, ⌀) | signature (string, lengths 2-42.8k, ⌀) |
---|---|---|---|---|---|
727,970 | mod_wsgi_van.server | update | null | def update(self, version: typing.Any) -> None:
if self.version != version:
# Clear import cache and reload
self.import_cache = {}
self.load()
self.version = version
| (self, version: Any) -> NoneType |
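The `update` method above gates a cache flush and reload on a version change. Below is a minimal, self-contained sketch of that pattern; the `Server` class, its `load()` method and the version values are hypothetical stand-ins, not the real mod_wsgi_van API.

```python
# Minimal sketch of the version-gated reload pattern shown above.
# `Server`, `load()` and the version strings are hypothetical stand-ins.
import typing


class Server:
    def __init__(self) -> None:
        self.version: typing.Any = None
        self.import_cache: dict = {}

    def load(self) -> None:
        # Placeholder for the real (re)loading work.
        self.import_cache["app"] = object()

    def update(self, version: typing.Any) -> None:
        # Only flush the cache and reload when the version actually changes.
        if self.version != version:
            self.import_cache = {}
            self.load()
            self.version = version


server = Server()
server.update("v1")   # first call: cache cleared (already empty) and loaded
server.update("v1")   # same version: nothing happens
server.update("v2")   # new version: cache cleared and reloaded
```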
727,973 | tables.array | Array | This class represents homogeneous datasets in an HDF5 file.
This class provides methods to write or read data to or from array objects
in the file. This class does not allow you to enlarge or compress
the datasets on disk; use the EArray class (see :ref:`EArrayClassDescr`) if
you want enlargeable dataset support or compression features, or CArray
(see :ref:`CArrayClassDescr`) if you just want compression.
An interesting property of the Array class is that it remembers the
*flavor* of the object that has been saved so that if you saved, for
example, a list, you will get a list when reading it back afterwards; if you
saved a NumPy array, you will get a NumPy object, and so forth.
Note that this class inherits all the public attributes and methods that
Leaf (see :ref:`LeafClassDescr`) already provides. However, as Array
instances have no internal I/O buffers, it is not necessary to use the
flush() method they inherit from Leaf in order to save their internal state
to disk. When a writing method call returns, all the data is already on
disk.
Parameters
----------
parentnode
The parent :class:`Group` object.
.. versionchanged:: 3.0
Renamed from *parentNode* to *parentnode*
name : str
The name of this node in its parent group.
obj
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars as well as native Python sequences and
scalars, provided that values are regular (i.e. they are not
like ``[[1,2],2]``) and homogeneous (i.e. all the elements are
of the same type).
.. versionchanged:: 3.0
Renamed from *object* to *obj*.
title
A description for this node (it sets the ``TITLE`` HDF5 attribute on
disk).
byteorder
The byteorder of the data *on disk*, specified as 'little' or 'big'.
If this is not specified, the byteorder is that of the given `object`.
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time, object
birth time); default True. Semantics of these times depend on their
implementation in the HDF5 library: refer to documentation of the
H5O_info_t data structure. As of HDF5 1.8.15, only ctime (metadata
change time) is implemented.
.. versionadded:: 3.4.3
| class Array(hdf5extension.Array, Leaf):
"""This class represents homogeneous datasets in an HDF5 file.
This class provides methods to write or read data to or from array objects
in the file. This class does not allow you to enlarge or compress
the datasets on disk; use the EArray class (see :ref:`EArrayClassDescr`) if
you want enlargeable dataset support or compression features, or CArray
(see :ref:`CArrayClassDescr`) if you just want compression.
An interesting property of the Array class is that it remembers the
*flavor* of the object that has been saved so that if you saved, for
example, a list, you will get a list when reading it back afterwards; if you
saved a NumPy array, you will get a NumPy object, and so forth.
Note that this class inherits all the public attributes and methods that
Leaf (see :ref:`LeafClassDescr`) already provides. However, as Array
instances have no internal I/O buffers, it is not necessary to use the
flush() method they inherit from Leaf in order to save their internal state
to disk. When a writing method call returns, all the data is already on
disk.
Parameters
----------
parentnode
The parent :class:`Group` object.
.. versionchanged:: 3.0
Renamed from *parentNode* to *parentnode*
name : str
The name of this node in its parent group.
obj
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars as well as native Python sequences and
scalars, provided that values are regular (i.e. they are not
like ``[[1,2],2]``) and homogeneous (i.e. all the elements are
of the same type).
.. versionchanged:: 3.0
Renamed from *object* to *obj*.
title
A description for this node (it sets the ``TITLE`` HDF5 attribute on
disk).
byteorder
The byteorder of the data *on disk*, specified as 'little' or 'big'.
If this is not specified, the byteorder is that of the given `object`.
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time, object
birth time); default True. Semantics of these times depend on their
implementation in the HDF5 library: refer to documentation of the
H5O_info_t data structure. As of HDF5 1.8.15, only ctime (metadata
change time) is implemented.
.. versionadded:: 3.4.3
"""
# Class identifier.
_c_classid = 'ARRAY'
@lazyattr
def dtype(self):
"""The NumPy ``dtype`` that most closely matches this array."""
return self.atom.dtype
@property
def nrows(self):
"""The number of rows in the array."""
if self.shape == ():
return SizeType(1) # scalar case
else:
return self.shape[self.maindim]
@property
def rowsize(self):
"""The size of the rows in bytes in dimensions orthogonal to
*maindim*."""
maindim = self.maindim
rowsize = self.atom.size
for i, dim in enumerate(self.shape):
if i != maindim:
rowsize *= dim
return rowsize
@property
def size_in_memory(self):
"""The size of this array's data in bytes when it is fully loaded into
memory."""
return self.nrows * self.rowsize
def __init__(self, parentnode, name,
obj=None, title="",
byteorder=None, _log=True, _atom=None,
track_times=True):
self._v_version = None
"""The object version of this array."""
self._v_new = new = obj is not None
"""Is this the first time the node has been created?"""
self._v_new_title = title
"""New title for this node."""
self._obj = obj
"""The object to be stored in the array. It can be any of numpy,
list, tuple, string, integer or floating point types, provided
that they are regular (i.e. they are not like ``[[1, 2], 2]``).
.. versionchanged:: 3.0
Renamed from *_object* to *_obj*.
"""
self._v_convert = True
"""Whether the ``Array`` object must be converted or not."""
# Miscellaneous iteration rubbish.
self._start = None
"""Starting row for the current iteration."""
self._stop = None
"""Stopping row for the current iteration."""
self._step = None
"""Step size for the current iteration."""
self._nrowsread = None
"""Number of rows read up to the current state of iteration."""
self._startb = None
"""Starting row for current buffer."""
self._stopb = None
"""Stopping row for current buffer. """
self._row = None
"""Current row in iterators (sentinel)."""
self._init = False
"""Whether we are in the middle of an iteration or not (sentinel)."""
self.listarr = None
"""Current buffer in iterators."""
# Documented (*public*) attributes.
self.atom = _atom
"""An Atom (see :ref:`AtomClassDescr`) instance representing the *type*
and *shape* of the atomic objects to be saved.
"""
self.shape = None
"""The shape of the stored array."""
self.nrow = None
"""On iterators, this is the index of the current row."""
self.extdim = -1 # ordinary arrays are not enlargeable
"""The index of the enlargeable dimension."""
# Ordinary arrays have no filters: leaf is created with default ones.
super().__init__(parentnode, name, new, Filters(), byteorder, _log,
track_times)
def _g_create(self):
"""Save a new array in file."""
self._v_version = obversion
try:
# `Leaf._g_post_init_hook()` should be setting the flavor on disk.
self._flavor = flavor = flavor_of(self._obj)
nparr = array_as_internal(self._obj, flavor)
except Exception: # XXX
# Problems converting data. Close the node and re-raise exception.
self.close(flush=0)
raise
# Raise an error in case of unsupported object
if nparr.dtype.kind in ['V', 'U', 'O']: # in void, unicode, object
raise TypeError("Array objects cannot currently deal with void, "
"unicode or object arrays")
# Decrease the number of references to the object
self._obj = None
# Fix the byteorder of data
nparr = self._g_fix_byteorder_data(nparr, nparr.dtype.byteorder)
# Create the array on-disk
try:
# ``self._v_objectid`` needs to be set because it will be
# needed for setting attributes in some descendants later
# on
(self._v_objectid, self.shape, self.atom) = self._create_array(
nparr, self._v_new_title, self.atom)
except Exception: # XXX
# Problems creating the Array on disk. Close node and re-raise.
self.close(flush=0)
raise
# Compute the optimal buffer size
self.nrowsinbuf = self._calc_nrowsinbuf()
# Arrays don't have chunkshapes (so, set it to None)
self._v_chunkshape = None
return self._v_objectid
def _g_open(self):
"""Get the metadata info for an array in file."""
(oid, self.atom, self.shape, self._v_chunkshape) = self._open_array()
self.nrowsinbuf = self._calc_nrowsinbuf()
return oid
def get_enum(self):
"""Get the enumerated type associated with this array.
If this array is of an enumerated type, the corresponding Enum instance
(see :ref:`EnumClassDescr`) is returned. If it is not of an enumerated
type, a TypeError is raised.
"""
if self.atom.kind != 'enum':
raise TypeError("array ``%s`` is not of an enumerated type"
% self._v_pathname)
return self.atom.enum
def iterrows(self, start=None, stop=None, step=None):
"""Iterate over the rows of the array.
This method returns an iterator yielding an object of the current
flavor for each selected row in the array. The returned rows are taken
from the *main dimension*.
If a range is not supplied, *all the rows* in the array are iterated
upon - you can also use the :meth:`Array.__iter__` special method for
that purpose. If you only want to iterate over a given *range of rows*
in the array, you may use the start, stop and step parameters.
Examples
--------
::
result = [row for row in arrayInstance.iterrows(step=4)]
.. versionchanged:: 3.0
If the *start* parameter is provided and *stop* is None then the
array is iterated from *start* to the last line.
In PyTables < 3.0 only one element was returned.
"""
try:
(self._start, self._stop, self._step) = self._process_range(
start, stop, step)
except IndexError:
# If problems with indexes, silently return the null tuple
return ()
self._init_loop()
return self
def __iter__(self):
"""Iterate over the rows of the array.
This is equivalent to calling :meth:`Array.iterrows` with default
arguments, i.e. it iterates over *all the rows* in the array.
Examples
--------
::
result = [row[2] for row in array]
Which is equivalent to::
result = [row[2] for row in array.iterrows()]
"""
if not self._init:
# If the iterator is called directly, assign default variables
self._start = 0
self._stop = self.nrows
self._step = 1
# and initialize the loop
self._init_loop()
return self
def _init_loop(self):
"""Initialization for the __iter__ iterator."""
self._nrowsread = self._start
self._startb = self._start
self._row = -1 # Sentinel
self._init = True # Sentinel
self.nrow = SizeType(self._start - self._step) # row number
def __next__(self):
"""Get the next element of the array during an iteration.
The element is returned as an object of the current flavor.
"""
# this could probably be sped up for long iterations by reusing the
# listarr buffer
if self._nrowsread >= self._stop:
self._init = False
self.listarr = None # fixes issue #308
raise StopIteration # end of iteration
else:
# Read a chunk of rows
if self._row + 1 >= self.nrowsinbuf or self._row < 0:
self._stopb = self._startb + self._step * self.nrowsinbuf
# Protection for reading more elements than needed
if self._stopb > self._stop:
self._stopb = self._stop
listarr = self._read(self._startb, self._stopb, self._step)
# Swap the axes to ease the return of elements
if self.extdim > 0:
listarr = listarr.swapaxes(self.extdim, 0)
self.listarr = internal_to_flavor(listarr, self.flavor)
self._row = -1
self._startb = self._stopb
self._row += 1
self.nrow += self._step
self._nrowsread += self._step
# Fixes bug #968132
# if self.listarr.shape:
if self.shape:
return self.listarr[self._row]
else:
return self.listarr # Scalar case
def _interpret_indexing(self, keys):
"""Internal routine used by __getitem__ and __setitem__"""
maxlen = len(self.shape)
shape = (maxlen,)
startl = np.empty(shape=shape, dtype=SizeType)
stopl = np.empty(shape=shape, dtype=SizeType)
stepl = np.empty(shape=shape, dtype=SizeType)
stop_None = np.zeros(shape=shape, dtype=SizeType)
if not isinstance(keys, tuple):
keys = (keys,)
nkeys = len(keys)
dim = 0
# There is some problem when dealing with [..., ...] params,
# but this is a somewhat odd way to pass parameters anyway
for key in keys:
ellipsis = 0 # Sentinel
if isinstance(key, type(Ellipsis)):
ellipsis = 1
for diml in range(dim, len(self.shape) - (nkeys - dim) + 1):
startl[dim] = 0
stopl[dim] = self.shape[diml]
stepl[dim] = 1
dim += 1
elif dim >= maxlen:
raise IndexError("Too many indices for object '%s'" %
self._v_pathname)
elif is_idx(key):
key = operator.index(key)
# Protection for index out of range
if key >= self.shape[dim]:
raise IndexError("Index out of range")
if key < 0:
# To support negative values (Fixes bug #968149)
key += self.shape[dim]
start, stop, step = self._process_range(
key, key + 1, 1, dim=dim)
stop_None[dim] = 1
elif isinstance(key, slice):
start, stop, step = self._process_range(
key.start, key.stop, key.step, dim=dim)
else:
raise TypeError("Non-valid index or slice: %s" % key)
if not ellipsis:
startl[dim] = start
stopl[dim] = stop
stepl[dim] = step
dim += 1
# Complete the other dimensions, if needed
if dim < len(self.shape):
for diml in range(dim, len(self.shape)):
startl[dim] = 0
stopl[dim] = self.shape[diml]
stepl[dim] = 1
dim += 1
# Compute the shape for the container properly. Fixes #1288792
shape = []
for dim in range(len(self.shape)):
new_dim = len(range(startl[dim], stopl[dim], stepl[dim]))
if not (new_dim == 1 and stop_None[dim]):
shape.append(new_dim)
return startl, stopl, stepl, shape
def _fancy_selection(self, args):
"""Performs a NumPy-style fancy selection in `self`.
Implements advanced NumPy-style selection operations in
addition to the standard slice-and-int behavior.
Indexing arguments may be ints, slices or lists of indices.
Note: This is a backport from the h5py project.
"""
# Internal functions
def validate_number(num, length):
"""Validate a list member for the given axis length."""
try:
num = int(num)
except TypeError:
raise TypeError("Illegal index: %r" % num)
if num > length - 1:
raise IndexError("Index out of bounds: %d" % num)
def expand_ellipsis(args, rank):
"""Expand ellipsis objects and fill in missing axes."""
n_el = sum(1 for arg in args if arg is Ellipsis)
if n_el > 1:
raise IndexError("Only one ellipsis may be used.")
elif n_el == 0 and len(args) != rank:
args = args + (Ellipsis,)
final_args = []
n_args = len(args)
for idx, arg in enumerate(args):
if arg is Ellipsis:
final_args.extend((slice(None),) * (rank - n_args + 1))
else:
final_args.append(arg)
if len(final_args) > rank:
raise IndexError("Too many indices.")
return final_args
def translate_slice(exp, length):
"""Given a slice object, return a 3-tuple (start, count, step)
This is for use with the hyperslab selection routines.
"""
start, stop, step = exp.start, exp.stop, exp.step
if start is None:
start = 0
else:
start = int(start)
if stop is None:
stop = length
else:
stop = int(stop)
if step is None:
step = 1
else:
step = int(step)
if step < 1:
raise IndexError("Step must be >= 1 (got %d)" % step)
if stop == start:
raise IndexError("Zero-length selections are not allowed")
if stop < start:
raise IndexError("Reverse-order selections are not allowed")
if start < 0:
start = length + start
if stop < 0:
stop = length + stop
if not 0 <= start <= (length - 1):
raise IndexError(
"Start index %s out of range (0-%d)" % (start, length - 1))
if not 1 <= stop <= length:
raise IndexError(
"Stop index %s out of range (1-%d)" % (stop, length))
count = (stop - start) // step
if (stop - start) % step != 0:
count += 1
if start + count > length:
raise IndexError(
"Selection out of bounds (%d; axis has %d)" %
(start + count, length))
return start, count, step
# Main code for _fancy_selection
mshape = []
selection = []
if not isinstance(args, tuple):
args = (args,)
args = expand_ellipsis(args, len(self.shape))
list_seen = False
reorder = None
for idx, (exp, length) in enumerate(zip(args, self.shape)):
if isinstance(exp, slice):
start, count, step = translate_slice(exp, length)
selection.append((start, count, step, idx, "AND"))
mshape.append(count)
else:
try:
exp = list(exp)
except TypeError:
exp = [exp] # Handle scalar index as a list of length 1
mshape.append(0) # Keep track of scalar index for NumPy
else:
mshape.append(len(exp))
if len(exp) == 0:
raise IndexError(
"Empty selections are not allowed (axis %d)" % idx)
elif len(exp) > 1:
if list_seen:
raise IndexError("Only one selection list is allowed")
else:
list_seen = True
else:
if (not isinstance(exp[0], (int, np.integer)) or
(isinstance(exp[0], np.ndarray) and not
np.issubdtype(exp[0].dtype, np.integer))):
raise TypeError("Only integer coordinates allowed.")
nexp = np.asarray(exp, dtype="i8")
# Convert negative values
nexp = np.where(nexp < 0, length + nexp, nexp)
# Check whether the list is ordered or not
# (only one unordered list is allowed)
if len(nexp) != len(np.unique(nexp)):
raise IndexError(
"Selection lists cannot have repeated values")
neworder = nexp.argsort()
if (neworder.shape != (len(exp),) or
np.sum(np.abs(neworder - np.arange(len(exp)))) != 0):
if reorder is not None:
raise IndexError(
"Only one selection list can be unordered")
corrected_idx = sum(1 for x in mshape if x != 0) - 1
reorder = (corrected_idx, neworder)
nexp = nexp[neworder]
for select_idx in range(len(nexp) + 1):
# This crazy piece of code performs a list selection
# using HDF5 hyperslabs.
# For each index, perform a "NOTB" selection on every
# portion of *this axis* which falls *outside* the list
# selection. For this to work, the input array MUST be
# monotonically increasing.
if select_idx < len(nexp):
validate_number(nexp[select_idx], length)
if select_idx == 0:
start = 0
count = nexp[0]
elif select_idx == len(nexp):
start = nexp[-1] + 1
count = length - start
else:
start = nexp[select_idx - 1] + 1
count = nexp[select_idx] - start
if count > 0:
selection.append((start, count, 1, idx, "NOTB"))
mshape = tuple(x for x in mshape if x != 0)
return selection, reorder, mshape
def __getitem__(self, key):
"""Get a row, a range of rows or a slice from the array.
The set of tokens allowed for the key is the same as that for extended
slicing in Python (including the Ellipsis or ... token). The result is
an object of the current flavor; its shape depends on the kind of slice
used as key and the shape of the array itself.
Furthermore, NumPy-style fancy indexing, where a list of indices in a
certain axis is specified, is also supported. Note that only one list
per selection is supported right now. Finally, NumPy-style point and
boolean selections are supported as well.
Examples
--------
::
array1 = array[4] # simple selection
array2 = array[4:1000:2] # slice selection
array3 = array[1, ..., ::2, 1:4, 4:] # general slice selection
array4 = array[1, [1,5,10], ..., -1] # fancy selection
array5 = array[np.where(array[:] > 4)] # point selection
array6 = array[array[:] > 4] # boolean selection
"""
self._g_check_open()
try:
# First, try with a regular selection
startl, stopl, stepl, shape = self._interpret_indexing(key)
arr = self._read_slice(startl, stopl, stepl, shape)
except TypeError:
# Then, try with a point-wise selection
try:
coords = self._point_selection(key)
arr = self._read_coords(coords)
except TypeError:
# Finally, try with a fancy selection
selection, reorder, shape = self._fancy_selection(key)
arr = self._read_selection(selection, reorder, shape)
if self.flavor == "numpy" or not self._v_convert:
return arr
return internal_to_flavor(arr, self.flavor)
def __setitem__(self, key, value):
"""Set a row, a range of rows or a slice in the array.
It takes different actions depending on the type of the key parameter:
if it is an integer, the corresponding array row is set to value (the
value is broadcast when needed). If key is a slice, the row slice
determined by it is set to value (as usual, if the slice to be updated
exceeds the actual shape of the array, only the values in the existing
range are updated).
If value is a multidimensional object, then its shape must be
compatible with the shape determined by key, otherwise, a ValueError
will be raised.
Furthermore, NumPy-style fancy indexing, where a list of indices in a
certain axis is specified, is also supported. Note that only one list
per selection is supported right now. Finally, NumPy-style point and
boolean selections are supported as well.
Examples
--------
::
a1[0] = 333 # assign an integer to an Integer Array row
a2[0] = 'b' # assign a string to a string Array row
a3[1:4] = 5 # broadcast 5 to slice 1:4
a4[1:4:2] = 'xXx' # broadcast 'xXx' to slice 1:4:2
# General slice update (a5.shape = (4,3,2,8,5,10)).
a5[1, ..., ::2, 1:4, 4:] = numpy.arange(432).reshape((3, 2, 4, 3, 6))
a6[1, [1,5,10], ..., -1] = arr # fancy selection
a7[np.where(a6[:] > 4)] = 4 # point selection + broadcast
a8[arr > 4] = arr2 # boolean selection
"""
self._g_check_open()
# Create an array compliant with the specified slice
nparr = convert_to_np_atom2(value, self.atom)
if nparr.size == 0:
return
# truncate data if least_significant_digit filter is set
# TODO: add the least_significant_digit attribute to the array on disk
if (self.filters.least_significant_digit is not None and
not np.issubdtype(nparr.dtype, np.signedinteger)):
nparr = quantize(nparr, self.filters.least_significant_digit)
try:
startl, stopl, stepl, shape = self._interpret_indexing(key)
self._write_slice(startl, stopl, stepl, shape, nparr)
except TypeError:
# Then, try with a point-wise selection
try:
coords = self._point_selection(key)
self._write_coords(coords, nparr)
except TypeError:
selection, reorder, shape = self._fancy_selection(key)
self._write_selection(selection, reorder, shape, nparr)
def _check_shape(self, nparr, slice_shape):
"""Test that nparr shape is consistent with underlying object.
If not, try creating a new nparr object, using broadcasting if
necessary.
"""
if nparr.shape != (slice_shape + self.atom.dtype.shape):
# Create an array compliant with the specified shape
narr = np.empty(shape=slice_shape, dtype=self.atom.dtype)
# Assign the value to it. It will raise a ValueError exception
# if the objects cannot be broadcast to a single shape.
narr[...] = nparr
return narr
else:
return nparr
def _read_slice(self, startl, stopl, stepl, shape):
"""Read a slice based on `startl`, `stopl` and `stepl`."""
nparr = np.empty(dtype=self.atom.dtype, shape=shape)
# Protection against reading empty arrays
if 0 not in shape:
# Arrays that have non-zero dimensionality
self._g_read_slice(startl, stopl, stepl, nparr)
# For zero-shaped arrays, return the scalar
if nparr.shape == ():
nparr = nparr[()]
return nparr
def _read_coords(self, coords):
"""Read a set of points defined by `coords`."""
nparr = np.empty(dtype=self.atom.dtype, shape=len(coords))
if len(coords) > 0:
self._g_read_coords(coords, nparr)
# For zero-shaped arrays, return the scalar
if nparr.shape == ():
nparr = nparr[()]
return nparr
def _read_selection(self, selection, reorder, shape):
"""Read a `selection`.
Reorder if necessary.
"""
# Create the container for the slice
nparr = np.empty(dtype=self.atom.dtype, shape=shape)
# Arrays that have non-zero dimensionality
self._g_read_selection(selection, nparr)
# For zero-shaped arrays, return the scalar
if nparr.shape == ():
nparr = nparr[()]
elif reorder is not None:
# We need to reorder the array
idx, neworder = reorder
k = [slice(None)] * len(shape)
k[idx] = neworder.argsort()
# Apparently, a copy is not needed here, but we do it
# for symmetry with the `_write_selection()` method.
nparr = nparr[tuple(k)].copy()
return nparr
def _write_slice(self, startl, stopl, stepl, shape, nparr):
"""Write `nparr` in a slice based on `startl`, `stopl` and `stepl`."""
nparr = self._check_shape(nparr, tuple(shape))
countl = ((stopl - startl - 1) // stepl) + 1
self._g_write_slice(startl, stepl, countl, nparr)
def _write_coords(self, coords, nparr):
"""Write `nparr` values in points defined by `coords` coordinates."""
if len(coords) > 0:
nparr = self._check_shape(nparr, (len(coords),))
self._g_write_coords(coords, nparr)
def _write_selection(self, selection, reorder, shape, nparr):
"""Write `nparr` in `selection`.
Reorder if necessary.
"""
nparr = self._check_shape(nparr, tuple(shape))
# Check whether we should reorder the array
if reorder is not None:
idx, neworder = reorder
k = [slice(None)] * len(shape)
k[idx] = neworder
# For a reason I don't understand well, we need a copy of
# the reordered array
nparr = nparr[tuple(k)].copy()
self._g_write_selection(selection, nparr)
def _read(self, start, stop, step, out=None):
"""Read the array from disk without slice or flavor processing."""
nrowstoread = len(range(start, stop, step))
shape = list(self.shape)
if shape:
shape[self.maindim] = nrowstoread
if out is None:
arr = np.empty(dtype=self.atom.dtype, shape=shape)
else:
bytes_required = self.rowsize * nrowstoread
# if buffer is too small, it will segfault
if bytes_required != out.nbytes:
raise ValueError(f'output array size invalid, got {out.nbytes}'
f' bytes, need {bytes_required} bytes')
if not out.flags['C_CONTIGUOUS']:
raise ValueError('output array not C contiguous')
arr = out
# Protection against reading empty arrays
if 0 not in shape:
# Arrays that have non-zero dimensionality
self._read_array(start, stop, step, arr)
# data is always read in the system byteorder
# if the out array's byteorder is different, do a byteswap
if (out is not None and
byteorders[arr.dtype.byteorder] != sys.byteorder):
arr.byteswap(True)
return arr
def read(self, start=None, stop=None, step=None, out=None):
"""Get data in the array as an object of the current flavor.
The start, stop and step parameters can be used to select only a
*range of rows* in the array. Their meanings are the same as in
the built-in range() Python function, except that negative values
of step are not allowed yet. Moreover, if only start is specified,
then stop will be set to start + 1. If you specify neither
start nor stop, then *all the rows* in the array are selected.
The out parameter may be used to specify a NumPy array to receive
the output data. Note that the array must have the same size as
the data selected with the other parameters. Note that the array's
datatype is not checked and no type casting is performed, so if it
does not match the datatype on disk, the output will not be correct.
Also, this parameter is only valid when the array's flavor is set
to 'numpy'. Otherwise, a TypeError will be raised.
When data is read from disk in NumPy format, the output will be
in the current system's byteorder, regardless of how it is stored
on disk.
The exception is when an output buffer is supplied, in which case
the output will be in the byteorder of that output buffer.
.. versionchanged:: 3.0
Added the *out* parameter.
"""
self._g_check_open()
if out is not None and self.flavor != 'numpy':
msg = ("Optional 'out' argument may only be supplied if array "
"flavor is 'numpy', currently is {}").format(self.flavor)
raise TypeError(msg)
(start, stop, step) = self._process_range_read(start, stop, step)
arr = self._read(start, stop, step, out)
return internal_to_flavor(arr, self.flavor)
def _g_copy_with_stats(self, group, name, start, stop, step,
title, filters, chunkshape, _log, **kwargs):
"""Private part of Leaf.copy() for each kind of leaf."""
# Compute the correct indices.
(start, stop, step) = self._process_range_read(start, stop, step)
# Get the slice of the array
# (non-buffered version)
if self.shape:
arr = self[start:stop:step]
else:
arr = self[()]
# Build the new Array object. Use the _atom reserved keyword
# just in case the array is being copied from a native HDF5
# with atomic types different from scalars.
# For details, see #275 of trac.
object_ = Array(group, name, arr, title=title, _log=_log,
_atom=self.atom)
nbytes = np.prod(self.shape, dtype=SizeType) * self.atom.size
return (object_, nbytes)
def __repr__(self):
"""This provides more metainfo in addition to standard __str__"""
return f"""{self}
atom := {self.atom!r}
maindim := {self.maindim!r}
flavor := {self.flavor!r}
byteorder := {self.byteorder!r}
chunkshape := {self.chunkshape!r}"""
| (parentnode, name, obj=None, title='', byteorder=None, _log=True, _atom=None, track_times=True) |
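The Array class above is normally created and read back through the public PyTables file API rather than instantiated directly. A minimal usage sketch, assuming the standard `tables.open_file` / `File.create_array` entry points; the file name and sample data are illustrative.

```python
# Usage sketch for the Array class; nodes are created via File.create_array().
# File name and data are illustrative.
import numpy as np
import tables

with tables.open_file("example.h5", mode="w") as h5file:
    data = np.arange(12).reshape(3, 4)
    arr = h5file.create_array(h5file.root, "measurements", data,
                              title="A small 3x4 array")
    print(arr.shape)    # (3, 4)
    print(arr.nrows)    # 3, the length of the main dimension
    print(arr[1, :2])   # slice read straight from disk -> [4 5]
```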
727,974 | tables.node | __del__ | null | def __del__(self):
# Closed `Node` instances can not be killed and revived.
# Instead, accessing a closed and deleted (from memory, not
# disk) one yields a *new*, open `Node` instance. This is
# because of two reasons:
#
# 1. Predictability. After closing a `Node` and deleting it,
# only one thing can happen when accessing it again: a new,
# open `Node` instance is returned. If closed nodes could be
# revived, one could get either a closed or an open `Node`.
#
# 2. Ease of use. If the user wants to access a closed node
# again, the only condition would be that no references to
# the `Node` instance were left. If closed nodes could be
# revived, the user would also need to force the closed
# `Node` out of memory, which is not a trivial task.
#
if not self._v_isopen:
return # the node is already closed or not initialized
self._v__deleting = True
# If we get here, the `Node` is still open.
try:
node_manager = self._v_file._node_manager
node_manager.drop_node(self, check_unregistered=False)
finally:
# At this point the node can still be open if there is still some
# alive reference around (e.g. if the __del__ method is called
# explicitly by the user).
if self._v_isopen:
self._v__deleting = True
self._f_close()
| (self) |
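The comment in `__del__` above notes that closed `Node` instances are never revived: fetching the node again through the file yields a fresh, open instance. A small sketch of that behaviour, assuming the public `_f_close()`, `_v_isopen` and `File.get_node()` members; file and node names are illustrative.

```python
# Sketch of the "closed nodes are not revived" behaviour described above.
import tables

with tables.open_file("example.h5", mode="w") as h5file:
    node = h5file.create_array(h5file.root, "x", [1, 2, 3])
    node._f_close()           # close the node explicitly
    print(node._v_isopen)     # False: this instance stays closed
    fresh = h5file.get_node("/x")
    print(fresh._v_isopen)    # True: a new, open Node instance is returned
    print(fresh is node)      # False
```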
727,975 | tables.array | __getitem__ | Get a row, a range of rows or a slice from the array.
The set of tokens allowed for the key is the same as that for extended
slicing in Python (including the Ellipsis or ... token). The result is
an object of the current flavor; its shape depends on the kind of slice
used as key and the shape of the array itself.
Furthermore, NumPy-style fancy indexing, where a list of indices in a
certain axis is specified, is also supported. Note that only one list
per selection is supported right now. Finally, NumPy-style point and
boolean selections are supported as well.
Examples
--------
::
array1 = array[4] # simple selection
array2 = array[4:1000:2] # slice selection
array3 = array[1, ..., ::2, 1:4, 4:] # general slice selection
array4 = array[1, [1,5,10], ..., -1] # fancy selection
array5 = array[np.where(array[:] > 4)] # point selection
array6 = array[array[:] > 4] # boolean selection
| def __getitem__(self, key):
"""Get a row, a range of rows or a slice from the array.
The set of tokens allowed for the key is the same as that for extended
slicing in Python (including the Ellipsis or ... token). The result is
an object of the current flavor; its shape depends on the kind of slice
used as key and the shape of the array itself.
Furthermore, NumPy-style fancy indexing, where a list of indices in a
certain axis is specified, is also supported. Note that only one list
per selection is supported right now. Finally, NumPy-style point and
boolean selections are supported as well.
Examples
--------
::
array1 = array[4] # simple selection
array2 = array[4:1000:2] # slice selection
array3 = array[1, ..., ::2, 1:4, 4:] # general slice selection
array4 = array[1, [1,5,10], ..., -1] # fancy selection
array5 = array[np.where(array[:] > 4)] # point selection
array6 = array[array[:] > 4] # boolean selection
"""
self._g_check_open()
try:
# First, try with a regular selection
startl, stopl, stepl, shape = self._interpret_indexing(key)
arr = self._read_slice(startl, stopl, stepl, shape)
except TypeError:
# Then, try with a point-wise selection
try:
coords = self._point_selection(key)
arr = self._read_coords(coords)
except TypeError:
# Finally, try with a fancy selection
selection, reorder, shape = self._fancy_selection(key)
arr = self._read_selection(selection, reorder, shape)
if self.flavor == "numpy" or not self._v_convert:
return arr
return internal_to_flavor(arr, self.flavor)
| (self, key) |
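A runnable sketch exercising the selection styles listed in the `__getitem__` docstring above (plain index, slice, fancy list, point and boolean selections); the data and file name are illustrative.

```python
# Selection styles supported by Array.__getitem__; data is illustrative.
import numpy as np
import tables

with tables.open_file("selections.h5", mode="w") as h5file:
    arr = h5file.create_array(h5file.root, "a", np.arange(20).reshape(4, 5))
    row = arr[2]                          # simple selection -> one row
    block = arr[1:3, ::2]                 # slice selection
    fancy = arr[1, [0, 2, 4]]             # fancy selection (one list per key)
    points = arr[np.where(arr[:] > 15)]   # point selection
    masked = arr[arr[:] > 15]             # boolean selection
```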
727,976 | tables.array | __init__ | null | def __init__(self, parentnode, name,
obj=None, title="",
byteorder=None, _log=True, _atom=None,
track_times=True):
self._v_version = None
"""The object version of this array."""
self._v_new = new = obj is not None
"""Is this the first time the node has been created?"""
self._v_new_title = title
"""New title for this node."""
self._obj = obj
"""The object to be stored in the array. It can be any of numpy,
list, tuple, string, integer or floating point types, provided
that they are regular (i.e. they are not like ``[[1, 2], 2]``).
.. versionchanged:: 3.0
Renamed from *_object* to *_obj*.
"""
self._v_convert = True
"""Whether the ``Array`` object must be converted or not."""
# Miscellaneous iteration rubbish.
self._start = None
"""Starting row for the current iteration."""
self._stop = None
"""Stopping row for the current iteration."""
self._step = None
"""Step size for the current iteration."""
self._nrowsread = None
"""Number of rows read up to the current state of iteration."""
self._startb = None
"""Starting row for current buffer."""
self._stopb = None
"""Stopping row for current buffer. """
self._row = None
"""Current row in iterators (sentinel)."""
self._init = False
"""Whether we are in the middle of an iteration or not (sentinel)."""
self.listarr = None
"""Current buffer in iterators."""
# Documented (*public*) attributes.
self.atom = _atom
"""An Atom (see :ref:`AtomClassDescr`) instance representing the *type*
and *shape* of the atomic objects to be saved.
"""
self.shape = None
"""The shape of the stored array."""
self.nrow = None
"""On iterators, this is the index of the current row."""
self.extdim = -1 # ordinary arrays are not enlargeable
"""The index of the enlargeable dimension."""
# Ordinary arrays have no filters: leaf is created with default ones.
super().__init__(parentnode, name, new, Filters(), byteorder, _log,
track_times)
| (self, parentnode, name, obj=None, title='', byteorder=None, _log=True, _atom=None, track_times=True) |
727,977 | tables.array | __iter__ | Iterate over the rows of the array.
This is equivalent to calling :meth:`Array.iterrows` with default
arguments, i.e. it iterates over *all the rows* in the array.
Examples
--------
::
result = [row[2] for row in array]
Which is equivalent to::
result = [row[2] for row in array.iterrows()]
| def __iter__(self):
"""Iterate over the rows of the array.
This is equivalent to calling :meth:`Array.iterrows` with default
arguments, i.e. it iterates over *all the rows* in the array.
Examples
--------
::
result = [row[2] for row in array]
Which is equivalent to::
result = [row[2] for row in array.iterrows()]
"""
if not self._init:
# If the iterator is called directly, assign default variables
self._start = 0
self._stop = self.nrows
self._step = 1
# and initialize the loop
self._init_loop()
return self
| (self) |
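A short sketch of row iteration as described in `__iter__` above: iterating the node directly is equivalent to calling `iterrows()` with default arguments. Data and file name are illustrative.

```python
# Row iteration over an Array node; data is illustrative.
import numpy as np
import tables

with tables.open_file("iterate.h5", mode="w") as h5file:
    arr = h5file.create_array(h5file.root, "a", np.arange(12).reshape(6, 2))
    first_column = [row[0] for row in arr]            # same as arr.iterrows()
    every_other = [row[0] for row in arr.iterrows(step=2)]
    print(first_column)   # [0, 2, 4, 6, 8, 10]
    print(every_other)    # [0, 4, 8]
```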
727,978 | tables.leaf | __len__ | Return the length of the main dimension of the leaf data.
Please note that this may raise an OverflowError on 32-bit platforms
for datasets having more than 2**31-1 rows. This is a limitation of
Python that you can work around by using the nrows or shape attributes.
| def __len__(self):
"""Return the length of the main dimension of the leaf data.
Please note that this may raise an OverflowError on 32-bit platforms
for datasets having more than 2**31-1 rows. This is a limitation of
Python that you can work around by using the nrows or shape attributes.
"""
return self.nrows
| (self) |
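The `__len__` docstring above recommends falling back to the `nrows` or `shape` attributes when `len()` could overflow on 32-bit platforms. A minimal sketch of that workaround; data and file name are illustrative.

```python
# len() versus the nrows/shape attributes; data is illustrative.
import numpy as np
import tables

with tables.open_file("length.h5", mode="w") as h5file:
    arr = h5file.create_array(h5file.root, "a", np.arange(10))
    print(len(arr), arr.nrows, arr.shape[0])   # 10 10 10
    # For datasets with more than 2**31 - 1 rows on 32-bit platforms,
    # prefer arr.nrows or arr.shape, which bypass the len() protocol.
```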
727,979 | tables.array | __next__ | Get the next element of the array during an iteration.
The element is returned as an object of the current flavor.
| def __next__(self):
"""Get the next element of the array during an iteration.
The element is returned as an object of the current flavor.
"""
# this could probably be sped up for long iterations by reusing the
# listarr buffer
if self._nrowsread >= self._stop:
self._init = False
self.listarr = None # fixes issue #308
raise StopIteration # end of iteration
else:
# Read a chunk of rows
if self._row + 1 >= self.nrowsinbuf or self._row < 0:
self._stopb = self._startb + self._step * self.nrowsinbuf
# Protection for reading more elements than needed
if self._stopb > self._stop:
self._stopb = self._stop
listarr = self._read(self._startb, self._stopb, self._step)
# Swap the axes to ease the return of elements
if self.extdim > 0:
listarr = listarr.swapaxes(self.extdim, 0)
self.listarr = internal_to_flavor(listarr, self.flavor)
self._row = -1
self._startb = self._stopb
self._row += 1
self.nrow += self._step
self._nrowsread += self._step
# Fixes bug #968132
# if self.listarr.shape:
if self.shape:
return self.listarr[self._row]
else:
return self.listarr # Scalar case
| (self) |
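`__next__` above serves rows from a buffer that is refilled every `nrowsinbuf` rows. The following standalone, pure-Python sketch mirrors that chunked-iteration arithmetic; the function and its variable names echo the Array attributes but are not PyTables code.

```python
# Standalone sketch of the chunked iteration used by Array.__next__:
# rows are fetched nrowsinbuf at a time and served from a buffer.
def iter_in_chunks(data, start, stop, step, nrowsinbuf):
    startb = start        # start of the current buffer
    nrowsread = start     # rows consumed so far
    row = -1              # position inside the buffer (sentinel)
    listarr = []
    while nrowsread < stop:
        if row + 1 >= nrowsinbuf or row < 0:
            stopb = min(startb + step * nrowsinbuf, stop)
            listarr = data[startb:stopb:step]   # one buffered read
            row = -1
            startb = stopb
        row += 1
        nrowsread += step
        yield listarr[row]


print(list(iter_in_chunks(list(range(10)), 0, 10, 2, 3)))  # [0, 2, 4, 6, 8]
```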
727,980 | tables.array | __repr__ | This provides more metainfo in addition to standard __str__ | """Here is defined the Array class."""
import operator
import sys
import numpy as np
from . import hdf5extension
from .filters import Filters
from .flavor import flavor_of, array_as_internal, internal_to_flavor
from .leaf import Leaf
from .utils import (is_idx, convert_to_np_atom2, SizeType, lazyattr,
byteorders, quantize)
# default version for ARRAY objects
# obversion = "1.0" # initial version
# obversion = "2.0" # Added an optional EXTDIM attribute
# obversion = "2.1" # Added support for complex datatypes
# obversion = "2.2" # This adds support for time datatypes.
# obversion = "2.3" # This adds support for enumerated datatypes.
obversion = "2.4" # Numeric and numarray flavors are gone.
class Array(hdf5extension.Array, Leaf):
"""This class represents homogeneous datasets in an HDF5 file.
This class provides methods to write or read data to or from array objects
in the file. This class does not allow you to enlarge or compress
the datasets on disk; use the EArray class (see :ref:`EArrayClassDescr`) if
you want enlargeable dataset support or compression features, or CArray
(see :ref:`CArrayClassDescr`) if you just want compression.
An interesting property of the Array class is that it remembers the
*flavor* of the object that has been saved so that if you saved, for
example, a list, you will get a list when reading it back afterwards; if you
saved a NumPy array, you will get a NumPy object, and so forth.
Note that this class inherits all the public attributes and methods that
Leaf (see :ref:`LeafClassDescr`) already provides. However, as Array
instances have no internal I/O buffers, it is not necessary to use the
flush() method they inherit from Leaf in order to save their internal state
to disk. When a writing method call returns, all the data is already on
disk.
Parameters
----------
parentnode
The parent :class:`Group` object.
.. versionchanged:: 3.0
Renamed from *parentNode* to *parentnode*
name : str
The name of this node in its parent group.
obj
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars as well as native Python sequences and
scalars, provided that values are regular (i.e. they are not
like ``[[1,2],2]``) and homogeneous (i.e. all the elements are
of the same type).
.. versionchanged:: 3.0
Renamed from *object* to *obj*.
title
A description for this node (it sets the ``TITLE`` HDF5 attribute on
disk).
byteorder
The byteorder of the data *on disk*, specified as 'little' or 'big'.
If this is not specified, the byteorder is that of the given `object`.
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time, object
birth time); default True. Semantics of these times depend on their
implementation in the HDF5 library: refer to documentation of the
H5O_info_t data structure. As of HDF5 1.8.15, only ctime (metadata
change time) is implemented.
.. versionadded:: 3.4.3
"""
# Class identifier.
_c_classid = 'ARRAY'
@lazyattr
def dtype(self):
"""The NumPy ``dtype`` that most closely matches this array."""
return self.atom.dtype
@property
def nrows(self):
"""The number of rows in the array."""
if self.shape == ():
return SizeType(1) # scalar case
else:
return self.shape[self.maindim]
@property
def rowsize(self):
"""The size of the rows in bytes in dimensions orthogonal to
*maindim*."""
maindim = self.maindim
rowsize = self.atom.size
for i, dim in enumerate(self.shape):
if i != maindim:
rowsize *= dim
return rowsize
@property
def size_in_memory(self):
"""The size of this array's data in bytes when it is fully loaded into
memory."""
return self.nrows * self.rowsize
def __init__(self, parentnode, name,
obj=None, title="",
byteorder=None, _log=True, _atom=None,
track_times=True):
self._v_version = None
"""The object version of this array."""
self._v_new = new = obj is not None
"""Is this the first time the node has been created?"""
self._v_new_title = title
"""New title for this node."""
self._obj = obj
"""The object to be stored in the array. It can be any of numpy,
list, tuple, string, integer or floating point types, provided
that they are regular (i.e. they are not like ``[[1, 2], 2]``).
.. versionchanged:: 3.0
Renamed from *_object* to *_obj*.
"""
self._v_convert = True
"""Whether the ``Array`` object must be converted or not."""
# Miscellaneous iteration rubbish.
self._start = None
"""Starting row for the current iteration."""
self._stop = None
"""Stopping row for the current iteration."""
self._step = None
"""Step size for the current iteration."""
self._nrowsread = None
"""Number of rows read up to the current state of iteration."""
self._startb = None
"""Starting row for current buffer."""
self._stopb = None
"""Stopping row for current buffer. """
self._row = None
"""Current row in iterators (sentinel)."""
self._init = False
"""Whether we are in the middle of an iteration or not (sentinel)."""
self.listarr = None
"""Current buffer in iterators."""
# Documented (*public*) attributes.
self.atom = _atom
"""An Atom (see :ref:`AtomClassDescr`) instance representing the *type*
and *shape* of the atomic objects to be saved.
"""
self.shape = None
"""The shape of the stored array."""
self.nrow = None
"""On iterators, this is the index of the current row."""
self.extdim = -1 # ordinary arrays are not enlargeable
"""The index of the enlargeable dimension."""
# Ordinary arrays have no filters: leaf is created with default ones.
super().__init__(parentnode, name, new, Filters(), byteorder, _log,
track_times)
def _g_create(self):
"""Save a new array in file."""
self._v_version = obversion
try:
# `Leaf._g_post_init_hook()` should be setting the flavor on disk.
self._flavor = flavor = flavor_of(self._obj)
nparr = array_as_internal(self._obj, flavor)
except Exception: # XXX
# Problems converting data. Close the node and re-raise exception.
self.close(flush=0)
raise
# Raise an error in case of unsupported object
if nparr.dtype.kind in ['V', 'U', 'O']: # in void, unicode, object
raise TypeError("Array objects cannot currently deal with void, "
"unicode or object arrays")
# Decrease the number of references to the object
self._obj = None
# Fix the byteorder of data
nparr = self._g_fix_byteorder_data(nparr, nparr.dtype.byteorder)
# Create the array on-disk
try:
# ``self._v_objectid`` needs to be set because it will be
# needed for setting attributes in some descendants later
# on
(self._v_objectid, self.shape, self.atom) = self._create_array(
nparr, self._v_new_title, self.atom)
except Exception: # XXX
# Problems creating the Array on disk. Close node and re-raise.
self.close(flush=0)
raise
# Compute the optimal buffer size
self.nrowsinbuf = self._calc_nrowsinbuf()
# Arrays don't have chunkshapes (so, set it to None)
self._v_chunkshape = None
return self._v_objectid
def _g_open(self):
"""Get the metadata info for an array in file."""
(oid, self.atom, self.shape, self._v_chunkshape) = self._open_array()
self.nrowsinbuf = self._calc_nrowsinbuf()
return oid
def get_enum(self):
"""Get the enumerated type associated with this array.
If this array is of an enumerated type, the corresponding Enum instance
(see :ref:`EnumClassDescr`) is returned. If it is not of an enumerated
type, a TypeError is raised.
"""
if self.atom.kind != 'enum':
raise TypeError("array ``%s`` is not of an enumerated type"
% self._v_pathname)
return self.atom.enum
def iterrows(self, start=None, stop=None, step=None):
"""Iterate over the rows of the array.
This method returns an iterator yielding an object of the current
flavor for each selected row in the array. The returned rows are taken
from the *main dimension*.
If a range is not supplied, *all the rows* in the array are iterated
upon - you can also use the :meth:`Array.__iter__` special method for
that purpose. If you only want to iterate over a given *range of rows*
in the array, you may use the start, stop and step parameters.
Examples
--------
::
result = [row for row in arrayInstance.iterrows(step=4)]
.. versionchanged:: 3.0
If the *start* parameter is provided and *stop* is None then the
array is iterated from *start* to the last line.
In PyTables < 3.0 only one element was returned.
"""
try:
(self._start, self._stop, self._step) = self._process_range(
start, stop, step)
except IndexError:
# If problems with indexes, silently return the null tuple
return ()
self._init_loop()
return self
def __iter__(self):
"""Iterate over the rows of the array.
This is equivalent to calling :meth:`Array.iterrows` with default
arguments, i.e. it iterates over *all the rows* in the array.
Examples
--------
::
result = [row[2] for row in array]
Which is equivalent to::
result = [row[2] for row in array.iterrows()]
"""
if not self._init:
# If the iterator is called directly, assign default variables
self._start = 0
self._stop = self.nrows
self._step = 1
# and initialize the loop
self._init_loop()
return self
def _init_loop(self):
"""Initialization for the __iter__ iterator."""
self._nrowsread = self._start
self._startb = self._start
self._row = -1 # Sentinel
self._init = True # Sentinel
self.nrow = SizeType(self._start - self._step) # row number
def __next__(self):
"""Get the next element of the array during an iteration.
The element is returned as an object of the current flavor.
"""
# this could probably be sped up for long iterations by reusing the
# listarr buffer
if self._nrowsread >= self._stop:
self._init = False
self.listarr = None # fixes issue #308
raise StopIteration # end of iteration
else:
# Read a chunk of rows
if self._row + 1 >= self.nrowsinbuf or self._row < 0:
self._stopb = self._startb + self._step * self.nrowsinbuf
# Protection for reading more elements than needed
if self._stopb > self._stop:
self._stopb = self._stop
listarr = self._read(self._startb, self._stopb, self._step)
# Swap the axes to ease the return of elements
if self.extdim > 0:
listarr = listarr.swapaxes(self.extdim, 0)
self.listarr = internal_to_flavor(listarr, self.flavor)
self._row = -1
self._startb = self._stopb
self._row += 1
self.nrow += self._step
self._nrowsread += self._step
# Fixes bug #968132
# if self.listarr.shape:
if self.shape:
return self.listarr[self._row]
else:
return self.listarr # Scalar case
def _interpret_indexing(self, keys):
"""Internal routine used by __getitem__ and __setitem__"""
maxlen = len(self.shape)
shape = (maxlen,)
startl = np.empty(shape=shape, dtype=SizeType)
stopl = np.empty(shape=shape, dtype=SizeType)
stepl = np.empty(shape=shape, dtype=SizeType)
stop_None = np.zeros(shape=shape, dtype=SizeType)
if not isinstance(keys, tuple):
keys = (keys,)
nkeys = len(keys)
dim = 0
# There is some problem when dealing with [..., ...] params,
# but this is a somewhat odd way to pass parameters anyway
for key in keys:
ellipsis = 0 # Sentinel
if isinstance(key, type(Ellipsis)):
ellipsis = 1
for diml in range(dim, len(self.shape) - (nkeys - dim) + 1):
startl[dim] = 0
stopl[dim] = self.shape[diml]
stepl[dim] = 1
dim += 1
elif dim >= maxlen:
raise IndexError("Too many indices for object '%s'" %
self._v_pathname)
elif is_idx(key):
key = operator.index(key)
# Protection for index out of range
if key >= self.shape[dim]:
raise IndexError("Index out of range")
if key < 0:
# To support negative values (Fixes bug #968149)
key += self.shape[dim]
start, stop, step = self._process_range(
key, key + 1, 1, dim=dim)
stop_None[dim] = 1
elif isinstance(key, slice):
start, stop, step = self._process_range(
key.start, key.stop, key.step, dim=dim)
else:
raise TypeError("Non-valid index or slice: %s" % key)
if not ellipsis:
startl[dim] = start
stopl[dim] = stop
stepl[dim] = step
dim += 1
# Complete the other dimensions, if needed
if dim < len(self.shape):
for diml in range(dim, len(self.shape)):
startl[dim] = 0
stopl[dim] = self.shape[diml]
stepl[dim] = 1
dim += 1
# Compute the shape for the container properly. Fixes #1288792
shape = []
for dim in range(len(self.shape)):
new_dim = len(range(startl[dim], stopl[dim], stepl[dim]))
if not (new_dim == 1 and stop_None[dim]):
shape.append(new_dim)
return startl, stopl, stepl, shape
def _fancy_selection(self, args):
"""Performs a NumPy-style fancy selection in `self`.
Implements advanced NumPy-style selection operations in
addition to the standard slice-and-int behavior.
Indexing arguments may be ints, slices or lists of indices.
Note: This is a backport from the h5py project.
"""
# Internal functions
def validate_number(num, length):
"""Validate a list member for the given axis length."""
try:
num = int(num)
except TypeError:
raise TypeError("Illegal index: %r" % num)
if num > length - 1:
raise IndexError("Index out of bounds: %d" % num)
def expand_ellipsis(args, rank):
"""Expand ellipsis objects and fill in missing axes."""
n_el = sum(1 for arg in args if arg is Ellipsis)
if n_el > 1:
raise IndexError("Only one ellipsis may be used.")
elif n_el == 0 and len(args) != rank:
args = args + (Ellipsis,)
final_args = []
n_args = len(args)
for idx, arg in enumerate(args):
if arg is Ellipsis:
final_args.extend((slice(None),) * (rank - n_args + 1))
else:
final_args.append(arg)
if len(final_args) > rank:
raise IndexError("Too many indices.")
return final_args
def translate_slice(exp, length):
"""Given a slice object, return a 3-tuple (start, count, step)
This is for use with the hyperslab selection routines.
"""
start, stop, step = exp.start, exp.stop, exp.step
if start is None:
start = 0
else:
start = int(start)
if stop is None:
stop = length
else:
stop = int(stop)
if step is None:
step = 1
else:
step = int(step)
if step < 1:
raise IndexError("Step must be >= 1 (got %d)" % step)
if stop == start:
raise IndexError("Zero-length selections are not allowed")
if stop < start:
raise IndexError("Reverse-order selections are not allowed")
if start < 0:
start = length + start
if stop < 0:
stop = length + stop
if not 0 <= start <= (length - 1):
raise IndexError(
"Start index %s out of range (0-%d)" % (start, length - 1))
if not 1 <= stop <= length:
raise IndexError(
"Stop index %s out of range (1-%d)" % (stop, length))
count = (stop - start) // step
if (stop - start) % step != 0:
count += 1
if start + count > length:
raise IndexError(
"Selection out of bounds (%d; axis has %d)" %
(start + count, length))
return start, count, step
# Main code for _fancy_selection
mshape = []
selection = []
if not isinstance(args, tuple):
args = (args,)
args = expand_ellipsis(args, len(self.shape))
list_seen = False
reorder = None
for idx, (exp, length) in enumerate(zip(args, self.shape)):
if isinstance(exp, slice):
start, count, step = translate_slice(exp, length)
selection.append((start, count, step, idx, "AND"))
mshape.append(count)
else:
try:
exp = list(exp)
except TypeError:
exp = [exp] # Handle scalar index as a list of length 1
mshape.append(0) # Keep track of scalar index for NumPy
else:
mshape.append(len(exp))
if len(exp) == 0:
raise IndexError(
"Empty selections are not allowed (axis %d)" % idx)
elif len(exp) > 1:
if list_seen:
raise IndexError("Only one selection list is allowed")
else:
list_seen = True
else:
if (not isinstance(exp[0], (int, np.integer)) or
(isinstance(exp[0], np.ndarray) and not
np.issubdtype(exp[0].dtype, np.integer))):
raise TypeError("Only integer coordinates allowed.")
nexp = np.asarray(exp, dtype="i8")
# Convert negative values
nexp = np.where(nexp < 0, length + nexp, nexp)
# Check whether the list is ordered or not
# (only one unordered list is allowed)
if len(nexp) != len(np.unique(nexp)):
raise IndexError(
"Selection lists cannot have repeated values")
neworder = nexp.argsort()
if (neworder.shape != (len(exp),) or
np.sum(np.abs(neworder - np.arange(len(exp)))) != 0):
if reorder is not None:
raise IndexError(
"Only one selection list can be unordered")
corrected_idx = sum(1 for x in mshape if x != 0) - 1
reorder = (corrected_idx, neworder)
nexp = nexp[neworder]
for select_idx in range(len(nexp) + 1):
# This crazy piece of code performs a list selection
# using HDF5 hyperslabs.
# For each index, perform a "NOTB" selection on every
# portion of *this axis* which falls *outside* the list
# selection. For this to work, the input array MUST be
# monotonically increasing.
if select_idx < len(nexp):
validate_number(nexp[select_idx], length)
if select_idx == 0:
start = 0
count = nexp[0]
elif select_idx == len(nexp):
start = nexp[-1] + 1
count = length - start
else:
start = nexp[select_idx - 1] + 1
count = nexp[select_idx] - start
if count > 0:
selection.append((start, count, 1, idx, "NOTB"))
mshape = tuple(x for x in mshape if x != 0)
return selection, reorder, mshape
def __getitem__(self, key):
"""Get a row, a range of rows or a slice from the array.
The set of tokens allowed for the key is the same as that for extended
slicing in Python (including the Ellipsis or ... token). The result is
an object of the current flavor; its shape depends on the kind of slice
used as key and the shape of the array itself.
Furthermore, NumPy-style fancy indexing, where a list of indices in a
certain axis is specified, is also supported. Note that only one list
per selection is supported right now. Finally, NumPy-style point and
boolean selections are supported as well.
Examples
--------
::
array1 = array[4] # simple selection
array2 = array[4:1000:2] # slice selection
array3 = array[1, ..., ::2, 1:4, 4:] # general slice selection
array4 = array[1, [1,5,10], ..., -1] # fancy selection
array5 = array[np.where(array[:] > 4)] # point selection
array6 = array[array[:] > 4] # boolean selection
"""
self._g_check_open()
try:
# First, try with a regular selection
startl, stopl, stepl, shape = self._interpret_indexing(key)
arr = self._read_slice(startl, stopl, stepl, shape)
except TypeError:
# Then, try with a point-wise selection
try:
coords = self._point_selection(key)
arr = self._read_coords(coords)
except TypeError:
# Finally, try with a fancy selection
selection, reorder, shape = self._fancy_selection(key)
arr = self._read_selection(selection, reorder, shape)
if self.flavor == "numpy" or not self._v_convert:
return arr
return internal_to_flavor(arr, self.flavor)
def __setitem__(self, key, value):
"""Set a row, a range of rows or a slice in the array.
It takes different actions depending on the type of the key parameter:
if it is an integer, the corresponding array row is set to value (the
value is broadcast when needed). If key is a slice, the row slice
determined by it is set to value (as usual, if the slice to be updated
exceeds the actual shape of the array, only the values in the existing
range are updated).
If value is a multidimensional object, then its shape must be
compatible with the shape determined by key, otherwise, a ValueError
will be raised.
Furthermore, NumPy-style fancy indexing, where a list of indices in a
certain axis is specified, is also supported. Note that only one list
per selection is supported right now. Finally, NumPy-style point and
boolean selections are supported as well.
Examples
--------
::
a1[0] = 333 # assign an integer to an Integer Array row
a2[0] = 'b' # assign a string to a string Array row
a3[1:4] = 5 # broadcast 5 to slice 1:4
a4[1:4:2] = 'xXx' # broadcast 'xXx' to slice 1:4:2
# General slice update (a5.shape = (4,3,2,8,5,10)).
a5[1, ..., ::2, 1:4, 4:] = numpy.arange(432).reshape((3, 2, 4, 3, 6))
a6[1, [1,5,10], ..., -1] = arr # fancy selection
a7[np.where(a6[:] > 4)] = 4 # point selection + broadcast
a8[arr > 4] = arr2 # boolean selection
"""
self._g_check_open()
# Create an array compliant with the specified slice
nparr = convert_to_np_atom2(value, self.atom)
if nparr.size == 0:
return
# truncate data if least_significant_digit filter is set
# TODO: add the least_significant_digit attribute to the array on disk
if (self.filters.least_significant_digit is not None and
not np.issubdtype(nparr.dtype, np.signedinteger)):
nparr = quantize(nparr, self.filters.least_significant_digit)
try:
startl, stopl, stepl, shape = self._interpret_indexing(key)
self._write_slice(startl, stopl, stepl, shape, nparr)
except TypeError:
# Then, try with a point-wise selection
try:
coords = self._point_selection(key)
self._write_coords(coords, nparr)
except TypeError:
selection, reorder, shape = self._fancy_selection(key)
self._write_selection(selection, reorder, shape, nparr)
def _check_shape(self, nparr, slice_shape):
"""Test that nparr shape is consistent with underlying object.
If not, try creating a new nparr object, using broadcasting if
necessary.
"""
if nparr.shape != (slice_shape + self.atom.dtype.shape):
# Create an array compliant with the specified shape
narr = np.empty(shape=slice_shape, dtype=self.atom.dtype)
# Assign the value to it. It will raise a ValueError exception
# if the objects cannot be broadcast to a single shape.
narr[...] = nparr
return narr
else:
return nparr
def _read_slice(self, startl, stopl, stepl, shape):
"""Read a slice based on `startl`, `stopl` and `stepl`."""
nparr = np.empty(dtype=self.atom.dtype, shape=shape)
# Protection against reading empty arrays
if 0 not in shape:
# Arrays that have non-zero dimensionality
self._g_read_slice(startl, stopl, stepl, nparr)
# For zero-shaped arrays, return the scalar
if nparr.shape == ():
nparr = nparr[()]
return nparr
def _read_coords(self, coords):
"""Read a set of points defined by `coords`."""
nparr = np.empty(dtype=self.atom.dtype, shape=len(coords))
if len(coords) > 0:
self._g_read_coords(coords, nparr)
# For zero-shaped arrays, return the scalar
if nparr.shape == ():
nparr = nparr[()]
return nparr
def _read_selection(self, selection, reorder, shape):
"""Read a `selection`.
Reorder if necessary.
"""
# Create the container for the slice
nparr = np.empty(dtype=self.atom.dtype, shape=shape)
# Arrays that have non-zero dimensionality
self._g_read_selection(selection, nparr)
# For zero-shaped arrays, return the scalar
if nparr.shape == ():
nparr = nparr[()]
elif reorder is not None:
# We need to reorder the array
idx, neworder = reorder
k = [slice(None)] * len(shape)
k[idx] = neworder.argsort()
# Apparently, a copy is not needed here, but doing it
# for symmetry with the `_write_selection()` method.
nparr = nparr[tuple(k)].copy()
return nparr
def _write_slice(self, startl, stopl, stepl, shape, nparr):
"""Write `nparr` in a slice based on `startl`, `stopl` and `stepl`."""
nparr = self._check_shape(nparr, tuple(shape))
countl = ((stopl - startl - 1) // stepl) + 1
self._g_write_slice(startl, stepl, countl, nparr)
def _write_coords(self, coords, nparr):
"""Write `nparr` values in points defined by `coords` coordinates."""
if len(coords) > 0:
nparr = self._check_shape(nparr, (len(coords),))
self._g_write_coords(coords, nparr)
def _write_selection(self, selection, reorder, shape, nparr):
"""Write `nparr` in `selection`.
Reorder if necessary.
"""
nparr = self._check_shape(nparr, tuple(shape))
# Check whether we should reorder the array
if reorder is not None:
idx, neworder = reorder
k = [slice(None)] * len(shape)
k[idx] = neworder
# For a reason I don't understand well, we need a copy of
# the reordered array
nparr = nparr[tuple(k)].copy()
self._g_write_selection(selection, nparr)
def _read(self, start, stop, step, out=None):
"""Read the array from disk without slice or flavor processing."""
nrowstoread = len(range(start, stop, step))
shape = list(self.shape)
if shape:
shape[self.maindim] = nrowstoread
if out is None:
arr = np.empty(dtype=self.atom.dtype, shape=shape)
else:
bytes_required = self.rowsize * nrowstoread
# if buffer is too small, it will segfault
if bytes_required != out.nbytes:
raise ValueError(f'output array size invalid, got {out.nbytes}'
f' bytes, need {bytes_required} bytes')
if not out.flags['C_CONTIGUOUS']:
raise ValueError('output array not C contiguous')
arr = out
# Protection against reading empty arrays
if 0 not in shape:
# Arrays that have non-zero dimensionality
self._read_array(start, stop, step, arr)
# data is always read in the system byteorder
# if the out array's byteorder is different, do a byteswap
if (out is not None and
byteorders[arr.dtype.byteorder] != sys.byteorder):
arr.byteswap(True)
return arr
def read(self, start=None, stop=None, step=None, out=None):
"""Get data in the array as an object of the current flavor.
The start, stop and step parameters can be used to select only a
*range of rows* in the array. Their meanings are the same as in
the built-in range() Python function, except that negative values
of step are not allowed yet. Moreover, if only start is specified,
then stop will be set to start + 1. If you specify neither
start nor stop, then *all the rows* in the array are selected.
The out parameter may be used to specify a NumPy array to receive
the output data. Note that the array must have the same size as
the data selected with the other parameters. Note that the array's
datatype is not checked and no type casting is performed, so if it
does not match the datatype on disk, the output will not be correct.
Also, this parameter is only valid when the array's flavor is set
to 'numpy'. Otherwise, a TypeError will be raised.
When data is read from disk in NumPy format, the output will be
in the current system's byteorder, regardless of how it is stored
on disk.
The exception is when an output buffer is supplied, in which case
the output will be in the byteorder of that output buffer.
.. versionchanged:: 3.0
Added the *out* parameter.
"""
self._g_check_open()
if out is not None and self.flavor != 'numpy':
msg = ("Optional 'out' argument may only be supplied if array "
"flavor is 'numpy', currently is {}").format(self.flavor)
raise TypeError(msg)
(start, stop, step) = self._process_range_read(start, stop, step)
arr = self._read(start, stop, step, out)
return internal_to_flavor(arr, self.flavor)
def _g_copy_with_stats(self, group, name, start, stop, step,
title, filters, chunkshape, _log, **kwargs):
"""Private part of Leaf.copy() for each kind of leaf."""
# Compute the correct indices.
(start, stop, step) = self._process_range_read(start, stop, step)
# Get the slice of the array
# (non-buffered version)
if self.shape:
arr = self[start:stop:step]
else:
arr = self[()]
# Build the new Array object. Use the _atom reserved keyword
# just in case the array is being copied from a native HDF5
# with atomic types different from scalars.
# For details, see #275 of trac.
object_ = Array(group, name, arr, title=title, _log=_log,
_atom=self.atom)
nbytes = np.prod(self.shape, dtype=SizeType) * self.atom.size
return (object_, nbytes)
def __repr__(self):
"""This provides more metainfo in addition to standard __str__"""
return f"""{self}
atom := {self.atom!r}
maindim := {self.maindim!r}
flavor := {self.flavor!r}
byteorder := {self.byteorder!r}
chunkshape := {self.chunkshape!r}"""
| (self) |
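The following is a minimal usage sketch of the selection modes documented in `__getitem__` above; it is not part of the original source, and the file name and data are illustrative assumptions.

```python
import numpy as np
import tables as tb

# Assumes "demo.h5" is a writable scratch path.
with tb.open_file("demo.h5", mode="w") as f:
    arr = f.create_array(f.root, "a", np.arange(100).reshape(10, 10))
    row = arr[4]                          # simple selection -> one row
    sl = arr[4:10:2, 1:4]                 # slice selection
    fancy = arr[[1, 5, 8], ::2]           # fancy selection (one list allowed)
    points = arr[np.where(arr[:] > 95)]   # point selection
    booleans = arr[arr[:] > 95]           # boolean selection
```

Whatever the key, the result comes back as an object of the array's current flavor (a NumPy array by default).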
727,981 | tables.array | __setitem__ | Set a row, a range of rows or a slice in the array.
It takes different actions depending on the type of the key parameter:
if it is an integer, the corresponding array row is set to value (the
value is broadcast when needed). If key is a slice, the row slice
determined by it is set to value (as usual, if the slice to be updated
exceeds the actual shape of the array, only the values in the existing
range are updated).
If value is a multidimensional object, then its shape must be
compatible with the shape determined by key, otherwise, a ValueError
will be raised.
Furthermore, NumPy-style fancy indexing, where a list of indices in a
certain axis is specified, is also supported. Note that only one list
per selection is supported right now. Finally, NumPy-style point and
boolean selections are supported as well.
Examples
--------
::
a1[0] = 333 # assign an integer to an Integer Array row
a2[0] = 'b' # assign a string to a string Array row
a3[1:4] = 5 # broadcast 5 to slice 1:4
a4[1:4:2] = 'xXx' # broadcast 'xXx' to slice 1:4:2
# General slice update (a5.shape = (4,3,2,8,5,10)).
a5[1, ..., ::2, 1:4, 4:] = numpy.arange(432).reshape((3,2,4,3,6))
a6[1, [1,5,10], ..., -1] = arr # fancy selection
a7[np.where(a6[:] > 4)] = 4 # point selection + broadcast
a8[arr > 4] = arr2 # boolean selection
| def __setitem__(self, key, value):
"""Set a row, a range of rows or a slice in the array.
It takes different actions depending on the type of the key parameter:
if it is an integer, the corresponding array row is set to value (the
value is broadcast when needed). If key is a slice, the row slice
determined by it is set to value (as usual, if the slice to be updated
exceeds the actual shape of the array, only the values in the existing
range are updated).
If value is a multidimensional object, then its shape must be
compatible with the shape determined by key, otherwise, a ValueError
will be raised.
Furthermore, NumPy-style fancy indexing, where a list of indices in a
certain axis is specified, is also supported. Note that only one list
per selection is supported right now. Finally, NumPy-style point and
boolean selections are supported as well.
Examples
--------
::
a1[0] = 333 # assign an integer to an Integer Array row
a2[0] = 'b' # assign a string to a string Array row
a3[1:4] = 5 # broadcast 5 to slice 1:4
a4[1:4:2] = 'xXx' # broadcast 'xXx' to slice 1:4:2
# General slice update (a5.shape = (4,3,2,8,5,10)).
a5[1, ..., ::2, 1:4, 4:] = numpy.arange(432).reshape((3,2,4,3,6))
a6[1, [1,5,10], ..., -1] = arr # fancy selection
a7[np.where(a6[:] > 4)] = 4 # point selection + broadcast
a8[arr > 4] = arr2 # boolean selection
"""
self._g_check_open()
# Create an array compliant with the specified slice
nparr = convert_to_np_atom2(value, self.atom)
if nparr.size == 0:
return
# truncate data if least_significant_digit filter is set
# TODO: add the least_significant_digit attribute to the array on disk
if (self.filters.least_significant_digit is not None and
not np.issubdtype(nparr.dtype, np.signedinteger)):
nparr = quantize(nparr, self.filters.least_significant_digit)
try:
startl, stopl, stepl, shape = self._interpret_indexing(key)
self._write_slice(startl, stopl, stepl, shape, nparr)
except TypeError:
# Then, try with a point-wise selection
try:
coords = self._point_selection(key)
self._write_coords(coords, nparr)
except TypeError:
selection, reorder, shape = self._fancy_selection(key)
self._write_selection(selection, reorder, shape, nparr)
| (self, key, value) |
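Here is a hedged sketch of the update modes described above; the file name, dtype, and values are illustrative assumptions, not part of the original text.

```python
import numpy as np
import tables as tb

# Assumes "setitem.h5" is a writable scratch path.
with tb.open_file("setitem.h5", mode="w") as f:
    a = f.create_array(f.root, "a", np.zeros((10, 4), dtype="int32"))
    a[0] = 333                # broadcast an integer over row 0
    a[1:4] = 5                # broadcast 5 over rows 1..3
    a[5, [0, 2]] = 7          # fancy selection on the second axis
    a[a[:] > 100] = 100       # boolean selection + broadcast
```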
727,982 | tables.leaf | __str__ | The string representation for this object is its pathname in the
HDF5 object tree plus some additional metainfo. | def csformula(expected_mb):
"""Return the fitted chunksize for expected_mb."""
# For a basesize of 8 KB, this will return:
# 8 KB for datasets <= 1 MB
# 1 MB for datasets >= 10 TB
basesize = 8 * 1024 # 8 KB is a good minimum
return basesize * int(2**math.log10(expected_mb))
| (self) |
727,983 | tables.leaf | _calc_chunkshape | Calculate the shape for the HDF5 chunk. | def _calc_chunkshape(self, expectedrows, rowsize, itemsize):
"""Calculate the shape for the HDF5 chunk."""
# In case of a scalar shape, return the unit chunksize
if self.shape == ():
return (SizeType(1),)
# Compute the chunksize
MB = 1024 * 1024
expected_mb = (expectedrows * rowsize) // MB
chunksize = calc_chunksize(expected_mb)
complib = self.filters.complib
if (complib is not None and
complib.startswith("blosc2") and
self._c_classid in ('TABLE', 'CARRAY', 'EARRAY')):
# Blosc2 can introspect into blocks, so we can increase the
# chunksize for improving HDF5 perf for its internal btree.
# For the time being, this has been implemented efficiently
# just for tables, but in the future *Array objects could also
# be included.
# Use a decent default value for chunksize
chunksize *= 16
# Now, go explore the L3 size and try to find a smarter chunksize
if 'l3_cache_size' in cpu_info:
# In general, it is a good idea to set the chunksize equal to L3
l3_cache_size = cpu_info['l3_cache_size']
# cpuinfo sometimes returns cache sizes as strings (like,
# "4096 KB"), so refuse the temptation to guess and use the
# value only when it is an actual int.
# Also, sometimes cpuinfo does not return a correct L3 size;
# so in general, enforcing L3 > L2 is a good sanity check.
l2_cache_size = cpu_info.get('l2_cache_size', "Not found")
if (type(l3_cache_size) is int and
type(l2_cache_size) is int and
l3_cache_size > l2_cache_size):
chunksize = l3_cache_size
# In Blosc2, the chunksize cannot be larger than BLOSC2_MAX_BUFFERSIZE (2**31 - 32 bytes)
if chunksize > 2**31 - 32:
chunksize = 2**31 - 32
maindim = self.maindim
# Compute the chunknitems
chunknitems = chunksize // itemsize
# Safeguard against itemsizes being extremely large
if chunknitems == 0:
chunknitems = 1
chunkshape = list(self.shape)
# Check whether trimming the main dimension is enough
chunkshape[maindim] = 1
newchunknitems = np.prod(chunkshape, dtype=SizeType)
if newchunknitems <= chunknitems:
chunkshape[maindim] = chunknitems // newchunknitems
else:
# No, so start trimming other dimensions as well
for j in range(len(chunkshape)):
# Check whether trimming this dimension is enough
chunkshape[j] = 1
newchunknitems = np.prod(chunkshape, dtype=SizeType)
if newchunknitems <= chunknitems:
chunkshape[j] = chunknitems // newchunknitems
break
else:
# Oops, we ran out of the loop without a break
# Set the last dimension to chunknitems
chunkshape[-1] = chunknitems
return tuple(SizeType(s) for s in chunkshape)
| (self, expectedrows, rowsize, itemsize) |
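The sketch below shows where this heuristic surfaces in the public API: if no explicit `chunkshape` is given, `_calc_chunkshape()` derives one from `expectedrows`. The exact values depend on the heuristic above and on the local CPU cache information, so the printed shapes will vary; the file name is an illustrative assumption.

```python
import numpy as np
import tables as tb

with tb.open_file("chunks.h5", mode="w") as f:
    ea = f.create_earray(f.root, "e", atom=tb.Float64Atom(),
                         shape=(0, 1000), expectedrows=1_000_000)
    print(ea.chunkshape)   # chosen automatically from expectedrows
    ca = f.create_carray(f.root, "c", atom=tb.Float64Atom(),
                         shape=(1000, 1000), chunkshape=(100, 1000))
    print(ca.chunkshape)   # an explicit chunkshape overrides the heuristic
```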
727,984 | tables.leaf | _calc_nrowsinbuf | Calculate the number of rows that fits on a PyTables buffer. | def _calc_nrowsinbuf(self):
"""Calculate the number of rows that fits on a PyTables buffer."""
params = self._v_file.params
# Compute the nrowsinbuf
rowsize = self.rowsize
buffersize = params['IO_BUFFER_SIZE']
if rowsize != 0:
nrowsinbuf = buffersize // rowsize
else:
nrowsinbuf = 1
# Safeguard against row sizes being extremely large
if nrowsinbuf == 0:
nrowsinbuf = 1
# If rowsize is too large, issue a Performance warning
maxrowsize = params['BUFFER_TIMES'] * buffersize
if rowsize > maxrowsize:
warnings.warn("""\
The Leaf ``%s`` is exceeding the maximum recommended rowsize (%d bytes);
be ready to see PyTables asking for *lots* of memory and possibly slow
I/O. You may want to reduce the rowsize by trimming the value of
dimensions that are orthogonal (and preferably close) to the *main*
dimension of this leave. Alternatively, in case you have specified a
very small/large chunksize, you may want to increase/decrease it."""
% (self._v_pathname, maxrowsize),
PerformanceWarning)
return nrowsinbuf
| (self) |
727,985 | tables.array | _check_shape | Test that nparr shape is consistent with underlying object.
If not, try creating a new nparr object, using broadcasting if
necessary.
| def _check_shape(self, nparr, slice_shape):
"""Test that nparr shape is consistent with underlying object.
If not, try creating a new nparr object, using broadcasting if
necessary.
"""
if nparr.shape != (slice_shape + self.atom.dtype.shape):
# Create an array compliant with the specified shape
narr = np.empty(shape=slice_shape, dtype=self.atom.dtype)
# Assign the value to it. It will raise a ValueError exception
# if the objects cannot be broadcast to a single shape.
narr[...] = nparr
return narr
else:
return nparr
| (self, nparr, slice_shape) |
727,986 | tables.leaf | _f_close | Close this node in the tree.
This method has the behavior described in :meth:`Node._f_close`.
Besides that, the optional argument flush tells whether to flush
pending data to disk or not before closing.
| def _f_close(self, flush=True):
"""Close this node in the tree.
This method has the behavior described in :meth:`Node._f_close`.
Besides that, the optional argument flush tells whether to flush
pending data to disk or not before closing.
"""
if not self._v_isopen:
return # the node is already closed or not initialized
# Only do a flush in case the leaf has an IO buffer. The
# internal buffers of HDF5 will be flushed afterwards during the
# self._g_close() call. Avoiding an unnecessary flush()
# operation accelerates the closing for the unbuffered leaves.
if flush and hasattr(self, "_v_iobuf"):
self.flush()
# Close the dataset and release resources
self._g_close()
# Close myself as a node.
super()._f_close()
| (self, flush=True) |
727,987 | tables.node | _f_copy | Copy this node and return the new node.
Creates and returns a copy of the node, maybe in a different place in
the hierarchy. newparent can be a Group object (see
:ref:`GroupClassDescr`) or a pathname in string form. If it is not
specified or None, the current parent group is chosen as the new
parent. newname must be a string with a new name. If it is not
specified or None, the current name is chosen as the new name. If
recursive copy is stated, all descendants are copied as well. If
createparents is true, the needed groups for the given new parent group
path to exist will be created.
Copying a node across databases is supported but cannot be
undone. Copying a node over itself is not allowed, nor is
recursively copying a node into itself. These result in a
NodeError. Copying over another existing node is similarly not allowed,
unless the optional overwrite argument is true, in which case that node
is recursively removed before copying.
Additional keyword arguments may be passed to customize the copying
process. For instance, title and filters may be changed, user
attributes may be or may not be copied, data may be sub-sampled, stats
may be collected, etc. See the documentation for the particular node
type.
Using only the first argument is equivalent to copying the node to a
new location without changing its name. Using only the second argument
is equivalent to making a copy of the node in the same group.
| def _f_copy(self, newparent=None, newname=None,
overwrite=False, recursive=False, createparents=False,
**kwargs):
"""Copy this node and return the new node.
Creates and returns a copy of the node, maybe in a different place in
the hierarchy. newparent can be a Group object (see
:ref:`GroupClassDescr`) or a pathname in string form. If it is not
specified or None, the current parent group is chosen as the new
parent. newname must be a string with a new name. If it is not
specified or None, the current name is chosen as the new name. If
recursive copy is stated, all descendants are copied as well. If
createparents is true, the needed groups for the given new parent group
path to exist will be created.
Copying a node across databases is supported but cannot be
undone. Copying a node over itself is not allowed, nor is
recursively copying a node into itself. These result in a
NodeError. Copying over another existing node is similarly not allowed,
unless the optional overwrite argument is true, in which case that node
is recursively removed before copying.
Additional keyword arguments may be passed to customize the copying
process. For instance, title and filters may be changed, user
attributes may be or may not be copied, data may be sub-sampled, stats
may be collected, etc. See the documentation for the particular node
type.
Using only the first argument is equivalent to copying the node to a
new location without changing its name. Using only the second argument
is equivalent to making a copy of the node in the same group.
"""
self._g_check_open()
srcfile = self._v_file
srcparent = self._v_parent
srcname = self._v_name
dstparent = newparent
dstname = newname
# Set default arguments.
if dstparent is None and dstname is None:
raise NodeError("you should specify at least "
"a ``newparent`` or a ``newname`` parameter")
if dstparent is None:
dstparent = srcparent
if dstname is None:
dstname = srcname
# Get destination location.
if hasattr(dstparent, '_v_file'): # from node
dstfile = dstparent._v_file
dstpath = dstparent._v_pathname
elif hasattr(dstparent, 'startswith'): # from path
dstfile = srcfile
dstpath = dstparent
else:
raise TypeError("new parent is not a node nor a path: %r"
% (dstparent,))
# Validity checks on arguments.
if dstfile is srcfile:
# Copying over itself?
srcpath = srcparent._v_pathname
if dstpath == srcpath and dstname == srcname:
raise NodeError(
"source and destination nodes are the same node: ``%s``"
% self._v_pathname)
# Recursively copying into itself?
if recursive:
self._g_check_not_contains(dstpath)
# Note that the previous checks allow us to go ahead and create
# the parent groups if `createparents` is true. `dstParent` is
# used instead of `dstPath` because it may be in other file, and
# to avoid accepting `Node` objects when `createparents` is
# true.
dstparent = srcfile._get_or_create_path(dstparent, createparents)
self._g_check_group(dstparent) # Is it a group?
# Copying to another file with undo enabled?
if dstfile is not srcfile and srcfile.is_undo_enabled():
warnings.warn("copying across databases can not be undone "
"nor redone from this database",
UndoRedoWarning)
# Copying over an existing node?
self._g_maybe_remove(dstparent, dstname, overwrite)
# Copy the node.
# The constructor of the new node takes care of logging.
return self._g_copy(dstparent, dstname, recursive, **kwargs)
| (self, newparent=None, newname=None, overwrite=False, recursive=False, createparents=False, **kwargs) |
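A short sketch of `_f_copy()` under the rules above; the file name and node names are illustrative assumptions. The same operations are also available through `File.copy_node()`.

```python
import numpy as np
import tables as tb

with tb.open_file("copy.h5", mode="w") as f:
    g1 = f.create_group(f.root, "g1")
    g2 = f.create_group(f.root, "g2")
    a = f.create_array(g1, "a", np.arange(10), title="original")
    a._f_copy(g2)                        # same name, new parent -> /g2/a
    a._f_copy(newname="a2")              # same parent, new name -> /g1/a2
    a._f_copy(g2, "a3", title="copy")    # customize the copy via kwargs
```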
727,988 | tables.node | _f_delattr | Delete a PyTables attribute from this node.
If the named attribute does not exist, an AttributeError is
raised.
| def _f_delattr(self, name):
"""Delete a PyTables attribute from this node.
If the named attribute does not exist, an AttributeError is
raised.
"""
delattr(self._v_attrs, name)
| (self, name) |
727,989 | tables.node | _f_getattr | Get a PyTables attribute from this node.
If the named attribute does not exist, an AttributeError is
raised.
| def _f_getattr(self, name):
"""Get a PyTables attribute from this node.
If the named attribute does not exist, an AttributeError is
raised.
"""
return getattr(self._v_attrs, name)
| (self, name) |
727,990 | tables.node | _f_isvisible | Is this node visible? | def _f_isvisible(self):
"""Is this node visible?"""
self._g_check_open()
return isvisiblepath(self._v_pathname)
| (self) |
727,991 | tables.node | _f_move | Move or rename this node.
Moves a node into a new parent group, or changes the name of the
node. newparent can be a Group object (see :ref:`GroupClassDescr`) or a
pathname in string form. If it is not specified or None, the current
parent group is chosen as the new parent. newname must be a string
with a new name. If it is not specified or None, the current name is
chosen as the new name. If createparents is true, the needed groups for
the given new parent group path to exist will be created.
Moving a node across databases is not allowed, nor is moving a node
*into* itself. These result in a NodeError. However, moving a node
*over* itself is allowed and simply does nothing. Moving over another
existing node is similarly not allowed, unless the optional overwrite
argument is true, in which case that node is recursively removed before
moving.
Usually, only the first argument will be used, effectively moving the
node to a new location without changing its name. Using only the
second argument is equivalent to renaming the node in place.
| def _f_move(self, newparent=None, newname=None,
overwrite=False, createparents=False):
"""Move or rename this node.
Moves a node into a new parent group, or changes the name of the
node. newparent can be a Group object (see :ref:`GroupClassDescr`) or a
pathname in string form. If it is not specified or None, the current
parent group is chosen as the new parent. newname must be a string
with a new name. If it is not specified or None, the current name is
chosen as the new name. If createparents is true, the needed groups for
the given new parent group path to exist will be created.
Moving a node across databases is not allowed, nor is moving a node
*into* itself. These result in a NodeError. However, moving a node
*over* itself is allowed and simply does nothing. Moving over another
existing node is similarly not allowed, unless the optional overwrite
argument is true, in which case that node is recursively removed before
moving.
Usually, only the first argument will be used, effectively moving the
node to a new location without changing its name. Using only the
second argument is equivalent to renaming the node in place.
"""
self._g_check_open()
file_ = self._v_file
oldparent = self._v_parent
oldname = self._v_name
# Set default arguments.
if newparent is None and newname is None:
raise NodeError("you should specify at least "
"a ``newparent`` or a ``newname`` parameter")
if newparent is None:
newparent = oldparent
if newname is None:
newname = oldname
# Get destination location.
if hasattr(newparent, '_v_file'): # from node
newfile = newparent._v_file
newpath = newparent._v_pathname
elif hasattr(newparent, 'startswith'): # from path
newfile = file_
newpath = newparent
else:
raise TypeError("new parent is not a node nor a path: %r"
% (newparent,))
# Validity checks on arguments.
# Is it in the same file?
if newfile is not file_:
raise NodeError("nodes can not be moved across databases; "
"please make a copy of the node")
# The movement always fails if the hosting file can not be modified.
file_._check_writable()
# Moving over itself?
oldpath = oldparent._v_pathname
if newpath == oldpath and newname == oldname:
# This is equivalent to renaming the node to its current name,
# and it does not change the referenced object,
# so it is an allowed no-op.
return
# Moving into itself?
self._g_check_not_contains(newpath)
# Note that the previous checks allow us to go ahead and create
# the parent groups if `createparents` is true. `newparent` is
# used instead of `newpath` to avoid accepting `Node` objects
# when `createparents` is true.
newparent = file_._get_or_create_path(newparent, createparents)
self._g_check_group(newparent) # Is it a group?
# Moving over an existing node?
self._g_maybe_remove(newparent, newname, overwrite)
# Move the node.
oldpathname = self._v_pathname
self._g_move(newparent, newname)
# Log the change.
if file_.is_undo_enabled():
self._g_log_move(oldpathname)
| (self, newparent=None, newname=None, overwrite=False, createparents=False) |
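A minimal sketch of moving and renaming a node with `_f_move()`; the file name is an illustrative assumption. Note that the node object is updated in place, so it keeps pointing at the new location. `File.move_node()` and `File.rename_node()` offer the same functionality at file level.

```python
import tables as tb

with tb.open_file("move.h5", mode="w") as f:
    g1 = f.create_group(f.root, "g1")
    g2 = f.create_group(f.root, "g2")
    a = f.create_array(g1, "a", [1, 2, 3])
    a._f_move(g2)                  # move /g1/a -> /g2/a
    a._f_move(newname="b")         # rename in place -> /g2/b
    print(a._v_pathname)           # '/g2/b'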
727,992 | tables.node | _f_remove | Remove this node from the hierarchy.
If the node has children, recursive removal must be stated by giving
recursive a true value; otherwise, a NodeError will be raised.
If the node is a link to a Group object, and you are sure that you want
to delete it, you can do this by setting the force flag to true.
| def _f_remove(self, recursive=False, force=False):
"""Remove this node from the hierarchy.
If the node has children, recursive removal must be stated by giving
recursive a true value; otherwise, a NodeError will be raised.
If the node is a link to a Group object, and you are sure that you want
to delete it, you can do this by setting the force flag to true.
"""
self._g_check_open()
file_ = self._v_file
file_._check_writable()
if file_.is_undo_enabled():
self._g_remove_and_log(recursive, force)
else:
self._g_remove(recursive, force)
| (self, recursive=False, force=False) |
727,993 | tables.node | _f_rename | Rename this node in place.
Changes the name of a node to *newname* (a string). If a node with the
same newname already exists and overwrite is true, recursively remove
it before renaming.
| def _f_rename(self, newname, overwrite=False):
"""Rename this node in place.
Changes the name of a node to *newname* (a string). If a node with the
same newname already exists and overwrite is true, recursively remove
it before renaming.
"""
self._f_move(newname=newname, overwrite=overwrite)
| (self, newname, overwrite=False) |
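The sketch below combines `_f_rename()` and `_f_remove()`; the file name and node names are illustrative assumptions.

```python
import tables as tb

with tb.open_file("nodes.h5", mode="w") as f:
    g = f.create_group(f.root, "g")
    a = f.create_array(g, "a", [1, 2, 3])
    a._f_rename("b")               # /g/a -> /g/b
    a._f_remove()                  # delete the leaf
    g._f_remove(recursive=True)    # recursive=True is needed only while
                                   # the group still has children
```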
727,994 | tables.node | _f_setattr | Set a PyTables attribute for this node.
If the node already has a large number of attributes, a
PerformanceWarning is issued.
| def _f_setattr(self, name, value):
"""Set a PyTables attribute for this node.
If the node already has a large number of attributes, a
PerformanceWarning is issued.
"""
setattr(self._v_attrs, name, value)
| (self, name, value) |
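A hedged sketch of the `_f_setattr()`/`_f_getattr()`/`_f_delattr()` trio; these are equivalent to manipulating the node's `_v_attrs` attribute set directly. The file name and attribute are illustrative assumptions.

```python
import tables as tb

with tb.open_file("attrs.h5", mode="w") as f:
    a = f.create_array(f.root, "a", [1, 2, 3])
    a._f_setattr("units", "meters")
    print(a._f_getattr("units"))   # 'meters'
    a._f_delattr("units")          # raises AttributeError if missing
```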
727,995 | tables.array | _fancy_selection | Performs a NumPy-style fancy selection in `self`.
Implements advanced NumPy-style selection operations in
addition to the standard slice-and-int behavior.
Indexing arguments may be ints, slices or lists of indices.
Note: This is a backport from the h5py project.
| def _fancy_selection(self, args):
"""Performs a NumPy-style fancy selection in `self`.
Implements advanced NumPy-style selection operations in
addition to the standard slice-and-int behavior.
Indexing arguments may be ints, slices or lists of indices.
Note: This is a backport from the h5py project.
"""
# Internal functions
def validate_number(num, length):
"""Validate a list member for the given axis length."""
try:
num = int(num)
except TypeError:
raise TypeError("Illegal index: %r" % num)
if num > length - 1:
raise IndexError("Index out of bounds: %d" % num)
def expand_ellipsis(args, rank):
"""Expand ellipsis objects and fill in missing axes."""
n_el = sum(1 for arg in args if arg is Ellipsis)
if n_el > 1:
raise IndexError("Only one ellipsis may be used.")
elif n_el == 0 and len(args) != rank:
args = args + (Ellipsis,)
final_args = []
n_args = len(args)
for idx, arg in enumerate(args):
if arg is Ellipsis:
final_args.extend((slice(None),) * (rank - n_args + 1))
else:
final_args.append(arg)
if len(final_args) > rank:
raise IndexError("Too many indices.")
return final_args
def translate_slice(exp, length):
"""Given a slice object, return a 3-tuple (start, count, step)
This is for use with the hyperslab selection routines.
"""
start, stop, step = exp.start, exp.stop, exp.step
if start is None:
start = 0
else:
start = int(start)
if stop is None:
stop = length
else:
stop = int(stop)
if step is None:
step = 1
else:
step = int(step)
if step < 1:
raise IndexError("Step must be >= 1 (got %d)" % step)
if stop == start:
raise IndexError("Zero-length selections are not allowed")
if stop < start:
raise IndexError("Reverse-order selections are not allowed")
if start < 0:
start = length + start
if stop < 0:
stop = length + stop
if not 0 <= start <= (length - 1):
raise IndexError(
"Start index %s out of range (0-%d)" % (start, length - 1))
if not 1 <= stop <= length:
raise IndexError(
"Stop index %s out of range (1-%d)" % (stop, length))
count = (stop - start) // step
if (stop - start) % step != 0:
count += 1
if start + count > length:
raise IndexError(
"Selection out of bounds (%d; axis has %d)" %
(start + count, length))
return start, count, step
# Main code for _fancy_selection
mshape = []
selection = []
if not isinstance(args, tuple):
args = (args,)
args = expand_ellipsis(args, len(self.shape))
list_seen = False
reorder = None
for idx, (exp, length) in enumerate(zip(args, self.shape)):
if isinstance(exp, slice):
start, count, step = translate_slice(exp, length)
selection.append((start, count, step, idx, "AND"))
mshape.append(count)
else:
try:
exp = list(exp)
except TypeError:
exp = [exp] # Handle scalar index as a list of length 1
mshape.append(0) # Keep track of scalar index for NumPy
else:
mshape.append(len(exp))
if len(exp) == 0:
raise IndexError(
"Empty selections are not allowed (axis %d)" % idx)
elif len(exp) > 1:
if list_seen:
raise IndexError("Only one selection list is allowed")
else:
list_seen = True
else:
if (not isinstance(exp[0], (int, np.integer)) or
(isinstance(exp[0], np.ndarray) and not
np.issubdtype(exp[0].dtype, np.integer))):
raise TypeError("Only integer coordinates allowed.")
nexp = np.asarray(exp, dtype="i8")
# Convert negative values
nexp = np.where(nexp < 0, length + nexp, nexp)
# Check whether the list is ordered or not
# (only one unordered list is allowed)
if len(nexp) != len(np.unique(nexp)):
raise IndexError(
"Selection lists cannot have repeated values")
neworder = nexp.argsort()
if (neworder.shape != (len(exp),) or
np.sum(np.abs(neworder - np.arange(len(exp)))) != 0):
if reorder is not None:
raise IndexError(
"Only one selection list can be unordered")
corrected_idx = sum(1 for x in mshape if x != 0) - 1
reorder = (corrected_idx, neworder)
nexp = nexp[neworder]
for select_idx in range(len(nexp) + 1):
# This crazy piece of code performs a list selection
# using HDF5 hyperslabs.
# For each index, perform a "NOTB" selection on every
# portion of *this axis* which falls *outside* the list
# selection. For this to work, the input array MUST be
# monotonically increasing.
if select_idx < len(nexp):
validate_number(nexp[select_idx], length)
if select_idx == 0:
start = 0
count = nexp[0]
elif select_idx == len(nexp):
start = nexp[-1] + 1
count = length - start
else:
start = nexp[select_idx - 1] + 1
count = nexp[select_idx] - start
if count > 0:
selection.append((start, count, 1, idx, "NOTB"))
mshape = tuple(x for x in mshape if x != 0)
return selection, reorder, mshape
| (self, args) |
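The following sketch illustrates the constraints enforced by `_fancy_selection()`: at most one index list per selection, no repeated values in it, and results reordered to honor an unordered list. The file name and data are illustrative assumptions.

```python
import numpy as np
import tables as tb

with tb.open_file("fancy.h5", mode="w") as f:
    a = f.create_array(f.root, "a", np.arange(24).reshape(4, 6))
    picked = a[[0, 2], ::2]        # one list plus a slice on the other axis
    shuffled = a[[3, 0, 2], :]     # unordered list: rows come back as 3, 0, 2
    try:
        a[[1, 1, 2], :]            # repeated values are rejected
    except IndexError as exc:
        print(exc)                 # "Selection lists cannot have repeated values"
```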
727,996 | tables.node | _g_check_group | null | def _g_check_group(self, node):
# Node must be defined in order to define a Group.
# However, we need to know Group here.
# Using class_name_dict avoids a circular import.
if not isinstance(node, class_name_dict['Node']):
raise TypeError("new parent is not a registered node: %s"
% node._v_pathname)
if not isinstance(node, class_name_dict['Group']):
raise TypeError("new parent node ``%s`` is not a group"
% node._v_pathname)
| (self, node) |
727,997 | tables.node | _g_check_name | Check validity of name for this particular kind of node.
This is invoked once the standard HDF5 and natural naming checks
have successfully passed.
| def _g_check_name(self, name):
"""Check validity of name for this particular kind of node.
This is invoked once the standard HDF5 and natural naming checks
have successfully passed.
"""
if name.startswith('_i_'):
# This is reserved for table index groups.
raise ValueError(
"node name starts with reserved prefix ``_i_``: %s" % name)
| (self, name) |
727,998 | tables.node | _g_check_not_contains | null | def _g_check_not_contains(self, pathname):
# The not-a-TARDIS test. ;)
mypathname = self._v_pathname
if (mypathname == '/' # all nodes fall below the root group
or pathname == mypathname
or pathname.startswith(mypathname + '/')):
raise NodeError("can not move or recursively copy node ``%s`` "
"into itself" % mypathname)
| (self, pathname) |
727,999 | tables.node | _g_check_open | Check that the node is open.
If the node is closed, a `ClosedNodeError` is raised.
| def _g_check_open(self):
"""Check that the node is open.
If the node is closed, a `ClosedNodeError` is raised.
"""
if not self._v_isopen:
raise ClosedNodeError("the node object is closed")
assert self._v_file.isopen, "found an open node in a closed file"
| (self) |
728,000 | tables.leaf | _g_copy | null | def _g_copy(self, newparent, newname, recursive, _log=True, **kwargs):
# Compute default arguments.
start = kwargs.pop('start', None)
stop = kwargs.pop('stop', None)
step = kwargs.pop('step', None)
title = kwargs.pop('title', self._v_title)
filters = kwargs.pop('filters', self.filters)
chunkshape = kwargs.pop('chunkshape', self.chunkshape)
copyuserattrs = kwargs.pop('copyuserattrs', True)
stats = kwargs.pop('stats', None)
if chunkshape == 'keep':
chunkshape = self.chunkshape # Keep the original chunkshape
elif chunkshape == 'auto':
chunkshape = None # Will recompute chunkshape
# Fix arguments with explicit None values for backwards compatibility.
if title is None:
title = self._v_title
if filters is None:
filters = self.filters
# Create a copy of the object.
(new_node, bytes) = self._g_copy_with_stats(
newparent, newname, start, stop, step,
title, filters, chunkshape, _log, **kwargs)
# Copy user attributes if requested (or the flavor at least).
if copyuserattrs:
self._v_attrs._g_copy(new_node._v_attrs, copyclass=True)
elif 'FLAVOR' in self._v_attrs:
if self._v_file.params['PYTABLES_SYS_ATTRS']:
new_node._v_attrs._g__setattr('FLAVOR', self._flavor)
new_node._flavor = self._flavor # update cached value
# Update statistics if needed.
if stats is not None:
stats['leaves'] += 1
stats['bytes'] += bytes
return new_node
| (self, newparent, newname, recursive, _log=True, **kwargs) |
728,001 | tables.node | _g_copy_as_child | Copy this node as a child of another group.
Copies just this node into `newparent`, not recursing children
nor overwriting nodes nor logging the copy. This is intended to
be used when copying whole sub-trees.
| def _g_copy_as_child(self, newparent, **kwargs):
"""Copy this node as a child of another group.
Copies just this node into `newparent`, not recursing children
nor overwriting nodes nor logging the copy. This is intended to
be used when copying whole sub-trees.
"""
return self._g_copy(newparent, self._v_name,
recursive=False, _log=False, **kwargs)
| (self, newparent, **kwargs) |
728,002 | tables.array | _g_copy_with_stats | Private part of Leaf.copy() for each kind of leaf. | def _g_copy_with_stats(self, group, name, start, stop, step,
title, filters, chunkshape, _log, **kwargs):
"""Private part of Leaf.copy() for each kind of leaf."""
# Compute the correct indices.
(start, stop, step) = self._process_range_read(start, stop, step)
# Get the slice of the array
# (non-buffered version)
if self.shape:
arr = self[start:stop:step]
else:
arr = self[()]
# Build the new Array object. Use the _atom reserved keyword
# just in case the array is being copied from a native HDF5
# with atomic types different from scalars.
# For details, see #275 of trac.
object_ = Array(group, name, arr, title=title, _log=_log,
_atom=self.atom)
nbytes = np.prod(self.shape, dtype=SizeType) * self.atom.size
return (object_, nbytes)
| (self, group, name, start, stop, step, title, filters, chunkshape, _log, **kwargs) |
728,003 | tables.array | _g_create | Save a new array in file. | def _g_create(self):
"""Save a new array in file."""
self._v_version = obversion
try:
# `Leaf._g_post_init_hook()` should be setting the flavor on disk.
self._flavor = flavor = flavor_of(self._obj)
nparr = array_as_internal(self._obj, flavor)
except Exception: # XXX
# Problems converting data. Close the node and re-raise exception.
self.close(flush=0)
raise
# Raise an error in case of unsupported object
if nparr.dtype.kind in ['V', 'U', 'O']: # in void, unicode, object
raise TypeError("Array objects cannot currently deal with void, "
"unicode or object arrays")
# Decrease the number of references to the object
self._obj = None
# Fix the byteorder of data
nparr = self._g_fix_byteorder_data(nparr, nparr.dtype.byteorder)
# Create the array on-disk
try:
# ``self._v_objectid`` needs to be set because it would be
# needed for setting attributes in some descendants later
# on
(self._v_objectid, self.shape, self.atom) = self._create_array(
nparr, self._v_new_title, self.atom)
except Exception: # XXX
# Problems creating the Array on disk. Close node and re-raise.
self.close(flush=0)
raise
# Compute the optimal buffer size
self.nrowsinbuf = self._calc_nrowsinbuf()
# Arrays don't have chunkshapes (so, set it to None)
self._v_chunkshape = None
return self._v_objectid
| (self) |
728,004 | tables.node | _g_del_location | Clear location-dependent attributes.
This also triggers the removal of file references to this node.
| def _g_del_location(self):
"""Clear location-dependent attributes.
This also triggers the removal of file references to this node.
"""
node_manager = self._v_file._node_manager
pathname = self._v_pathname
if not self._v__deleting:
node_manager.drop_from_cache(pathname)
# Note: node_manager.drop_node does not remove the node from the
# registry if it is still open
node_manager.registry.pop(pathname, None)
self._v_file = None
self._v_isopen = False
self._v_pathname = None
self._v_name = None
self._v_depth = None
| (self) |
728,005 | tables.leaf | _g_fix_byteorder_data | Fix the byteorder of data passed in constructors. | def _g_fix_byteorder_data(self, data, dbyteorder):
"""Fix the byteorder of data passed in constructors."""
dbyteorder = byteorders[dbyteorder]
# If self.byteorder has not been passed as an argument of
# the constructor, then set it to the same value of data.
if self.byteorder is None:
self.byteorder = dbyteorder
# Do an additional in-place byteswap of data if the in-memory
# byteorder doesn't match that of the on-disk. This is the only
# place that we have to do the conversion manually. In all the
# other cases, it will be HDF5 the responsible for doing the
# byteswap properly.
if dbyteorder in ['little', 'big']:
if dbyteorder != self.byteorder:
# if data is not writeable, do a copy first
if not data.flags.writeable:
data = data.copy()
data.byteswap(True)
else:
# Fix the byteorder again, no matter which byteorder the
# user has specified in the constructor.
self.byteorder = "irrelevant"
return data
| (self, data, dbyteorder) |
728,006 | tables.node | _g_getparent | The parent :class:`Group` instance | def _g_getparent(self):
"""The parent :class:`Group` instance"""
(parentpath, nodename) = split_path(self._v_pathname)
return self._v_file._get_node(parentpath)
| (self) |
728,007 | tables.node | _g_gettitle | A description of this node. A shorthand for TITLE attribute. | def _g_gettitle(self):
"""A description of this node. A shorthand for TITLE attribute."""
if hasattr(self._v_attrs, 'TITLE'):
return self._v_attrs.TITLE
else:
return ''
| (self) |
728,008 | tables.node | _g_log_create | null | def _g_log_create(self):
self._v_file._log('CREATE', self._v_pathname)
| (self) |
728,009 | tables.node | _g_log_move | null | def _g_log_move(self, oldpathname):
self._v_file._log('MOVE', oldpathname, self._v_pathname)
| (self, oldpathname) |
728,010 | tables.node | _g_maybe_remove | null | def _g_maybe_remove(self, parent, name, overwrite):
if name in parent:
if not overwrite:
raise NodeError(
f"destination group ``{parent._v_pathname}`` already "
f"has a node named ``{name}``; you may want to use the "
f"``overwrite`` argument")
parent._f_get_child(name)._f_remove(True)
| (self, parent, name, overwrite) |
728,011 | tables.node | _g_move | Move this node in the hierarchy.
Moves the node into the given `newparent`, with the given
`newname`.
It does not log the change.
| def _g_move(self, newparent, newname):
"""Move this node in the hierarchy.
Moves the node into the given `newparent`, with the given
`newname`.
It does not log the change.
"""
oldparent = self._v_parent
oldname = self._v_name
oldpathname = self._v_pathname # to move the HDF5 node
# Try to insert the node into the new parent.
newparent._g_refnode(self, newname)
# Remove the node from the old parent.
oldparent._g_unrefnode(oldname)
# Remove location information for this node.
self._g_del_location()
# Set new location information for this node.
self._g_set_location(newparent, newname)
# hdf5extension operations:
# Update node attributes.
self._g_new(newparent, self._v_name, init=False)
# Move the node.
# self._v_parent._g_move_node(oldpathname, self._v_pathname)
self._v_parent._g_move_node(oldparent._v_objectid, oldname,
newparent._v_objectid, newname,
oldpathname, self._v_pathname)
# Tell dependent objects about the new location of this node.
self._g_update_dependent()
| (self, newparent, newname) |
728,012 | tables.array | _g_open | Get the metadata info for an array in file. | def _g_open(self):
"""Get the metadata info for an array in file."""
(oid, self.atom, self.shape, self._v_chunkshape) = self._open_array()
self.nrowsinbuf = self._calc_nrowsinbuf()
return oid
| (self) |
728,013 | tables.leaf | _g_post_init_hook | Code to be run after node creation and before creation logging.
This method gets or sets the flavor of the leaf.
| def _g_post_init_hook(self):
"""Code to be run after node creation and before creation logging.
This method gets or sets the flavor of the leaf.
"""
super()._g_post_init_hook()
if self._v_new: # set flavor of new node
if self._flavor is None:
self._flavor = internal_flavor
else: # flavor set at creation time, do not log
if self._v_file.params['PYTABLES_SYS_ATTRS']:
self._v_attrs._g__setattr('FLAVOR', self._flavor)
else: # get flavor of existing node (if any)
if self._v_file.params['PYTABLES_SYS_ATTRS']:
flavor = getattr(self._v_attrs, 'FLAVOR', internal_flavor)
self._flavor = flavor_alias_map.get(flavor, flavor)
else:
self._flavor = internal_flavor
| (self) |
728,014 | tables.node | _g_pre_kill_hook | Code to be called before killing the node. | def _g_pre_kill_hook(self):
"""Code to be called before killing the node."""
pass
| (self) |
728,015 | tables.node | _g_remove | Remove this node from the hierarchy.
If the node has children, recursive removal must be stated by
giving `recursive` a true value; otherwise, a `NodeError` will
be raised.
If `force` is set to true, the node will be removed no matter it
has children or not (useful for deleting hard links).
It does not log the change.
| def _g_remove(self, recursive, force):
"""Remove this node from the hierarchy.
If the node has children, recursive removal must be stated by
giving `recursive` a true value; otherwise, a `NodeError` will
be raised.
If `force` is set to true, the node will be removed no matter it
has children or not (useful for deleting hard links).
It does not log the change.
"""
# Remove the node from the PyTables hierarchy.
parent = self._v_parent
parent._g_unrefnode(self._v_name)
# Close the node itself.
self._f_close()
# hdf5extension operations:
# Remove the node from the HDF5 hierarchy.
self._g_delete(parent)
| (self, recursive, force) |
728,016 | tables.node | _g_remove_and_log | null | def _g_remove_and_log(self, recursive, force):
file_ = self._v_file
oldpathname = self._v_pathname
# Log *before* moving to use the right shadow name.
file_._log('REMOVE', oldpathname)
move_to_shadow(file_, oldpathname)
| (self, recursive, force) |
728,017 | tables.node | _g_set_location | Set location-dependent attributes.
Sets the location-dependent attributes of this node to reflect
that it is placed under the specified `parentnode`, with the
specified `name`.
This also triggers the insertion of file references to this
node. If the maximum recommended tree depth is exceeded, a
`PerformanceWarning` is issued.
| def _g_set_location(self, parentnode, name):
"""Set location-dependent attributes.
Sets the location-dependent attributes of this node to reflect
that it is placed under the specified `parentnode`, with the
specified `name`.
This also triggers the insertion of file references to this
node. If the maximum recommended tree depth is exceeded, a
`PerformanceWarning` is issued.
"""
file_ = parentnode._v_file
parentdepth = parentnode._v_depth
self._v_file = file_
self._v_isopen = True
root_uep = file_.root_uep
if name.startswith(root_uep):
# This has been called from File._get_node()
assert parentdepth == 0
if root_uep == "/":
self._v_pathname = name
else:
self._v_pathname = name[len(root_uep):]
_, self._v_name = split_path(name)
self._v_depth = name.count("/") - root_uep.count("/") + 1
else:
# If we enter here, it is because this has been called elsewhere
self._v_name = name
self._v_pathname = join_path(parentnode._v_pathname, name)
self._v_depth = parentdepth + 1
# Check if the node is too deep in the tree.
if parentdepth >= self._v_maxtreedepth:
warnings.warn("""\
node ``%s`` is exceeding the recommended maximum depth (%d);\
be ready to see PyTables asking for *lots* of memory and possibly slow I/O"""
% (self._v_pathname, self._v_maxtreedepth),
PerformanceWarning)
if self._v_pathname != '/':
file_._node_manager.cache_node(self, self._v_pathname)
| (self, parentnode, name) |
728,018 | tables.node | _g_settitle | null | def _g_settitle(self, title):
self._v_attrs.TITLE = title
| (self, title) |
728,019 | tables.node | _g_update_dependent | Update dependent objects after a location change.
All dependent objects (but not nodes!) referencing this node
must be updated here.
| def _g_update_dependent(self):
"""Update dependent objects after a location change.
All dependent objects (but not nodes!) referencing this node
must be updated here.
"""
if '_v_attrs' in self.__dict__:
self._v_attrs._g_update_node_location(self)
| (self) |
728,020 | tables.node | _g_update_location | Update location-dependent attributes.
Updates location data when an ancestor node has changed its
location in the hierarchy to `newparentpath`. In fact, this
method is expected to be called by an ancestor of this node.
This also triggers the update of file references to this node.
If the maximum recommended node depth is exceeded, a
`PerformanceWarning` is issued. This warning is assured to be
unique.
| def _g_update_location(self, newparentpath):
"""Update location-dependent attributes.
Updates location data when an ancestor node has changed its
location in the hierarchy to `newparentpath`. In fact, this
method is expected to be called by an ancestor of this node.
This also triggers the update of file references to this node.
If the maximum recommended node depth is exceeded, a
`PerformanceWarning` is issued. This warning is assured to be
unique.
"""
oldpath = self._v_pathname
newpath = join_path(newparentpath, self._v_name)
newdepth = newpath.count('/')
self._v_pathname = newpath
self._v_depth = newdepth
# Check if the node is too deep in the tree.
if newdepth > self._v_maxtreedepth:
warnings.warn("""\
moved descendent node is exceeding the recommended maximum depth (%d);\
be ready to see PyTables asking for *lots* of memory and possibly slow I/O"""
% (self._v_maxtreedepth,), PerformanceWarning)
node_manager = self._v_file._node_manager
node_manager.rename_node(oldpath, newpath)
# Tell dependent objects about the new location of this node.
self._g_update_dependent()
| (self, newparentpath) |
728,021 | tables.array | _init_loop | Initialization for the __iter__ iterator. | def _init_loop(self):
"""Initialization for the __iter__ iterator."""
self._nrowsread = self._start
self._startb = self._start
self._row = -1 # Sentinel
self._init = True # Sentinel
self.nrow = SizeType(self._start - self._step) # row number
| (self) |
728,022 | tables.array | _interpret_indexing | Internal routine used by __getitem__ and __setitem__ | def _interpret_indexing(self, keys):
"""Internal routine used by __getitem__ and __setitem__"""
maxlen = len(self.shape)
shape = (maxlen,)
startl = np.empty(shape=shape, dtype=SizeType)
stopl = np.empty(shape=shape, dtype=SizeType)
stepl = np.empty(shape=shape, dtype=SizeType)
stop_None = np.zeros(shape=shape, dtype=SizeType)
if not isinstance(keys, tuple):
keys = (keys,)
nkeys = len(keys)
dim = 0
# Here is some problem when dealing with [...,...] params
# but this is a bit weird way to pass parameters anyway
for key in keys:
ellipsis = 0 # Sentinel
if isinstance(key, type(Ellipsis)):
ellipsis = 1
for diml in range(dim, len(self.shape) - (nkeys - dim) + 1):
startl[dim] = 0
stopl[dim] = self.shape[diml]
stepl[dim] = 1
dim += 1
elif dim >= maxlen:
raise IndexError("Too many indices for object '%s'" %
self._v_pathname)
elif is_idx(key):
key = operator.index(key)
# Protection for index out of range
if key >= self.shape[dim]:
raise IndexError("Index out of range")
if key < 0:
# To support negative values (Fixes bug #968149)
key += self.shape[dim]
start, stop, step = self._process_range(
key, key + 1, 1, dim=dim)
stop_None[dim] = 1
elif isinstance(key, slice):
start, stop, step = self._process_range(
key.start, key.stop, key.step, dim=dim)
else:
raise TypeError("Non-valid index or slice: %s" % key)
if not ellipsis:
startl[dim] = start
stopl[dim] = stop
stepl[dim] = step
dim += 1
# Complete the other dimensions, if needed
if dim < len(self.shape):
for diml in range(dim, len(self.shape)):
startl[dim] = 0
stopl[dim] = self.shape[diml]
stepl[dim] = 1
dim += 1
# Compute the shape for the container properly. Fixes #1288792
shape = []
for dim in range(len(self.shape)):
new_dim = len(range(startl[dim], stopl[dim], stepl[dim]))
if not (new_dim == 1 and stop_None[dim]):
shape.append(new_dim)
return startl, stopl, stepl, shape
| (self, keys) |
728,023 | tables.leaf | _point_selection | Perform a point-wise selection.
`key` can be any of the following items:
* A boolean array with the same shape as self. Those positions
with True values will signal the coordinates to be returned.
* A numpy array (or list or tuple) with the point coordinates.
This has to be a two-dimensional array of size len(self.shape)
by num_elements containing a list of zero-based values
specifying the coordinates in the dataset of the selected
elements. The order of the element coordinates in the array
specifies the order in which the array elements are iterated
through when I/O is performed. Duplicate coordinate locations
are not checked for.
Return the coordinates array. If this is not possible, raise a
`TypeError` so that the next selection method can be tried out.
This is useful for whatever `Leaf` instance implementing a
point-wise selection.
| def _point_selection(self, key):
"""Perform a point-wise selection.
`key` can be any of the following items:
* A boolean array with the same shape as self. Those positions
with True values will signal the coordinates to be returned.
* A numpy array (or list or tuple) with the point coordinates.
This has to be a two-dimensional array of size len(self.shape)
by num_elements containing a list of zero-based values
specifying the coordinates in the dataset of the selected
elements. The order of the element coordinates in the array
specifies the order in which the array elements are iterated
through when I/O is performed. Duplicate coordinate locations
are not checked for.
Return the coordinates array. If this is not possible, raise a
`TypeError` so that the next selection method can be tried out.
This is useful for whatever `Leaf` instance implementing a
point-wise selection.
"""
input_key = key
if type(key) in (list, tuple):
if isinstance(key, tuple) and len(key) > len(self.shape):
raise IndexError(f"Invalid index or slice: {key!r}")
# Try to convert key to a numpy array. If not possible,
# a TypeError will be issued (to be caught later on).
try:
key = toarray(key)
except ValueError:
raise TypeError(f"Invalid index or slice: {key!r}")
elif not isinstance(key, np.ndarray):
raise TypeError(f"Invalid index or slice: {key!r}")
# Protection against empty keys
if len(key) == 0:
return np.array([], dtype="i8")
if key.dtype.kind == 'b':
if not key.shape == self.shape:
raise IndexError(
"Boolean indexing array has incompatible shape")
# Get the True coordinates (64-bit indices!)
coords = np.asarray(key.nonzero(), dtype='i8')
coords = np.transpose(coords)
elif key.dtype.kind == 'i' or key.dtype.kind == 'u':
if len(key.shape) > 2:
raise IndexError(
"Coordinate indexing array has incompatible shape")
elif len(key.shape) == 2:
if key.shape[0] != len(self.shape):
raise IndexError(
"Coordinate indexing array has incompatible shape")
coords = np.asarray(key, dtype="i8")
coords = np.transpose(coords)
else:
# For 1-dimensional datasets
coords = np.asarray(key, dtype="i8")
# handle negative indices
base = coords if coords.base is None else coords.base
if base is input_key:
# never modify the original "key" data
coords = coords.copy()
idx = coords < 0
coords[idx] = (coords + self.shape)[idx]
# bounds check
if np.any(coords < 0) or np.any(coords >= self.shape):
raise IndexError("Index out of bounds")
else:
raise TypeError("Only integer coordinates allowed.")
# We absolutely need a contiguous array
if not coords.flags.contiguous:
coords = coords.copy()
return coords
| (self, key) |
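A sketch of the two point-wise selection forms accepted above; note that a coordinate array is laid out as `len(self.shape)` rows by `num_elements` columns. The file name and data are illustrative assumptions.

```python
import numpy as np
import tables as tb

with tb.open_file("points.h5", mode="w") as f:
    a = f.create_array(f.root, "a", np.arange(12).reshape(3, 4))
    mask = a[:] % 2 == 0              # boolean array, same shape as the dataset
    evens = a[mask]                   # -> [0 2 4 6 8 10]
    coords = np.array([[0, 2],        # coordinates along axis 0
                       [1, 3]])       # coordinates along axis 1
    picked = a[coords]                # -> elements a[0, 1] and a[2, 3]
```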
728,024 | tables.leaf | _process_range | null | def _process_range(self, start, stop, step, dim=None, warn_negstep=True):
if dim is None:
nrows = self.nrows # self.shape[self.maindim]
else:
nrows = self.shape[dim]
if warn_negstep and step and step < 0:
raise ValueError("slice step cannot be negative")
# if start is not None: start = long(start)
# if stop is not None: stop = long(stop)
# if step is not None: step = long(step)
return slice(start, stop, step).indices(int(nrows))
| (self, start, stop, step, dim=None, warn_negstep=True) |
728,025 | tables.leaf | _process_range_read | null | def _process_range_read(self, start, stop, step, warn_negstep=True):
nrows = self.nrows
if start is not None and stop is None and step is None:
# Protection against start greater than available records
# nrows == 0 is a special case for empty objects
if 0 < nrows <= start:
raise IndexError("start of range (%s) is greater than "
"number of rows (%s)" % (start, nrows))
step = 1
if start == -1: # corner case
stop = nrows
else:
stop = start + 1
# Finally, get the correct values (over the main dimension)
start, stop, step = self._process_range(start, stop, step,
warn_negstep=warn_negstep)
return (start, stop, step)
| (self, start, stop, step, warn_negstep=True) |
728,026 | tables.array | _read | Read the array from disk without slice or flavor processing. | def _read(self, start, stop, step, out=None):
"""Read the array from disk without slice or flavor processing."""
nrowstoread = len(range(start, stop, step))
shape = list(self.shape)
if shape:
shape[self.maindim] = nrowstoread
if out is None:
arr = np.empty(dtype=self.atom.dtype, shape=shape)
else:
bytes_required = self.rowsize * nrowstoread
# if buffer is too small, it will segfault
if bytes_required != out.nbytes:
raise ValueError(f'output array size invalid, got {out.nbytes}'
f' bytes, need {bytes_required} bytes')
if not out.flags['C_CONTIGUOUS']:
raise ValueError('output array not C contiguous')
arr = out
# Protection against reading empty arrays
if 0 not in shape:
# Arrays that have non-zero dimensionality
self._read_array(start, stop, step, arr)
# data is always read in the system byteorder
# if the out array's byteorder is different, do a byteswap
if (out is not None and
byteorders[arr.dtype.byteorder] != sys.byteorder):
arr.byteswap(True)
return arr
| (self, start, stop, step, out=None) |
728,027 | tables.array | _read_coords | Read a set of points defined by `coords`. | def _read_coords(self, coords):
"""Read a set of points defined by `coords`."""
nparr = np.empty(dtype=self.atom.dtype, shape=len(coords))
if len(coords) > 0:
self._g_read_coords(coords, nparr)
# For zero-shaped arrays, return the scalar
if nparr.shape == ():
nparr = nparr[()]
return nparr
| (self, coords) |
728,028 | tables.array | _read_selection | Read a `selection`.
Reorder if necessary.
| def _read_selection(self, selection, reorder, shape):
"""Read a `selection`.
Reorder if necessary.
"""
# Create the container for the slice
nparr = np.empty(dtype=self.atom.dtype, shape=shape)
# Arrays that have non-zero dimensionality
self._g_read_selection(selection, nparr)
# For zero-shaped arrays, return the scalar
if nparr.shape == ():
nparr = nparr[()]
elif reorder is not None:
# We need to reorder the array
idx, neworder = reorder
k = [slice(None)] * len(shape)
k[idx] = neworder.argsort()
# Apparently, a copy is not needed here, but doing it
# for symmetry with the `_write_selection()` method.
nparr = nparr[tuple(k)].copy()
return nparr
| (self, selection, reorder, shape) |
728,029 | tables.array | _read_slice | Read a slice based on `startl`, `stopl` and `stepl`. | def _read_slice(self, startl, stopl, stepl, shape):
"""Read a slice based on `startl`, `stopl` and `stepl`."""
nparr = np.empty(dtype=self.atom.dtype, shape=shape)
# Protection against reading empty arrays
if 0 not in shape:
# Arrays that have non-zero dimensionality
self._g_read_slice(startl, stopl, stepl, nparr)
# For zero-shaped arrays, return the scalar
if nparr.shape == ():
nparr = nparr[()]
return nparr
| (self, startl, stopl, stepl, shape) |
728,030 | tables.array | _write_coords | Write `nparr` values in points defined by `coords` coordinates. | def _write_coords(self, coords, nparr):
"""Write `nparr` values in points defined by `coords` coordinates."""
if len(coords) > 0:
nparr = self._check_shape(nparr, (len(coords),))
self._g_write_coords(coords, nparr)
| (self, coords, nparr) |
728,031 | tables.array | _write_selection | Write `nparr` in `selection`.
Reorder if necessary.
| def _write_selection(self, selection, reorder, shape, nparr):
"""Write `nparr` in `selection`.
Reorder if necessary.
"""
nparr = self._check_shape(nparr, tuple(shape))
# Check whether we should reorder the array
if reorder is not None:
idx, neworder = reorder
k = [slice(None)] * len(shape)
k[idx] = neworder
# For a reason I don't understand well, we need a copy of
# the reordered array
nparr = nparr[tuple(k)].copy()
self._g_write_selection(selection, nparr)
| (self, selection, reorder, shape, nparr) |
728,032 | tables.array | _write_slice | Write `nparr` in a slice based on `startl`, `stopl` and `stepl`. | def _write_slice(self, startl, stopl, stepl, shape, nparr):
"""Write `nparr` in a slice based on `startl`, `stopl` and `stepl`."""
nparr = self._check_shape(nparr, tuple(shape))
countl = ((stopl - startl - 1) // stepl) + 1
self._g_write_slice(startl, stepl, countl, nparr)
| (self, startl, stopl, stepl, shape, nparr) |
728,033 | tables.leaf | close | Close this node in the tree.
This method is completely equivalent to :meth:`Leaf._f_close`.
| def close(self, flush=True):
"""Close this node in the tree.
This method is completely equivalent to :meth:`Leaf._f_close`.
"""
self._f_close(flush)
| (self, flush=True) |
728,034 | tables.leaf | copy | Copy this node and return the new one.
This method has the behavior described in :meth:`Node._f_copy`. Please
note that there is no recursive flag since leaves do not have child
nodes.
.. warning::
Note that unknown parameters passed to this method will be
ignored, so you may want to double-check the spelling of these
(i.e. if you write them incorrectly, they will most probably
be ignored).
Parameters
----------
title
The new title for the destination. If omitted or None, the original
title is used.
filters : Filters
Specifying this parameter overrides the original filter properties
in the source node. If specified, it must be an instance of the
Filters class (see :ref:`FiltersClassDescr`). The default is to
copy the filter properties from the source node.
copyuserattrs
You can prevent the user attributes from being copied by setting
this parameter to False. The default is to copy them.
start, stop, step : int
Specify the range of rows to be copied; the default is to copy all
the rows.
stats
This argument may be used to collect statistics on the copy
process. When used, it should be a dictionary with keys 'groups',
'leaves' and 'bytes' having a numeric value. Their values will be
incremented to reflect the number of groups, leaves and bytes,
respectively, that have been copied during the operation.
chunkshape
The chunkshape of the new leaf. It supports a couple of special
values. A value of keep means that the chunkshape will be the same
as that of the original leaf (this is the default). A value of auto means
that a new shape will be computed automatically in order to ensure
best performance when accessing the dataset through the main
dimension. Any other value should be an integer or a tuple
matching the dimensions of the leaf.
| def copy(self, newparent=None, newname=None,
overwrite=False, createparents=False, **kwargs):
"""Copy this node and return the new one.
This method has the behavior described in :meth:`Node._f_copy`. Please
note that there is no recursive flag since leaves do not have child
nodes.
.. warning::
Note that unknown parameters passed to this method will be
ignored, so you may want to double-check the spelling of these
(i.e. if you write them incorrectly, they will most probably
be ignored).
Parameters
----------
title
The new title for the destination. If omitted or None, the original
title is used.
filters : Filters
Specifying this parameter overrides the original filter properties
in the source node. If specified, it must be an instance of the
Filters class (see :ref:`FiltersClassDescr`). The default is to
copy the filter properties from the source node.
copyuserattrs
You can prevent the user attributes from being copied by setting
this parameter to False. The default is to copy them.
start, stop, step : int
Specify the range of rows to be copied; the default is to copy all
the rows.
stats
This argument may be used to collect statistics on the copy
process. When used, it should be a dictionary with keys 'groups',
'leaves' and 'bytes' having a numeric value. Their values will be
incremented to reflect the number of groups, leaves and bytes,
respectively, that have been copied during the operation.
chunkshape
The chunkshape of the new leaf. It supports a couple of special
values. A value of keep means that the chunkshape will be the same
as that of the original leaf (this is the default). A value of auto means
that a new shape will be computed automatically in order to ensure
best performance when accessing the dataset through the main
dimension. Any other value should be an integer or a tuple
matching the dimensions of the leaf.
"""
return self._f_copy(
newparent, newname, overwrite, createparents, **kwargs)
| (self, newparent=None, newname=None, overwrite=False, createparents=False, **kwargs) |
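A hedged usage sketch for copy(); the file name, node names and the decimating step are made up::

import numpy as np
import tables as tb

with tb.open_file("copy_demo.h5", "w") as h5f:
    src = h5f.create_array(h5f.root, "src", np.arange(100))
    # Copy every other row into a sibling node with a new title.
    dst = src.copy(newname="dst", step=2, title="decimated copy")
    print(dst.nrows)  # 50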
728,035 | tables.leaf | del_attr | Delete a PyTables attribute from this node.
This method has the behavior described in :meth:`Node._f_delattr`.
| def del_attr(self, name):
"""Delete a PyTables attribute from this node.
This method has the behavior described in :meth:`Node._f_delattr`.
"""
self._f_delattr(name)
| (self, name) |
728,036 | tables.leaf | flush | Flush pending data to disk.
Saves whatever remaining buffered data to disk. It also releases
I/O buffers, so if you are filling many datasets in the same
PyTables session, please call flush() frequently so as to help
PyTables to keep memory requirements low.
| def flush(self):
"""Flush pending data to disk.
Saves whatever remaining buffered data to disk. It also releases
I/O buffers, so if you are filling many datasets in the same
PyTables session, please call flush() frequently so as to help
PyTables to keep memory requirements low.
"""
self._g_flush()
| (self) |
728,037 | tables.leaf | get_attr | Get a PyTables attribute from this node.
This method has the behavior described in :meth:`Node._f_getattr`.
| def get_attr(self, name):
"""Get a PyTables attribute from this node.
This method has the behavior described in :meth:`Node._f_getattr`.
"""
return self._f_getattr(name)
| (self, name) |
728,038 | tables.array | get_enum | Get the enumerated type associated with this array.
If this array is of an enumerated type, the corresponding Enum instance
(see :ref:`EnumClassDescr`) is returned. If it is not of an enumerated
type, a TypeError is raised.
| def get_enum(self):
"""Get the enumerated type associated with this array.
If this array is of an enumerated type, the corresponding Enum instance
(see :ref:`EnumClassDescr`) is returned. If it is not of an enumerated
type, a TypeError is raised.
"""
if self.atom.kind != 'enum':
raise TypeError("array ``%s`` is not of an enumerated type"
% self._v_pathname)
return self.atom.enum
| (self) |
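A sketch, assuming the standard Enum/EnumAtom constructors, of an enumerated EArray and get_enum(); names and values are illustrative::

import tables as tb

colors = tb.Enum(['red', 'green', 'blue'])
with tb.open_file("enum_demo.h5", "w") as h5f:
    atom = tb.EnumAtom(colors, 'red', base='uint8')
    ea = h5f.create_earray(h5f.root, "colors", atom, shape=(0,))
    ea.append([colors.red, colors.blue])
    print(ea.get_enum())  # the Enum instance stored with the array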
728,039 | tables.leaf | isvisible | Is this node visible?
This method has the behavior described in :meth:`Node._f_isvisible()`.
| def isvisible(self):
"""Is this node visible?
This method has the behavior described in :meth:`Node._f_isvisible()`.
"""
return self._f_isvisible()
| (self) |
728,040 | tables.array | iterrows | Iterate over the rows of the array.
This method returns an iterator yielding an object of the current
flavor for each selected row in the array. The returned rows are taken
from the *main dimension*.
If a range is not supplied, *all the rows* in the array are iterated
upon - you can also use the :meth:`Array.__iter__` special method for
that purpose. If you only want to iterate over a given *range of rows*
in the array, you may use the start, stop and step parameters.
Examples
--------
::
result = [row for row in arrayInstance.iterrows(step=4)]
.. versionchanged:: 3.0
If the *start* parameter is provided and *stop* is None then the
array is iterated from *start* to the last line.
In PyTables < 3.0 only one element was returned.
| def iterrows(self, start=None, stop=None, step=None):
"""Iterate over the rows of the array.
This method returns an iterator yielding an object of the current
flavor for each selected row in the array. The returned rows are taken
from the *main dimension*.
If a range is not supplied, *all the rows* in the array are iterated
upon - you can also use the :meth:`Array.__iter__` special method for
that purpose. If you only want to iterate over a given *range of rows*
in the array, you may use the start, stop and step parameters.
Examples
--------
::
result = [row for row in arrayInstance.iterrows(step=4)]
.. versionchanged:: 3.0
If the *start* parameter is provided and *stop* is None then the
array is iterated from *start* to the last line.
In PyTables < 3.0 only one element was returned.
"""
try:
(self._start, self._stop, self._step) = self._process_range(
start, stop, step)
except IndexError:
# If problems with indexes, silently return the null tuple
return ()
self._init_loop()
return self
| (self, start=None, stop=None, step=None) |
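A short usage sketch for iterrows() with the range parameters; file and node names are arbitrary::

import numpy as np
import tables as tb

with tb.open_file("iter_demo.h5", "w") as h5f:
    arr = h5f.create_array(h5f.root, "a", np.arange(20))
    every_fourth = [row for row in arr.iterrows(step=4)]
    tail = [row for row in arr.iterrows(start=15)]  # start to the last row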
728,041 | tables.leaf | move | Move or rename this node.
This method has the behavior described in :meth:`Node._f_move`
| def move(self, newparent=None, newname=None,
overwrite=False, createparents=False):
"""Move or rename this node.
This method has the behavior described in :meth:`Node._f_move`
"""
self._f_move(newparent, newname, overwrite, createparents)
| (self, newparent=None, newname=None, overwrite=False, createparents=False) |
728,042 | tables.array | read | Get data in the array as an object of the current flavor.
The start, stop and step parameters can be used to select only a
*range of rows* in the array. Their meanings are the same as in
the built-in range() Python function, except that negative values
of step are not allowed yet. Moreover, if only start is specified,
then stop will be set to start + 1. If you specify neither
start nor stop, then *all the rows* in the array are selected.
The out parameter may be used to specify a NumPy array to receive
the output data. Note that the array must have the same size as
the data selected with the other parameters. Note that the array's
datatype is not checked and no type casting is performed, so if it
does not match the datatype on disk, the output will not be correct.
Also, this parameter is only valid when the array's flavor is set
to 'numpy'. Otherwise, a TypeError will be raised.
When data is read from disk in NumPy format, the output will be
in the current system's byteorder, regardless of how it is stored
on disk.
The exception is when an output buffer is supplied, in which case
the output will be in the byteorder of that output buffer.
.. versionchanged:: 3.0
Added the *out* parameter.
| def read(self, start=None, stop=None, step=None, out=None):
"""Get data in the array as an object of the current flavor.
The start, stop and step parameters can be used to select only a
*range of rows* in the array. Their meanings are the same as in
the built-in range() Python function, except that negative values
of step are not allowed yet. Moreover, if only start is specified,
then stop will be set to start + 1. If you specify neither
start nor stop, then *all the rows* in the array are selected.
The out parameter may be used to specify a NumPy array to receive
the output data. Note that the array must have the same size as
the data selected with the other parameters. Note that the array's
datatype is not checked and no type casting is performed, so if it
does not match the datatype on disk, the output will not be correct.
Also, this parameter is only valid when the array's flavor is set
to 'numpy'. Otherwise, a TypeError will be raised.
When data is read from disk in NumPy format, the output will be
in the current system's byteorder, regardless of how it is stored
on disk.
The exception is when an output buffer is supplied, in which case
the output will be in the byteorder of that output buffer.
.. versionchanged:: 3.0
Added the *out* parameter.
"""
self._g_check_open()
if out is not None and self.flavor != 'numpy':
msg = ("Optional 'out' argument may only be supplied if array "
"flavor is 'numpy', currently is {}").format(self.flavor)
raise TypeError(msg)
(start, stop, step) = self._process_range_read(start, stop, step)
arr = self._read(start, stop, step, out)
return internal_to_flavor(arr, self.flavor)
| (self, start=None, stop=None, step=None, out=None) |
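A hedged sketch of read() with a preallocated out buffer; the buffer must match the size of the selection and the leaf's flavor must be 'numpy' (the default)::

import numpy as np
import tables as tb

with tb.open_file("read_demo.h5", "w") as h5f:
    arr = h5f.create_array(h5f.root, "a", np.arange(10, dtype=np.int64))
    out = np.empty(5, dtype=np.int64)  # 5 rows are selected below
    arr.read(start=0, stop=10, step=2, out=out)
    print(out)  # [0 2 4 6 8]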
728,043 | tables.leaf | remove | Remove this node from the hierarchy.
This method has the behavior described
in :meth:`Node._f_remove`. Please note that there is no recursive flag
since leaves do not have child nodes.
| def remove(self):
"""Remove this node from the hierarchy.
This method has the behavior described
in :meth:`Node._f_remove`. Please note that there is no recursive flag
since leaves do not have child nodes.
"""
self._f_remove(False)
| (self) |
728,044 | tables.leaf | rename | Rename this node in place.
This method has the behavior described in :meth:`Node._f_rename()`.
| def rename(self, newname):
"""Rename this node in place.
This method has the behavior described in :meth:`Node._f_rename()`.
"""
self._f_rename(newname)
| (self, newname) |
728,045 | tables.leaf | set_attr | Set a PyTables attribute for this node.
This method has the behavior described in :meth:`Node._f_setattr()`.
| def set_attr(self, name, value):
"""Set a PyTables attribute for this node.
This method has the behavior described in :meth:`Node._f_setattr()`.
"""
self._f_setattr(name, value)
| (self, name, value) |
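A minimal sketch of the attribute helpers on a leaf; the attribute name and value are arbitrary::

import numpy as np
import tables as tb

with tb.open_file("attrs_demo.h5", "w") as h5f:
    arr = h5f.create_array(h5f.root, "a", np.arange(3))
    arr.set_attr("units", "meters")
    print(arr.get_attr("units"))  # 'meters'
    arr.del_attr("units")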
728,046 | tables.leaf | truncate | Truncate the main dimension to be size rows.
If the main dimension previously was larger than this size, the extra
data is lost. If the main dimension previously was shorter, it is
extended, and the extended part is filled with the default values.
The truncation operation can only be applied to *enlargeable* datasets,
else a TypeError will be raised.
| def truncate(self, size):
"""Truncate the main dimension to be size rows.
If the main dimension previously was larger than this size, the extra
data is lost. If the main dimension previously was shorter, it is
extended, and the extended part is filled with the default values.
The truncation operation can only be applied to *enlargeable* datasets,
else a TypeError will be raised.
"""
# Non-enlargeable arrays (Array, CArray) cannot be truncated
if self.extdim < 0:
raise TypeError("non-enlargeable datasets cannot be truncated")
self._g_truncate(size)
| (self, size) |
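Truncation only applies to enlargeable leaves; a hedged sketch with an EArray (names are illustrative)::

import numpy as np
import tables as tb

with tb.open_file("trunc_demo.h5", "w") as h5f:
    ea = h5f.create_earray(h5f.root, "e", tb.Int32Atom(), shape=(0,))
    ea.append(np.arange(10, dtype=np.int32))
    ea.truncate(4)   # keep only the first 4 rows
    print(ea.nrows)  # 4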
728,047 | tables.atom | Atom | Defines the type of atomic cells stored in a dataset.
The meaning of *atomic* is that individual elements of a cell can
not be extracted directly by indexing (i.e. __getitem__()) the
dataset; e.g. if a dataset has shape (2, 2) and its atoms have
shape (3,), to get the third element of the cell at (1, 0) one
should use dataset[1,0][2] instead of dataset[1,0,2].
The Atom class is meant to declare the different properties of the
*base element* (also known as *atom*) of CArray, EArray and
VLArray datasets, although they are also used to describe the base
elements of Array datasets. Atoms have the property that their
length is always the same. However, you can grow datasets along
the extensible dimension in the case of EArray or put a variable
number of them on a VLArray row. Moreover, they are not restricted
to scalar values, and they can be *fully multidimensional
objects*.
Parameters
----------
itemsize : int
For types with a non-fixed size, this sets the size in
bytes of individual items in the atom.
shape : tuple
Sets the shape of the atom. An integer shape of
N is equivalent to the tuple (N,).
dflt
Sets the default value for the atom.
The following are the public methods and attributes of the Atom class.
Notes
-----
A series of descendant classes are offered in order to make the
use of these element descriptions easier. You should use a
particular Atom descendant class whenever you know the exact type
you will need when writing your code. Otherwise, you may use one
of the Atom.from_*() factory methods.
.. rubric:: Atom attributes
.. attribute:: dflt
The default value of the atom.
If the user does not supply a value for an element while
filling a dataset, this default value will be written to disk.
If the user supplies a scalar value for a multidimensional
atom, this value is automatically *broadcast* to all the items
in the atom cell. If dflt is not supplied, an appropriate zero
value (or *null* string) will be chosen by default. Please
note that default values are kept internally as NumPy objects.
.. attribute:: dtype
The NumPy dtype that most closely matches this atom.
.. attribute:: itemsize
Size in bytes of a single item in the atom.
Especially useful for atoms of the string kind.
.. attribute:: kind
The PyTables kind of the atom (a string).
.. attribute:: shape
The shape of the atom (a tuple for scalar atoms).
.. attribute:: type
The PyTables type of the atom (a string).
Atoms can be compared with atoms and other objects for
strict (in)equality without having to compare individual
attributes::
>>> atom1 = StringAtom(itemsize=10) # same as ``atom2``
>>> atom2 = Atom.from_kind('string', 10) # same as ``atom1``
>>> atom3 = IntAtom()
>>> atom1 == 'foo'
False
>>> atom1 == atom2
True
>>> atom2 != atom1
False
>>> atom1 == atom3
False
>>> atom3 != atom2
True
| class Atom(metaclass=MetaAtom):
"""Defines the type of atomic cells stored in a dataset.
The meaning of *atomic* is that individual elements of a cell can
not be extracted directly by indexing (i.e. __getitem__()) the
dataset; e.g. if a dataset has shape (2, 2) and its atoms have
shape (3,), to get the third element of the cell at (1, 0) one
should use dataset[1,0][2] instead of dataset[1,0,2].
The Atom class is meant to declare the different properties of the
*base element* (also known as *atom*) of CArray, EArray and
VLArray datasets, although they are also used to describe the base
elements of Array datasets. Atoms have the property that their
length is always the same. However, you can grow datasets along
the extensible dimension in the case of EArray or put a variable
number of them on a VLArray row. Moreover, they are not restricted
to scalar values, and they can be *fully multidimensional
objects*.
Parameters
----------
itemsize : int
For types with a non-fixed size, this sets the size in
bytes of individual items in the atom.
shape : tuple
Sets the shape of the atom. An integer shape of
N is equivalent to the tuple (N,).
dflt
Sets the default value for the atom.
The following are the public methods and attributes of the Atom class.
Notes
-----
A series of descendant classes are offered in order to make the
use of these element descriptions easier. You should use a
particular Atom descendant class whenever you know the exact type
you will need when writing your code. Otherwise, you may use one
of the Atom.from_*() factory methods.
.. rubric:: Atom attributes
.. attribute:: dflt
The default value of the atom.
If the user does not supply a value for an element while
filling a dataset, this default value will be written to disk.
If the user supplies a scalar value for a multidimensional
atom, this value is automatically *broadcast* to all the items
in the atom cell. If dflt is not supplied, an appropriate zero
value (or *null* string) will be chosen by default. Please
note that default values are kept internally as NumPy objects.
.. attribute:: dtype
The NumPy dtype that most closely matches this atom.
.. attribute:: itemsize
Size in bytes of a single item in the atom.
Especially useful for atoms of the string kind.
.. attribute:: kind
The PyTables kind of the atom (a string).
.. attribute:: shape
The shape of the atom (a tuple for scalar atoms).
.. attribute:: type
The PyTables type of the atom (a string).
Atoms can be compared with atoms and other objects for
strict (in)equality without having to compare individual
attributes::
>>> atom1 = StringAtom(itemsize=10) # same as ``atom2``
>>> atom2 = Atom.from_kind('string', 10) # same as ``atom1``
>>> atom3 = IntAtom()
>>> atom1 == 'foo'
False
>>> atom1 == atom2
True
>>> atom2 != atom1
False
>>> atom1 == atom3
False
>>> atom3 != atom2
True
"""
@classmethod
def prefix(cls):
"""Return the atom class prefix."""
cname = cls.__name__
return cname[:cname.rfind('Atom')]
@classmethod
def from_sctype(cls, sctype, shape=(), dflt=None):
"""Create an Atom from a NumPy scalar type sctype.
Optional shape and default value may be specified as the
shape and dflt
arguments, respectively. Information in the
sctype not represented in an Atom is ignored::
>>> import numpy as np
>>> Atom.from_sctype(np.int16, shape=(2, 2))
Int16Atom(shape=(2, 2), dflt=0)
>>> Atom.from_sctype('S5', dflt='hello')
Traceback (most recent call last):
...
ValueError: unknown NumPy scalar type: 'S5'
>>> Atom.from_sctype('float64')
Float64Atom(shape=(), dflt=0.0)
"""
if (not isinstance(sctype, type)
or not issubclass(sctype, np.generic)):
if "," in sctype:
raise ValueError(f"unknown NumPy scalar type: {sctype!r}")
try:
dtype = np.dtype(sctype)
except TypeError:
raise ValueError(f"unknown NumPy scalar type: {sctype!r}") from None
if issubclass(dtype.type, np.flexible) and dtype.itemsize > 0:
raise ValueError(f"unknown NumPy scalar type: {sctype!r}")
sctype = dtype.type
return cls.from_dtype(np.dtype((sctype, shape)), dflt)
@classmethod
def from_dtype(cls, dtype, dflt=None):
"""Create an Atom from a NumPy dtype.
An optional default value may be specified as the dflt
argument. Information in the dtype not represented in an Atom is
ignored::
>>> import numpy as np
>>> Atom.from_dtype(np.dtype((np.int16, (2, 2))))
Int16Atom(shape=(2, 2), dflt=0)
>>> Atom.from_dtype(np.dtype('float64'))
Float64Atom(shape=(), dflt=0.0)
Note: for easier use in Python 3, where all strings lead to the
Unicode dtype, this dtype will also generate a StringAtom. Since
this is only viable for strings that are castable as ascii, a
warning is issued.
>>> Atom.from_dtype(np.dtype('U20')) # doctest: +SKIP
Atom.py:392: FlavorWarning: support for unicode type is very
limited, and only works for strings that can be cast as ascii
StringAtom(itemsize=20, shape=(), dflt=b'')
"""
basedtype = dtype.base
if basedtype.names:
raise ValueError("compound data types are not supported: %r"
% dtype)
if basedtype.shape != ():
raise ValueError("nested data types are not supported: %r"
% dtype)
if basedtype.kind == 'S': # can not reuse something like 'string80'
itemsize = basedtype.itemsize
return cls.from_kind('string', itemsize, dtype.shape, dflt)
elif basedtype.kind == 'U':
# workaround for unicode type (standard string type in Python 3)
warnings.warn("support for unicode type is very limited, and "
"only works for strings that can be cast as ascii",
FlavorWarning)
itemsize = basedtype.itemsize // 4
assert str(itemsize) in basedtype.str, (
"something went wrong in handling unicode.")
return cls.from_kind('string', itemsize, dtype.shape, dflt)
# Most NumPy types have direct correspondence with PyTables types.
return cls.from_type(basedtype.name, dtype.shape, dflt)
@classmethod
def from_type(cls, type, shape=(), dflt=None):
"""Create an Atom from a PyTables type.
Optional shape and default value may be specified as the
shape and dflt arguments, respectively::
>>> Atom.from_type('bool')
BoolAtom(shape=(), dflt=False)
>>> Atom.from_type('int16', shape=(2, 2))
Int16Atom(shape=(2, 2), dflt=0)
>>> Atom.from_type('string40', dflt='hello')
Traceback (most recent call last):
...
ValueError: unknown type: 'string40'
>>> Atom.from_type('Float64')
Traceback (most recent call last):
...
ValueError: unknown type: 'Float64'
"""
if type not in all_types:
raise ValueError(f"unknown type: {type!r}")
kind, itemsize = split_type(type)
return cls.from_kind(kind, itemsize, shape, dflt)
@classmethod
def from_kind(cls, kind, itemsize=None, shape=(), dflt=None):
"""Create an Atom from a PyTables kind.
Optional item size, shape and default value may be
specified as the itemsize, shape and dflt
arguments, respectively. Bear in mind that not all atoms support
a default item size::
>>> Atom.from_kind('int', itemsize=2, shape=(2, 2))
Int16Atom(shape=(2, 2), dflt=0)
>>> Atom.from_kind('int', shape=(2, 2))
Int32Atom(shape=(2, 2), dflt=0)
>>> Atom.from_kind('int', shape=1)
Int32Atom(shape=(1,), dflt=0)
>>> Atom.from_kind('string', dflt=b'hello')
Traceback (most recent call last):
...
ValueError: no default item size for kind ``string``
>>> Atom.from_kind('Float')
Traceback (most recent call last):
...
ValueError: unknown kind: 'Float'
Moreover, some kinds with atypical constructor signatures
are not supported; you need to use the proper
constructor::
>>> Atom.from_kind('enum') #doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: the ``enum`` kind is not supported...
"""
kwargs = {'shape': shape}
if kind not in atom_map:
raise ValueError(f"unknown kind: {kind!r}")
# This incompatibility detection may get out-of-date and is
# too hard-wired, but I couldn't come up with something
# smarter. -- Ivan (2007-02-08)
if kind in ['enum']:
raise ValueError("the ``%s`` kind is not supported; "
"please use the appropriate constructor"
% kind)
# If no `itemsize` is given, try to get the default type of the
# kind (which has a fixed item size).
if itemsize is None:
if kind not in deftype_from_kind:
raise ValueError("no default item size for kind ``%s``"
% kind)
type_ = deftype_from_kind[kind]
kind, itemsize = split_type(type_)
kdata = atom_map[kind]
# Look up the class and set a possible item size.
if hasattr(kdata, 'kind'): # atom class: non-fixed item size
atomclass = kdata
kwargs['itemsize'] = itemsize
else: # dictionary: fixed item size
if itemsize not in kdata:
raise _invalid_itemsize_error(kind, itemsize, kdata)
atomclass = kdata[itemsize]
# Only set a `dflt` argument if given (`None` may not be understood).
if dflt is not None:
kwargs['dflt'] = dflt
return atomclass(**kwargs)
@property
def size(self):
"""Total size in bytes of the atom."""
return self.dtype.itemsize
@property
def recarrtype(self):
"""String type to be used in numpy.rec.array()."""
return str(self.dtype.shape) + self.dtype.base.str[1:]
@property
def ndim(self):
"""The number of dimensions of the atom.
.. versionadded:: 2.4"""
return len(self.shape)
def __init__(self, nptype, shape, dflt):
if not hasattr(self, 'type'):
raise NotImplementedError("``%s`` is an abstract class; "
"please use one of its subclasses"
% self.__class__.__name__)
self.shape = shape = _normalize_shape(shape)
"""The shape of the atom (a tuple for scalar atoms)."""
# Curiously enough, NumPy isn't generally able to accept NumPy
# integers in a shape. ;(
npshape = tuple(int(s) for s in shape)
self.dtype = dtype = np.dtype((nptype, npshape))
"""The NumPy dtype that most closely matches this atom."""
self.dflt = _normalize_default(dflt, dtype)
"""The default value of the atom.
If the user does not supply a value for an element while
filling a dataset, this default value will be written to
disk. If the user supplies a scalar value for a
multidimensional atom, this value is automatically *broadcast*
to all the items in the atom cell. If dflt is not supplied, an
appropriate zero value (or *null* string) will be chosen by
default. Please note that default values are kept internally
as NumPy objects."""
def __repr__(self):
args = f'shape={self.shape}, dflt={self.dflt!r}'
if not hasattr(self.__class__.itemsize, '__int__'): # non-fixed
args = f'itemsize={self.itemsize}, {args}'
return f'{self.__class__.__name__}({args})'
__eq__ = _cmp_dispatcher('_is_equal_to_atom')
def __ne__(self, other):
return not self.__eq__(other)
# XXX: API incompatible change for PyTables 3 line
# Overriding __eq__ blocks inheritance of __hash__ in 3.x
# def __hash__(self):
# return hash((self.__class__, self.type, self.shape, self.itemsize,
# self.dflt))
def copy(self, **override):
"""Get a copy of the atom, possibly overriding some arguments.
Constructor arguments to be overridden must be passed as
keyword arguments::
>>> atom1 = Int32Atom(shape=12)
>>> atom2 = atom1.copy()
>>> print(atom1)
Int32Atom(shape=(12,), dflt=0)
>>> print(atom2)
Int32Atom(shape=(12,), dflt=0)
>>> atom1 is atom2
False
>>> atom3 = atom1.copy(shape=(2, 2))
>>> print(atom3)
Int32Atom(shape=(2, 2), dflt=0)
>>> atom1.copy(foobar=42) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...__init__() got an unexpected keyword argument 'foobar'
"""
newargs = self._get_init_args()
newargs.update(override)
return self.__class__(**newargs)
def _get_init_args(self):
"""Get a dictionary of instance constructor arguments.
This implementation works on classes which use the same names
for both constructor arguments and instance attributes.
"""
signature = inspect.signature(self.__init__)
parameters = signature.parameters
args = [arg for arg, p in parameters.items()
if p.kind is p.POSITIONAL_OR_KEYWORD]
return {arg: getattr(self, arg) for arg in args if arg != 'self'}
def _is_equal_to_atom(self, atom):
"""Is this object equal to the given `atom`?"""
return (self.type == atom.type and self.shape == atom.shape
and self.itemsize == atom.itemsize
and np.all(self.dflt == atom.dflt))
| (nptype, shape, dflt) |
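A small sketch, complementing the doctests above, that derives an Atom from a NumPy dtype and uses it to create a chunked dataset; names are illustrative::

import numpy as np
import tables as tb

atom = tb.Atom.from_dtype(np.dtype('float32'))
print(atom.dtype, atom.itemsize, atom.ndim)  # float32 4 0
with tb.open_file("atom_demo.h5", "w") as h5f:
    h5f.create_carray(h5f.root, "c", atom, shape=(100, 100))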
728,048 | tables.atom | dispatched_cmp | null | def _cmp_dispatcher(other_method_name):
"""Dispatch comparisons to a method of the *other* object.
Returns a new *rich comparison* method which dispatches calls to
the method `other_method_name` of the *other* object. If there is
no such method in the object, ``False`` is returned.
This is part of the implementation of a double dispatch pattern.
"""
def dispatched_cmp(self, other):
try:
other_method = getattr(other, other_method_name)
except AttributeError:
return False
return other_method(self)
return dispatched_cmp
| (self, other) |
728,049 | tables.atom | __init__ | null | def __init__(self, nptype, shape, dflt):
if not hasattr(self, 'type'):
raise NotImplementedError("``%s`` is an abstract class; "
"please use one of its subclasses"
% self.__class__.__name__)
self.shape = shape = _normalize_shape(shape)
"""The shape of the atom (a tuple for scalar atoms)."""
# Curiously enough, NumPy isn't generally able to accept NumPy
# integers in a shape. ;(
npshape = tuple(int(s) for s in shape)
self.dtype = dtype = np.dtype((nptype, npshape))
"""The NumPy dtype that most closely matches this atom."""
self.dflt = _normalize_default(dflt, dtype)
"""The default value of the atom.
If the user does not supply a value for an element while
filling a dataset, this default value will be written to
disk. If the user supplies a scalar value for a
multidimensional atom, this value is automatically *broadcast*
to all the items in the atom cell. If dflt is not supplied, an
appropriate zero value (or *null* string) will be chosen by
default. Please note that default values are kept internally
as NumPy objects."""
| (self, nptype, shape, dflt) |
728,051 | tables.atom | __repr__ | null | def __repr__(self):
args = f'shape={self.shape}, dflt={self.dflt!r}'
if not hasattr(self.__class__.itemsize, '__int__'): # non-fixed
args = f'itemsize={self.itemsize}, {args}'
return f'{self.__class__.__name__}({args})'
| (self) |
728,052 | tables.atom | _get_init_args | Get a dictionary of instance constructor arguments.
This implementation works on classes which use the same names
for both constructor arguments and instance attributes.
| def _get_init_args(self):
"""Get a dictionary of instance constructor arguments.
This implementation works on classes which use the same names
for both constructor arguments and instance attributes.
"""
signature = inspect.signature(self.__init__)
parameters = signature.parameters
args = [arg for arg, p in parameters.items()
if p.kind is p.POSITIONAL_OR_KEYWORD]
return {arg: getattr(self, arg) for arg in args if arg != 'self'}
| (self) |
728,053 | tables.atom | _is_equal_to_atom | Is this object equal to the given `atom`? | def _is_equal_to_atom(self, atom):
"""Is this object equal to the given `atom`?"""
return (self.type == atom.type and self.shape == atom.shape
and self.itemsize == atom.itemsize
and np.all(self.dflt == atom.dflt))
| (self, atom) |
728,054 | tables.atom | copy | Get a copy of the atom, possibly overriding some arguments.
Constructor arguments to be overridden must be passed as
keyword arguments::
>>> atom1 = Int32Atom(shape=12)
>>> atom2 = atom1.copy()
>>> print(atom1)
Int32Atom(shape=(12,), dflt=0)
>>> print(atom2)
Int32Atom(shape=(12,), dflt=0)
>>> atom1 is atom2
False
>>> atom3 = atom1.copy(shape=(2, 2))
>>> print(atom3)
Int32Atom(shape=(2, 2), dflt=0)
>>> atom1.copy(foobar=42) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...__init__() got an unexpected keyword argument 'foobar'
| def copy(self, **override):
"""Get a copy of the atom, possibly overriding some arguments.
Constructor arguments to be overridden must be passed as
keyword arguments::
>>> atom1 = Int32Atom(shape=12)
>>> atom2 = atom1.copy()
>>> print(atom1)
Int32Atom(shape=(12,), dflt=0)
>>> print(atom2)
Int32Atom(shape=(12,), dflt=0)
>>> atom1 is atom2
False
>>> atom3 = atom1.copy(shape=(2, 2))
>>> print(atom3)
Int32Atom(shape=(2, 2), dflt=0)
>>> atom1.copy(foobar=42) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...__init__() got an unexpected keyword argument 'foobar'
"""
newargs = self._get_init_args()
newargs.update(override)
return self.__class__(**newargs)
| (self, **override) |
728,055 | tables.atom | BoolAtom | Defines an atom of type bool. | class BoolAtom(Atom):
"""Defines an atom of type bool."""
kind = 'bool'
itemsize = 1
type = 'bool'
_deftype = 'bool8'
_defvalue = False
def __init__(self, shape=(), dflt=_defvalue):
Atom.__init__(self, self.type, shape, dflt)
| (shape=(), dflt=False) |
728,057 | tables.atom | __init__ | null | def __init__(self, shape=(), dflt=_defvalue):
Atom.__init__(self, self.type, shape, dflt)
| (self, shape=(), dflt=False) |
728,063 | tables.description | BoolCol | Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
| from tables.description import BoolCol
| (*args, **kwargs) |
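A hedged sketch of declaring typed columns with explicit positions, as the docstring above describes; the table description is made up::

import tables as tb

class Particle(tb.IsDescription):
    name = tb.StringCol(16, pos=0)
    mass = tb.Float64Col(pos=1)
    alive = tb.BoolCol(dflt=True, pos=2)

with tb.open_file("cols_demo.h5", "w") as h5f:
    table = h5f.create_table(h5f.root, "particles", Particle)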
728,064 | tables.description | dispatched_cmp | null | def same_position(oldmethod):
"""Decorate `oldmethod` to also compare the `_v_pos` attribute."""
def newmethod(self, other):
try:
other._v_pos
except AttributeError:
return False # not a column definition
return self._v_pos == other._v_pos and oldmethod(self, other)
newmethod.__name__ = oldmethod.__name__
newmethod.__doc__ = oldmethod.__doc__
return newmethod
| (self, other) |
728,065 | tables.description | __init__ | null | @classmethod
def _subclass_from_prefix(cls, prefix):
"""Get a column subclass for the given `prefix`."""
cname = '%sCol' % prefix
class_from_prefix = cls._class_from_prefix
if cname in class_from_prefix:
return class_from_prefix[cname]
atombase = getattr(atom, '%sAtom' % prefix)
class NewCol(cls, atombase):
"""Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
"""
def __init__(self, *args, **kwargs):
pos = kwargs.pop('pos', None)
col_attrs = kwargs.pop('attrs', {})
offset = kwargs.pop('_offset', None)
class_from_prefix = self._class_from_prefix
atombase.__init__(self, *args, **kwargs)
# The constructor of an abstract atom may have changed
# the class of `self` to something different from `NewCol`
# and `atombase` (that's why the prefix map is saved).
if self.__class__ is not NewCol:
colclass = class_from_prefix[self.prefix()]
self.__class__ = colclass
self._v_pos = pos
self._v_offset = offset
self._v_col_attrs = col_attrs
__eq__ = same_position(atombase.__eq__)
_is_equal_to_atom = same_position(atombase._is_equal_to_atom)
# XXX: API incompatible change for PyTables 3 line
# Overriding __eq__ blocks inheritance of __hash__ in 3.x
# def __hash__(self):
# return hash((self._v_pos, self.atombase))
if prefix == 'Enum':
_is_equal_to_enumatom = same_position(
atombase._is_equal_to_enumatom)
NewCol.__name__ = cname
class_from_prefix[prefix] = NewCol
return NewCol
| (self, *args, **kwargs) |
728,067 | tables.description | __repr__ | null | def __repr__(self):
# Reuse the atom representation.
atomrepr = super().__repr__()
lpar = atomrepr.index('(')
rpar = atomrepr.rindex(')')
atomargs = atomrepr[lpar + 1:rpar]
classname = self.__class__.__name__
if self._v_col_attrs:
return (f'{classname}({atomargs}, pos={self._v_pos}'
f', attrs={self._v_col_attrs})')
return f'{classname}({atomargs}, pos={self._v_pos})'
| (self) |
728,068 | tables.description | _get_init_args | Get a dictionary of instance constructor arguments. | def _get_init_args(self):
"""Get a dictionary of instance constructor arguments."""
kwargs = {arg: getattr(self, arg) for arg in ('shape', 'dflt')}
kwargs['pos'] = getattr(self, '_v_pos', None)
return kwargs
| (self) |
728,069 | tables.description | _is_equal_to_atom | Is this object equal to the given `atom`? | def same_position(oldmethod):
"""Decorate `oldmethod` to also compare the `_v_pos` attribute."""
def newmethod(self, other):
try:
other._v_pos
except AttributeError:
return False # not a column definition
return self._v_pos == other._v_pos and oldmethod(self, other)
newmethod.__name__ = oldmethod.__name__
newmethod.__doc__ = oldmethod.__doc__
return newmethod
| (self, other) |
728,071 | tables.carray | CArray | This class represents homogeneous datasets in an HDF5 file.
The difference between a CArray and a normal Array (see
:ref:`ArrayClassDescr`), from which it inherits, is that a CArray
has a chunked layout and, as a consequence, it supports compression.
You can use datasets of this class to easily save or load arrays to
or from disk, with compression support included.
CArray includes all the instance variables and methods of Array.
Only those with different behavior are mentioned here.
Parameters
----------
parentnode
The parent :class:`Group` object.
.. versionchanged:: 3.0
Renamed from *parentNode* to *parentnode*.
name : str
The name of this node in its parent group.
atom
An `Atom` instance representing the *type* and *shape* of
the atomic objects to be saved.
shape
The shape of the new array.
title
A description for this node (it sets the ``TITLE`` HDF5
attribute on disk).
filters
An instance of the `Filters` class that provides
information about the desired I/O filters to be applied
during the life of this object.
chunkshape
The shape of the data chunk to be read or written in a
single HDF5 I/O operation. Filters are applied to those
chunks of data. The dimensionality of `chunkshape` must
be the same as that of `shape`. If ``None``, a sensible
value is calculated (which is recommended).
byteorder
The byteorder of the data *on disk*, specified as 'little'
or 'big'. If this is not specified, the byteorder is that
of the platform.
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time, object
birth time); default True. Semantics of these times depend on their
implementation in the HDF5 library: refer to documentation of the
H5O_info_t data structure. As of HDF5 1.8.15, only ctime (metadata
change time) is implemented.
.. versionadded:: 3.4.3
Examples
--------
See below a small example of the use of the `CArray` class.
The code is available in ``examples/carray1.py``::
import numpy as np
import tables as tb
fileName = 'carray1.h5'
shape = (200, 300)
atom = tb.UInt8Atom()
filters = tb.Filters(complevel=5, complib='zlib')
h5f = tb.open_file(fileName, 'w')
ca = h5f.create_carray(h5f.root, 'carray', atom, shape,
filters=filters)
# Fill a hyperslab in ``ca``.
ca[10:60, 20:70] = np.ones((50, 50))
h5f.close()
# Re-open and read another hyperslab
h5f = tb.open_file(fileName)
print(h5f)
print(h5f.root.carray[8:12, 18:22])
h5f.close()
The output for the previous script is something like::
carray1.h5 (File) ''
Last modif.: 'Thu Apr 12 10:15:38 2007'
Object Tree:
/ (RootGroup) ''
/carray (CArray(200, 300), shuffle, zlib(5)) ''
[[0 0 0 0]
[0 0 0 0]
[0 0 1 1]
[0 0 1 1]]
| class CArray(Array):
"""This class represents homogeneous datasets in an HDF5 file.
The difference between a CArray and a normal Array (see
:ref:`ArrayClassDescr`), from which it inherits, is that a CArray
has a chunked layout and, as a consequence, it supports compression.
You can use datasets of this class to easily save or load arrays to
or from disk, with compression support included.
CArray includes all the instance variables and methods of Array.
Only those with different behavior are mentioned here.
Parameters
----------
parentnode
The parent :class:`Group` object.
.. versionchanged:: 3.0
Renamed from *parentNode* to *parentnode*.
name : str
The name of this node in its parent group.
atom
An `Atom` instance representing the *type* and *shape* of
the atomic objects to be saved.
shape
The shape of the new array.
title
A description for this node (it sets the ``TITLE`` HDF5
attribute on disk).
filters
An instance of the `Filters` class that provides
information about the desired I/O filters to be applied
during the life of this object.
chunkshape
The shape of the data chunk to be read or written in a
single HDF5 I/O operation. Filters are applied to those
chunks of data. The dimensionality of `chunkshape` must
be the same as that of `shape`. If ``None``, a sensible
value is calculated (which is recommended).
byteorder
The byteorder of the data *on disk*, specified as 'little'
or 'big'. If this is not specified, the byteorder is that
of the platform.
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time, object
birth time); default True. Semantics of these times depend on their
implementation in the HDF5 library: refer to documentation of the
H5O_info_t data structure. As of HDF5 1.8.15, only ctime (metadata
change time) is implemented.
.. versionadded:: 3.4.3
Examples
--------
See below a small example of the use of the `CArray` class.
The code is available in ``examples/carray1.py``::
import numpy as np
import tables as tb
fileName = 'carray1.h5'
shape = (200, 300)
atom = tb.UInt8Atom()
filters = tb.Filters(complevel=5, complib='zlib')
h5f = tb.open_file(fileName, 'w')
ca = h5f.create_carray(h5f.root, 'carray', atom, shape,
filters=filters)
# Fill a hyperslab in ``ca``.
ca[10:60, 20:70] = np.ones((50, 50))
h5f.close()
# Re-open and read another hyperslab
h5f = tb.open_file(fileName)
print(h5f)
print(h5f.root.carray[8:12, 18:22])
h5f.close()
The output for the previous script is something like::
carray1.h5 (File) ''
Last modif.: 'Thu Apr 12 10:15:38 2007'
Object Tree:
/ (RootGroup) ''
/carray (CArray(200, 300), shuffle, zlib(5)) ''
[[0 0 0 0]
[0 0 0 0]
[0 0 1 1]
[0 0 1 1]]
"""
# Class identifier.
_c_classid = 'CARRAY'
def __init__(self, parentnode, name,
atom=None, shape=None,
title="", filters=None,
chunkshape=None, byteorder=None,
_log=True, track_times=True):
self.atom = atom
"""An `Atom` instance representing the shape, type of the atomic
objects to be saved.
"""
self.shape = None
"""The shape of the stored array."""
self.extdim = -1 # `CArray` objects are not enlargeable by default
"""The index of the enlargeable dimension."""
# Other private attributes
self._v_version = None
"""The object version of this array."""
self._v_new = new = atom is not None
"""Is this the first time the node has been created?"""
self._v_new_title = title
"""New title for this node."""
self._v_convert = True
"""Whether the ``Array`` object must be converted or not."""
self._v_chunkshape = chunkshape
"""Private storage for the `chunkshape` property of the leaf."""
# Miscellaneous iteration rubbish.
self._start = None
"""Starting row for the current iteration."""
self._stop = None
"""Stopping row for the current iteration."""
self._step = None
"""Step size for the current iteration."""
self._nrowsread = None
"""Number of rows read up to the current state of iteration."""
self._startb = None
"""Starting row for current buffer."""
self._stopb = None
"""Stopping row for current buffer. """
self._row = None
"""Current row in iterators (sentinel)."""
self._init = False
"""Whether we are in the middle of an iteration or not (sentinel)."""
self.listarr = None
"""Current buffer in iterators."""
if new:
if not isinstance(atom, Atom):
raise ValueError("atom parameter should be an instance of "
"tables.Atom and you passed a %s." %
type(atom))
if shape is None:
raise ValueError("you must specify a non-empty shape")
try:
shape = tuple(shape)
except TypeError:
raise TypeError("`shape` parameter must be a sequence "
"and you passed a %s" % type(shape))
self.shape = tuple(SizeType(s) for s in shape)
if chunkshape is not None:
try:
chunkshape = tuple(chunkshape)
except TypeError:
raise TypeError(
"`chunkshape` parameter must be a sequence "
"and you passed a %s" % type(chunkshape))
if len(shape) != len(chunkshape):
raise ValueError(f"the shape ({shape}) and chunkshape "
f"({chunkshape}) ranks must be equal.")
elif min(chunkshape) < 1:
raise ValueError("chunkshape parameter cannot have "
"zero-dimensions.")
self._v_chunkshape = tuple(SizeType(s) for s in chunkshape)
# The `Array` class is not abstract enough! :(
super(Array, self).__init__(parentnode, name, new, filters,
byteorder, _log, track_times)
def _g_create(self):
"""Create a new array in file (specific part)."""
if min(self.shape) < 1:
raise ValueError(
"shape parameter cannot have zero-dimensions.")
# Finish the common part of creation process
return self._g_create_common(self.nrows)
def _g_create_common(self, expectedrows):
"""Create a new array in file (common part)."""
self._v_version = obversion
if self._v_chunkshape is None:
# Compute the optimal chunk size
self._v_chunkshape = self._calc_chunkshape(
expectedrows, self.rowsize, self.atom.size)
# Compute the optimal nrowsinbuf
self.nrowsinbuf = self._calc_nrowsinbuf()
# Correct the byteorder if needed
if self.byteorder is None:
self.byteorder = correct_byteorder(self.atom.type, sys.byteorder)
try:
# ``self._v_objectid`` needs to be set because it would be
# needed for setting attributes in some descendants later
# on
self._v_objectid = self._create_carray(self._v_new_title)
except Exception: # XXX
# Problems creating the Array on disk. Close node and re-raise.
self.close(flush=0)
raise
return self._v_objectid
def _g_copy_with_stats(self, group, name, start, stop, step,
title, filters, chunkshape, _log, **kwargs):
"""Private part of Leaf.copy() for each kind of leaf."""
(start, stop, step) = self._process_range_read(start, stop, step)
maindim = self.maindim
shape = list(self.shape)
shape[maindim] = len(range(start, stop, step))
# Now, fill the new carray with values from source
nrowsinbuf = self.nrowsinbuf
# The slices parameter for self.__getitem__
slices = [slice(0, dim, 1) for dim in self.shape]
# This is a hack to prevent doing unnecessary conversions
# when copying buffers
self._v_convert = False
# Build the new CArray object
object = CArray(group, name, atom=self.atom, shape=shape,
title=title, filters=filters, chunkshape=chunkshape,
_log=_log)
# Start the copy itself
for start2 in range(start, stop, step * nrowsinbuf):
# Save the records on disk
stop2 = start2 + step * nrowsinbuf
if stop2 > stop:
stop2 = stop
# Set the proper slice in the main dimension
slices[maindim] = slice(start2, stop2, step)
start3 = (start2 - start) // step
stop3 = start3 + nrowsinbuf
if stop3 > shape[maindim]:
stop3 = shape[maindim]
# The next line should be generalised if, in the future,
# maindim is designed to be different from 0 in CArrays.
# See ticket #199.
object[start3:stop3] = self.__getitem__(tuple(slices))
# Activate the conversion again (default)
self._v_convert = True
nbytes = np.prod(self.shape, dtype=SizeType) * self.atom.size
return (object, nbytes)
| (parentnode, name, atom=None, shape=None, title='', filters=None, chunkshape=None, byteorder=None, _log=True, track_times=True) |
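The docstring example lets PyTables pick the chunk shape automatically; below is a hedged variant with an explicit chunkshape and a different compressor (names and sizes are arbitrary)::

import tables as tb

with tb.open_file("carray2.h5", "w") as h5f:
    ca = h5f.create_carray(h5f.root, "c", tb.Float64Atom(),
                           shape=(1000, 1000), chunkshape=(100, 100),
                           filters=tb.Filters(complevel=1, complib='blosc'))
    print(ca.chunkshape)  # (100, 100)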
728,074 | tables.carray | __init__ | null | def __init__(self, parentnode, name,
atom=None, shape=None,
title="", filters=None,
chunkshape=None, byteorder=None,
_log=True, track_times=True):
self.atom = atom
"""An `Atom` instance representing the shape, type of the atomic
objects to be saved.
"""
self.shape = None
"""The shape of the stored array."""
self.extdim = -1 # `CArray` objects are not enlargeable by default
"""The index of the enlargeable dimension."""
# Other private attributes
self._v_version = None
"""The object version of this array."""
self._v_new = new = atom is not None
"""Is this the first time the node has been created?"""
self._v_new_title = title
"""New title for this node."""
self._v_convert = True
"""Whether the ``Array`` object must be converted or not."""
self._v_chunkshape = chunkshape
"""Private storage for the `chunkshape` property of the leaf."""
# Miscellaneous iteration rubbish.
self._start = None
"""Starting row for the current iteration."""
self._stop = None
"""Stopping row for the current iteration."""
self._step = None
"""Step size for the current iteration."""
self._nrowsread = None
"""Number of rows read up to the current state of iteration."""
self._startb = None
"""Starting row for current buffer."""
self._stopb = None
"""Stopping row for current buffer. """
self._row = None
"""Current row in iterators (sentinel)."""
self._init = False
"""Whether we are in the middle of an iteration or not (sentinel)."""
self.listarr = None
"""Current buffer in iterators."""
if new:
if not isinstance(atom, Atom):
raise ValueError("atom parameter should be an instance of "
"tables.Atom and you passed a %s." %
type(atom))
if shape is None:
raise ValueError("you must specify a non-empty shape")
try:
shape = tuple(shape)
except TypeError:
raise TypeError("`shape` parameter must be a sequence "
"and you passed a %s" % type(shape))
self.shape = tuple(SizeType(s) for s in shape)
if chunkshape is not None:
try:
chunkshape = tuple(chunkshape)
except TypeError:
raise TypeError(
"`chunkshape` parameter must be a sequence "
"and you passed a %s" % type(chunkshape))
if len(shape) != len(chunkshape):
raise ValueError(f"the shape ({shape}) and chunkshape "
f"({chunkshape}) ranks must be equal.")
elif min(chunkshape) < 1:
raise ValueError("chunkshape parameter cannot have "
"zero-dimensions.")
self._v_chunkshape = tuple(SizeType(s) for s in chunkshape)
# The `Array` class is not abstract enough! :(
super(Array, self).__init__(parentnode, name, new, filters,
byteorder, _log, track_times)
| (self, parentnode, name, atom=None, shape=None, title='', filters=None, chunkshape=None, byteorder=None, _log=True, track_times=True) |
728,100 | tables.carray | _g_copy_with_stats | Private part of Leaf.copy() for each kind of leaf. | def _g_copy_with_stats(self, group, name, start, stop, step,
title, filters, chunkshape, _log, **kwargs):
"""Private part of Leaf.copy() for each kind of leaf."""
(start, stop, step) = self._process_range_read(start, stop, step)
maindim = self.maindim
shape = list(self.shape)
shape[maindim] = len(range(start, stop, step))
# Now, fill the new carray with values from source
nrowsinbuf = self.nrowsinbuf
# The slices parameter for self.__getitem__
slices = [slice(0, dim, 1) for dim in self.shape]
# This is a hack to prevent doing unnecessary conversions
# when copying buffers
self._v_convert = False
# Build the new CArray object
object = CArray(group, name, atom=self.atom, shape=shape,
title=title, filters=filters, chunkshape=chunkshape,
_log=_log)
# Start the copy itself
for start2 in range(start, stop, step * nrowsinbuf):
# Save the records on disk
stop2 = start2 + step * nrowsinbuf
if stop2 > stop:
stop2 = stop
# Set the proper slice in the main dimension
slices[maindim] = slice(start2, stop2, step)
start3 = (start2 - start) // step
stop3 = start3 + nrowsinbuf
if stop3 > shape[maindim]:
stop3 = shape[maindim]
# The next line should be generalised if, in the future,
# maindim is designed to be different from 0 in CArrays.
# See ticket #199.
object[start3:stop3] = self.__getitem__(tuple(slices))
# Activate the conversion again (default)
self._v_convert = True
nbytes = np.prod(self.shape, dtype=SizeType) * self.atom.size
return (object, nbytes)
| (self, group, name, start, stop, step, title, filters, chunkshape, _log, **kwargs) |
728,101 | tables.carray | _g_create | Create a new array in file (specific part). | def _g_create(self):
"""Create a new array in file (specific part)."""
if min(self.shape) < 1:
raise ValueError(
"shape parameter cannot have zero-dimensions.")
# Finish the common part of creation process
return self._g_create_common(self.nrows)
| (self) |
728,102 | tables.carray | _g_create_common | Create a new array in file (common part). | def _g_create_common(self, expectedrows):
"""Create a new array in file (common part)."""
self._v_version = obversion
if self._v_chunkshape is None:
# Compute the optimal chunk size
self._v_chunkshape = self._calc_chunkshape(
expectedrows, self.rowsize, self.atom.size)
# Compute the optimal nrowsinbuf
self.nrowsinbuf = self._calc_nrowsinbuf()
# Correct the byteorder if needed
if self.byteorder is None:
self.byteorder = correct_byteorder(self.atom.type, sys.byteorder)
try:
# ``self._v_objectid`` needs to be set because it would be
# needed for setting attributes in some descendants later
# on
self._v_objectid = self._create_carray(self._v_new_title)
except Exception: # XXX
# Problems creating the Array on disk. Close node and re-raise.
self.close(flush=0)
raise
return self._v_objectid
| (self, expectedrows) |
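When no `chunkshape` is given, `_g_create_common()` computes one via `_calc_chunkshape()` before creating the dataset on disk. A small sketch (names are illustrative) showing the automatically chosen value:

```python
# Hedged sketch: "demo.h5" and "auto_chunks" are invented names.
import tables

with tables.open_file("demo.h5", mode="w") as h5:
    carr = h5.create_carray(h5.root, "auto_chunks",
                            atom=tables.Int32Atom(),
                            shape=(100_000, 20))   # no chunkshape given
    print(carr.chunkshape)  # chunk shape chosen automatically by PyTables
```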
728,146 | tables.exceptions | ClosedFileError | The operation can not be completed because the hosting file is closed.
For instance, getting an existing node from a closed file is not
allowed.
| class ClosedFileError(ValueError):
"""The operation can not be completed because the hosting file is closed.
For instance, getting an existing node from a closed file is not
allowed.
"""
pass
| null |
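An illustrative reproduction of the condition described above, assuming made-up file and node names:

```python
# Hedged sketch: accessing a node after the hosting file is closed.
import tables

h5 = tables.open_file("demo.h5", mode="w")
h5.create_array(h5.root, "x", [1, 2, 3])
h5.close()

try:
    h5.get_node("/x")               # the hosting file is already closed
except tables.ClosedFileError as exc:
    print("refused:", exc)
```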
728,147 | tables.exceptions | ClosedNodeError | The operation can not be completed because the node is closed.
For instance, listing the children of a closed group is not allowed.
| class ClosedNodeError(ValueError):
"""The operation can not be completed because the node is closed.
For instance, listing the children of a closed group is not allowed.
"""
pass
| null |
728,148 | tables.description | Col | Defines a non-nested column.
Col instances are used as a means to declare the different properties of a
non-nested column in a table or nested column. Col classes are descendants
of their equivalent Atom classes (see :ref:`AtomClassDescr`), but their
instances have an additional _v_pos attribute that is used to decide the
position of the column inside its parent table or nested column (see the
IsDescription class in :ref:`IsDescriptionClassDescr` for more information
on column positions).
In the same fashion as Atom, you should use a particular Col descendant
class whenever you know the exact type you will need when writing your
code. Otherwise, you may use one of the Col.from_*() factory methods.
Each factory method inherited from the Atom class is available with the
same signature, plus an additional pos parameter (placed in last position)
which defaults to None and may take an integer value. This parameter
can be used to specify the position of the column in the table.
In addition, the following factory methods are available only for
Col objects.
The following parameters are available for most Col-derived constructors.
Parameters
----------
itemsize : int
For types with a non-fixed size, this sets the size in bytes of
individual items in the column.
shape : tuple
Sets the shape of the column. An integer shape of N is equivalent to
the tuple (N,).
dflt
Sets the default value for the column.
pos : int
Sets the position of column in table. If unspecified, the position
will be randomly selected.
attrs : dict
Attribute metadata stored in the column (see
:ref:`AttributeSetClassDescr`).
| class Col(atom.Atom, metaclass=type):
"""Defines a non-nested column.
Col instances are used as a means to declare the different properties of a
non-nested column in a table or nested column. Col classes are descendants
of their equivalent Atom classes (see :ref:`AtomClassDescr`), but their
instances have an additional _v_pos attribute that is used to decide the
position of the column inside its parent table or nested column (see the
IsDescription class in :ref:`IsDescriptionClassDescr` for more information
on column positions).
In the same fashion as Atom, you should use a particular Col descendant
class whenever you know the exact type you will need when writing your
code. Otherwise, you may use one of the Col.from_*() factory methods.
Each factory method inherited from the Atom class is available with the
same signature, plus an additional pos parameter (placed in last position)
which defaults to None and may take an integer value. This parameter
can be used to specify the position of the column in the table.
In addition, the following factory methods are available only for
Col objects.
The following parameters are available for most Col-derived constructors.
Parameters
----------
itemsize : int
For types with a non-fixed size, this sets the size in bytes of
individual items in the column.
shape : tuple
Sets the shape of the column. An integer shape of N is equivalent to
the tuple (N,).
dflt
Sets the default value for the column.
pos : int
Sets the position of column in table. If unspecified, the position
will be randomly selected.
attrs : dict
Attribute metadata stored in the column (see
:ref:`AttributeSetClassDescr`).
"""
_class_from_prefix = {} # filled as column classes are created
"""Maps column prefixes to column classes."""
@classmethod
def prefix(cls):
"""Return the column class prefix."""
cname = cls.__name__
return cname[:cname.rfind('Col')]
@classmethod
def from_atom(cls, atom, pos=None, _offset=None):
"""Create a Col definition from a PyTables atom.
An optional position may be specified as the pos argument.
"""
prefix = atom.prefix()
kwargs = atom._get_init_args()
colclass = cls._class_from_prefix[prefix]
return colclass(pos=pos, _offset=_offset, **kwargs)
@classmethod
def from_sctype(cls, sctype, shape=(), dflt=None, pos=None):
"""Create a `Col` definition from a NumPy scalar type `sctype`.
Optional shape, default value and position may be specified as
the `shape`, `dflt` and `pos` arguments, respectively.
Information in the `sctype` not represented in a `Col` is
ignored.
"""
newatom = atom.Atom.from_sctype(sctype, shape, dflt)
return cls.from_atom(newatom, pos=pos)
@classmethod
def from_dtype(cls, dtype, dflt=None, pos=None, _offset=None):
"""Create a `Col` definition from a NumPy `dtype`.
Optional default value and position may be specified as the
`dflt` and `pos` arguments, respectively. The `dtype` must have
a byte order which is irrelevant or compatible with that of the
system. Information in the `dtype` not represented in a `Col`
is ignored.
"""
newatom = atom.Atom.from_dtype(dtype, dflt)
return cls.from_atom(newatom, pos=pos, _offset=_offset)
@classmethod
def from_type(cls, type, shape=(), dflt=None, pos=None):
"""Create a `Col` definition from a PyTables `type`.
Optional shape, default value and position may be specified as
the `shape`, `dflt` and `pos` arguments, respectively.
"""
newatom = atom.Atom.from_type(type, shape, dflt)
return cls.from_atom(newatom, pos=pos)
@classmethod
def from_kind(cls, kind, itemsize=None, shape=(), dflt=None, pos=None):
"""Create a `Col` definition from a PyTables `kind`.
Optional item size, shape, default value and position may be
specified as the `itemsize`, `shape`, `dflt` and `pos`
arguments, respectively. Bear in mind that not all columns
support a default item size.
"""
newatom = atom.Atom.from_kind(kind, itemsize, shape, dflt)
return cls.from_atom(newatom, pos=pos)
@classmethod
def _subclass_from_prefix(cls, prefix):
"""Get a column subclass for the given `prefix`."""
cname = '%sCol' % prefix
class_from_prefix = cls._class_from_prefix
if cname in class_from_prefix:
return class_from_prefix[cname]
atombase = getattr(atom, '%sAtom' % prefix)
class NewCol(cls, atombase):
"""Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
"""
def __init__(self, *args, **kwargs):
pos = kwargs.pop('pos', None)
col_attrs = kwargs.pop('attrs', {})
offset = kwargs.pop('_offset', None)
class_from_prefix = self._class_from_prefix
atombase.__init__(self, *args, **kwargs)
# The constructor of an abstract atom may have changed
# the class of `self` to something different of `NewCol`
# and `atombase` (that's why the prefix map is saved).
if self.__class__ is not NewCol:
colclass = class_from_prefix[self.prefix()]
self.__class__ = colclass
self._v_pos = pos
self._v_offset = offset
self._v_col_attrs = col_attrs
__eq__ = same_position(atombase.__eq__)
_is_equal_to_atom = same_position(atombase._is_equal_to_atom)
# XXX: API incompatible change for PyTables 3 line
# Overriding __eq__ blocks inheritance of __hash__ in 3.x
# def __hash__(self):
# return hash((self._v_pos, self.atombase))
if prefix == 'Enum':
_is_equal_to_enumatom = same_position(
atombase._is_equal_to_enumatom)
NewCol.__name__ = cname
class_from_prefix[prefix] = NewCol
return NewCol
def __repr__(self):
# Reuse the atom representation.
atomrepr = super().__repr__()
lpar = atomrepr.index('(')
rpar = atomrepr.rindex(')')
atomargs = atomrepr[lpar + 1:rpar]
classname = self.__class__.__name__
if self._v_col_attrs:
return (f'{classname}({atomargs}, pos={self._v_pos}'
f', attrs={self._v_col_attrs})')
return f'{classname}({atomargs}, pos={self._v_pos})'
def _get_init_args(self):
"""Get a dictionary of instance constructor arguments."""
kwargs = {arg: getattr(self, arg) for arg in ('shape', 'dflt')}
kwargs['pos'] = getattr(self, '_v_pos', None)
return kwargs
| (nptype, shape, dflt) |
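A hedged example of declaring columns with `Col` subclasses and building one through a factory method; the table layout is invented for illustration:

```python
# Sketch: the Particle description and its column names are assumptions.
import numpy as np
import tables

class Particle(tables.IsDescription):
    name = tables.StringCol(16, pos=0)           # fixed-size string column
    charge = tables.Float64Col(dflt=0.0, pos=1)  # float column with default
    grid = tables.Int32Col(shape=(2,), pos=2)    # multidimensional column

# An equivalent column built from a NumPy dtype via a factory method:
pressure = tables.Col.from_dtype(np.dtype("float32"), pos=3)
print(pressure)   # e.g. Float32Col(shape=(), dflt=0.0, pos=3)
```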
728,156 | tables.table | Cols | Container for columns in a table or nested column.
This class is used as an *accessor* to the columns in a table or nested
column. It supports the *natural naming* convention, so that you can
access the different columns as attributes which lead to Column instances
(for non-nested columns) or other Cols instances (for nested columns).
For instance, if table.cols is a Cols instance with a column named col1
under it, the latter can be accessed as table.cols.col1. If col1 is nested
and contains a col2 column, this can be accessed as table.cols.col1.col2
and so on. Because of natural naming, the names of members start with
special prefixes, like in the Group class (see :ref:`GroupClassDescr`).
Like the Column class (see :ref:`ColumnClassDescr`), Cols supports item
access to read and write ranges of values in the table or nested column.
.. rubric:: Cols attributes
.. attribute:: _v_colnames
A list of the names of the columns hanging directly
from the associated table or nested column. The order of
the names matches the order of their respective columns in
the containing table.
.. attribute:: _v_colpathnames
A list of the pathnames of all the columns under the
associated table or nested column (in preorder). If it does
not contain nested columns, this is exactly the same as the
:attr:`Cols._v_colnames` attribute.
.. attribute:: _v_desc
The associated Description instance (see
:ref:`DescriptionClassDescr`).
| class Cols:
"""Container for columns in a table or nested column.
This class is used as an *accessor* to the columns in a table or nested
column. It supports the *natural naming* convention, so that you can
access the different columns as attributes which lead to Column instances
(for non-nested columns) or other Cols instances (for nested columns).
For instance, if table.cols is a Cols instance with a column named col1
under it, the latter can be accessed as table.cols.col1. If col1 is nested
and contains a col2 column, this can be accessed as table.cols.col1.col2
and so on. Because of natural naming, the names of members start with
special prefixes, like in the Group class (see :ref:`GroupClassDescr`).
Like the Column class (see :ref:`ColumnClassDescr`), Cols supports item
access to read and write ranges of values in the table or nested column.
.. rubric:: Cols attributes
.. attribute:: _v_colnames
A list of the names of the columns hanging directly
from the associated table or nested column. The order of
the names matches the order of their respective columns in
the containing table.
.. attribute:: _v_colpathnames
A list of the pathnames of all the columns under the
associated table or nested column (in preorder). If it does
not contain nested columns, this is exactly the same as the
:attr:`Cols._v_colnames` attribute.
.. attribute:: _v_desc
The associated Description instance (see
:ref:`DescriptionClassDescr`).
"""
@property
def _v_table(self):
"""The parent Table instance (see :ref:`TableClassDescr`)."""
return self._v__tableFile._get_node(self._v__tablePath)
def __init__(self, table, desc):
myDict = self.__dict__
myDict['_v__tableFile'] = table._v_file
myDict['_v__tablePath'] = table._v_pathname
myDict['_v_desc'] = desc
myDict['_v_colnames'] = desc._v_names
myDict['_v_colpathnames'] = table.description._v_pathnames
# Put the column in the local dictionary
for name in desc._v_names:
if name in desc._v_types:
myDict[name] = Column(table, name, desc)
else:
myDict[name] = Cols(table, desc._v_colobjects[name])
def _g_update_table_location(self, table):
"""Updates the location information about the associated `table`."""
myDict = self.__dict__
myDict['_v__tableFile'] = table._v_file
myDict['_v__tablePath'] = table._v_pathname
# Update the locations in individual columns.
for colname in self._v_colnames:
myDict[colname]._g_update_table_location(table)
def __len__(self):
"""Get the number of top level columns in table."""
return len(self._v_colnames)
def _f_col(self, colname):
"""Get an accessor to the column colname.
This method returns a Column instance (see :ref:`ColumnClassDescr`) if
the requested column is not nested, and a Cols instance (see
:ref:`ColsClassDescr`) if it is. You may use full column pathnames in
colname.
Calling cols._f_col('col1/col2') is equivalent to using cols.col1.col2.
However, the first syntax is better suited for programmatic use. It is
also better if you want to access columns with names that are not valid
Python identifiers.
"""
if not isinstance(colname, str):
raise TypeError("Parameter can only be an string. You passed "
"object: %s" % colname)
if ((colname.find('/') > -1 and
colname not in self._v_colpathnames) and
colname not in self._v_colnames):
raise KeyError(("Cols accessor ``%s.cols%s`` does not have a "
"column named ``%s``")
% (self._v__tablePath, self._v_desc._v_pathname,
colname))
return self._g_col(colname)
def _g_col(self, colname):
"""Like `self._f_col()` but it does not check arguments."""
# Get the Column or Description object
inames = colname.split('/')
cols = self
for iname in inames:
cols = cols.__dict__[iname]
return cols
def __getitem__(self, key):
"""Get a row or a range of rows from a table or nested column.
If key argument is an integer, the corresponding nested type row is
returned as a record of the current flavor. If key is a slice, the
range of rows determined by it is returned as a structured array of the
current flavor.
Examples
--------
::
record = table.cols[4] # equivalent to table[4]
recarray = table.cols.Info[4:1000:2]
Those statements are equivalent to::
nrecord = table.read(start=4)[0]
nrecarray = table.read(start=4, stop=1000, step=2).field('Info')
Here you can see how a mix of natural naming, indexing and slicing can
be used as shorthands for the :meth:`Table.read` method.
"""
table = self._v_table
nrows = table.nrows
if is_idx(key):
key = operator.index(key)
# Index out of range protection
if key >= nrows:
raise IndexError("Index out of range")
if key < 0:
# To support negative values
key += nrows
(start, stop, step) = table._process_range(key, key + 1, 1)
colgroup = self._v_desc._v_pathname
if colgroup == "": # The root group
return table.read(start, stop, step)[0]
else:
crecord = table.read(start, stop, step)[0]
return crecord[colgroup]
elif isinstance(key, slice):
(start, stop, step) = table._process_range(
key.start, key.stop, key.step)
colgroup = self._v_desc._v_pathname
if colgroup == "": # The root group
return table.read(start, stop, step)
else:
crecarray = table.read(start, stop, step)
if hasattr(crecarray, "field"):
return crecarray.field(colgroup) # RecArray case
else:
return get_nested_field(crecarray, colgroup) # numpy case
else:
raise TypeError(f"invalid index or slice: {key!r}")
def __setitem__(self, key, value):
"""Set a row or a range of rows in a table or nested column.
If key argument is an integer, the corresponding row is set to
value. If key is a slice, the range of rows determined by it is set to
value.
Examples
--------
::
table.cols[4] = record
table.cols.Info[4:1000:2] = recarray
Those statements are equivalent to::
table.modify_rows(4, rows=record)
table.modify_column(4, 1000, 2, colname='Info', column=recarray)
Here you can see how a mix of natural naming, indexing and slicing
can be used as shorthands for the :meth:`Table.modify_rows` and
:meth:`Table.modify_column` methods.
"""
table = self._v_table
nrows = table.nrows
if is_idx(key):
key = operator.index(key)
# Index out of range protection
if key >= nrows:
raise IndexError("Index out of range")
if key < 0:
# To support negative values
key += nrows
(start, stop, step) = table._process_range(key, key + 1, 1)
elif isinstance(key, slice):
(start, stop, step) = table._process_range(
key.start, key.stop, key.step)
else:
raise TypeError(f"invalid index or slice: {key!r}")
# Actually modify the correct columns
colgroup = self._v_desc._v_pathname
if colgroup == "": # The root group
table.modify_rows(start, stop, step, rows=value)
else:
table.modify_column(
start, stop, step, colname=colgroup, column=value)
def _g_close(self):
# First, close the columns (ie possible indices open)
for col in self._v_colnames:
colobj = self._g_col(col)
if isinstance(colobj, Column):
colobj.close()
# Delete the reference to column
del self.__dict__[col]
else:
colobj._g_close()
self.__dict__.clear()
def __str__(self):
"""The string representation for this object."""
# The pathname
descpathname = self._v_desc._v_pathname
if descpathname:
descpathname = "." + descpathname
return (f"{self._v__tablePath}.cols{descpathname} "
f"({self.__class__.__name__}), "
f"{len(self._v_colnames)} columns")
def __repr__(self):
"""A detailed string representation for this object."""
lines = [f'{self!s}']
for name in self._v_colnames:
# Get this class name
classname = getattr(self, name).__class__.__name__
# The type
if name in self._v_desc._v_dtypes:
tcol = self._v_desc._v_dtypes[name]
# The shape for this column
shape = (self._v_table.nrows,) + \
self._v_desc._v_dtypes[name].shape
else:
tcol = "Description"
# Description doesn't have a shape currently
shape = ()
lines.append(f" {name} ({classname}{shape}, {tcol})")
return '\n'.join(lines) + '\n'
| (table, desc) |
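A short sketch of the accessor in use; the file, table, and column names are assumptions:

```python
# Hedged sketch of Cols natural naming and pathname-based access.
import tables

with tables.open_file("demo.h5", mode="r") as h5:
    table = h5.root.particles
    cols = table.cols
    print(cols._v_colnames)                 # top-level column names
    first_names = cols.name[:10]            # natural naming + slicing
    same_values = cols._f_col("name")[:10]  # same column via its pathname
```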
728,157 | tables.table | __getitem__ | Get a row or a range of rows from a table or nested column.
If key argument is an integer, the corresponding nested type row is
returned as a record of the current flavor. If key is a slice, the
range of rows determined by it is returned as a structured array of the
current flavor.
Examples
--------
::
record = table.cols[4] # equivalent to table[4]
recarray = table.cols.Info[4:1000:2]
Those statements are equivalent to::
nrecord = table.read(start=4)[0]
nrecarray = table.read(start=4, stop=1000, step=2).field('Info')
Here you can see how a mix of natural naming, indexing and slicing can
be used as shorthands for the :meth:`Table.read` method.
| def __getitem__(self, key):
"""Get a row or a range of rows from a table or nested column.
If key argument is an integer, the corresponding nested type row is
returned as a record of the current flavor. If key is a slice, the
range of rows determined by it is returned as a structured array of the
current flavor.
Examples
--------
::
record = table.cols[4] # equivalent to table[4]
recarray = table.cols.Info[4:1000:2]
Those statements are equivalent to::
nrecord = table.read(start=4)[0]
nrecarray = table.read(start=4, stop=1000, step=2).field('Info')
Here you can see how a mix of natural naming, indexing and slicing can
be used as shorthands for the :meth:`Table.read` method.
"""
table = self._v_table
nrows = table.nrows
if is_idx(key):
key = operator.index(key)
# Index out of range protection
if key >= nrows:
raise IndexError("Index out of range")
if key < 0:
# To support negative values
key += nrows
(start, stop, step) = table._process_range(key, key + 1, 1)
colgroup = self._v_desc._v_pathname
if colgroup == "": # The root group
return table.read(start, stop, step)[0]
else:
crecord = table.read(start, stop, step)[0]
return crecord[colgroup]
elif isinstance(key, slice):
(start, stop, step) = table._process_range(
key.start, key.stop, key.step)
colgroup = self._v_desc._v_pathname
if colgroup == "": # The root group
return table.read(start, stop, step)
else:
crecarray = table.read(start, stop, step)
if hasattr(crecarray, "field"):
return crecarray.field(colgroup) # RecArray case
else:
return get_nested_field(crecarray, colgroup) # numpy case
else:
raise TypeError(f"invalid index or slice: {key!r}")
| (self, key) |
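A compact sketch of the read shorthand documented above, with invented file and table names:

```python
# Hedged sketch: indexing table.cols versus the equivalent Table.read calls.
import tables

with tables.open_file("demo.h5", mode="r") as h5:
    table = h5.root.particles
    record = table.cols[4]                        # one row as a record
    same_record = table.read(start=4, stop=5)[0]  # equivalent read() call
    chunk = table.cols[10:20]                     # a slice of rows
```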