index | package | name | docstring | code | signature |
---|---|---|---|---|---|
72,284 |
ephem
|
next_antitransit
|
Find the next passage of a body across the anti-meridian.
|
def next_antitransit(self, body, start=None):
"""Find the next passage of a body across the anti-meridian."""
original_date = self.date
d = self._next_antitransit(body, start)
self.date = original_date
return d
|
(self, body, start=None)
|
72,285 |
ephem
|
next_pass
|
Return the next rising, culmination, and setting of a satellite.
If singlepass is True, return next consecutive set of
``(rising, culmination, setting)``.
If singlepass is False, return
``(next_rising, next_culmination, next_setting)``.
|
def next_pass(self, body, singlepass=True):
"""Return the next rising, culmination, and setting of a satellite.
If singlepass is True, return next consecutive set of
``(rising, culmination, setting)``.
If singlepass is False, return
``(next_rising, next_culmination, next_setting)``.
"""
if not isinstance(body, EarthSatellite):
raise TypeError(
'the next_pass() method is only for use with'
' EarthSatellite objects because of their high speed'
)
result = _libastro._next_pass(self, body)
# _libastro behavior is singlepass=False
if ((not singlepass)
or (None in result)
or (result[4] >= result[0])):
return result
# retry starting just before next_rising
obscopy = self.copy()
# Almost always 1 minute before next_rising except
# in pathological case where set came immediately before rise
obscopy.date = result[0] - min(1.0/1440,
(result[0] - result[4])/2)
result = _libastro._next_pass(obscopy, body)
if result[0] <= result[2] <= result[4]:
return result
raise ValueError("this software is having trouble with those satellite parameters")
|
(self, body, singlepass=True)
|
72,286 |
ephem
|
next_rising
|
Search for the given body's next rising, returning its date.
The search starts at the `date` of this `Observer` and is limited to
the single circuit of the sky, from antitransit to antitransit, that
the `body` was in the middle of describing at that date and time.
If the body did not, in fact, cross the horizon in the direction you
are asking about during that particular circuit, then the search
must raise a `CircumpolarError` exception like `NeverUpError` or
`AlwaysUpError` instead of returning a date.
|
@describe_riset_search
def next_rising(self, body, start=None, use_center=False):
"""Search for the given body's next rising"""
return self._find_rise_or_set(body, start, use_center, +1, True)
|
(self, body, start=None, use_center=False)
|
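The rise/set searches above are driven through an Observer; a minimal usage sketch, assuming the ephem package is installed and using arbitrary illustrative coordinates:

import ephem

# Hypothetical observer location; lat/lon are given as strings of degrees.
observer = ephem.Observer()
observer.lat, observer.lon = '51.5074', '-0.1278'
observer.date = ephem.now()

sun = ephem.Sun()
print(observer.next_rising(sun))   # next sunrise as an ephem.Date
print(observer.next_setting(sun))  # next sunset; may raise NeverUpError or AlwaysUpError at high latitudes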
72,287 |
ephem
|
next_setting
|
Search for the given body's next setting, returning its date.
The search starts at the `date` of this `Observer` and is limited to
the single circuit of the sky, from antitransit to antitransit, that
the `body` was in the middle of describing at that date and time.
If the body did not, in fact, cross the horizon in the direction you
are asking about during that particular circuit, then the search
must raise a `CircumpolarError` exception like `NeverUpError` or
`AlwaysUpError` instead of returning a date.
|
@describe_riset_search
def next_setting(self, body, start=None, use_center=False):
"""Search for the given body's next setting"""
return self._find_rise_or_set(body, start, use_center, +1, False)
|
(self, body, start=None, use_center=False)
|
72,288 |
ephem
|
next_transit
|
Find the next passage of a body across the meridian.
|
def next_transit(self, body, start=None):
"""Find the next passage of a body across the meridian."""
original_date = self.date
d = self._next_transit(body, start)
self.date = original_date
return d
|
(self, body, start=None)
|
72,289 |
ephem
|
previous_antitransit
|
Find the previous passage of a body across the anti-meridian.
|
def previous_antitransit(self, body, start=None):
"""Find the previous passage of a body across the anti-meridian."""
original_date = self.date
d = self._previous_antitransit(body, start)
self.date = original_date
return d
|
(self, body, start=None)
|
72,290 |
ephem
|
previous_rising
|
Search for the given body's previous rising, returning its date.
The search starts at the `date` of this `Observer` and is limited to
the single circuit of the sky, from antitransit to antitransit, that
the `body` was in the middle of describing at that date and time.
If the body did not, in fact, cross the horizon in the direction you
are asking about during that particular circuit, then the search
must raise a `CircumpolarError` exception like `NeverUpError` or
`AlwaysUpError` instead of returning a date.
|
@describe_riset_search
def previous_rising(self, body, start=None, use_center=False):
"""Search for the given body's previous rising"""
return self._find_rise_or_set(body, start, use_center, -1, True)
|
(self, body, start=None, use_center=False)
|
72,291 |
ephem
|
previous_setting
|
Search for the given body's previous setting, returning its date.
The search starts at the `date` of this `Observer` and is limited to
the single circuit of the sky, from antitransit to antitransit, that
the `body` was in the middle of describing at that date and time.
If the body did not, in fact, cross the horizon in the direction you
are asking about during that particular circuit, then the search
must raise a `CircumpolarError` exception like `NeverUpError` or
`AlwaysUpError` instead of returning a date.
|
@describe_riset_search
def previous_setting(self, body, start=None, use_center=False):
"""Search for the given body's previous setting"""
return self._find_rise_or_set(body, start, use_center, -1, False)
|
(self, body, start=None, use_center=False)
|
72,292 |
ephem
|
previous_transit
|
Find the previous passage of a body across the meridian.
|
def previous_transit(self, body, start=None):
"""Find the previous passage of a body across the meridian."""
original_date = self.date
d = self._previous_transit(body, start)
self.date = original_date
return d
|
(self, body, start=None)
|
72,293 |
ephem
|
ParabolicBody
|
A celestial body that can compute() its sky position
|
from ephem import ParabolicBody
| null |
72,294 |
ephem
|
Phobos
|
Create a Body instance representing Phobos
|
from ephem import Phobos
| null |
72,295 |
ephem
|
Planet
|
A celestial body that can compute() its sky position
|
from ephem import Planet
| null |
72,296 |
ephem
|
PlanetMoon
|
A celestial body that can compute() its sky position
|
from ephem import PlanetMoon
| null |
72,297 |
ephem
|
Pluto
|
Create a Body instance representing Pluto
|
from ephem import Pluto
| null |
72,298 |
ephem
|
Rhea
|
Create a Body instance representing Rhea
|
from ephem import Rhea
| null |
72,299 |
ephem
|
Saturn
|
Create a Body instance representing Saturn.
|
from ephem import Saturn
| null |
72,300 |
ephem
|
Sun
|
Create a Body instance representing Sun
|
from ephem import Sun
| null |
72,301 |
ephem
|
Tethys
|
Create a Body instance representing Tethys
|
from ephem import Tethys
| null |
72,302 |
ephem
|
Titan
|
Create a Body instance representing Titan
|
from ephem import Titan
| null |
72,303 |
ephem
|
Titania
|
Create a Body instance representing Titania
|
from ephem import Titania
| null |
72,304 |
ephem
|
Umbriel
|
Create a Body instance representing Umbriel
|
from ephem import Umbriel
| null |
72,305 |
ephem
|
Uranus
|
Create a Body instance representing Uranus
|
from ephem import Uranus
| null |
72,306 |
ephem
|
Venus
|
Create a Body instance representing Venus
|
from ephem import Venus
| null |
72,307 |
ephem
|
_UTC
| null |
class _UTC(_tzinfo):
ZERO = _timedelta(0)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
def __repr__(self):
return "<ephem.UTC>"
| null |
72,308 |
ephem
|
__repr__
| null |
def __repr__(self):
return "<ephem.UTC>"
|
(self)
|
72,309 |
ephem
|
dst
| null |
def dst(self, dt):
return self.ZERO
|
(self, dt)
|
72,310 |
ephem
|
utcoffset
| null |
def utcoffset(self, dt):
return self.ZERO
|
(self, dt)
|
72,311 |
ephem
|
_convert_to_seconds_and_microseconds
|
Convert a PyEphem date into seconds and microseconds since the Unix epoch.
|
def _convert_to_seconds_and_microseconds(date):
"""Converts a PyEphem date into seconds"""
microseconds = int(round(24 * 60 * 60 * 1000000 * date))
seconds, microseconds = divmod(microseconds, 1000000)
seconds -= 2209032000 # difference between epoch 1900 and epoch 1970
return seconds, microseconds
|
(date)
|
72,313 |
ephem
|
_find_moon_phase
|
Function that assists the finding of moon phases.
|
def _find_moon_phase(d0, motion, target):
"""Function that assists the finding of moon phases."""
def f(d):
_sun.compute(d)
_moon.compute(d)
slon = _libastro.eq_ecl(d, _sun.g_ra, _sun.g_dec)[0]
mlon = _libastro.eq_ecl(d, _moon.g_ra, _moon.g_dec)[0]
return (mlon - slon - antitarget) % twopi - pi
antitarget = target + pi
d0 = Date(d0)
f0 = f(d0)
angle_to_cover = (- f0) % motion
if abs(angle_to_cover) < tiny:
angle_to_cover = motion
d = d0 + 29.53 * angle_to_cover / twopi
return date(newton(f, d, d + hour))
|
(d0, motion, target)
|
72,315 |
ephem
|
_plusminus_pi
| null |
def _plusminus_pi(angle):
return (angle - pi) % tau - pi
|
(angle)
|
72,319 |
ephem
|
city
|
Load the cities database and return a city.
|
def city(name):
"""Load the cities database and return a city."""
global city
import ephem.cities
city = ephem.cities.city
return city(name)
|
(name)
|
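A short sketch of the lazy loader above, assuming ephem and its bundled city database are installed:

import ephem

# city() imports ephem.cities on first use and rebinds the module-level name,
# so subsequent calls dispatch straight to ephem.cities.city.
observer = ephem.city('London')
print(observer.lat, observer.lon, observer.elevation)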
72,321 |
ephem
|
holiday
|
Function that assists the finding of equinoxes and solstices.
|
def holiday(d0, motion, offset):
"""Function that assists the finding of equinoxes and solstices."""
def f(d):
_sun.compute(d)
return (_sun.ra + eighthpi) % quarterpi - eighthpi
d0 = Date(d0)
_sun.compute(d0)
angle_to_cover = motion - (_sun.ra + offset) % motion
if abs(angle_to_cover) < tiny:
angle_to_cover = motion
d = d0 + 365.25 * angle_to_cover / twopi
return date(newton(f, d, d + hour))
|
(d0, motion, offset)
|
72,322 |
ephem
|
localtime
|
Convert a PyEphem date into naive local time, returning a Python datetime.
|
def localtime(date):
"""Convert a PyEphem date into naive local time, returning a Python datetime."""
seconds, microseconds = _convert_to_seconds_and_microseconds(date)
y, m, d, H, M, S, wday, yday, isdst = _localtime(seconds)
return _datetime(y, m, d, H, M, S, microseconds)
|
(date)
|
72,323 |
ephem
|
newton
|
Return an x-value at which the given function reaches zero.
Stops and declares victory once the x-value is within ``precision``
of the solution, which defaults to a half-second of clock time.
|
def newton(f, x0, x1, precision=default_newton_precision):
"""Return an x-value at which the given function reaches zero.
Stops and declares victory once the x-value is within ``precision``
of the solution, which defaults to a half-second of clock time.
"""
f0, f1 = f(x0), f(x1)
while f1 and abs(x1 - x0) > precision and f1 != f0:
x0, x1 = x1, x1 + (x1 - x0) / (f0/f1 - 1)
f0, f1 = f1, f(x1)
return x1
|
(f, x0, x1, precision=1.1574074074074074e-06)
|
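Because the solver only needs a callable and two starting abscissae, it can be exercised on any scalar function; a small sketch finding the positive zero of x**2 - 2, assuming ephem is installed:

import ephem

root = ephem.newton(lambda x: x * x - 2.0, 1.0, 2.0)
print(root)  # approximately 1.4142135, converged to within the default precision of about 1.16e-06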
72,324 |
ephem
|
next_autumnal_equinox
|
Return the date of the next autumnal equinox.
|
def next_autumnal_equinox(date):
"""Return the date of the next autumnal equinox."""
return holiday(date, twopi, pi)
|
(date)
|
72,326 |
ephem
|
next_equinox
|
Return the date of the next equinox.
|
def next_equinox(date):
"""Return the date of the next equinox."""
return holiday(date, pi, 0)
|
(date)
|
72,328 |
ephem
|
next_first_quarter_moon
|
Return the date of the next First Quarter Moon.
|
def next_first_quarter_moon(date):
"""Return the date of the next First Quarter Moon."""
return _find_moon_phase(date, twopi, halfpi)
|
(date)
|
72,329 |
ephem
|
next_full_moon
|
Return the date of the next Full Moon.
|
def next_full_moon(date):
"""Return the date of the next Full Moon."""
return _find_moon_phase(date, twopi, pi)
|
(date)
|
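The phase helpers in this group all delegate to _find_moon_phase with a different target angle; a brief usage sketch, assuming ephem is installed:

import ephem

d = ephem.next_full_moon('2024/1/1')   # accepts anything ephem.Date accepts
print(d)                               # an ephem.Date, expressed in UTC
print(ephem.previous_new_moon(d))      # step backwards from the full moon just found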
72,330 |
ephem
|
next_last_quarter_moon
|
Return the date of the next Last Quarter Moon.
|
def next_last_quarter_moon(date):
"""Return the date of the next Last Quarter Moon."""
return _find_moon_phase(date, twopi, pi + halfpi)
|
(date)
|
72,331 |
ephem
|
next_new_moon
|
Return the date of the next New Moon.
|
def next_new_moon(date):
"""Return the date of the next New Moon."""
return _find_moon_phase(date, twopi, 0)
|
(date)
|
72,332 |
ephem
|
next_solstice
|
Return the date of the next solstice.
|
def next_solstice(date):
"""Return the date of the next solstice."""
return holiday(date, pi, halfpi)
|
(date)
|
72,333 |
ephem
|
next_vernal_equinox
|
Return the date of the next vernal equinox.
|
def next_vernal_equinox(date):
"""Return the date of the next vernal equinox."""
return holiday(date, twopi, 0)
|
(date)
|
72,334 |
ephem
|
next_summer_solstice
|
Return the date of the next summer solstice.
|
def next_summer_solstice(date):
"""Return the date of the next summer solstice."""
return holiday(date, twopi, pi + halfpi)
|
(date)
|
72,336 |
ephem
|
next_winter_solstice
|
Return the date of the next winter solstice.
|
def next_winter_solstice(date):
"""Return the date of the next winter solstice."""
return holiday(date, twopi, halfpi)
|
(date)
|
72,337 |
ephem
|
previous_autumnal_equinox
|
Return the date of the previous autumnal equinox.
|
def previous_autumnal_equinox(date):
"""Return the date of the previous autumnal equinox."""
return holiday(date, -twopi, pi)
|
(date)
|
72,339 |
ephem
|
previous_equinox
|
Return the date of the previous equinox.
|
def previous_equinox(date):
"""Return the date of the previous equinox."""
return holiday(date, -pi, 0)
|
(date)
|
72,341 |
ephem
|
previous_first_quarter_moon
|
Return the date of the previous First Quarter Moon.
|
def previous_first_quarter_moon(date):
"""Return the date of the previous First Quarter Moon."""
return _find_moon_phase(date, -twopi, halfpi)
|
(date)
|
72,342 |
ephem
|
previous_full_moon
|
Return the date of the previous Full Moon.
|
def previous_full_moon(date):
"""Return the date of the previous Full Moon."""
return _find_moon_phase(date, -twopi, pi)
|
(date)
|
72,343 |
ephem
|
previous_last_quarter_moon
|
Return the date of the previous Last Quarter Moon.
|
def previous_last_quarter_moon(date):
"""Return the date of the previous Last Quarter Moon."""
return _find_moon_phase(date, -twopi, pi + halfpi)
|
(date)
|
72,344 |
ephem
|
previous_new_moon
|
Return the date of the previous New Moon.
|
def previous_new_moon(date):
"""Return the date of the previous New Moon."""
return _find_moon_phase(date, -twopi, 0)
|
(date)
|
72,345 |
ephem
|
previous_solstice
|
Return the date of the previous solstice.
|
def previous_solstice(date):
"""Return the date of the previous solstice."""
return holiday(date, -pi, halfpi)
|
(date)
|
72,346 |
ephem
|
previous_vernal_equinox
|
Return the date of the previous vernal equinox.
|
def previous_vernal_equinox(date):
"""Return the date of the previous vernal equinox."""
return holiday(date, -twopi, 0)
|
(date)
|
72,347 |
ephem
|
previous_summer_solstice
|
Return the date of the previous summer solstice.
|
def previous_summer_solstice(date):
"""Return the date of the previous summer solstice."""
return holiday(date, -twopi, pi + halfpi)
|
(date)
|
72,349 |
ephem
|
previous_winter_solstice
|
Return the date of the previous winter solstice.
|
def previous_winter_solstice(date):
"""Return the date of the previous winter solstice."""
return holiday(date, -twopi, halfpi)
|
(date)
|
72,351 |
ephem
|
star
|
Load the stars database and return a star.
|
def star(name, *args, **kwargs):
"""Load the stars database and return a star."""
global star
import ephem.stars
star = ephem.stars.star
return star(name, *args, **kwargs)
|
(name, *args, **kwargs)
|
72,352 |
ephem
|
to_timezone
|
"Convert a PyEphem date into a timezone aware Python datetime representation.
|
def to_timezone(date, tzinfo):
""""Convert a PyEphem date into a timezone aware Python datetime representation."""
seconds, microseconds = _convert_to_seconds_and_microseconds(date)
date = _datetime.fromtimestamp(seconds, tzinfo)
date = date.replace(microsecond=microseconds)
return date
|
(date, tzinfo)
|
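A small sketch showing both datetime conversions, using the standard-library UTC tzinfo rather than assuming anything about ephem's private _UTC class:

import datetime
import ephem

now = ephem.now()
print(ephem.localtime(now))                           # naive datetime in the machine's local zone
print(ephem.to_timezone(now, datetime.timezone.utc))  # timezone-aware datetime in UTC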
72,359 |
obogo.tree
|
reader
| null |
def reader(obo_file_path: str, keep_obsolete=True, ns=None) -> DiGraph:
G = GO_tree()  # nx.DiGraph()
for node_buffer in obo_node_buffer_iter(open(obo_file_path, 'r')):
if not keep_obsolete and node_buffer.is_obsolete:
continue
G.add_node(node_buffer['id'], **node_buffer.nx_node_param)
for node_parent_id in node_buffer.is_a_iter():
G.add_edge(node_parent_id, node_buffer['id'], type='is_a')
return G
|
(obo_file_path: str, keep_obsolete=True, ns=None) -> networkx.classes.digraph.DiGraph
|
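A usage sketch for the OBO reader, assuming obogo (and networkx) is installed; the file path is a placeholder, not a bundled resource:

from obogo.tree import reader

# 'go-basic.obo' is a hypothetical local copy of a Gene Ontology OBO file.
G = reader('go-basic.obo', keep_obsolete=False)
print(G.number_of_nodes(), G.number_of_edges())  # the returned graph is annotated as a networkx DiGraph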
72,363 |
domdf_python_tools.paths
|
PathPlus
|
Subclass of :class:`pathlib.Path` with additional methods and a default encoding of UTF-8.
Path represents a filesystem path but, unlike :class:`pathlib.PurePath`, also offers
methods to do system calls on path objects.
Depending on your system, instantiating a :class:`~.PathPlus` will return
either a :class:`~.PosixPathPlus` or a :class:`~.WindowsPathPlus` object.
You can also instantiate a :class:`~.PosixPathPlus` or :class:`WindowsPath` directly,
but cannot instantiate a :class:`~.WindowsPathPlus` on a POSIX system or vice versa.
.. versionadded:: 0.3.8
.. versionchanged:: 0.5.1 Defaults to Unix line endings (``LF``) on all platforms.
|
class PathPlus(pathlib.Path):
"""
Subclass of :class:`pathlib.Path` with additional methods and a default encoding of UTF-8.
Path represents a filesystem path but, unlike :class:`pathlib.PurePath`, also offers
methods to do system calls on path objects.
Depending on your system, instantiating a :class:`~.PathPlus` will return
either a :class:`~.PosixPathPlus` or a :class:`~.WindowsPathPlus` object.
You can also instantiate a :class:`~.PosixPathPlus` or :class:`WindowsPath` directly,
but cannot instantiate a :class:`~.WindowsPathPlus` on a POSIX system or vice versa.
.. versionadded:: 0.3.8
.. versionchanged:: 0.5.1 Defaults to Unix line endings (``LF``) on all platforms.
"""
__slots__ = ()
if sys.version_info < (3, 11):
_accessor = pathlib._normal_accessor # type: ignore
_closed = False
def _init(self, *args, **kwargs):
pass
@classmethod
def _from_parts(cls, args, init=True):
return super()._from_parts(args) # type: ignore
def __new__(cls: Type[_PP], *args, **kwargs) -> _PP: # noqa: D102
if cls is PathPlus:
cls = WindowsPathPlus if os.name == "nt" else PosixPathPlus # type: ignore
return super().__new__(cls, *args, **kwargs)
def make_executable(self) -> None:
"""
Make the file executable.
.. versionadded:: 0.3.8
"""
make_executable(self)
def write_clean(
self,
string: str,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
):
"""
Write to the file without trailing whitespace, and with a newline at the end of the file.
.. versionadded:: 0.3.8
:param string:
:param encoding: The encoding to write to the file in.
:param errors:
"""
with self.open('w', encoding=encoding, errors=errors) as fp:
clean_writer(string, fp)
def maybe_make(
self,
mode: int = 0o777,
parents: bool = False,
):
"""
Create a directory at this path, but only if the directory does not already exist.
.. versionadded:: 0.3.8
:param mode: Combined with the process’ umask value to determine the file mode and access flags
:param parents: If :py:obj:`False` (the default), a missing parent raises a :class:`FileNotFoundError`.
If :py:obj:`True`, any missing parents of this path are created as needed; they are created with the
default permissions without taking mode into account (mimicking the POSIX mkdir -p command).
:no-default parents:
.. versionchanged:: 1.6.0 Removed the ``'exist_ok'`` option, since it made no sense in this context.
.. attention::
This will fail silently if a file with the same name already exists.
This appears to be due to the behaviour of :func:`os.mkdir`.
"""
try:
self.mkdir(mode, parents, exist_ok=True)
except FileExistsError:
pass
def append_text(
self,
string: str,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
):
"""
Open the file in text mode, append the given string to it, and close the file.
.. versionadded:: 0.3.8
:param string:
:param encoding: The encoding to write to the file in.
:param errors:
"""
with self.open('a', encoding=encoding, errors=errors) as fp:
fp.write(string)
def write_text(
self,
data: str,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
newline: Optional[str] = NEWLINE_DEFAULT,
) -> int:
"""
Open the file in text mode, write to it, and close the file.
.. versionadded:: 0.3.8
:param data:
:param encoding: The encoding to write to the file in.
:param errors:
:param newline:
:default newline: `universal newlines <https://docs.python.org/3/glossary.html#term-universal-newlines>`__ for reading, Unix line endings (``LF``) for writing.
.. versionchanged:: 3.1.0
Added the ``newline`` argument to match Python 3.10.
(see :github:pull:`22420 <python/cpython>`)
"""
if not isinstance(data, str):
raise TypeError(f'data must be str, not {data.__class__.__name__}')
with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f:
return f.write(data)
def write_lines(
self,
data: Iterable[str],
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
*,
trailing_whitespace: bool = False
) -> None:
"""
Write the given list of lines to the file without trailing whitespace.
.. versionadded:: 0.5.0
:param data:
:param encoding: The encoding to write to the file in.
:param errors:
:param trailing_whitespace: If :py:obj:`True` trailing whitespace is preserved.
.. versionchanged:: 2.4.0 Added the ``trailing_whitespace`` option.
"""
if trailing_whitespace:
data = list(data)
if data[-1].strip():
data.append('')
self.write_text('\n'.join(data), encoding=encoding, errors=errors)
else:
self.write_clean('\n'.join(data), encoding=encoding, errors=errors)
def read_text(
self,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
) -> str:
"""
Open the file in text mode, read it, and close the file.
.. versionadded:: 0.3.8
:param encoding: The encoding to write to the file in.
:param errors:
:return: The content of the file.
"""
return super().read_text(encoding=encoding, errors=errors)
def read_lines(
self,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
) -> List[str]:
"""
Open the file in text mode, return a list containing the lines in the file,
and close the file.
.. versionadded:: 0.5.0
:param encoding: The encoding to write to the file in.
:param errors:
:return: The content of the file.
""" # noqa: D400
return self.read_text(encoding=encoding, errors=errors).split('\n')
def open( # type: ignore # noqa: A003 # pylint: disable=redefined-builtin
self,
mode: str = 'r',
buffering: int = -1,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
newline: Optional[str] = NEWLINE_DEFAULT,
) -> IO[Any]:
"""
Open the file pointed by this path and return a file object, as
the built-in :func:`open` function does.
.. versionadded:: 0.3.8
:param mode: The mode to open the file in.
:default mode: ``'r'`` (read only)
:param buffering:
:param encoding:
:param errors:
:param newline:
:default newline: `universal newlines <https://docs.python.org/3/glossary.html#term-universal-newlines>`__ for reading, Unix line endings (``LF``) for writing.
:rtype:
.. versionchanged:: 0.5.1
Defaults to Unix line endings (``LF``) on all platforms.
""" # noqa: D400
if 'b' in mode:
encoding = None
newline = None
if newline is NEWLINE_DEFAULT:
if 'r' in mode:
newline = None
else:
newline = '\n'
return super().open(
mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def dump_json(
self,
data: Any,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
json_library: JsonLibrary = json, # type: ignore
*,
compress: bool = False,
**kwargs,
) -> None:
r"""
Dump ``data`` to the file as JSON.
.. versionadded:: 0.5.0
:param data: The object to serialise to JSON.
:param encoding: The encoding to write to the file in.
:param errors:
:param json_library: The JSON serialisation library to use.
:default json_library: :mod:`json`
:param compress: Whether to compress the JSON file using gzip.
:param \*\*kwargs: Keyword arguments to pass to the JSON serialisation function.
:rtype:
.. versionchanged:: 1.0.0
Now uses :meth:`PathPlus.write_clean <domdf_python_tools.paths.PathPlus.write_clean>`
rather than :meth:`PathPlus.write_text <domdf_python_tools.paths.PathPlus.write_text>`,
and as a result returns :py:obj:`None` rather than :class:`int`.
.. versionchanged:: 1.9.0 Added the ``compress`` keyword-only argument.
"""
if compress:
with gzip.open(self, mode="wt", encoding=encoding, errors=errors) as fp:
fp.write(json_library.dumps(data, **kwargs))
else:
self.write_clean(
json_library.dumps(data, **kwargs),
encoding=encoding,
errors=errors,
)
def load_json(
self,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
json_library: JsonLibrary = json, # type: ignore
*,
decompress: bool = False,
**kwargs,
) -> Any:
r"""
Load JSON data from the file.
.. versionadded:: 0.5.0
:param encoding: The encoding to write to the file in.
:param errors:
:param json_library: The JSON serialisation library to use.
:default json_library: :mod:`json`
:param decompress: Whether to decompress the JSON file using gzip.
Will raise an exception if the file is not compressed.
:param \*\*kwargs: Keyword arguments to pass to the JSON deserialisation function.
:return: The deserialised JSON data.
.. versionchanged:: 1.9.0 Added the ``compress`` keyword-only argument.
"""
if decompress:
with gzip.open(self, mode="rt", encoding=encoding, errors=errors) as fp:
content = fp.read()
else:
content = self.read_text(encoding=encoding, errors=errors)
return json_library.loads(
content,
**kwargs,
)
if sys.version_info < (3, 10): # pragma: no cover (py310+)
def is_mount(self) -> bool:
"""
Check if this path is a POSIX mount point.
.. versionadded:: 0.3.8 for Python 3.7 and above
.. versionadded:: 0.11.0 for Python 3.6
"""
# Need to exist and be a dir
if not self.exists() or not self.is_dir():
return False
# https://github.com/python/cpython/pull/18839/files
try:
parent_dev = self.parent.stat().st_dev
except OSError:
return False
dev = self.stat().st_dev
if dev != parent_dev:
return True
ino = self.stat().st_ino
parent_ino = self.parent.stat().st_ino
return ino == parent_ino
if sys.version_info < (3, 8): # pragma: no cover (py38+)
def rename(self: _P, target: Union[str, pathlib.PurePath]) -> _P:
"""
Rename this path to the target path.
The target path may be absolute or relative. Relative paths are
interpreted relative to the current working directory, *not* the
directory of the Path object.
.. versionadded:: 0.3.8 for Python 3.8 and above
.. versionadded:: 0.11.0 for Python 3.6 and Python 3.7
:param target:
:returns: The new Path instance pointing to the target path.
"""
os.rename(self, target)
return self.__class__(target)
def replace(self: _P, target: Union[str, pathlib.PurePath]) -> _P:
"""
Rename this path to the target path, overwriting if that path exists.
The target path may be absolute or relative. Relative paths are
interpreted relative to the current working directory, *not* the
directory of the Path object.
Returns the new Path instance pointing to the target path.
.. versionadded:: 0.3.8 for Python 3.8 and above
.. versionadded:: 0.11.0 for Python 3.6 and Python 3.7
:param target:
:returns: The new Path instance pointing to the target path.
"""
os.replace(self, target)
return self.__class__(target)
def unlink(self, missing_ok: bool = False) -> None:
"""
Remove this file or link.
If the path is a directory, use :meth:`~domdf_python_tools.paths.PathPlus.rmdir()` instead.
.. versionadded:: 0.3.8 for Python 3.8 and above
.. versionadded:: 0.11.0 for Python 3.6 and Python 3.7
"""
try:
os.unlink(self)
except FileNotFoundError:
if not missing_ok:
raise
def __enter__(self):
return self
def __exit__(self, t, v, tb):
# https://bugs.python.org/issue39682
# In previous versions of pathlib, this method marked this path as
# closed; subsequent attempts to perform I/O would raise an IOError.
# This functionality was never documented, and had the effect of
# making Path objects mutable, contrary to PEP 428. In Python 3.9 the
# _closed attribute was removed, and this method made a no-op.
# This method and __enter__()/__exit__() should be deprecated and
# removed in the future.
pass
if sys.version_info < (3, 9): # pragma: no cover (py39+)
def is_relative_to(self, *other: Union[str, os.PathLike]) -> bool:
r"""
Returns whether the path is relative to another path.
.. versionadded:: 0.3.8 for Python 3.9 and above.
.. latex:vspace:: -10px
.. versionadded:: 1.4.0 for Python 3.6 and Python 3.7.
.. latex:vspace:: -10px
:param \*other:
.. latex:vspace:: -20px
:rtype:
.. latex:vspace:: -20px
"""
try:
self.relative_to(*other)
return True
except ValueError:
return False
def abspath(self) -> "PathPlus":
"""
Return the absolute version of the path.
.. versionadded:: 1.3.0
"""
return self.__class__(os.path.abspath(self))
def iterchildren(
self: _PP,
exclude_dirs: Optional[Iterable[str]] = unwanted_dirs,
match: Optional[str] = None,
matchcase: bool = True,
) -> Iterator[_PP]:
"""
Returns an iterator over all children (files and directories) of the current path object.
.. versionadded:: 2.3.0
:param exclude_dirs: A list of directory names which should be excluded from the output,
together with their children.
:param match: A pattern to match filenames against.
The pattern should be in the format taken by :func:`~.matchglob`.
:param matchcase: Whether the filename's case should match the pattern.
:rtype:
.. versionchanged:: 2.5.0 Added the ``matchcase`` option.
"""
if not self.abspath().is_dir():
return
if exclude_dirs is None:
exclude_dirs = ()
if match and not os.path.isabs(match) and self.is_absolute():
match = (self / match).as_posix()
file: _PP
for file in self.iterdir():
parts = file.parts
if any(d in parts for d in exclude_dirs):
continue
if match is None or (match is not None and matchglob(file, match, matchcase)):
yield file
if file.is_dir():
yield from file.iterchildren(exclude_dirs, match)
@classmethod
def from_uri(cls: Type[_PP], uri: str) -> _PP:
"""
Construct a :class:`~.PathPlus` from a ``file`` URI returned by :meth:`pathlib.PurePath.as_uri`.
.. versionadded:: 2.9.0
:param uri:
:rtype: :class:`~.PathPlus`
"""
parseresult = urllib.parse.urlparse(uri)
if parseresult.scheme != "file":
raise ValueError(f"Unsupported URI scheme {parseresult.scheme!r}")
if parseresult.params or parseresult.query or parseresult.fragment:
raise ValueError("Malformed file URI")
if sys.platform == "win32": # pragma: no cover (!Windows)
if parseresult.netloc:
path = ''.join([
"//",
urllib.parse.unquote_to_bytes(parseresult.netloc).decode("UTF-8"),
urllib.parse.unquote_to_bytes(parseresult.path).decode("UTF-8"),
])
else:
path = urllib.parse.unquote_to_bytes(parseresult.path).decode("UTF-8").lstrip('/')
else: # pragma: no cover (Windows)
if parseresult.netloc:
raise ValueError("Malformed file URI")
path = urllib.parse.unquote_to_bytes(parseresult.path).decode("UTF-8")
return cls(path)
def move(self: _PP, dst: PathLike) -> _PP:
"""
Recursively move ``self`` to ``dst``.
``self`` may be a file or a directory.
See :func:`shutil.move` for more details.
.. versionadded:: 3.2.0
:param dst:
:returns: The new location of ``self``.
:rtype: :class:`~.PathPlus`
"""
new_path = shutil.move(os.fspath(self), dst)
return self.__class__(new_path)
def stream(self, chunk_size: int = 1024) -> Iterator[bytes]:
"""
Stream the file in ``chunk_size`` sized chunks.
:param chunk_size: The chunk size, in bytes
.. versionadded:: 3.2.0
"""
with self.open("rb") as fp:
while True:
chunk = fp.read(chunk_size)
if not chunk:
break
yield chunk
|
(*args, **kwargs) -> ~_PP
|
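A brief sketch of the JSON and text conveniences above, assuming domdf_python_tools is installed; a temporary directory keeps the example self-contained:

import tempfile
from domdf_python_tools.paths import PathPlus

with tempfile.TemporaryDirectory() as tmp:
    target = PathPlus(tmp) / 'settings.json'
    target.dump_json({'name': 'example', 'retries': 3})  # written via write_clean, UTF-8 by default
    print(target.load_json())                            # {'name': 'example', 'retries': 3}
    print(target.read_lines())                           # the cleaned file text split on newlines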
72,365 |
domdf_python_tools.paths
|
__enter__
| null |
def __enter__(self):
return self
|
(self)
|
72,367 |
domdf_python_tools.paths
|
__exit__
| null |
def __exit__(self, t, v, tb):
# https://bugs.python.org/issue39682
# In previous versions of pathlib, this method marked this path as
# closed; subsequent attempts to perform I/O would raise an IOError.
# This functionality was never documented, and had the effect of
# making Path objects mutable, contrary to PEP 428. In Python 3.9 the
# _closed attribute was removed, and this method made a no-op.
# This method and __enter__()/__exit__() should be deprecated and
# removed in the future.
pass
|
(self, t, v, tb)
|
72,374 |
domdf_python_tools.paths
|
__new__
| null |
def __new__(cls: Type[_PP], *args, **kwargs) -> _PP: # noqa: D102
if cls is PathPlus:
cls = WindowsPathPlus if os.name == "nt" else PosixPathPlus # type: ignore
return super().__new__(cls, *args, **kwargs)
|
(cls: Type[~_PP], *args, **kwargs) -> ~_PP
|
72,380 |
domdf_python_tools.paths
|
_init
| null |
def _init(self, *args, **kwargs):
pass
|
(self, *args, **kwargs)
|
72,384 |
domdf_python_tools.paths
|
abspath
|
Return the absolute version of the path.
.. versionadded:: 1.3.0
|
def abspath(self) -> "PathPlus":
"""
Return the absolute version of the path.
.. versionadded:: 1.3.0
"""
return self.__class__(os.path.abspath(self))
|
(self) -> domdf_python_tools.paths.PathPlus
|
72,385 |
domdf_python_tools.paths
|
append_text
|
Open the file in text mode, append the given string to it, and close the file.
.. versionadded:: 0.3.8
:param string:
:param encoding: The encoding to write to the file in.
:param errors:
|
def append_text(
self,
string: str,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
):
"""
Open the file in text mode, append the given string to it, and close the file.
.. versionadded:: 0.3.8
:param string:
:param encoding: The encoding to write to the file in.
:param errors:
"""
with self.open('a', encoding=encoding, errors=errors) as fp:
fp.write(string)
|
(self, string: str, encoding: Optional[str] = 'UTF-8', errors: Optional[str] = None)
|
72,389 |
domdf_python_tools.paths
|
dump_json
|
Dump ``data`` to the file as JSON.
.. versionadded:: 0.5.0
:param data: The object to serialise to JSON.
:param encoding: The encoding to write to the file in.
:param errors:
:param json_library: The JSON serialisation library to use.
:default json_library: :mod:`json`
:param compress: Whether to compress the JSON file using gzip.
:param \*\*kwargs: Keyword arguments to pass to the JSON serialisation function.
:rtype:
.. versionchanged:: 1.0.0
Now uses :meth:`PathPlus.write_clean <domdf_python_tools.paths.PathPlus.write_clean>`
rather than :meth:`PathPlus.write_text <domdf_python_tools.paths.PathPlus.write_text>`,
and as a result returns :py:obj:`None` rather than :class:`int`.
.. versionchanged:: 1.9.0 Added the ``compress`` keyword-only argument.
|
def dump_json(
self,
data: Any,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
json_library: JsonLibrary = json, # type: ignore
*,
compress: bool = False,
**kwargs,
) -> None:
r"""
Dump ``data`` to the file as JSON.
.. versionadded:: 0.5.0
:param data: The object to serialise to JSON.
:param encoding: The encoding to write to the file in.
:param errors:
:param json_library: The JSON serialisation library to use.
:default json_library: :mod:`json`
:param compress: Whether to compress the JSON file using gzip.
:param \*\*kwargs: Keyword arguments to pass to the JSON serialisation function.
:rtype:
.. versionchanged:: 1.0.0
Now uses :meth:`PathPlus.write_clean <domdf_python_tools.paths.PathPlus.write_clean>`
rather than :meth:`PathPlus.write_text <domdf_python_tools.paths.PathPlus.write_text>`,
and as a result returns :py:obj:`None` rather than :class:`int`.
.. versionchanged:: 1.9.0 Added the ``compress`` keyword-only argument.
"""
if compress:
with gzip.open(self, mode="wt", encoding=encoding, errors=errors) as fp:
fp.write(json_library.dumps(data, **kwargs))
else:
self.write_clean(
json_library.dumps(data, **kwargs),
encoding=encoding,
errors=errors,
)
(self, data: Any, encoding: Optional[str] = 'UTF-8', errors: Optional[str] = None, json_library: domdf_python_tools.typing.JsonLibrary = <module 'json' from '/usr/local/lib/python3.10/json/__init__.py'>, *, compress: bool = False, **kwargs) -> NoneType
|
72,406 |
domdf_python_tools.paths
|
iterchildren
|
Returns an iterator over all children (files and directories) of the current path object.
.. versionadded:: 2.3.0
:param exclude_dirs: A list of directory names which should be excluded from the output,
together with their children.
:param match: A pattern to match filenames against.
The pattern should be in the format taken by :func:`~.matchglob`.
:param matchcase: Whether the filename's case should match the pattern.
:rtype:
.. versionchanged:: 2.5.0 Added the ``matchcase`` option.
|
def iterchildren(
self: _PP,
exclude_dirs: Optional[Iterable[str]] = unwanted_dirs,
match: Optional[str] = None,
matchcase: bool = True,
) -> Iterator[_PP]:
"""
Returns an iterator over all children (files and directories) of the current path object.
.. versionadded:: 2.3.0
:param exclude_dirs: A list of directory names which should be excluded from the output,
together with their children.
:param match: A pattern to match filenames against.
The pattern should be in the format taken by :func:`~.matchglob`.
:param matchcase: Whether the filename's case should match the pattern.
:rtype:
.. versionchanged:: 2.5.0 Added the ``matchcase`` option.
"""
if not self.abspath().is_dir():
return
if exclude_dirs is None:
exclude_dirs = ()
if match and not os.path.isabs(match) and self.is_absolute():
match = (self / match).as_posix()
file: _PP
for file in self.iterdir():
parts = file.parts
if any(d in parts for d in exclude_dirs):
continue
if match is None or (match is not None and matchglob(file, match, matchcase)):
yield file
if file.is_dir():
yield from file.iterchildren(exclude_dirs, match)
|
(self: ~_PP, exclude_dirs: Optional[Iterable[str]] = ('.git', '.hg', 'venv', '.venv', '.mypy_cache', '__pycache__', '.pytest_cache', '.tox', '.tox4', '.nox', '__pypackages__'), match: Optional[str] = None, matchcase: bool = True) -> Iterator[~_PP]
|
72,411 |
domdf_python_tools.paths
|
load_json
|
Load JSON data from the file.
.. versionadded:: 0.5.0
:param encoding: The encoding to write to the file in.
:param errors:
:param json_library: The JSON serialisation library to use.
:default json_library: :mod:`json`
:param decompress: Whether to decompress the JSON file using gzip.
Will raise an exception if the file is not compressed.
:param \*\*kwargs: Keyword arguments to pass to the JSON deserialisation function.
:return: The deserialised JSON data.
.. versionchanged:: 1.9.0 Added the ``compress`` keyword-only argument.
|
def load_json(
self,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
json_library: JsonLibrary = json, # type: ignore
*,
decompress: bool = False,
**kwargs,
) -> Any:
r"""
Load JSON data from the file.
.. versionadded:: 0.5.0
:param encoding: The encoding to write to the file in.
:param errors:
:param json_library: The JSON serialisation library to use.
:default json_library: :mod:`json`
:param decompress: Whether to decompress the JSON file using gzip.
Will raise an exception if the file is not compressed.
:param \*\*kwargs: Keyword arguments to pass to the JSON deserialisation function.
:return: The deserialised JSON data.
.. versionchanged:: 1.9.0 Added the ``compress`` keyword-only argument.
"""
if decompress:
with gzip.open(self, mode="rt", encoding=encoding, errors=errors) as fp:
content = fp.read()
else:
content = self.read_text(encoding=encoding, errors=errors)
return json_library.loads(
content,
**kwargs,
)
|
(self, encoding: Optional[str] = 'UTF-8', errors: Optional[str] = None, json_library: domdf_python_tools.typing.JsonLibrary = <module 'json' from '/usr/local/lib/python3.10/json/__init__.py'>, *, decompress: bool = False, **kwargs) -> Any
|
72,413 |
domdf_python_tools.paths
|
make_executable
|
Make the file executable.
.. versionadded:: 0.3.8
|
def make_executable(self) -> None:
"""
Make the file executable.
.. versionadded:: 0.3.8
"""
make_executable(self)
|
(self) -> NoneType
|
72,415 |
domdf_python_tools.paths
|
maybe_make
|
Create a directory at this path, but only if the directory does not already exist.
.. versionadded:: 0.3.8
:param mode: Combined with the process’ umask value to determine the file mode and access flags
:param parents: If :py:obj:`False` (the default), a missing parent raises a :class:`FileNotFoundError`.
If :py:obj:`True`, any missing parents of this path are created as needed; they are created with the
default permissions without taking mode into account (mimicking the POSIX mkdir -p command).
:no-default parents:
.. versionchanged:: 1.6.0 Removed the ``'exist_ok'`` option, since it made no sense in this context.
.. attention::
This will fail silently if a file with the same name already exists.
This appears to be due to the behaviour of :func:`os.mkdir`.
|
def maybe_make(
self,
mode: int = 0o777,
parents: bool = False,
):
"""
Create a directory at this path, but only if the directory does not already exist.
.. versionadded:: 0.3.8
:param mode: Combined with the process’ umask value to determine the file mode and access flags
:param parents: If :py:obj:`False` (the default), a missing parent raises a :class:`FileNotFoundError`.
If :py:obj:`True`, any missing parents of this path are created as needed; they are created with the
default permissions without taking mode into account (mimicking the POSIX mkdir -p command).
:no-default parents:
.. versionchanged:: 1.6.0 Removed the ``'exist_ok'`` option, since it made no sense in this context.
.. attention::
This will fail silently if a file with the same name already exists.
This appears to be due to the behaviour of :func:`os.mkdir`.
"""
try:
self.mkdir(mode, parents, exist_ok=True)
except FileExistsError:
pass
|
(self, mode: int = 511, parents: bool = False)
|
72,417 |
domdf_python_tools.paths
|
move
|
Recursively move ``self`` to ``dst``.
``self`` may be a file or a directory.
See :func:`shutil.move` for more details.
.. versionadded:: 3.2.0
:param dst:
:returns: The new location of ``self``.
:rtype: :class:`~.PathPlus`
|
def move(self: _PP, dst: PathLike) -> _PP:
"""
Recursively move ``self`` to ``dst``.
``self`` may be a file or a directory.
See :func:`shutil.move` for more details.
.. versionadded:: 3.2.0
:param dst:
:returns: The new location of ``self``.
:rtype: :class:`~.PathPlus`
"""
new_path = shutil.move(os.fspath(self), dst)
return self.__class__(new_path)
|
(self: ~_PP, dst: Union[str, pathlib.Path, os.PathLike]) -> ~_PP
|
72,418 |
domdf_python_tools.paths
|
open
|
Open the file pointed by this path and return a file object, as
the built-in :func:`open` function does.
.. versionadded:: 0.3.8
:param mode: The mode to open the file in.
:default mode: ``'r'`` (read only)
:param buffering:
:param encoding:
:param errors:
:param newline:
:default newline: `universal newlines <https://docs.python.org/3/glossary.html#term-universal-newlines>`__ for reading, Unix line endings (``LF``) for writing.
:rtype:
.. versionchanged:: 0.5.1
Defaults to Unix line endings (``LF``) on all platforms.
|
def open( # type: ignore # noqa: A003 # pylint: disable=redefined-builtin
self,
mode: str = 'r',
buffering: int = -1,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
newline: Optional[str] = NEWLINE_DEFAULT,
) -> IO[Any]:
"""
Open the file pointed by this path and return a file object, as
the built-in :func:`open` function does.
.. versionadded:: 0.3.8
:param mode: The mode to open the file in.
:default mode: ``'r'`` (read only)
:param buffering:
:param encoding:
:param errors:
:param newline:
:default newline: `universal newlines <https://docs.python.org/3/glossary.html#term-universal-newlines>`__ for reading, Unix line endings (``LF``) for writing.
:rtype:
.. versionchanged:: 0.5.1
Defaults to Unix line endings (``LF``) on all platforms.
""" # noqa: D400
if 'b' in mode:
encoding = None
newline = None
if newline is NEWLINE_DEFAULT:
if 'r' in mode:
newline = None
else:
newline = '\n'
return super().open(
mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
|
(self, mode: str = 'r', buffering: int = -1, encoding: Optional[str] = 'UTF-8', errors: Optional[str] = None, newline: Optional[str] = NEWLINE_DEFAULT) -> IO[Any]
|
72,421 |
domdf_python_tools.paths
|
read_lines
|
Open the file in text mode, return a list containing the lines in the file,
and close the file.
.. versionadded:: 0.5.0
:param encoding: The encoding to write to the file in.
:param errors:
:return: The content of the file.
|
def read_lines(
self,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
) -> List[str]:
"""
Open the file in text mode, return a list containing the lines in the file,
and close the file.
.. versionadded:: 0.5.0
:param encoding: The encoding to write to the file in.
:param errors:
:return: The content of the file.
""" # noqa: D400
return self.read_text(encoding=encoding, errors=errors).split('\n')
|
(self, encoding: Optional[str] = 'UTF-8', errors: Optional[str] = None) -> List[str]
|
72,422 |
domdf_python_tools.paths
|
read_text
|
Open the file in text mode, read it, and close the file.
.. versionadded:: 0.3.8
:param encoding: The encoding to write to the file in.
:param errors:
:return: The content of the file.
|
def read_text(
self,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
) -> str:
"""
Open the file in text mode, read it, and close the file.
.. versionadded:: 0.3.8
:param encoding: The encoding to write to the file in.
:param errors:
:return: The content of the file.
"""
return super().read_text(encoding=encoding, errors=errors)
|
(self, encoding: Optional[str] = 'UTF-8', errors: Optional[str] = None) -> str
|
72,432 |
domdf_python_tools.paths
|
stream
|
Stream the file in ``chunk_size`` sized chunks.
:param chunk_size: The chunk size, in bytes
.. versionadded:: 3.2.0
|
def stream(self, chunk_size: int = 1024) -> Iterator[bytes]:
"""
Stream the file in ``chunk_size`` sized chunks.
:param chunk_size: The chunk size, in bytes
.. versionadded:: 3.2.0
"""
with self.open("rb") as fp:
while True:
chunk = fp.read(chunk_size)
if not chunk:
break
yield chunk
|
(self, chunk_size: int = 1024) -> Iterator[bytes]
|
72,440 |
domdf_python_tools.paths
|
write_clean
|
Write to the file without trailing whitespace, and with a newline at the end of the file.
.. versionadded:: 0.3.8
:param string:
:param encoding: The encoding to write to the file in.
:param errors:
|
def write_clean(
self,
string: str,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
):
"""
Write to the file without trailing whitespace, and with a newline at the end of the file.
.. versionadded:: 0.3.8
:param string:
:param encoding: The encoding to write to the file in.
:param errors:
"""
with self.open('w', encoding=encoding, errors=errors) as fp:
clean_writer(string, fp)
|
(self, string: str, encoding: Optional[str] = 'UTF-8', errors: Optional[str] = None)
|
72,441 |
domdf_python_tools.paths
|
write_lines
|
Write the given list of lines to the file without trailing whitespace.
.. versionadded:: 0.5.0
:param data:
:param encoding: The encoding to write to the file in.
:param errors:
:param trailing_whitespace: If :py:obj:`True` trailing whitespace is preserved.
.. versionchanged:: 2.4.0 Added the ``trailing_whitespace`` option.
|
def write_lines(
self,
data: Iterable[str],
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
*,
trailing_whitespace: bool = False
) -> None:
"""
Write the given list of lines to the file without trailing whitespace.
.. versionadded:: 0.5.0
:param data:
:param encoding: The encoding to write to the file in.
:param errors:
:param trailing_whitespace: If :py:obj:`True` trailing whitespace is preserved.
.. versionchanged:: 2.4.0 Added the ``trailing_whitespace`` option.
"""
if trailing_whitespace:
data = list(data)
if data[-1].strip():
data.append('')
self.write_text('\n'.join(data), encoding=encoding, errors=errors)
else:
self.write_clean('\n'.join(data), encoding=encoding, errors=errors)
|
(self, data: Iterable[str], encoding: Optional[str] = 'UTF-8', errors: Optional[str] = None, *, trailing_whitespace: bool = False) -> NoneType
|
72,442 |
domdf_python_tools.paths
|
write_text
|
Open the file in text mode, write to it, and close the file.
.. versionadded:: 0.3.8
:param data:
:param encoding: The encoding to write to the file in.
:param errors:
:param newline:
:default newline: `universal newlines <https://docs.python.org/3/glossary.html#term-universal-newlines>`__ for reading, Unix line endings (``LF``) for writing.
.. versionchanged:: 3.1.0
Added the ``newline`` argument to match Python 3.10.
(see :github:pull:`22420 <python/cpython>`)
|
def write_text(
self,
data: str,
encoding: Optional[str] = "UTF-8",
errors: Optional[str] = None,
newline: Optional[str] = NEWLINE_DEFAULT,
) -> int:
"""
Open the file in text mode, write to it, and close the file.
.. versionadded:: 0.3.8
:param data:
:param encoding: The encoding to write to the file in.
:param errors:
:param newline:
:default newline: `universal newlines <https://docs.python.org/3/glossary.html#term-universal-newlines>`__ for reading, Unix line endings (``LF``) for writing.
.. versionchanged:: 3.1.0
Added the ``newline`` argument to match Python 3.10.
(see :github:pull:`22420 <python/cpython>`)
"""
if not isinstance(data, str):
raise TypeError(f'data must be str, not {data.__class__.__name__}')
with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f:
return f.write(data)
|
(self, data: str, encoding: Optional[str] = 'UTF-8', errors: Optional[str] = None, newline: Optional[str] = NEWLINE_DEFAULT) -> int
|
72,443 |
dom_toml.decoder
|
TomlDecoder
|
TOML decoder which uses a dict-subclass for inline tables.
.. versionadded:: 2.0.0
|
class TomlDecoder:
"""
TOML decoder which uses a dict-subclass for inline tables.
.. versionadded:: 2.0.0
"""
def loads(self, s: str) -> Dict[str, Any]:
"""
Parse the given string as TOML.
:param s:
:returns: A mapping containing the ``TOML`` data.
.. latex:clearpage::
"""
try:
pit = tomli._parser.parse_inline_table
def _parse_inline_table(src: str, pos: int, parse_float: Callable[[str], Any]) -> Tuple[int, Dict]:
pos, table = pit(src, pos, parse_float)
return pos, InlineTableDict(table)
tomli._parser.parse_inline_table = _parse_inline_table
return tomli.loads(s)
finally:
tomli._parser.parse_inline_table = pit
|
()
|
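A sketch of the decoder used directly, assuming dom_toml and its tomli dependency are installed:

from dom_toml.decoder import TomlDecoder

data = TomlDecoder().loads('point = { x = 1, y = 2 }\n')
print(type(data['point']).__name__)  # InlineTableDict, so the table stays inline on re-encoding
print(data['point']['x'], data['point']['y'])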
72,444 |
dom_toml.decoder
|
loads
|
Parse the given string as TOML.
:param s:
:returns: A mapping containing the ``TOML`` data.
.. latex:clearpage::
|
def loads(self, s: str) -> Dict[str, Any]:
"""
Parse the given string as TOML.
:param s:
:returns: A mapping containing the ``TOML`` data.
.. latex:clearpage::
"""
try:
pit = tomli._parser.parse_inline_table
def _parse_inline_table(src: str, pos: int, parse_float: Callable[[str], Any]) -> Tuple[int, Dict]:
pos, table = pit(src, pos, parse_float)
return pos, InlineTableDict(table)
tomli._parser.parse_inline_table = _parse_inline_table
return tomli.loads(s)
finally:
tomli._parser.parse_inline_table = pit
|
(self, s: str) -> Dict[str, Any]
|
72,445 |
dom_toml.encoder
|
TomlEncoder
|
TOML encoder which wraps long lists onto multiple lines and adds a blank line before arrays of tables.
:param preserve:
:param allow_multiline:
:param separator:
.. versionchanged:: 0.2.0 Moved from ``__init__.py``
.. versionchanged:: 2.0.0 Added ``allow_multiline`` argument.
.. autosummary-widths:: 45/100
|
class TomlEncoder:
"""
TOML encoder which wraps long lists onto multiple lines and adds a blank line before arrays of tables.
:param preserve:
:param allow_multiline:
:param separator:
.. versionchanged:: 0.2.0 Moved from ``__init__.py``
.. versionchanged:: 2.0.0 Added ``allow_multiline`` argument.
.. autosummary-widths:: 45/100
"""
# The maximum width of the list **value**, after which it will be wrapped.
max_width: int = 100
allow_multiline: bool
# cache rendered inline tables (mapping from object id to rendered inline table)
inline_table_cache: Dict[int, str]
def __init__(self, preserve: bool = True, multiline_strings: bool = False):
self.preserve = preserve
self.allow_multiline = multiline_strings
self.inline_table_cache = {}
def dumps(
self,
table: Mapping[str, Any],
*,
name: str,
inside_aot: bool = False,
) -> Iterator[str]:
"""
Serialise the given table.
:param name: The table name.
:param inside_aot:
:rtype:
.. versionadded:: 2.0.0
"""
yielded = False
literals = []
tables: List[Tuple[str, Any, bool]] = [] # => [(key, value, inside_aot)]
for k, v in table.items():
if v is None:
continue
elif self.preserve and isinstance(v, InlineTableDict):
literals.append((k, v))
elif isinstance(v, dict):
tables.append((k, v, False))
elif self._is_aot(v) and not all(self._is_suitable_inline_table(t) for t in v):
tables.extend((k, t, True) for t in v)
else:
literals.append((k, v))
if inside_aot or name and (literals or not tables):
yielded = True
yield f"[[{name}]]\n" if inside_aot else f"[{name}]\n"
if literals:
yielded = True
for k, v in literals:
yield f"{self.format_key_part(k)} = {self.format_literal(v)}\n"
for k, v, in_aot in tables:
if yielded:
yield '\n'
else:
yielded = True
key_part = self.format_key_part(k)
display_name = f"{name}.{key_part}" if name else key_part
yield from self.dumps(v, name=display_name, inside_aot=in_aot)
def format_literal(self, obj: object, *, nest_level: int = 0) -> str:
"""
Format a literal value.
:param obj:
:param nest_level:
:rtype:
.. versionadded:: 2.0.0
"""
if isinstance(obj, bool):
return "true" if obj else "false"
if isinstance(obj, (int, float, date, datetime)):
return str(obj)
if isinstance(obj, Decimal):
return self.format_decimal(obj)
if isinstance(obj, time):
if obj.tzinfo:
raise ValueError("TOML does not support offset times")
return str(obj)
if isinstance(obj, str):
return self.format_string(obj, allow_multiline=self.allow_multiline)
if isinstance(obj, ARRAY_TYPES):
return self.format_inline_array(obj, nest_level)
if isinstance(obj, dict):
return self.format_inline_table(obj)
raise TypeError(f"Object of type {type(obj)} is not TOML serializable")
def format_decimal(self, obj: Decimal) -> str:
"""
Format a decimal value.
:param obj:
:rtype:
.. versionadded:: 2.0.0
"""
if obj.is_nan():
return "nan"
if obj == Decimal("inf"):
return "inf"
if obj == Decimal("-inf"):
return "-inf"
return str(obj)
def format_inline_table(self, obj: dict) -> str:
"""
Format an inline table.
:param obj:
:rtype:
.. versionadded:: 2.0.0
"""
# check cache first
obj_id = id(obj)
if obj_id in self.inline_table_cache:
return self.inline_table_cache[obj_id]
if not obj:
rendered = "{}"
else:
rendered = (
"{ "
+ ", ".join(f"{self.format_key_part(k)} = {self.format_literal(v)}" for k, v in obj.items())
+ " }"
)
self.inline_table_cache[obj_id] = rendered
return rendered
def format_inline_array(self, obj: Union[Tuple, List], nest_level: int) -> str:
"""
Format an inline array.
:param obj:
:param nest_level:
:rtype:
.. versionadded:: 2.0.0
"""
if not len(obj):
return "[]"
item_indent = ARRAY_INDENT * (1 + nest_level)
closing_bracket_indent = ARRAY_INDENT * nest_level
single_line = "[ " + ", ".join(
self.format_literal(item, nest_level=nest_level + 1) for item in obj
) + f",]"
if len(single_line) <= self.max_width:
return single_line
else:
start = "[\n"
body = ",\n".join(item_indent + self.format_literal(item, nest_level=nest_level + 1) for item in obj)
end = f",\n{closing_bracket_indent}]"
return start + body + end
def format_key_part(self, part: str) -> str:
"""
Format part of a key.
:param part:
:rtype:
.. versionadded:: 2.0.0
"""
if part and BARE_KEY_CHARS.issuperset(part):
return part
return self.format_string(part, allow_multiline=False)
def format_string(self, s: str, *, allow_multiline: bool) -> str:
"""
Format a string.
:param s:
:param allow_multiline:
:rtype:
.. versionadded:: 2.0.0
.. latex:clearpage::
"""
do_multiline = allow_multiline and '\n' in s
if do_multiline:
result = '"""\n'
s = s.replace("\r\n", '\n')
else:
result = '"'
pos = seq_start = 0
while True:
try:
char = s[pos]
except IndexError:
result += s[seq_start:pos]
if do_multiline:
return result + '"""'
return result + '"'
if char in ILLEGAL_BASIC_STR_CHARS:
result += s[seq_start:pos]
if char in COMPACT_ESCAPES:
if do_multiline and char == '\n':
result += '\n'
else:
result += COMPACT_ESCAPES[char]
else:
result += "\\u" + hex(ord(char))[2:].rjust(4, '0')
seq_start = pos + 1
pos += 1
def _is_aot(self, obj: Any) -> bool:
"""
Decides if an object behaves as an array of tables (i.e. a nonempty list of dicts).
:param obj:
"""
return bool(isinstance(obj, ARRAY_TYPES) and obj and all(isinstance(v, dict) for v in obj))
def _is_suitable_inline_table(self, obj: dict) -> bool:
"""
Use heuristics to decide if the inline-style representation is a good choice for a given table.
:param obj:
"""
# if self.preserve and isinstance(dict, InlineTableDict):
# return True
rendered_inline = f"{ARRAY_INDENT}{self.format_inline_table(obj)},"
return len(rendered_inline) <= self.max_width and '\n' not in rendered_inline
|
(preserve: bool = True, multiline_strings: bool = False)
|
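A sketch driving the encoder directly; dumps() yields lines, so they are joined by hand here (the higher-level dom_toml helpers may wrap this differently):

from dom_toml.encoder import TomlEncoder

doc = {'project': {'name': 'demo', 'keywords': ['toml', 'encoder']}}
encoder = TomlEncoder(preserve=True, multiline_strings=False)
print(''.join(encoder.dumps(doc, name='')))
# [project]
# name = "demo"
# keywords = [ "toml", "encoder",]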
72,446 |
dom_toml.encoder
|
__init__
| null |
def __init__(self, preserve: bool = True, multiline_strings: bool = False):
self.preserve = preserve
self.allow_multiline = multiline_strings
self.inline_table_cache = {}
|
(self, preserve: bool = True, multiline_strings: bool = False)
|
72,447 |
dom_toml.encoder
|
_is_aot
|
Decides if an object behaves as an array of tables (i.e. a nonempty list of dicts).
:param obj:
|
def _is_aot(self, obj: Any) -> bool:
"""
Decides if an object behaves as an array of tables (i.e. a nonempty list of dicts).
:param obj:
"""
return bool(isinstance(obj, ARRAY_TYPES) and obj and all(isinstance(v, dict) for v in obj))
|
(self, obj: Any) -> bool
|
72,448 |
dom_toml.encoder
|
_is_suitable_inline_table
|
Use heuristics to decide if the inline-style representation is a good choice for a given table.
:param obj:
|
def _is_suitable_inline_table(self, obj: dict) -> bool:
"""
Use heuristics to decide if the inline-style representation is a good choice for a given table.
:param obj:
"""
# if self.preserve and isinstance(dict, InlineTableDict):
# return True
rendered_inline = f"{ARRAY_INDENT}{self.format_inline_table(obj)},"
return len(rendered_inline) <= self.max_width and '\n' not in rendered_inline
|
(self, obj: dict) -> bool
|
72,449 |
dom_toml.encoder
|
dumps
|
Serialise the given table.
:param name: The table name.
:param inside_aot:
:rtype:
.. versionadded:: 2.0.0
|
def dumps(
self,
table: Mapping[str, Any],
*,
name: str,
inside_aot: bool = False,
) -> Iterator[str]:
"""
Serialise the given table.
:param name: The table name.
:param inside_aot:
:rtype:
.. versionadded:: 2.0.0
"""
yielded = False
literals = []
tables: List[Tuple[str, Any, bool]] = [] # => [(key, value, inside_aot)]
for k, v in table.items():
if v is None:
continue
elif self.preserve and isinstance(v, InlineTableDict):
literals.append((k, v))
elif isinstance(v, dict):
tables.append((k, v, False))
elif self._is_aot(v) and not all(self._is_suitable_inline_table(t) for t in v):
tables.extend((k, t, True) for t in v)
else:
literals.append((k, v))
if inside_aot or name and (literals or not tables):
yielded = True
yield f"[[{name}]]\n" if inside_aot else f"[{name}]\n"
if literals:
yielded = True
for k, v in literals:
yield f"{self.format_key_part(k)} = {self.format_literal(v)}\n"
for k, v, in_aot in tables:
if yielded:
yield '\n'
else:
yielded = True
key_part = self.format_key_part(k)
display_name = f"{name}.{key_part}" if name else key_part
yield from self.dumps(v, name=display_name, inside_aot=in_aot)
|
(self, table: Mapping[str, Any], *, name: str, inside_aot: bool = False) -> Iterator[str]
|
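For illustration only, assuming the encoder behaves as shown above: `dumps()` is a generator of text chunks, and the module-level `dom_toml.dumps` simply joins them with `name=''`.

```python
from dom_toml.encoder import TomlEncoder

encoder = TomlEncoder()
table = {"tool": {"example": {"answer": 42}}}

# Joining the yielded chunks reproduces what dom_toml.dumps() returns.
toml_text = ''.join(encoder.dumps(table, name=''))
print(toml_text)  # roughly: a [tool.example] header followed by answer = 42
```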
72,450 |
dom_toml.encoder
|
format_decimal
|
Format a decimal value.
:param obj:
:rtype:
.. versionadded:: 2.0.0
|
def format_decimal(self, obj: Decimal) -> str:
    """
    Format a decimal value.
    :param obj:
    :rtype:
    .. versionadded:: 2.0.0
    """
    if obj.is_nan():
        return "nan"
    if obj == Decimal("inf"):
        return "inf"
    if obj == Decimal("-inf"):
        return "-inf"
    return str(obj)
|
(self, obj: decimal.Decimal) -> str
|
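A small hedged sketch of the special cases handled above, assuming a default `TomlEncoder` instance:

```python
from decimal import Decimal

from dom_toml.encoder import TomlEncoder

encoder = TomlEncoder()
print(encoder.format_decimal(Decimal("NaN")))   # "nan"
print(encoder.format_decimal(Decimal("-inf")))  # "-inf"
print(encoder.format_decimal(Decimal("3.14")))  # falls through to str(): "3.14"
```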
72,451 |
dom_toml.encoder
|
format_inline_array
|
Format an inline array.
:param obj:
:param nest_level:
:rtype:
.. versionadded:: 2.0.0
|
def format_inline_array(self, obj: Union[Tuple, List], nest_level: int) -> str:
    """
    Format an inline array.
    :param obj:
    :param nest_level:
    :rtype:
    .. versionadded:: 2.0.0
    """
    if not len(obj):
        return "[]"
    item_indent = ARRAY_INDENT * (1 + nest_level)
    closing_bracket_indent = ARRAY_INDENT * nest_level
    single_line = "[ " + ", ".join(
            self.format_literal(item, nest_level=nest_level + 1) for item in obj
            ) + f",]"
    if len(single_line) <= self.max_width:
        return single_line
    else:
        start = "[\n"
        body = ",\n".join(item_indent + self.format_literal(item, nest_level=nest_level + 1) for item in obj)
        end = f",\n{closing_bracket_indent}]"
        return start + body + end
|
(self, obj: Union[Tuple, List], nest_level: int) -> str
|
72,452 |
dom_toml.encoder
|
format_inline_table
|
Format an inline table.
:param obj:
:rtype:
.. versionadded:: 2.0.0
|
def format_inline_table(self, obj: dict) -> str:
    """
    Format an inline table.
    :param obj:
    :rtype:
    .. versionadded:: 2.0.0
    """
    # check cache first
    obj_id = id(obj)
    if obj_id in self.inline_table_cache:
        return self.inline_table_cache[obj_id]
    if not obj:
        rendered = "{}"
    else:
        rendered = (
                "{ "
                + ", ".join(f"{self.format_key_part(k)} = {self.format_literal(v)}" for k, v in obj.items())
                + " }"
                )
    self.inline_table_cache[obj_id] = rendered
    return rendered
|
(self, obj: dict) -> str
|
72,453 |
dom_toml.encoder
|
format_key_part
|
Format part of a key.
:param part:
:rtype:
.. versionadded:: 2.0.0
|
def format_key_part(self, part: str) -> str:
    """
    Format part of a key.
    :param part:
    :rtype:
    .. versionadded:: 2.0.0
    """
    if part and BARE_KEY_CHARS.issuperset(part):
        return part
    return self.format_string(part, allow_multiline=False)
|
(self, part: str) -> str
|
72,454 |
dom_toml.encoder
|
format_literal
|
Format a literal value.
:param obj:
:param nest_level:
:rtype:
.. versionadded:: 2.0.0
|
def format_literal(self, obj: object, *, nest_level: int = 0) -> str:
    """
    Format a literal value.
    :param obj:
    :param nest_level:
    :rtype:
    .. versionadded:: 2.0.0
    """
    if isinstance(obj, bool):
        return "true" if obj else "false"
    if isinstance(obj, (int, float, date, datetime)):
        return str(obj)
    if isinstance(obj, Decimal):
        return self.format_decimal(obj)
    if isinstance(obj, time):
        if obj.tzinfo:
            raise ValueError("TOML does not support offset times")
        return str(obj)
    if isinstance(obj, str):
        return self.format_string(obj, allow_multiline=self.allow_multiline)
    if isinstance(obj, ARRAY_TYPES):
        return self.format_inline_array(obj, nest_level)
    if isinstance(obj, dict):
        return self.format_inline_table(obj)
    raise TypeError(f"Object of type {type(obj)} is not TOML serializable")
|
(self, obj: object, *, nest_level: int = 0) -> str
|
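An illustrative sketch of the type dispatch above, assuming a default `TomlEncoder`; exact spacing of the rendered output may differ:

```python
from datetime import date
from decimal import Decimal

from dom_toml.encoder import TomlEncoder

encoder = TomlEncoder()
print(encoder.format_literal(True))              # true
print(encoder.format_literal(date(2021, 1, 1)))  # 2021-01-01 (via str())
print(encoder.format_literal(Decimal("inf")))    # inf (via format_decimal)
print(encoder.format_literal(["a", "b"]))        # delegated to format_inline_array
# encoder.format_literal(object())               # would raise TypeError
```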
72,455 |
dom_toml.encoder
|
format_string
|
Format a string.
:param s:
:param allow_multiline:
:rtype:
.. versionadded:: 2.0.0
.. latex:clearpage::
|
def format_string(self, s: str, *, allow_multiline: bool) -> str:
    """
    Format a string.
    :param s:
    :param allow_multiline:
    :rtype:
    .. versionadded:: 2.0.0
    .. latex:clearpage::
    """
    do_multiline = allow_multiline and '\n' in s
    if do_multiline:
        result = '"""\n'
        s = s.replace("\r\n", '\n')
    else:
        result = '"'
    pos = seq_start = 0
    while True:
        try:
            char = s[pos]
        except IndexError:
            result += s[seq_start:pos]
            if do_multiline:
                return result + '"""'
            return result + '"'
        if char in ILLEGAL_BASIC_STR_CHARS:
            result += s[seq_start:pos]
            if char in COMPACT_ESCAPES:
                if do_multiline and char == '\n':
                    result += '\n'
                else:
                    result += COMPACT_ESCAPES[char]
            else:
                result += "\\u" + hex(ord(char))[2:].rjust(4, '0')
            seq_start = pos + 1
        pos += 1
|
(self, s: str, *, allow_multiline: bool) -> str
|
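A hedged sketch of the multiline switch, assuming a default `TomlEncoder`: newlines are escaped in a single-line basic string unless `allow_multiline` is true, in which case a triple-quoted string is emitted.

```python
from dom_toml.encoder import TomlEncoder

encoder = TomlEncoder()

# Single-line basic string: the newline becomes a \n escape.
print(encoder.format_string("line one\nline two", allow_multiline=False))

# Multi-line basic string: starts with a triple quote and keeps the real newline.
print(encoder.format_string("line one\nline two", allow_multiline=True))
```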
72,457 |
dom_toml
|
dump
|
Writes out ``data`` as TOML to the given file.
:param data:
:param filename: The filename to write to.
:param encoder: The :class:`~.TomlEncoder` to use for constructing the output string.
:returns: A string containing the ``TOML`` corresponding to ``data``.
.. versionchanged:: 2.0.0 ``encoder`` must now be a :class:`~.TomlEncoder` type or instance.
|
def dump(
data: Mapping[str, Any],
filename: PathLike,
encoder: Union[Type[TomlEncoder], TomlEncoder] = TomlEncoder,
) -> str:
r"""
Writes out ``data`` as TOML to the given file.
:param data:
:param filename: The filename to write to.
:param encoder: The :class:`~.TomlEncoder` to use for constructing the output string.
:returns: A string containing the ``TOML`` corresponding to ``data``.
.. versionchanged:: 2.0.0 ``encoder`` must now be a :class:`~.TomlEncoder` type or instance.
"""
filename = PathPlus(filename)
as_toml = dumps(data, encoder=encoder)
filename.write_clean(as_toml)
return as_toml
|
(data: Mapping[str, Any], filename: Union[str, pathlib.Path, os.PathLike], encoder: Union[Type[dom_toml.encoder.TomlEncoder], dom_toml.encoder.TomlEncoder] = <class 'dom_toml.encoder.TomlEncoder'>) -> str
|
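A minimal usage sketch (assumes write access to the working directory):

```python
import dom_toml

config = {"tool": {"demo": {"name": "example", "tags": ["a", "b"]}}}

# Writes config.toml and also returns the serialised document.
toml_text = dom_toml.dump(config, "config.toml")
print(toml_text)
```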
72,458 |
dom_toml
|
dumps
|
Convert ``data`` to a TOML string.
:param data:
:param encoder: The :class:`~.TomlEncoder` to use for constructing the output string.
:returns: A string containing the ``TOML`` corresponding to ``data``.
.. versionchanged:: 2.0.0 ``encoder`` must now be a :class:`~.TomlEncoder` type or instance.
.. latex:clearpage::
|
def dumps(
data: Mapping[str, Any],
encoder: Union[Type[TomlEncoder], TomlEncoder] = TomlEncoder,
) -> str:
r"""
Convert ``data`` to a TOML string.
:param data:
:param encoder: The :class:`~.TomlEncoder` to use for constructing the output string.
:returns: A string containing the ``TOML`` corresponding to ``data``.
.. versionchanged:: 2.0.0 ``encoder`` must now be a :class:`~.TomlEncoder` type or instance.
.. latex:clearpage::
"""
if isinstance(encoder, type):
encoder = encoder()
return ''.join(encoder.dumps(data, name=''))
|
(data: Mapping[str, Any], encoder: Union[Type[dom_toml.encoder.TomlEncoder], dom_toml.encoder.TomlEncoder] = <class 'dom_toml.encoder.TomlEncoder'>) -> str
|
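A minimal usage sketch; the encoder may be given as a class (instantiated internally, as shown in the body above) or as a configured instance:

```python
import dom_toml
from dom_toml.encoder import TomlEncoder

data = {"server": {"host": "127.0.0.1", "port": 8080}}

print(dom_toml.dumps(data))                                               # default encoder
print(dom_toml.dumps(data, encoder=TomlEncoder(multiline_strings=True)))  # explicit instance
```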
72,460 |
dom_toml
|
load
|
Parse TOML from the given file.
:param filename: The filename to read from.
:param decoder: The :class:`~.TomlDecoder` to use for parsing the file.
:returns: A mapping containing the ``TOML`` data.
.. versionchanged:: 2.0.0 ``decoder`` must now be a :class:`~.TomlDecoder` type or instance.
|
def load(
filename: PathLike,
decoder: Union[Type[TomlDecoder], TomlDecoder] = TomlDecoder,
) -> Dict[str, Any]:
r"""
Parse TOML from the given file.
:param filename: The filename to read from.
:param decoder: The :class:`~.TomlDecoder` to use for parsing the file.
:returns: A mapping containing the ``TOML`` data.
.. versionchanged:: 2.0.0 ``decoder`` must now be a :class:`~.TomlDecoder` type or instance.
"""
return loads(
PathPlus(filename).read_text(),
decoder=decoder,
)
|
(filename: Union[str, pathlib.Path, os.PathLike], decoder: Union[Type[dom_toml.decoder.TomlDecoder], dom_toml.decoder.TomlDecoder] = <class 'dom_toml.decoder.TomlDecoder'>) -> Dict[str, Any]
|
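A minimal usage sketch (assumes a pyproject.toml exists in the working directory):

```python
import dom_toml

pyproject = dom_toml.load("pyproject.toml")
print(pyproject.get("project", {}).get("name"))
```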
72,461 |
dom_toml
|
loads
|
Parse the given string as TOML.
:param s:
:param decoder: The :class:`~.TomlDecoder` to use for parsing the string.
:returns: A mapping containing the ``TOML`` data.
.. versionchanged:: 2.0.0 ``decoder`` must now be a :class:`~.TomlDecoder` type or instance.
|
def loads(
s: str,
decoder: Union[Type[TomlDecoder], TomlDecoder] = TomlDecoder,
) -> Dict[str, Any]:
r"""
Parse the given string as TOML.
:param s:
:param decoder: The :class:`~.TomlDecoder` to use for parsing the string.
:returns: A mapping containing the ``TOML`` data.
.. versionchanged:: 2.0.0 ``decoder`` must now be a :class:`~.TomlDecoder` type or instance.
"""
if not isinstance(s, str):
raise TypeError("Expecting something like a string")
if isinstance(decoder, type):
decoder = decoder()
return decoder.loads(s)
|
(s: str, decoder: Union[Type[dom_toml.decoder.TomlDecoder], dom_toml.decoder.TomlDecoder] = <class 'dom_toml.decoder.TomlDecoder'>) -> Dict[str, Any]
|
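A round-trip sketch using the functions documented above; re-encoding should give equivalent (though not necessarily byte-identical) TOML:

```python
import dom_toml

source = '[package]\nname = "demo"\nversion = "0.1.0"\n'

parsed = dom_toml.loads(source)
assert parsed["package"]["name"] == "demo"

print(dom_toml.dumps(parsed))
```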
72,464 |
clvm.SExp
|
SExp
|
SExp provides higher level API on top of any object implementing the CLVM
object protocol.
The tree of values is not a tree of SExp objects, it's a tree of CLVMObject
like objects. SExp simply wraps them to provide a uniform view of any
underlying conforming tree structure.
The CLVM object protocol (concept) exposes two attributes:
1. "atom" which is either None or bytes
2. "pair" which is either None or a tuple of exactly two elements. Both
elements implementing the CLVM object protocol.
Exactly one of "atom" and "pair" must be None.
|
class SExp:
"""
SExp provides higher level API on top of any object implementing the CLVM
object protocol.
The tree of values is not a tree of SExp objects, it's a tree of CLVMObject
like objects. SExp simply wraps them to provide a uniform view of any
underlying conforming tree structure.
The CLVM object protocol (concept) exposes two attributes:
1. "atom" which is either None or bytes
2. "pair" which is either None or a tuple of exactly two elements. Both
elements implementing the CLVM object protocol.
Exactly one of "atom" and "pair" must be None.
"""
true: typing.ClassVar[SExp]
false: typing.ClassVar[SExp]
__null__: typing.ClassVar[SExp]
# the underlying object implementing the clvm object protocol
atom: typing.Optional[bytes]
# this is a tuple of the underlying CLVMObject-like objects. i.e. not
# SExp objects with higher level functions, or None
pair: typing.Optional[typing.Tuple[CLVMStorage, CLVMStorage]]
def __init__(self, obj: CLVMStorage) -> None:
self.atom = obj.atom
self.pair = obj.pair
# this returns a tuple of two SExp objects, or None
def as_pair(self) -> typing.Optional[typing.Tuple[SExp, SExp]]:
pair = self.pair
if pair is None:
return pair
return (self.__class__(pair[0]), self.__class__(pair[1]))
# TODO: deprecate this. Same as .atom property
def as_atom(self) -> typing.Optional[bytes]:
return self.atom
def listp(self) -> bool:
return self.pair is not None
def nullp(self) -> bool:
v = self.atom
return v is not None and len(v) == 0
def as_int(self) -> int:
if self.atom is None:
raise TypeError("Unable to convert a pair to an int")
return int_from_bytes(self.atom)
def as_bin(self) -> bytes:
f = io.BytesIO()
sexp_to_stream(self, f)
return f.getvalue()
# TODO: should be `v: CastableType`
@classmethod
def to(cls: typing.Type[_T_SExp], v: typing.Any) -> _T_SExp:
if isinstance(v, cls):
return v
if looks_like_clvm_object(v):
return cls(v)
# this will lazily convert elements
return cls(to_sexp_type(v))
def cons(self: _T_SExp, right: _T_SExp) -> _T_SExp:
return self.to((self, right))
def first(self: _T_SExp) -> _T_SExp:
pair = self.pair
if pair:
return self.__class__(pair[0])
raise EvalError("first of non-cons", self)
def rest(self: _T_SExp) -> _T_SExp:
pair = self.pair
if pair:
return self.__class__(pair[1])
raise EvalError("rest of non-cons", self)
@classmethod
def null(class_) -> SExp:
return class_.__null__
def as_iter(self: _T_SExp) -> typing.Iterator[_T_SExp]:
v = self
while not v.nullp():
yield v.first()
v = v.rest()
def __eq__(self, other: object) -> bool:
try:
other = self.to(typing.cast(CastableType, other))
to_compare_stack = [(self, other)]
while to_compare_stack:
s1, s2 = to_compare_stack.pop()
p1 = s1.as_pair()
if p1:
p2 = s2.as_pair()
if p2:
to_compare_stack.append((p1[0], p2[0]))
to_compare_stack.append((p1[1], p2[1]))
else:
return False
elif s2.as_pair() or s1.as_atom() != s2.as_atom():
return False
return True
except ValueError:
return False
def list_len(self) -> int:
v = self
size = 0
while v.listp():
size += 1
v = v.rest()
return size
def as_python(self) -> typing.Any:
return as_python(self)
def __str__(self) -> str:
return self.as_bin().hex()
def __repr__(self) -> str:
return "%s(%s)" % (self.__class__.__name__, str(self))
|
(obj: 'CLVMStorage') -> 'None'
|
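An illustrative sketch of the wrapper API described above, assuming `SExp` is importable from the top-level `clvm` package as in the real distribution:

```python
from clvm import SExp

# Build a proper list (1 2 3); plain Python values are converted by to().
tree = SExp.to([1, 2, 3])

assert tree.listp() and not tree.nullp()
assert tree.first().as_int() == 1
assert [item.as_int() for item in tree.as_iter()] == [1, 2, 3]

# cons() builds a new pair, so prepending an element is:
extended = SExp.to(0).cons(tree)
assert extended.list_len() == 4

# str() is the hex of the canonical serialisation produced by as_bin().
print(extended)
```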
72,465 |
clvm.SExp
|
__eq__
| null |
def __eq__(self, other: object) -> bool:
try:
other = self.to(typing.cast(CastableType, other))
to_compare_stack = [(self, other)]
while to_compare_stack:
s1, s2 = to_compare_stack.pop()
p1 = s1.as_pair()
if p1:
p2 = s2.as_pair()
if p2:
to_compare_stack.append((p1[0], p2[0]))
to_compare_stack.append((p1[1], p2[1]))
else:
return False
elif s2.as_pair() or s1.as_atom() != s2.as_atom():
return False
return True
except ValueError:
return False
|
(self, other: object) -> bool
|
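A hedged sketch of the structural comparison above: the right-hand operand is converted with `to()` first, so plain Python values compare directly against trees.

```python
from clvm import SExp

assert SExp.to([1, [2, 3]]) == [1, [2, 3]]   # same shape, same atoms
assert SExp.to(b"atom") == b"atom"
assert SExp.to([1, 2]) != [1, 2, 3]          # different list lengths
assert SExp.to(1) == b"\x01"                 # ints are compared via their byte encoding
```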
72,466 |
clvm.SExp
|
__init__
| null |
def __init__(self, obj: CLVMStorage) -> None:
self.atom = obj.atom
self.pair = obj.pair
|
(self, obj: clvm.CLVMObject.CLVMStorage) -> NoneType
|
72,467 |
clvm.SExp
|
__repr__
| null |
def __repr__(self) -> str:
return "%s(%s)" % (self.__class__.__name__, str(self))
|
(self) -> str
|
72,468 |
clvm.SExp
|
__str__
| null |
def __str__(self) -> str:
return self.as_bin().hex()
|
(self) -> str
|
72,469 |
clvm.SExp
|
as_atom
| null |
def as_atom(self) -> typing.Optional[bytes]:
return self.atom
|
(self) -> Optional[bytes]
|