index | package | name | docstring | code | signature
---|---|---|---|---|---|
36,383 | collections | namedtuple | Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
| def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = ', '.join(field_names)
if num_fields == 1:
arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
namespace = {
'_tuple_new': tuple_new,
'__builtins__': {},
'__name__': f'namedtuple_{typename}',
}
code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
__new__ = eval(code, namespace)
__new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(self, /, **kwds):
result = self._make(_map(kwds.pop, field_names, self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new dict which maps field names to their values.'
return _dict(_zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return _tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (
__new__,
_make.__func__,
_replace,
__repr__,
_asdict,
__getnewargs__,
):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
'__match_args__': field_names,
}
for index, name in enumerate(field_names):
doc = _sys.intern(f'Alias for field number {index}')
class_namespace[name] = _tuplegetter(index, doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
| (typename, field_names, *, rename=False, defaults=None, module=None) |
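The doctest above does not exercise the `rename` and `defaults` keywords that the validation code handles; a minimal standard-library sketch of both (`defaults` requires Python 3.7+):

```python
from collections import namedtuple

# rename=True replaces invalid, keyword, or duplicate field names with _<index>
Row = namedtuple('Row', ['id', 'class', 'id'], rename=True)
print(Row._fields)               # ('id', '_1', '_2')

# defaults apply to the rightmost fields
Point3D = namedtuple('Point3D', ['x', 'y', 'z'], defaults=[0])
print(Point3D._field_defaults)   # {'z': 0}
print(Point3D(1, 2))             # Point3D(x=1, y=2, z=0)
```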
36,385 | statistics | pstdev | Return the square root of the population variance.
See ``pvariance`` for arguments and other details.
>>> pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
0.986893273527251
| def pstdev(data, mu=None):
"""Return the square root of the population variance.
See ``pvariance`` for arguments and other details.
>>> pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
0.986893273527251
"""
# Fixme: Despite the exact sum of squared deviations, some inaccuracy
# remains because there are two rounding steps. The first occurs in
# the _convert() step for pvariance(), the second occurs in math.sqrt().
var = pvariance(data, mu)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var)
| (data, mu=None) |
36,386 | statistics | pvariance | Return the population variance of ``data``.
data should be a sequence or iterable of Real-valued numbers, with at least one
value. The optional argument mu, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function to calculate the variance from the entire population.
To estimate the variance from a sample, the ``variance`` function is
usually a better choice.
Examples:
>>> data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25]
>>> pvariance(data)
1.25
If you have already calculated the mean of the data, you can pass it as
the optional second argument to avoid recalculating it:
>>> mu = mean(data)
>>> pvariance(data, mu)
1.25
Decimals and Fractions are supported:
>>> from decimal import Decimal as D
>>> pvariance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
Decimal('24.815')
>>> from fractions import Fraction as F
>>> pvariance([F(1, 4), F(5, 4), F(1, 2)])
Fraction(13, 72)
| def pvariance(data, mu=None):
"""Return the population variance of ``data``.
data should be a sequence or iterable of Real-valued numbers, with at least one
value. The optional argument mu, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function to calculate the variance from the entire population.
To estimate the variance from a sample, the ``variance`` function is
usually a better choice.
Examples:
>>> data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25]
>>> pvariance(data)
1.25
If you have already calculated the mean of the data, you can pass it as
the optional second argument to avoid recalculating it:
>>> mu = mean(data)
>>> pvariance(data, mu)
1.25
Decimals and Fractions are supported:
>>> from decimal import Decimal as D
>>> pvariance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
Decimal('24.815')
>>> from fractions import Fraction as F
>>> pvariance([F(1, 4), F(5, 4), F(1, 2)])
Fraction(13, 72)
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('pvariance requires at least one data point')
T, ss = _ss(data, mu)
return _convert(ss / n, T)
| (data, mu=None) |
36,387 | statistics | quantiles | Divide *data* into *n* continuous intervals with equal probability.
Returns a list of (n - 1) cut points separating the intervals.
Set *n* to 4 for quartiles (the default). Set *n* to 10 for deciles.
Set *n* to 100 for percentiles which gives the 99 cut points that
separate *data* into 100 equal-sized groups.
The *data* can be any iterable containing sample data.
The cut points are linearly interpolated between data points.
If *method* is set to *inclusive*, *data* is treated as population
data. The minimum value is treated as the 0th percentile and the
maximum value is treated as the 100th percentile.
| def quantiles(data, *, n=4, method='exclusive'):
"""Divide *data* into *n* continuous intervals with equal probability.
Returns a list of (n - 1) cut points separating the intervals.
Set *n* to 4 for quartiles (the default). Set *n* to 10 for deciles.
Set *n* to 100 for percentiles which gives the 99 cut points that
separate *data* into 100 equal-sized groups.
The *data* can be any iterable containing sample data.
The cut points are linearly interpolated between data points.
If *method* is set to *inclusive*, *data* is treated as population
data. The minimum value is treated as the 0th percentile and the
maximum value is treated as the 100th percentile.
"""
if n < 1:
raise StatisticsError('n must be at least 1')
data = sorted(data)
ld = len(data)
if ld < 2:
raise StatisticsError('must have at least two data points')
if method == 'inclusive':
m = ld - 1
result = []
for i in range(1, n):
j, delta = divmod(i * m, n)
interpolated = (data[j] * (n - delta) + data[j + 1] * delta) / n
result.append(interpolated)
return result
if method == 'exclusive':
m = ld + 1
result = []
for i in range(1, n):
j = i * m // n # rescale i to m/n
j = 1 if j < 1 else ld-1 if j > ld-1 else j # clamp to 1 .. ld-1
delta = i*m - j*n # exact integer math
interpolated = (data[j - 1] * (n - delta) + data[j] * delta) / n
result.append(interpolated)
return result
raise ValueError(f'Unknown method: {method!r}')
| (data, *, n=4, method='exclusive') |
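This row's docstring carries no doctest; a small usage sketch of the API it describes (`statistics.quantiles` is available since Python 3.8):

```python
from statistics import quantiles

data = [62, 65, 71, 72, 73, 74, 75, 77, 79, 80, 81, 82, 83, 84, 87, 89, 95, 98]

print(quantiles(data))                            # quartiles, default 'exclusive' method
print(quantiles(data, n=10))                      # deciles: 9 cut points
print(quantiles(data, n=4, method='inclusive'))   # treat data as the full population
```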
36,390 | statistics | stdev | Return the square root of the sample variance.
See ``variance`` for arguments and other details.
>>> stdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
1.0810874155219827
| def stdev(data, xbar=None):
"""Return the square root of the sample variance.
See ``variance`` for arguments and other details.
>>> stdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
1.0810874155219827
"""
# Fixme: Despite the exact sum of squared deviations, some inaccuracy
# remains because there are two rounding steps. The first occurs in
# the _convert() step for variance(), the second occurs in math.sqrt().
var = variance(data, xbar)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var)
| (data, xbar=None) |
36,391 | statistics | variance | Return the sample variance of data.
data should be an iterable of Real-valued numbers, with at least two
values. The optional argument xbar, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function when your data is a sample from a population. To
calculate the variance from the entire population, see ``pvariance``.
Examples:
>>> data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
>>> variance(data)
1.3720238095238095
If you have already calculated the mean of your data, you can pass it as
the optional second argument ``xbar`` to avoid recalculating it:
>>> m = mean(data)
>>> variance(data, m)
1.3720238095238095
This function does not check that ``xbar`` is actually the mean of
``data``. Giving arbitrary values for ``xbar`` may lead to invalid or
impossible results.
Decimals and Fractions are supported:
>>> from decimal import Decimal as D
>>> variance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
Decimal('31.01875')
>>> from fractions import Fraction as F
>>> variance([F(1, 6), F(1, 2), F(5, 3)])
Fraction(67, 108)
| def variance(data, xbar=None):
"""Return the sample variance of data.
data should be an iterable of Real-valued numbers, with at least two
values. The optional argument xbar, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function when your data is a sample from a population. To
calculate the variance from the entire population, see ``pvariance``.
Examples:
>>> data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
>>> variance(data)
1.3720238095238095
If you have already calculated the mean of your data, you can pass it as
the optional second argument ``xbar`` to avoid recalculating it:
>>> m = mean(data)
>>> variance(data, m)
1.3720238095238095
This function does not check that ``xbar`` is actually the mean of
``data``. Giving arbitrary values for ``xbar`` may lead to invalid or
impossible results.
Decimals and Fractions are supported:
>>> from decimal import Decimal as D
>>> variance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
Decimal('31.01875')
>>> from fractions import Fraction as F
>>> variance([F(1, 6), F(1, 2), F(5, 3)])
Fraction(67, 108)
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 2:
raise StatisticsError('variance requires at least two data points')
T, ss = _ss(data, xbar)
return _convert(ss / (n - 1), T)
| (data, xbar=None) |
36,394 | vobject | iCalendar | null | def iCalendar():
return newFromBehavior('vcalendar', '2.0')
| () |
36,396 | vobject.base | newFromBehavior |
Given a name, return a behaviored ContentLine or Component.
| def newFromBehavior(name, id=None):
"""
Given a name, return a behaviored ContentLine or Component.
"""
name = name.upper()
behavior = getBehavior(name, id)
if behavior is None:
raise VObjectError("No behavior found named {0!s}".format(name))
if behavior.isComponent:
obj = Component(name)
else:
obj = ContentLine(name, [], '')
obj.behavior = behavior
obj.isNative = False
return obj
| (name, id=None) |
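A minimal usage sketch tying this factory to the `iCalendar()` shortcut above; it assumes the `vobject` package is installed and follows its documented add/serialize pattern:

```python
import datetime
import vobject

cal = vobject.iCalendar()                 # shorthand for newFromBehavior('vcalendar', '2.0')
event = cal.add('vevent')                 # add() creates a behaviored child component
event.add('dtstart').value = datetime.datetime(2024, 1, 15, 9, 0)
event.add('summary').value = 'Team sync'
print(cal.serialize())
```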
36,397 | vobject.base | readComponents |
Generate one Component at a time from a stream.
| def readComponents(streamOrString, validate=False, transform=True,
ignoreUnreadable=False, allowQP=False):
"""
Generate one Component at a time from a stream.
"""
if isinstance(streamOrString, basestring):
stream = six.StringIO(streamOrString)
else:
stream = streamOrString
try:
stack = Stack()
versionLine = None
n = 0
for line, n in getLogicalLines(stream, allowQP):
if ignoreUnreadable:
try:
vline = textLineToContentLine(line, n)
except VObjectError as e:
if e.lineNumber is not None:
msg = "Skipped line {lineNumber}, message: {msg}"
else:
msg = "Skipped a line, message: {msg}"
logger.error(msg.format(**{'lineNumber': e.lineNumber, 'msg': str(e)}))
continue
else:
vline = textLineToContentLine(line, n)
if vline.name == "VERSION":
versionLine = vline
stack.modifyTop(vline)
elif vline.name == "BEGIN":
stack.push(Component(vline.value, group=vline.group))
elif vline.name == "PROFILE":
if not stack.top():
stack.push(Component())
stack.top().setProfile(vline.value)
elif vline.name == "END":
if len(stack) == 0:
err = "Attempted to end the {0} component but it was never opened"
raise ParseError(err.format(vline.value), n)
if vline.value.upper() == stack.topName(): # START matches END
if len(stack) == 1:
component = stack.pop()
if versionLine is not None:
component.setBehaviorFromVersionLine(versionLine)
else:
behavior = getBehavior(component.name)
if behavior:
component.setBehavior(behavior)
if validate:
component.validate(raiseException=True)
if transform:
component.transformChildrenToNative()
yield component # EXIT POINT
else:
stack.modifyTop(stack.pop())
else:
err = "{0} component wasn't closed"
raise ParseError(err.format(stack.topName()), n)
else:
stack.modifyTop(vline) # not a START or END line
if stack.top():
if stack.topName() is None:
logger.warning("Top level component was never named")
elif stack.top().useBegin:
raise ParseError("Component {0!s} was never closed".format(
(stack.topName())), n)
yield stack.pop()
except ParseError as e:
e.input = streamOrString
raise
| (streamOrString, validate=False, transform=True, ignoreUnreadable=False, allowQP=False) |
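A short parsing sketch for the generator above, assuming `readComponents` is re-exported at the `vobject` package root:

```python
import vobject

ics = (
    "BEGIN:VCALENDAR\r\n"
    "VERSION:2.0\r\n"
    "BEGIN:VEVENT\r\n"
    "SUMMARY:Lunch\r\n"
    "END:VEVENT\r\n"
    "END:VCALENDAR\r\n"
)

# readComponents yields one top-level component at a time; readOne takes the first
for component in vobject.readComponents(ics):
    print(component.name)                   # VCALENDAR
    print(component.vevent.summary.value)   # Lunch
```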
36,398 | vobject.base | readOne |
Return the first component from stream.
| def readOne(stream, validate=False, transform=True, ignoreUnreadable=False,
allowQP=False):
"""
Return the first component from stream.
"""
return next(readComponents(stream, validate, transform, ignoreUnreadable,
allowQP))
| (stream, validate=False, transform=True, ignoreUnreadable=False, allowQP=False) |
36,399 | vobject | vCard | null | def vCard():
return newFromBehavior('vcard', '3.0')
| () |
36,401 | torch_pso.optim.AcceleratedPSO | AcceleratedPSO |
AcceleratedPSO is a variation on the original Particle Swarm Optimization developed to quickly train SVMs. It simplifies
the original PSO algorithm--most notably eschewing the need for velocity states.
Original Paper:
Yang, X. S., Deb, S., and Fong, S., (2011), Accelerated Particle Swarm Optimization and Support Vector Machine for
Business Optimization and Applications, in: NDT2011, CCIS 136, Springer, pp. 53-66 (2011)
https://arxiv.org/abs/1203.6577
| class AcceleratedPSO(GenericPSO):
"""
AcceleratedPSO is a variation on the original Particle Swarm Optimization developed to quickly train SVMs. It simplifies
the original PSO algorithm--most notably eschewing the need for velocity states.
Original Paper:
Yang, X. S., Deb, S., and Fong, S., (2011), Accelerated Particle Swarm Optimization and Support Vector Machine for
Business Optimization and Applications, in: NDT2011, CCIS 136, Springer, pp. 53-66 (2011)
https://arxiv.org/abs/1203.6577
"""
def __init__(self,
params: Iterable[torch.nn.Parameter],
num_particles: int = 100,
alpha: float = 0.3,
beta: float = 0.45,
decay_parameter: float = 0.7,
max_param_value: float = -10,
min_param_value: float = 10):
particle_kwargs = {'alpha': alpha,
'beta': beta,
'decay_parameter': decay_parameter,
'max_param_value': max_param_value,
'min_param_value': min_param_value,
}
super().__init__(params, num_particles, particle_class=AcceleratedParticle, particle_kwargs=particle_kwargs)
| (params: Iterable[torch.nn.parameter.Parameter], num_particles: int = 100, alpha: float = 0.3, beta: float = 0.45, decay_parameter: float = 0.7, max_param_value: float = -10, min_param_value: float = 10) |
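A usage sketch for this optimizer, assuming `AcceleratedPSO` is importable from the `torch_pso` package root like the other optimizers in this table; PSO is gradient-free, so the closure only needs to return a loss tensor:

```python
import torch
from torch_pso import AcceleratedPSO  # assumed top-level export

torch.manual_seed(0)
net = torch.nn.Linear(4, 1)
x, y = torch.randn(32, 4), torch.randn(32, 1)

optimizer = AcceleratedPSO(net.parameters(), num_particles=20)

def closure() -> torch.Tensor:
    # no backward() needed: particles only compare loss values
    return torch.nn.functional.mse_loss(net(x), y)

for _ in range(25):
    loss = optimizer.step(closure)
print(float(loss))
```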
36,402 | torch.optim.optimizer | __getstate__ | null | def __getstate__(self) -> Dict[str, Any]:
return {
'defaults': self.defaults,
'state': self.state,
'param_groups': self.param_groups,
}
| (self) -> Dict[str, Any] |
36,403 | torch_pso.optim.AcceleratedPSO | __init__ | null | def __init__(self,
params: Iterable[torch.nn.Parameter],
num_particles: int = 100,
alpha: float = 0.3,
beta: float = 0.45,
decay_parameter: float = 0.7,
max_param_value: float = -10,
min_param_value: float = 10):
particle_kwargs = {'alpha': alpha,
'beta': beta,
'decay_parameter': decay_parameter,
'max_param_value': max_param_value,
'min_param_value': min_param_value,
}
super().__init__(params, num_particles, particle_class=AcceleratedParticle, particle_kwargs=particle_kwargs)
| (self, params: Iterable[torch.nn.parameter.Parameter], num_particles: int = 100, alpha: float = 0.3, beta: float = 0.45, decay_parameter: float = 0.7, max_param_value: float = -10, min_param_value: float = 10) |
36,404 | torch.optim.optimizer | __repr__ | null | def __repr__(self) -> str:
format_string = self.__class__.__name__ + ' ('
for i, group in enumerate(self.param_groups):
format_string += '\n'
format_string += f'Parameter Group {i}\n'
for key in sorted(group.keys()):
if key != 'params':
format_string += f' {key}: {group[key]}\n'
format_string += ')'
return format_string
| (self) -> str |
36,405 | torch.optim.optimizer | __setstate__ | null | def __setstate__(self, state: Dict[str, Any]) -> None:
self.__dict__.update(state)
if '_optimizer_step_pre_hooks' not in self.__dict__:
self._optimizer_step_pre_hooks = OrderedDict()
if '_optimizer_step_post_hooks' not in self.__dict__:
self._optimizer_step_post_hooks = OrderedDict()
if '_optimizer_state_dict_pre_hooks' not in self.__dict__:
self._optimizer_state_dict_pre_hooks = OrderedDict()
if '_optimizer_state_dict_post_hooks' not in self.__dict__:
self._optimizer_state_dict_post_hooks = OrderedDict()
if '_optimizer_load_state_dict_pre_hooks' not in self.__dict__:
self._optimizer_load_state_dict_pre_hooks = OrderedDict()
if '_optimizer_load_state_dict_post_hooks' not in self.__dict__:
self._optimizer_load_state_dict_post_hooks = OrderedDict()
self._patch_step_function() # To support multiprocessing pickle/unpickle
self.defaults.setdefault('differentiable', False)
| (self, state: Dict[str, Any]) -> NoneType |
36,406 | torch.optim.optimizer | _cuda_graph_capture_health_check | null | def _cuda_graph_capture_health_check(self) -> None:
# Note [torch.compile x capturable]
# If we are compiling, we try to take the capturable path automatically by
# setting the flag to True during tracing. Due to this, we skip all the checks
# normally required for determining whether we can use CUDA graphs and
# shunt the responsibility to torch.inductor. This saves time during tracing
# since the checks are slow without sacrificing UX since inductor will warn
# later if CUDA graphs cannot be enabled, e.g.,
# https://github.com/pytorch/pytorch/blob/d3ba8901d8640eb16f88b2bfef9df7fa383d4b47/torch/_inductor/compile_fx.py#L390.
# Thus, when compiling, inductor will determine if cudagraphs
# can be enabled based on whether there is input mutation or CPU tensors.
if not is_compiling() and torch.backends.cuda.is_built() and torch.cuda.is_available():
capturing = torch.cuda.is_current_stream_capturing()
if capturing and not all(group['capturable'] for group in self.param_groups):
raise RuntimeError("Attempting CUDA graph capture of step() for an instance of " +
self.__class__.__name__ +
" but param_groups' capturable is False.")
if (
(not getattr(self, "_warned_capturable_if_run_uncaptured", False))
and all(group['capturable'] for group in self.param_groups)
and (not capturing)
):
warnings.warn(
"This instance was constructed with capturable=True or some of all the param_groups came with capturable=True, "
"but step() is running without CUDA graph capture. If you never intend to graph-capture this "
"instance, capturable=True can impair performance, and you should set capturable=False."
)
self._warned_capturable_if_run_uncaptured = True
| (self) -> NoneType |
36,407 | torch.optim.optimizer | _group_tensors_by_device_and_dtype | Groups a list of lists of tensors by device and dtype.
Skips this step if we are compiling since this will occur during inductor lowering. | @staticmethod
def _group_tensors_by_device_and_dtype(
tensorlistlist: TensorListList,
with_indices: bool = False,
) -> Union[
Dict[Tuple[None, None], Tuple[TensorListList, Indices]],
Dict[Tuple[torch.device, torch.dtype], Tuple[TensorListList, Indices]],
]:
"""Groups a list of lists of tensors by device and dtype.
Skips this step if we are compiling since this will occur during inductor lowering."""
if is_compiling():
return {(None, None): (tensorlistlist, list(range(len(tensorlistlist[0]))))}
else:
return _group_tensors_by_device_and_dtype(tensorlistlist, with_indices)
| (tensorlistlist: List[List[Optional[torch.Tensor]]], with_indices: bool = False) -> Union[Dict[Tuple[NoneType, NoneType], Tuple[List[List[Optional[torch.Tensor]]], List[int]]], Dict[Tuple[torch.device, torch.dtype], Tuple[List[List[Optional[torch.Tensor]]], List[int]]]] |
36,408 | torch.optim.optimizer | _optimizer_step_code | Entry point for `torch.profile.profiler`.
When python tracing is enabled the profiler will hook into this
function at the CPython level to inspect the optimizer's parameters and
param groups. It is called after `step()` since many optimizers
lazily initialize state.
This is a workaround due to lack of a proper step hook on the optimizer,
and will be removed if it exists.
| def _optimizer_step_code(self) -> None:
"""Entry point for `torch.profile.profiler`.
When python tracing is enabled the profiler will hook into this
function at the CPython level to inspect the optimizer's parameters and
param groups. It is called after `step()` since many optimizers
lazily initialize state.
This is a workaround due to lack of a proper step hook on the optimizer,
and will be removed if it exists.
"""
pass
| (self) -> NoneType |
36,409 | torch.optim.optimizer | _patch_step_function | null | def _patch_step_function(self) -> None:
self._zero_grad_profile_name = f"Optimizer.zero_grad#{self.__class__.__name__}.zero_grad"
hooked = getattr(self.__class__.step, "hooked", None)
if not hooked:
self.__class__.step = self.profile_hook_step(self.__class__.step) # type: ignore[assignment]
self.__class__.step.hooked = True # type: ignore[attr-defined]
| (self) -> NoneType |
36,410 | torch.optim.optimizer | _process_value_according_to_param_policy | null | @staticmethod
def _process_value_according_to_param_policy(
param: torch.Tensor,
value: torch.Tensor,
param_id: int,
param_groups: List[Dict[Any, Any]],
key: Hashable = None,
) -> torch.Tensor:
# Floating-point types are a bit special here. They are the only ones
# that are assumed to always match the type of params.
# Make sure state['step'] is not casted https://github.com/pytorch/pytorch/issues/74424
# UNLESS fused or capturable, see note [special device hosting for step]
fused = False
capturable = False
assert param_groups is not None
for pg in param_groups:
if param_id in pg["params"]:
fused = pg["fused"] if "fused" in pg else False
capturable = pg["capturable"] if "capturable" in pg else False
break
if key == "step":
if capturable or fused:
return value.to(dtype=torch.float32, device=param.device)
else:
return value
else:
if param.is_floating_point():
return value.to(dtype=param.dtype, device=param.device)
else:
return value.to(device=param.device)
| (param: torch.Tensor, value: torch.Tensor, param_id: int, param_groups: List[Dict[Any, Any]], key: Optional[Hashable] = None) -> torch.Tensor |
36,411 | torch.optim.optimizer | add_param_group | Add a param group to the :class:`Optimizer` s `param_groups`.
This can be useful when fine tuning a pre-trained network as frozen layers can be made
trainable and added to the :class:`Optimizer` as training progresses.
Args:
param_group (dict): Specifies what Tensors should be optimized along with group
specific optimization options.
| import math
import functools
import warnings
from collections import OrderedDict, defaultdict
from copy import deepcopy
from itertools import chain
from typing import (
Any,
Callable,
DefaultDict,
Dict,
Hashable,
Iterable,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
cast,
overload,
)
from typing_extensions import ParamSpec, Self, TypeAlias
import torch
import torch.utils.hooks as hooks
from torch.utils.hooks import RemovableHandle
from torch.utils._foreach_utils import (
Indices,
TensorListList,
_get_foreach_kernels_supported_devices,
_get_fused_kernels_supported_devices,
)
from torch._utils import is_compiling
from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype
Args: TypeAlias = Tuple[Any, ...]
Kwargs: TypeAlias = Dict[str, Any]
StateDict: TypeAlias = Dict[str, Any]
GlobalOptimizerPreHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], Optional[Tuple[Args, Kwargs]]]
GlobalOptimizerPostHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], None]
__all__ = ['Optimizer', 'register_optimizer_step_pre_hook', 'register_optimizer_step_post_hook']
_global_optimizer_pre_hooks: Dict[int, GlobalOptimizerPreHook] = OrderedDict()
_global_optimizer_post_hooks: Dict[int, GlobalOptimizerPostHook] = OrderedDict()
_foreach_supported_types = [torch.Tensor, torch.nn.parameter.Parameter]
class _RequiredParameter:
"""Singleton class representing a required parameter for an Optimizer."""
def __repr__(self) -> str:
return "<required parameter>"
| (self, param_group: Dict[str, Any]) -> NoneType |
36,412 | torch.optim.optimizer | load_state_dict | Loads the optimizer state.
Args:
state_dict (dict): optimizer state. Should be an object returned
from a call to :meth:`state_dict`.
| import math
import functools
import warnings
from collections import OrderedDict, defaultdict
from copy import deepcopy
from itertools import chain
from typing import (
Any,
Callable,
DefaultDict,
Dict,
Hashable,
Iterable,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
cast,
overload,
)
from typing_extensions import ParamSpec, Self, TypeAlias
import torch
import torch.utils.hooks as hooks
from torch.utils.hooks import RemovableHandle
from torch.utils._foreach_utils import (
Indices,
TensorListList,
_get_foreach_kernels_supported_devices,
_get_fused_kernels_supported_devices,
)
from torch._utils import is_compiling
from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype
Args: TypeAlias = Tuple[Any, ...]
Kwargs: TypeAlias = Dict[str, Any]
StateDict: TypeAlias = Dict[str, Any]
GlobalOptimizerPreHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], Optional[Tuple[Args, Kwargs]]]
GlobalOptimizerPostHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], None]
__all__ = ['Optimizer', 'register_optimizer_step_pre_hook', 'register_optimizer_step_post_hook']
_global_optimizer_pre_hooks: Dict[int, GlobalOptimizerPreHook] = OrderedDict()
_global_optimizer_post_hooks: Dict[int, GlobalOptimizerPostHook] = OrderedDict()
_foreach_supported_types = [torch.Tensor, torch.nn.parameter.Parameter]
class _RequiredParameter:
"""Singleton class representing a required parameter for an Optimizer."""
def __repr__(self) -> str:
return "<required parameter>"
| (self, state_dict: Dict[str, Any]) -> NoneType |
36,413 | torch.optim.optimizer | profile_hook_step | null | @staticmethod
def profile_hook_step(func: Callable[_P, R]) -> Callable[_P, R]:
@functools.wraps(func)
def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> R:
self, *_ = args
self = cast(Optimizer, self)
profile_name = f"Optimizer.step#{self.__class__.__name__}.step"
with torch.autograd.profiler.record_function(profile_name):
# call optimizer step pre hooks
for pre_hook in chain(_global_optimizer_pre_hooks.values(), self._optimizer_step_pre_hooks.values()):
result = pre_hook(self, args, kwargs)
if result is not None:
if isinstance(result, tuple) and len(result) == 2:
args, kwargs = result # type: ignore[assignment]
else:
raise RuntimeError(
f"{func} must return None or a tuple of (new_args, new_kwargs), but got {result}."
)
out = func(*args, **kwargs)
self._optimizer_step_code()
# call optimizer step post hooks
for post_hook in chain(self._optimizer_step_post_hooks.values(), _global_optimizer_post_hooks.values()):
post_hook(self, args, kwargs)
return out
return wrapper
| (func: Callable[~_P, ~R]) -> Callable[~_P, ~R] |
36,414 | torch.optim.optimizer | register_load_state_dict_post_hook | Register a load_state_dict post-hook which will be called after
:meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the
following signature::
hook(optimizer) -> None
The ``optimizer`` argument is the optimizer instance being used.
The hook will be called with argument ``self`` after calling
``load_state_dict`` on ``self``. The registered hook can be used to
perform post-processing after ``load_state_dict`` has loaded the
``state_dict``.
Args:
hook (Callable): The user defined hook to be registered.
prepend (bool): If True, the provided post ``hook`` will be fired before
all the already registered post-hooks on ``load_state_dict``. Otherwise,
the provided ``hook`` will be fired after all the already registered
post-hooks. (default: False)
Returns:
:class:`torch.utils.hooks.RemoveableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
| def register_load_state_dict_post_hook(
self, hook: Callable[["Optimizer"], None], prepend: bool = False
) -> RemovableHandle:
r"""Register a load_state_dict post-hook which will be called after
:meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the
following signature::
hook(optimizer) -> None
The ``optimizer`` argument is the optimizer instance being used.
The hook will be called with argument ``self`` after calling
``load_state_dict`` on ``self``. The registered hook can be used to
perform post-processing after ``load_state_dict`` has loaded the
``state_dict``.
Args:
hook (Callable): The user defined hook to be registered.
prepend (bool): If True, the provided post ``hook`` will be fired before
all the already registered post-hooks on ``load_state_dict``. Otherwise,
the provided ``hook`` will be fired after all the already registered
post-hooks. (default: False)
Returns:
:class:`torch.utils.hooks.RemoveableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._optimizer_load_state_dict_post_hooks)
self._optimizer_load_state_dict_post_hooks[handle.id] = hook
if prepend:
self._optimizer_load_state_dict_post_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined]
return handle
| (self, hook: Callable[[torch.optim.optimizer.Optimizer], NoneType], prepend: bool = False) -> torch.utils.hooks.RemovableHandle |
36,415 | torch.optim.optimizer | register_load_state_dict_pre_hook | Register a load_state_dict pre-hook which will be called before
:meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the
following signature::
hook(optimizer, state_dict) -> state_dict or None
The ``optimizer`` argument is the optimizer instance being used and the
``state_dict`` argument is a shallow copy of the ``state_dict`` the user
passed in to ``load_state_dict``. The hook may modify the state_dict inplace
or optionally return a new one. If a state_dict is returned, it will be used
to be loaded into the optimizer.
The hook will be called with argument ``self`` and ``state_dict`` before
calling ``load_state_dict`` on ``self``. The registered hook can be used to
perform pre-processing before the ``load_state_dict`` call is made.
Args:
hook (Callable): The user defined hook to be registered.
prepend (bool): If True, the provided pre ``hook`` will be fired before
all the already registered pre-hooks on ``load_state_dict``. Otherwise,
the provided ``hook`` will be fired after all the already registered
pre-hooks. (default: False)
Returns:
:class:`torch.utils.hooks.RemoveableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
| def register_load_state_dict_pre_hook(
self,
hook: Callable[["Optimizer", StateDict], Optional[StateDict]],
prepend: bool = False,
) -> RemovableHandle:
r"""Register a load_state_dict pre-hook which will be called before
:meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the
following signature::
hook(optimizer, state_dict) -> state_dict or None
The ``optimizer`` argument is the optimizer instance being used and the
``state_dict`` argument is a shallow copy of the ``state_dict`` the user
passed in to ``load_state_dict``. The hook may modify the state_dict inplace
or optionally return a new one. If a state_dict is returned, it will be used
to be loaded into the optimizer.
The hook will be called with argument ``self`` and ``state_dict`` before
calling ``load_state_dict`` on ``self``. The registered hook can be used to
perform pre-processing before the ``load_state_dict`` call is made.
Args:
hook (Callable): The user defined hook to be registered.
prepend (bool): If True, the provided pre ``hook`` will be fired before
all the already registered pre-hooks on ``load_state_dict``. Otherwise,
the provided ``hook`` will be fired after all the already registered
pre-hooks. (default: False)
Returns:
:class:`torch.utils.hooks.RemoveableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._optimizer_load_state_dict_pre_hooks)
self._optimizer_load_state_dict_pre_hooks[handle.id] = hook
if prepend:
self._optimizer_load_state_dict_pre_hooks.move_to_end(handle.id, last=False)
return handle
| (self, hook: Callable[[torch.optim.optimizer.Optimizer, Dict[str, Any]], Optional[Dict[str, Any]]], prepend: bool = False) -> torch.utils.hooks.RemovableHandle |
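A combined sketch of the `load_state_dict` pre- and post-hooks described in this and the previous row, assuming a PyTorch version recent enough to provide them:

```python
import torch

model = torch.nn.Linear(2, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

def pre_hook(optimizer, state_dict):
    # may modify the shallow copy in place or return a replacement dict;
    # here the learning rate about to be loaded is overridden
    state_dict['param_groups'][0]['lr'] = 0.01
    return state_dict

def post_hook(optimizer):
    # runs after load_state_dict has applied the state
    print('loaded, lr is now', optimizer.param_groups[0]['lr'])

h_pre = opt.register_load_state_dict_pre_hook(pre_hook)
h_post = opt.register_load_state_dict_post_hook(post_hook)
opt.load_state_dict(opt.state_dict())   # post-hook reports lr 0.01
h_pre.remove()
h_post.remove()
```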
36,416 | torch.optim.optimizer | register_state_dict_post_hook | Register a state dict post-hook which will be called after
:meth:`~torch.optim.Optimizer.state_dict` is called. It should have the
following signature::
hook(optimizer, state_dict) -> state_dict or None
The hook will be called with arguments ``self`` and ``state_dict`` after generating
a ``state_dict`` on ``self``. The hook may modify the state_dict inplace or optionally
return a new one. The registered hook can be used to perform post-processing
on the ``state_dict`` before it is returned.
Args:
hook (Callable): The user defined hook to be registered.
prepend (bool): If True, the provided post ``hook`` will be fired before
all the already registered post-hooks on ``state_dict``. Otherwise,
the provided ``hook`` will be fired after all the already registered
post-hooks. (default: False)
Returns:
:class:`torch.utils.hooks.RemoveableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
| def register_state_dict_post_hook(
self,
hook: Callable[["Optimizer", StateDict], Optional[StateDict]],
prepend: bool = False,
) -> RemovableHandle:
r"""Register a state dict post-hook which will be called after
:meth:`~torch.optim.Optimizer.state_dict` is called. It should have the
following signature::
hook(optimizer, state_dict) -> state_dict or None
The hook will be called with arguments ``self`` and ``state_dict`` after generating
a ``state_dict`` on ``self``. The hook may modify the state_dict inplace or optionally
return a new one. The registered hook can be used to perform post-processing
on the ``state_dict`` before it is returned.
Args:
hook (Callable): The user defined hook to be registered.
prepend (bool): If True, the provided post ``hook`` will be fired before
all the already registered post-hooks on ``state_dict``. Otherwise,
the provided ``hook`` will be fired after all the already registered
post-hooks. (default: False)
Returns:
:class:`torch.utils.hooks.RemoveableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._optimizer_state_dict_post_hooks)
self._optimizer_state_dict_post_hooks[handle.id] = hook
if prepend:
self._optimizer_state_dict_post_hooks.move_to_end(handle.id, last=False)
return handle
| (self, hook: Callable[[torch.optim.optimizer.Optimizer, Dict[str, Any]], Optional[Dict[str, Any]]], prepend: bool = False) -> torch.utils.hooks.RemovableHandle |
36,417 | torch.optim.optimizer | register_state_dict_pre_hook | Register a state dict pre-hook which will be called before
:meth:`~torch.optim.Optimizer.state_dict` is called. It should have the
following signature::
hook(optimizer) -> None
The ``optimizer`` argument is the optimizer instance being used.
The hook will be called with argument ``self`` before calling ``state_dict`` on ``self``.
The registered hook can be used to perform pre-processing before the ``state_dict``
call is made.
Args:
hook (Callable): The user defined hook to be registered.
prepend (bool): If True, the provided pre ``hook`` will be fired before
all the already registered pre-hooks on ``state_dict``. Otherwise,
the provided ``hook`` will be fired after all the already registered
pre-hooks. (default: False)
Returns:
:class:`torch.utils.hooks.RemoveableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
| def register_state_dict_pre_hook(
self, hook: Callable[["Optimizer"], None], prepend: bool = False
) -> RemovableHandle:
r"""Register a state dict pre-hook which will be called before
:meth:`~torch.optim.Optimizer.state_dict` is called. It should have the
following signature::
hook(optimizer) -> None
The ``optimizer`` argument is the optimizer instance being used.
The hook will be called with argument ``self`` before calling ``state_dict`` on ``self``.
The registered hook can be used to perform pre-processing before the ``state_dict``
call is made.
Args:
hook (Callable): The user defined hook to be registered.
prepend (bool): If True, the provided pre ``hook`` will be fired before
all the already registered pre-hooks on ``state_dict``. Otherwise,
the provided ``hook`` will be fired after all the already registered
pre-hooks. (default: False)
Returns:
:class:`torch.utils.hooks.RemoveableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._optimizer_state_dict_pre_hooks)
self._optimizer_state_dict_pre_hooks[handle.id] = hook
if prepend:
self._optimizer_state_dict_pre_hooks.move_to_end(handle.id, last=False)
return handle
| (self, hook: Callable[[torch.optim.optimizer.Optimizer], NoneType], prepend: bool = False) -> torch.utils.hooks.RemovableHandle |
36,418 | torch.optim.optimizer | register_step_post_hook | Register an optimizer step post hook which will be called after optimizer step.
It should have the following signature::
hook(optimizer, args, kwargs) -> None
The ``optimizer`` argument is the optimizer instance being used.
Args:
hook (Callable): The user defined hook to be registered.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
| def register_step_post_hook(self, hook: OptimizerPostHook) -> RemovableHandle:
r"""Register an optimizer step post hook which will be called after optimizer step.
It should have the following signature::
hook(optimizer, args, kwargs) -> None
The ``optimizer`` argument is the optimizer instance being used.
Args:
hook (Callable): The user defined hook to be registered.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._optimizer_step_post_hooks)
self._optimizer_step_post_hooks[handle.id] = hook
return handle
| (self, hook: Callable[[typing_extensions.Self, Tuple[Any, ...], Dict[str, Any]], NoneType]) -> torch.utils.hooks.RemovableHandle |
36,419 | torch.optim.optimizer | register_step_pre_hook | Register an optimizer step pre hook which will be called before
optimizer step. It should have the following signature::
hook(optimizer, args, kwargs) -> None or modified args and kwargs
The ``optimizer`` argument is the optimizer instance being used. If
args and kwargs are modified by the pre-hook, then the transformed
values are returned as a tuple containing the new_args and new_kwargs.
Args:
hook (Callable): The user defined hook to be registered.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
| def register_step_pre_hook(self, hook: OptimizerPreHook) -> RemovableHandle:
r"""Register an optimizer step pre hook which will be called before
optimizer step. It should have the following signature::
hook(optimizer, args, kwargs) -> None or modified args and kwargs
The ``optimizer`` argument is the optimizer instance being used. If
args and kwargs are modified by the pre-hook, then the transformed
values are returned as a tuple containing the new_args and new_kwargs.
Args:
hook (Callable): The user defined hook to be registered.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._optimizer_step_pre_hooks)
self._optimizer_step_pre_hooks[handle.id] = hook
return handle
| (self, hook: Callable[[typing_extensions.Self, Tuple[Any, ...], Dict[str, Any]], Optional[Tuple[Tuple[Any, ...], Dict[str, Any]]]]) -> torch.utils.hooks.RemovableHandle |
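A small sketch of the step pre- and post-hooks from this and the previous row, again assuming a PyTorch version recent enough to provide them:

```python
import torch

model = torch.nn.Linear(3, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.01)

def pre_hook(optimizer, args, kwargs):
    print('before step')
    return None                      # or return (new_args, new_kwargs) to rewrite them

def post_hook(optimizer, args, kwargs):
    print('after step')

h_pre = opt.register_step_pre_hook(pre_hook)
h_post = opt.register_step_post_hook(post_hook)

model(torch.randn(8, 3)).sum().backward()
opt.step()                           # prints 'before step' then 'after step'
h_pre.remove()
h_post.remove()
```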
36,420 | torch.optim.optimizer | state_dict | Returns the state of the optimizer as a :class:`dict`.
It contains two entries:
* ``state``: a Dict holding current optimization state. Its content
differs between optimizer classes, but some common characteristics
hold. For example, state is saved per parameter, and the parameter
itself is NOT saved. ``state`` is a Dictionary mapping parameter ids
to a Dict with state corresponding to each parameter.
* ``param_groups``: a List containing all parameter groups where each
parameter group is a Dict. Each parameter group contains metadata
specific to the optimizer, such as learning rate and weight decay,
as well as a List of parameter IDs of the parameters in the group.
NOTE: The parameter IDs may look like indices but they are just IDs
associating state with param_group. When loading from a state_dict,
the optimizer will zip the param_group ``params`` (int IDs) and the
optimizer ``param_groups`` (actual ``nn.Parameter`` s) in order to
match state WITHOUT additional verification.
A returned state dict might look something like:
.. code-block:: text
{
'state': {
0: {'momentum_buffer': tensor(...), ...},
1: {'momentum_buffer': tensor(...), ...},
2: {'momentum_buffer': tensor(...), ...},
3: {'momentum_buffer': tensor(...), ...}
},
'param_groups': [
{
'lr': 0.01,
'weight_decay': 0,
...
'params': [0]
},
{
'lr': 0.001,
'weight_decay': 0.5,
...
'params': [1, 2, 3]
}
]
}
| import math
import functools
import warnings
from collections import OrderedDict, defaultdict
from copy import deepcopy
from itertools import chain
from typing import (
Any,
Callable,
DefaultDict,
Dict,
Hashable,
Iterable,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
cast,
overload,
)
from typing_extensions import ParamSpec, Self, TypeAlias
import torch
import torch.utils.hooks as hooks
from torch.utils.hooks import RemovableHandle
from torch.utils._foreach_utils import (
Indices,
TensorListList,
_get_foreach_kernels_supported_devices,
_get_fused_kernels_supported_devices,
)
from torch._utils import is_compiling
from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype
Args: TypeAlias = Tuple[Any, ...]
Kwargs: TypeAlias = Dict[str, Any]
StateDict: TypeAlias = Dict[str, Any]
GlobalOptimizerPreHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], Optional[Tuple[Args, Kwargs]]]
GlobalOptimizerPostHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], None]
__all__ = ['Optimizer', 'register_optimizer_step_pre_hook', 'register_optimizer_step_post_hook']
_global_optimizer_pre_hooks: Dict[int, GlobalOptimizerPreHook] = OrderedDict()
_global_optimizer_post_hooks: Dict[int, GlobalOptimizerPostHook] = OrderedDict()
_foreach_supported_types = [torch.Tensor, torch.nn.parameter.Parameter]
class _RequiredParameter:
"""Singleton class representing a required parameter for an Optimizer."""
def __repr__(self) -> str:
return "<required parameter>"
| (self) -> Dict[str, Any] |
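A minimal round-trip sketch of the `state_dict`/`load_state_dict` pair described above, using a plain SGD optimizer:

```python
import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

# one step so per-parameter state (momentum buffers) exists
model(torch.randn(1, 4)).sum().backward()
opt.step()

sd = opt.state_dict()
print(sorted(sd.keys()))             # ['param_groups', 'state']
print(sd['param_groups'][0]['lr'])   # 0.1

# restore into a freshly constructed optimizer over the same parameters
opt2 = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
opt2.load_state_dict(sd)
```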
36,421 | torch_pso.optim.GenericPSO | step |
Performs a single optimization step.
:param particle_step_kwargs: Dict of keyword arguments to pass to the particle step function, if needed.
:param closure: A callable that reevaluates the model and returns the loss.
:return: the final loss after the step (as calculated by the closure)
| null | (self, closure: Callable[[], torch.Tensor], particle_step_kwargs: Optional[Dict] = None) -> torch.Tensor |
36,422 | torch.optim.optimizer | zero_grad | Resets the gradients of all optimized :class:`torch.Tensor` s.
Args:
set_to_none (bool): instead of setting to zero, set the grads to None.
This will in general have lower memory footprint, and can modestly improve performance.
However, it changes certain behaviors. For example:
1. When the user tries to access a gradient and perform manual ops on it,
a None attribute or a Tensor full of 0s will behave differently.
2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s
are guaranteed to be None for params that did not receive a gradient.
3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None
(in one case it does the step with a gradient of 0 and in the other it skips
the step altogether).
| import math
import functools
import warnings
from collections import OrderedDict, defaultdict
from copy import deepcopy
from itertools import chain
from typing import (
Any,
Callable,
DefaultDict,
Dict,
Hashable,
Iterable,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
cast,
overload,
)
from typing_extensions import ParamSpec, Self, TypeAlias
import torch
import torch.utils.hooks as hooks
from torch.utils.hooks import RemovableHandle
from torch.utils._foreach_utils import (
Indices,
TensorListList,
_get_foreach_kernels_supported_devices,
_get_fused_kernels_supported_devices,
)
from torch._utils import is_compiling
from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype
Args: TypeAlias = Tuple[Any, ...]
Kwargs: TypeAlias = Dict[str, Any]
StateDict: TypeAlias = Dict[str, Any]
GlobalOptimizerPreHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], Optional[Tuple[Args, Kwargs]]]
GlobalOptimizerPostHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], None]
__all__ = ['Optimizer', 'register_optimizer_step_pre_hook', 'register_optimizer_step_post_hook']
_global_optimizer_pre_hooks: Dict[int, GlobalOptimizerPreHook] = OrderedDict()
_global_optimizer_post_hooks: Dict[int, GlobalOptimizerPostHook] = OrderedDict()
_foreach_supported_types = [torch.Tensor, torch.nn.parameter.Parameter]
class _RequiredParameter:
"""Singleton class representing a required parameter for an Optimizer."""
def __repr__(self) -> str:
return "<required parameter>"
| (self, set_to_none: bool = True) -> NoneType |
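A short sketch contrasting the two `set_to_none` behaviours described above:

```python
import torch

model = torch.nn.Linear(2, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

model(torch.randn(4, 2)).sum().backward()
opt.zero_grad(set_to_none=True)
print(model.weight.grad)             # None

model(torch.randn(4, 2)).sum().backward()
opt.zero_grad(set_to_none=False)
print(model.weight.grad)             # a tensor of zeros, same shape as the weight
```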
36,423 | torch_pso.optim.AutotuningPSO | AutotuningPSO |
Autotuning Particle Swarm Optimization is a modification of Particle Swarm Optimization where the coefficients
change over time, as prescribed by Axel Thevenot in his Medium Article entitled ["Particle Swarm Optimization
Visually Explained"](https://towardsdatascience.com/particle-swarm-optimization-visually-explained-46289eeb2e14).
| class AutotuningPSO(ParticleSwarmOptimizer):
"""
Autotuning Particle Swarm Optimization is a modification of Particle Swarm Optimization where the coefficients
change over time, as prescribed by Axel Thevenot in his Medium Article entitled ["Particle Swarm Optimization
Visually Explained"](https://towardsdatascience.com/particle-swarm-optimization-visually-explained-46289eeb2e14).
"""
def __init__(self,
params: Iterable[torch.nn.Parameter],
num_total_iterations: int = 1000,
inertial_weight: float = .9,
cognitive_coefficient: float = 1.,
social_coefficient: float = 1.,
num_particles: int = 100,
max_param_value: float = 10.,
min_param_value: float = -10.,
):
super().__init__(params, inertial_weight, cognitive_coefficient, social_coefficient, num_particles,
max_param_value, min_param_value)
self.num_total_iterations = num_total_iterations
self.current_step = 0
@torch.no_grad()
def step(self, closure: Callable[[], torch.Tensor], n: Optional[int] = None) -> torch.Tensor:
"""
Performs a single optimization step. This is a standard PSO step, followed by a weight adjustment.
:param n: integer representing which "step number" this step should be treated as in calculating
the weight decays
:param closure: A callable that reevaluates the model and returns the loss.
:return: the final loss after the step (as calculated by the closure)
"""
# Calculate the new coefficients for the swarm
n = n if n is not None else self.current_step
w_t = 0.4 * (n - self.num_total_iterations) / self.num_total_iterations ** 2 + 0.4
cognitive_t = -3 * n / self.num_total_iterations + 3.5
social_t = 3 * n / self.num_total_iterations + 0.5
self.inertial_weight = w_t
self.cognitive_coefficient = cognitive_t
self.social_coefficient = social_t
# Update the step number
for particle in self.particles:
particle_loss = particle.step(closure, self.best_known_global_param_groups)
if particle_loss < self.best_known_global_loss_value:
self.best_known_global_param_groups = clone_param_groups(particle.position)
self.best_known_global_loss_value = particle_loss
# Update the particle coefficients
particle.inertial_weight = w_t
particle.cognitive_coefficient = cognitive_t
particle.social_coefficient = social_t
# set the module's parameters to be the best performing ones
for master_group, best_group in zip(self.param_groups, self.best_known_global_param_groups):
clone = clone_param_group(best_group)['params']
for i in range(len(clone)):
master_group['params'][i].data = clone[i].data
return closure() # loss = closure()
| (params: Iterable[torch.nn.parameter.Parameter], num_total_iterations: int = 1000, inertial_weight: float = 0.9, cognitive_coefficient: float = 1.0, social_coefficient: float = 1.0, num_particles: int = 100, max_param_value: float = 10.0, min_param_value: float = -10.0) |
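A small sketch that simply evaluates the coefficient schedule from the `step()` body above for a hypothetical 1000-iteration run; inertia stays near 0.4 while the search shifts from cognitive (self-best) to social (swarm-best) behaviour:

```python
N = 1000                             # num_total_iterations
for n in (0, 250, 500, 750, 1000):
    w = 0.4 * (n - N) / N**2 + 0.4
    cognitive = -3 * n / N + 3.5
    social = 3 * n / N + 0.5
    print(f'n={n:4d}  w={w:.4f}  cognitive={cognitive:.2f}  social={social:.2f}')
```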
36,425 | torch_pso.optim.AutotuningPSO | __init__ | null | def __init__(self,
params: Iterable[torch.nn.Parameter],
num_total_iterations: int = 1000,
inertial_weight: float = .9,
cognitive_coefficient: float = 1.,
social_coefficient: float = 1.,
num_particles: int = 100,
max_param_value: float = 10.,
min_param_value: float = -10.,
):
super().__init__(params, inertial_weight, cognitive_coefficient, social_coefficient, num_particles,
max_param_value, min_param_value)
self.num_total_iterations = num_total_iterations
self.current_step = 0
| (self, params: Iterable[torch.nn.parameter.Parameter], num_total_iterations: int = 1000, inertial_weight: float = 0.9, cognitive_coefficient: float = 1.0, social_coefficient: float = 1.0, num_particles: int = 100, max_param_value: float = 10.0, min_param_value: float = -10.0) |
36,443 | torch_pso.optim.AutotuningPSO | step |
Performs a single optimization step. This is a standard PSO step, followed by a weight adjustment.
:param n: integer representing which "step number" this step should be treated as in calculating
the weight decays
:param closure: A callable that reevaluates the model and returns the loss.
:return: the final loss after the step (as calculated by the closure)
| null | (self, closure: Callable[[], torch.Tensor], n: Optional[int] = None) -> torch.Tensor |
36,445 | torch_pso.optim.ChaoticPSO | ChaoticPSO |
ChaoticPSO is a variation on the original Particle Swarm Optimization inspired by techniques in training
Hopfield Networks. It introduces some chaos-like mechanics into the optimization, theoretically improving
convergence speed in some contexts.
Original Paper:
Sun, Yanxia & Qi, Guoyuan & Wang, Zenghui & Van Wyk, Barend & Hamam, Yskandar. (2009). Chaotic particle
swarm optimization. 505-510. 10.1145/1543834.1543902.
https://www.researchgate.net/publication/220741402_Chaotic_particle_swarm_optimization
| class ChaoticPSO(GenericPSO):
"""
ChaoticPSO is a variation on the original Particle Swarm Optimization inspired by techniques in training
Hopfield Networks. It introduces some chaos-like mechanics into the optimization, theoretically improving
convergence speed in some contexts.
Original Paper:
Sun, Yanxia & Qi, Guoyuan & Wang, Zenghui & Van Wyk, Barend & Hamam, Yskandar. (2009). Chaotic particle
swarm optimization. 505-510. 10.1145/1543834.1543902.
https://www.researchgate.net/publication/220741402_Chaotic_particle_swarm_optimization
"""
def __init__(self,
params: Iterable[torch.nn.Parameter],
num_particles: int = 100,
a: float = 0.02,
b: float = 0.01,
c: float = 0.01,
beta: float = .001,
k: float = 15.,
epsilon: float = 1.,
i0: float = 0.2,
z: float = 0.7,
max_param_value: float = -10,
min_param_value: float = 10):
particle_kwargs = {'a': a,
'b': b,
'c': c,
'beta': beta,
'k': k,
'epsilon': epsilon,
'i0': i0,
'z':z,
'max_param_value': max_param_value,
'min_param_value': min_param_value,
}
super().__init__(params, num_particles, particle_class=ChaoticParticle, particle_kwargs=particle_kwargs)
| (params: Iterable[torch.nn.parameter.Parameter], num_particles: int = 100, a: float = 0.02, b: float = 0.01, c: float = 0.01, beta: float = 0.001, k: float = 15.0, epsilon: float = 1.0, i0: float = 0.2, z: float = 0.7, max_param_value: float = -10, min_param_value: float = 10) |
36,447 | torch_pso.optim.ChaoticPSO | __init__ | null | def __init__(self,
params: Iterable[torch.nn.Parameter],
num_particles: int = 100,
a: float = 0.02,
b: float = 0.01,
c: float = 0.01,
beta: float = .001,
k: float = 15.,
epsilon: float = 1.,
i0: float = 0.2,
z: float = 0.7,
max_param_value: float = 10,
min_param_value: float = -10):
particle_kwargs = {'a': a,
'b': b,
'c': c,
'beta': beta,
'k': k,
'epsilon': epsilon,
'i0': i0,
'z':z,
'max_param_value': max_param_value,
'min_param_value': min_param_value,
}
super().__init__(params, num_particles, particle_class=ChaoticParticle, particle_kwargs=particle_kwargs)
| (self, params: Iterable[torch.nn.parameter.Parameter], num_particles: int = 100, a: float = 0.02, b: float = 0.01, c: float = 0.01, beta: float = 0.001, k: float = 15.0, epsilon: float = 1.0, i0: float = 0.2, z: float = 0.7, max_param_value: float = 10, min_param_value: float = -10) |
36,467 | torch_pso.optim.GenerationalPSO | GenerationalPSO |
Generational PSO is a modification of the naive Particle Swarm Optimization Algorithm, where a certain percentage of
randomly-chosen, low-performing particles are re-initialized after each step.
This is a sample algorithm designed by Andrew Sansom with the sole purpose of demonstrating an example of
alternate PSO algorithms.
| class GenerationalPSO(ParticleSwarmOptimizer):
"""
Generational PSO is a modification of the naive Particle Swarm Optimization Algorithm, where a certain percentage of
randomly-chosen, low-performing particles are re-initialized after each step.
This is a sample algorithm designed by Andrew Sansom with the sole purpose of demonstrating an example of
alternate PSO algorithms.
"""
def __init__(self,
params: Iterable[torch.nn.Parameter],
inertial_weight: float = .9,
cognitive_coefficient: float = 1.,
social_coefficient: float = 1.,
num_particles: int = 100,
max_param_value: float = 10.,
min_param_value: float = -10.,
generational_turnover_ratio: float = .05,
keep_top_performers: Union[float, int] = .5):
super().__init__(params, inertial_weight, cognitive_coefficient, social_coefficient, num_particles,
max_param_value, min_param_value)
self.generational_turnover_ratio = generational_turnover_ratio
if isinstance(keep_top_performers, float):
keep_top_performers = round(num_particles*keep_top_performers)
self.keep_top_performers = keep_top_performers
if round(num_particles*generational_turnover_ratio) > num_particles-keep_top_performers:
raise ValueError(f'The generational turnover ratio is higher than the number of bottom performers. '
f'Turnover: {round(num_particles*generational_turnover_ratio)}, '
f'Bottom Performers: {num_particles-keep_top_performers}')
@torch.no_grad()
def step(self, closure: Callable[[], torch.Tensor]) -> torch.Tensor:
"""
Performs a single optimization step.
:param closure: A callable that reevaluates the model and returns the loss.
:return: the final loss after the step (as calculated by the closure)
"""
losses = {}
for i, particle in enumerate(self.particles):
particle_loss = particle.step(closure, self.best_known_global_param_groups)
losses[i] = particle_loss
if particle_loss < self.best_known_global_loss_value:
self.best_known_global_param_groups = clone_param_groups(particle.position)
self.best_known_global_loss_value = particle_loss
# set the module's parameters to be the best performing ones
for master_group, best_group in zip(self.param_groups, self.best_known_global_param_groups):
clone = clone_param_group(best_group)['params']
for i in range(len(clone)):
master_group['params'][i].data = clone[i].data
# Respawn a certain proportion of the worst performing particles, chosen at random
best_performers_indices = sorted(losses, key=losses.get)  # ascending: lowest-loss (best) particles first
bottom_performers = best_performers_indices[self.keep_top_performers:]
indices_to_respawn = random.sample(bottom_performers, round(self.generational_turnover_ratio * len(losses)))
for index in indices_to_respawn:
self.particles[index] = Particle(self.param_groups,
self.inertial_weight,
self.cognitive_coefficient,
self.social_coefficient,
max_param_value=self.max_param_value,
min_param_value=self.min_param_value)
return closure() # loss = closure()
| (params: Iterable[torch.nn.parameter.Parameter], inertial_weight: float = 0.9, cognitive_coefficient: float = 1.0, social_coefficient: float = 1.0, num_particles: int = 100, max_param_value: float = 10.0, min_param_value: float = -10.0, generational_turnover_ratio: float = 0.05, keep_top_performers: Union[float, int] = 0.5) |
36,469 | torch_pso.optim.GenerationalPSO | __init__ | null | def __init__(self,
params: Iterable[torch.nn.Parameter],
inertial_weight: float = .9,
cognitive_coefficient: float = 1.,
social_coefficient: float = 1.,
num_particles: int = 100,
max_param_value: float = 10.,
min_param_value: float = -10.,
generational_turnover_ratio: float = .05,
keep_top_performers: Union[float, int] = .5):
super().__init__(params, inertial_weight, cognitive_coefficient, social_coefficient, num_particles,
max_param_value, min_param_value)
self.generational_turnover_ratio = generational_turnover_ratio
if isinstance(keep_top_performers, float):
keep_top_performers = round(num_particles*keep_top_performers)
self.keep_top_performers = keep_top_performers
if round(num_particles*generational_turnover_ratio) > num_particles-keep_top_performers:
raise ValueError(f'The generational turnover ratio is higher than the number of bottom performers. '
f'Turnover: {round(num_particles*generational_turnover_ratio)}, '
f'Bottom Performers: {num_particles-keep_top_performers}')
| (self, params: Iterable[torch.nn.parameter.Parameter], inertial_weight: float = 0.9, cognitive_coefficient: float = 1.0, social_coefficient: float = 1.0, num_particles: int = 100, max_param_value: float = 10.0, min_param_value: float = -10.0, generational_turnover_ratio: float = 0.05, keep_top_performers: Union[float, int] = 0.5) |
36,487 | torch_pso.optim.GenerationalPSO | step |
Performs a single optimization step.
:param closure: A callable that reevaluates the model and returns the loss.
:return: the final loss after the step (as calculated by the closure)
| null | (self, closure: Callable[[], torch.Tensor]) -> torch.Tensor |
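A short usage sketch for the GenerationalPSO entries above, showing how `generational_turnover_ratio` and `keep_top_performers` interact with the constructor's sanity check. The top-level import is an assumption.

```python
# Hypothetical usage sketch for GenerationalPSO; the top-level import is assumed.
import torch
from torch_pso import GenerationalPSO

net = torch.nn.Linear(4, 2)
# Respawn 10% of the swarm after every step, never touching the best-performing half:
# round(50 * 0.10) = 5 respawns <= 50 - round(50 * 0.5) = 25 bottom performers,
# so the ValueError check in __init__ passes.
optim = GenerationalPSO(net.parameters(),
                        num_particles=50,
                        generational_turnover_ratio=0.10,
                        keep_top_performers=0.5)

x, y = torch.rand(8, 4), torch.rand(8, 2)
loss = optim.step(lambda: torch.nn.functional.mse_loss(net(x), y))
```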
36,489 | torch_pso.optim.GenericPSO | GenericPSO | null | class GenericPSO(Optimizer):
def __init__(self, params: Iterable[torch.nn.Parameter], num_particles: int, particle_class: Type[GenericParticle],
particle_args: Optional[List] = None, particle_kwargs: Optional[Dict] = None):
defaults = {}
super().__init__(params, defaults)
if particle_args is None:
particle_args = []
if particle_kwargs is None:
particle_kwargs = {}
self.particles = [particle_class(self.param_groups, *particle_args, **particle_kwargs)
for _ in range(num_particles)]
self.best_known_global_param_groups = clone_param_groups(self.param_groups)
self.best_known_global_loss_value = torch.inf
@torch.no_grad()
def step(self, closure: Callable[[], torch.Tensor], particle_step_kwargs: Optional[Dict] = None) -> torch.Tensor:
"""
Performs a single optimization step.
:param particle_step_kwargs: Dict of keyword arguments to pass to the particle step function, if needed.
:param closure: A callable that reevaluates the model and returns the loss.
:return: the final loss after the step (as calculated by the closure)
"""
if particle_step_kwargs is None:
particle_step_kwargs = {}
for particle in self.particles:
particle_loss = particle.step(closure, self.best_known_global_param_groups, **particle_step_kwargs)
if particle_loss < self.best_known_global_loss_value:
self.best_known_global_param_groups = clone_param_groups(particle.position)
self.best_known_global_loss_value = particle_loss
# set the module's parameters to be the best performing ones
for master_group, best_group in zip(self.param_groups, self.best_known_global_param_groups):
clone = clone_param_group(best_group)['params']
for i in range(len(clone)):
master_group['params'][i].data = clone[i].data
return closure() # loss = closure()
subclasses = []
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.subclasses.append(cls)
| (params: Iterable[torch.nn.parameter.Parameter], num_particles: int, particle_class: Type[torch_pso.optim.GenericPSO.GenericParticle], particle_args: Optional[List] = None, particle_kwargs: Optional[Dict] = None) |
36,491 | torch_pso.optim.GenericPSO | __init__ | null | def __init__(self, params: Iterable[torch.nn.Parameter], num_particles: int, particle_class: Type[GenericParticle],
particle_args: Optional[List] = None, particle_kwargs: Optional[Dict] = None):
defaults = {}
super().__init__(params, defaults)
if particle_args is None:
particle_args = []
if particle_kwargs is None:
particle_kwargs = {}
self.particles = [particle_class(self.param_groups, *particle_args, **particle_kwargs)
for _ in range(num_particles)]
self.best_known_global_param_groups = clone_param_groups(self.param_groups)
self.best_known_global_loss_value = torch.inf
| (self, params: Iterable[torch.nn.parameter.Parameter], num_particles: int, particle_class: Type[torch_pso.optim.GenericPSO.GenericParticle], particle_args: Optional[List] = None, particle_kwargs: Optional[Dict] = None) |
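GenericPSO factors all swarm bookkeeping out of the concrete variants: a subclass only supplies a particle class plus its constructor arguments, and `__init_subclass__` records every variant in `GenericPSO.subclasses`. A small sketch of that registry, assuming that importing the top-level package defines (and thereby registers) the variants:

```python
# Sketch: enumerate the PSO variants registered through __init_subclass__.
import torch_pso  # assumed to import the optimizer modules and so trigger registration
from torch_pso.optim.GenericPSO import GenericPSO  # module path as listed above

for cls in GenericPSO.subclasses:
    print(cls.__name__)  # e.g. ParticleSwarmOptimizer, GenerationalPSO, ChaoticPSO, ...
```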
36,511 | torch_pso.optim.ImprovedSineCosineAlgorithm | ImprovedSineCosineAlgorithm |
The Improved Sine Cosine Algorithm is a variation on the original Sine Cosine Algorithm, specifically attempting to
improve its resilience against local optima and unbalanced exploitation. It also demonstrates better behavior in
high-dimensional optimization problems.
Original Paper:
Long, W., Wu, T., Liang, X., & Xu, S. (2019). Solving high-dimensional global optimization problems using an
improved sine cosine algorithm. Expert systems with applications, 123, 108-126.
https://e-tarjome.com/storage/panel/fileuploads/2019-08-22/1566462251_E11587-e-tarjome.pdf
| class ImprovedSineCosineAlgorithm(GenericPSO):
"""
The Improved Sine Cosine Algorithm is a variation on the original Sine Cosine Algorithm, specifically attempting to
improve its resilience against local optima and unbalanced exploitation. It also demonstrates better behavior in
high-dimensional optimization problems.
Original Paper:
Long, W., Wu, T., Liang, X., & Xu, S. (2019). Solving high-dimensional global optimization problems using an
improved sine cosine algorithm. Expert systems with applications, 123, 108-126.
https://e-tarjome.com/storage/panel/fileuploads/2019-08-22/1566462251_E11587-e-tarjome.pdf
"""
def __init__(self,
params: Iterable[torch.nn.Parameter],
num_particles: int = 100,
a_start: float = 2,
a_end: float = 0,
max_time_steps: int = 1000,
w_end: float = 0,
w_start: float = .1,
k: float = 15.,
max_param_value: float = 10,
min_param_value: float = -10):
particle_kwargs = {
'max_param_value': max_param_value,
'min_param_value': min_param_value,
}
super().__init__(params, num_particles, particle_class=ImprovedSCAParticle, particle_kwargs=particle_kwargs)
self.max_param_value = max_param_value
self.min_param_value = min_param_value
self.magnitude = max_param_value - min_param_value
self.w_end = w_end
self.w_start = w_start
self.a_start = a_start
self.a_end = a_end
self.k = k
self.max_time_steps = max_time_steps
self.current_time_step = 0
def w(self) -> float:
"""
Calculate the inertial weight of the particle at the current time step.
"""
return self.w_end + (self.w_start - self.w_end) * (
self.max_time_steps - self.current_time_step) / self.max_time_steps
def r1(self):
"""Calculate the r1 value for the current timestep"""
return (self.a_start - self.a_end) * math.exp(
-self.current_time_step ** 2 / (self.k * self.max_time_steps) ** 2) + self.a_end
@torch.no_grad()
def step(self, closure: Callable[[], torch.Tensor]) -> torch.Tensor:
"""
Performs a single optimization step.
:param closure: A callable that reevaluates the model and returns the loss.
:return: the final loss after the step (as calculated by the closure)
"""
r3 = 2 * torch.rand((1,))
r2 = 2 * torch.pi * torch.rand((1,))
use_sine = torch.rand((1,)).item() < .5
r1 = self.r1()
w = self.w()
self.current_time_step += 1
return super().step(closure, particle_step_kwargs={'r1': r1,
'r2': r2,
'r3': r3,
'use_sine': use_sine,
'w': w
})
| (params: Iterable[torch.nn.parameter.Parameter], num_particles: int = 100, a_start: float = 2, a_end: float = 0, max_time_steps: int = 1000, w_end: float = 0, w_start: float = 0.1, k: float = 15.0, max_param_value: float = 10, min_param_value: float = -10) |
36,513 | torch_pso.optim.ImprovedSineCosineAlgorithm | __init__ | null | def __init__(self,
params: Iterable[torch.nn.Parameter],
num_particles: int = 100,
a_start: float = 2,
a_end: float = 0,
max_time_steps: int = 1000,
w_end: float = 0,
w_start: float = .1,
k: float = 15.,
max_param_value: float = 10,
min_param_value: float = -10):
particle_kwargs = {
'max_param_value': max_param_value,
'min_param_value': min_param_value,
}
super().__init__(params, num_particles, particle_class=ImprovedSCAParticle, particle_kwargs=particle_kwargs)
self.max_param_value = max_param_value
self.min_param_value = min_param_value
self.magnitude = max_param_value - min_param_value
self.w_end = w_end
self.w_start = w_start
self.a_start = a_start
self.a_end = a_end
self.k = k
self.max_time_steps = max_time_steps
self.current_time_step = 0
| (self, params: Iterable[torch.nn.parameter.Parameter], num_particles: int = 100, a_start: float = 2, a_end: float = 0, max_time_steps: int = 1000, w_end: float = 0, w_start: float = 0.1, k: float = 15.0, max_param_value: float = 10, min_param_value: float = -10) |
36,524 | torch_pso.optim.ImprovedSineCosineAlgorithm | r1 | Calculate the r1 value for the current timestep | def r1(self):
"""Calculate the r1 value for the current timestep"""
return (self.a_start - self.a_end) * math.exp(
-self.current_time_step ** 2 / (self.k * self.max_time_steps) ** 2) + self.a_end
| (self) |
36,532 | torch_pso.optim.ImprovedSineCosineAlgorithm | step |
Performs a single optimization step.
:param closure: A callable that reevaluates the model and returns the loss.
:return: the final loss after the step (as calculated by the closure)
| @torch.no_grad()
def step(self, closure: Callable[[], torch.Tensor]) -> torch.Tensor:
"""
Performs a single optimization step.
:param closure: A callable that reevaluates the model and returns the loss.
:return: the final loss after the step (as calculated by the closure)
"""
r3 = 2 * torch.rand((1,))
r2 = 2 * torch.pi * torch.rand((1,))
use_sine = torch.rand((1,)).item() < .5
r1 = self.r1()
w = self.w()
self.current_time_step += 1
return super().step(closure, particle_step_kwargs={'r1': r1,
'r2': r2,
'r3': r3,
'use_sine': use_sine,
'w': w
})
| (self, closure: Callable[[], torch.Tensor]) -> torch.Tensor |
36,533 | torch_pso.optim.ImprovedSineCosineAlgorithm | w |
Calculate the inertial weight of the particle at the current time step.
| def w(self) -> float:
"""
Calculate the inertial weight of the particle at the current time step.
"""
return self.w_end + (self.w_start - self.w_end) * (
self.max_time_steps - self.current_time_step) / self.max_time_steps
| (self) -> float |
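The class couples a linearly decaying inertial weight `w` with an exponentially decaying `r1`. Below is a standalone sketch of the two schedules defined by `w()` and `r1()` above, using the constructor defaults shown in this listing.

```python
import math

# Standalone re-computation of the w() and r1() schedules above, with the defaults
# from the listing: a_start=2, a_end=0, w_start=0.1, w_end=0, k=15, max_time_steps=1000.
a_start, a_end = 2.0, 0.0
w_start, w_end = 0.1, 0.0
k, max_time_steps = 15.0, 1000

for t in (0, 250, 500, 750, 1000):
    w = w_end + (w_start - w_end) * (max_time_steps - t) / max_time_steps
    r1 = (a_start - a_end) * math.exp(-t ** 2 / (k * max_time_steps) ** 2) + a_end
    print(f"t={t:4d}  w={w:.3f}  r1={r1:.3f}")

# w falls linearly from w_start to w_end, while at these defaults r1 decays only
# slightly, because (k * max_time_steps)**2 is much larger than t**2.
```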
36,535 | torch_pso.optim.ParticleSwarmOptimizer | ParticleSwarmOptimizer |
Algorithm from Wikipedia: https://en.wikipedia.org/wiki/Particle_swarm_optimization
Let S be the number of particles in the swarm, each having a position xi ∈ ℝn in the search-space
and a velocity vi ∈ ℝn. Let pi be the best known position of particle i and let g be the best known
position of the entire swarm.
The values blo and bup represent the lower and upper boundaries of the search-space respectively.
The w parameter is the inertia weight. The parameters φp and φg are often called cognitive coefficient and
social coefficient.
The termination criterion can be the number of iterations performed, or a solution where the adequate
objective function value is found. The parameters w, φp, and φg are selected by the practitioner and control
the behaviour and efficacy of the PSO method.
for each particle i = 1, ..., S do
Initialize the particle's position with a uniformly distributed random vector: xi ~ U(blo, bup)
Initialize the particle's best known position to its initial position: pi ← xi
if f(pi) < f(g) then
update the swarm's best known position: g ← pi
Initialize the particle's velocity: vi ~ U(-|bup-blo|, |bup-blo|)
while a termination criterion is not met do:
for each particle i = 1, ..., S do
for each dimension d = 1, ..., n do
Pick random numbers: rp, rg ~ U(0,1)
Update the particle's velocity: vi,d ← w vi,d + φp rp (pi,d-xi,d) + φg rg (gd-xi,d)
Update the particle's position: xi ← xi + vi
if f(xi) < f(pi) then
Update the particle's best known position: pi ← xi
if f(pi) < f(g) then
Update the swarm's best known position: g ← pi
:param params: iterable of parameters to optimize or dicts defining parameter groups
:param inertial_weight: float representing inertial weight of the particles
:param cognitive_coefficient: float representing cognitive coefficient of the particles
:param social_coefficient: float representing social coefficient of the particles
:param num_particles: int representing the number of particles in the swarm
:param max_param_value: Maximum value of the parameters in the search space
:param min_param_value: Minimum value of the parameters in the search space
| class ParticleSwarmOptimizer(GenericPSO):
r"""
Algorithm from Wikipedia: https://en.wikipedia.org/wiki/Particle_swarm_optimization
Let S be the number of particles in the swarm, each having a position xi ∈ ℝn in the search-space
and a velocity vi ∈ ℝn. Let pi be the best known position of particle i and let g be the best known
position of the entire swarm.
The values blo and bup represent the lower and upper boundaries of the search-space respectively.
The w parameter is the inertia weight. The parameters φp and φg are often called cognitive coefficient and
social coefficient.
The termination criterion can be the number of iterations performed, or a solution where the adequate
objective function value is found. The parameters w, φp, and φg are selected by the practitioner and control
the behaviour and efficacy of the PSO method.
for each particle i = 1, ..., S do
Initialize the particle's position with a uniformly distributed random vector: xi ~ U(blo, bup)
Initialize the particle's best known position to its initial position: pi ← xi
if f(pi) < f(g) then
update the swarm's best known position: g ← pi
Initialize the particle's velocity: vi ~ U(-|bup-blo|, |bup-blo|)
while a termination criterion is not met do:
for each particle i = 1, ..., S do
for each dimension d = 1, ..., n do
Pick random numbers: rp, rg ~ U(0,1)
Update the particle's velocity: vi,d ← w vi,d + φp rp (pi,d-xi,d) + φg rg (gd-xi,d)
Update the particle's position: xi ← xi + vi
if f(xi) < f(pi) then
Update the particle's best known position: pi ← xi
if f(pi) < f(g) then
Update the swarm's best known position: g ← pi
:param params: iterable of parameters to optimize or dicts defining parameter groups
:param inertial_weight: float representing inertial weight of the particles
:param cognitive_coefficient: float representing cognitive coefficient of the particles
:param social_coefficient: float representing social coefficient of the particles
:param num_particles: int representing the number of particles in the swarm
:param max_param_value: Maximum value of the parameters in the search space
:param min_param_value: Minimum value of the parameters in the search space
"""
def __init__(self,
params: Iterable[torch.nn.Parameter],
inertial_weight: float = .9,
cognitive_coefficient: float = 1.,
social_coefficient: float = 1.,
num_particles: int = 100,
max_param_value: float = 10.,
min_param_value: float = -10.):
self.num_particles = num_particles
self.inertial_weight = inertial_weight
self.cognitive_coefficient = cognitive_coefficient
self.social_coefficient = social_coefficient
self.max_param_value = max_param_value
self.min_param_value = min_param_value
kwargs = {'inertial_weight': inertial_weight,
'cognitive_coefficient': cognitive_coefficient,
'social_coefficient': social_coefficient,
'max_param_value': max_param_value,
'min_param_value': min_param_value}
super().__init__(params, num_particles, Particle, particle_kwargs=kwargs)
# defaults = {}
# super().__init__(params, defaults)
# # print('self.param_groups', self.param_groups)
# self.particles = [Particle(self.param_groups,
# self.inertial_weight,
# self.cognitive_coefficient,
# self.social_coefficient,
# max_param_value=self.max_param_value,
# min_param_value=self.min_param_value)
# for _ in range(self.num_particles)]
#
# self.best_known_global_param_groups = clone_param_groups(self.param_groups)
# self.best_known_global_loss_value = torch.inf
# @torch.no_grad()
# def step(self, closure: Callable[[], torch.Tensor]) -> torch.Tensor:
# """
# Performs a single optimization step.
#
# :param closure: A callable that reevaluates the model and returns the loss.
# :return: the final loss after the step (as calculated by the closure)
# """
# for particle in self.particles:
# particle_loss = particle.step(closure, self.best_known_global_param_groups)
# if particle_loss < self.best_known_global_loss_value:
# self.best_known_global_param_groups = clone_param_groups(particle.position)
# self.best_known_global_loss_value = particle_loss
#
# # set the module's parameters to be the best performing ones
# for master_group, best_group in zip(self.param_groups, self.best_known_global_param_groups):
# clone = clone_param_group(best_group)['params']
# for i in range(len(clone)):
# master_group['params'][i].data = clone[i].data
#
# return closure() # loss = closure()
| (params: Iterable[torch.nn.parameter.Parameter], inertial_weight: float = 0.9, cognitive_coefficient: float = 1.0, social_coefficient: float = 1.0, num_particles: int = 100, max_param_value: float = 10.0, min_param_value: float = -10.0) |
36,537 | torch_pso.optim.ParticleSwarmOptimizer | __init__ | null | def __init__(self,
params: Iterable[torch.nn.Parameter],
inertial_weight: float = .9,
cognitive_coefficient: float = 1.,
social_coefficient: float = 1.,
num_particles: int = 100,
max_param_value: float = 10.,
min_param_value: float = -10.):
self.num_particles = num_particles
self.inertial_weight = inertial_weight
self.cognitive_coefficient = cognitive_coefficient
self.social_coefficient = social_coefficient
self.max_param_value = max_param_value
self.min_param_value = min_param_value
kwargs = {'inertial_weight': inertial_weight,
'cognitive_coefficient': cognitive_coefficient,
'social_coefficient': social_coefficient,
'max_param_value': max_param_value,
'min_param_value': min_param_value}
super().__init__(params, num_particles, Particle, particle_kwargs=kwargs)
# defaults = {}
# super().__init__(params, defaults)
# # print('self.param_groups', self.param_groups)
# self.particles = [Particle(self.param_groups,
# self.inertial_weight,
# self.cognitive_coefficient,
# self.social_coefficient,
# max_param_value=self.max_param_value,
# min_param_value=self.min_param_value)
# for _ in range(self.num_particles)]
#
# self.best_known_global_param_groups = clone_param_groups(self.param_groups)
# self.best_known_global_loss_value = torch.inf
| (self, params: Iterable[torch.nn.parameter.Parameter], inertial_weight: float = 0.9, cognitive_coefficient: float = 1.0, social_coefficient: float = 1.0, num_particles: int = 100, max_param_value: float = 10.0, min_param_value: float = -10.0) |
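A minimal training-loop sketch for ParticleSwarmOptimizer, matching the pseudocode in the docstring above. The closure is evaluated many times per `step` call (for each particle and once more at the end), so it should be cheap; the top-level import is assumed.

```python
import torch
from torch_pso import ParticleSwarmOptimizer  # assumed top-level re-export

net = torch.nn.Linear(10, 1)
optim = ParticleSwarmOptimizer(net.parameters(),
                               inertial_weight=0.9,
                               num_particles=100,
                               max_param_value=1.0,
                               min_param_value=-1.0)

x, target = torch.rand(10, 10), torch.rand(10, 1)

def closure() -> torch.Tensor:
    # No backward pass: PSO only needs the loss value.
    return torch.nn.functional.mse_loss(net(x), target)

for _ in range(100):
    best_loss = optim.step(closure)
```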
36,557 | torch_pso.optim.RingTopologyPSO | RingTopologyPSO |
Ring Topology PSO is a modification of the naive Particle Swarm Optimization Algorithm, where instead of feeding
each particle the global optimum, each particle only receives the minimum of itself and two other particles. These
two particles are the same through the entire iteration.
This is called a "ring" because the network graph showing these connections has a single cycle.
| class RingTopologyPSO(ParticleSwarmOptimizer):
"""
Ring Topology PSO is a modification of the naive Particle Swarm Optimization Algorithm, where instead of feeding
each particle the global optimum, each particle only receives the minimum of itself and two other particles. These
two particles are the same through the entire iteration.
This is called a "ring" because the network graph showing these connections has a single cycle.
"""
def __init__(self,
params: Iterable[torch.nn.Parameter],
num_neighbors: int = 2,
inertial_weight: float = .9,
cognitive_coefficient: float = 1.,
social_coefficient: float = 1.,
num_particles: int = 100,
max_param_value: float = 10.,
min_param_value: float = -10.):
super().__init__(params, inertial_weight, cognitive_coefficient, social_coefficient, num_particles,
max_param_value, min_param_value)
self.losses = {i: (particle.position, torch.inf) for i, particle in enumerate(self.particles)}
self.num_neighbors = num_neighbors
def _find_minimum_of_neighbors(self, particle_index: int) -> List[Dict]:
neighbors = [(particle_index + i) % len(self.particles) for i in range(self.num_neighbors)]
# Sort ascending by loss so index 0 really is the lowest-loss neighbour
best = sorted([self.losses[n] for n in neighbors],
key=lambda x: x[1])[0]
return clone_param_groups(best[0])
@torch.no_grad()
def step(self, closure: Callable[[], torch.Tensor]) -> torch.Tensor:
"""
Performs a single optimization step.
:param closure: A callable that reevaluates the model and returns the loss.
:return: the final loss after the step (as calculated by the closure)
"""
losses = {}
for i, particle in enumerate(self.particles):
particle_loss = particle.step(closure, self._find_minimum_of_neighbors(i))
losses[i] = (particle.position, particle_loss)
if particle_loss < self.best_known_global_loss_value:
self.best_known_global_param_groups = clone_param_groups(particle.position)
self.best_known_global_loss_value = particle_loss
# set the module's parameters to be the best performing ones
for master_group, best_group in zip(self.param_groups, self.best_known_global_param_groups):
clone = clone_param_group(best_group)['params']
for i in range(len(clone)):
master_group['params'][i].data = clone[i].data
self.losses = losses
return closure() # loss = closure()
| (params: Iterable[torch.nn.parameter.Parameter], num_neighbors: int = 2, inertial_weight: float = 0.9, cognitive_coefficient: float = 1.0, social_coefficient: float = 1.0, num_particles: int = 100, max_param_value: float = 10.0, min_param_value: float = -10.0) |
36,559 | torch_pso.optim.RingTopologyPSO | __init__ | null | def __init__(self,
params: Iterable[torch.nn.Parameter],
num_neighbors: int = 2,
inertial_weight: float = .9,
cognitive_coefficient: float = 1.,
social_coefficient: float = 1.,
num_particles: int = 100,
max_param_value: float = 10.,
min_param_value: float = -10.):
super().__init__(params, inertial_weight, cognitive_coefficient, social_coefficient, num_particles,
max_param_value, min_param_value)
self.losses = {i: (particle.position, torch.inf) for i, particle in enumerate(self.particles)}
self.num_neighbors = num_neighbors
| (self, params: Iterable[torch.nn.parameter.Parameter], num_neighbors: int = 2, inertial_weight: float = 0.9, cognitive_coefficient: float = 1.0, social_coefficient: float = 1.0, num_particles: int = 100, max_param_value: float = 10.0, min_param_value: float = -10.0) |
36,563 | torch_pso.optim.RingTopologyPSO | _find_minimum_of_neighbors | null | def _find_minimum_of_neighbors(self, particle_index: int) -> List[Dict]:
neighbors = [(particle_index + i) % len(self.particles) for i in range(self.num_neighbors)]
# Sort ascending by loss so index 0 really is the lowest-loss neighbour
best = sorted([self.losses[n] for n in neighbors],
key=lambda x: x[1])[0]
return clone_param_groups(best[0])
| (self, particle_index: int) -> List[Dict] |
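A standalone sketch of the neighbourhood index arithmetic used by `_find_minimum_of_neighbors`: with the default `num_neighbors=2`, particle `i` only ever sees the losses of particles `i` and `i+1` (modulo the swarm size), which is what makes the communication graph a single cycle.

```python
# Standalone sketch of the ring neighbourhood (no torch needed).
num_particles, num_neighbors = 5, 2

for i in range(num_particles):
    neighbours = [(i + k) % num_particles for k in range(num_neighbors)]
    print(i, neighbours)
# 0 [0, 1]
# 1 [1, 2]
# 2 [2, 3]
# 3 [3, 4]
# 4 [4, 0]
```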
36,580 | torch_pso.optim.SineCosineAlgorithm | SineCosineAlgorithm |
The Sine Cosine Algorithm, conceived by Seyedali Mirjalili, is similar to PSO, although the author does not use
particle terminology in the paper. Each particle moves towards a destination (the best candidate solution) with a
step size determined by several random parameters. The "sine" and "cosine" in the name come from the use of sine
and cosine functions to determine whether the particle's step will explore or exploit the current best solution.
Original Paper:
Seyedali Mirjalili, SCA: A Sine Cosine Algorithm for Solving Optimization Problems, Knowledge-Based Systems (2016),
doi: 10.1016/j.knosys.2015.12.022
https://dl.programstore.ir/files/Uploades/Lib/PDF/SCA.pdf
| class SineCosineAlgorithm(GenericPSO):
"""
The Sine Cosine Algorithm, conceived by Seyedali Mirjalili, is similar to PSO, although the author does not use
particle terminology in the paper. Each particle moves towards a destination (the best candidate solution) with a
step size determined by several random parameters. The "sine" and "cosine" in the name come from the use of sine
and cosine functions to determine whether the particle's step will explore or exploit the current best solution.
Original Paper:
Seyedali Mirjalili, SCA: A Sine Cosine Algorithm for Solving Optimization Problems, Knowledge-Based Systems (2016),
doi: 10.1016/j.knosys.2015.12.022
https://dl.programstore.ir/files/Uploades/Lib/PDF/SCA.pdf
"""
def __init__(self,
params: Iterable[torch.nn.Parameter],
num_particles: int = 100,
max_movement_radius: float = 2,
max_param_value: float = 10,
min_param_value: float = -10):
particle_kwargs = {
'max_param_value': max_param_value,
'min_param_value': min_param_value,
}
super().__init__(params, num_particles, particle_class=SCAParticle, particle_kwargs=particle_kwargs)
self.max_param_value = max_param_value
self.min_param_value = min_param_value
self.max_movement_radius = max_movement_radius
self.magnitude = max_param_value-min_param_value
self.initial_movement_radius = max_movement_radius
@torch.no_grad()
def step(self, closure: Callable[[], torch.Tensor]) -> torch.Tensor:
"""
Performs a single optimization step.
:param closure: A callable that reevaluates the model and returns the loss.
:return: the final loss after the step (as calculated by the closure)
"""
r3 = 2 * torch.rand((1,))
r2 = 2*torch.pi*torch.rand((1,))
use_sine = torch.rand((1,)).item() < .5
r1 = self.max_movement_radius*torch.rand((1,))
# self.max_movement_radius *= .99
return super().step(closure, particle_step_kwargs={'r1': r1, 'r2': r2, 'r3': r3, 'use_sine': use_sine})
| (params: Iterable[torch.nn.parameter.Parameter], num_particles: int = 100, max_movement_radius: float = 2, max_param_value: float = 10, min_param_value: float = -10) |
36,582 | torch_pso.optim.SineCosineAlgorithm | __init__ | null | def __init__(self,
params: Iterable[torch.nn.Parameter],
num_particles: int = 100,
max_movement_radius: float = 2,
max_param_value: float = 10,
min_param_value: float = -10):
particle_kwargs = {
'max_param_value': max_param_value,
'min_param_value': min_param_value,
}
super().__init__(params, num_particles, particle_class=SCAParticle, particle_kwargs=particle_kwargs)
self.max_param_value = max_param_value
self.min_param_value = min_param_value
self.max_movement_radius = max_movement_radius
self.magnitude = max_param_value-min_param_value
self.initial_movement_radius = max_movement_radius
| (self, params: Iterable[torch.nn.parameter.Parameter], num_particles: int = 100, max_movement_radius: float = 2, max_param_value: float = 10, min_param_value: float = -10) |
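A brief usage sketch for SineCosineAlgorithm. As the `step` method above shows, a single draw of `(r1, r2, r3, use_sine)` is shared by the whole swarm on each step; the top-level import is assumed.

```python
# Hypothetical usage sketch for SineCosineAlgorithm; the top-level import is assumed.
import torch
from torch_pso import SineCosineAlgorithm

net = torch.nn.Linear(4, 1)
optim = SineCosineAlgorithm(net.parameters(), num_particles=50, max_movement_radius=2.0)
x, y = torch.rand(16, 4), torch.rand(16, 1)

def closure() -> torch.Tensor:
    return torch.nn.functional.mse_loss(net(x), y)

for _ in range(100):
    loss = optim.step(closure)
```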
36,603 | rfc3987 | <lambda> | null | _bmp = lambda s: _re.sub(r'\\U[0-9A-F]{8}-\\U[0-9A-F]{8}', '', s)
| (s) |
36,604 | rfc3987 | _i2u | null | def _i2u(dic):
for (name, iname) in [('authority', 'iauthority'), ('path', 'ipath'),
('query', 'iquery'), ('fragment', 'ifragment')]:
if dic.get(name) is None:
dic[name] = dic.get(iname)
return dic
| (dic) |
36,605 | rfc3987 | _interpret_unicode_escapes | null | def _interpret_unicode_escapes(string):
return string.encode('ascii').decode('raw-unicode-escape')
| (string) |
36,607 | rfc3987 | _remove_dot_segments | null | def _remove_dot_segments(path):
path = _dot_segments.sub('', path)
c = 1
while c:
path, c = _2dots_segments.subn('/', path, 1)
return path
| (path) |
36,610 | rfc3987 | compose | Returns a URI composed_ from named parts.
.. _composed: http://tools.ietf.org/html/rfc3986#section-5.3
| def compose(scheme=None, authority=None, path=None, query=None, fragment=None,
iauthority=None, ipath=None, iquery=None, ifragment=None, **kw):
"""Returns an URI composed_ from named parts.
.. _composed: http://tools.ietf.org/html/rfc3986#section-5.3
"""
_i2u(locals())
res = ''
if scheme is not None:
res += scheme + ':'
if authority is not None:
res += '//' + authority
res += path or ''
if query is not None:
res += '?' + query
if fragment is not None:
res += '#' + fragment
return res
| (scheme=None, authority=None, path=None, query=None, fragment=None, iauthority=None, ipath=None, iquery=None, ifragment=None, **kw) |
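`compose()` simply concatenates the parts it is given, per RFC 3986 section 5.3; parts left as None are omitted. Two quick examples, derived directly from the code above:

```python
import rfc3987

print(rfc3987.compose(scheme='http', authority='example.com',
                      path='/a/b', query='x=1', fragment='top'))
# -> 'http://example.com/a/b?x=1#top'

# Parts that are None are simply omitted:
print(rfc3987.compose(scheme='urn', path='isbn:0451450523'))
# -> 'urn:isbn:0451450523'
```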
36,611 | rfc3987 | format_patterns | Returns a dict of patterns (regular expressions) keyed by
`rule names for URIs`_ and `rule names for IRIs`_.
See also the module level dicts of patterns, and `get_compiled_pattern`.
To wrap a rule in a named capture group, pass it as keyword argument:
rule_name='group_name'. By default, the formatted patterns contain no
named groups.
Patterns are `str` instances (be it in python 2.x or 3.x) containing ASCII
characters only.
Caveats:
- with re_, named capture groups cannot occur on multiple branches of an
alternation
- with re_ before python 3.3, ``\u`` and ``\U`` escapes must be
preprocessed (see `issue3665 <http://bugs.python.org/issue3665>`_)
- on narrow builds, character ranges beyond BMP are not supported
.. _rule names for URIs: http://tools.ietf.org/html/rfc3986#appendix-A
.. _rule names for IRIs: http://tools.ietf.org/html/rfc3987#section-2.2
| def format_patterns(**names):
r"""Returns a dict of patterns (regular expressions) keyed by
`rule names for URIs`_ and `rule names for IRIs`_.
See also the module level dicts of patterns, and `get_compiled_pattern`.
To wrap a rule in a named capture group, pass it as keyword argument:
rule_name='group_name'. By default, the formatted patterns contain no
named groups.
Patterns are `str` instances (be it in python 2.x or 3.x) containing ASCII
characters only.
Caveats:
- with re_, named capture groups cannot occur on multiple branches of an
alternation
- with re_ before python 3.3, ``\u`` and ``\U`` escapes must be
preprocessed (see `issue3665 <http://bugs.python.org/issue3665>`_)
- on narrow builds, character ranges beyond BMP are not supported
.. _rule names for URIs: http://tools.ietf.org/html/rfc3986#appendix-A
.. _rule names for IRIs: http://tools.ietf.org/html/rfc3987#section-2.2
"""
formatted = {}
for name, pat in _common_rules[::-1] + _uri_rules[::-1] + _iri_rules[::-1]:
if name in names:
n = names[name]
if callable(n):
pat = n(pat)
else:
pat = '(?P<%s>%s)' % (n, pat)
formatted[name] = pat.format(**formatted)
return formatted
| (**names) |
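A small example of wrapping individual rules in named capture groups of your choosing (rule names follow the RFC appendices referenced above); shown here for the ASCII-only `URI` rule, which the standard `re` module can compile.

```python
import re
import rfc3987

# Wrap the 'scheme' and 'fragment' rules in named groups with custom names.
pats = rfc3987.format_patterns(scheme='my_scheme', fragment='my_fragment')
uri_re = re.compile('^%s$' % pats['URI'])

m = uri_re.match('http://example.com/p?q=1#top')
print(m.group('my_scheme'), m.group('my_fragment'))  # expected: http top
```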
36,612 | rfc3987 | get_compiled_pattern | Returns a compiled pattern object for a rule name or template string.
Usage for validation::
>>> uri = get_compiled_pattern('^%(URI)s$')
>>> assert uri.match('http://tools.ietf.org/html/rfc3986#appendix-A')
>>> assert not get_compiled_pattern('^%(relative_ref)s$').match('#f#g')
>>> from unicodedata import lookup
>>> smp = 'urn:' + lookup('OLD ITALIC LETTER A') # U+00010300
>>> assert not uri.match(smp)
>>> m = get_compiled_pattern('^%(IRI)s$').match(smp)
On narrow builds, non-BMP characters are (incorrectly) excluded::
>>> assert NARROW_BUILD == (not m)
For parsing, some subcomponents are captured in named groups (*only if*
regex_ is available, otherwise see `parse`)::
>>> match = uri.match('http://tools.ietf.org/html/rfc3986#appendix-A')
>>> d = match.groupdict()
>>> if REGEX:
... assert all([ d['scheme'] == 'http',
... d['authority'] == 'tools.ietf.org',
... d['path'] == '/html/rfc3986',
... d['query'] == None,
... d['fragment'] == 'appendix-A' ])
>>> for r in patterns.keys():
... assert get_compiled_pattern(r)
| def get_compiled_pattern(rule, flags=0):
"""Returns a compiled pattern object for a rule name or template string.
Usage for validation::
>>> uri = get_compiled_pattern('^%(URI)s$')
>>> assert uri.match('http://tools.ietf.org/html/rfc3986#appendix-A')
>>> assert not get_compiled_pattern('^%(relative_ref)s$').match('#f#g')
>>> from unicodedata import lookup
>>> smp = 'urn:' + lookup('OLD ITALIC LETTER A') # U+00010300
>>> assert not uri.match(smp)
>>> m = get_compiled_pattern('^%(IRI)s$').match(smp)
On narrow builds, non-BMP characters are (incorrectly) excluded::
>>> assert NARROW_BUILD == (not m)
For parsing, some subcomponents are captured in named groups (*only if*
regex_ is available, otherwise see `parse`)::
>>> match = uri.match('http://tools.ietf.org/html/rfc3986#appendix-A')
>>> d = match.groupdict()
>>> if REGEX:
... assert all([ d['scheme'] == 'http',
... d['authority'] == 'tools.ietf.org',
... d['path'] == '/html/rfc3986',
... d['query'] == None,
... d['fragment'] == 'appendix-A' ])
>>> for r in patterns.keys():
... assert get_compiled_pattern(r)
"""
cache, key = get_compiled_pattern.cache, (rule, flags)
if key not in cache:
if NARROW_BUILD:
pats = bmp_patterns if REGEX else bmp_upatterns_no_names
else:
pats = patterns if REGEX else upatterns_no_names
p = pats.get(rule) or rule % pats
cache[key] = _re.compile(p, flags)
return cache[key]
| (rule, flags=0) |
36,613 | rfc3987 | match | Convenience function for checking if `string` matches a specific rule.
Returns a match object or None::
>>> assert match('%C7X', 'pct_encoded') is None
>>> assert match('%C7', 'pct_encoded')
>>> assert match('%c7', 'pct_encoded')
| def match(string, rule='IRI_reference'):
"""Convenience function for checking if `string` matches a specific rule.
Returns a match object or None::
>>> assert match('%C7X', 'pct_encoded') is None
>>> assert match('%C7', 'pct_encoded')
>>> assert match('%c7', 'pct_encoded')
"""
return get_compiled_pattern('^%%(%s)s$' % rule).match(string)
| (string, rule='IRI_reference') |
36,614 | rfc3987 | normalize | Syntax-Based Normalization | def normalize(uri):
"Syntax-Based Normalization"
# TODO:
raise NotImplementedError
| (uri) |
36,615 | rfc3987 | parse | Parses `string` according to `rule` into a dict of subcomponents.
If `rule` is None, parse an IRI_reference `without validation
<http://tools.ietf.org/html/rfc3986#appendix-B>`_.
If regex_ is available, any rule is supported; with re_, `rule` must be
'IRI_reference' or some special case thereof ('IRI', 'absolute_IRI',
'irelative_ref', 'irelative_part', 'URI_reference', 'URI', 'absolute_URI',
'relative_ref', 'relative_part'). ::
>>> d = parse('http://tools.ietf.org/html/rfc3986#appendix-A',
... rule='URI')
>>> assert all([ d['scheme'] == 'http',
... d['authority'] == 'tools.ietf.org',
... d['path'] == '/html/rfc3986',
... d['query'] == None,
... d['fragment'] == 'appendix-A' ])
| def parse(string, rule='IRI_reference'):
"""Parses `string` according to `rule` into a dict of subcomponents.
If `rule` is None, parse an IRI_reference `without validation
<http://tools.ietf.org/html/rfc3986#appendix-B>`_.
If regex_ is available, any rule is supported; with re_, `rule` must be
'IRI_reference' or some special case thereof ('IRI', 'absolute_IRI',
'irelative_ref', 'irelative_part', 'URI_reference', 'URI', 'absolute_URI',
'relative_ref', 'relative_part'). ::
>>> d = parse('http://tools.ietf.org/html/rfc3986#appendix-A',
... rule='URI')
>>> assert all([ d['scheme'] == 'http',
... d['authority'] == 'tools.ietf.org',
... d['path'] == '/html/rfc3986',
... d['query'] == None,
... d['fragment'] == 'appendix-A' ])
"""
if not REGEX and rule and rule not in REFERENCE_RULES:
raise ValueError(rule)
if rule:
m = match(string, rule)
if not m:
raise ValueError('%r is not a valid %r.' % (string, rule))
if REGEX:
return _i2u(m.groupdict())
return _i2u(_iri_non_validating_re.match(string).groupdict())
| (string, rule='IRI_reference') |
36,616 | rfc3987 | resolve | Resolves_ a `URI reference` relative to a `base` URI.
`Test cases <http://tools.ietf.org/html/rfc3986#section-5.4>`_::
>>> base = resolve.test_cases_base
>>> for relative, resolved in resolve.test_cases.items():
... assert resolve(base, relative) == resolved
If `return_parts` is True, returns a dict of named parts instead of
a string.
Examples::
>>> assert resolve('urn:rootless', '../../name') == 'urn:name'
>>> assert resolve('urn:root/less', '../../name') == 'urn:/name'
>>> assert resolve('http://a/b', 'http:g') == 'http:g'
>>> assert resolve('http://a/b', 'http:g', strict=False) == 'http://a/g'
.. _Resolves: http://tools.ietf.org/html/rfc3986#section-5.2
| def resolve(base, uriref, strict=True, return_parts=False):
"""Resolves_ an `URI reference` relative to a `base` URI.
`Test cases <http://tools.ietf.org/html/rfc3986#section-5.4>`_::
>>> base = resolve.test_cases_base
>>> for relative, resolved in resolve.test_cases.items():
... assert resolve(base, relative) == resolved
If `return_parts` is True, returns a dict of named parts instead of
a string.
Examples::
>>> assert resolve('urn:rootless', '../../name') == 'urn:name'
>>> assert resolve('urn:root/less', '../../name') == 'urn:/name'
>>> assert resolve('http://a/b', 'http:g') == 'http:g'
>>> assert resolve('http://a/b', 'http:g', strict=False) == 'http://a/g'
.. _Resolves: http://tools.ietf.org/html/rfc3986#section-5.2
"""
#base = normalize(base)
if isinstance(base, basestring):
B = parse(base, 'IRI')
else:
B = _i2u(dict(base))
if not B.get('scheme'):
raise ValueError('Expected an IRI (with scheme), not %r.' % base)
if isinstance(uriref, basestring):
R = parse(uriref, 'IRI_reference')
else:
R = _i2u(dict(uriref))
# _last_segment = get_compiled_pattern(r'(?<=^|/)%(segment)s$')
if R['scheme'] and (strict or R['scheme'] != B['scheme']):
T = R
else:
T = {}
T['scheme'] = B['scheme']
if R['authority'] is not None:
T['authority'] = R['authority']
T['path'] = R['path']
T['query'] = R['query']
else:
T['authority'] = B['authority']
if R['path']:
if R['path'][:1] == "/":
T['path'] = R['path']
elif B['authority'] is not None and not B['path']:
T['path'] = '/%s' % R['path']
else:
T['path'] = ''.join(B['path'].rpartition('/')[:2]) + R['path']
# _last_segment.sub(R['path'], B['path'])
T['query'] = R['query']
else:
T['path'] = B['path']
if R['query'] is not None:
T['query'] = R['query']
else:
T['query'] = B['query']
T['fragment'] = R['fragment']
T['path'] = _remove_dot_segments(T['path'])
if return_parts:
return T
else:
return compose(**T)
| (base, uriref, strict=True, return_parts=False) |
36,620 | j2cli.cli | main | CLI Entry point | def main():
""" CLI Entry point """
output = render_command(
os.getcwd(),
os.environ,
sys.stdin,
sys.argv[1:]
)
outstream = getattr(sys.stdout, 'buffer', sys.stdout)
outstream.write(output)
| () |
36,622 | clr_loader.types | Assembly | null | class Assembly:
def __init__(self, runtime: "Runtime", path: StrOrPath):
self._runtime = runtime
self._path = path
def get_function(self, name: str, func: Optional[str] = None) -> ClrFunction:
"""Get a wrapped .NET function instance
The function must be ``static``, and it must have the signature
``int Func(IntPtr ptr, int size)``. The returned wrapped instance will
take a ``binary`` and call the .NET function with a pointer to that
buffer and the buffer length. The buffer is reflected using CFFI's
`from_buffer`.
:param name: If ``func`` is not given, this is the fully qualified name
of the function. If ``func`` is given, this is the fully
qualified name of the containing class
:param func: Name of the function
:return: A function object that takes a single ``binary`` parameter
and returns an ``int``
"""
if func is None:
name, func = name.rsplit(".", 1)
return ClrFunction(self._runtime, self._path, name, func)
def __repr__(self) -> str:
return f"<Assembly {self._path} in {self._runtime}>"
| (runtime: 'Runtime', path: Union[str, os.PathLike]) |
36,623 | clr_loader.types | __init__ | null | def __init__(self, runtime: "Runtime", path: StrOrPath):
self._runtime = runtime
self._path = path
| (self, runtime: clr_loader.types.Runtime, path: Union[str, os.PathLike]) |
36,624 | clr_loader.types | __repr__ | null | def __repr__(self) -> str:
return f"<Assembly {self._path} in {self._runtime}>"
| (self) -> str |
36,625 | clr_loader.types | get_function | Get a wrapped .NET function instance
The function must be ``static``, and it must have the signature
``int Func(IntPtr ptr, int size)``. The returned wrapped instance will
take a ``binary`` and call the .NET function with a pointer to that
buffer and the buffer length. The buffer is reflected using CFFI's
`from_buffer`.
:param name: If ``func`` is not given, this is the fully qualified name
of the function. If ``func`` is given, this is the fully
qualified name of the containing class
:param func: Name of the function
:return: A function object that takes a single ``binary`` parameter
and returns an ``int``
| def get_function(self, name: str, func: Optional[str] = None) -> ClrFunction:
"""Get a wrapped .NET function instance
The function must be ``static``, and it must have the signature
``int Func(IntPtr ptr, int size)``. The returned wrapped instance will
take a ``binary`` and call the .NET function with a pointer to that
buffer and the buffer length. The buffer is reflected using CFFI's
`from_buffer`.
:param name: If ``func`` is not given, this is the fully qualified name
of the function. If ``func`` is given, this is the fully
qualified name of the containing class
:param func: Name of the function
:return: A function object that takes a single ``binary`` parameter
and returns an ``int``
"""
if func is None:
name, func = name.rsplit(".", 1)
return ClrFunction(self._runtime, self._path, name, func)
| (self, name: str, func: Optional[str] = None) -> clr_loader.types.ClrFunction |
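An end-to-end sketch of the wrapper described above. The assembly path and the .NET type/method names are placeholders; on the .NET side the method must be `static` with the signature `int Func(IntPtr ptr, int size)`, exactly as the docstring states.

```python
# Hypothetical end-to-end sketch; "MyLibrary.dll", "MyNamespace.MyClass" and
# "ProcessBuffer" are placeholders, not part of clr_loader.
from clr_loader import get_mono

rt = get_mono()                                   # or get_coreclr(...) for .NET Core
asm = rt.get_assembly("path/to/MyLibrary.dll")

# Fully qualified class name plus method name...
process = asm.get_function("MyNamespace.MyClass", "ProcessBuffer")
# ...or a single dotted name, split on the last dot:
# process = asm.get_function("MyNamespace.MyClass.ProcessBuffer")

result = process(b"raw payload bytes")            # int returned by the .NET method
print(result)
```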
36,626 | clr_loader.util.runtime_spec | DotnetCoreRuntimeSpec | Specification of an installed .NET Core runtime | class DotnetCoreRuntimeSpec:
"""Specification of an installed .NET Core runtime"""
name: str
version: str
path: Path
@property
def tfm(self) -> str:
return f"net{self.version[:3]}"
@property
def floor_version(self) -> str:
return f"{self.version[:3]}.0"
@property
def runtime_config(self) -> Dict[str, Any]:
return {
"runtimeOptions": {
"tfm": self.tfm,
"framework": {"name": self.name, "version": self.floor_version},
}
}
def write_config(self, f: TextIO) -> None:
json.dump(self.runtime_config, f)
| (name: str, version: str, path: pathlib.Path) -> None |
36,627 | clr_loader.util.runtime_spec | __eq__ | null | import json
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, TextIO
@dataclass
class DotnetCoreRuntimeSpec:
"""Specification of an installed .NET Core runtime"""
name: str
version: str
path: Path
@property
def tfm(self) -> str:
return f"net{self.version[:3]}"
@property
def floor_version(self) -> str:
return f"{self.version[:3]}.0"
@property
def runtime_config(self) -> Dict[str, Any]:
return {
"runtimeOptions": {
"tfm": self.tfm,
"framework": {"name": self.name, "version": self.floor_version},
}
}
def write_config(self, f: TextIO) -> None:
json.dump(self.runtime_config, f)
| (self, other) |
36,630 | clr_loader.util.runtime_spec | write_config | null | def write_config(self, f: TextIO) -> None:
json.dump(self.runtime_config, f)
| (self, f: <class 'TextIO'>) -> NoneType |
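A small sketch of the derived properties and of `write_config`, using the internal module path shown in these rows; the name, version and path are illustrative, and in practice specs usually come from `find_runtimes()` further below.

```python
import io
from pathlib import Path
from clr_loader.util.runtime_spec import DotnetCoreRuntimeSpec  # module path as listed above

spec = DotnetCoreRuntimeSpec(
    name="Microsoft.NETCore.App",
    version="6.0.16",                                             # illustrative version
    path=Path("/usr/share/dotnet/shared/Microsoft.NETCore.App/6.0.16"),
)

print(spec.tfm)            # 'net6.0'  -- built from the first three characters of the version
print(spec.floor_version)  # '6.0.0'

buf = io.StringIO()
spec.write_config(buf)     # serialises runtime_config as JSON (a *.runtimeconfig.json payload)
print(buf.getvalue())
```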
36,698 | clr_loader.types | Runtime | CLR Runtime
Encapsulates the lifetime of a CLR (.NET) runtime. If the instance is
deleted, the runtime will be shut down.
| class Runtime(metaclass=ABCMeta):
"""CLR Runtime
Encapsulates the lifetime of a CLR (.NET) runtime. If the instance is
deleted, the runtime will be shut down.
"""
@abstractmethod
def info(self) -> RuntimeInfo:
"""Get configuration and version information"""
pass
def get_assembly(self, assembly_path: StrOrPath) -> Assembly:
"""Get an assembly wrapper
This function does not guarantee that the respective assembly is or can
be loaded. Due to the design of the different hosting APIs, loading only
happens when the first function is referenced, and only then potential
errors will be raised."""
return Assembly(self, assembly_path)
@abstractmethod
def _get_callable(
self, assembly_path: StrOrPath, typename: str, function: str
) -> Callable[[Any, int], Any]:
"""Private function to retrieve a low-level callable object"""
pass
@abstractmethod
def shutdown(self) -> None:
"""Shut down the runtime as much as possible
Implementations should still be able to "reinitialize", thus the final
cleanup will usually happen in an ``atexit`` handler."""
pass
def __del__(self) -> None:
self.shutdown()
| () |
36,699 | clr_loader.types | __del__ | null | def __del__(self) -> None:
self.shutdown()
| (self) -> NoneType |
36,700 | clr_loader.types | _get_callable | Private function to retrieve a low-level callable object | @abstractmethod
def _get_callable(
self, assembly_path: StrOrPath, typename: str, function: str
) -> Callable[[Any, int], Any]:
"""Private function to retrieve a low-level callable object"""
pass
| (self, assembly_path: Union[str, os.PathLike], typename: str, function: str) -> Callable[[Any, int], Any] |
36,701 | clr_loader.types | get_assembly | Get an assembly wrapper
This function does not guarantee that the respective assembly is or can
be loaded. Due to the design of the different hosting APIs, loading only
happens when the first function is referenced, and only then potential
errors will be raised. | def get_assembly(self, assembly_path: StrOrPath) -> Assembly:
"""Get an assembly wrapper
This function does not guarantee that the respective assembly is or can
be loaded. Due to the design of the different hosting APIs, loading only
happens when the first function is referenced, and only then potential
errors will be raised."""
return Assembly(self, assembly_path)
| (self, assembly_path: Union[str, os.PathLike]) -> clr_loader.types.Assembly |
36,702 | clr_loader.types | info | Get configuration and version information | @abstractmethod
def info(self) -> RuntimeInfo:
"""Get configuration and version information"""
pass
| (self) -> clr_loader.types.RuntimeInfo |
36,703 | clr_loader.types | shutdown | Shut down the runtime as much as possible
Implementations should still be able to "reinitialize", thus the final
cleanup will usually happen in an ``atexit`` handler. | @abstractmethod
def shutdown(self) -> None:
"""Shut down the runtime as much as possible
Implementations should still be able to "reinitialize", thus the final
cleanup will usually happen in an ``atexit`` handler."""
pass
| (self) -> NoneType |
36,704 | clr_loader.types | RuntimeInfo | Information on a Runtime instance
An informative text can be retrieved from this by converting it to a
``str``, in particular the following results in readable debug information:
>>> ri = RuntimeInfo()
>>> print(ri)
6.12.0.122 (tarball)
Runtime: Mono
=============
Version: 6.12.0.122 (tarball)
Initialized: True
Shut down: False
Properties:
| class RuntimeInfo:
"""Information on a Runtime instance
An informative text can be retrieved from this by converting it to a
``str``, in particular the following results in readable debug information:
>>> ri = RuntimeInfo()
>>> print(ri)
6.12.0.122 (tarball)
Runtime: Mono
=============
Version: 6.12.0.122 (tarball)
Initialized: True
Shut down: False
Properties:
"""
kind: str
version: str
initialized: bool
shutdown: bool
properties: Dict[str, str] = field(repr=False)
def __str__(self) -> str:
return (
f"Runtime: {self.kind}\n"
"=============\n"
f" Version: {self.version}\n"
f" Initialized: {self.initialized}\n"
f" Shut down: {self.shutdown}\n"
f" Properties:\n"
+ "\n".join(
f" {key} = {_truncate(value, 65 - len(key))}"
for key, value in self.properties.items()
)
)
| (kind: str, version: str, initialized: bool, shutdown: bool, properties: Dict[str, str]) -> None |
36,705 | clr_loader.types | __eq__ | null | from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, field
from os import PathLike
from typing import Any, Callable, Dict, Optional, Union
__all__ = ["StrOrPath"]
StrOrPath = Union[str, PathLike]
@dataclass
class RuntimeInfo:
"""Information on a Runtime instance
An informative text can be retrieved from this by converting it to a
``str``, in particular the following results in readable debug information:
>>> ri = RuntimeInfo()
>>> print(ri)
6.12.0.122 (tarball)
Runtime: Mono
=============
Version: 6.12.0.122 (tarball)
Initialized: True
Shut down: False
Properties:
"""
kind: str
version: str
initialized: bool
shutdown: bool
properties: Dict[str, str] = field(repr=False)
def __str__(self) -> str:
return (
f"Runtime: {self.kind}\n"
"=============\n"
f" Version: {self.version}\n"
f" Initialized: {self.initialized}\n"
f" Shut down: {self.shutdown}\n"
f" Properties:\n"
+ "\n".join(
f" {key} = {_truncate(value, 65 - len(key))}"
for key, value in self.properties.items()
)
)
| (self, other) |
36,708 | clr_loader.types | __str__ | null | def __str__(self) -> str:
return (
f"Runtime: {self.kind}\n"
"=============\n"
f" Version: {self.version}\n"
f" Initialized: {self.initialized}\n"
f" Shut down: {self.shutdown}\n"
f" Properties:\n"
+ "\n".join(
f" {key} = {_truncate(value, 65 - len(key))}"
for key, value in self.properties.items()
)
)
| (self) -> str |
36,709 | tempfile | TemporaryDirectory | Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
| class TemporaryDirectory:
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix=None, prefix=None, dir=None,
ignore_cleanup_errors=False):
self.name = mkdtemp(suffix, prefix, dir)
self._ignore_cleanup_errors = ignore_cleanup_errors
self._finalizer = _weakref.finalize(
self, self._cleanup, self.name,
warn_message="Implicitly cleaning up {!r}".format(self),
ignore_errors=self._ignore_cleanup_errors)
@classmethod
def _rmtree(cls, name, ignore_errors=False):
def onerror(func, path, exc_info):
if issubclass(exc_info[0], PermissionError):
try:
if path != name:
_resetperms(_os.path.dirname(path))
_resetperms(path)
try:
_os.unlink(path)
# PermissionError is raised on FreeBSD for directories
except (IsADirectoryError, PermissionError):
cls._rmtree(path, ignore_errors=ignore_errors)
except FileNotFoundError:
pass
elif issubclass(exc_info[0], FileNotFoundError):
pass
else:
if not ignore_errors:
raise
_shutil.rmtree(name, onerror=onerror)
@classmethod
def _cleanup(cls, name, warn_message, ignore_errors=False):
cls._rmtree(name, ignore_errors=ignore_errors)
_warnings.warn(warn_message, ResourceWarning)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self._finalizer.detach() or _os.path.exists(self.name):
self._rmtree(self.name, ignore_errors=self._ignore_cleanup_errors)
__class_getitem__ = classmethod(_types.GenericAlias)
| (suffix=None, prefix=None, dir=None, ignore_cleanup_errors=False) |
36,710 | tempfile | __enter__ | null | def __enter__(self):
return self.name
| (self) |
36,711 | tempfile | __exit__ | null | def __exit__(self, exc, value, tb):
self.cleanup()
| (self, exc, value, tb) |
36,712 | tempfile | __init__ | null | def __init__(self, suffix=None, prefix=None, dir=None,
ignore_cleanup_errors=False):
self.name = mkdtemp(suffix, prefix, dir)
self._ignore_cleanup_errors = ignore_cleanup_errors
self._finalizer = _weakref.finalize(
self, self._cleanup, self.name,
warn_message="Implicitly cleaning up {!r}".format(self),
ignore_errors=self._ignore_cleanup_errors)
| (self, suffix=None, prefix=None, dir=None, ignore_cleanup_errors=False) |
36,713 | tempfile | __repr__ | null | def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
| (self) |
36,714 | tempfile | cleanup | null | def cleanup(self):
if self._finalizer.detach() or _os.path.exists(self.name):
self._rmtree(self.name, ignore_errors=self._ignore_cleanup_errors)
| (self) |
36,715 | clr_loader | _maybe_path | null | def _maybe_path(p: Optional[StrOrPath]) -> Optional[Path]:
if p is None:
return None
else:
return Path(p)
| (p: Union[str, os.PathLike, NoneType]) -> Optional[pathlib.Path] |
36,716 | clr_loader.util.find | find_dotnet_root | Try to discover the .NET Core root directory
If the environment variable ``DOTNET_ROOT`` is defined, we will use that.
Otherwise, we probe the default installation paths on Windows and macOS.
If none of these lead to a result, we try to discover the ``dotnet`` CLI
tool and use its (real) parent directory.
Otherwise, this function raises an exception.
:return: Path to the .NET Core root
| def find_dotnet_root() -> Path:
"""Try to discover the .NET Core root directory
If the environment variable ``DOTNET_ROOT`` is defined, we will use that.
Otherwise, we probe the default installation paths on Windows and macOS.
If none of these lead to a result, we try to discover the ``dotnet`` CLI
tool and use its (real) parent directory.
Otherwise, this function raises an exception.
:return: Path to the .NET Core root
"""
dotnet_root = os.environ.get("DOTNET_ROOT", None)
if dotnet_root is not None:
return Path(dotnet_root)
if sys.platform == "win32":
# On Windows, the host library is stored separately from dotnet.exe for x86
prog_files = os.environ.get("ProgramFiles")
if not prog_files:
raise RuntimeError("Could not find ProgramFiles")
prog_files = Path(prog_files)
dotnet_root = prog_files / "dotnet"
elif sys.platform == "darwin":
if "ARM64" in os.uname().version and platform.machine() == "x86_64":
# Apple Silicon in Rosetta 2 mode
dotnet_root = Path("/usr/local/share/dotnet/x64")
else:
dotnet_root = Path("/usr/local/share/dotnet")
if dotnet_root is not None and dotnet_root.is_dir():
return dotnet_root
# Try to discover dotnet from PATH otherwise
dotnet_cli = find_dotnet_cli()
if not dotnet_cli:
raise RuntimeError("Can not determine dotnet root")
return dotnet_cli.resolve().parent
| () -> pathlib.Path |
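A hedged usage sketch for find_dotnet_root; it simply follows the lookup order described in the docstring above, and the printed messages are illustrative:

from clr_loader.util.find import find_dotnet_root

try:
    root = find_dotnet_root()
    print("Using .NET root:", root)
except RuntimeError:
    # Raised when DOTNET_ROOT, the default install paths, and the dotnet CLI all fail
    print("No .NET Core installation found")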
36,717 | clr_loader.util.find | find_libmono | Find a suitable libmono dynamic library
On Windows and macOS, we check the default installation directories.
:param sgen:
Whether to look for an SGen or Boehm GC instance. This parameter is
ignored on Windows, as only ``sgen`` is installed with the default
installer
:return:
Path to usable ``libmono``
def find_libmono(*, assembly_dir: Optional[str] = None, sgen: bool = True) -> Path:
"""Find a suitable libmono dynamic library
On Windows and macOS, we check the default installation directories.
:param sgen:
Whether to look for an SGen or Boehm GC instance. This parameter is
ignored on Windows, as only ``sgen`` is installed with the default
installer
:return:
Path to usable ``libmono``
"""
unix_name = f"mono{'sgen' if sgen else ''}-2.0"
if sys.platform == "win32":
if sys.maxsize > 2**32:
prog_files = os.environ.get("ProgramFiles")
else:
prog_files = os.environ.get("ProgramFiles(x86)")
if prog_files is None:
raise RuntimeError("Could not determine Program Files location")
# Ignore sgen on Windows, the main installation only contains this DLL
path = Path(prog_files) / "Mono/bin/mono-2.0-sgen.dll"
elif sys.platform == "darwin":
path = (
Path("/Library/Frameworks/Mono.framework/Versions/Current/lib")
/ f"lib{unix_name}.dylib"
)
else:
if assembly_dir is None:
from ctypes.util import find_library
path = find_library(unix_name)
else:
libname = "lib" + unix_name + ".so"
path = Path(assembly_dir) / "lib" / libname
if path is None:
raise RuntimeError("Could not find libmono")
return Path(path)
| (*, assembly_dir: Optional[str] = None, sgen: bool = True) -> pathlib.Path |
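A small sketch of calling find_libmono; per the docstring, sgen is ignored on Windows and a RuntimeError is raised when nothing suitable is found:

from clr_loader.util.find import find_libmono

try:
    libmono = find_libmono(sgen=True)  # default GC flavour
    print("libmono located at:", libmono)
except RuntimeError:
    print("No usable libmono installation detected")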
36,718 | clr_loader.util.find | find_runtimes | Find installed .NET Core runtimes
If the ``dotnet`` CLI can be found, we will call it as ``dotnet
--list-runtimes`` and parse the result.
If it is not available, we try to discover the dotnet root directory using
:py:func:`find_dotnet_root` and enumerate the runtimes installed in the
``shared`` subdirectory.
:return: Iterable of :py:class:`DotnetCoreRuntimeSpec` objects
| def find_runtimes() -> Iterator[DotnetCoreRuntimeSpec]:
"""Find installed .NET Core runtimes
If the ``dotnet`` CLI can be found, we will call it as ``dotnet
--list-runtimes`` and parse the result.
If it is not available, we try to discover the dotnet root directory using
:py:func:`find_dotnet_root` and enumerate the runtimes installed in the
``shared`` subdirectory.
:return: Iterable of :py:class:`DotnetCoreRuntimeSpec` objects
"""
dotnet_cli = find_dotnet_cli()
if dotnet_cli is not None:
return find_runtimes_using_cli(dotnet_cli)
else:
dotnet_root = find_dotnet_root()
return find_runtimes_in_root(dotnet_root)
| () -> Iterator[clr_loader.util.runtime_spec.DotnetCoreRuntimeSpec] |
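An illustrative loop over find_runtimes; the name and version attributes are the ones get_coreclr itself filters and sorts on below:

from clr_loader.util.find import find_runtimes

for spec in find_runtimes():
    # DotnetCoreRuntimeSpec entries, e.g. "Microsoft.NETCore.App 6.0.x"
    print(spec.name, spec.version)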
36,719 | clr_loader | get_coreclr | Get a CoreCLR (.NET Core) runtime instance
The returned ``DotnetCoreRuntime`` also acts as a mapping of the config
properties. They can be retrieved using the index operator and can be
written until the runtime is initialized. The runtime is initialized when
the first function object is retrieved.
:param runtime_config:
Path to a ``runtimeconfig.json`` as generated by
``dotnet publish``. If this parameter is not given, a temporary runtime
config will be generated.
:param dotnet_root:
The root directory of the .NET Core installation. If this is not
specified, we try to discover it using :py:func:`find_dotnet_root`.
:param properties:
Additional runtime properties. These can also be passed using the
``configProperties`` section in the runtime config.
:param runtime_spec:
If the ``runtime_config`` is not specified, the concrete runtime to use
can be controlled by passing this parameter. Possible values can be
retrieved using :py:func:`find_runtimes`. | def get_coreclr(
*,
runtime_config: Optional[StrOrPath] = None,
dotnet_root: Optional[StrOrPath] = None,
properties: Optional[Dict[str, str]] = None,
runtime_spec: Optional[DotnetCoreRuntimeSpec] = None,
) -> Runtime:
"""Get a CoreCLR (.NET Core) runtime instance
The returned ``DotnetCoreRuntime`` also acts as a mapping of the config
properties. They can be retrieved using the index operator and can be
written until the runtime is initialized. The runtime is initialized when
the first function object is retrieved.
:param runtime_config:
Path to a ``runtimeconfig.json`` as generated by
``dotnet publish``. If this parameter is not given, a temporary runtime
config will be generated.
:param dotnet_root:
The root directory of the .NET Core installation. If this is not
specified, we try to discover it using :py:func:`find_dotnet_root`.
:param properties:
Additional runtime properties. These can also be passed using the
``configProperties`` section in the runtime config.
:param runtime_spec:
If the ``runtime_config`` is not specified, the concrete runtime to use
can be controlled by passing this parameter. Possible values can be
retrieved using :py:func:`find_runtimes`."""
from .hostfxr import DotnetCoreRuntime
dotnet_root = _maybe_path(dotnet_root)
if dotnet_root is None:
dotnet_root = find_dotnet_root()
temp_dir = None
runtime_config = _maybe_path(runtime_config)
if runtime_config is None:
if runtime_spec is None:
candidates = [
rt for rt in find_runtimes() if rt.name == "Microsoft.NETCore.App"
]
candidates.sort(key=lambda spec: spec.version, reverse=True)
if not candidates:
raise RuntimeError("Failed to find a suitable runtime")
runtime_spec = candidates[0]
temp_dir = TemporaryDirectory()
runtime_config = Path(temp_dir.name) / "runtimeconfig.json"
with open(runtime_config, "w") as f:
runtime_spec.write_config(f)
impl = DotnetCoreRuntime(runtime_config=runtime_config, dotnet_root=dotnet_root)
if properties:
for key, value in properties.items():
impl[key] = value
if temp_dir:
temp_dir.cleanup()
return impl
| (*, runtime_config: Union[str, os.PathLike, NoneType] = None, dotnet_root: Union[str, os.PathLike, NoneType] = None, properties: Optional[Dict[str, str]] = None, runtime_spec: Optional[clr_loader.util.runtime_spec.DotnetCoreRuntimeSpec] = None) -> clr_loader.types.Runtime |
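A minimal, hedged sketch of get_coreclr based only on the behaviour documented above: with no runtime_config the newest Microsoft.NETCore.App runtime is selected and a temporary config is generated, and the returned runtime acts as a mapping of config properties (the property name is illustrative):

from clr_loader import get_coreclr

runtime = get_coreclr(properties={"System.GC.Server": "false"})
# Properties are readable via the index operator and writable until the
# runtime is initialized by retrieving the first function object.
print(runtime["System.GC.Server"])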
36,720 | clr_loader | get_mono | Get a Mono runtime instance
:param config_file:
Path to the domain configuration file
:param global_config_file:
Path to the global configuration file to load (defaults to, e.g.,
``/etc/mono/config``)
:param libmono:
Path to the Mono runtime dll/so/dylib. If this is not specified, we try
to discover a globally installed instance using :py:func:`find_libmono`
:param sgen:
If ``libmono`` is not specified, this is passed to
:py:func:`find_libmono`
:param debug:
Whether to initialise Mono debugging
:param jit_options:
"Command line options" passed to Mono's ``mono_jit_parse_options``
:param assembly_dir:
The base directory for assemblies, passed to ``mono_set_dirs``
:param config_dir:
The base directory for configuration files, passed to ``mono_set_dirs``
:param set_signal_chaining:
Whether to enable signal chaining, passed to ``mono_set_signal_chaining``.
If it is enabled, the runtime saves the original signal handlers before
installing its own, and calls the original ones in the following cases:
- SIGSEGV/SIGABRT while executing native code
- SIGPROF
- SIGFPE
- SIGQUIT
- SIGUSR2
This currently only works on POSIX platforms
| def get_mono(
*,
# domain: Optional[str] = None,
config_file: Optional[StrOrPath] = None,
global_config_file: Optional[StrOrPath] = None,
libmono: Optional[StrOrPath] = None,
sgen: bool = True,
debug: bool = False,
jit_options: Optional[Sequence[str]] = None,
assembly_dir: Optional[str] = None,
config_dir: Optional[str] = None,
set_signal_chaining: bool = False
) -> Runtime:
"""Get a Mono runtime instance
:param config_file:
Path to the domain configuration file
:param global_config_file:
Path to the global configuration file to load (defaults to, e.g.,
``/etc/mono/config``)
:param libmono:
Path to the Mono runtime dll/so/dylib. If this is not specified, we try
to discover a globally installed instance using :py:func:`find_libmono`
:param sgen:
If ``libmono`` is not specified, this is passed to
:py:func:`find_libmono`
:param debug:
Whether to initialise Mono debugging
:param jit_options:
"Command line options" passed to Mono's ``mono_jit_parse_options``
:param assembly_dir:
The base directory for assemblies, passed to ``mono_set_dirs``
:param config_dir:
The base directory for configuration files, passed to ``mono_set_dirs``
:param set_signal_chaining:
Whether to enable signal chaining, passed to ``mono_set_signal_chaining``.
If it is enabled, the runtime saves the original signal handlers before
installing its own, and calls the original ones in the following cases:
- SIGSEGV/SIGABRT while executing native code
- SIGPROF
- SIGFPE
- SIGQUIT
- SIGUSR2
This currently only works on POSIX platforms
"""
from .mono import Mono
libmono = _maybe_path(libmono)
if libmono is None:
libmono = find_libmono(sgen=sgen, assembly_dir=assembly_dir)
impl = Mono(
# domain=domain,
debug=debug,
jit_options=jit_options,
config_file=_maybe_path(config_file),
global_config_file=_maybe_path(global_config_file),
libmono=libmono,
assembly_dir=assembly_dir,
config_dir=config_dir,
set_signal_chaining=set_signal_chaining,
)
return impl
| (*, config_file: Union[str, os.PathLike, NoneType] = None, global_config_file: Union[str, os.PathLike, NoneType] = None, libmono: Union[str, os.PathLike, NoneType] = None, sgen: bool = True, debug: bool = False, jit_options: Optional[Sequence[str]] = None, assembly_dir: Optional[str] = None, config_dir: Optional[str] = None, set_signal_chaining: bool = False) -> clr_loader.types.Runtime |
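A hedged sketch of get_mono with default discovery; libmono is resolved through find_libmono when not passed explicitly, as the code above shows:

from clr_loader import get_mono

# All keyword arguments shown here are the documented defaults.
mono = get_mono(sgen=True, debug=False, set_signal_chaining=False)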
36,721 | clr_loader | get_netfx | Get a .NET Framework runtime instance
:param domain:
Name of the domain to create. If no value is passed, assemblies will be
loaded into the root domain.
:param config_file:
Configuration file to use to initialize the ``AppDomain``. This will
only be used for non-root domains as we cannot control the
configuration of the implicitly loaded root domain.
| def get_netfx(
*, domain: Optional[str] = None, config_file: Optional[StrOrPath] = None
) -> Runtime:
"""Get a .NET Framework runtime instance
:param domain:
Name of the domain to create. If no value is passed, assemblies will be
loaded into the root domain.
:param config_file:
Configuration file to use to initialize the ``AppDomain``. This will
only be used for non-root domains as we cannot control the
configuration of the implicitly loaded root domain.
"""
from .netfx import NetFx
impl = NetFx(domain=domain, config_file=_maybe_path(config_file))
return impl
| (*, domain: Optional[str] = None, config_file: Union[str, os.PathLike, NoneType] = None) -> clr_loader.types.Runtime |
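A short sketch of get_netfx; the domain name and config file are placeholders, and omitting them loads assemblies into the root domain as the docstring notes:

from clr_loader import get_netfx

netfx = get_netfx(domain="MyAppDomain", config_file="MyApp.exe.config")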
36,724 | lxml | get_include |
Returns a list of header include paths (for lxml itself, libxml2
and libxslt) needed to compile C code against lxml if it was built
with statically linked libraries.
| def get_include():
"""
Returns a list of header include paths (for lxml itself, libxml2
and libxslt) needed to compile C code against lxml if it was built
with statically linked libraries.
"""
import os
lxml_path = __path__[0]
include_path = os.path.join(lxml_path, 'includes')
includes = [include_path, lxml_path]
for name in os.listdir(include_path):
path = os.path.join(include_path, name)
if os.path.isdir(path):
includes.append(path)
return includes
| () |
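A typical consumption sketch for lxml.get_include() in a setup script; the extension name and source file are placeholders:

from setuptools import Extension, setup
import lxml

setup(
    name="fastparser",
    ext_modules=[
        Extension(
            "fastparser",
            sources=["fastparser.c"],
            # Headers for lxml, libxml2 and libxslt when lxml was statically built
            include_dirs=lxml.get_include(),
        )
    ],
)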
36,725 | pytest_cache | Cache | null | class Cache:
def __init__(self, config):
self.config = config
self._cachedir = getrootdir(config, ".cache")
self.trace = config.trace.root.get("cache")
if config.getvalue("clearcache"):
self.trace("clearing cachedir")
if self._cachedir.check():
self._cachedir.remove()
self._cachedir.mkdir()
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files, e.g. to store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
if name.count("/") != 0:
raise ValueError("name is not allowed to contain '/'")
p = self._cachedir.join("d/" + name)
p.ensure(dir=1)
return p
def _getpath(self, key):
if not key.count("/") > 1:
raise KeyError("Key must be of format 'dir/.../subname'")
return self._cachedir.join(key)
def _getvaluepath(self, key):
p = self._getpath("v/" + key)
p.dirpath().ensure(dir=1)
return p
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
from execnet import loads, DataFormatError
path = self._getvaluepath(key)
if path.check():
f = path.open("rb")
try:
try:
return loads(f.read())
finally:
f.close()
except DataFormatError:
self.trace("cache-invalid at %s" % (key,))
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e.g. lists of dictionaries.
"""
from execnet import dumps, DataFormatError
path = self._getvaluepath(key)
f = path.open("wb")
try:
try:
self.trace("cache-write %s: %r" % (key, value,))
return f.write(dumps(value))
finally:
f.close()
except DataFormatError:
raise ValueError("cannot serialize a builtin python type")
| (config) |
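A hedged sketch of the get/set round trip described in the Cache docstrings; it assumes the plugin exposes the cache as config.cache and uses a placeholder "myplugin/lastrun" key that follows the documented "plugin/name" convention:

def pytest_configure(config):
    cache = getattr(config, "cache", None)
    if cache is None:
        return
    previous = cache.get("myplugin/lastrun", None) or {}
    # Values must be basic (possibly nested) Python types, per Cache.set
    cache.set("myplugin/lastrun", {"count": previous.get("count", 0) + 1})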
36,726 | pytest_cache | __init__ | null | def __init__(self, config):
self.config = config
self._cachedir = getrootdir(config, ".cache")
self.trace = config.trace.root.get("cache")
if config.getvalue("clearcache"):
self.trace("clearing cachedir")
if self._cachedir.check():
self._cachedir.remove()
self._cachedir.mkdir()
| (self, config) |
36,727 | pytest_cache | _getpath | null | def _getpath(self, key):
if not key.count("/") > 1:
raise KeyError("Key must be of format 'dir/.../subname'")
return self._cachedir.join(key)
| (self, key) |