index | package | name | docstring | code | signature |
---|---|---|---|---|---|
40,324 |
pyglove.core.symbolic.base
|
sym_clone
|
Clones current object symbolically.
|
def sym_clone(self,
deep: bool = False,
memo: Optional[Any] = None,
override: Optional[Dict[str, Any]] = None):
"""Clones current object symbolically."""
assert deep or not memo
new_value = self._sym_clone(deep, memo)
if override:
new_value.sym_rebind(override, raise_on_no_change=False)
if flags.is_tracking_origin():
new_value.sym_setorigin(self, 'deepclone' if deep else 'clone')
return new_value
|
(self, deep: bool = False, memo: Optional[Any] = None, override: Optional[Dict[str, Any]] = None)
|
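Usage sketch for `sym_clone` (assumes `pyglove` is importable as `pg`; the `pg.Dict` value and the override path are illustrative, with `override` applied via `sym_rebind`):

import pyglove as pg

d = pg.Dict(a=1, b=pg.Dict(c='hi'))
# Deep-clone and override a sub-node in one call.
d2 = d.sym_clone(deep=True, override={'b.c': 'bye'})
assert d2.b.c == 'bye'
assert d.b.c == 'hi'  # The original object is untouched.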
40,325 |
pyglove.core.symbolic.base
|
sym_contains
|
Returns True if the object contains sub-nodes of given value or type.
|
def sym_contains(
self,
value: Any = None,
type: Union[None, Type[Any], Tuple[Type[Any]]] = None # pylint: disable=redefined-builtin
) -> bool:
"""Returns True if the object contains sub-nodes of given value or type."""
return contains(self, value, type)
|
(self, value: Optional[Any] = None, type: Union[NoneType, Type[Any], Tuple[Type[Any]]] = None) -> bool
|
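Usage sketch for `sym_contains` (assumes `pyglove` is importable as `pg`; the `pg.Dict` value is illustrative):

import pyglove as pg

d = pg.Dict(a=1, b=pg.Dict(c='hi'))
assert d.sym_contains(value='hi')   # Some sub-node equals 'hi'.
assert d.sym_contains(type=str)     # Some sub-node is a str.
assert not d.sym_contains(value=2)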
40,326 |
pyglove.core.symbolic.base
|
sym_descendants
|
Returns all descendants of specific classes.
Args:
where: Optional callable object as the filter of descendants to return.
option: Descendant query options, indicating whether all matched,
immediate matched or only the matched leaf nodes will be returned.
include_self: If True, `self` will be included in the query, otherwise
only strict descendants are included.
Returns:
A list of descendant objects that match the filter.
|
def sym_descendants(
self,
where: Optional[Callable[[Any], bool]] = None,
option: DescendantQueryOption = DescendantQueryOption.ALL,
include_self: bool = False) -> List[Any]:
"""Returns all descendants of specific classes.
Args:
where: Optional callable object as the filter of descendants to return.
option: Descendant query options, indicating whether all matched,
immediate matched or only the matched leaf nodes will be returned.
include_self: If True, `self` will be included in the query, otherwise
only strict descendants are included.
Returns:
A list of descendant objects that match the filter.
"""
descendants = []
where = where or (lambda x: True)
def visit(k, v, p):
del k, p
if not where(v):
return TraverseAction.ENTER
if not include_self and self is v:
return TraverseAction.ENTER
if option == DescendantQueryOption.IMMEDIATE:
descendants.append(v)
return TraverseAction.CONTINUE
# Dealing with option = ALL or LEAF.
leaf_descendants = []
if isinstance(v, Symbolic):
leaf_descendants = v.sym_descendants(where, option)
if option is DescendantQueryOption.ALL or not leaf_descendants:
descendants.append(v)
descendants.extend(leaf_descendants)
return TraverseAction.CONTINUE
traverse(self, visit)
return descendants
|
(self, where: Optional[Callable[[Any], bool]] = None, option: pyglove.core.symbolic.base.DescendantQueryOption = <DescendantQueryOption.ALL: 0>, include_self: bool = False) -> List[Any]
|
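Usage sketch for `sym_descendants` (assumes `pyglove` is importable as `pg`; the tree below is illustrative):

import pyglove as pg

d = pg.Dict(a=1, b=pg.Dict(c='hi'), e=[pg.Dict(f=2)])
# Collect all pg.Dict sub-nodes; the root itself is excluded unless include_self=True.
sub_dicts = d.sym_descendants(where=lambda v: isinstance(v, pg.Dict))
assert len(sub_dicts) == 2  # d.b and d.e[0].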
40,327 |
pyglove.core.symbolic.object
|
sym_eq
|
Tests symbolic equality.
|
def sym_eq(self, other: Any) -> bool:
"""Tests symbolic equality."""
return self is other or (
type(self) is type(other) and base.eq(
self._sym_attributes, other._sym_attributes)) # pylint: disable=protected-access
|
(self, other: Any) -> bool
|
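Usage sketch for `sym_eq` (assumes `pyglove` is importable as `pg`; the `Foo` class is hypothetical):

import pyglove as pg

@pg.members([('x', pg.typing.Int())])
class Foo(pg.Object):
  pass

assert Foo(x=1).sym_eq(Foo(x=1))          # Same type, same symbolic attributes.
assert not Foo(x=1).sym_eq(pg.Dict(x=1))  # Different types are never symbolically equal.
assert pg.eq(Foo(x=1), Foo(x=1))          # Module-level counterpart.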
40,328 |
pyglove.core.symbolic.base
|
sym_get
|
Returns a sub-node by path.
NOTE: there is no `sym_set`, use `sym_rebind`.
Args:
path: A KeyPath object or equivalence.
default: Default value if path does not exist. If absent, `KeyError` will
be thrown.
Returns:
Value of symbolic attribute specified by path if found, otherwise the
default value if it's specified.
Raises:
KeyError if `path` does not exist and `default` is not specified.
|
def sym_get(
self,
path: Union[object_utils.KeyPath, str, int],
default: Any = RAISE_IF_NOT_FOUND) -> Any:
"""Returns a sub-node by path.
NOTE: there is no `sym_set`, use `sym_rebind`.
Args:
path: A KeyPath object or equivalence.
default: Default value if path does not exist. If absent, `KeyError` will
be thrown.
Returns:
Value of symbolic attribute specified by path if found, otherwise the
default value if it's specified.
Raises:
KeyError if `path` does not exist and `default` is not specified.
"""
path = object_utils.KeyPath.from_value(path)
if default is RAISE_IF_NOT_FOUND:
return path.query(self)
else:
return path.get(self, default)
|
(self, path: Union[pyglove.core.object_utils.value_location.KeyPath, str, int], default: Any = (MISSING_VALUE,)) -> Any
|
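Usage sketch for `sym_get` (assumes `pyglove` is importable as `pg`; the key-path strings are illustrative):

import pyglove as pg

d = pg.Dict(a=pg.Dict(b=[1, 2, 3]))
assert d.sym_get('a.b[1]') == 2                 # Query a nested node by key path.
assert d.sym_get('a.c', default=None) is None   # Missing path falls back to the default.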
40,329 |
pyglove.core.symbolic.base
|
sym_getattr
|
Gets a symbolic attribute.
Args:
key: Key of symbolic attribute.
default: Default value if attribute does not exist. If absent, `AttributeError` will be raised.
Returns:
Value of symbolic attribute if found, otherwise the default value
if it's specified.
Raises:
AttributeError if `key` does not exist and `default` is not provided.
|
def sym_getattr(
self, key: Union[str, int], default: Any = RAISE_IF_NOT_FOUND
) -> Any:
"""Gets a symbolic attribute.
Args:
key: Key of symbolic attribute.
default: Default value if attribute does not exist. If absent, `AttributeError` will be raised.
Returns:
Value of symbolic attribute if found, otherwise the default value
if it's specified.
Raises:
AttributeError if `key` does not exist and `default` is not provided.
"""
if not self.sym_hasattr(key):
if default is RAISE_IF_NOT_FOUND:
raise AttributeError(
self._error_message(
f'{self.__class__!r} object has no symbolic attribute {key!r}.'
)
)
return default
return self._sym_getattr(key)
|
(self, key: Union[str, int], default: Any = (MISSING_VALUE,)) -> Any
|
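Usage sketch for `sym_getattr` (assumes `pyglove` is importable as `pg`; the `pg.Dict` value is illustrative):

import pyglove as pg

d = pg.Dict(x=1, y=pg.Dict(z=2))
assert d.sym_getattr('x') == 1
assert d.sym_getattr('missing', default=None) is None  # No AttributeError when a default is given.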
40,330 |
pyglove.core.symbolic.base
|
sym_gt
|
Returns if this object is symbolically greater than another object.
|
def sym_gt(self, other: Any) -> bool:
"""Returns if this object is symbolically greater than another object."""
return gt(self, other)
|
(self, other: Any) -> bool
|
40,331 |
pyglove.core.symbolic.base
|
sym_has
|
Returns True if a path exists in the sub-tree.
Args:
path: A KeyPath object or equivalence.
Returns:
True if the path exists in current sub-tree, otherwise False.
|
def sym_has(self, path: Union[object_utils.KeyPath, str, int]) -> bool:
"""Returns True if a path exists in the sub-tree.
Args:
path: A KeyPath object or equivalence.
Returns:
True if the path exists in current sub-tree, otherwise False.
"""
return object_utils.KeyPath.from_value(path).exists(self)
|
(self, path: Union[pyglove.core.object_utils.value_location.KeyPath, str, int]) -> bool
|
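Usage sketch for `sym_has` (assumes `pyglove` is importable as `pg`):

import pyglove as pg

d = pg.Dict(a=pg.Dict(b=[1, 2]))
assert d.sym_has('a.b[0]')   # Nested path exists.
assert not d.sym_has('a.c')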
40,332 |
pyglove.core.symbolic.object
|
sym_hasattr
|
Tests if a symbolic attribute exists.
|
def sym_hasattr(self, key: Union[str, int]) -> bool:
"""Tests if a symbolic attribute exists."""
if key == '_sym_attributes':
raise ValueError(
f'{self.__class__.__name__}.__init__ should call `super().__init__`.')
return (
isinstance(key, str)
and not key.startswith('_')
and key in self._sym_attributes
)
|
(self, key: Union[str, int]) -> bool
|
40,333 |
pyglove.core.symbolic.object
|
sym_hash
|
Returns the symbolic hash.
|
def sym_hash(self) -> int:
"""Symbolically hashing."""
return base.sym_hash((self.__class__, base.sym_hash(self._sym_attributes)))
|
(self) -> int
|
40,334 |
pyglove.core.symbolic.base
|
sym_inferrable
|
Returns True if the attribute under key can be inferred.
|
def sym_inferrable(self, key: Union[str, int], **kwargs) -> bool:
"""Returns True if the attribute under key can be inferred."""
return (
self.sym_inferred(key, pg_typing.MISSING_VALUE, **kwargs)
!= pg_typing.MISSING_VALUE
)
|
(self, key: Union[str, int], **kwargs) -> bool
|
40,335 |
pyglove.core.symbolic.base
|
sym_inferred
|
Returns the inferred value of the attribute under key.
|
def sym_inferred(
self,
key: Union[str, int],
default: Any = RAISE_IF_NOT_FOUND,
**kwargs,
) -> Any:
"""Returns the inferred value of the attribute under key."""
if default is RAISE_IF_NOT_FOUND:
return self._sym_inferred(key, **kwargs)
else:
try:
return self._sym_inferred(key, **kwargs)
except Exception: # pylint: disable=broad-exception-caught
return default
|
(self, key: Union[str, int], default: Any = (MISSING_VALUE,), **kwargs) -> Any
|
40,336 |
pyglove.core.symbolic.object
|
sym_items
|
Iterates the (key, value) pairs of symbolic attributes.
|
def sym_items(self):
"""Iterates the (key, value) pairs of symbolic attributes."""
return self._sym_attributes.sym_items()
|
(self)
|
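Usage sketch for `sym_items`, together with the related `sym_keys`/`sym_values` entries below (assumes `pyglove` is importable as `pg`; the `Foo` class is hypothetical):

import pyglove as pg

@pg.members([
    ('x', pg.typing.Int(default=1)),
    ('y', pg.typing.Str(default='a')),
])
class Foo(pg.Object):
  pass

f = Foo()
assert list(f.sym_keys()) == ['x', 'y']
assert dict(f.sym_items()) == {'x': 1, 'y': 'a'}
assert list(f.sym_values()) == [1, 'a']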
40,337 |
pyglove.core.symbolic.object
|
sym_jsonify
|
Converts current object to a dict of plain Python objects.
|
def sym_jsonify(self, **kwargs) -> object_utils.JSONValueType:
"""Converts current object to a dict of plain Python objects."""
return object_utils.merge([
{
object_utils.JSONConvertible.TYPE_NAME_KEY: (
self.__class__.__serialization_key__
)
},
self._sym_attributes.to_json(**kwargs),
])
|
(self, **kwargs) -> Union[int, float, bool, str, List[Any], Dict[str, Any]]
|
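Usage sketch for `sym_jsonify` (assumes `pyglove` is importable as `pg`; the `Foo` class is hypothetical):

import pyglove as pg

@pg.members([('x', pg.typing.Int(default=1))])
class Foo(pg.Object):
  pass

json_dict = Foo(x=2).sym_jsonify()
assert json_dict['x'] == 2                      # Plain values plus a type-name key.
assert pg.eq(pg.from_json(json_dict), Foo(x=2))  # Round trip back to a symbolic object.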
40,338 |
pyglove.core.symbolic.object
|
sym_keys
|
Iterates the keys of symbolic attributes.
|
def sym_keys(self) -> Iterator[str]:
"""Iterates the keys of symbolic attributes."""
return self._sym_attributes.sym_keys()
|
(self) -> Iterator[str]
|
40,339 |
pyglove.core.symbolic.object
|
sym_lt
|
Tests symbolic less-than.
|
def sym_lt(self, other: Any) -> bool:
"""Tests symbolic less-than."""
if type(self) is not type(other):
return base.lt(self, other)
return base.lt(self._sym_attributes, other._sym_attributes) # pylint: disable=protected-access
|
(self, other: Any) -> bool
|
40,340 |
pyglove.core.symbolic.base
|
sym_missing
|
Returns missing values.
|
def sym_missing(self, flatten: bool = True) -> Dict[str, Any]:
"""Returns missing values."""
missing = getattr(self, '_sym_missing_values')
if missing is None:
missing = self._sym_missing()
self._set_raw_attr('_sym_missing_values', missing)
if flatten:
missing = object_utils.flatten(missing)
return missing
|
(self, flatten: bool = True) -> Dict[str, Any]
|
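Usage sketch for `sym_missing` (assumes `pyglove` is importable as `pg`; the `Foo` class is hypothetical):

import pyglove as pg

@pg.members([
    ('x', pg.typing.Int()),           # Required field without a default.
    ('y', pg.typing.Int(default=0)),
])
class Foo(pg.Object):
  pass

f = Foo.partial()              # 'x' is left unspecified.
assert 'x' in f.sym_missing()  # Flattened dict keyed by the path of each missing value.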
40,341 |
pyglove.core.symbolic.base
|
sym_ne
|
Returns True if this object does not equal another object symbolically.
|
def sym_ne(self, other: Any) -> bool:
"""Returns if this object does not equal to another object symbolically."""
return ne(self, other)
|
(self, other: Any) -> bool
|
40,342 |
pyglove.core.symbolic.base
|
sym_nondefault
|
Returns non-default values.
|
def sym_nondefault(self, flatten: bool = True) -> Dict[Union[int, str], Any]:
"""Returns missing values."""
nondefault = getattr(self, '_sym_nondefault_values')
if nondefault is None:
nondefault = self._sym_nondefault()
self._set_raw_attr('_sym_nondefault_values', nondefault)
if flatten:
nondefault = object_utils.flatten(nondefault)
return nondefault
|
(self, flatten: bool = True) -> Dict[Union[int, str], Any]
|
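Usage sketch for `sym_nondefault` (assumes `pyglove` is importable as `pg`; the `Foo` class is hypothetical):

import pyglove as pg

@pg.members([
    ('x', pg.typing.Int(default=1)),
    ('y', pg.typing.Str(default='a')),
])
class Foo(pg.Object):
  pass

assert Foo(x=5).sym_nondefault() == {'x': 5}  # Only values that differ from their defaults.
assert not Foo().sym_nondefault()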
40,343 |
pyglove.core.symbolic.base
|
sym_rebind
|
Mutates the sub-nodes of current object. Please see `rebind`.
|
def sym_rebind(
self,
path_value_pairs: Optional[Union[
Dict[
Union[object_utils.KeyPath, str, int],
Any],
Callable]] = None, # pylint: disable=g-bare-generic
*,
raise_on_no_change: bool = True,
notify_parents: bool = True,
skip_notification: Optional[bool] = None,
**kwargs,
) -> 'Symbolic':
"""Mutates the sub-nodes of current object. Please see `rebind`."""
assert Symbolic.DictType is not None
if callable(path_value_pairs):
path_value_pairs = get_rebind_dict(path_value_pairs, self)
elif path_value_pairs is None:
path_value_pairs = {}
elif isinstance(path_value_pairs, Symbolic.DictType):
# Rebind work on symbolic form, thus we get their symbol instead of
# their evaluated value when building the rebind dict.
sd = typing.cast(Symbolic.DictType, path_value_pairs)
path_value_pairs = {k: v for k, v in sd.sym_items()}
if not isinstance(path_value_pairs, dict):
raise ValueError(
self._error_message(
f'Argument \'path_value_pairs\' should be a dict. '
f'Encountered {path_value_pairs}'))
path_value_pairs.update(kwargs)
path_value_pairs = {object_utils.KeyPath.from_value(k): v
for k, v in path_value_pairs.items()}
if not path_value_pairs and raise_on_no_change:
raise ValueError(self._error_message('There are no values to rebind.'))
updates = self._sym_rebind(path_value_pairs)
if skip_notification is None:
skip_notification = not flags.is_change_notification_enabled()
if not skip_notification:
self._notify_field_updates(updates, notify_parents=notify_parents)
return self
|
(self, path_value_pairs: Union[Dict[Union[pyglove.core.object_utils.value_location.KeyPath, str, int], Any], Callable, NoneType] = None, *, raise_on_no_change: bool = True, notify_parents: bool = True, skip_notification: Optional[bool] = None, **kwargs) -> pyglove.core.symbolic.base.Symbolic
|
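Usage sketch for `sym_rebind` (assumes `pyglove` is importable as `pg`; the paths are illustrative):

import pyglove as pg

d = pg.Dict(a=1, b=pg.Dict(c='hi'))
d.sym_rebind({'a': 2, 'b.c': 'bye'})  # Path/value pairs; paths may reach into sub-nodes.
assert d.a == 2 and d.b.c == 'bye'
d.sym_rebind(a=3)                     # Keyword form for top-level keys.
assert d.a == 3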
40,344 |
pyglove.core.symbolic.base
|
sym_seal
|
Seals or unseals current object from further modification.
|
def sym_seal(self, is_seal: bool = True) -> 'Symbolic':
"""Seals or unseals current object from further modification."""
return self._set_raw_attr('_sealed', is_seal)
|
(self, is_seal: bool = True) -> pyglove.core.symbolic.base.Symbolic
|
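Usage sketch for `sym_seal` (assumes `pyglove` is importable as `pg`):

import pyglove as pg

d = pg.Dict(a=1).sym_seal()  # Sealed: further mutation raises an error.
d.sym_seal(False)            # Unseal to allow modification again.
d.rebind(a=2)
assert d.a == 2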
40,345 |
pyglove.core.symbolic.base
|
sym_setorigin
|
Sets the symbolic origin of current object.
Args:
source: Source value for current object.
tag: A descriptive tag of the origin. Built-in tags are:
`__init__`, `clone`, `deepclone`, `return`. Users can manually
call `sym_setorigin` with custom tag value.
stacktrace: If True, enable stack trace for the origin. If None, enable
stack trace if `pg.track_origin()` is called. Otherwise stack trace is
disabled.
stacklimit: An optional integer to limit the stack depth. If None, it's
determined by the value passed to `pg.set_origin_stacktrace_limit`,
which is 10 by default.
stacktop: A negative or zero-value integer indicating the stack top among
the stack frames that we want to present to user, by default it's
1-level up from the stack within current `sym_setorigin` call.
Example::
def foo():
return bar()
def bar():
s = MyObject()
t = s.build()
t.sym_setorigin(s, 'builder',
stacktrace=True, stacklimit=5, stacktop=-1)
This example sets the origin of `t` using `s` as its source with tag
'builder'. We also record the callstack where the `sym_setorigin` is
called, so users can call `t.sym_origin.stacktrace` to get the call stack
later. The `stacktop` -1 indicates that we do not need the stack frame
within ``sym_setorigin``, so users will see the stack top within the
function `bar`. We also set the max number of stack frames to display to 5,
not including the stack frame inside ``sym_setorigin``.
|
def sym_setorigin(
self,
source: Any,
tag: str,
stacktrace: Optional[bool] = None,
stacklimit: Optional[int] = None,
stacktop: int = -1):
"""Sets the symbolic origin of current object.
Args:
source: Source value for current object.
tag: A descriptive tag of the origin. Built-in tags are:
`__init__`, `clone`, `deepclone`, `return`. Users can manually
call `sym_setorigin` with custom tag value.
stacktrace: If True, enable stack trace for the origin. If None, enable
stack trace if `pg.track_origin()` is called. Otherwise stack trace is
disabled.
stacklimit: An optional integer to limit the stack depth. If None, it's
determined by the value passed to `pg.set_origin_stacktrace_limit`,
which is 10 by default.
stacktop: A negative or zero-value integer indicating the stack top among
the stack frames that we want to present to user, by default it's
1-level up from the stack within current `sym_setorigin` call.
Example::
def foo():
return bar()
def bar():
s = MyObject()
t = s.build()
t.sym_setorigin(s, 'builder',
stacktrace=True, stacklimit=5, stacktop=-1)
This example sets the origin of `t` using `s` as its source with tag
'builder'. We also record the callstack where the `sym_setorigin` is
called, so users can call `t.sym_origin.stacktrace` to get the call stack
later. The `stacktop` -1 indicates that we do not need the stack frame
within ``sym_setorigin``, so users will see the stack top within the
function `bar`. We also set the max number of stack frames to display to 5,
not including the stack frame inside ``sym_setorigin``.
"""
if self.sym_origin is not None:
current_source = self.sym_origin.source
if current_source is not None and current_source is not source:
raise ValueError(
f'Cannot set the origin with a different source value. '
f'Origin source: {current_source!r}, New source: {source!r}.')
# NOTE(daiyip): We decrement the stacktop by 1 as the physical stack top
# is within Origin.
self._set_raw_attr(
'_sym_origin',
Origin(source, tag, stacktrace, stacklimit, stacktop - 1))
|
(self, source: Any, tag: str, stacktrace: Optional[bool] = None, stacklimit: Optional[int] = None, stacktop: int = -1)
|
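Usage sketch for `sym_setorigin`, mirroring the docstring example (assumes `pyglove` is importable as `pg`; the two `pg.Dict` values stand in for a hypothetical builder's source and output):

import pyglove as pg

src = pg.Dict(kind='builder_input')
out = pg.Dict(kind='built_output')
out.sym_setorigin(src, 'builder', stacktrace=True, stacklimit=5, stacktop=-1)
assert out.sym_origin.source is src
assert out.sym_origin.tag == 'builder'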
40,346 |
pyglove.core.symbolic.object
|
sym_setparent
|
Sets the parent of current node in the symbolic tree.
|
def sym_setparent(self, parent: base.Symbolic):
"""Sets the parent of current node in the symbolic tree."""
old_parent = self.sym_parent
super().sym_setparent(parent)
if old_parent is not parent:
self._on_parent_change(old_parent, parent)
|
(self, parent: pyglove.core.symbolic.base.Symbolic)
|
40,347 |
pyglove.core.symbolic.base
|
sym_setpath
|
Sets the path of current node in its symbolic tree.
|
def sym_setpath(
self, path: Optional[Union[str, object_utils.KeyPath]]) -> None:
"""Sets the path of current node in its symbolic tree."""
if self.sym_path != path:
old_path = self.sym_path
self._set_raw_attr('_sym_path', path)
self._update_children_paths(old_path, path)
|
(self, path: Union[str, pyglove.core.object_utils.value_location.KeyPath, NoneType]) -> NoneType
|
40,348 |
pyglove.core.symbolic.object
|
sym_values
|
Iterates the values of symbolic attributes.
|
def sym_values(self):
"""Iterates the values of symbolic attributes."""
return self._sym_attributes.sym_values()
|
(self)
|
40,349 |
pyglove.core.symbolic.base
|
to_json
|
Alias for `sym_jsonify`.
|
def to_json(self, **kwargs) -> object_utils.JSONValueType:
"""Alias for `sym_jsonify`."""
return self.sym_jsonify(**kwargs)
|
(self, **kwargs) -> Union[int, float, bool, str, List[Any], Dict[str, Any]]
|
40,350 |
pyglove.core.symbolic.base
|
to_json_str
|
Serializes current object into a JSON string.
|
def to_json_str(self, json_indent: Optional[int] = None, **kwargs) -> str:
"""Serializes current object into a JSON string."""
return json.dumps(self.sym_jsonify(**kwargs), indent=json_indent)
|
(self, json_indent: Optional[int] = None, **kwargs) -> str
|
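Usage sketch for `to_json_str` (assumes `pyglove` is importable as `pg`; the `Foo` class is hypothetical):

import pyglove as pg

@pg.members([('x', pg.typing.Int(default=1))])
class Foo(pg.Object):
  pass

s = Foo(x=3).to_json_str(json_indent=2)      # JSON string with type info for round-tripping.
assert pg.eq(pg.from_json_str(s), Foo(x=3))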
40,351 |
pyglove.core.typing.custom_typing
|
CustomTyping
|
Interface of custom value type.
Instances of subclasses of CustomTyping can be assigned to fields of
any ValueSpec, and take over `apply` via `custom_apply` method.
As a result, CustomTyping makes the schema system extensible without modifying
existing value specs. For example, value generators can extend CustomTyping
and be assignable to any fields.
|
class CustomTyping(metaclass=abc.ABCMeta):
"""Interface of custom value type.
Instances of subclasses of CustomTyping can be assigned to fields of
any ValueSpec, and take over `apply` via `custom_apply` method.
As a result, CustomTyping makes the schema system extensible without modifying
existing value specs. For example, value generators can extend CustomTyping
and be assignable to any fields.
"""
@abc.abstractmethod
def custom_apply(
self,
path: object_utils.KeyPath,
value_spec: class_schema.ValueSpec,
allow_partial: bool,
child_transform: Optional[Callable[
[object_utils.KeyPath, class_schema.Field, Any], Any]] = None
) -> Tuple[bool, Any]:
"""Custom apply on a value based on its original value spec.
Args:
path: KeyPath of current object under its object tree.
value_spec: Original value spec for this field.
allow_partial: Whether to allow a partial object to be created.
child_transform: Function to transform child node values into their final
values. Transform function is called on leaf nodes first, then on their
parents, recursively.
Returns:
A tuple (proceed_with_standard_apply, value_to_proceed).
If proceed_with_standard_apply is set to False, value_to_proceed
will be used as final value.
Raises:
Error when the value is not compatible with the value spec.
"""
|
()
|
40,352 |
pyglove.core.typing.custom_typing
|
custom_apply
|
Custom apply on a value based on its original value spec.
Args:
path: KeyPath of current object under its object tree.
value_spec: Original value spec for this field.
allow_partial: Whether to allow a partial object to be created.
child_transform: Function to transform child node values into their final
values. Transform function is called on leaf nodes first, then on their
parents, recursively.
Returns:
A tuple (proceed_with_standard_apply, value_to_proceed).
If proceed_with_standard_apply is set to False, value_to_proceed
will be used as final value.
Raises:
Error when the value is not compatible with the value spec.
|
@abc.abstractmethod
def custom_apply(
self,
path: object_utils.KeyPath,
value_spec: class_schema.ValueSpec,
allow_partial: bool,
child_transform: Optional[Callable[
[object_utils.KeyPath, class_schema.Field, Any], Any]] = None
) -> Tuple[bool, Any]:
"""Custom apply on a value based on its original value spec.
Args:
path: KeyPath of current object under its object tree.
value_spec: Original value spec for this field.
allow_partial: Whether to allow a partial object to be created.
child_transform: Function to transform child node values into their final
values. Transform function is called on leaf nodes first, then on their
parents, recursively.
Returns:
A tuple (proceed_with_standard_apply, value_to_proceed).
If proceed_with_standard_apply is set to False, value_to_proceed
will be used as final value.
Raises:
Error when the value is not compatible with the value spec.
"""
|
(self, path: pyglove.core.object_utils.value_location.KeyPath, value_spec: pyglove.core.typing.class_schema.ValueSpec, allow_partial: bool, child_transform: Optional[Callable[[pyglove.core.object_utils.value_location.KeyPath, pyglove.core.typing.class_schema.Field, Any], Any]] = None) -> Tuple[bool, Any]
|
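Implementation sketch for `custom_apply` (assumes `CustomTyping` is exposed as `pg.typing.CustomTyping` and that `ValueSpec.apply` behaves as described above; the `Placeholder` class is hypothetical):

import pyglove as pg

class Placeholder(pg.typing.CustomTyping):
  """A marker value that bypasses the standard value-spec check."""

  def custom_apply(self, path, value_spec, allow_partial, child_transform=None):
    # (False, value) means: skip the standard apply and use `value` directly
    # as the final field value, regardless of the original value spec.
    return (False, self)

# Accepted by an Int spec that would otherwise reject a non-int value.
assert isinstance(pg.typing.Int().apply(Placeholder()), Placeholder)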
40,353 |
pyglove.core.geno.base
|
DNA
|
The genome of a symbolic object relative to its search space.
DNA is a hierarchical structure - each DNA node has a value, and a list of
child DNA nodes. The root node represents the genome that encodes an entire
object relative to its space. The value of a DNA node could be None, an
integer, a float number or a string, depending on its specification
(:class:`pg.DNASpec`). A valid DNA takes one of the following forms.
+--------------------------------------+-----------------+-----------------------------+
| Hyper value type | Possible values | Child nodes |
| (DNASpec type) | | |
+======================================+=================+=============================+
|:class:`pg.hyper.ObjectTemplate` | None |DNA of child decision points |
|(:class:`pg.geno.Space`) |(elements > 1) |(Choices/Float) in the |
| | |template. |
+--------------------------------------+-----------------+-----------------------------+
| |None |Children of elements[0] |
| |(elements == 1 | |
| |and elements[0]. | |
| |num_choices > 1) | |
+--------------------------------------+-----------------+-----------------------------+
| |int |Children of: |
| |(elements == 1 |elements[0][0] |
| |and elements[0]. | |
| |num_choices ==1) | |
+--------------------------------------+-----------------+-----------------------------+
| |float |Empty |
| |(elements == 1 | |
| |and elements[0] | |
| |is geno.Float) | |
+--------------------------------------+-----------------+-----------------------------+
|:func:`pg.oneof` |int |Children of Space |
|(:class:`pg.geno.Choices`) |(candidate index |for the chosen candidate |
| |as choice) | |
+--------------------------------------+-----------------+-----------------------------+
|:func:`pg.manyof` |None |DNA of each chosen candidate |
|(:class:`pg.geno.Choices) |(num_choices > 1 | |
+--------------------------------------+-----------------+-----------------------------+
| |int |Children of chosen candidate |
| |(num_choices==1) | |
+--------------------------------------+-----------------+-----------------------------+
|:func:`pg.floatv` |float |Empty |
|(:class:`pg.geno.Float` ) | | |
+--------------------------------------+-----------------+-----------------------------+
|:class:`pg.hyper.CustomHyper` |string |User defined. |
|(:class:`pg.geno.CustomDecisionPoint`)|(serialized | |
| | object) | |
+--------------------------------------+-----------------+-----------------------------+
DNA can also be represented in a compact form - a tree of numbers/strs,
formally defined as::
<dna> := empty | <decision>
<decision>: = <single-decision>
| <multi-decisions>
| <conditional-choice>
| <custom-decision>
<single-decision> := <categorical-decision>
| <float-decision>
| <custom-decision>
<categorical-decision> := int
<float-decision> := float
<custom-decision> := str
<multiple-decisions> := [<decision>, <decision>, ...]
<conditional-choice> := (<categorical-decision>,
<categorical-decision>,
...
<decision>)
Thus DNA can be constructed by nested structures of list, tuple and numbers.
The numeric value for DNA can be integer (as index of choice) or float (the
value itself will be used as the decoded value).
Examples::
# Empty DNA. This may be generated by an empty template.
DNA()
# A DNA of a nested choice of depth 3.
DNA(0, 0, 0)
# A DNA of three choices at the same level,
# positioned at 0, 1, 2, each choice has value 0.
DNA([0, 0, 0])
# A DNA of two choices (one two-level conditional,
# one non-conditional), position 0 is with choice path: 0 -> [0, 1],
# while [0, 1] means it's a multi-choice, decodable by Sublist or Subset.
# position 1 is a single non-conditional choice: 0.
DNA([(0, [0, 1]), 0])
# A DNA with custom decision point whose encoding
# is defined by the user.
DNA('abc')
|
class DNA(symbolic.Object):
"""The genome of a symbolic object relative to its search space.
DNA is a hierarchical structure - each DNA node has a value, and a list of
child DNA nodes. The root node represents the genome that encodes an entire
object relative to its space. The value of a DNA node could be None, an
integer, a float number or a string, depending on its specification
(:class:`pg.DNASpec`). A valid DNA takes one of the following forms.
+--------------------------------------+-----------------+-----------------------------+
| Hyper value type | Possible values | Child nodes |
| (DNASpec type) | | |
+======================================+=================+=============================+
|:class:`pg.hyper.ObjectTemplate` | None |DNA of child decision points |
|(:class:`pg.geno.Space`) |(elements > 1) |(Choices/Float) in the |
| | |template. |
+--------------------------------------+-----------------+-----------------------------+
| |None |Children of elements[0] |
| |(elements == 1 | |
| |and elements[0]. | |
| |num_choices > 1) | |
+--------------------------------------+-----------------+-----------------------------+
| |int |Children of: |
| |(elements == 1 |elements[0][0] |
| |and elements[0]. | |
| |num_choices ==1) | |
+--------------------------------------+-----------------+-----------------------------+
| |float |Empty |
| |(elements == 1 | |
| |and elements[0] | |
| |is geno.Float) | |
+--------------------------------------+-----------------+-----------------------------+
|:func:`pg.oneof` |int |Children of Space |
|(:class:`pg.geno.Choices`) |(candidate index |for the chosen candidate |
| |as choice) | |
+--------------------------------------+-----------------+-----------------------------+
|:func:`pg.manyof` |None |DNA of each chosen candidate |
|(:class:`pg.geno.Choices) |(num_choices > 1 | |
+--------------------------------------+-----------------+-----------------------------+
| |int |Children of chosen candidate |
| |(num_choices==1) | |
+--------------------------------------+-----------------+-----------------------------+
|:func:`pg.floatv` |float |Empty |
|(:class:`pg.geno.Float` ) | | |
+--------------------------------------+-----------------+-----------------------------+
|:class:`pg.hyper.CustomHyper` |string |User defined. |
|(:class:`pg.geno.CustomDecisionPoint`)|(serialized | |
| | object) | |
+--------------------------------------+-----------------+-----------------------------+
DNA can also be represented in a compact form - a tree of numbers/strs,
formally defined as::
<dna> := empty | <decision>
<decision>: = <single-decision>
| <multi-decisions>
| <conditional-choice>
| <custom-decision>
<single-decision> := <categorical-decision>
| <float-decision>
| <custom-decision>
<categorical-decision> := int
<float-decision> := float
<custom-decision> := str
<multiple-decisions> := [<decision>, <decision>, ...]
<conditional-choice> := (<categorical-decision>,
<categorical-decision>,
...
<decision>)
Thus DNA can be constructed by nested structures of list, tuple and numbers.
The numeric value for DNA can be integer (as index of choice) or float (the
value itself will be used as the decoded value).
Examples::
# Empty DNA. This may be generated by an empty template.
DNA()
# A DNA of a nested choice of depth 3.
DNA(0, 0, 0)
# A DNA of three choices at the same level,
# positioned at 0, 1, 2, each choice has value 0.
DNA([0, 0, 0])
# A DNA of two choices (one two-level conditional,
# one non-conditional), position 0 is with choice path: 0 -> [0, 1],
# while [0, 1] means it's a multi-choice, decodable by Sublist or Subset.
# position 1 is a single non-conditional choice: 0.
DNA([(0, [0, 1]), 0])
# A DNA with custom decision point whose encoding
# is defined by the user.
DNA('abc')
"""
# pylint: enable=line-too-long
# Use compact format for __str__ output.
__str_format_kwargs__ = dict(compact=True)
# Allow assignment on symbolic attributes.
allow_symbolic_assignment = True
@object_utils.explicit_method_override
def __init__(
self,
value: Union[None, int, float, str, List[Any], Tuple[Any]] = None,
# Set MISSING_VALUE to use default from pg_typing.
children: Optional[List['DNA']] = None,
spec: Optional[DNASpec] = None,
metadata: Optional[Dict[str, Any]] = None,
*,
allow_partial: bool = False,
**kwargs):
"""Constructor.
Args:
value: Value for current node.
children: Child DNA(s).
spec: DNA spec that constrains the current node.
metadata: Optional dict as controller metadata for the DNA.
allow_partial: If True, allow the object to be partial.
**kwargs: keyword arguments that will be passed through to
symbolic.Object.
"""
value, children, metadata = self._parse_value_and_children(
value, children, metadata, spec)
super().__init__(
value=value,
children=children,
metadata=metadata or symbolic.Dict(),
allow_partial=allow_partial,
**kwargs)
self._decision_by_id_cache = None
self._named_decisions = None
self._userdata = AttributeDict()
self._cloneable_metadata_keys = set()
self._cloneable_userdata_keys = set()
self._spec = None
if spec:
self.use_spec(spec)
def _on_bound(self):
"""Event that is triggered when any symbolic member changes."""
super()._on_bound()
self._decision_by_id_cache = None
self._named_decisions = None
def _parse_value_and_children(
self,
value: Union[
int, # As a single chosen index.
float, # As a single chosen value.
str, # As a custom genome.
List[Any], # As multi-choice. (coexisting)
Tuple[Any], # As a conditional choice.
None],
children: Optional[List['DNA']],
metadata: Optional[Dict[str, Any]],
dna_spec: Optional[DNASpec]
) -> Tuple[Union[int, None, float], Optional[List['DNA']]]:
"""Parse value (leaf) and children from maybe compositional value."""
if isinstance(value, (list, tuple)):
# The value is compositional, therefore we need to parse the decision
# for current node and construct the children.
if children is not None:
raise ValueError(
f'\'children\' ({children!r}) must be None when '
f'\'value\' ({value!r}) is compositional.')
new_value = None
children = []
if isinstance(value, list):
# Space or multi-choices
children = [DNA(v) for v in value]
else:
# Conditional choices.
if len(value) < 2:
raise ValueError(
f'Tuple as conditional choices must have at least 2 '
f'items. Encountered {value}.')
if isinstance(value[0], (float, int)):
new_value = value[0]
if len(value) == 2:
if isinstance(value[1], list):
# NOTE(daiyip): Multi-choice is allowed only as a leaf choice.
children = [DNA(v) for v in value[1]]
elif isinstance(value[1], (int, float, str)):
children = [DNA(value[1])]
else:
children.append(DNA(value[1:]))
else:
raise ValueError(
f'Tuple as conditional choices only allow multiple '
f'choices to be used at the last position. '
f'Encountered: {value}')
else:
# Normalize DNA by removing trivial intermediate nodes,
# which is DNA with empty value and only one child.
# NOTE(daiyip): during deserialization (from_json) of nested DNA,
# the elements in children might be dicts that are not yet converted
# to DNA. Therefore, we always call `symbolic.from_json` on children,
# which is a no-op for already initialized child DNA.
new_value = value
children = symbolic.from_json(children) or []
if len(children) == 1 and children[0].value is None:
children = children[0].children
if new_value is None and len(children) == 1:
c = children[0]
new_value, children, metadata = c.value, c.children, c.metadata
return new_value, children, metadata
def set_metadata(
self, key: str, value: Any, cloneable: bool = False) -> 'DNA':
"""Set metadata associated with a key.
Metadata associated with the DNA will be persisted and carried over across
processes, which is different from `userdata`. (See `set_userdata` for more
details.)
Args:
key: Key for the metadata.
value: Value for the metadata.
cloneable: If True, the key/value will be propagated during clone.
Returns:
Self.
"""
self.metadata.rebind(
{key: value}, raise_on_no_change=False, skip_notification=True)
if cloneable:
self._cloneable_metadata_keys.add(key)
return self
def set_userdata(
self, key: str, value: Any, cloneable: bool = False) -> 'DNA':
"""Sets user data associated with a key.
User data associated with the DNA will live only within current process,
and is not carried over during serialization/deserialization, which is
different from DNA metadata. (See `set_metadata` for more details.)
Args:
key: Key of the user data.
value: Value of the user data.
cloneable: If True, the key/value will be carried over to the cloned DNA.
Returns:
Self.
"""
self._userdata[key] = value
if cloneable:
self._cloneable_userdata_keys.add(key)
return self
@property
def userdata(self) -> AttributeDict:
"""Gets user data."""
return self._userdata
def _ensure_dna_spec(self) -> None:
"""Raises error if current DNA is not bound with a DNASpec."""
if self._spec is None:
raise ValueError(f'{self!r} is not bound with a DNASpec.')
@property
def spec(self) -> Optional[DNASpec]:
"""Returns DNA spec of current DNA."""
return self._spec
@property
def parent_dna(self) -> Optional['DNA']:
"""Returns parent DNA."""
if self.sym_parent is None:
return None
# NOTE(daiyip): `self.sym_parent` is the `children` field of parent DNA,
# its presence should always align with parent DNA.
parent = self.sym_parent.sym_parent
assert parent is not None
return parent
@property
def root(self) -> 'DNA':
"""Returns the DNA root."""
current = self
parent = current.parent_dna
while parent is not None:
current = parent
parent = parent.parent_dna
return current
@property
def is_subchoice(self) -> bool:
"""Returns True if current DNA is a subchoice of a multi-choice."""
self._ensure_dna_spec()
return self._spec.is_subchoice
@property
def multi_choice_spec(self) -> Optional['DecisionPoint']:
"""Returns the multi-choice spec for child DNAs.
Returns:
If the children of this DNA are decisions of a multi-choice's subchoices,
return the multi-choice spec (`pg.geno.Choices`). Otherwise returns None.
"""
self._ensure_dna_spec()
multi_choice_spec = None
if self.children:
child_spec = self.children[0].spec
if child_spec.is_subchoice:
multi_choice_spec = child_spec.parent_spec
return multi_choice_spec
@property
def is_multi_choice_container(self) -> bool:
"""Returns True if the children of this DNA are multi-choice subchoices."""
return self.multi_choice_spec is not None
@property
def literal_value(self) -> Union[str, int, float,
List[Union[str, int, float]]]:
"""Returns the literal value represented by current DNA."""
self._ensure_dna_spec()
def _literal_value(dna, prefix):
if dna.children:
if dna.value is not None:
assert dna.spec.is_categorical, dna.spec
prefix += f'{dna.value}/{len(dna.spec.candidates)} -> '
v = [_literal_value(c, prefix) for c in dna.children]
return v[0] if len(v) == 1 else v
if dna.spec.is_numerical:
value = str(dna.value)
elif dna.spec.is_custom_decision_point:
value = dna.value
elif dna.spec.literal_values:
value = dna.spec.literal_values[dna.value]
else:
value = f'{dna.value}/{len(dna.spec.candidates)}'
if not prefix:
return value
return prefix + str(value)
return _literal_value(self, '')
@property
def _decision_by_id(self):
"""Lazy loaded decision by ID dict."""
if self._decision_by_id_cache is None:
self._decision_by_id_cache = self.to_dict(
key_type='id', value_type='dna',
include_inactive_decisions=True,
multi_choice_key='both')
return self._decision_by_id_cache
@property
def decision_ids(self) -> List[object_utils.KeyPath]:
"""Returns decision IDs."""
self._ensure_dna_spec()
return self._spec.decision_ids
@property
def named_decisions(self) -> Dict[str, Union['DNA', List['DNA']]]:
"""Returns a dict of name to the named DNA in the sub-tree."""
if self._named_decisions is None:
named_decisions = {}
for spec, dna in self.to_dict(
key_type='dna_spec', value_type='dna',
multi_choice_key='parent',
include_inactive_decisions=True).items():
if spec.name is not None:
v = named_decisions.get(spec.name, None)
if v is None:
v = dna
else:
if not isinstance(dna, list):
dna = [dna]
if isinstance(v, list):
v.extend(dna)
else:
v = [v] + dna
named_decisions[spec.name] = v
self._named_decisions = named_decisions
return self._named_decisions
def use_spec(self, spec: DNASpec) -> 'DNA':
"""Use a DNA spec for this node and children recursively.
Args:
spec: DNA spec.
Returns:
Self.
Raises:
ValueError: current DNA tree does not conform to the DNA spec.
"""
if not isinstance(spec, DNASpec):
raise ValueError(
f'Argument \'spec\' must be a `pg.DNASpec` object. '
f'Encountered: {spec!r}.')
if self._spec is spec:
return self
def _use_spec_for_child_choices(spec: DNASpec, children: List[DNA]):
"""Use spec for child choices."""
assert spec.is_categorical, spec
if spec.num_choices != len(children):
raise ValueError(
f'Number of choices ({spec.num_choices}) does not match with '
f'the number of child values ({len(children)}). '
f'Spec: {spec!r}, Children: {children!r}.')
for i, child in enumerate(children):
subchoice = spec.subchoice(i)
child.use_spec(subchoice)
child_values = [c.value for c in children]
if spec.sorted and sorted(child_values) != child_values:
raise ValueError(
f'Child values {child_values!r} are not sorted. Spec: {spec!r}.')
if spec.distinct and len(set(child_values)) != len(child_values):
raise ValueError(
f'Child values {child_values!r} are not distinct. Spec: {spec!r}.')
# Skip dummy DNA specs.
while spec.is_space and len(spec.elements) == 1:
spec = spec.elements[0]
if spec.is_space:
# Multiple value composition.
if self.value is not None:
raise ValueError(
f'DNA value type mismatch. Value: {self.value}, Spec: {spec!r}.')
if len(spec.elements) != len(self.children):
raise ValueError(
f'Length of DNA child values ({len(self.children)}) is different '
f'from the number of elements ({len(spec.elements)}) '
f'in Spec: {spec!r}.')
for i, elem_spec in enumerate(spec.elements):
self.children[i].use_spec(elem_spec)
elif spec.is_categorical:
if spec.num_choices == 1:
# Single choice.
if not isinstance(self.value, int):
raise ValueError(
f'DNA value type mismatch. Value: {self.value}, Spec: {spec!r}.')
if self.value >= len(spec.candidates):
raise ValueError(
f'Value of DNA is out of range according to the DNA spec. '
f'Value: {self.value}, Spec: {spec!r}.')
chosen_candidate = spec.candidates[self.value]
assert chosen_candidate.is_space, chosen_candidate
# Empty template in chosen candidate.
if not chosen_candidate.elements and self.children:
raise ValueError(
f'There is no DNA spec for child DNA values. '
f'Child values: {self.children}.')
# Non-empty template in chosen candidate.
if len(chosen_candidate.elements) > 1:
# Children are DNA of multiple encoders in chosen composition.
if len(chosen_candidate.elements) != len(self.children):
raise ValueError(
f'Number of elements in child templates '
f'({len(chosen_candidate.elements)}) does not match with '
f'the length of children ({len(self.children)}) from DNA: '
f'{self!r}, Spec: {chosen_candidate}.')
for i, elem_spec in enumerate(chosen_candidate.elements):
self.children[i].use_spec(elem_spec)
elif len(chosen_candidate.elements) == 1:
# Children are multiple choices of the only encoder
# in chosen composition.
sub_spec = chosen_candidate
while sub_spec.is_space and len(sub_spec.elements) == 1:
sub_spec = sub_spec.elements[0]
if sub_spec.is_numerical or sub_spec.is_custom_decision_point:
if len(self.children) != 1:
raise ValueError(
f'Encountered more than 1 value. '
f'Child value: {self.children}, Spec: {sub_spec}.')
self.children[0].use_spec(sub_spec)
else:
assert sub_spec.is_categorical, sub_spec
_use_spec_for_child_choices(sub_spec, self.children)
else:
# Multiple choices.
if self.value is not None:
raise ValueError(
f'Cannot apply multi-choice DNA spec on '
f'value {self.value}: {spec!r}.')
_use_spec_for_child_choices(spec, self.children)
elif spec.is_numerical:
if not isinstance(self.value, float):
raise ValueError(
f'DNA value type mismatch. Value: {self.value}, '
f'Spec: {spec!r}.')
if self.value < spec.min_value:
raise ValueError(
f'DNA value should be no less than {spec.min_value}. '
f'Encountered {self.value}, Spec: {spec!r}.')
if self.value > spec.max_value:
raise ValueError(
f'DNA value should be no greater than {spec.max_value}. '
f'Encountered {self.value}, Spec: {spec!r}.')
else:
assert spec.is_custom_decision_point, spec
if not isinstance(self.value, str):
raise ValueError(
f'DNA value type mismatch, Value: {self.value!r}, Spec: {spec!r}.')
self._spec = spec
return self
@classmethod
def parse(
cls,
json_value: Union[int, # As a single chosen index.
float, # As a single chosen value.
str, # As a custom genome.
List[Any], # As multi-choice. (coexisting)
Tuple[Any], # As a conditional choice.
None # An empty DNA.
],
spec: Optional[DNASpec] = None) -> 'DNA':
"""Parse DNA from a nested structure of numbers.
Deprecated: use `DNA.__init__` instead.
Args:
json_value: A nested structure of numbers.
spec: DNA spec that will be applied to current DNA tree.
Returns:
an instance of DNA object.
Raises:
ValueError: Bad format for json_value or parsed DNA does not conform to
the DNA spec.
"""
return DNA(json_value, spec=spec)
@classmethod
def from_dict(
cls,
dict_repr: Dict[Union['DecisionPoint', str],
Union[None, 'DNA', float, int, str]],
dna_spec: DNASpec,
use_ints_as_literals: bool = False) -> 'DNA':
"""Create a DNA from its dictionary representation.
Args:
dict_repr: The dictionary representation of the DNA.
The keys should be either strings as the decision point ID
or DNASpec objects. The values should be either numeric or literal
values for the decisions.
For inactive decisions, their ID/spec should either be absent from the
dictionary, or use None as their values.
dna_spec: The DNASpec that applies to the DNA.
use_ints_as_literals: If True, when an integer is encountered for
a dictionary value, treat it as the literal value.
Otherwise, always treat it as a candidate index.
Returns:
A DNA object.
"""
def _get_decision(spec: DNASpec):
"""Gets the decision for DNASpec."""
decision = dict_repr.get(spec.id, None)
if decision is None:
decision = dict_repr.get(spec, None)
if decision is None and spec.name:
decision = dict_repr.get(spec.name, None)
# A spec can result in multiple decision points (e.g. multi-choices)
# therefore we always pop up the next single decision if a name
# is associated with multiple decisions.
if isinstance(decision, list):
dict_repr[spec.name] = decision[1:]
decision = decision[0] if decision else None
return decision
def _choice_index(subchoice, value: Union[int, float, str]) -> int:
"""Gets the index of choice value based on its spec."""
if isinstance(value, int) and not use_ints_as_literals:
index = value
if index < 0 or index >= len(subchoice.candidates):
identifier = subchoice.name or subchoice.id
raise ValueError(
f'Candidate index out of range at choice \'{identifier}\'. Index='
f'{index}, Number of candidates={len(subchoice.candidates)}.')
else:
index = subchoice.candidate_index(value)
return index
def _make_dna(spec: DNASpec) -> DNA:
"""Lookup DNA value from parameter values according to DNA spec."""
if spec.is_space:
children = []
for elem in spec.elements:
child = _make_dna(elem)
if child is not None:
children.append(child)
return DNA(None, children)
else:
if spec.is_categorical:
children = []
for choice_idx in range(spec.num_choices):
subchoice = spec.subchoice(choice_idx)
value = _get_decision(subchoice)
# It's possible that the decisions for multiple choices are
# collapsed as a single entry in the dictionary. e.g: {'x': [0, 1]}.
# In this case, we will make another attempt to get the decision
# from the parent spec entry.
if value is None and subchoice.is_subchoice:
parent_decisions = _get_decision(spec)
if parent_decisions is not None:
assert len(parent_decisions) == spec.num_choices, (
parent_decisions, spec)
value = parent_decisions[choice_idx]
if value is None:
identifier = subchoice.name or subchoice.id
raise ValueError(
f'Value for \'{identifier}\' is not found in '
f'the dictionary {dict_repr!r}.')
if isinstance(value, DNA):
children.append(value)
else:
choice_index = _choice_index(subchoice, value)
subspace_dna = _make_dna(subchoice.candidates[choice_index])
children.append(
DNA(choice_index, [subspace_dna] if subspace_dna else []))
return DNA(None, children)
elif spec.is_numerical or spec.is_custom_decision_point:
value = _get_decision(spec)
if value is None:
raise ValueError(
f'Value for \'{spec.name or spec.id}\' is not found '
f'in the dictionary {dict_repr!r}.')
if isinstance(value, DNA):
value = value.value
if spec.is_numerical:
if value < spec.min_value:
raise ValueError(
f'The decision for \'{spec.name or spec.id}\' should '
f'be no less than {spec.min_value}. Encountered {value}.')
if value > spec.max_value:
raise ValueError(
f'The decision for \'{spec.name or spec.id}\' should '
f'be no greater than {spec.max_value}. Encountered {value}.')
else:
if not isinstance(value, str):
raise ValueError(
f'The decision for \'{spec.name or spec.id}\' should '
f'be a string. Encountered {value}.')
return DNA(value, None)
else:
raise NotImplementedError('Should never happen.')
dna = _make_dna(dna_spec)
return dna.use_spec(dna_spec)
def to_dict(
self,
key_type='id',
value_type='value',
multi_choice_key='subchoice',
include_inactive_decisions=False,
filter_fn: Optional[Callable[[DecisionPoint], bool]] = None
) -> Dict[Union[DecisionPoint, str],
Union[None, 'DNA', float, int, str,
List['DNA'], List[int], List[str]]]:
"""Returns the dict representation of current DNA.
Args:
key_type: Key type in returned dictionary. Acceptable values are:
* 'id': Use the ID (canonical location) of each decision point as key.
This is the default behavior.
* 'name_or_id': Use the name of each decision point as key if it's
present, otherwise use ID as key. When the name of a decision
point is present, it is guaranteed not to clash with other
decision points' names or IDs.
* 'dna_spec': Use the DNASpec object of each decision point as key.
value_type: Value type for choices in returned dictionary.
Acceptable values are:
* 'value': Use the index of the chosen candidate for `Choices`, and
use the float number for `Float`. This is the default behavior.
* 'dna': Use `DNA` for all decision points.
* 'choice': Use '{index}/{num_candidates}' for the chosen candidate
for `Choices`, and the chosen float number for `Float`.
* 'literal': Use the literal value for the chosen candidate for
`Choices`, and the chosen float number for `Float`. If the literal
value for the `Choices` decision point is not present, fall back
to the '{index}/{num_candidates}' format.
* 'choice_and_literal': Use '{index}/{num_candidates} ({literal})'
for the chosen candidate for `Choices` and the chosen float number
for `Float`. If the literal value for the `Choices` decision point
is not present, fall back to the '{index}/{num_candidates}' format.
multi_choice_key: 'subchoice', 'parent', or 'both'. If 'subchoice', each
subchoice will insert a key into the dict. If 'parent', subchoices of
a multi-choice will share the parent spec as key, its value will be
a list of decisions from the subchoices. If 'both', the dict will
contain both the keys for subchoices and the key for the parent
multi-choice.
include_inactive_decisions: If True, inactive decisions from the search
space will be added to the dict with value None. Otherwise they will
be absent in the dict.
filter_fn: Decision point filter. If None, all the decision points will be
included in the dict. Otherwise only the decision points that pass
the filter (returns True) will be included.
Returns:
A dictionary of requested key type to value type mapped from the DNA.
Raises:
ValueError: argument `key_type` or `value_type` is not valid.
RuntimeError: If DNA is not associated with a DNASpec.
"""
if key_type not in ['id', 'name_or_id', 'dna_spec']:
raise ValueError(
f'\'key_type\' must be either \'id\', \'name_or_id\' '
f'or \'dna_spec\'. Encountered: {key_type!r}.')
if value_type not in ['dna', 'value', 'choice',
'literal', 'choice_and_literal']:
raise ValueError(
f'\'value_type\' must be either \'dna\', \'value\', \'choice\' '
f'\'literal\' or \'choice_and_literal\'. '
f'Encountered: {value_type!r}.')
if multi_choice_key not in ['subchoice', 'parent', 'both']:
raise ValueError(
f'\'multi_choice_key\' must be either \'subchoice\', \'parent\', or '
f'\'both\'. Encountered: {multi_choice_key!r}.')
multi_choice_use_parent_as_key = multi_choice_key != 'subchoice'
multi_choice_use_subchoice_as_key = multi_choice_key != 'parent'
filter_fn = filter_fn or (lambda x: True)
self._ensure_dna_spec()
dict_repr = dict()
def _needs_subchoice_key(subchoice):
return (multi_choice_use_subchoice_as_key
and (not multi_choice_use_parent_as_key
or (key_type != 'name_or_id' or subchoice.name is None)))
def _key(spec: 'DecisionPoint'):
if key_type == 'id':
return spec.id.path
elif key_type == 'name_or_id':
return spec.name if spec.name else spec.id.path
else:
return spec
def _put(key, value):
if key in dict_repr:
accumulated = dict_repr[key]
if not isinstance(accumulated, list):
accumulated = [accumulated]
accumulated.append(value)
value = accumulated
dict_repr[key] = value
return value
def _dump_node(dna: DNA):
"""Dump node value to dict representation."""
if isinstance(dna.spec, DecisionPoint) and filter_fn(dna.spec):
key = _key(dna.spec)
value = None
if dna.spec.is_categorical and dna.value is not None:
if value_type == 'dna':
value = dna
elif value_type == 'value':
value = dna.value
else:
value = dna.spec.format_candidate(
dna.value, display_format=value_type)
if dna.spec.is_subchoice:
# Append multi-choice values into parent's key.
if multi_choice_use_parent_as_key:
_put(_key(dna.spec.parent_spec), value)
# Insert subchoice in its own key.
if _needs_subchoice_key(dna.spec):
_put(key, value)
else:
_put(key, value)
elif dna.spec.is_numerical or dna.spec.is_custom_decision_point:
if value_type == 'dna':
value = dna
else:
value = dna.value
_put(key, value)
for child_dna in dna.children:
_dump_node(child_dna)
_dump_node(self)
if not include_inactive_decisions:
return dict_repr
result = dict()
for dp in self.spec.decision_points:
if not filter_fn(dp):
continue
if dp.is_categorical and dp.is_subchoice:
if multi_choice_use_parent_as_key:
if dp.subchoice_index == 0:
k = _key(dp.parent_spec)
result[k] = dict_repr.get(k, None)
if _needs_subchoice_key(dp):
k = _key(dp)
result[k] = dict_repr.get(k, None)
else:
k = _key(dp)
result[k] = dict_repr.get(k, None)
return result
@classmethod
def from_numbers(
cls,
dna_values: List[Union[int, float, str]],
dna_spec: DNASpec) -> 'DNA':
"""Create a DNA from a flattened list of dna values.
Args:
dna_values: A list of DNA values.
dna_spec: DNASpec that interprets the dna values.
Returns:
A DNA object.
"""
context = dict(index=0)
def _next_decision():
if context['index'] >= len(dna_values):
raise ValueError(
f'The input {dna_values!r} is too short for {dna_spec!r}.')
decision = dna_values[context['index']]
context['index'] += 1
return decision
def _bind_decisions(dna_spec):
value = None
children = None
if dna_spec.is_space:
children = [_bind_decisions(elem) for elem in dna_spec.elements]
elif dna_spec.is_categorical:
if dna_spec.num_choices == 1:
value = _next_decision()
if value < 0 or value >= len(dna_spec.candidates):
raise ValueError(
f'Candidate index out of range at choice '
f'\'{dna_spec.name or dna_spec.id}\'. Index={value}, '
f'Number of candidates={len(dna_spec.candidates)}.')
children = [_bind_decisions(dna_spec.candidates[value])]
else:
children = [_bind_decisions(spec) for spec in dna_spec.choice_specs]
else:
value = _next_decision()
return DNA(value, children, spec=dna_spec)
dna = _bind_decisions(dna_spec)
if context['index'] != len(dna_values):
end_pos = context['index']
raise ValueError(
f'The input {dna_values!r} is too long for {dna_spec!r}. '
f'Remaining: {dna_values[end_pos:]!r}.')
return dna
def to_numbers(
self, flatten: bool = True,
) -> Union[List[Union[int, float, str]],
object_utils.Nestable[Union[int, float, str]]]:
"""Returns a (maybe) nested structure of numbers as decisions.
Args:
flatten: If True, the hierarchy of the numbers will not be preserved.
Decisions will be returned as a flat list in DFS order. Otherwise, a
nestable structure of numbers will be returned.
Returns:
A flat list or a hierarchical structure of numbers as the decisions made
for each decision point.
"""
if flatten:
decisions = [self.value] if self.value is not None else []
for c in self.children:
decisions.extend(c.to_numbers(flatten))
return decisions
else:
if self.value is None:
return [c.to_numbers(flatten) for c in self.children]
elif not self.children:
return self.value
elif len(self.children) == 1:
child = self.children[0].to_numbers(flatten)
if isinstance(child, tuple):
return tuple([self.value, list(child)])
else:
return (self.value, child)
else:
assert len(self.children) > 1
return (self.value, [c.to_numbers(flatten) for c in self.children])
@classmethod
def from_fn(
cls,
dna_spec: DNASpec,
generator_fn: Callable[['DecisionPoint'],
Union[List[int], float, str, 'DNA']]
) -> 'DNA':
"""Generate a DNA with user generator function.
Args:
dna_spec: The DNASpec for the DNA.
generator_fn: A callable object with signature:
`(decision_point) -> decision`
The decision_point is a `Choices` object or a `Float` object.
The returned decision should be:
* a list of integer or a DNA object for a `Choices` decision point.
When a DNA is returned, it will be used as the DNA for the entire
sub-tree, hence `generate_fn` will not be called on sub-decision
points.
* a float or a DNA object for a Float decision point.
* a string or a DNA object for a CustomDecisionPoint.
Returns:
A DNA generated from the user function.
"""
if not isinstance(dna_spec, DNASpec):
raise TypeError(
f'Argument \'dna_spec\' should be DNASpec type. '
f'Encountered {dna_spec}.')
if dna_spec.is_space:
# Generate values for Space.
children = []
for child_spec in dna_spec.elements:
children.append(DNA.from_fn(child_spec, generator_fn))
if len(children) == 1:
return children[0]
dna = DNA(None, children)
elif dna_spec.is_categorical:
assert isinstance(dna_spec, DecisionPoint), dna_spec
decision = generator_fn(dna_spec)
if isinstance(decision, DNA):
dna = decision
else:
if len(decision) != dna_spec.num_choices:
raise ValueError(
f'Number of DNA child values does not match the number of '
f'choices. Child values: {decision!r}, '
f'Choices: {dna_spec.num_choices}, '
f'Location: {dna_spec.location.path}.')
children = []
for i, choice in enumerate(decision):
choice_location = object_utils.KeyPath(i, dna_spec.location)
if not isinstance(choice, int):
raise ValueError(
f'Choice value should be int. Encountered: {choice}, '
f'Location: {choice_location.path}.')
if choice >= len(dna_spec.candidates):
raise ValueError(
f'Choice out of range. Value: {choice}, '
f'Candidates: {len(dna_spec.candidates)}, '
f'Location: {choice_location.path}.')
child_dna = DNA.from_fn(dna_spec.candidates[choice], generator_fn)
children.append(DNA(choice, [child_dna]))
dna = DNA(None, children)
else:
assert isinstance(dna_spec, DecisionPoint), dna_spec
decision = generator_fn(dna_spec)
if isinstance(decision, DNA):
dna = decision
else:
dna = DNA(decision)
dna_spec.validate(dna)
return dna
def sym_jsonify(
self,
compact: bool = True,
type_info: bool = True,
**kwargs) -> Any:
"""Convert DNA to JSON object.
Args:
    compact: Whether to use the compact form. If compact, the nested number structure
in DNA.parse will be used, otherwise members will be rendered out as
regular symbolic Object.
type_info: If True, type information will be included in output, otherwise
type information will not be included. Applicable when compact is set
to True.
**kwargs: Keyword arguments that will be passed to symbolic.Object if
compact is False.
Returns:
JSON representation of DNA.
"""
if not compact:
json_value = super().sym_jsonify(**kwargs)
assert isinstance(json_value, dict), json_value
if self._cloneable_metadata_keys:
json_value['_cloneable_metadata_keys'] = list(
self._cloneable_metadata_keys)
return json_value
if self.children:
child_nodes = [c.sym_jsonify(compact, type_info=False, **kwargs)
for c in self.children]
if self.value is not None:
if len(child_nodes) == 1:
# Chain single choices into one tuple.
single_choice = child_nodes[0]
if isinstance(single_choice, tuple):
value = (self.value,) + single_choice
else:
value = (self.value, single_choice)
else:
# Put multiple choice as sub-nodes.
value = (self.value, child_nodes)
else:
value = child_nodes
else:
value = self.value
if type_info:
json_value = {
object_utils.JSONConvertible.TYPE_NAME_KEY: (
self.__class__.__serialization_key__
),
'format': 'compact',
'value': symbolic.to_json(value),
}
# NOTE(daiyip): For now, we only attach metadata from the root node for
# faster serialization/deserialization speed. This should be revised if
# metadata for child DNA is used.
if self.metadata:
json_value['metadata'] = symbolic.to_json(self.metadata)
if self._cloneable_metadata_keys:
json_value['_cloneable_metadata_keys'] = list(
self._cloneable_metadata_keys)
return json_value
else:
return value
@classmethod
def from_json(
cls,
json_value: Dict[str, Any],
*,
allow_partial: bool = False,
root_path: Optional[object_utils.KeyPath] = None) -> 'DNA':
"""Class method that load a DNA from a JSON value.
Args:
json_value: Input JSON value, only JSON dict is acceptable.
allow_partial: Whether to allow elements of the list to be partial.
root_path: KeyPath of loaded object in its object tree.
Returns:
A DNA object.
"""
cloneable_metadata_keys = json_value.pop('_cloneable_metadata_keys', None)
if json_value.get('format', None) == 'compact':
# NOTE(daiyip): DNA.parse will validate the input. Therefore, we can
# disable runtime type check during constructing the DNA objects.
with symbolic.enable_type_check(False):
dna = DNA.parse(symbolic.from_json(json_value.get('value')))
if 'metadata' in json_value:
dna.rebind(
metadata=symbolic.from_json(json_value.get('metadata')),
raise_on_no_change=False, skip_notification=True)
else:
dna = super(DNA, cls).from_json(
json_value,
allow_partial=allow_partial,
root_path=root_path) # pytype: disable=bad-return-type
assert isinstance(dna, DNA)
if cloneable_metadata_keys:
dna._cloneable_metadata_keys = set(cloneable_metadata_keys) # pylint: disable=protected-access
return dna
@property
def is_leaf(self) -> bool:
"""Returns whether the current node is a leaf node."""
return not self.children
def __getitem__(
self, key: Union[int, slice, str, object_utils.KeyPath, 'DecisionPoint']
) -> Union[None, 'DNA', List[Optional['DNA']]]:
"""Get an immediate child DNA or DNA in the sub-tree.
Args:
key: The key for retrieving the sub-DNA or sub-DNA list. The key should
be one of:
1) An integer as the index of an immediate child DNA.
2) A name (string) for named decisions whose DNASpec has a not-None
`name` argument.
3) An ID (string or KeyPath) for the decision point to retrieve.
See `DNASpec.id` for details.
      4) A DecisionPoint object whose decision value will be retrieved.
Returns:
The return value should be one of the following:
1) A DNA object if the key only maps to a single DNA object.
2) None if the decision point exists but it's inactive.
3) A list of DNA or None if there are multiple decision points associated
with the key.
"""
if isinstance(key, (int, slice)):
return self.children[key]
if isinstance(key, DNASpec):
key = key.id
return self._decision_by_id[key]
else:
v = self.named_decisions.get(key, None)
if v is None:
v = self._decision_by_id[key]
return v
def get(self,
key: Union[int, slice, str, object_utils.KeyPath, 'DecisionPoint'],
default: Any = None
) -> Union[Any, None, 'DNA', List[Optional['DNA']]]:
"""Get an immediate child DNA or DNA in the sub-tree."""
try:
return self[key]
except KeyError:
return default
def __iter__(self):
"""Iterate child DNA(s)."""
return self.children.__iter__()
def __contains__(self, dna_or_value: Union[int, 'DNA']) -> bool:
"""Returns whether child DNA(s) contains a value."""
for child in self.children:
if isinstance(dna_or_value, (int, float, str)):
if child.value == dna_or_value:
return True
elif isinstance(dna_or_value, DNA):
if child == dna_or_value:
return True
else:
raise ValueError(
f'DNA.__contains__ does not accept '
f'{object_utils.quote_if_str(dna_or_value)!r}.')
return False
def __hash__(self):
"""Hash code."""
return hash((self.value, tuple(self.children)))
def __cmp__(self, other: 'DNA') -> int:
"""DNA comparison."""
if other is None:
return 1
def compare_dna_value(x, y):
if x == y:
return 0
if x is None:
return -1
if y is None:
return 1
if isinstance(x, (int, float)) and isinstance(y, str):
return -1
if isinstance(x, str) and isinstance(y, (int, float)):
return 1
return -1 if x < y else 1
result = compare_dna_value(self.value, other.value)
if result != 0:
return result
if len(self.children) != len(other.children):
raise ValueError(
f'The two input DNA have different number of children. '
f'(Left={self!r}, Right={other!r})')
for i, c in enumerate(self.children):
result = c.__cmp__(other.children[i])
if result != 0:
return result
return 0
def __eq__(self, other: 'DNA') -> bool:
if not isinstance(other, DNA):
return False
return not self.__cmp__(other)
def __ne__(self, other: 'DNA') -> bool:
return not self == other
def __lt__(self, other: 'DNA') -> bool:
if not isinstance(other, DNA):
raise TypeError(f'unorderable types: DNA & {type(other)}')
return self.__cmp__(other) == -1
def next_dna(self) -> Optional['DNA']:
"""Get the next DNA in the spec."""
self._ensure_dna_spec()
return self.spec.next_dna(self)
def iter_dna(self):
"""Iterate DNA of the space starting from self."""
self._ensure_dna_spec()
return self.spec.iter_dna(self)
def format(self,
compact: bool = False,
verbose: bool = True,
root_indent: int = 0,
list_wrap_threshold: int = 80,
as_dict: bool = False,
**kwargs):
"""Customize format method for DNA for more compact representation."""
if as_dict and self.spec:
details = object_utils.format(
self.to_dict(value_type='choice_and_literal'),
False,
verbose,
root_indent,
**kwargs)
return f'DNA({details})'
if 'list_wrap_threshold' not in kwargs:
kwargs['list_wrap_threshold'] = list_wrap_threshold
if not verbose:
return super().format(False, verbose, root_indent, **kwargs)
if self.is_leaf:
return f'DNA({self.value!r})'
rep = object_utils.format(
self.to_json(compact=True, type_info=False),
compact, verbose, root_indent, **kwargs)
if rep and rep[0] == '(':
# NOTE(daiyip): for conditional choice from the root,
# we don't want to keep duplicate round bracket.
return f'DNA{rep}'
return f'DNA({rep})'
def parameters(
self, use_literal_values: bool = False) -> Dict[str, str]:
"""Returns parameters for this DNA to emit based on its spec.
Deprecated: use `to_dict` instead.
Args:
      use_literal_values: If True, returns '{choice}/{num_candidates} ({literal})'
        for Choices, using literal values from the DNASpec; otherwise returns
        '{choice}/{num_candidates}'.
Returns:
Dict of parameter names to their values mapped from this DNA.
Raises:
RuntimeError: If DNA is not associated with a DNASpec.
"""
value_type = 'choice_and_literal' if use_literal_values else 'choice'
return self.to_dict(value_type=value_type)
def _sym_clone(self, deep: bool, memo: Any = None) -> 'DNA':
"""Override to copy DNASpec."""
other = super()._sym_clone(deep, memo)
other._spec = self._spec # pylint: disable=protected-access
for k, v in self._userdata.items():
if k in self._cloneable_userdata_keys:
other._userdata[k] = v # pylint: disable=protected-access
other._cloneable_userdata_keys = set(self._cloneable_userdata_keys) # pylint: disable=protected-access
    # Remove non-cloneable metadata.
metadata = {}
for k, v in self.metadata.items():
if k in self._cloneable_metadata_keys:
metadata[k] = v
other.rebind(metadata=metadata)
other._cloneable_metadata_keys = set(self._cloneable_metadata_keys) # pylint: disable=protected-access
return other
@classmethod
def from_parameters(cls,
parameters: Dict[str, Any],
dna_spec: DNASpec,
use_literal_values: bool = False) -> 'DNA':
"""Create DNA from parameters based on DNASpec.
Deprecated: use `from_dict` instead.
Args:
parameters: A 1-depth dict of parameter names to parameter values.
dna_spec: DNASpec to interpret the parameters.
use_literal_values: If True, parameter values are literal values from
DNASpec.
Returns:
DNA instance bound with the DNASpec.
Raises:
ValueError: If parameters are not aligned with DNA spec.
"""
del use_literal_values
return cls.from_dict(parameters, dna_spec)
|
(value: Union[NoneType, int, float, str, List[Any], Tuple[Any]] = None, children: Optional[List[ForwardRef('DNA')]] = None, spec: Optional[pyglove.core.geno.base.DNASpec] = None, metadata: Optional[Dict[str, Any]] = None, *, allow_partial: bool = False, **kwargs)
|
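The entry above shows how `DNA` parses compositional values (tuples for conditional choices, lists for spaces and multi-choices) and flattens decisions. A minimal sketch, assuming the public `pg.DNA` alias for this class; the values in the comments are illustrative:
import pyglove as pg

# A tuple denotes a conditional choice: choice 0 with two sub-choices 1 and 2.
dna = pg.DNA((0, [1, 2]))
assert dna.value == 0
assert [c.value for c in dna.children] == [1, 2]

dna.to_numbers()                              # [0, 1, 2]: flat list in DFS order.
dna.to_numbers(flatten=False)                 # (0, [1, 2]): hierarchy preserved.
dna.to_json(compact=True, type_info=False)    # (0, [1, 2]): compact JSON value.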
40,355 |
pyglove.core.geno.base
|
__cmp__
|
DNA comparison.
|
def __cmp__(self, other: 'DNA') -> int:
"""DNA comparison."""
if other is None:
return 1
def compare_dna_value(x, y):
if x == y:
return 0
if x is None:
return -1
if y is None:
return 1
if isinstance(x, (int, float)) and isinstance(y, str):
return -1
if isinstance(x, str) and isinstance(y, (int, float)):
return 1
return -1 if x < y else 1
result = compare_dna_value(self.value, other.value)
if result != 0:
return result
if len(self.children) != len(other.children):
raise ValueError(
f'The two input DNA have different number of children. '
f'(Left={self!r}, Right={other!r})')
for i, c in enumerate(self.children):
result = c.__cmp__(other.children[i])
if result != 0:
return result
return 0
|
(self, other: pyglove.core.geno.base.DNA) -> int
|
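A short sketch of the ordering that `__cmp__` defines (None sorts lowest, numbers sort before strings, then children are compared pairwise), again assuming the public `pg.DNA` alias:
import pyglove as pg

assert pg.DNA(1) < pg.DNA(2)               # Leaf values compare numerically.
assert pg.DNA(1) == pg.DNA(1)              # __eq__ also goes through __cmp__.
sorted([pg.DNA(3), pg.DNA(1), pg.DNA(2)])  # [DNA(1), DNA(2), DNA(3)]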
40,356 |
pyglove.core.geno.base
|
__contains__
|
Returns whether child DNA(s) contains a value.
|
def __contains__(self, dna_or_value: Union[int, 'DNA']) -> bool:
"""Returns whether child DNA(s) contains a value."""
for child in self.children:
if isinstance(dna_or_value, (int, float, str)):
if child.value == dna_or_value:
return True
elif isinstance(dna_or_value, DNA):
if child == dna_or_value:
return True
else:
raise ValueError(
f'DNA.__contains__ does not accept '
f'{object_utils.quote_if_str(dna_or_value)!r}.')
return False
|
(self, dna_or_value: Union[int, pyglove.core.geno.base.DNA]) -> bool
|
40,359 |
pyglove.core.geno.base
|
__eq__
| null |
def __eq__(self, other: 'DNA') -> bool:
if not isinstance(other, DNA):
return False
return not self.__cmp__(other)
|
(self, other: pyglove.core.geno.base.DNA) -> bool
|
40,362 |
pyglove.core.geno.base
|
__getitem__
|
Get an immediate child DNA or DNA in the sub-tree.
Args:
key: The key for retrieving the sub-DNA or sub-DNA list. The key should
be one of:
1) An integer as the index of an immediate child DNA.
2) A name (string) for named decisions whose DNASpec has a not-None
`name` argument.
3) An ID (string or KeyPath) for the decision point to retrieve.
See `DNASpec.id` for details.
   4) A DecisionPoint object whose decision value will be retrieved.
Returns:
The return value should be one of the following:
1) A DNA object if the key only maps to a single DNA object.
2) None if the decision point exists but it's inactive.
3) A list of DNA or None if there are multiple decision points associated
with the key.
|
def __getitem__(
self, key: Union[int, slice, str, object_utils.KeyPath, 'DecisionPoint']
) -> Union[None, 'DNA', List[Optional['DNA']]]:
"""Get an immediate child DNA or DNA in the sub-tree.
Args:
key: The key for retrieving the sub-DNA or sub-DNA list. The key should
be one of:
1) An integer as the index of an immediate child DNA.
2) A name (string) for named decisions whose DNASpec has a not-None
`name` argument.
3) An ID (string or KeyPath) for the decision point to retrieve.
See `DNASpec.id` for details.
      4) A DecisionPoint object whose decision value will be retrieved.
Returns:
The return value should be one of the following:
1) A DNA object if the key only maps to a single DNA object.
2) None if the decision point exists but it's inactive.
3) A list of DNA or None if there are multiple decision points associated
with the key.
"""
if isinstance(key, (int, slice)):
return self.children[key]
if isinstance(key, DNASpec):
key = key.id
return self._decision_by_id[key]
else:
v = self.named_decisions.get(key, None)
if v is None:
v = self._decision_by_id[key]
return v
|
(self, key: Union[int, slice, str, pyglove.core.object_utils.value_location.KeyPath, pyglove.core.geno.base.DecisionPoint]) -> Union[NoneType, pyglove.core.geno.base.DNA, List[Optional[pyglove.core.geno.base.DNA]]]
|
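A spec-free sketch of `__getitem__`: integer keys and slices index the immediate children, while string keys (decision point IDs or names) require the DNA to be bound with a DNASpec first (e.g. via `use_spec`):
import pyglove as pg

dna = pg.DNA([0, 1, 2])                        # A space with three child decisions.
assert dna[0].value == 0                       # Integer key: first immediate child.
assert [d.value for d in dna[1:]] == [1, 2]    # Slices return lists of children.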
40,365 |
pyglove.core.geno.base
|
__hash__
|
Hash code.
|
def __hash__(self):
"""Hash code."""
return hash((self.value, tuple(self.children)))
|
(self)
|
40,366 |
pyglove.core.geno.base
|
__init__
|
Constructor.
Args:
value: Value for current node.
children: Child DNA(s).
  spec: DNA spec that constrains the current node.
metadata: Optional dict as controller metadata for the DNA.
allow_partial: If True, allow the object to be partial.
**kwargs: keyword arguments that will be passed through to
symbolic.Object.
|
@object_utils.explicit_method_override
def __init__(
self,
value: Union[None, int, float, str, List[Any], Tuple[Any]] = None,
# Set MISSING_VALUE to use default from pg_typing.
children: Optional[List['DNA']] = None,
spec: Optional[DNASpec] = None,
metadata: Optional[Dict[str, Any]] = None,
*,
allow_partial: bool = False,
**kwargs):
"""Constructor.
Args:
value: Value for current node.
children: Child DNA(s).
      spec: DNA spec that constrains the current node.
metadata: Optional dict as controller metadata for the DNA.
allow_partial: If True, allow the object to be partial.
**kwargs: keyword arguments that will be passed through to
symbolic.Object.
"""
value, children, metadata = self._parse_value_and_children(
value, children, metadata, spec)
super().__init__(
value=value,
children=children,
metadata=metadata or symbolic.Dict(),
allow_partial=allow_partial,
**kwargs)
self._decision_by_id_cache = None
self._named_decisions = None
self._userdata = AttributeDict()
self._cloneable_metadata_keys = set()
self._cloneable_userdata_keys = set()
self._spec = None
if spec:
self.use_spec(spec)
|
(self, value: Union[NoneType, int, float, str, List[Any], Tuple[Any]] = None, children: Optional[List[pyglove.core.geno.base.DNA]] = None, spec: Optional[pyglove.core.geno.base.DNASpec] = None, metadata: Optional[Dict[str, Any]] = None, *, allow_partial: bool = False, **kwargs)
|
40,367 |
pyglove.core.geno.base
|
__iter__
|
Iterate child DNA(s).
|
def __iter__(self):
"""Iterate child DNA(s)."""
return self.children.__iter__()
|
(self)
|
40,369 |
pyglove.core.geno.base
|
__lt__
| null |
def __lt__(self, other: 'DNA') -> bool:
if not isinstance(other, DNA):
raise TypeError(f'unorderable types: DNA & {type(other)}')
return self.__cmp__(other) == -1
|
(self, other: pyglove.core.geno.base.DNA) -> bool
|
40,370 |
pyglove.core.geno.base
|
__ne__
| null |
def __ne__(self, other: 'DNA') -> bool:
return not self == other
|
(self, other: pyglove.core.geno.base.DNA) -> bool
|
40,375 |
pyglove.core.geno.base
|
_ensure_dna_spec
|
Raises error if current DNA is not bound with a DNASpec.
|
def _ensure_dna_spec(self) -> None:
"""Raises error if current DNA is not bound with a DNASpec."""
if self._spec is None:
raise ValueError(f'{self!r} is not bound with a DNASpec.')
|
(self) -> NoneType
|
40,380 |
pyglove.core.geno.base
|
_on_bound
|
Event that is triggered when any symbolic member changes.
|
def _on_bound(self):
"""Event that is triggered when any symbolic member changes."""
super()._on_bound()
self._decision_by_id_cache = None
self._named_decisions = None
|
(self)
|
40,385 |
pyglove.core.geno.base
|
_parse_value_and_children
|
Parse value (leaf) and children from maybe compositional value.
|
def _parse_value_and_children(
self,
value: Union[
int, # As a single chosen index.
float, # As a single chosen value.
str, # As a custom genome.
List[Any], # As multi-choice. (coexisting)
Tuple[Any], # As a conditional choice.
None],
children: Optional[List['DNA']],
metadata: Optional[Dict[str, Any]],
dna_spec: Optional[DNASpec]
) -> Tuple[Union[int, None, float], Optional[List['DNA']]]:
"""Parse value (leaf) and children from maybe compositional value."""
if isinstance(value, (list, tuple)):
# The value is compositional, therefore we need to parse the decision
# for current node and construct the children.
if children is not None:
raise ValueError(
f'\'children\' ({children!r}) must be None when '
f'\'value\' ({value!r}) is compositional.')
new_value = None
children = []
if isinstance(value, list):
# Space or multi-choices
children = [DNA(v) for v in value]
else:
# Conditional choices.
if len(value) < 2:
raise ValueError(
f'Tuple as conditional choices must have at least 2 '
f'items. Encountered {value}.')
if isinstance(value[0], (float, int)):
new_value = value[0]
if len(value) == 2:
if isinstance(value[1], list):
# NOTE(daiyip): Multi-choice is allowed only as a leaf choice.
children = [DNA(v) for v in value[1]]
elif isinstance(value[1], (int, float, str)):
children = [DNA(value[1])]
else:
children.append(DNA(value[1:]))
else:
raise ValueError(
f'Tuple as conditional choices only allow multiple '
f'choices to be used at the last position. '
f'Encountered: {value}')
else:
# Normalize DNA by removing trivial intermediate nodes,
# which is DNA with empty value and only one child.
# NOTE(daiyip): during deserialization (from_json) of nested DNA,
# the elements in children might be dicts that are not yet converted
# to DNA. Therefore, we always call `symbolic.from_json` on children,
# which is a no-op for already initialized child DNA.
new_value = value
children = symbolic.from_json(children) or []
if len(children) == 1 and children[0].value is None:
children = children[0].children
if new_value is None and len(children) == 1:
c = children[0]
new_value, children, metadata = c.value, c.children, c.metadata
return new_value, children, metadata
|
(self, value: Union[int, float, str, List[Any], Tuple[Any], NoneType], children: Optional[List[pyglove.core.geno.base.DNA]], metadata: Optional[Dict[str, Any]], dna_spec: Optional[pyglove.core.geno.base.DNASpec]) -> Tuple[Union[int, NoneType, float], Optional[List[pyglove.core.geno.base.DNA]]]
|
40,390 |
pyglove.core.geno.base
|
_sym_clone
|
Override to copy DNASpec.
|
def _sym_clone(self, deep: bool, memo: Any = None) -> 'DNA':
"""Override to copy DNASpec."""
other = super()._sym_clone(deep, memo)
other._spec = self._spec # pylint: disable=protected-access
for k, v in self._userdata.items():
if k in self._cloneable_userdata_keys:
other._userdata[k] = v # pylint: disable=protected-access
other._cloneable_userdata_keys = set(self._cloneable_userdata_keys) # pylint: disable=protected-access
    # Remove non-cloneable metadata.
metadata = {}
for k, v in self.metadata.items():
if k in self._cloneable_metadata_keys:
metadata[k] = v
other.rebind(metadata=metadata)
other._cloneable_metadata_keys = set(self._cloneable_metadata_keys) # pylint: disable=protected-access
return other
|
(self, deep: bool, memo: Optional[Any] = None) -> pyglove.core.geno.base.DNA
|
40,399 |
pyglove.core.geno.base
|
format
|
Customize format method for DNA for more compact representation.
|
def format(self,
compact: bool = False,
verbose: bool = True,
root_indent: int = 0,
list_wrap_threshold: int = 80,
as_dict: bool = False,
**kwargs):
"""Customize format method for DNA for more compact representation."""
if as_dict and self.spec:
details = object_utils.format(
self.to_dict(value_type='choice_and_literal'),
False,
verbose,
root_indent,
**kwargs)
return f'DNA({details})'
if 'list_wrap_threshold' not in kwargs:
kwargs['list_wrap_threshold'] = list_wrap_threshold
if not verbose:
return super().format(False, verbose, root_indent, **kwargs)
if self.is_leaf:
return f'DNA({self.value!r})'
rep = object_utils.format(
self.to_json(compact=True, type_info=False),
compact, verbose, root_indent, **kwargs)
if rep and rep[0] == '(':
# NOTE(daiyip): for conditional choice from the root,
# we don't want to keep duplicate round bracket.
return f'DNA{rep}'
return f'DNA({rep})'
|
(self, compact: bool = False, verbose: bool = True, root_indent: int = 0, list_wrap_threshold: int = 80, as_dict: bool = False, **kwargs)
|
40,400 |
pyglove.core.geno.base
|
get
|
Get an immediate child DNA or DNA in the sub-tree.
|
def get(self,
key: Union[int, slice, str, object_utils.KeyPath, 'DecisionPoint'],
default: Any = None
) -> Union[Any, None, 'DNA', List[Optional['DNA']]]:
"""Get an immediate child DNA or DNA in the sub-tree."""
try:
return self[key]
except KeyError:
return default
|
(self, key: Union[int, slice, str, pyglove.core.object_utils.value_location.KeyPath, pyglove.core.geno.base.DecisionPoint], default: Optional[Any] = None) -> Union[Any, NoneType, pyglove.core.geno.base.DNA, List[Optional[pyglove.core.geno.base.DNA]]]
|
40,402 |
pyglove.core.geno.base
|
iter_dna
|
Iterate DNA of the space starting from self.
|
def iter_dna(self):
"""Iterate DNA of the space starting from self."""
self._ensure_dna_spec()
return self.spec.iter_dna(self)
|
(self)
|
40,404 |
pyglove.core.geno.base
|
next_dna
|
Get the next DNA in the spec.
|
def next_dna(self) -> Optional['DNA']:
"""Get the next DNA in the spec."""
self._ensure_dna_spec()
return self.spec.next_dna(self)
|
(self) -> Optional[pyglove.core.geno.base.DNA]
|
40,406 |
pyglove.core.geno.base
|
parameters
|
Returns parameters for this DNA to emit based on its spec.
Deprecated: use `to_dict` instead.
Args:
  use_literal_values: If True, returns '{choice}/{num_candidates} ({literal})'
    for Choices, using literal values from the DNASpec; otherwise returns
    '{choice}/{num_candidates}'.
Returns:
Dict of parameter names to their values mapped from this DNA.
Raises:
RuntimeError: If DNA is not associated with a DNASpec.
|
def parameters(
self, use_literal_values: bool = False) -> Dict[str, str]:
"""Returns parameters for this DNA to emit based on its spec.
Deprecated: use `to_dict` instead.
Args:
      use_literal_values: If True, returns '{choice}/{num_candidates} ({literal})'
        for Choices, using literal values from the DNASpec; otherwise returns
        '{choice}/{num_candidates}'.
Returns:
Dict of parameter names to their values mapped from this DNA.
Raises:
RuntimeError: If DNA is not associated with a DNASpec.
"""
value_type = 'choice_and_literal' if use_literal_values else 'choice'
return self.to_dict(value_type=value_type)
|
(self, use_literal_values: bool = False) -> Dict[str, str]
|
40,411 |
pyglove.core.geno.base
|
set_metadata
|
Set metadata associated with a key.
Metadata associated with the DNA will be persisted and carried over across
processes, which is different from `userdata`. (See `set_userdata` for more
details.)
Args:
key: Key for the metadata.
value: Value for the metadata.
cloneable: If True, the key/value will be propagated during clone.
Returns:
Self.
|
def set_metadata(
self, key: str, value: Any, cloneable: bool = False) -> 'DNA':
"""Set metadata associated with a key.
Metadata associated with the DNA will be persisted and carried over across
    processes, which is different from `userdata`. (See `set_userdata` for more
details.)
Args:
key: Key for the metadata.
value: Value for the metadata.
cloneable: If True, the key/value will be propagated during clone.
Returns:
Self.
"""
self.metadata.rebind(
{key: value}, raise_on_no_change=False, skip_notification=True)
if cloneable:
self._cloneable_metadata_keys.add(key)
return self
|
(self, key: str, value: Any, cloneable: bool = False) -> pyglove.core.geno.base.DNA
|
40,412 |
pyglove.core.geno.base
|
set_userdata
|
Sets user data associated with a key.
User data associated with the DNA will live only within the current process,
and is not carried over during serialization/deserialization, which is
different from DNA metadata. (See `set_metadata` for more details.)
Args:
key: Key of the user data.
value: Value of the user data.
  cloneable: If True, the key/value will be carried over to the cloned DNA.
Returns:
Self.
|
def set_userdata(
self, key: str, value: Any, cloneable: bool = False) -> 'DNA':
"""Sets user data associated with a key.
    User data associated with the DNA will live only within the current process,
and is not carried over during serialization/deserialization, which is
different from DNA metadata. (See `set_metadata` for more details.)
Args:
key: Key of the user data.
value: Value of the user data.
      cloneable: If True, the key/value will be carried over to the cloned DNA.
Returns:
Self.
"""
self._userdata[key] = value
if cloneable:
self._cloneable_userdata_keys.add(key)
return self
|
(self, key: str, value: Any, cloneable: bool = False) -> pyglove.core.geno.base.DNA
|
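A sketch contrasting the two setters above: metadata is symbolic and persisted with the DNA, userdata is process-local, and both are dropped on clone unless marked cloneable. It assumes the `clone` method inherited from `symbolic.Object`:
import pyglove as pg

dna = pg.DNA(0)
dna.set_metadata('trial_id', 123, cloneable=True)  # Persisted; survives clone.
dna.set_userdata('scratch', object())              # Process-local; never serialized.

copy = dna.clone(deep=True)
assert copy.metadata['trial_id'] == 123            # Cloneable metadata is carried over.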
40,428 |
pyglove.core.geno.base
|
sym_jsonify
|
Convert DNA to JSON object.
Args:
  compact: Whether to use the compact form. If compact, the nested number structure
in DNA.parse will be used, otherwise members will be rendered out as
regular symbolic Object.
type_info: If True, type information will be included in output, otherwise
type information will not be included. Applicable when compact is set
to True.
**kwargs: Keyword arguments that will be passed to symbolic.Object if
compact is False.
Returns:
JSON representation of DNA.
|
def sym_jsonify(
self,
compact: bool = True,
type_info: bool = True,
**kwargs) -> Any:
"""Convert DNA to JSON object.
Args:
      compact: Whether to use the compact form. If compact, the nested number structure
in DNA.parse will be used, otherwise members will be rendered out as
regular symbolic Object.
type_info: If True, type information will be included in output, otherwise
type information will not be included. Applicable when compact is set
to True.
**kwargs: Keyword arguments that will be passed to symbolic.Object if
compact is False.
Returns:
JSON representation of DNA.
"""
if not compact:
json_value = super().sym_jsonify(**kwargs)
assert isinstance(json_value, dict), json_value
if self._cloneable_metadata_keys:
json_value['_cloneable_metadata_keys'] = list(
self._cloneable_metadata_keys)
return json_value
if self.children:
child_nodes = [c.sym_jsonify(compact, type_info=False, **kwargs)
for c in self.children]
if self.value is not None:
if len(child_nodes) == 1:
# Chain single choices into one tuple.
single_choice = child_nodes[0]
if isinstance(single_choice, tuple):
value = (self.value,) + single_choice
else:
value = (self.value, single_choice)
else:
# Put multiple choice as sub-nodes.
value = (self.value, child_nodes)
else:
value = child_nodes
else:
value = self.value
if type_info:
json_value = {
object_utils.JSONConvertible.TYPE_NAME_KEY: (
self.__class__.__serialization_key__
),
'format': 'compact',
'value': symbolic.to_json(value),
}
# NOTE(daiyip): For now, we only attach metadata from the root node for
# faster serialization/deserialization speed. This should be revised if
# metadata for child DNA is used.
if self.metadata:
json_value['metadata'] = symbolic.to_json(self.metadata)
if self._cloneable_metadata_keys:
json_value['_cloneable_metadata_keys'] = list(
self._cloneable_metadata_keys)
return json_value
else:
return value
|
(self, compact: bool = True, type_info: bool = True, **kwargs) -> Any
|
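A sketch of the compact serialization round trip described above. With `type_info=True` the output carries the class key, so it can be restored through the generic deserializer (assuming the public `pg.from_json` alias for `symbolic.from_json`):
import pyglove as pg

dna = pg.DNA((0, [1, 2]))
dna.to_json(compact=True, type_info=False)   # (0, [1, 2]): bare nested structure.

json_value = dna.to_json(compact=True, type_info=True)
restored = pg.from_json(json_value)          # Dispatches to DNA.from_json above.
assert restored == dna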
40,440 |
pyglove.core.geno.base
|
to_dict
|
Returns the dict representation of current DNA.
Args:
key_type: Key type in returned dictionary. Acceptable values are:
* 'id': Use the ID (canonical location) of each decision point as key.
This is the default behavior.
* 'name_or_id': Use the name of each decision point as key if it's
present, otherwise use ID as key. When the name of a decision
point is presented, it is guaranteed not to clash with other
decision points' names or IDs.
* 'dna_spec': Use the DNASpec object of each decision point as key.
value_type: Value type for choices in returned dictionary.
Acceptable values are:
* 'value': Use the index of the chosen candidate for `Choices`, and
use the float number for `Float`. This is the default behavior.
* 'dna': Use `DNA` for all decision points.
* 'choice': Use '{index}/{num_candidates}' for the chosen candidate
for `Choices`, and the chosen float number for `Float`.
* 'literal': Use the literal value for the chosen candidate for
`Choices`, and the chosen float number for `Float`. If the literal
value for the `Choices` decision point is not present, fall back
to the '{index}/{num_candidates}' format.
* 'choice_and_literal': Use '{index}/{num_candidates} ({literal})'
for the chosen candidate for `Choices` and then chosen float number
for `Float`. If the literal value for the `Choices` decision point
is not present, fall back to the '{index}/{num_candidates}' format.
multi_choice_key: 'subchoice', 'parent', or 'both'. If 'subchoice', each
subchoice will insert a key into the dict. If 'parent', subchoices of
a multi-choice will share the parent spec as key, its value will be
a list of decisions from the subchoices. If 'both', the dict will
contain both the keys for subchoices and the key for the parent
multi-choice.
include_inactive_decisions: If True, inactive decisions from the search
space will be added to the dict with value None. Otherwise they will
be absent in the dict.
filter_fn: Decision point filter. If None, all the decision points will be
included in the dict. Otherwise only the decision points that pass
the filter (returns True) will be included.
Returns:
A dictionary of requested key type to value type mapped from the DNA.
Raises:
ValueError: argument `key_type` or `value_type` is not valid.
RuntimeError: If DNA is not associated with a DNASpec.
|
def to_dict(
self,
key_type='id',
value_type='value',
multi_choice_key='subchoice',
include_inactive_decisions=False,
filter_fn: Optional[Callable[[DecisionPoint], bool]] = None
) -> Dict[Union[DecisionPoint, str],
Union[None, 'DNA', float, int, str,
List['DNA'], List[int], List[str]]]:
"""Returns the dict representation of current DNA.
Args:
key_type: Key type in returned dictionary. Acceptable values are:
* 'id': Use the ID (canonical location) of each decision point as key.
This is the default behavior.
* 'name_or_id': Use the name of each decision point as key if it's
present, otherwise use ID as key. When the name of a decision
point is presented, it is guaranteed not to clash with other
decision points' names or IDs.
* 'dna_spec': Use the DNASpec object of each decision point as key.
value_type: Value type for choices in returned dictionary.
Acceptable values are:
* 'value': Use the index of the chosen candidate for `Choices`, and
use the float number for `Float`. This is the default behavior.
* 'dna': Use `DNA` for all decision points.
* 'choice': Use '{index}/{num_candidates}' for the chosen candidate
for `Choices`, and the chosen float number for `Float`.
* 'literal': Use the literal value for the chosen candidate for
`Choices`, and the chosen float number for `Float`. If the literal
value for the `Choices` decision point is not present, fall back
to the '{index}/{num_candidates}' format.
* 'choice_and_literal': Use '{index}/{num_candidates} ({literal})'
for the chosen candidate for `Choices` and then chosen float number
for `Float`. If the literal value for the `Choices` decision point
is not present, fall back to the '{index}/{num_candidates}' format.
multi_choice_key: 'subchoice', 'parent', or 'both'. If 'subchoice', each
subchoice will insert a key into the dict. If 'parent', subchoices of
a multi-choice will share the parent spec as key, its value will be
a list of decisions from the subchoices. If 'both', the dict will
contain both the keys for subchoices and the key for the parent
multi-choice.
include_inactive_decisions: If True, inactive decisions from the search
space will be added to the dict with value None. Otherwise they will
be absent in the dict.
filter_fn: Decision point filter. If None, all the decision points will be
included in the dict. Otherwise only the decision points that pass
the filter (returns True) will be included.
Returns:
A dictionary of requested key type to value type mapped from the DNA.
Raises:
ValueError: argument `key_type` or `value_type` is not valid.
RuntimeError: If DNA is not associated with a DNASpec.
"""
if key_type not in ['id', 'name_or_id', 'dna_spec']:
raise ValueError(
f'\'key_type\' must be either \'id\', \'name_or_id\' '
f'or \'dna_spec\'. Encountered: {key_type!r}.')
if value_type not in ['dna', 'value', 'choice',
'literal', 'choice_and_literal']:
raise ValueError(
f'\'value_type\' must be either \'dna\', \'value\', \'choice\' '
f'\'literal\' or \'choice_and_literal\'. '
f'Encountered: {value_type!r}.')
if multi_choice_key not in ['subchoice', 'parent', 'both']:
raise ValueError(
f'\'multi_choice_key\' must be either \'subchoice\', \'parent\', or '
f'\'both\'. Encountered: {multi_choice_key!r}.')
multi_choice_use_parent_as_key = multi_choice_key != 'subchoice'
multi_choice_use_subchoice_as_key = multi_choice_key != 'parent'
filter_fn = filter_fn or (lambda x: True)
self._ensure_dna_spec()
dict_repr = dict()
def _needs_subchoice_key(subchoice):
return (multi_choice_use_subchoice_as_key
and (not multi_choice_use_parent_as_key
or (key_type != 'name_or_id' or subchoice.name is None)))
def _key(spec: 'DecisionPoint'):
if key_type == 'id':
return spec.id.path
elif key_type == 'name_or_id':
return spec.name if spec.name else spec.id.path
else:
return spec
def _put(key, value):
if key in dict_repr:
accumulated = dict_repr[key]
if not isinstance(accumulated, list):
accumulated = [accumulated]
accumulated.append(value)
value = accumulated
dict_repr[key] = value
return value
def _dump_node(dna: DNA):
"""Dump node value to dict representation."""
if isinstance(dna.spec, DecisionPoint) and filter_fn(dna.spec):
key = _key(dna.spec)
value = None
if dna.spec.is_categorical and dna.value is not None:
if value_type == 'dna':
value = dna
elif value_type == 'value':
value = dna.value
else:
value = dna.spec.format_candidate(
dna.value, display_format=value_type)
if dna.spec.is_subchoice:
# Append multi-choice values into parent's key.
if multi_choice_use_parent_as_key:
_put(_key(dna.spec.parent_spec), value)
# Insert subchoice in its own key.
if _needs_subchoice_key(dna.spec):
_put(key, value)
else:
_put(key, value)
elif dna.spec.is_numerical or dna.spec.is_custom_decision_point:
if value_type == 'dna':
value = dna
else:
value = dna.value
_put(key, value)
for child_dna in dna.children:
_dump_node(child_dna)
_dump_node(self)
if not include_inactive_decisions:
return dict_repr
result = dict()
for dp in self.spec.decision_points:
if not filter_fn(dp):
continue
if dp.is_categorical and dp.is_subchoice:
if multi_choice_use_parent_as_key:
if dp.subchoice_index == 0:
k = _key(dp.parent_spec)
result[k] = dict_repr.get(k, None)
if _needs_subchoice_key(dp):
k = _key(dp)
result[k] = dict_repr.get(k, None)
else:
k = _key(dp)
result[k] = dict_repr.get(k, None)
return result
|
(self, key_type='id', value_type='value', multi_choice_key='subchoice', include_inactive_decisions=False, filter_fn: Optional[Callable[[pyglove.core.geno.base.DecisionPoint], bool]] = None) -> Dict[Union[pyglove.core.geno.base.DecisionPoint, str], Union[NoneType, pyglove.core.geno.base.DNA, float, int, str, List[pyglove.core.geno.base.DNA], List[int], List[str]]]
|
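`to_dict` requires the DNA to be bound with a DNASpec. A sketch assuming pyglove's top-level `pg.Dict`, `pg.oneof` and `pg.dna_spec` helpers to build a one-decision search space keyed by 'x'; the exact literal strings depend on the candidates:
import pyglove as pg

spec = pg.dna_spec(pg.Dict(x=pg.oneof(['a', 'b', 'c'])))
dna = pg.DNA(1, spec=spec)                    # Binds the spec via use_spec.

dna.to_dict()                                 # e.g. {'x': 1}: chosen candidate index.
dna.to_dict(value_type='literal')             # Literal value of the chosen candidate.
dna.to_dict(value_type='choice_and_literal')  # '{index}/{num_candidates} (literal)' form.
dna['x'].value                                # 1: ID-based lookup once a spec is bound.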
40,443 |
pyglove.core.geno.base
|
to_numbers
|
Returns a (maybe) nested structure of numbers as decisions.
Args:
flatten: If True, the hierarchy of the numbers will not be preserved.
Decisions will be returned as a flat list in DFS order. Otherwise, a
nestable structure of numbers will be returned.
Returns:
A flat list or a hierarchical structure of numbers as the decisions made
for each decision point.
|
def to_numbers(
self, flatten: bool = True,
) -> Union[List[Union[int, float, str]],
object_utils.Nestable[Union[int, float, str]]]:
"""Returns a (maybe) nested structure of numbers as decisions.
Args:
flatten: If True, the hierarchy of the numbers will not be preserved.
Decisions will be returned as a flat list in DFS order. Otherwise, a
nestable structure of numbers will be returned.
Returns:
A flat list or a hierarchical structure of numbers as the decisions made
for each decision point.
"""
if flatten:
decisions = [self.value] if self.value is not None else []
for c in self.children:
decisions.extend(c.to_numbers(flatten))
return decisions
else:
if self.value is None:
return [c.to_numbers(flatten) for c in self.children]
elif not self.children:
return self.value
elif len(self.children) == 1:
child = self.children[0].to_numbers(flatten)
if isinstance(child, tuple):
return tuple([self.value, list(child)])
else:
return (self.value, child)
else:
assert len(self.children) > 1
return (self.value, [c.to_numbers(flatten) for c in self.children])
|
(self, flatten: bool = True) -> Union[List[Union[str, int, float]], Any, int, float, str]
|
40,444 |
pyglove.core.geno.base
|
use_spec
|
Use a DNA spec for this node and children recursively.
Args:
spec: DNA spec.
Returns:
Self.
Raises:
ValueError: current DNA tree does not conform to the DNA spec.
|
def use_spec(self, spec: DNASpec) -> 'DNA':
"""Use a DNA spec for this node and children recursively.
Args:
spec: DNA spec.
Returns:
Self.
Raises:
ValueError: current DNA tree does not conform to the DNA spec.
"""
if not isinstance(spec, DNASpec):
raise ValueError(
f'Argument \'spec\' must be a `pg.DNASpec` object. '
f'Encountered: {spec!r}.')
if self._spec is spec:
return self
def _use_spec_for_child_choices(spec: DNASpec, children: List[DNA]):
"""Use spec for child choices."""
assert spec.is_categorical, spec
if spec.num_choices != len(children):
raise ValueError(
f'Number of choices ({spec.num_choices}) does not match with '
          f'the number of child values ({len(children)}). '
f'Spec: {spec!r}, Children: {children!r}.')
for i, child in enumerate(children):
subchoice = spec.subchoice(i)
child.use_spec(subchoice)
child_values = [c.value for c in children]
if spec.sorted and sorted(child_values) != child_values:
raise ValueError(
f'Child values {child_values!r} are not sorted. Spec: {spec!r}.')
if spec.distinct and len(set(child_values)) != len(child_values):
raise ValueError(
f'Child values {child_values!r} are not distinct. Spec: {spec!r}.')
# Skip dummy DNA specs.
while spec.is_space and len(spec.elements) == 1:
spec = spec.elements[0]
if spec.is_space:
# Multiple value composition.
if self.value is not None:
raise ValueError(
f'DNA value type mismatch. Value: {self.value}, Spec: {spec!r}.')
if len(spec.elements) != len(self.children):
raise ValueError(
f'Length of DNA child values ({len(self.children)}) is different '
f'from the number of elements ({len(spec.elements)}) '
f'in Spec: {spec!r}.')
for i, elem_spec in enumerate(spec.elements):
self.children[i].use_spec(elem_spec)
elif spec.is_categorical:
if spec.num_choices == 1:
# Single choice.
if not isinstance(self.value, int):
raise ValueError(
f'DNA value type mismatch. Value: {self.value}, Spec: {spec!r}.')
if self.value >= len(spec.candidates):
raise ValueError(
f'Value of DNA is out of range according to the DNA spec. '
f'Value: {self.value}, Spec: {spec!r}.')
chosen_candidate = spec.candidates[self.value]
assert chosen_candidate.is_space, chosen_candidate
# Empty template in chosen candidate.
if not chosen_candidate.elements and self.children:
raise ValueError(
f'There is no DNA spec for child DNA values. '
f'Child values: {self.children}.')
      # Non-empty template in chosen candidate.
if len(chosen_candidate.elements) > 1:
# Children are DNA of multiple encoders in chosen composition.
if len(chosen_candidate.elements) != len(self.children):
raise ValueError(
f'Number of elements in child templates '
f'({len(chosen_candidate.elements)}) does not match with '
f'the length of children ({len(self.children)}) from DNA: '
f'{self!r}, Spec: {chosen_candidate}.')
for i, elem_spec in enumerate(chosen_candidate.elements):
self.children[i].use_spec(elem_spec)
elif len(chosen_candidate.elements) == 1:
# Children are multiple choices of the only encoder
# in chosen composition.
sub_spec = chosen_candidate
while sub_spec.is_space and len(sub_spec.elements) == 1:
sub_spec = sub_spec.elements[0]
if sub_spec.is_numerical or sub_spec.is_custom_decision_point:
if len(self.children) != 1:
raise ValueError(
                f'Encountered more than 1 value. '
f'Child value: {self.children}, Spec: {sub_spec}.')
self.children[0].use_spec(sub_spec)
else:
assert sub_spec.is_categorical, sub_spec
_use_spec_for_child_choices(sub_spec, self.children)
else:
# Multiple choices.
if self.value is not None:
raise ValueError(
f'Cannot apply multi-choice DNA spec on '
f'value {self.value}: {spec!r}.')
_use_spec_for_child_choices(spec, self.children)
elif spec.is_numerical:
if not isinstance(self.value, float):
raise ValueError(
f'DNA value type mismatch. Value: {self.value}, '
f'Spec: {spec!r}.')
if self.value < spec.min_value:
raise ValueError(
f'DNA value should be no less than {spec.min_value}. '
f'Encountered {self.value}, Spec: {spec!r}.')
if self.value > spec.max_value:
raise ValueError(
f'DNA value should be no greater than {spec.max_value}. '
f'Encountered {self.value}, Spec: {spec!r}.')
else:
assert spec.is_custom_decision_point, spec
if not isinstance(self.value, str):
raise ValueError(
f'DNA value type mismatch, Value: {self.value!r}, Spec: {spec!r}.')
self._spec = spec
return self
|
(self, spec: pyglove.core.geno.base.DNASpec) -> pyglove.core.geno.base.DNA
|
40,445 |
pyglove.core.geno.dna_generator
|
DNAGenerator
|
Base class for DNA generator.
A DNA generator is an object that produces :class:`pyglove.DNA`, and
optionally takes feedback from the caller to improve its future proposals.
To implement a DNA generator, the user must implement the `_propose` method,
and can optionally override the `_setup`, `_feedback` and `_replay` methods.
* Making proposals (Required): This method defines what to return as the
  next DNA from the generator; users MUST override the `_propose` method to
implement this logic. `_propose` can raise `StopIteration` when no more
DNA can be produced.
* Custom setup (Optional): Usually a DNAGenerator subclass has its internal
state, which can be initialized when the search space definition is
attached to the DNAGenerator. To do so, the user can override the `_setup`
method, in which we can access the search space definition (DNASpec object)
via `self.dna_spec`.
* Taking feedback (Optional): A DNAGenerator may take feedback from the
caller on the fitness of proposed DNA to improve future proposals. The
fitness is measured by a reward (a float number as the measure of a single
objective, or a tuple of float numbers as the measure for multiple
objectives). The user should override the `_feedback` method to implement
  such logic. If the reward is for multiple objectives, the user should
override the `multi_objective` property to return True.
* State recovery (Optional): DNAGenerator was designed with distributed
computing in mind, in which a process can be preempted or killed
unexpectedly. Therefore, a DNAGenerator should be able to recover its
state from historical proposals and rewards. The `recover` method was
introduced for such purpose, whose default implementation is to replay the
history through the `_feedback` method. If the user has a custom replay
logic other than `_feedback`, they should override the `_replay` method.
In some use cases, the user may want to implement their own checkpointing
logic. In such cases, the user can override the `recover` method as a
  no-op. As a side note, the `recover` method will be called by the tuning
backend (see `tuning.py`) after `setup` but before `propose`.
See also:
* :class:`pyglove.geno.Sweeping`
* :class:`pyglove.geno.Random`
* :func:`pyglove.geno.dna_generator`
|
class DNAGenerator(symbolic.Object):
"""Base class for DNA generator.
A DNA generator is an object that produces :class:`pyglove.DNA`, and
optionally takes feedback from the caller to improve its future proposals.
To implement a DNA generator, the user must implement the `_propose` method,
and can optionally override the `_setup`, `_feedback` and `_replay` methods.
* Making proposals (Required): This method defines what to return as the
    next DNA from the generator; users MUST override the `_propose` method to
implement this logic. `_propose` can raise `StopIteration` when no more
DNA can be produced.
* Custom setup (Optional): Usually a DNAGenerator subclass has its internal
state, which can be initialized when the search space definition is
attached to the DNAGenerator. To do so, the user can override the `_setup`
method, in which we can access the search space definition (DNASpec object)
via `self.dna_spec`.
  * Taking feedback (Optional): A DNAGenerator may take feedback from the
caller on the fitness of proposed DNA to improve future proposals. The
fitness is measured by a reward (a float number as the measure of a single
objective, or a tuple of float numbers as the measure for multiple
objectives). The user should override the `_feedback` method to implement
    such logic. If the reward is for multiple objectives, the user should
override the `multi_objective` property to return True.
* State recovery (Optional): DNAGenerator was designed with distributed
computing in mind, in which a process can be preempted or killed
unexpectedly. Therefore, a DNAGenerator should be able to recover its
state from historical proposals and rewards. The `recover` method was
introduced for such purpose, whose default implementation is to replay the
history through the `_feedback` method. If the user has a custom replay
logic other than `_feedback`, they should override the `_replay` method.
In some use cases, the user may want to implement their own checkpointing
logic. In such cases, the user can override the `recover` method as a
    no-op. As a side note, the `recover` method will be called by the tuning
backend (see `tuning.py`) after `setup` but before `propose`.
See also:
* :class:`pyglove.geno.Sweeping`
* :class:`pyglove.geno.Random`
* :func:`pyglove.geno.dna_generator`
"""
def setup(self, dna_spec: DNASpec) -> None:
"""Setup DNA spec."""
self._dna_spec = dna_spec
self._num_proposals = 0
self._num_feedbacks = 0
self._setup()
def _setup(self) -> None:
"""Subclass should override this for adding additional setup logics."""
@property
def multi_objective(self) -> bool:
"""If True, current DNA generator supports multi-objective optimization."""
return False
@property
def needs_feedback(self) -> bool:
"""Returns True if the DNAGenerator needs feedback."""
return self._feedback.__code__ is not DNAGenerator._feedback.__code__ # pytype: disable=attribute-error
@property
def dna_spec(self) -> Optional[DNASpec]:
return getattr(self, '_dna_spec', None)
@property
def num_proposals(self):
"""Get number of proposals that are already produced."""
return self._num_proposals
@property
def num_feedbacks(self):
"""Get number of proposals whose feedback are provided."""
return self._num_feedbacks
def propose(self) -> DNA:
"""Propose a DNA to evaluate."""
dna = self._propose()
self._num_proposals += 1
return dna
def _propose(self) -> DNA:
"""Actual propose method which should be implemented by the child class."""
raise NotImplementedError()
def feedback(self, dna: DNA, reward: Union[float, Tuple[float]]) -> None:
"""Feedback a completed trial to the algorithm.
Args:
dna: a DNA object.
reward: reward for the DNA. It is a float if `self.multi_objective`
returns False, otherwise it's a tuple of floats.
"""
if self.needs_feedback:
if self.multi_objective and isinstance(reward, float):
reward = (reward,)
elif not self.multi_objective and isinstance(reward, tuple):
if len(reward) != 1:
raise ValueError(
f'{self!r} is single objective, but the reward {reward!r} '
f'contains multiple objectives.')
reward = reward[0]
self._feedback(dna, reward)
self._num_feedbacks += 1
def _feedback(self, dna: DNA, reward: Union[float, Tuple[float]]) -> None:
"""Actual feedback method which should be implemented by the child class.
The default implementation is no-op.
Args:
dna: a DNA object.
reward: reward for the DNA. It is a float if `self.multi_objective`
returns False, otherwise it's a tuple of floats.
"""
def recover(
self,
history: Iterable[Tuple[DNA, Union[None, float, Tuple[float]]]]
) -> None:
"""Recover states by replaying the proposal history.
NOTE: `recover` will always be called before first `propose` and could be
    called multiple times if there are multiple sources of history, e.g. trials
from a previous study and existing trials from current study.
Args:
history: An iterable object that consists of historically proposed DNA
with its reward. The reward will be None if it is not yet provided
(via feedback).
"""
for i, (dna, reward) in enumerate(history):
self._replay(i, dna, reward)
self._num_proposals += 1
if reward is not None:
self._num_feedbacks += 1
def _replay(
self,
trial_id: int,
dna: DNA,
reward: Union[None, float, Tuple[float]]):
"""Replay a single DNA from the history for state recovery.
    The default implementation is to call `DNAGenerator._feedback`. Subclasses that
have states and can be recovered from replaying the history should override
this method. See class `Sweeping` as an example.
Args:
trial_id: A zero-based integer as the trial ID for the DNA.
dna: A historically proposed DNA.
reward: The reward for the DNA. If None, the reward is not yet fed back
to the optimizer.
"""
del trial_id
if reward is not None:
self._feedback(dna, reward)
def __iter__(self) -> Iterator[
Union[DNA,
Tuple[DNA, Callable[[Union[float, Tuple[float]]], None]]]]:
"""Iterates DNA generated from current DNAGenerator.
NOTE(daiyip): `setup` needs to be called first before a DNAGenerator can
be iterated.
Yields:
A tuple of (DNA, feedback) if current DNAGenerator requires feedback,
otherwise DNA.
"""
while True:
try:
dna = self.propose()
if self.needs_feedback:
feedback = lambda r: self.feedback(dna, r)
yield (dna, feedback)
else:
yield dna
except StopIteration:
break
|
()
|
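A minimal sketch of the generator protocol described above. It uses the built-in `pg.geno.Random` plus the `pg.Dict`/`pg.oneof`/`pg.dna_spec` helpers (assumed from pyglove's public API), and a custom subclass that only overrides `_propose`; the no-argument `iter_dna()` call is assumed to start from the first DNA of the space:
import pyglove as pg

spec = pg.dna_spec(pg.Dict(x=pg.oneof(['a', 'b', 'c'])))

generator = pg.geno.Random(seed=1)
generator.setup(spec)
dna = generator.propose()             # A random DNA conforming to `spec`.
generator.feedback(dna, 0.5)          # No-op here: Random does not use rewards.

class Sweep(pg.geno.DNAGenerator):
  """Sketch of a custom generator that enumerates the space in order."""

  def _setup(self):
    self._it = self.dna_spec.iter_dna()   # Assumed to iterate from the first DNA.

  def _propose(self):
    return next(self._it)                 # Raises StopIteration when exhausted.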
40,454 |
pyglove.core.geno.dna_generator
|
__iter__
|
Iterates DNA generated from current DNAGenerator.
NOTE(daiyip): `setup` needs to be called first before a DNAGenerator can
be iterated.
Yields:
A tuple of (DNA, feedback) if current DNAGenerator requires feedback,
otherwise DNA.
|
def __iter__(self) -> Iterator[
Union[DNA,
Tuple[DNA, Callable[[Union[float, Tuple[float]]], None]]]]:
"""Iterates DNA generated from current DNAGenerator.
NOTE(daiyip): `setup` needs to be called first before a DNAGenerator can
be iterated.
Yields:
A tuple of (DNA, feedback) if current DNAGenerator requires feedback,
otherwise DNA.
"""
while True:
try:
dna = self.propose()
if self.needs_feedback:
feedback = lambda r: self.feedback(dna, r)
yield (dna, feedback)
else:
yield dna
except StopIteration:
break
|
(self) -> Iterator[Union[pyglove.core.geno.base.DNA, Tuple[pyglove.core.geno.base.DNA, Callable[[Union[float, Tuple[float]]], NoneType]]]]
|
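Continuing the sketch above: once `setup` is called, iterating a generator yields DNAs directly when `needs_feedback` is False, or `(dna, feedback_fn)` pairs otherwise:
sweep = Sweep()
sweep.setup(spec)
for dna in sweep:      # Sweep defines no _feedback, so plain DNAs are yielded.
  print(dna)           # e.g. DNA(0), DNA(1), DNA(2) for the three candidates.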
40,461 |
pyglove.core.geno.dna_generator
|
_feedback
|
Actual feedback method which should be implemented by the child class.
The default implementation is no-op.
Args:
dna: a DNA object.
reward: reward for the DNA. It is a float if `self.multi_objective`
returns False, otherwise it's a tuple of floats.
|
def _feedback(self, dna: DNA, reward: Union[float, Tuple[float]]) -> None:
"""Actual feedback method which should be implemented by the child class.
The default implementation is no-op.
Args:
dna: a DNA object.
reward: reward for the DNA. It is a float if `self.multi_objective`
returns False, otherwise it's a tuple of floats.
"""
|
(self, dna: pyglove.core.geno.base.DNA, reward: Union[float, Tuple[float]]) -> NoneType
|
40,470 |
pyglove.core.geno.dna_generator
|
_propose
|
Actual propose method which should be implemented by the child class.
|
def _propose(self) -> DNA:
"""Actual propose method which should be implemented by the child class."""
raise NotImplementedError()
|
(self) -> pyglove.core.geno.base.DNA
|
40,472 |
pyglove.core.geno.dna_generator
|
_replay
|
Replay a single DNA from the history for state recovery.
The default implementation is to call `DNAGenerator._feedback`. Subclasses that
have states and can be recovered from replaying the history should override
this method. See class `Sweeping` as an example.
Args:
trial_id: A zero-based integer as the trial ID for the DNA.
dna: A historically proposed DNA.
reward: The reward for the DNA. If None, the reward is not yet fed back
to the optimizer.
|
def _replay(
self,
trial_id: int,
dna: DNA,
reward: Union[None, float, Tuple[float]]):
"""Replay a single DNA from the history for state recovery.
    The default implementation is to call `DNAGenerator._feedback`. Subclasses that
have states and can be recovered from replaying the history should override
this method. See class `Sweeping` as an example.
Args:
trial_id: A zero-based integer as the trial ID for the DNA.
dna: A historically proposed DNA.
reward: The reward for the DNA. If None, the reward is not yet fed back
to the optimizer.
"""
del trial_id
if reward is not None:
self._feedback(dna, reward)
|
(self, trial_id: int, dna: pyglove.core.geno.base.DNA, reward: Union[NoneType, float, Tuple[float]])
|
40,476 |
pyglove.core.geno.dna_generator
|
_setup
|
Subclass should override this for adding additional setup logics.
|
def _setup(self) -> None:
"""Subclass should override this for adding additional setup logics."""
|
(self) -> NoneType
|
40,486 |
pyglove.core.geno.dna_generator
|
feedback
|
Feedback a completed trial to the algorithm.
Args:
dna: a DNA object.
reward: reward for the DNA. It is a float if `self.multi_objective`
returns False, otherwise it's a tuple of floats.
|
def feedback(self, dna: DNA, reward: Union[float, Tuple[float]]) -> None:
"""Feedback a completed trial to the algorithm.
Args:
dna: a DNA object.
reward: reward for the DNA. It is a float if `self.multi_objective`
returns False, otherwise it's a tuple of floats.
"""
if self.needs_feedback:
if self.multi_objective and isinstance(reward, float):
reward = (reward,)
elif not self.multi_objective and isinstance(reward, tuple):
if len(reward) != 1:
raise ValueError(
f'{self!r} is single objective, but the reward {reward!r} '
f'contains multiple objectives.')
reward = reward[0]
self._feedback(dna, reward)
self._num_feedbacks += 1
|
(self, dna: pyglove.core.geno.base.DNA, reward: Union[float, Tuple[float]]) -> NoneType
|
40,491 |
pyglove.core.geno.dna_generator
|
propose
|
Propose a DNA to evaluate.
|
def propose(self) -> DNA:
"""Propose a DNA to evaluate."""
dna = self._propose()
self._num_proposals += 1
return dna
|
(self) -> pyglove.core.geno.base.DNA
|
40,493 |
pyglove.core.geno.dna_generator
|
recover
|
Recover states by replaying the proposal history.
NOTE: `recover` will always be called before first `propose` and could be
called multiple times if there are multiple sources of history, e.g. trials
from a previous study and existing trials from current study.
Args:
history: An iterable object that consists of historically proposed DNA
with its reward. The reward will be None if it is not yet provided
(via feedback).
|
def recover(
self,
history: Iterable[Tuple[DNA, Union[None, float, Tuple[float]]]]
) -> None:
"""Recover states by replaying the proposal history.
NOTE: `recover` will always be called before first `propose` and could be
    called multiple times if there are multiple sources of history, e.g. trials
from a previous study and existing trials from current study.
Args:
history: An iterable object that consists of historically proposed DNA
with its reward. The reward will be None if it is not yet provided
(via feedback).
"""
for i, (dna, reward) in enumerate(history):
self._replay(i, dna, reward)
self._num_proposals += 1
if reward is not None:
self._num_feedbacks += 1
|
(self, history: Iterable[Tuple[pyglove.core.geno.base.DNA, Union[NoneType, float, Tuple[float]]]]) -> NoneType
|
40,497 |
pyglove.core.geno.dna_generator
|
setup
|
Setup DNA spec.
|
def setup(self, dna_spec: DNASpec) -> None:
"""Setup DNA spec."""
self._dna_spec = dna_spec
self._num_proposals = 0
self._num_feedbacks = 0
self._setup()
|
(self, dna_spec: pyglove.core.geno.base.DNASpec) -> NoneType
|
40,527 |
pyglove.core.geno.base
|
DNASpec
|
Base class for DNA specifications (genotypes).
A DNASpec object describes the rules and tips for generating a DNA.
* :class:`pyglove.geno.Space`: Represents a space or sub-space, which
contains a list of decision points.
* :class:`pyglove.geno.DecisionPoint`: Represents a concrete decision
point.
Concrete decision points are the following:
* :class:`pyglove.geno.Choices`: Represents a single categorical
choice or multiple related categorical choices. Each candidate
is a sub-space.
* :class:`pyglove.geno.Float`: Represents a continuous float value
within a range.
* :class:`pyglove.geno.CustomDecisionPoint`: Represents the
genotype for a :class:`pyglove.hyper.CustomHyper`.
All `DecisionPoint`s provide `hints` for the DNA generator as a reference
when generating values; hints need to be serializable so they can be passed
between clients and servers.
All DNASpec types allow users to attach their own data via the `set_userdata`
method; user data is intended to be used within the same process and thus is
not required to be serializable.
|
class DNASpec(symbolic.Object):
"""Base class for DNA specifications (genotypes).
A DNASpec object describes the rules and tips for generating a DNA.
* :class:`pyglove.geno.Space`: Represents a space or sub-space, which
contains a list of decision points.
* :class:`pyglove.geno.DecisionPoint`: Represents a concrete decision
point.
Concrete decision points are the following:
* :class:`pyglove.geno.Choices`: Represents a single categorical
choice or multiple related categorical choices. Each candidate
is a sub-space.
* :class:`pyglove.geno.Float`: Represents a continuous float value
within a range.
* :class:`pyglove.geno.CustomDecisionPoint`: Represents the
genotype for a :class:`pyglove.hyper.CustomHyper`.
All `DecisionPoint`s provide `hints` for the DNA generator as a reference
when generating values; hints need to be serializable so they can be passed
between clients and servers.
All DNASpec types allow users to attach their own data via the `set_userdata`
method; user data is intended to be used within the same process and thus is
not required to be serializable.
"""
# Override format kwargs for __str__.
__str_format_kwargs__ = dict(
compact=True,
verbose=False,
hide_default_values=True,
hide_missing_values=True
)
# NOTE(daiyip): we disable the symbolic comparison to allow hashing DNASpec
# by object ID, therefore we can use DNASpec objects as the keys for a dict.
# This is helpful when we want to align decision points using DNASpec as
# dictionary key. Users can use `pg.eq`/`pg.ne` for symbolic comparisons
# and `pg.hash` for symbolic hashing.
use_symbolic_comparison = False
def _on_bound(self):
"""Event that is triggered when object is modified."""
super()._on_bound()
self._id = None
self._named_decision_points_cache = None
self._decision_point_by_id_cache = None
self._userdata = AttributeDict()
def _on_path_change(self, old_path, new_path):
"""Event that is triggered when path changes."""
super()._on_path_change(old_path, new_path)
# We invalidate the ID cache and decision_point_by_id cache
# when the ancestor hierarchy changes, which will force the ID and
# the cache to be recomputed upon usage.
self._id = None
self._decision_point_by_id_cache = None
@property
def _decision_point_by_id(self):
"""Returns lazy-loaded ID to decision point mapping."""
if self._decision_point_by_id_cache is None:
cache = {}
for dp in self.decision_points:
if dp.is_categorical and dp.is_subchoice:
parent_key = dp.parent_spec.id
parent_value = cache.get(parent_key, None)
if parent_value is None:
parent_value = []
cache[parent_key] = parent_value
parent_value.append(dp)
else:
cache[dp.id] = dp
self._decision_point_by_id_cache = cache
return self._decision_point_by_id_cache
@property
def _named_decision_points(self):
"""Return lazy-loaded named decision points."""
if self._named_decision_points_cache is None:
named_decision_points = {}
for dp in self.decision_points:
if dp.name is not None:
v = named_decision_points.get(dp.name, None)
if v is None:
named_decision_points[dp.name] = dp
elif isinstance(v, list):
v.append(dp)
else:
named_decision_points[dp.name] = [v, dp]
self._named_decision_points_cache = named_decision_points
return self._named_decision_points_cache
@property
@abc.abstractmethod
def is_space(self) -> bool:
"""Returns True if current node is a sub-space."""
@property
@abc.abstractmethod
def is_categorical(self) -> bool:
"""Returns True if current node is a categorical choice."""
@property
@abc.abstractmethod
def is_subchoice(self) -> bool:
"""Returns True if current node is a subchoice of a multi-choice."""
@property
@abc.abstractmethod
def is_numerical(self) -> bool:
"""Returns True if current node is numerical decision."""
@property
@abc.abstractmethod
def is_custom_decision_point(self) -> bool:
"""Returns True if current node is a custom decision point."""
@abc.abstractmethod
def validate(self, dna: 'DNA') -> bool:
"""Validate whether a DNA value conforms to this spec."""
@property
@abc.abstractmethod
def decision_points(self) -> List['DecisionPoint']:
"""Returns all decision points in their declaration order."""
@property
def decision_ids(self) -> List[object_utils.KeyPath]:
"""Returns decision IDs."""
return list(self._decision_point_by_id.keys())
@property
def named_decision_points(
self) -> Dict[str, Union['DecisionPoint', List['DecisionPoint']]]:
"""Returns all named decision points in their declaration order."""
return self._named_decision_points
@property
@abc.abstractmethod
def space_size(self) -> int:
"""Returns the size of the search space. Use -1 for infinity."""
@abc.abstractmethod
def __len__(self) -> int:
"""Returns the number of decision points."""
def first_dna(self, attach_spec: bool = True) -> 'DNA':
"""Returns the first DNA in the spec."""
return self.next_dna(None, attach_spec)
def next_dna(self,
dna: Optional['DNA'] = None,
attach_spec: bool = True) -> Optional['DNA']:
"""Returns the next DNA in the space represented by this spec.
Args:
dna: The DNA whose next will be returned. If None, `next_dna` will return
the first DNA.
attach_spec: If True, current spec will be attached to the returned DNA.
Returns:
The next DNA or None if there is no next DNA.
"""
dna = self._next_dna(dna)
if attach_spec and dna is not None:
dna.use_spec(self)
return dna
@abc.abstractmethod
def _next_dna(self, dna: Optional['DNA'] = None) -> Optional['DNA']:
"""Next DNA generation logic that should be overridden by subclasses."""
def random_dna(self,
random_generator: Union[types.ModuleType,
random.Random,
None] = None,
attach_spec: bool = True,
previous_dna: Optional['DNA'] = None) -> 'DNA':
"""Returns a random DNA based on current spec.
Args:
random_generator: An optional Random object. If None, the global random
module will be used.
attach_spec: If True, current spec will be attached to the returned DNA.
previous_dna: An optional DNA representing previous DNA. This field might
be useful for generating stateful random DNAs.
Returns:
A random DNA based on current spec.
"""
random_generator = random_generator or random
dna = self._random_dna(random_generator, previous_dna)
if attach_spec:
dna.use_spec(self)
return dna
@abc.abstractmethod
def _random_dna(self,
random_generator: Union[types.ModuleType, random.Random],
previous_dna: Optional['DNA']) -> 'DNA':
"""Random DNA generation logic that should be overridden by subclasses."""
def iter_dna(self, dna: Optional['DNA'] = None, attach_spec: bool = True):
"""Iterate the DNA in the space represented by this spec.
Args:
dna: An optional DNA as the start point (exclusive) for iteration.
attach_spec: If True, the DNASpec will be attached to the DNA returned.
Yields:
The next DNA according to the spec.
"""
while True:
dna = self.next_dna(dna, attach_spec)
if dna is None:
break
yield dna
@property
def parent_spec(self) -> Optional['DNASpec']:
"""Returns parent spec. None if spec is root."""
if self.sym_parent is None:
return None
# NOTE(daiyip):
# For child specs of Space, `self.sym_parent` points to `Space.elements`.
# For child specs of Choices, `self.sym_parent` points to
# `Choices.candidates` or `Choices._subchoice_specs`.
assert self.sym_parent.sym_parent is not None
return self.sym_parent.sym_parent # pytype: disable=bad-return-type
@property
def parent_choice(self) -> Optional['DecisionPoint']:
"""Returns the parent choice of current space."""
if self.parent_spec is None:
return None
return self.parent_spec if self.is_space else self.parent_spec.parent_choice
@property
def id(self) -> object_utils.KeyPath:
"""Returns a path of locations from the root as the ID for current node."""
if self._id is None:
parent = self.parent_spec
if parent is None:
self._id = self.location
elif self.is_space:
assert parent.is_categorical, parent
assert self.index is not None
self._id = object_utils.KeyPath(
ConditionalKey(self.index, len(parent.candidates)),
parent.id) + self.location
else:
# Float() or a multi-choice spec of a parent Choice.
self._id = parent.id + self.location
return self._id
def get(self,
name_or_id: Union[object_utils.KeyPath, str],
default: Any = None
) -> Union['DecisionPoint', List['DecisionPoint']]:
"""Get decision point(s) by name or ID."""
try:
return self[name_or_id]
except KeyError:
return default
def __getitem__(
self,
name_or_id: Union[object_utils.KeyPath, str]
) -> Union['DecisionPoint', List['DecisionPoint']]:
"""Get decision point(s) by name or ID ."""
v = self._named_decision_points.get(name_or_id, None)
if v is None:
v = self._decision_point_by_id[name_or_id]
return v
def set_userdata(self, key: str, value: Any) -> None:
"""Sets user data.
User data can be used for storing state associated with the DNASpec, and
is not persisted across processes or during serialization. Use `hints` to
carry persistent objects for the DNASpec.
Args:
key: Key of the user data.
value: Value of the user data.
"""
self._userdata[key] = value
@property
def userdata(self) -> AttributeDict:
"""Gets user data."""
return self._userdata
@classmethod
def from_json(cls, json_value, *args, **kwargs) -> symbolic.Object:
"""Override from_json for backward compatibility with serialized data."""
assert isinstance(json_value, dict)
json_value.pop('userdata', None)
return super().from_json(json_value, *args, **kwargs)
|
(location: pyglove.core.object_utils.value_location.KeyPath = , hints=None)
|
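A short sketch of inspecting a DNASpec, assuming `pg.dna_spec` converts a hyper value into a genotype as in the public PyGlove API::
import pyglove as pg

spec = pg.dna_spec(pg.Dict(
    x=pg.oneof([1, 2, 3]),
    y=pg.oneof(['a', 'b']),
))

# Concrete decision points (Choices, Float, ...) in declaration order.
for dp in spec.decision_points:
  print(dp.id, dp.is_categorical, dp.is_numerical)

print(spec.decision_ids)   # IDs usable with spec[...] / spec.get(...).
print(len(spec))           # 2 decision points.
print(spec.space_size)     # 6 = 3 * 2 combinations.
print(spec.first_dna())    # DNA of the first point in the space.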
40,533 |
pyglove.core.geno.base
|
__getitem__
|
Get decision point(s) by name or ID.
|
def __getitem__(
self,
name_or_id: Union[object_utils.KeyPath, str]
) -> Union['DecisionPoint', List['DecisionPoint']]:
"""Get decision point(s) by name or ID ."""
v = self._named_decision_points.get(name_or_id, None)
if v is None:
v = self._decision_point_by_id[name_or_id]
return v
|
(self, name_or_id: Union[pyglove.core.object_utils.value_location.KeyPath, str]) -> Union[pyglove.core.geno.base.DecisionPoint, List[pyglove.core.geno.base.DecisionPoint]]
|
40,536 |
pyglove.core.geno.base
|
__init__
| null |
@symbolic.members([
('name', pg_typing.Str().noneable(),
('Name of current node. If present, it should be unique in the search '
'space. We can use `root_spec[name]` to access named DNASpec.')),
])
class DecisionPoint(DNASpec):
"""Base class for decision points.
Child classes:
* :class:`pyglove.geno.Choices`
* :class:`pyglove.geno.Float`
* :class:`pyglove.geno.CustomDecisionPoint`
"""
@property
def is_space(self) -> bool:
return False
|
(self, location: pyglove.core.object_utils.value_location.KeyPath = , hints=None)
|
40,537 |
pyglove.core.geno.base
|
__len__
|
Returns the number of decision points.
|
@abc.abstractmethod
def __len__(self) -> int:
"""Returns the number of decision points."""
|
(self) -> int
|
40,546 |
pyglove.core.geno.base
|
_next_dna
|
Next DNA generation logic that should be overridden by subclasses.
|
@abc.abstractmethod
def _next_dna(self, dna: Optional['DNA'] = None) -> Optional['DNA']:
"""Next DNA generation logic that should be overridden by subclasses."""
|
(self, dna: Optional[pyglove.core.geno.base.DNA] = None) -> Optional[pyglove.core.geno.base.DNA]
|
40,548 |
pyglove.core.geno.base
|
_on_bound
|
Event that is triggered when object is modified.
|
def _on_bound(self):
"""Event that is triggered when object is modified."""
super()._on_bound()
self._id = None
self._named_decision_points_cache = None
self._decision_point_by_id_cache = None
self._userdata = AttributeDict()
|
(self)
|
40,552 |
pyglove.core.geno.base
|
_on_path_change
|
Event that is triggered when path changes.
|
def _on_path_change(self, old_path, new_path):
"""Event that is triggered when path changes."""
super()._on_path_change(old_path, new_path)
# We invalidate the ID cache and decision_point_by_id cache
# when the ancestor hierarchy changes, which will force the ID and
# the cache to be recomputed upon usage.
self._id = None
self._decision_point_by_id_cache = None
|
(self, old_path, new_path)
|
40,553 |
pyglove.core.geno.base
|
_random_dna
|
Random DNA generation logic that should be overridden by subclasses.
|
@abc.abstractmethod
def _random_dna(self,
random_generator: Union[types.ModuleType, random.Random],
previous_dna: Optional['DNA']) -> 'DNA':
"""Random DNA generation logic that should be overridden by subclasses."""
|
(self, random_generator: Union[module, random.Random], previous_dna: Optional[pyglove.core.geno.base.DNA]) -> pyglove.core.geno.base.DNA
|
40,567 |
pyglove.core.geno.base
|
first_dna
|
Returns the first DNA in the spec.
|
def first_dna(self, attach_spec: bool = True) -> 'DNA':
"""Returns the first DNA in the spec."""
return self.next_dna(None, attach_spec)
|
(self, attach_spec: bool = True) -> pyglove.core.geno.base.DNA
|
40,569 |
pyglove.core.geno.base
|
get
|
Get decision point(s) by name or ID.
|
def get(self,
name_or_id: Union[object_utils.KeyPath, str],
default: Any = None
) -> Union['DecisionPoint', List['DecisionPoint']]:
"""Get decision point(s) by name or ID."""
try:
return self[name_or_id]
except KeyError:
return default
|
(self, name_or_id: Union[pyglove.core.object_utils.value_location.KeyPath, str], default: Optional[Any] = None) -> Union[pyglove.core.geno.base.DecisionPoint, List[pyglove.core.geno.base.DecisionPoint]]
|
40,571 |
pyglove.core.geno.base
|
iter_dna
|
Iterate the DNA in the space represented by this spec.
Args:
dna: An optional DNA as the start point (exclusive) for iteration.
attach_spec: If True, the DNASpec will be attached to the DNA returned.
Yields:
The next DNA according to the spec.
|
def iter_dna(self, dna: Optional['DNA'] = None, attach_spec: bool = True):
"""Iterate the DNA in the space represented by this spec.
Args:
dna: An optional DNA as the start point (exclusive) for iteration.
attach_spec: If True, the DNASpec will be attached to the DNA returned.
Yields:
The next DNA according to the spec.
"""
while True:
dna = self.next_dna(dna, attach_spec)
if dna is None:
break
yield dna
|
(self, dna: Optional[pyglove.core.geno.base.DNA] = None, attach_spec: bool = True)
|
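A sketch of walking a small space, assuming `pg.dna_spec` and `pg.oneof` as above; `iter_dna` is simply repeated `next_dna` until it returns None::
import pyglove as pg

spec = pg.dna_spec(pg.oneof(['relu', 'tanh', 'sigmoid']))  # Assumed helper.

print([str(dna) for dna in spec.iter_dna()])  # DNA(0), DNA(1), DNA(2).

# The same walk written out with first_dna/next_dna.
dna = spec.first_dna()
while dna is not None:
  print(dna)
  dna = spec.next_dna(dna)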
40,573 |
pyglove.core.geno.base
|
next_dna
|
Returns the next DNA in the space represented by this spec.
Args:
dna: The DNA whose next will be returned. If None, `next_dna` will return
the first DNA.
attach_spec: If True, current spec will be attached to the returned DNA.
Returns:
The next DNA or None if there is no next DNA.
|
def next_dna(self,
dna: Optional['DNA'] = None,
attach_spec: bool = True) -> Optional['DNA']:
"""Returns the next DNA in the space represented by this spec.
Args:
dna: The DNA whose next will be returned. If None, `next_dna` will return
the first DNA.
attach_spec: If True, current spec will be attached to the returned DNA.
Returns:
The next DNA or None if there is no next DNA.
"""
dna = self._next_dna(dna)
if attach_spec and dna is not None:
dna.use_spec(self)
return dna
|
(self, dna: Optional[pyglove.core.geno.base.DNA] = None, attach_spec: bool = True) -> Optional[pyglove.core.geno.base.DNA]
|
40,575 |
pyglove.core.geno.base
|
random_dna
|
Returns a random DNA based on current spec.
Args:
random_generator: An optional Random object. If None, the global random
module will be used.
attach_spec: If True, current spec will be attached to the returned DNA.
previous_dna: An optional DNA representing previous DNA. This field might
be useful for generating stateful random DNAs.
Returns:
A random DNA based on current spec.
|
def random_dna(self,
random_generator: Union[types.ModuleType,
random.Random,
None] = None,
attach_spec: bool = True,
previous_dna: Optional['DNA'] = None) -> 'DNA':
"""Returns a random DNA based on current spec.
Args:
random_generator: An optional Random object. If None, the global random
module will be used.
attach_spec: If True, current spec will be attached to the returned DNA.
previous_dna: An optional DNA representing previous DNA. This field might
be useful for generating stateful random DNAs.
Returns:
A random DNA based on current spec.
"""
random_generator = random_generator or random
dna = self._random_dna(random_generator, previous_dna)
if attach_spec:
dna.use_spec(self)
return dna
|
(self, random_generator: Union[module, random.Random, NoneType] = None, attach_spec: bool = True, previous_dna: Optional[pyglove.core.geno.base.DNA] = None) -> pyglove.core.geno.base.DNA
|
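A sketch of reproducible sampling via an explicit `random.Random`, assuming `pg.dna_spec`, `pg.oneof` and `pg.floatv` behave as in the public PyGlove API::
import random
import pyglove as pg

spec = pg.dna_spec(pg.Dict(
    x=pg.oneof(list(range(10))),
    y=pg.floatv(0.0, 1.0),
))

dna1 = spec.random_dna(random.Random(1))
dna2 = spec.random_dna(random.Random(1))
print(str(dna1) == str(dna2))  # True: same seed, same DNA.
print(spec.random_dna())       # Falls back to the global `random` module.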
40,580 |
pyglove.core.geno.base
|
set_userdata
|
Sets user data.
User data can be used for storing state associated with the DNASpec, and
is not persisted across processes or during serialization. Use `hints` to
carry persistent objects for the DNASpec.
Args:
key: Key of the user data.
value: Value of the user data.
|
def set_userdata(self, key: str, value: Any) -> None:
"""Sets user data.
User data can be used for storing state associated with the DNASpec, and
is not persisted across processes or during serialization. Use `hints` to
carry persistent objects for the DNASpec.
Args:
key: Key of the user data.
value: Value of the user data.
"""
self._userdata[key] = value
|
(self, key: str, value: Any) -> NoneType
|
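A tiny sketch of process-local user data on a DNASpec (contrast with `hints`, which are serialized); `pg.dna_spec` is assumed as above::
import pyglove as pg

spec = pg.dna_spec(pg.oneof([1, 2, 3]))
spec.set_userdata('owner', 'tuner-7')        # Not serialized with the spec.
spec.set_userdata('stats', {'proposals': 0})
print(spec.userdata['owner'], spec.userdata['stats'])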
40,610 |
pyglove.core.geno.base
|
validate
|
Validate whether a DNA value conforms to this spec.
|
@abc.abstractmethod
def validate(self, dna: 'DNA') -> bool:
"""Validate whether a DNA value conforms to this spec."""
|
(self, dna: pyglove.core.geno.base.DNA) -> bool
|
40,611 |
pyglove.core.symbolic.dict
|
Dict
|
Symbolic dict.
``pg.Dict`` implements a dict type whose instances are symbolically
programmable, which is a subclass of the built-in Python ``dict`` and
a subclass of :class:`pyglove.Symbolic`.
``pg.Dict`` provides the following features:
* It is a symbolically programmable dict with string keys.
* It enables attribute access on dict keys.
* It supports symbolic validation and value completion based on a schema.
* It provides events to handle sub-nodes changes.
``pg.Dict`` can be used as a regular dict with string keys::
# Construct a symbolic dict from key value pairs.
d = pg.Dict(x=1, y=2)
or::
# Construct a symbolic dict from a mapping object.
d = pg.Dict({'x': 1, 'y': 2})
Besides regular item access using ``[]``, it allows attribute access
to its keys::
# Read access to key `x`.
assert d.x == 1
# Write access to key 'y'.
d.y = 1
``pg.Dict`` supports symbolic validation when the ``value_spec`` argument
is provided::
d = pg.Dict(x=1, y=2, value_spec=pg.typing.Dict([
('x', pg.typing.Int(min_value=1)),
('y', pg.typing.Int(min_value=1)),
(pg.typing.StrKey('foo.*'), pg.typing.Str())
]))
# Okay: all keys starting with 'foo' are acceptable and must be strings.
d.foo1 = 'abc'
# Raises: 'bar' is not an acceptable key in the dict.
d.bar = 'abc'
Users can mutate the values contained in it::
d = pg.Dict(x=pg.Dict(y=1), p=pg.List([0]))
d.rebind({
'x.y': 2,
'p[0]': 1
})
It also allows users to subscribe to subtree updates::
def on_change(updates):
print(updates)
d = pg.Dict(x=1, onchange_callback=on_change)
# `on_change` will be triggered on item insertion.
d['y'] = {'z': 1}
# `on_change` will be triggered on item removal.
del d.x
# `on_change` will also be triggered on subtree change.
d.rebind({'y.z': 2})
|
class Dict(dict, base.Symbolic, pg_typing.CustomTyping):
"""Symbolic dict.
``pg.Dict`` implements a dict type whose instances are symbolically
programmable, which is a subclass of the built-in Python ``dict`` and
a subclass of :class:`pyglove.Symbolic`.
``pg.Dict`` provides the following features:
* It is a symbolically programmable dict with string keys.
* It enables attribute access on dict keys.
* It supports symbolic validation and value completion based on a schema.
* It provides events to handle sub-nodes changes.
``pg.Dict`` can be used as a regular dict with string keys::
# Construct a symbolic dict from key value pairs.
d = pg.Dict(x=1, y=2)
or::
# Construct a symbolic dict from a mapping object.
d = pg.Dict({'x': 1, 'y': 2})
Besides regular item access using ``[]``, it allows attribute access
to its keys::
# Read access to key `x`.
assert d.x == 1
# Write access to key 'y'.
d.y = 1
``pg.Dict`` supports symbolic validation when the ``value_spec`` argument
is provided::
d = pg.Dict(x=1, y=2, value_spec=pg.typing.Dict([
('x', pg.typing.Int(min_value=1)),
('y', pg.typing.Int(min_value=1)),
(pg.typing.StrKey('foo.*'), pg.typing.Str())
]))
# Okay: all keys starting with 'foo' are acceptable and must be strings.
d.foo1 = 'abc'
# Raises: 'bar' is not an acceptable key in the dict.
d.bar = 'abc'
Users can mutate the values contained in it::
d = pg.Dict(x=pg.Dict(y=1), p=pg.List([0]))
d.rebind({
'x.y': 2,
'p[0]': 1
})
It also allows users to subscribe to subtree updates::
def on_change(updates):
print(updates)
d = pg.Dict(x=1, onchange_callback=on_change)
# `on_change` will be triggered on item insertion.
d['y'] = {'z': 1}
# `on_change` will be triggered on item removal.
del d.x
# `on_change` will also be triggered on subtree change.
d.rebind({'y.z': 2})
"""
@classmethod
def partial(cls,
dict_obj: Optional[typing.Dict[str, Any]] = None,
value_spec: Optional[pg_typing.Dict] = None,
*,
onchange_callback: Optional[Callable[
[typing.Dict[object_utils.KeyPath, base.FieldUpdate]], None]
] = None, # pylint: disable=bad-continuation
**kwargs) -> 'Dict':
"""Class method that creates a partial Dict object."""
return cls(dict_obj,
value_spec=value_spec,
onchange_callback=onchange_callback,
allow_partial=True,
**kwargs)
@classmethod
def from_json(cls,
json_value: Any,
*,
value_spec: Optional[pg_typing.Dict] = None,
allow_partial: bool = False,
root_path: Optional[object_utils.KeyPath] = None,
**kwargs) -> 'Dict':
"""Class method that load an symbolic Dict from a JSON value.
Args:
json_value: Input JSON value, only JSON dict is acceptable.
value_spec: An optional value spec to apply.
allow_partial: Whether to allow members of the dict to be partial.
root_path: KeyPath of loaded object in its object tree.
**kwargs: Allow passing through keyword arguments that are not applicable.
Returns:
A schemaless symbolic dict. For example::
d = Dict.from_json({
'a': {
'_type': '__main__.Foo',
'f1': 1,
'f2': {
'f21': True
}
}
})
assert d.value_spec is None
# Okay:
d.b = 1
# a.f2 is bound by class Foo's field 'f2' definition (assume it defines
# a schema for the Dict field).
assert d.a.f2.value_spec is not None
# Not okay:
d.a.f2.abc = 1
"""
return cls(json_value,
value_spec=value_spec,
allow_partial=allow_partial,
root_path=root_path)
def __init__(self,
dict_obj: Union[
None,
Iterable[Tuple[str, Any]],
typing.Dict[str, Any]] = None,
*,
value_spec: Optional[pg_typing.Dict] = None,
onchange_callback: Optional[Callable[
[typing.Dict[object_utils.KeyPath, base.FieldUpdate]], None]
] = None, # pylint: disable=bad-continuation
**kwargs):
"""Constructor.
Args:
dict_obj: A dict as initial value for this Dict.
value_spec: Value spec that applies to this Dict.
onchange_callback: Callback when sub-tree has been modified.
**kwargs: Key-value pairs that will be inserted into the dict as its
initial value, which provides syntactic sugar for usage such as:
d = pg.Dict(a=1, b=2)
"""
if value_spec and not isinstance(value_spec, pg_typing.Dict):
raise TypeError(
f'Argument \'value_spec\' must be a `pg.typing.Dict` object. '
f'Encountered {value_spec}')
allow_partial = kwargs.pop('allow_partial', False)
accessor_writable = kwargs.pop('accessor_writable', True)
sealed = kwargs.pop('sealed', False)
root_path = kwargs.pop('root_path', None)
# Skip schema check when dict_obj is validated against
# schema externally. This flag is helpful to avoid duplicated schema
# check in nested structures, which takes effect only when value_spec
# is not None.
pass_through = kwargs.pop('pass_through', False)
# If True, the parent of dict items should be set to `self.sym_parent`.
# This is useful when Dict is used as the field container of
# pg.Object.
self._set_raw_attr(
'_as_object_attributes_container',
kwargs.pop('as_object_attributes_container', False),
)
# We copy the symbolic form of dict values instead of their evaluated
# values.
if isinstance(dict_obj, Dict):
dict_obj = {k: v for k, v in dict_obj.sym_items()}
elif dict_obj is not None:
dict_obj = dict(dict_obj)
# NOTE(daiyip): we call __init__ of superclasses explicitly instead of
# calling super().__init__(...) since dict.__init__ does
# not follow super(...).__init__ fashion, which will lead to
# Symbolic.__init__ uncalled.
base.Symbolic.__init__(
self,
allow_partial=allow_partial,
accessor_writable=True,
# We delay seal operation until members are filled.
sealed=False,
root_path=root_path)
dict.__init__(self)
self._value_spec = None
self._onchange_callback = None
# NOTE(daiyip): values in kwargs take precedence over dict_obj.
dict_obj = dict_obj or {}
for k, v in kwargs.items():
dict_obj[k] = v
if value_spec:
if pass_through:
for k, v in dict_obj.items():
super().__setitem__(k, self._relocate_if_symbolic(k, v))
# NOTE(daiyip): when pass_through is on, we simply trust input
# dict is validated and filled with values of their final form (
# symbolic Dict/List vs. dict/list). This prevents members from
# repeated validation and transformation.
self._value_spec = value_spec
else:
for k, v in dict_obj.items():
super().__setitem__(k, self._formalized_value(k, None, v))
self.use_value_spec(value_spec, allow_partial)
else:
for k, v in dict_obj.items():
self._set_item_without_permission_check(k, v)
# NOTE(daiyip): We set onchange callback at the end of init to avoid
# triggering during initialization.
self._onchange_callback = onchange_callback
self.set_accessor_writable(accessor_writable)
self.seal(sealed)
@property
def value_spec(self) -> Optional[pg_typing.Dict]:
"""Returns value spec of this dict.
NOTE(daiyip): If this dict is schema-less, value_spec will be None.
"""
return self._value_spec
def use_value_spec(self,
value_spec: Optional[pg_typing.Dict],
allow_partial: bool = False) -> 'Dict':
"""Applies a ``pg.typing.Dict`` as the value spec for current dict.
Args:
value_spec: A Dict ValueSpec to apply to this Dict.
If current Dict is schema-less (whose immediate members are not
validated against schema), and `value_spec` is not None, the value spec
will be applied to the Dict.
Otherwise, if current Dict is already bound with a value spec (whose
immediate members are under the constraint of a Dict value spec), and
`value_spec` is None, current Dict will become schema-less. However, the
schema constraints for non-immediate members will remain.
allow_partial: Whether to allow a partial dict based on the schema. This
flag overrides the allow_partial flag passed to __init__ for a spec-less
Dict.
Returns:
Self.
Raises:
ValueError: validation failed due to value error.
RuntimeError: Dict is already bound with another spec.
TypeError: type errors during validation.
KeyError: key errors during validation.
"""
if value_spec is None:
self._value_spec = None
self._accessor_writable = True
return self
if not isinstance(value_spec, pg_typing.Dict):
raise ValueError(
self._error_message(
f'Value spec for Dict must be a `pg.typing.Dict` object. '
f'Encountered: {value_spec!r}'))
if self._value_spec and self._value_spec != value_spec:
raise RuntimeError(
self._error_message(
f'Dict is already bound with a different value spec: '
f'{self._value_spec}. New value spec: {value_spec}.'))
self._allow_partial = allow_partial
if flags.is_type_check_enabled():
# NOTE(daiyip): self._value_spec will be set in Dict.custom_apply method
# called by value_spec.apply, thus we don't need to set self._value_spec
# explicitly.
value_spec.apply(
self,
allow_partial=base.accepts_partial(self),
child_transform=base.symbolic_transform_fn(self._allow_partial),
root_path=self.sym_path)
else:
self._value_spec = value_spec
return self
def _sym_parent_for_children(self) -> Optional[base.Symbolic]:
if self._as_object_attributes_container:
return self.sym_parent
return self
def _sym_rebind(
self, path_value_pairs: typing.Dict[object_utils.KeyPath, Any]
) -> List[base.FieldUpdate]:
"""Subclass specific rebind implementation."""
updates = []
for k, v in path_value_pairs.items():
update = self._set_item_of_current_tree(k, v)
if update is not None:
updates.append(update)
return updates
def _sym_missing(self) -> typing.Dict[str, Any]:
"""Returns missing values.
Returns:
A dict of key to MISSING_VALUE.
"""
missing = dict()
if self._value_spec and self._value_spec.schema:
matched_keys, _ = self._value_spec.schema.resolve(self.keys())
for key_spec, keys in matched_keys.items():
field = self._value_spec.schema[key_spec]
assert keys or isinstance(key_spec, pg_typing.NonConstKey), key_spec
if keys:
for key in keys:
v = self.sym_getattr(key)
if object_utils.MISSING_VALUE == v:
missing[key] = field.value.default
else:
if isinstance(v, base.Symbolic):
missing_child = v.sym_missing(flatten=False)
if missing_child:
missing[key] = missing_child
else:
for k, v in self.sym_items():
if isinstance(v, base.Symbolic):
missing_child = v.sym_missing(flatten=False)
if missing_child:
missing[k] = missing_child
return missing
def _sym_nondefault(self) -> typing.Dict[str, Any]:
"""Returns non-default values as key/value pairs in a dict."""
non_defaults = dict()
if self._value_spec is not None and self._value_spec.schema:
dict_schema = self._value_spec.schema
matched_keys, _ = dict_schema.resolve(self.keys())
for key_spec, keys in matched_keys.items():
value_spec = dict_schema[key_spec].value
for key in keys:
diff = self._diff_base(self.sym_getattr(key), value_spec.default)
if pg_typing.MISSING_VALUE != diff:
non_defaults[key] = diff
else:
for k, v in self.sym_items():
if isinstance(v, base.Symbolic):
non_defaults_child = v.sym_nondefault(flatten=False)
if non_defaults_child:
non_defaults[k] = non_defaults_child
else:
non_defaults[k] = v
return non_defaults
def _diff_base(self, value: Any, base_value: Any) -> Any:
"""Computes the diff between a value and a base value."""
if base.eq(value, base_value):
return pg_typing.MISSING_VALUE
if (isinstance(value, list)
or not isinstance(value, base.Symbolic)
or pg_typing.MISSING_VALUE == base_value):
return value
if value.__class__ is base_value.__class__:
getter = lambda x, k: x.sym_getattr(k)
elif isinstance(value, dict) and isinstance(base_value, dict):
getter = lambda x, k: x[k]
else:
return value
diff = {}
for k, v in value.sym_items():
base_v = getter(base_value, k)
child_diff = self._diff_base(v, base_v)
if pg_typing.MISSING_VALUE != child_diff:
diff[k] = child_diff
return diff
def seal(self, sealed: bool = True) -> 'Dict':
"""Seals or unseals current object from further modification."""
if self.is_sealed == sealed:
return self
for v in self.sym_values():
if isinstance(v, base.Symbolic):
v.seal(sealed)
super().seal(sealed)
return self
def sym_attr_field(
self, key: Union[str, int]
) -> Optional[pg_typing.Field]:
"""Returns the field definition for a symbolic attribute."""
if self._value_spec is None or self._value_spec.schema is None:
return None
return self._value_spec.schema.get_field(key) # pytype: disable=attribute-error
def sym_hasattr(self, key: Union[str, int]) -> bool:
"""Tests if a symbolic attribute exists."""
return key in self
def sym_keys(self) -> Iterator[str]:
"""Iterates the keys of symbolic attributes."""
if self._value_spec is None or self._value_spec.schema is None:
for key in super().__iter__():
yield key
else:
traversed = set()
for key_spec in self._value_spec.schema.keys(): # pytype: disable=attribute-error
if isinstance(key_spec, pg_typing.ConstStrKey) and key_spec in self:
yield key_spec.text
traversed.add(key_spec.text)
if len(traversed) < len(self):
for key in super().__iter__():
if key not in traversed:
yield key
def sym_values(self) -> Iterator[Any]:
"""Iterates the values of symbolic attributes."""
for k in self.sym_keys():
yield self._sym_getattr(k)
def sym_items(self) -> Iterator[
Tuple[str, Any]]:
"""Iterates the (key, value) pairs of symbolic attributes."""
for k in self.sym_keys():
yield k, self._sym_getattr(k)
def sym_setparent(self, parent: base.Symbolic):
"""Override set parent of Dict to handle the passing through scenario."""
super().sym_setparent(parent)
# NOTE(daiyip): when flag `as_object_attributes_container` is on, it sets
# the parent of child symbolic values using its parent.
if self._as_object_attributes_container:
for v in self.sym_values():
if isinstance(v, base.TopologyAware):
v.sym_setparent(parent)
def sym_hash(self) -> int:
"""Symbolic hashing."""
return base.sym_hash(
(self.__class__,
tuple([(k, base.sym_hash(v)) for k, v in self.sym_items()
if v != pg_typing.MISSING_VALUE])))
def _sym_getattr( # pytype: disable=signature-mismatch # overriding-parameter-type-checks
self, key: str) -> Any:
"""Gets symbolic attribute by key."""
return super().__getitem__(key)
def _sym_clone(self, deep: bool, memo=None) -> 'Dict':
"""Override Symbolic._sym_clone."""
source = dict()
for k, v in self.sym_items():
if deep or isinstance(v, base.Symbolic):
v = base.clone(v, deep, memo)
source[k] = v
return Dict(
source,
value_spec=self._value_spec,
allow_partial=self._allow_partial,
accessor_writable=self._accessor_writable,
sealed=self._sealed,
onchange_callback=self._onchange_callback,
# NOTE(daiyip): parent and root_path are reset to empty
# for copy object.
root_path=None,
pass_through=True)
def _update_children_paths(
self,
old_path: object_utils.KeyPath,
new_path: object_utils.KeyPath) -> None:
"""Update children paths according to root_path of current node."""
del old_path
for k, v in self.sym_items():
if isinstance(v, base.TopologyAware):
v.sym_setpath(object_utils.KeyPath(k, new_path))
def _set_item_without_permission_check( # pytype: disable=signature-mismatch # overriding-parameter-type-checks
self, key: str, value: Any) -> Optional[base.FieldUpdate]:
"""Set item without permission check."""
if not isinstance(key, str):
raise KeyError(self._error_message(
f'Key must be string type. Encountered {key!r}.'))
old_value = self.get(key, pg_typing.MISSING_VALUE)
if old_value is value:
return None
field = None
if self._value_spec and self._value_spec.schema:
field = self._value_spec.schema.get_field(key)
if not field:
if (self.sym_parent is not None
and self.sym_parent.sym_path == self.sym_path):
container_cls = self.sym_parent.__class__
else:
container_cls = self.__class__
raise KeyError(
self._error_message(
f'Key \'{key}\' is not allowed for {container_cls}.'))
# Detach old value from object tree.
if isinstance(old_value, base.TopologyAware):
old_value.sym_setparent(None)
old_value.sym_setpath(object_utils.KeyPath())
if (pg_typing.MISSING_VALUE == value and
(not field or isinstance(field.key, pg_typing.NonConstKey))):
if key in self:
# Using pg.MISSING_VALUE for deleting keys.
super().__delitem__(key)
new_value = pg_typing.MISSING_VALUE
else:
# This condition could trigger when copying a partial Dict to a Dict
# without schema.
return None
else:
new_value = self._formalized_value(key, field, value)
super().__setitem__(key, new_value)
# NOTE(daiyip): If current dict is the field dict of a symbolic object,
# Use parent object as update target.
target = self
if (self.sym_parent is not None
and self.sym_parent.sym_path == self.sym_path):
target = self.sym_parent
return base.FieldUpdate(
self.sym_path + key, target, field, old_value, new_value)
def _formalized_value(self, name: str,
field: Optional[pg_typing.Field],
value: Any) -> Any:
"""Get transformed (formal) value from user input."""
allow_partial = base.accepts_partial(self)
if field and pg_typing.MISSING_VALUE == value:
# NOTE(daiyip): default value is already in transformed form.
value = field.default_value
else:
value = base.from_json(
value,
allow_partial=allow_partial,
root_path=object_utils.KeyPath(name, self.sym_path))
if field and flags.is_type_check_enabled():
value = field.apply(
value,
allow_partial=allow_partial,
transform_fn=base.symbolic_transform_fn(self._allow_partial),
root_path=object_utils.KeyPath(name, self.sym_path))
return self._relocate_if_symbolic(name, value)
@property
def _subscribes_field_updates(self) -> bool:
"""Returns True if current dict subscribes field updates."""
return self._onchange_callback is not None
def _on_change(self, field_updates: typing.Dict[object_utils.KeyPath,
base.FieldUpdate]):
"""On change event of Dict."""
if self._onchange_callback:
self._onchange_callback(field_updates)
def _init_kwargs(self) -> typing.Dict[str, Any]:
kwargs = super()._init_kwargs()
if not self._accessor_writable:
kwargs['accessor_writable'] = False
if self._onchange_callback is not None:
kwargs['onchange_callback'] = self._onchange_callback
# NOTE(daiyip): We do not serialize ValueSpec for now as in most use
# cases they come from the subclasses of `pg.Object`.
return kwargs
def __getstate__(self) -> Any:
"""Customizes pickle.dump."""
return dict(value=dict(self), kwargs=self._init_kwargs())
def __setstate__(self, state) -> None:
"""Customizes pickle.load."""
self.__init__(state['value'], **state['kwargs'])
def __getitem__(self, key: str) -> Any:
"""Get item in this Dict."""
try:
return self.sym_inferred(key)
except AttributeError as e:
raise KeyError(key) from e
def __setitem__(self, key: str, value: Any) -> None:
"""Set item in this Dict.
Args:
key: String key. (Note that key paths are not supported.)
value: Value to be inserted.
Raises:
WritePermissionError: when Dict cannot be modified by accessor or
is sealed.
KeyError: Key is not allowed according to the value spec.
ValueError: Value is not acceptable according to the value spec.
"""
# NOTE(daiyip): THIS IS A WORKAROUND FOR WORKING WITH PICKLE.
# `pg.Dict` is a subclass of `dict`, therefore, when pickle loads a dict,
# it will try to set its items directly by calling `__setitem__` without
# calling `pg.Dict.__init__` in the first place. As a result, an error will
# be raised complaining that an attribute set up during `__init__` is
# not available. A mitigation to this issue is to detect such calls in
# `__setitem__` as follows, and simply do nothing, which will give a
# chance to `pg.Dict.__getstate__` to deal with the restoration logic as
# an object (instead of a dict).
if not hasattr(self, '_sym_parent'):
return
if base.treats_as_sealed(self):
raise base.WritePermissionError(
self._error_message('Cannot modify field of a sealed Dict.'))
if not base.writtable_via_accessors(self):
raise base.WritePermissionError(
self._error_message(
'Cannot modify Dict field by attribute or key while '
'accessor_writable is set to False. '
'Use \'rebind\' method instead.'))
update = self._set_item_without_permission_check(key, value)
if flags.is_change_notification_enabled() and update:
self._notify_field_updates([update])
def __setattr__(self, name: str, value: Any) -> None:
"""Set attribute of this Dict.
NOTE(daiyip): When setting attributes, public attributes (not started with
'_') are set as dict fields, while private attributes (started with '_') are
set on the object instance.
Args:
name: Name of attribute.
value: Value of attribute.
"""
if name.startswith('_'):
super().__setattr__(name, value)
else:
self[name] = value
def __delitem__(self, name: str) -> None:
"""Delete a key from the Dict.
This is used to delete a key which resolves to a pg.typing.NonConstKey.
Args:
name: Key to delete.
Raises:
WritePermissionError: When Dict is sealed.
KeyError: When key is not a NonConstKey.
"""
if base.treats_as_sealed(self):
raise base.WritePermissionError('Cannot del item from a sealed Dict.')
if not base.writtable_via_accessors(self):
raise base.WritePermissionError(
self._error_message('Cannot del Dict field by attribute or key while '
'accessor_writable is set to False. '
'Use \'rebind\' method instead.'))
if name not in self:
raise KeyError(
self._error_message(f'Key does not exist in Dict: {name!r}.'))
update = self._set_item_without_permission_check(
name, pg_typing.MISSING_VALUE)
if flags.is_change_notification_enabled() and update:
self._notify_field_updates([update])
def __delattr__(self, name: str) -> None:
"""Delete an attribute."""
del self[name]
def __getattr__(self, name: str) -> Any:
"""Get attribute that is not defined as property."""
if name in self:
return self.sym_inferred(name)
raise AttributeError(
f'Attribute \'{name}\' does not exist in {self.__class__!r}.')
def __iter__(self):
"""Iterate keys in field declaration order."""
return self.sym_keys()
def keys(self) -> Iterator[str]: # pytype: disable=signature-mismatch
"""Returns an iterator of keys in current dict."""
return self.sym_keys()
def items(self) -> Iterator[Tuple[str, Any]]: # pytype: disable=signature-mismatch
"""Returns an iterator of (key, value) items in current dict."""
return self.sym_items()
def values(self) -> Iterator[Any]: # pytype: disable=signature-mismatch
"""Returns an iterator of values in current dict.."""
return self.sym_values()
def copy(self) -> 'Dict':
"""Overridden copy using symbolic copy."""
return self.sym_clone(deep=False)
def pop(
self, key: Any, default: Any = base.RAISE_IF_NOT_FOUND # pylint: disable=protected-access
) -> Any:
"""Pops a key from current dict."""
if key in self:
value = self[key]
with flags.allow_writable_accessors(True):
del self[key]
return value if value != pg_typing.MISSING_VALUE else default
if default is base.RAISE_IF_NOT_FOUND:
raise KeyError(key)
return default
def popitem(self) -> Tuple[str, Any]:
if self._value_spec is not None:
raise ValueError(
'\'popitem\' cannot be performed on a Dict with value spec.')
if base.treats_as_sealed(self):
raise base.WritePermissionError('Cannot pop item from a sealed Dict.')
return super().popitem()
def clear(self) -> None:
"""Removes all the keys in current dict."""
if base.treats_as_sealed(self):
raise base.WritePermissionError('Cannot clear a sealed Dict.')
value_spec = self._value_spec
self._value_spec = None
super().clear()
if value_spec:
self.use_value_spec(value_spec, self._allow_partial)
def setdefault(self, key: str, default: Any = None) -> Any:
"""Sets default as the value to key if not present."""
value = pg_typing.MISSING_VALUE
if key in self:
value = self.sym_getattr(key)
if value == pg_typing.MISSING_VALUE:
self[key] = default
value = default
return value
def update(self,
other: Union[
None,
typing.Dict[str, Any],
Iterable[Tuple[str, Any]]] = None,
**kwargs) -> None: # pytype: disable=signature-mismatch
"""Update Dict with the same semantic as update on standard dict."""
updates = dict(other) if other else {}
updates.update(kwargs)
self.rebind(
updates, raise_on_no_change=False, skip_notification=True)
def sym_jsonify(
self,
hide_default_values: bool = False,
exclude_keys: Optional[Sequence[str]] = None,
use_inferred: bool = False,
**kwargs) -> object_utils.JSONValueType:
"""Converts current object to a dict with plain Python objects."""
exclude_keys = set(exclude_keys or [])
if self._value_spec and self._value_spec.schema:
json_repr = dict()
matched_keys, _ = self._value_spec.schema.resolve(self.keys()) # pytype: disable=attribute-error
for key_spec, keys in matched_keys.items():
# NOTE(daiyip): The key values of frozen field can safely be excluded
# since they will be the same for a class.
field = self._value_spec.schema[key_spec]
if not field.frozen:
for key in keys:
if key not in exclude_keys:
value = self.sym_getattr(key)
if use_inferred and isinstance(value, base.Inferential):
value = self.sym_inferred(key, default=value)
if pg_typing.MISSING_VALUE == value:
continue
if hide_default_values and base.eq(value, field.default_value):
continue
json_repr[key] = base.to_json(
value, hide_default_values=hide_default_values,
use_inferred=use_inferred,
**kwargs)
return json_repr
else:
return {
k: base.to_json(
self.sym_inferred(k, default=v) if (
use_inferred and isinstance(v, base.Inferential)) else v,
hide_default_values=hide_default_values,
use_inferred=use_inferred,
**kwargs)
for k, v in self.sym_items()
if k not in exclude_keys
}
def custom_apply(
self,
path: object_utils.KeyPath,
value_spec: pg_typing.ValueSpec,
allow_partial: bool,
child_transform: Optional[
Callable[[object_utils.KeyPath, pg_typing.Field, Any], Any]] = None
) -> Tuple[bool, 'Dict']:
"""Implement pg.typing.CustomTyping interface.
Args:
path: KeyPath of current object.
value_spec: Origin value spec of the field.
allow_partial: Whether allow partial object to be created.
child_transform: Function to transform child node values in dict_obj into
their final values. Transform function is called on leaf nodes first,
then on their containers, recursively.
Returns:
A tuple (proceed_with_standard_apply, transformed value)
"""
proceed_with_standard_apply = True
if self._value_spec:
if value_spec and not value_spec.is_compatible(self._value_spec):
raise ValueError(
object_utils.message_on_path(
f'Dict (spec={self._value_spec!r}) cannot be assigned to an '
f'incompatible field (spec={value_spec!r}).', path))
if self._allow_partial == allow_partial:
proceed_with_standard_apply = False
else:
self._allow_partial = allow_partial
elif isinstance(value_spec, pg_typing.Dict):
self._value_spec = value_spec
return (proceed_with_standard_apply, self)
def format(
self,
compact: bool = False,
verbose: bool = True,
root_indent: int = 0,
*,
python_format: bool = False,
hide_default_values: bool = False,
hide_missing_values: bool = False,
include_keys: Optional[Set[str]] = None,
exclude_keys: Optional[Set[str]] = None,
use_inferred: bool = False,
cls_name: Optional[str] = None,
bracket_type: object_utils.BracketType = object_utils.BracketType.CURLY,
key_as_attribute: bool = False,
extra_blankline_for_field_docstr: bool = False,
**kwargs) -> str:
"""Formats this Dict."""
cls_name = cls_name or ''
exclude_keys = exclude_keys or set()
def _indent(text, indent):
return ' ' * 2 * indent + text
def _should_include_key(key):
if include_keys:
return key in include_keys
return key not in exclude_keys
field_list = []
if self._value_spec and self._value_spec.schema:
matched_keys, unmatched = self._value_spec.schema.resolve(self.keys()) # pytype: disable=attribute-error
assert not unmatched
for key_spec, keys in matched_keys.items():
for key in keys:
if _should_include_key(key):
field = self._value_spec.schema[key_spec]
v = self.sym_getattr(key)
if use_inferred and isinstance(v, base.Inferential):
v = self.sym_inferred(key, default=v)
if pg_typing.MISSING_VALUE == v:
if hide_missing_values:
continue
elif hide_default_values and base.eq(v, field.default_value):
continue
field_list.append((field, key, v))
else:
for k, v in self.sym_items():
if _should_include_key(k):
if use_inferred and isinstance(v, base.Inferential):
v = self.sym_inferred(k, default=v)
field_list.append((None, k, v))
open_bracket, close_bracket = object_utils.bracket_chars(bracket_type)
if not field_list:
return f'{cls_name}{open_bracket}{close_bracket}'
if compact:
s = [f'{cls_name}{open_bracket}']
kv_strs = []
for _, k, v in field_list:
v_str = object_utils.format(
v,
compact,
verbose,
root_indent + 1,
hide_default_values=hide_default_values,
hide_missing_values=hide_missing_values,
python_format=python_format,
use_inferred=use_inferred,
extra_blankline_for_field_docstr=extra_blankline_for_field_docstr,
**kwargs)
if not python_format or key_as_attribute:
kv_strs.append(f'{k}={v_str}')
else:
kv_strs.append(f'\'{k}\': {v_str}')
s.append(', '.join(kv_strs))
s.append(close_bracket)
else:
s = [f'{cls_name}{open_bracket}\n']
for i, (f, k, v) in enumerate(field_list):
if i != 0:
s.append(',\n')
if verbose and f and typing.cast(pg_typing.Field, f).description:
if i != 0 and extra_blankline_for_field_docstr:
s.append('\n')
description = typing.cast(pg_typing.Field, f).description
for line in description.split('\n'):
s.append(_indent(f'# {line}\n', root_indent + 1))
v_str = object_utils.format(
v,
compact,
verbose,
root_indent + 1,
hide_default_values=hide_default_values,
hide_missing_values=hide_missing_values,
python_format=python_format,
use_inferred=use_inferred,
extra_blankline_for_field_docstr=extra_blankline_for_field_docstr,
**kwargs)
if not python_format:
# Format in PyGlove's format (default).
s.append(_indent(f'{k} = {v_str}', root_indent + 1))
elif key_as_attribute:
# Format `pg.Objects` under Python format.
s.append(_indent(f'{k}={v_str}', root_indent + 1))
else:
# Format regular `pg.Dict` under Python format.
s.append(_indent(f'\'{k}\': {v_str}', root_indent + 1))
s.append('\n')
s.append(_indent(close_bracket, root_indent))
return ''.join(s)
def __repr__(self) -> str:
"""Operator repr()."""
return base.Symbolic.__repr__(self)
def __eq__(self, other: Any) -> bool:
"""Operator ==."""
if isinstance(other, dict):
return dict.__eq__(self, other)
return False
def __ne__(self, other: Any) -> bool:
"""Operator !=."""
return not self.__eq__(other)
def __hash__(self) -> int:
"""Overridden hashing function using symbolic hash."""
return self.sym_hash()
|
(dict_obj: Union[NoneType, Iterable[Tuple[str, Any]], Dict[str, Any]] = None, *, value_spec: Optional[pyglove.core.typing.value_specs.Dict] = None, onchange_callback: Optional[Callable[[Dict[pyglove.core.object_utils.value_location.KeyPath, pyglove.core.symbolic.base.FieldUpdate]], NoneType]] = None, **kwargs)
|
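A runnable sketch of subscribing to subtree updates on a schemaless `pg.Dict`; the callback and its printout are illustrative only::
import pyglove as pg

def on_change(updates):
  # `updates` maps each changed KeyPath to a FieldUpdate.
  print(sorted(str(k) for k in updates))

d = pg.Dict(x=1, onchange_callback=on_change)
d.y = {'z': 1}        # Fires on_change for the insertion of 'y'.
d.rebind({'y.z': 2})  # Fires on_change for the subtree change.
del d.x               # Fires on_change for the removal of 'x'.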
40,615 |
pyglove.core.symbolic.dict
|
__delattr__
|
Delete an attribute.
|
def __delattr__(self, name: str) -> None:
"""Delete an attribute."""
del self[name]
|
(self, name: str) -> NoneType
|
40,616 |
pyglove.core.symbolic.dict
|
__delitem__
|
Delete a key from the Dict.
This is used to delete a key which resolves to a pg.typing.NonConstKey.
Args:
name: Key to delete.
Raises:
WritePermissionError: When Dict is sealed.
KeyError: When key is not a NonConstKey.
|
def __delitem__(self, name: str) -> None:
"""Delete a key from the Dict.
This is used to delete a key which resolves to a pg.typing.NonConstKey.
Args:
name: Key to delete.
Raises:
WritePermissionError: When Dict is sealed.
KeyError: When key is not a NonConstKey.
"""
if base.treats_as_sealed(self):
raise base.WritePermissionError('Cannot del item from a sealed Dict.')
if not base.writtable_via_accessors(self):
raise base.WritePermissionError(
self._error_message('Cannot del Dict field by attribute or key while '
'accessor_writable is set to False. '
'Use \'rebind\' method instead.'))
if name not in self:
raise KeyError(
self._error_message(f'Key does not exist in Dict: {name!r}.'))
update = self._set_item_without_permission_check(
name, pg_typing.MISSING_VALUE)
if flags.is_change_notification_enabled() and update:
self._notify_field_updates([update])
|
(self, name: str) -> NoneType
|
40,617 |
pyglove.core.symbolic.dict
|
__eq__
|
Operator ==.
|
def __eq__(self, other: Any) -> bool:
"""Operator ==."""
if isinstance(other, dict):
return dict.__eq__(self, other)
return False
|
(self, other: Any) -> bool
|
40,618 |
pyglove.core.symbolic.dict
|
__getattr__
|
Get attribute that is not defined as property.
|
def __getattr__(self, name: str) -> Any:
"""Get attribute that is not defined as property."""
if name in self:
return self.sym_inferred(name)
raise AttributeError(
f'Attribute \'{name}\' does not exist in {self.__class__!r}.')
|
(self, name: str) -> Any
|
40,619 |
pyglove.core.symbolic.dict
|
__getitem__
|
Get item in this Dict.
|
def __getitem__(self, key: str) -> Any:
"""Get item in this Dict."""
try:
return self.sym_inferred(key)
except AttributeError as e:
raise KeyError(key) from e
|
(self, key: str) -> Any
|
40,620 |
pyglove.core.symbolic.dict
|
__getstate__
|
Customizes pickle.dump.
|
def __getstate__(self) -> Any:
"""Customizes pickle.dump."""
return dict(value=dict(self), kwargs=self._init_kwargs())
|
(self) -> Any
|
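A quick sketch of pickling a `pg.Dict`, which works because `__getstate__`/`__setstate__` route restoration through the constructor rather than raw item assignment::
import pickle
import pyglove as pg

d = pg.Dict(x=1, y=pg.Dict(z=[1, 2, 3]))
restored = pickle.loads(pickle.dumps(d))
print(restored == d)  # True: equality compares dict contents.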
40,621 |
pyglove.core.symbolic.dict
|
__hash__
|
Overridden hashing function using symbolic hash.
|
def __hash__(self) -> int:
"""Overridden hashing function using symbolic hash."""
return self.sym_hash()
|
(self) -> int
|
40,622 |
pyglove.core.symbolic.dict
|
__init__
|
Constructor.
Args:
dict_obj: A dict as initial value for this Dict.
value_spec: Value spec that applies to this Dict.
onchange_callback: Callback when sub-tree has been modified.
**kwargs: Key-value pairs that will be inserted into the dict as its
initial value, which provides syntactic sugar for usage such as:
d = pg.Dict(a=1, b=2)
|
def __init__(self,
dict_obj: Union[
None,
Iterable[Tuple[str, Any]],
typing.Dict[str, Any]] = None,
*,
value_spec: Optional[pg_typing.Dict] = None,
onchange_callback: Optional[Callable[
[typing.Dict[object_utils.KeyPath, base.FieldUpdate]], None]
] = None, # pylint: disable=bad-continuation
**kwargs):
"""Constructor.
Args:
dict_obj: A dict as initial value for this Dict.
value_spec: Value spec that applies to this Dict.
onchange_callback: Callback when sub-tree has been modified.
**kwargs: Key-value pairs that will be inserted into the dict as its
initial value, which provides syntactic sugar for usage such as:
d = pg.Dict(a=1, b=2)
"""
if value_spec and not isinstance(value_spec, pg_typing.Dict):
raise TypeError(
f'Argument \'value_spec\' must be a `pg.typing.Dict` object. '
f'Encountered {value_spec}')
allow_partial = kwargs.pop('allow_partial', False)
accessor_writable = kwargs.pop('accessor_writable', True)
sealed = kwargs.pop('sealed', False)
root_path = kwargs.pop('root_path', None)
# Skip schema check when dict_obj is validated against
# schema externally. This flag is helpful to avoid duplicated schema
# check in nested structures, which takes effect only when value_spec
# is not None.
pass_through = kwargs.pop('pass_through', False)
# If True, the parent of dict items should be set to `self.sym_parent`.
# This is useful when Dict is used as the field container of
# pg.Object.
self._set_raw_attr(
'_as_object_attributes_container',
kwargs.pop('as_object_attributes_container', False),
)
# We copy the symbolic form of dict values instead of their evaluated
# values.
if isinstance(dict_obj, Dict):
dict_obj = {k: v for k, v in dict_obj.sym_items()}
elif dict_obj is not None:
dict_obj = dict(dict_obj)
# NOTE(daiyip): we call __init__ of superclasses explicitly instead of
# calling super().__init__(...) since dict.__init__ does
# not follow super(...).__init__ fashion, which will lead to
# Symbolic.__init__ uncalled.
base.Symbolic.__init__(
self,
allow_partial=allow_partial,
accessor_writable=True,
# We delay seal operation until members are filled.
sealed=False,
root_path=root_path)
dict.__init__(self)
self._value_spec = None
self._onchange_callback = None
# NOTE(daiyip): values in kwargs take precedence over dict_obj.
dict_obj = dict_obj or {}
for k, v in kwargs.items():
dict_obj[k] = v
if value_spec:
if pass_through:
for k, v in dict_obj.items():
super().__setitem__(k, self._relocate_if_symbolic(k, v))
# NOTE(daiyip): when pass_through is on, we simply trust input
# dict is validated and filled with values of their final form (
# symbolic Dict/List vs. dict/list). This prevents members from
# repeated validation and transformation.
self._value_spec = value_spec
else:
for k, v in dict_obj.items():
super().__setitem__(k, self._formalized_value(k, None, v))
self.use_value_spec(value_spec, allow_partial)
else:
for k, v in dict_obj.items():
self._set_item_without_permission_check(k, v)
# NOTE(daiyip): We set onchange callback at the end of init to avoid
# triggering during initialization.
self._onchange_callback = onchange_callback
self.set_accessor_writable(accessor_writable)
self.seal(sealed)
|
(self, dict_obj: Union[NoneType, Iterable[Tuple[str, Any]], Dict[str, Any]] = None, *, value_spec: Optional[pyglove.core.typing.value_specs.Dict] = None, onchange_callback: Optional[Callable[[Dict[pyglove.core.object_utils.value_location.KeyPath, pyglove.core.symbolic.base.FieldUpdate]], NoneType]] = None, **kwargs)
|
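A sketch of construction behavior: keyword arguments override `dict_obj`, and a `value_spec` validates writes (the schema below is illustrative)::
import pyglove as pg

d = pg.Dict({'x': 1, 'y': 2}, x=10)
print(d.x, d.y)  # 10 2: kwargs take precedence over dict_obj.

d = pg.Dict(
    x=1,
    value_spec=pg.typing.Dict([
        ('x', pg.typing.Int(min_value=0)),
    ]))
try:
  d.x = -1  # Violates min_value=0.
except ValueError as e:
  print('rejected:', e)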
40,623 |
pyglove.core.symbolic.dict
|
__iter__
|
Iterate keys in field declaration order.
|
def __iter__(self):
"""Iterate keys in field declaration order."""
return self.sym_keys()
|
(self)
|
40,624 |
pyglove.core.symbolic.dict
|
__ne__
|
Operator !=.
|
def __ne__(self, other: Any) -> bool:
"""Operator !=."""
return not self.__eq__(other)
|
(self, other: Any) -> bool
|
40,625 |
pyglove.core.symbolic.dict
|
__repr__
|
Operator repr().
|
def __repr__(self) -> str:
"""Operator repr()."""
return base.Symbolic.__repr__(self)
|
(self) -> str
|
40,626 |
pyglove.core.symbolic.dict
|
__setattr__
|
Set attribute of this Dict.
NOTE(daiyip): When setting attributes, public attributes (not started with
'_') are set as dict fields, while private attributes (started with '_') are
set on the object instance.
Args:
name: Name of attribute.
value: Value of attribute.
|
def __setattr__(self, name: str, value: Any) -> None:
"""Set attribute of this Dict.
NOTE(daiyip): When setting attributes, public attributes (not starting with
'_') are set as dict fields, while private attributes (starting with '_') are
set on the object instance.
Args:
name: Name of attribute.
value: Value of attribute.
"""
if name.startswith('_'):
super().__setattr__(name, value)
else:
self[name] = value
|
(self, name: str, value: Any) -> NoneType
|
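Example (illustrative sketch): public attribute writes become dict entries, so attribute and key access are interchangeable, while underscore-prefixed names stay on the instance.

import pyglove as pg

d = pg.Dict()
d.x = 1                 # Public attribute -> stored as d['x'].
assert d['x'] == 1
assert d.x == 1         # Attribute reads mirror key lookups.
d._private = 'note'     # Private attribute -> set on the object, not in the dict.
assert '_private' not in d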
40,627 |
pyglove.core.symbolic.dict
|
__setitem__
|
Set item in this Dict.
Args:
key: String key. (Note that key paths are not supported.)
value: Value to be inserted.
Raises:
WritePermissionError: when Dict cannot be modified by accessor or
is sealed.
KeyError: Key is not allowed according to the value spec.
ValueError: Value is not acceptable according to the value spec.
|
def __setitem__(self, key: str, value: Any) -> None:
"""Set item in this Dict.
Args:
key: String key. (Note that key paths are not supported.)
value: Value to be inserted.
Raises:
WritePermissionError: when Dict cannot be modified by accessor or
is sealed.
KeyError: Key is not allowed according to the value spec.
ValueError: Value is not acceptable according to the value spec.
"""
# NOTE(daiyip): THIS IS A WORKAROUND FOR WORKING WITH PICKLE.
# `pg.Dict` is a subclass of `dict`; therefore, when pickle loads a dict,
# it may try to set its items directly by calling `__setitem__` without
# calling `pg.Dict.__init__` in the first place. As a result, an error would
# be raised complaining that an attribute set up during `__init__` is not
# available. The mitigation is to detect such calls in `__setitem__`, as done
# below, and simply do nothing, which gives `pg.Dict.__setstate__` a chance
# to handle the restoration logic as an object (instead of as a dict).
if not hasattr(self, '_sym_parent'):
return
if base.treats_as_sealed(self):
raise base.WritePermissionError(
self._error_message('Cannot modify field of a sealed Dict.'))
if not base.writtable_via_accessors(self):
raise base.WritePermissionError(
self._error_message(
'Cannot modify Dict field by attribute or key while '
'accessor_writable is set to False. '
'Use \'rebind\' method instead.'))
update = self._set_item_without_permission_check(key, value)
if flags.is_change_notification_enabled() and update:
self._notify_field_updates([update])
|
(self, key: str, value: Any) -> NoneType
|
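Example (sketch; the error class is assumed to be exported as pg.WritePermissionError):

import pyglove as pg

d = pg.Dict(x=1, accessor_writable=False)
try:
  d['x'] = 2            # Rejected: accessor writes are disabled.
except pg.WritePermissionError:
  pass
d.rebind(x=2)           # `rebind` remains the supported way to mutate.
assert d.x == 2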
40,628 |
pyglove.core.symbolic.dict
|
__setstate__
|
Customizes pickle.load.
|
def __setstate__(self, state) -> None:
"""Customizes pickle.load."""
self.__init__(state['value'], **state['kwargs'])
|
(self, state) -> NoneType
|
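Example (sketch): the __getstate__/__setstate__ pair, together with the `hasattr(self, '_sym_parent')` guard in __setitem__, is what lets pickling round-trip cleanly.

import pickle
import pyglove as pg

d = pg.Dict(x=1, y=pg.Dict(z=2))
restored = pickle.loads(pickle.dumps(d))
assert restored == d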
40,630 |
pyglove.core.symbolic.dict
|
_diff_base
|
Computes the diff between a value and a base value.
|
def _diff_base(self, value: Any, base_value: Any) -> Any:
"""Computes the diff between a value and a base value."""
if base.eq(value, base_value):
return pg_typing.MISSING_VALUE
if (isinstance(value, list)
or not isinstance(value, base.Symbolic)
or pg_typing.MISSING_VALUE == base_value):
return value
if value.__class__ is base_value.__class__:
getter = lambda x, k: x.sym_getattr(k)
elif isinstance(value, dict) and isinstance(base_value, dict):
getter = lambda x, k: x[k]
else:
return value
diff = {}
for k, v in value.sym_items():
base_v = getter(base_value, k)
child_diff = self._diff_base(v, base_v)
if pg_typing.MISSING_VALUE != child_diff:
diff[k] = child_diff
return diff
|
(self, value: Any, base_value: Any) -> Any
|
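Illustrative sketch only (not part of pyglove): a plain-dict analogue of the recursive "diff against a base" idea implemented by _diff_base above.

_MISSING = object()

def diff_against_base(value, base_value):
  """Returns the parts of `value` that differ from `base_value`."""
  if value == base_value:
    return _MISSING
  if not isinstance(value, dict) or not isinstance(base_value, dict):
    return value
  diff = {}
  for k, v in value.items():
    child = diff_against_base(v, base_value.get(k, _MISSING))
    if child is not _MISSING:
      diff[k] = child
  return diff

assert diff_against_base(
    {'x': 1, 'y': {'z': 2}}, {'x': 1, 'y': {'z': 3}}) == {'y': {'z': 2}}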
40,632 |
pyglove.core.symbolic.dict
|
_formalized_value
|
Get transformed (formal) value from user input.
|
def _formalized_value(self, name: str,
field: Optional[pg_typing.Field],
value: Any) -> Any:
"""Get transformed (formal) value from user input."""
allow_partial = base.accepts_partial(self)
if field and pg_typing.MISSING_VALUE == value:
# NOTE(daiyip): default value is already in transformed form.
value = field.default_value
else:
value = base.from_json(
value,
allow_partial=allow_partial,
root_path=object_utils.KeyPath(name, self.sym_path))
if field and flags.is_type_check_enabled():
value = field.apply(
value,
allow_partial=allow_partial,
transform_fn=base.symbolic_transform_fn(self._allow_partial),
root_path=object_utils.KeyPath(name, self.sym_path))
return self._relocate_if_symbolic(name, value)
|
(self, name: str, field: Optional[pyglove.core.typing.class_schema.Field], value: Any) -> Any
|
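Example (hedged sketch): on a schematized Dict, assignments are routed through the field's value spec as shown above, and declared defaults are used when a value is missing.

import pyglove as pg

d = pg.Dict(
    x=1,
    value_spec=pg.typing.Dict([('x', pg.typing.Int(min_value=0))]))
d.x = 2      # Validated against Int(min_value=0) at assignment time.
# d.x = -1   # Would be rejected by the value spec.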
40,633 |
pyglove.core.symbolic.dict
|
_init_kwargs
| null |
def _init_kwargs(self) -> typing.Dict[str, Any]:
kwargs = super()._init_kwargs()
if not self._accessor_writable:
kwargs['accessor_writable'] = False
if self._onchange_callback is not None:
kwargs['onchange_callback'] = self._onchange_callback
# NOTE(daiyip): We do not serialize ValueSpec for now, since in most use
# cases it comes from a subclass of `pg.Object`.
return kwargs
|
(self) -> Dict[str, Any]
|
40,636 |
pyglove.core.symbolic.dict
|
_on_change
|
On change event of Dict.
|
def _on_change(self, field_updates: typing.Dict[object_utils.KeyPath,
base.FieldUpdate]):
"""On change event of Dict."""
if self._onchange_callback:
self._onchange_callback(field_updates)
|
(self, field_updates: Dict[pyglove.core.object_utils.value_location.KeyPath, pyglove.core.symbolic.base.FieldUpdate])
|
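Example (sketch; FieldUpdate attribute names inferred from the constructor call in _set_item_without_permission_check below):

import pyglove as pg

def on_change(updates):
  for path, update in updates.items():
    print(f'{path}: {update.old_value!r} -> {update.new_value!r}')

d = pg.Dict(x=1, onchange_callback=on_change)
d.x = 2   # Triggers the callback with a single FieldUpdate for 'x'.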
40,639 |
pyglove.core.symbolic.dict
|
_set_item_without_permission_check
|
Set item without permission check.
|
def _set_item_without_permission_check( # pytype: disable=signature-mismatch # overriding-parameter-type-checks
self, key: str, value: Any) -> Optional[base.FieldUpdate]:
"""Set item without permission check."""
if not isinstance(key, str):
raise KeyError(self._error_message(
f'Key must be string type. Encountered {key!r}.'))
old_value = self.get(key, pg_typing.MISSING_VALUE)
if old_value is value:
return None
field = None
if self._value_spec and self._value_spec.schema:
field = self._value_spec.schema.get_field(key)
if not field:
if (self.sym_parent is not None
and self.sym_parent.sym_path == self.sym_path):
container_cls = self.sym_parent.__class__
else:
container_cls = self.__class__
raise KeyError(
self._error_message(
f'Key \'{key}\' is not allowed for {container_cls}.'))
# Detach old value from object tree.
if isinstance(old_value, base.TopologyAware):
old_value.sym_setparent(None)
old_value.sym_setpath(object_utils.KeyPath())
if (pg_typing.MISSING_VALUE == value and
(not field or isinstance(field.key, pg_typing.NonConstKey))):
if key in self:
# Using pg.MISSING_VALUE for deleting keys.
super().__delitem__(key)
new_value = pg_typing.MISSING_VALUE
else:
# This condition can be triggered when copying a partial Dict to a Dict
# without a schema.
return None
else:
new_value = self._formalized_value(key, field, value)
super().__setitem__(key, new_value)
# NOTE(daiyip): If current dict is the field dict of a symbolic object,
# Use parent object as update target.
target = self
if (self.sym_parent is not None
and self.sym_parent.sym_path == self.sym_path):
target = self.sym_parent
return base.FieldUpdate(
self.sym_path + key, target, field, old_value, new_value)
|
(self, key: str, value: Any) -> Optional[pyglove.core.symbolic.base.FieldUpdate]
|
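Example (sketch): as the code above notes, assigning pg.MISSING_VALUE to an existing key deletes it.

import pyglove as pg

d = pg.Dict(x=1, y=2)
d['x'] = pg.MISSING_VALUE
assert 'x' not in d and d.y == 2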
40,641 |
pyglove.core.symbolic.dict
|
_sym_clone
|
Override Symbolic._sym_clone.
|
def _sym_clone(self, deep: bool, memo=None) -> 'Dict':
"""Override Symbolic._sym_clone."""
source = dict()
for k, v in self.sym_items():
if deep or isinstance(v, base.Symbolic):
v = base.clone(v, deep, memo)
source[k] = v
return Dict(
source,
value_spec=self._value_spec,
allow_partial=self._allow_partial,
accessor_writable=self._accessor_writable,
sealed=self._sealed,
onchange_callback=self._onchange_callback,
# NOTE(daiyip): parent and root_path are reset to empty
# for the copied object.
root_path=None,
pass_through=True)
|
(self, deep: bool, memo=None) -> pyglove.core.symbolic.dict.Dict
|
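Example (sketch): cloning routes through _sym_clone above; symbolic children are copied and re-parented, so edits to the clone do not leak back.

import pyglove as pg

d = pg.Dict(a=pg.Dict(b=1))
copy = d.sym_clone(deep=True)
copy.a.b = 2
assert d.a.b == 1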