Dataset columns:
index: int64 (0 – 731k)
package: string (length 2 – 98)
name: string (length 1 – 76)
docstring: string (length 0 – 281k)
code: string (length 4 – 1.07M)
signature: string (length 2 – 42.8k)
44,082
dataclass_array.array_dataclass
__post_init__
Validate and normalize inputs.
def __post_init__(self) -> None: """Validate and normalize inputs.""" cls = type(self) # First time, we perform additional check & updates if cls._dca_fields_metadata is None: # pylint: disable=protected-access _init_cls(self) # Register the tree_map here instead of `__init_subclass__` as `jax` may # not have been imported yet during import. if enp.lazy.has_jax and not cls._dca_jax_tree_registered: # pylint: disable=protected-access enp.lazy.jax.tree_util.register_pytree_node_class(cls) cls._dca_jax_tree_registered = True # pylint: disable=protected-access if enp.lazy.has_torch and not cls._dca_torch_tree_registered: # pylint: disable=protected-access # Note: Torch is updating it's tree API to make it public and use `optree` # as backend: https://github.com/pytorch/pytorch/issues/65761 enp.lazy.torch.utils._pytree._register_pytree_node( # pylint: disable=protected-access cls, flatten_fn=lambda a: a.tree_flatten(), unflatten_fn=lambda vals, ctx: cls.tree_unflatten(ctx, vals), ) cls._dca_torch_tree_registered = True # pylint: disable=protected-access # Validate and normalize array fields # * Maybe cast (list, np) -> xnp # * Maybe cast dtype # * Maybe broadcast shapes # Because this is only done inside `__init__`, it is ok to mutate self. # Cast and validate the array xnp are consistent xnp = self._cast_xnp_dtype_inplace() # Validate the batch shape is consistent # However, we need to be careful that `_ArrayField` never uses # `@epy.cached_property` shape = self._broadcast_shape_inplace() # TODO(epot): When to validate (`field.validate()`) if xnp is None: # No values # Inside `jax.tree_utils`, tree-def can be created with `None` values. # Inside `jax.vmap`, tree can be created with `object()` sentinel values. assert shape is None xnp = None # Cache results # Should the state be stored in a separate object to avoid collisions ? assert shape is None or isinstance(shape, tuple), shape self._setattr('_shape', shape) self._setattr('_xnp', xnp)
(self) -> NoneType
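The normalization performed by `__post_init__` is what makes construction forgiving about input types and batch shapes. A minimal sketch of the observable behaviour, assuming the toy `Point3d` class from the `vectorize_method` docstring below and that `f32` comes from `etils.array_types`:

```python
import numpy as np
import dataclass_array as dca
from etils.array_types import f32  # assumed import path for the annotation

class Point3d(dca.DataclassArray):
  p: f32['*shape 3']

# The (3,) inner shape is stripped from the batch shape, and the backend
# (xnp) is inferred from the field values during __post_init__.
point = Point3d(p=np.zeros((2, 3), dtype=np.float32))
assert point.shape == (2,)
assert point.xnp is np
# With the default cast_list=True, a plain Python list would also be
# accepted and cast to an xnp.ndarray here.
```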
44,083
dataclass_array.array_dataclass
__tf_flatten__
null
def __tf_flatten__(self) -> tuple[_TreeMetadata, tuple[DcOrArray, ...]]: components, metadata = self.tree_flatten() return metadata, components
(self) -> 'tuple[_TreeMetadata, tuple[DcOrArray, ...]]'
44,084
dataclass_array.array_dataclass
_broadcast_shape_inplace
Validate the shapes are consistent and broadcast values in-place.
def _broadcast_shape_inplace(self) -> Optional[Shape]: """Validate the shapes are consistent and broadcast values in-place.""" if self._all_fields_empty: # pylint: disable=using-constant-test return None # First collect all shapes and compute the final shape. shape_to_names = epy.groupby( self._array_fields, key=lambda f: f.host_shape, value=lambda f: f.name, ) shape_lengths = {len(s) for s in shape_to_names.keys()} # Broadcast all shapes together try: final_shape = np.broadcast_shapes(*shape_to_names.keys()) except ValueError: final_shape = None # Bad broadcast # Currently, we restrict broadcasting to either scalar or fixed length. # This is to avoid confusion broadcasting vs vectorization rules. # This restriction could be lifted if we encounter a use-case. if ( final_shape is None or len(shape_lengths) > 2 or (len(shape_lengths) == 2 and 0 not in shape_lengths) ): raise ValueError( f'Conflicting batch shapes: {shape_to_names}. ' f'Currently {type(self).__qualname__}.__init__ broadcasting is ' 'restricted to scalar or dim=1 . ' 'Please open an issue if you need more fine-grained broadcasting.' ) def _broadcast_field(f: _ArrayField) -> None: if f.host_shape == final_shape: # Already broadcasted return elif not self.__dca_params__.broadcast: # Broadcasting disabled raise ValueError( f'{type(self).__qualname__} has `broadcast=False`. ' f'Cannot broadcast {f.name} from {f.full_shape} to {final_shape}. ' 'To enable broadcast, use `@dca.dataclass_array(broadcast=True)`.' ) self._setattr(f.name, f.broadcast_to(final_shape)) self._map_field( array_fn=_broadcast_field, dc_fn=_broadcast_field, # pytype: disable=wrong-arg-types _inplace=True, ) return final_shape
(self) -> Optional[Tuple[int, ...]]
44,085
dataclass_array.array_dataclass
_cast_xnp_dtype_inplace
Validate `xnp` are consistent and cast `np` -> `xnp` in-place.
def _cast_xnp_dtype_inplace(self) -> Optional[enp.NpModule]: """Validate `xnp` are consistent and cast `np` -> `xnp` in-place.""" if self._all_fields_empty: # pylint: disable=using-constant-test return None # Validate the dtype def _get_xnp(f: _ArrayField) -> enp.NpModule: try: return np_utils.get_xnp( f.value, strict=not self.__dca_params__.cast_list, ) except Exception as e: # pylint: disable=broad-except epy.reraise(e, prefix=f'Invalid {f.qualname}: ') xnps = epy.groupby( self._array_fields, key=_get_xnp, value=lambda f: f.name, ) if not xnps: return None xnp = _infer_xnp(xnps) def _cast_field(f: _ArrayField) -> None: try: # Supports for TensorSpec (e.g. in `tf.function` signature) if enp.lazy.is_tf_xnp(xnp) and isinstance( f.value, enp.lazy.tf.TensorSpec ): # TODO(epot): Actually check the dtype new_value = f.value else: new_value = np_utils.asarray( f.value, xnp=xnp, dtype=f.dtype, cast_dtype=self.__dca_params__.cast_dtype, ) self._setattr(f.name, new_value) # After the field has been set, we validate the shape f.assert_shape() except Exception as e: # pylint: disable=broad-except epy.reraise(e, prefix=f'Invalid {f.qualname}: ') self._map_field( array_fn=_cast_field, dc_fn=_cast_field, # pytype: disable=wrong-arg-types _inplace=True, ) return xnp
(self) -> Optional[Any]
44,086
dataclass_array.array_dataclass
_map_field
Apply a transformation to all array fields of the structure. Args: array_fn: Function applied on the `xnp.ndarray` fields dc_fn: Function applied on the `dca.DataclassArray` fields (to recurse) _inplace: If True, assume the function mutates the object in-place. Should only be used inside `__init__` for performance. Returns: The transformed dataclass array.
def _map_field( self: _DcT, *, array_fn: Callable[[_ArrayField[Array['*din']]], Array['*dout']], dc_fn: Optional[Callable[[_ArrayField[_DcT]], _DcT]], _inplace: bool = False, ) -> _DcT: """Apply a transformation to all array fields of the structure. Args: array_fn: Function applied on the `xnp.ndarray` fields dc_fn: Function applied on the `dca.DataclassArray` fields (to recurse) _inplace: If True, assume the function mutates the object in-place. Should only be used inside `__init__` for performance. Returns: The transformed dataclass array. """ def _apply_field_dn(f: _ArrayField): if f.is_dataclass: # Recurse on dataclasses return dc_fn(f) # pylint: disable=protected-access else: return array_fn(f) new_values = {f.name: _apply_field_dn(f) for f in self._array_fields} # pylint: disable=not-an-iterable,protected-access # For performance, do not call replace to save the constructor call if not _inplace: return self.replace(**new_values) else: return self
(self: ~_DcT, *, array_fn: Callable[[dataclass_array.array_dataclass._ArrayField[etils.enp.array_types.typing.Array]], etils.enp.array_types.typing.Array], dc_fn: Optional[Callable[[dataclass_array.array_dataclass._ArrayField[~_DcT]], ~_DcT]], _inplace: bool = False) -> ~_DcT
44,087
dataclass_array.array_dataclass
_setattr
Like setattr, but supports `frozen` dataclasses.
def _setattr(self, name: str, value: Any) -> None: """Like setattr, but supports `frozen` dataclasses.""" object.__setattr__(self, name, value)
(self, name: str, value: Any) -> NoneType
44,088
dataclass_array.array_dataclass
_to_absolute_axis
Normalize the axis to absolute value.
def _to_absolute_axis(self, axis: Axes) -> Axes: """Normalize the axis to absolute value.""" try: return np_utils.to_absolute_axis(axis, ndim=self.ndim) except Exception as e: # pylint: disable=broad-except epy.reraise( e, prefix=f'For {self.__class__.__qualname__} with shape={self.shape}: ', )
(self, axis: Union[NoneType, Tuple[int, ...], int]) -> Union[NoneType, Tuple[int, ...], int]
44,089
dataclass_array.array_dataclass
as_jax
Returns the instance as containing `jnp.ndarray`.
def as_jax(self: _DcT) -> _DcT: """Returns the instance as containing `jnp.ndarray`.""" return self.as_xnp(enp.lazy.jnp)
(self: ~_DcT) -> ~_DcT
44,090
dataclass_array.array_dataclass
as_np
Returns the instance as containing `np.ndarray`.
def as_np(self: _DcT) -> _DcT: """Returns the instance as containing `np.ndarray`.""" return self.as_xnp(enp.lazy.np)
(self: ~_DcT) -> ~_DcT
44,091
dataclass_array.array_dataclass
as_tf
Returns the instance as containing `tf.Tensor`.
def as_tf(self: _DcT) -> _DcT: """Returns the instance as containing `tf.Tensor`.""" return self.as_xnp(enp.lazy.tnp)
(self: ~_DcT) -> ~_DcT
44,092
dataclass_array.array_dataclass
as_torch
Returns the instance as containing `torch.Tensor`.
def as_torch(self: _DcT) -> _DcT: """Returns the instance as containing `torch.Tensor`.""" return self.as_xnp(enp.lazy.torch)
(self: ~_DcT) -> ~_DcT
44,093
dataclass_array.array_dataclass
as_xnp
Returns the instance as containing `xnp.ndarray`.
def as_xnp(self: _DcT, xnp: enp.NpModule) -> _DcT: """Returns the instance as containing `xnp.ndarray`.""" if xnp is self.xnp: # No-op return self # Direct `torch` <> `tf`/`jax` conversion not supported, so convert to # `numpy` if enp.lazy.is_torch_xnp(xnp) or enp.lazy.is_torch_xnp(self.xnp): def _as_torch(f): arr = np.asarray(f.value) # Torch fails for scalar arrays: # https://github.com/pytorch/pytorch/issues/97021 if enp.lazy.is_torch_xnp(xnp) and not arr.shape: # Destination is torch return xnp.asarray(arr.item(), dtype=lazy.as_torch_dtype(arr.dtype)) return xnp.asarray(arr) array_fn = _as_torch else: array_fn = lambda f: xnp.asarray(f.value) # Update all children new_self = self._map_field( # pylint: disable=protected-access array_fn=array_fn, dc_fn=lambda f: f.value.as_xnp(xnp), ) return new_self
(self: ~_DcT, xnp: Any) -> ~_DcT
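A short conversion sketch for the `as_*` helpers above, assuming the toy `Point3d` class (jax/tf/torch must be installed for the corresponding conversions):

```python
import numpy as np
import dataclass_array as dca
from etils.array_types import f32  # assumed import path

class Point3d(dca.DataclassArray):
  p: f32['*shape 3']

point = Point3d(p=np.zeros((4, 3), dtype=np.float32))
point_jax = point.as_jax()    # every array field becomes a jnp.ndarray
point_np = point_jax.as_np()  # and back to numpy
assert point.as_xnp(np) is point  # same backend -> no-op, returns self
```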
44,094
dataclass_array.array_dataclass
assert_same_xnp
Assert the given array is of the same type as the current object.
def assert_same_xnp(self, x: Union[Array[...], DataclassArray]) -> None: """Assert the given array is of the same type as the current object.""" xnp = np_utils.get_xnp(x) if xnp is not self.xnp: raise ValueError( f'{self.__class__.__name__} is {self.xnp.__name__} but got input ' f'{xnp.__name__}. Please cast input first.' )
(self, x: Union[etils.enp.array_types.typing.Array, dataclass_array.array_dataclass.DataclassArray]) -> NoneType
44,095
dataclass_array.array_dataclass
broadcast_to
Broadcast the batch shape.
def broadcast_to(self: _DcT, shape: Shape) -> _DcT: """Broadcast the batch shape.""" return self._map_field( # pylint: disable=protected-access array_fn=lambda f: f.broadcast_to(shape), dc_fn=lambda f: f.broadcast_to(shape), )
(self: ~_DcT, shape: Tuple[int, ...]) -> ~_DcT
44,096
dataclass_array.array_dataclass
cpu
Move the dataclass array to the CPU device.
def cpu(self: _DcT, *args, **kwargs) -> _DcT: """Move the dataclass array to the CPU device.""" if not lazy.is_torch_xnp(self.xnp): raise ValueError('`.cpu` can only be called when `xnp == torch`') return self.map_field(lambda f: f.cpu(*args, **kwargs))
(self: ~_DcT, *args, **kwargs) -> ~_DcT
44,097
dataclass_array.array_dataclass
cuda
Move the dataclass array to the CUDA device.
def cuda(self: _DcT, *args, **kwargs) -> _DcT: """Move the dataclass array to the CUDA device.""" if not lazy.is_torch_xnp(self.xnp): raise ValueError('`.cuda` can only be called when `xnp == torch`') return self.map_field(lambda f: f.cuda(*args, **kwargs))
(self: ~_DcT, *args, **kwargs) -> ~_DcT
44,098
dataclass_array.array_dataclass
flatten
Flatten the batch shape.
def flatten(self: _DcT) -> _DcT: """Flatten the batch shape.""" return self.reshape((-1,))
(self: ~_DcT) -> ~_DcT
44,099
dataclass_array.array_dataclass
map_field
Apply a transformation on all arrays from the fields.
def map_field( self: _DcT, fn: Callable[[Array['*din']], Array['*dout']], ) -> _DcT: """Apply a transformation on all arrays from the fields.""" return self._map_field( # pylint: disable=protected-access array_fn=lambda f: fn(f.value), dc_fn=lambda f: f.value.map_field(fn), )
(self: ~_DcT, fn: Callable[[etils.enp.array_types.typing.Array], etils.enp.array_types.typing.Array]) -> ~_DcT
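A usage sketch for `map_field`, assuming the same toy `Point3d` class:

```python
import numpy as np
import dataclass_array as dca
from etils.array_types import f32  # assumed import path

class Point3d(dca.DataclassArray):
  p: f32['*shape 3']

point = Point3d(p=np.ones((4, 3), dtype=np.float32))
# The function is applied to every array field; nested DataclassArray
# fields are recursed into via their own map_field.
doubled = point.map_field(lambda x: x * 2)
assert float(doubled.p.max()) == 2.0
```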
44,100
dataclass_array.array_dataclass
replace
Alias for `dataclasses.replace`.
def replace(self: _DcT, **kwargs: Any) -> _DcT: """Alias for `dataclasses.replace`.""" init_kwargs = { k: v for k, v in kwargs.items() if k not in self.__dca_non_init_fields__ } non_init_kwargs = { k: v for k, v in kwargs.items() if k in self.__dca_non_init_fields__ } # Create the new object new_self = dataclasses.replace(self, **init_kwargs) # pytype: disable=wrong-arg-types # re-none # TODO(epot): Could try to unify logic bellow with `tree_unflatten` # Additionally forward the non-init kwargs # `dataclasses.field(init=False) kwargs are required because `init=True` # creates conflicts: # * Inheritance fails with non-default argument 'K' follows default argument # * Pytype complains too # TODO(py310): Cleanup using `dataclass(kw_only)` assert new_self is not self for k in self.__dca_non_init_fields__: if k in non_init_kwargs: v = non_init_kwargs[k] else: v = getattr(self, k) new_self._setattr(k, v) # pylint: disable=protected-access return new_self
(self: ~_DcT, **kwargs: Any) -> ~_DcT
44,101
dataclass_array.array_dataclass
reshape
Reshape the batch shape according to the pattern. Supports both tuple and einops mode: ```python rays.reshape('b h w -> b (h w)') rays.reshape((128, -1)) ``` Args: shape: Target shape. Can be string for `einops` support. **axes_length: Any additional specifications for dimensions for einops support. Returns: The dataclass array with the new shape
def reshape(self: _DcT, shape: Union[Shape, str], **axes_length: int) -> _DcT: """Reshape the batch shape according to the pattern. Supports both tuple and einops mode: ```python rays.reshape('b h w -> b (h w)') rays.reshape((128, -1)) ``` Args: shape: Target shape. Can be string for `einops` support. **axes_length: Any additional specifications for dimensions for einops support. Returns: The dataclass array with the new shape """ if isinstance(shape, str): # Einops support return self._map_field( # pylint: disable=protected-access array_fn=lambda f: einops.rearrange( # pylint: disable=g-long-lambda f.value, np_utils.to_absolute_einops(shape, nlastdim=len(f.inner_shape)), **axes_length, ), dc_fn=lambda f: f.value.reshape( # pylint: disable=g-long-lambda np_utils.to_absolute_einops(shape, nlastdim=len(f.inner_shape)), **axes_length, ), ) else: # Numpy support assert isinstance(shape, tuple) # For pytest def _reshape(f: _ArrayField): return f.value.reshape(shape + f.inner_shape) return self._map_field(array_fn=_reshape, dc_fn=_reshape) # pylint: disable=protected-access
(self: ~_DcT, shape: Union[Tuple[int, ...], str], **axes_length: int) -> ~_DcT
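Both reshape modes in action, assuming the same toy `Point3d` class; only the batch shape is reshaped, the inner `(3,)` dimension is untouched:

```python
import numpy as np
import dataclass_array as dca
from etils.array_types import f32  # assumed import path

class Point3d(dca.DataclassArray):
  p: f32['*shape 3']

point = Point3d(p=np.zeros((6, 3), dtype=np.float32))
assert point.reshape((2, 3)).shape == (2, 3)                # tuple mode
assert point.reshape('(h w) -> h w', h=2).shape == (2, 3)   # einops mode
assert point.flatten().shape == (6,)                        # reshape((-1,))
```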
44,102
dataclass_array.array_dataclass
to
Move the dataclass array to the device.
def to(self: _DcT, device, **kwargs) -> _DcT: """Move the dataclass array to the device.""" if not lazy.is_torch_xnp(self.xnp): raise ValueError('`.to` can only be called when `xnp == torch`') return self.map_field(lambda f: f.to(device, **kwargs))
(self: ~_DcT, device, **kwargs) -> ~_DcT
44,103
dataclass_array.array_dataclass
tree_flatten
`jax.tree_utils` support.
def tree_flatten(self) -> tuple[tuple[DcOrArray, ...], _TreeMetadata]: """`jax.tree_utils` support.""" # We flatten all values (and not just the non-None ones) array_field_values = tuple(f.value for f in self._all_array_fields.values()) metadata = _TreeMetadata( array_field_names=list(self._all_array_fields.keys()), non_array_field_kwargs={ f.name: getattr(self, f.name) for f in dataclasses.fields(self) # pytype: disable=wrong-arg-types # re-none if f.name not in self._all_array_fields # pylint: disable=unsupported-membership-test }, ) return (array_field_values, metadata)
(self) -> 'tuple[tuple[DcOrArray, ...], _TreeMetadata]'
44,105
dataclass_array.ops
concat
Concatenate dataclasses together.
def concat(arrays: Iterable[DcT], *, axis: int = 0) -> DcT: """Concatenate dataclasses together.""" return _ops_base( arrays, axis=axis, array_fn=lambda xnp, axis, f: xnp.concatenate( # pylint: disable=g-long-lambda [getattr(arr, f.name) for arr in arrays], axis=axis ), dc_fn=lambda xnp, axis, f: concat( # pylint: disable=g-long-lambda [getattr(arr, f.name) for arr in arrays], axis=axis, ), )
(arrays: Iterable[~DcT], *, axis: int = 0) -> ~DcT
44,106
dataclass_array.array_dataclass
dataclass_array
Optional decorator to customize `dca.DataclassArray` params. Usage: ```python @dca.dataclass_array() class MyDataclass(dca.DataclassArray): ... ``` This decorator has to be added in addition to inheriting from `dca.DataclassArray`. Args: broadcast: If `True`, enable input broadcasting cast_dtype: If `True`, auto-cast inputs `dtype` cast_list: If `True`, auto-cast lists to `xnp.ndarray` Returns: decorator: The decorator which will apply the options to the dataclass
def dataclass_array( *, # If modifying this, make sure to modify `DataclassParams` too! broadcast: bool = False, cast_dtype: bool = False, cast_list: bool = True, ) -> Callable[[type[_DcT]], type[_DcT]]: """Optional decorator to customize `dca.DataclassArray` params. Usage: ```python @dca.dataclass_array() class MyDataclass(dca.DataclassArray): ... ``` This decorator has to be added in addition to inheriting from `dca.DataclassArray`. Args: broadcast: If `True`, enable input broadcasting cast_dtype: If `True`, auto-cast inputs `dtype` cast_list: If `True`, auto-cast lists to `xnp.ndarray` Returns: decorator: The decorator which will apply the options to the dataclass """ def decorator(cls): if not issubclass(cls, DataclassArray): raise TypeError( '`@dca.dataclass_array` can only be applied on `dca.DataclassArray`. ' f'Got: {cls}' ) cls.__dca_params__ = DataclassParams( broadcast=broadcast, cast_dtype=cast_dtype, cast_list=cast_list, ) return cls return decorator
(*, broadcast: bool = False, cast_dtype: bool = False, cast_list: bool = True) -> Callable[[type[~_DcT]], type[~_DcT]]
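A sketch of the decorator options, assuming a hypothetical two-field `Ray` class; `broadcast=True` relaxes the batch-shape check in `_broadcast_shape_inplace` and `cast_dtype=True` the dtype check in `_cast_xnp_dtype_inplace`:

```python
import numpy as np
import dataclass_array as dca
from etils.array_types import f32  # assumed import path

@dca.dataclass_array(broadcast=True, cast_dtype=True)
class Ray(dca.DataclassArray):
  pos: f32['*shape 3']
  dir: f32['*shape 3']

# `dir` (batch shape ()) is broadcast against `pos` (batch shape (5,)),
# and the float64 inputs are cast to the declared float32 dtype.
ray = Ray(pos=np.zeros((5, 3)), dir=np.array([0.0, 0.0, 1.0]))
assert ray.shape == (5,)
assert ray.dir.shape == (5, 3)
```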
44,107
dataclass_array.array_dataclass
array_field
Dataclass array field. See `dca.DataclassArray` for example. Args: shape: Inner shape of the field dtype: Type of the field **field_kwargs: Args forwarded to `dataclasses.field` Returns: The dataclass field.
def array_field( shape: Shape, dtype: DTypeArg = float, **field_kwargs, ) -> dataclasses.Field[DcOrArray]: """Dataclass array field. See `dca.DataclassArray` for example. Args: shape: Inner shape of the field dtype: Type of the field **field_kwargs: Args forwarded to `dataclasses.field` Returns: The dataclass field. """ # TODO(epot): Validate shape, dtype dca_field = _ArrayFieldMetadata( inner_shape_non_static=shape, dtype=dtype, ) return dataclasses.field(**field_kwargs, metadata={_METADATA_KEY: dca_field})
(shape: 'Shape', dtype: 'DTypeArg' = <class 'float'>, **field_kwargs) -> 'dataclasses.Field[DcOrArray]'
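A sketch of `array_field` in a class definition (field names, annotations, and dtypes are illustrative):

```python
import numpy as np
import dataclass_array as dca

class Box(dca.DataclassArray):
  # Inner shape (3,) with an explicit dtype.
  center: np.ndarray = dca.array_field(shape=(3,), dtype=np.float32)
  # Scalar (inner shape ()) integer field.
  label: np.ndarray = dca.array_field(shape=(), dtype=np.int32)

box = Box(
    center=np.zeros((2, 3), dtype=np.float32),
    label=np.zeros((2,), dtype=np.int32),
)
assert box.shape == (2,)
```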
44,111
dataclass_array.ops
stack
Stack dataclasses together.
def stack( arrays: Iterable[DcT], # list[_DcT['*shape']] *, axis: int = 0, ) -> DcT: # _DcT['len(arrays) *shape']: """Stack dataclasses together.""" return _ops_base( arrays, axis=axis, array_fn=lambda xnp, axis, f: xnp.stack( # pylint: disable=g-long-lambda [getattr(arr, f.name) for arr in arrays], axis=axis ), dc_fn=lambda xnp, axis, f: stack( # pylint: disable=g-long-lambda [getattr(arr, f.name) for arr in arrays], axis=axis, ), )
(arrays: Iterable[~DcT], *, axis: int = 0) -> ~DcT
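`stack` and `concat` mirror their numpy counterparts on the batch shape; a sketch assuming they are re-exported as `dca.stack` / `dca.concat` and the toy `Point3d` class:

```python
import numpy as np
import dataclass_array as dca
from etils.array_types import f32  # assumed import path

class Point3d(dca.DataclassArray):
  p: f32['*shape 3']

a = Point3d(p=np.zeros((4, 3), dtype=np.float32))
b = Point3d(p=np.ones((4, 3), dtype=np.float32))
assert dca.stack([a, b]).shape == (2, 4)         # new leading batch axis
assert dca.concat([a, b], axis=0).shape == (8,)  # concatenated batch axis
```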
44,117
dataclass_array.vectorization
vectorize_method
Vectorize a `dca.DataclassArray` method. Allows implementing methods in `dca.DataclassArray` assuming `shape == ()`. This is similar to `jax.vmap` but: * Only works on `dca.DataclassArray` methods * Instead of vectorizing a single axis, `@dca.vectorize_method` will vectorize over `*self.shape` (not just `self.shape[0]`). This is as if `vmap` was applied to `self.flatten()` * Axes with dimension `1` are broadcast. For example, with `__matmul__(self, x: T) -> T`: ```python () @ (*x,) -> (*x,) (b,) @ (b, *x) -> (b, *x) (b,) @ (1, *x) -> (b, *x) (1,) @ (b, *x) -> (b, *x) (b, h, w) @ (b, h, w, *x) -> (b, h, w, *x) (1, h, w) @ (b, 1, 1, *x) -> (b, h, w, *x) ``` Example: ``` class Point3d(dca.DataclassArray): p: f32['*shape 3'] @dca.vectorize_method def first_value(self): return self.p[0] point = Point3d(p=[ # 4 points batched together [10, 11, 12], [20, 21, 22], [30, 31, 32], [40, 41, 42], ]) point.first_value() == [10, 20, 30, 40] # First value of each point ``` Args: fn: DataclassArray method to decorate static_args: If given, should be a set of the static argument names Returns: fn: Decorated function with vectorization applied to self.
def vectorize_method( fn=None, *, static_args=None, ): """Vectorize a `dca.DataclassArray` method. Allows implementing methods in `dca.DataclassArray` assuming `shape == ()`. This is similar to `jax.vmap` but: * Only works on `dca.DataclassArray` methods * Instead of vectorizing a single axis, `@dca.vectorize_method` will vectorize over `*self.shape` (not just `self.shape[0]`). This is as if `vmap` was applied to `self.flatten()` * Axes with dimension `1` are broadcast. For example, with `__matmul__(self, x: T) -> T`: ```python () @ (*x,) -> (*x,) (b,) @ (b, *x) -> (b, *x) (b,) @ (1, *x) -> (b, *x) (1,) @ (b, *x) -> (b, *x) (b, h, w) @ (b, h, w, *x) -> (b, h, w, *x) (1, h, w) @ (b, 1, 1, *x) -> (b, h, w, *x) ``` Example: ``` class Point3d(dca.DataclassArray): p: f32['*shape 3'] @dca.vectorize_method def first_value(self): return self.p[0] point = Point3d(p=[ # 4 points batched together [10, 11, 12], [20, 21, 22], [30, 31, 32], [40, 41, 42], ]) point.first_value() == [10, 20, 30, 40] # First value of each point ``` Args: fn: DataclassArray method to decorate static_args: If given, should be a set of the static argument names Returns: fn: Decorated function with vectorization applied to self. """ # Called as decorator with options (`@dca.vectorize_method(**options)`) if fn is None: return functools.partial(vectorize_method, static_args=static_args) # pytype: disable=bad-return-type # Signature util also makes sure explicit error messages are raised (e.g. # `Error in <fn> for arg <arg-name>` ) sig = inspect_utils.Signature(fn) if sig.has_var: raise NotImplementedError( '`@dca.vectorize_method` does not support function with variable args ' f'(`*args` or `**kwargs`). For {sig.fn_name}. Please open an issue.' ) if static_args is not None: if not isinstance(static_args, set): raise TypeError( f'Unexpected `static_args={static_args!r}`. Expected `set`.' ) map_non_static = functools.partial( _map_non_static, static_args=static_args, ) @functools.wraps(fn) @epy.maybe_reraise(prefix=lambda: f'Error in {fn.__qualname__}: ') def decorated( self: array_dataclass.DataclassArray, *args: Any, **kwargs: Any, ) -> _Out: if not isinstance(self, array_dataclass.DataclassArray): raise TypeError( 'dca.vectorize_method should be applied on DataclassArray method. ' f'Not: {type(self)}' ) if not self.shape: # No batch shape, no need to vectorize return fn(self, *args, **kwargs) original_args = sig.bind(self, *args, **kwargs) # TODO(epot): Tree support (with masking inside args) # Validation # TODO(epot): Normalize `np`, `list` -> `xnp` assert_is_array = functools.partial(_assert_is_array, xnp=self.xnp) map_non_static(assert_is_array, original_args) # Broadcast and flatten args. Example: # Broadcast the batch shape when dim == 1: # (h, w), (h, w, c) -> (h, w), (h, w, c) # (h, w), (1, 1, c) -> (h, w), (h, w, c) # (1, 1), (h, w, c) -> (h, w), (h, w, c) # Flatten: # (h, w), (h, w, c) -> (b*h*w,), (b*h*w, c) flat_args, batch_shape = _broadcast_and_flatten_args( original_args, map_non_static=map_non_static, ) # Call the vectorized function out = _vmap_method( flat_args, map_non_static=map_non_static, xnp=self.xnp, ) # Unflatten the output unflatten = functools.partial(_unflatten, batch_shape=batch_shape) out = tree_utils.tree_map(unflatten, out) return out return decorated
(fn=None, *, static_args=None)
44,118
gwcs.coordinate_frames
CelestialFrame
Celestial Frame Representation Parameters ---------- axes_order : tuple of int A dimension in the input data that corresponds to this axis. reference_frame : astropy.coordinates.builtin_frames A reference frame. unit : str or units.Unit instance or iterable of those Units on axes. axes_names : list Names of the axes in this frame. name : str Name of this frame.
class CelestialFrame(CoordinateFrame): """ Celestial Frame Representation Parameters ---------- axes_order : tuple of int A dimension in the input data that corresponds to this axis. reference_frame : astropy.coordinates.builtin_frames A reference frame. unit : str or units.Unit instance or iterable of those Units on axes. axes_names : list Names of the axes in this frame. name : str Name of this frame. """ def __init__(self, axes_order=None, reference_frame=None, unit=None, axes_names=None, name=None, axis_physical_types=None): naxes = 2 if reference_frame is not None: if not isinstance(reference_frame, str): if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES: _axes_names = list(reference_frame.representation_component_names.values()) if 'distance' in _axes_names: _axes_names.remove('distance') if axes_names is None: axes_names = _axes_names naxes = len(_axes_names) _unit = list(reference_frame.representation_component_units.values()) if unit is None and _unit: unit = _unit if axes_order is None: axes_order = tuple(range(naxes)) if unit is None: unit = tuple([u.degree] * naxes) axes_type = ['SPATIAL'] * naxes super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type, axes_order=axes_order, reference_frame=reference_frame, unit=unit, axes_names=axes_names, name=name, axis_physical_types=axis_physical_types) @property def _default_axis_physical_types(self): if isinstance(self.reference_frame, coord.Galactic): return "pos.galactic.lon", "pos.galactic.lat" elif isinstance(self.reference_frame, (coord.GeocentricTrueEcliptic, coord.GCRS, coord.PrecessedGeocentric)): return "pos.bodyrc.lon", "pos.bodyrc.lat" elif isinstance(self.reference_frame, coord.builtin_frames.BaseRADecFrame): return "pos.eq.ra", "pos.eq.dec" elif isinstance(self.reference_frame, coord.builtin_frames.BaseEclipticFrame): return "pos.ecliptic.lon", "pos.ecliptic.lat" else: return tuple("custom:{}".format(t) for t in self.axes_names) @property def _world_axis_object_classes(self): return {'celestial': ( coord.SkyCoord, (), {'frame': self.reference_frame, 'unit': self.unit})} @property def _world_axis_object_components(self): return [('celestial', 0, 'spherical.lon'), ('celestial', 1, 'spherical.lat')] def coordinates(self, *args): """ Create a SkyCoord object. Parameters ---------- args : float inputs to wcs.input_frame """ if isinstance(args[0], coord.SkyCoord): return args[0].transform_to(self.reference_frame) return coord.SkyCoord(*args, unit=self.unit, frame=self.reference_frame) def coordinate_to_quantity(self, *coords): """ Convert a ``SkyCoord`` object to quantities.""" if len(coords) == 2: arg = coords elif len(coords) == 1: arg = coords[0] else: raise ValueError("Unexpected number of coordinates in " "input to frame {} : " "expected 2, got {}".format(self.name, len(coords))) if isinstance(arg, coord.SkyCoord): arg = arg.transform_to(self._reference_frame) try: lon = arg.data.lon lat = arg.data.lat except AttributeError: lon = arg.spherical.lon lat = arg.spherical.lat return lon, lat elif all(isinstance(a, u.Quantity) for a in arg): return tuple(arg) else: raise ValueError("Could not convert input {} to lon and lat quantities.".format(arg))
(axes_order=None, reference_frame=None, unit=None, axes_names=None, name=None, axis_physical_types=None)
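A minimal usage sketch for `CelestialFrame` with a standard reference frame (axes names and units are then filled in automatically):

```python
from astropy import coordinates as coord
from gwcs import coordinate_frames as cf

sky = cf.CelestialFrame(reference_frame=coord.ICRS(), name='icrs')
assert sky.axes_names == ('ra', 'dec')      # derived from the ICRS frame
sc = sky.coordinates(5.6, -72.05)           # -> SkyCoord in ICRS (degrees)
lon, lat = sky.coordinate_to_quantity(sc)   # -> two Quantity objects
```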
44,119
gwcs.coordinate_frames
__init__
null
def __init__(self, axes_order=None, reference_frame=None, unit=None, axes_names=None, name=None, axis_physical_types=None): naxes = 2 if reference_frame is not None: if not isinstance(reference_frame, str): if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES: _axes_names = list(reference_frame.representation_component_names.values()) if 'distance' in _axes_names: _axes_names.remove('distance') if axes_names is None: axes_names = _axes_names naxes = len(_axes_names) _unit = list(reference_frame.representation_component_units.values()) if unit is None and _unit: unit = _unit if axes_order is None: axes_order = tuple(range(naxes)) if unit is None: unit = tuple([u.degree] * naxes) axes_type = ['SPATIAL'] * naxes super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type, axes_order=axes_order, reference_frame=reference_frame, unit=unit, axes_names=axes_names, name=name, axis_physical_types=axis_physical_types)
(self, axes_order=None, reference_frame=None, unit=None, axes_names=None, name=None, axis_physical_types=None)
44,120
gwcs.coordinate_frames
__repr__
null
def __repr__(self): fmt = '<{0}(name="{1}", unit={2}, axes_names={3}, axes_order={4}'.format( self.__class__.__name__, self.name, self.unit, self.axes_names, self.axes_order) if self.reference_position is not None: fmt += ', reference_position="{0}"'.format(self.reference_position) if self.reference_frame is not None: fmt += ", reference_frame={0}".format(self.reference_frame) fmt += ")>" return fmt
(self)
44,121
gwcs.coordinate_frames
__str__
null
def __str__(self): if self._name is not None: return self._name return self.__class__.__name__
(self)
44,122
gwcs.coordinate_frames
_set_axis_physical_types
Set the physical type of the coordinate axes using VO UCD1+ v1.23 definitions.
def _set_axis_physical_types(self, pht): """ Set the physical type of the coordinate axes using VO UCD1+ v1.23 definitions. """ if pht is not None: if isinstance(pht, str): pht = (pht,) elif not isiterable(pht): raise TypeError("axis_physical_types must be of type string or iterable of strings") if len(pht) != self.naxes: raise ValueError('"axis_physical_types" must be of length {}'.format(self.naxes)) ph_type = [] for axt in pht: if axt not in VALID_UCDS and not axt.startswith("custom:"): ph_type.append("custom:{}".format(axt)) else: ph_type.append(axt) validate_physical_types(ph_type) return tuple(ph_type)
(self, pht)
44,123
gwcs.coordinate_frames
coordinate_to_quantity
Convert a ``SkyCoord`` object to quantities.
def coordinate_to_quantity(self, *coords): """ Convert a ``SkyCoord`` object to quantities.""" if len(coords) == 2: arg = coords elif len(coords) == 1: arg = coords[0] else: raise ValueError("Unexpected number of coordinates in " "input to frame {} : " "expected 2, got {}".format(self.name, len(coords))) if isinstance(arg, coord.SkyCoord): arg = arg.transform_to(self._reference_frame) try: lon = arg.data.lon lat = arg.data.lat except AttributeError: lon = arg.spherical.lon lat = arg.spherical.lat return lon, lat elif all(isinstance(a, u.Quantity) for a in arg): return tuple(arg) else: raise ValueError("Could not convert input {} to lon and lat quantities.".format(arg))
(self, *coords)
44,124
gwcs.coordinate_frames
coordinates
Create a SkyCoord object. Parameters ---------- args : float inputs to wcs.input_frame
def coordinates(self, *args): """ Create a SkyCoord object. Parameters ---------- args : float inputs to wcs.input_frame """ if isinstance(args[0], coord.SkyCoord): return args[0].transform_to(self.reference_frame) return coord.SkyCoord(*args, unit=self.unit, frame=self.reference_frame)
(self, *args)
44,125
gwcs.coordinate_frames
CompositeFrame
Represents one or more frames. Parameters ---------- frames : list List of frames (TemporalFrame, CelestialFrame, SpectralFrame, CoordinateFrame). name : str Name for this frame.
class CompositeFrame(CoordinateFrame): """ Represents one or more frames. Parameters ---------- frames : list List of frames (TemporalFrame, CelestialFrame, SpectralFrame, CoordinateFrame). name : str Name for this frame. """ def __init__(self, frames, name=None): self._frames = frames[:] naxes = sum([frame._naxes for frame in self._frames]) axes_type = list(range(naxes)) unit = list(range(naxes)) axes_names = list(range(naxes)) axes_order = [] ph_type = list(range(naxes)) for frame in frames: axes_order.extend(frame.axes_order) for frame in frames: for ind, axtype, un, n, pht in zip(frame.axes_order, frame.axes_type, frame.unit, frame.axes_names, frame.axis_physical_types): axes_type[ind] = axtype axes_names[ind] = n unit[ind] = un ph_type[ind] = pht if len(np.unique(axes_order)) != len(axes_order): raise ValueError("Incorrect numbering of axes, " "axes_order should contain unique numbers, " "got {}.".format(axes_order)) super(CompositeFrame, self).__init__(naxes, axes_type=axes_type, axes_order=axes_order, unit=unit, axes_names=axes_names, name=name) self._axis_physical_types = tuple(ph_type) @property def frames(self): return self._frames def __repr__(self): return repr(self.frames) def coordinates(self, *args): coo = [] if len(args) == len(self.frames): for frame, arg in zip(self.frames, args): coo.append(frame.coordinates(arg)) else: for frame in self.frames: fargs = [args[i] for i in frame.axes_order] coo.append(frame.coordinates(*fargs)) return coo def coordinate_to_quantity(self, *coords): if len(coords) == len(self.frames): args = coords elif len(coords) == self.naxes: args = [] for _frame in self.frames: if _frame.naxes > 1: # Collect the arguments for this frame based on axes_order args.append([coords[i] for i in _frame.axes_order]) else: args.append(coords[_frame.axes_order[0]]) else: raise ValueError("Incorrect number of arguments") qs = [] for _frame, arg in zip(self.frames, args): ret = _frame.coordinate_to_quantity(arg) if isinstance(ret, tuple): qs += list(ret) else: qs.append(ret) return qs @property def _wao_classes_rename_map(self): mapper = defaultdict(dict) seen_names = [] for frame in self.frames: # ensure the frame is in the mapper mapper[frame] for key in frame._world_axis_object_classes.keys(): if key in seen_names: new_key = f"{key}{seen_names.count(key)}" mapper[frame][key] = new_key seen_names.append(key) return mapper @property def _wao_renamed_components_iter(self): mapper = self._wao_classes_rename_map for frame in self.frames: renamed_components = [] for comp in frame._world_axis_object_components: comp = list(comp) rename = mapper[frame].get(comp[0]) if rename: comp[0] = rename renamed_components.append(tuple(comp)) yield frame, renamed_components @property def _wao_renamed_classes_iter(self): mapper = self._wao_classes_rename_map for frame in self.frames: for key, value in frame._world_axis_object_classes.items(): rename = mapper[frame].get(key) if rename: key = rename yield key, value @property def _world_axis_object_components(self): """ We need to generate the components respecting the axes_order. """ out = [None] * self.naxes for frame, components in self._wao_renamed_components_iter: for i, ao in enumerate(frame.axes_order): out[ao] = components[i] if any([o is None for o in out]): raise ValueError("axes_order leads to incomplete world_axis_object_components") return out @property def _world_axis_object_classes(self): return dict(self._wao_renamed_classes_iter)
(frames, name=None)
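A sketch combining a celestial frame with a 1D axis built on the `CoordinateFrame` base class (gwcs also ships a dedicated `SpectralFrame`, not shown in this section):

```python
from astropy import coordinates as coord
from astropy import units as u
from gwcs import coordinate_frames as cf

sky = cf.CelestialFrame(reference_frame=coord.ICRS(), axes_order=(0, 1))
spec = cf.CoordinateFrame(naxes=1, axes_type=('SPECTRAL',), axes_order=(2,),
                          unit=(u.um,), axes_names=('wavelength',))
world = cf.CompositeFrame([sky, spec], name='world')
assert world.naxes == 3
assert world.axes_names == ('ra', 'dec', 'wavelength')
```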
44,126
gwcs.coordinate_frames
__init__
null
def __init__(self, frames, name=None): self._frames = frames[:] naxes = sum([frame._naxes for frame in self._frames]) axes_type = list(range(naxes)) unit = list(range(naxes)) axes_names = list(range(naxes)) axes_order = [] ph_type = list(range(naxes)) for frame in frames: axes_order.extend(frame.axes_order) for frame in frames: for ind, axtype, un, n, pht in zip(frame.axes_order, frame.axes_type, frame.unit, frame.axes_names, frame.axis_physical_types): axes_type[ind] = axtype axes_names[ind] = n unit[ind] = un ph_type[ind] = pht if len(np.unique(axes_order)) != len(axes_order): raise ValueError("Incorrect numbering of axes, " "axes_order should contain unique numbers, " "got {}.".format(axes_order)) super(CompositeFrame, self).__init__(naxes, axes_type=axes_type, axes_order=axes_order, unit=unit, axes_names=axes_names, name=name) self._axis_physical_types = tuple(ph_type)
(self, frames, name=None)
44,127
gwcs.coordinate_frames
__repr__
null
def __repr__(self): return repr(self.frames)
(self)
44,130
gwcs.coordinate_frames
coordinate_to_quantity
null
def coordinate_to_quantity(self, *coords): if len(coords) == len(self.frames): args = coords elif len(coords) == self.naxes: args = [] for _frame in self.frames: if _frame.naxes > 1: # Collect the arguments for this frame based on axes_order args.append([coords[i] for i in _frame.axes_order]) else: args.append(coords[_frame.axes_order[0]]) else: raise ValueError("Incorrect number of arguments") qs = [] for _frame, arg in zip(self.frames, args): ret = _frame.coordinate_to_quantity(arg) if isinstance(ret, tuple): qs += list(ret) else: qs.append(ret) return qs
(self, *coords)
44,131
gwcs.coordinate_frames
coordinates
null
def coordinates(self, *args): coo = [] if len(args) == len(self.frames): for frame, arg in zip(self.frames, args): coo.append(frame.coordinates(arg)) else: for frame in self.frames: fargs = [args[i] for i in frame.axes_order] coo.append(frame.coordinates(*fargs)) return coo
(self, *args)
44,132
gwcs.coordinate_frames
CoordinateFrame
Base class for Coordinate Frames. Parameters ---------- naxes : int Number of axes. axes_type : str One of ["SPATIAL", "SPECTRAL", "TIME"] axes_order : tuple of int A dimension in the input data that corresponds to this axis. reference_frame : astropy.coordinates.builtin_frames Reference frame (usually used with output_frame to convert to world coordinate objects). reference_position : str Reference position - one of ``STANDARD_REFERENCE_POSITION`` unit : list of astropy.units.Unit Unit for each axis. axes_names : list Names of the axes in this frame. name : str Name of this frame.
class CoordinateFrame: """ Base class for Coordinate Frames. Parameters ---------- naxes : int Number of axes. axes_type : str One of ["SPATIAL", "SPECTRAL", "TIME"] axes_order : tuple of int A dimension in the input data that corresponds to this axis. reference_frame : astropy.coordinates.builtin_frames Reference frame (usually used with output_frame to convert to world coordinate objects). reference_position : str Reference position - one of ``STANDARD_REFERENCE_POSITION`` unit : list of astropy.units.Unit Unit for each axis. axes_names : list Names of the axes in this frame. name : str Name of this frame. """ def __init__(self, naxes, axes_type, axes_order, reference_frame=None, reference_position=None, unit=None, axes_names=None, name=None, axis_physical_types=None): self._naxes = naxes self._axes_order = tuple(axes_order) if isinstance(axes_type, str): self._axes_type = (axes_type,) else: self._axes_type = tuple(axes_type) self._reference_frame = reference_frame if unit is not None: if astutil.isiterable(unit): unit = tuple(unit) else: unit = (unit,) if len(unit) != naxes: raise ValueError("Number of units does not match number of axes.") else: self._unit = tuple([u.Unit(au) for au in unit]) else: self._unit = tuple(u.Unit("") for na in range(naxes)) if axes_names is not None: if isinstance(axes_names, str): axes_names = (axes_names,) else: axes_names = tuple(axes_names) if len(axes_names) != naxes: raise ValueError("Number of axes names does not match number of axes.") else: axes_names = tuple([""] * naxes) self._axes_names = axes_names if name is None: self._name = self.__class__.__name__ else: self._name = name self._reference_position = reference_position if len(self._axes_type) != naxes: raise ValueError("Length of axes_type does not match number of axes.") if len(self._axes_order) != naxes: raise ValueError("Length of axes_order does not match number of axes.") super(CoordinateFrame, self).__init__() # _axis_physical_types holds any user supplied physical types self._axis_physical_types = self._set_axis_physical_types(axis_physical_types) def _set_axis_physical_types(self, pht): """ Set the physical type of the coordinate axes using VO UCD1+ v1.23 definitions. """ if pht is not None: if isinstance(pht, str): pht = (pht,) elif not isiterable(pht): raise TypeError("axis_physical_types must be of type string or iterable of strings") if len(pht) != self.naxes: raise ValueError('"axis_physical_types" must be of length {}'.format(self.naxes)) ph_type = [] for axt in pht: if axt not in VALID_UCDS and not axt.startswith("custom:"): ph_type.append("custom:{}".format(axt)) else: ph_type.append(axt) validate_physical_types(ph_type) return tuple(ph_type) def __repr__(self): fmt = '<{0}(name="{1}", unit={2}, axes_names={3}, axes_order={4}'.format( self.__class__.__name__, self.name, self.unit, self.axes_names, self.axes_order) if self.reference_position is not None: fmt += ', reference_position="{0}"'.format(self.reference_position) if self.reference_frame is not None: fmt += ", reference_frame={0}".format(self.reference_frame) fmt += ")>" return fmt def __str__(self): if self._name is not None: return self._name return self.__class__.__name__ @property def name(self): """ A custom name of this frame.""" return self._name @name.setter def name(self, val): """ A custom name of this frame.""" self._name = val @property def naxes(self): """ The number of axes in this frame.""" return self._naxes @property def unit(self): """The unit of this frame.""" return self._unit @property def axes_names(self): """ Names of axes in the frame.""" return self._axes_names @property def axes_order(self): """ A tuple of indices which map inputs to axes.""" return self._axes_order @property def reference_frame(self): """ Reference frame, used to convert to world coordinate objects. """ return self._reference_frame @property def reference_position(self): """ Reference Position. """ return getattr(self, "_reference_position", None) @property def axes_type(self): """ Type of this frame : 'SPATIAL', 'SPECTRAL', 'TIME'. """ return self._axes_type def coordinates(self, *args): """ Create world coordinates object""" coo = tuple([arg * un if not hasattr(arg, "to") else arg.to(un) for arg, un in zip(args, self.unit)]) if self.naxes == 1: return coo[0] return coo def coordinate_to_quantity(self, *coords): """ Given a rich coordinate object return an astropy quantity object. """ # NoOp leaves it to the model to handle # If coords is a 1-tuple of quantity then return the element of the tuple # This aligns the behavior with the other implementations if not hasattr(coords, 'unit') and len(coords) == 1: return coords[0] return coords @property def _default_axis_physical_types(self): """ The default physical types to use for this frame if none are specified by the user. """ return tuple("custom:{}".format(t) for t in self.axes_type) @property def axis_physical_types(self): """ The axis physical types for this frame. These physical types are the types in frame order, not transform order. """ return self._axis_physical_types or self._default_axis_physical_types @property def _world_axis_object_classes(self): return {f"{at}{i}" if i != 0 else at: (u.Quantity, (), {'unit': unit}) for i, (at, unit) in enumerate(zip(self._axes_type, self.unit))} @property def _world_axis_object_components(self): return [(f"{at}{i}" if i != 0 else at, 0, 'value') for i, at in enumerate(self._axes_type)]
(naxes, axes_type, axes_order, reference_frame=None, reference_position=None, unit=None, axes_names=None, name=None, axis_physical_types=None)
44,133
gwcs.coordinate_frames
__init__
null
def __init__(self, naxes, axes_type, axes_order, reference_frame=None, reference_position=None, unit=None, axes_names=None, name=None, axis_physical_types=None): self._naxes = naxes self._axes_order = tuple(axes_order) if isinstance(axes_type, str): self._axes_type = (axes_type,) else: self._axes_type = tuple(axes_type) self._reference_frame = reference_frame if unit is not None: if astutil.isiterable(unit): unit = tuple(unit) else: unit = (unit,) if len(unit) != naxes: raise ValueError("Number of units does not match number of axes.") else: self._unit = tuple([u.Unit(au) for au in unit]) else: self._unit = tuple(u.Unit("") for na in range(naxes)) if axes_names is not None: if isinstance(axes_names, str): axes_names = (axes_names,) else: axes_names = tuple(axes_names) if len(axes_names) != naxes: raise ValueError("Number of axes names does not match number of axes.") else: axes_names = tuple([""] * naxes) self._axes_names = axes_names if name is None: self._name = self.__class__.__name__ else: self._name = name self._reference_position = reference_position if len(self._axes_type) != naxes: raise ValueError("Length of axes_type does not match number of axes.") if len(self._axes_order) != naxes: raise ValueError("Length of axes_order does not match number of axes.") super(CoordinateFrame, self).__init__() # _axis_physical_types holds any user supplied physical types self._axis_physical_types = self._set_axis_physical_types(axis_physical_types)
(self, naxes, axes_type, axes_order, reference_frame=None, reference_position=None, unit=None, axes_names=None, name=None, axis_physical_types=None)
44,137
gwcs.coordinate_frames
coordinate_to_quantity
Given a rich coordinate object return an astropy quantity object.
def coordinate_to_quantity(self, *coords): """ Given a rich coordinate object return an astropy quantity object. """ # NoOp leaves it to the model to handle # If coords is a 1-tuple of quantity then return the element of the tuple # This aligns the behavior with the other implementations if not hasattr(coords, 'unit') and len(coords) == 1: return coords[0] return coords
(self, *coords)
44,138
gwcs.coordinate_frames
coordinates
Create world coordinates object
def coordinates(self, *args): """ Create world coordinates object""" coo = tuple([arg * un if not hasattr(arg, "to") else arg.to(un) for arg, un in zip(args, self.unit)]) if self.naxes == 1: return coo[0] return coo
(self, *args)
44,139
gwcs.coordinate_frames
Frame2D
A 2D coordinate frame. Parameters ---------- axes_order : tuple of int A dimension in the input data that corresponds to this axis. unit : list of astropy.units.Unit Unit for each axis. axes_names : list Names of the axes in this frame. name : str Name of this frame.
class Frame2D(CoordinateFrame): """ A 2D coordinate frame. Parameters ---------- axes_order : tuple of int A dimension in the input data that corresponds to this axis. unit : list of astropy.units.Unit Unit for each axis. axes_names : list Names of the axes in this frame. name : str Name of this frame. """ def __init__(self, axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x', 'y'), name=None, axis_physical_types=None): super(Frame2D, self).__init__(naxes=2, axes_type=["SPATIAL", "SPATIAL"], axes_order=axes_order, name=name, axes_names=axes_names, unit=unit, axis_physical_types=axis_physical_types) @property def _default_axis_physical_types(self): if all(self.axes_names): ph_type = self.axes_names else: ph_type = self.axes_type return tuple("custom:{}".format(t) for t in ph_type) def coordinates(self, *args): args = [args[i] for i in self.axes_order] coo = tuple([arg * un for arg, un in zip(args, self.unit)]) return coo def coordinate_to_quantity(self, *coords): # list or tuple if len(coords) == 1 and astutil.isiterable(coords[0]): coords = list(coords[0]) elif len(coords) == 2: coords = list(coords) else: raise ValueError("Unexpected number of coordinates in " "input to frame {} : " "expected 2, got {}".format(self.name, len(coords))) for i in range(2): if not hasattr(coords[i], 'unit'): coords[i] = coords[i] * self.unit[i] return tuple(coords)
(axes_order=(0, 1), unit=(Unit("pix"), Unit("pix")), axes_names=('x', 'y'), name=None, axis_physical_types=None)
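A typical `Frame2D` use as a detector/pixel frame:

```python
from astropy import units as u
from gwcs import coordinate_frames as cf

detector = cf.Frame2D(name='detector')   # defaults: axes ('x', 'y') in pixels
x, y = detector.coordinates(512, 1024)   # -> (512 pix, 1024 pix)
assert x.unit == u.pix
# Bare numbers get the frame's units attached.
x_q, y_q = detector.coordinate_to_quantity(512, 1024)
```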
44,140
gwcs.coordinate_frames
__init__
null
def __init__(self, axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x', 'y'), name=None, axis_physical_types=None): super(Frame2D, self).__init__(naxes=2, axes_type=["SPATIAL", "SPATIAL"], axes_order=axes_order, name=name, axes_names=axes_names, unit=unit, axis_physical_types=axis_physical_types)
(self, axes_order=(0, 1), unit=(Unit("pix"), Unit("pix")), axes_names=('x', 'y'), name=None, axis_physical_types=None)
44,144
gwcs.coordinate_frames
coordinate_to_quantity
null
def coordinate_to_quantity(self, *coords): # list or tuple if len(coords) == 1 and astutil.isiterable(coords[0]): coords = list(coords[0]) elif len(coords) == 2: coords = list(coords) else: raise ValueError("Unexpected number of coordinates in " "input to frame {} : " "expected 2, got {}".format(self.name, len(coords))) for i in range(2): if not hasattr(coords[i], 'unit'): coords[i] = coords[i] * self.unit[i] return tuple(coords)
(self, *coords)
44,145
gwcs.coordinate_frames
coordinates
null
def coordinates(self, *args): args = [args[i] for i in self.axes_order] coo = tuple([arg * un for arg, un in zip(args, self.unit)]) return coo
(self, *args)
44,146
gwcs.selector
LabelMapper
Maps inputs to regions. Returns the region labels corresponding to the inputs. Labels are strings or numbers which uniquely identify a location. For example, labels may represent slices of an IFU or names of spherical polygons. Parameters ---------- mapper : `~astropy.modeling.Model` A function which returns a region. no_label : str or int "" or 0 A return value for a location which has no corresponding label. inputs_mapping : `~astropy.modeling.mappings.Mapping` or tuple An optional Mapping model to be prepended to the LabelMapper with the purpose to filter the inputs or change their order. If tuple, a `~astropy.modeling.mappings.Mapping` model will be created from it. name : str The name of this transform.
class LabelMapper(_LabelMapper): """ Maps inputs to regions. Returns the region labels corresponding to the inputs. Labels are strings or numbers which uniquely identify a location. For example, labels may represent slices of an IFU or names of spherical polygons. Parameters ---------- mapper : `~astropy.modeling.Model` A function which returns a region. no_label : str or int "" or 0 A return value for a location which has no corresponding label. inputs_mapping : `~astropy.modeling.mappings.Mapping` or tuple An optional Mapping model to be prepended to the LabelMapper with the purpose to filter the inputs or change their order. If tuple, a `~astropy.modeling.mappings.Mapping` model will be created from it. name : str The name of this transform. """ n_outputs = 1 def __init__(self, inputs, mapper, no_label=np.nan, inputs_mapping=None, name=None, **kwargs): self._no_label = no_label self._inputs = inputs self._n_inputs = len(inputs) self._outputs = tuple(['x{0}'.format(ind) for ind in list(range(mapper.n_outputs))]) if isinstance(inputs_mapping, tuple): inputs_mapping = astmodels.Mapping(inputs_mapping) elif inputs_mapping is not None and not isinstance(inputs_mapping, astmodels.Mapping): raise TypeError("inputs_mapping must be an instance of astropy.modeling.Mapping.") self._inputs_mapping = inputs_mapping self._mapper = mapper self._input_units_strict = {key: False for key in self._inputs} self._input_units_allow_dimensionless = {key: False for key in self._inputs} super(_LabelMapper, self).__init__(name=name, **kwargs) self.outputs = ('label',) @property def inputs(self): """ The name(s) of the input variable(s) on which a model is evaluated. """ return self._inputs @inputs.setter def inputs(self, val): """ The name(s) of the input variable(s) on which a model is evaluated. """ self._inputs = val @property def n_inputs(self): return self._n_inputs @property def mapper(self): return self._mapper @property def inputs_mapping(self): return self._inputs_mapping @property def no_label(self): return self._no_label def evaluate(self, *args): if self.inputs_mapping is not None: args = self.inputs_mapping(*args) if self.n_outputs == 1: args = [args] res = self.mapper(*args) if np.isscalar(res): res = np.array([res]) return np.array(res)
(inputs, mapper, no_label=nan, inputs_mapping=None, name=None, **kwargs)
44,147
astropy.modeling.core
<lambda>
null
def _model_oper(oper, **kwargs): """ Returns a function that evaluates a given Python arithmetic operator between two models. The operator should be given as a string, like ``'+'`` or ``'**'``. """ return lambda left, right: CompoundModel(oper, left, right, **kwargs)
(left, right)
44,149
astropy.modeling.core
__call__
Evaluate this model using the given input(s) and the parameter values that were specified when the model was instantiated.
def __call__(self, *args, **kwargs): """ Evaluate this model using the given input(s) and the parameter values that were specified when the model was instantiated. """ # Turn any keyword arguments into positional arguments. args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs) # Read model evaluation related parameters with_bbox = kwargs.pop("with_bounding_box", False) fill_value = kwargs.pop("fill_value", np.nan) # prepare for model evaluation (overridden in CompoundModel) evaluate, inputs, broadcasted_shapes, kwargs = self._pre_evaluate( *args, **kwargs ) outputs = self._generic_evaluate(evaluate, inputs, fill_value, with_bbox) # post-process evaluation results (overridden in CompoundModel) return self._post_evaluate( inputs, outputs, broadcasted_shapes, with_bbox, **kwargs )
(self, *args, **kwargs)
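A small usage sketch for the evaluation kwargs read at the top of `Model.__call__` (`with_bounding_box`, `fill_value`):

```python
import numpy as np
from astropy.modeling import models

g = models.Gaussian1D(amplitude=2.0, mean=0.0, stddev=1.0)
g.bounding_box = (-1.0, 1.0)  # restrict the evaluation domain

x = np.array([-2.0, 0.0, 2.0])
y_full = g(x)                                           # evaluated everywhere
y_nan = g(x, with_bounding_box=True)                    # outside the box -> NaN
y_zero = g(x, with_bounding_box=True, fill_value=0.0)   # outside the box -> 0.0
```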
44,150
gwcs.selector
__init__
null
def __init__(self, inputs, mapper, no_label=np.nan, inputs_mapping=None, name=None, **kwargs): self._no_label = no_label self._inputs = inputs self._n_inputs = len(inputs) self._outputs = tuple(['x{0}'.format(ind) for ind in list(range(mapper.n_outputs))]) if isinstance(inputs_mapping, tuple): inputs_mapping = astmodels.Mapping(inputs_mapping) elif inputs_mapping is not None and not isinstance(inputs_mapping, astmodels.Mapping): raise TypeError("inputs_mapping must be an instance of astropy.modeling.Mapping.") self._inputs_mapping = inputs_mapping self._mapper = mapper self._input_units_strict = {key: False for key in self._inputs} self._input_units_allow_dimensionless = {key: False for key in self._inputs} super(_LabelMapper, self).__init__(name=name, **kwargs) self.outputs = ('label',)
(self, inputs, mapper, no_label=nan, inputs_mapping=None, name=None, **kwargs)
44,151
astropy.modeling.core
__len__
null
def __len__(self): return self._n_models
(self)
44,155
astropy.modeling.core
__repr__
null
def __repr__(self): return self._format_repr()
(self)
44,156
astropy.modeling.core
__setattr__
null
def __setattr__(self, attr, value): if isinstance(self, CompoundModel): param_names = self._param_names param_names = self.param_names if param_names is not None and attr in self.param_names: param = self.__dict__[attr] value = _tofloat(value) if param._validator is not None: param._validator(self, value) # check consistency with previous shape and size eshape = self._param_metrics[attr]["shape"] if eshape == (): eshape = (1,) vshape = np.array(value).shape if vshape == (): vshape = (1,) esize = self._param_metrics[attr]["size"] if np.size(value) != esize or self._strip_ones(vshape) != self._strip_ones( eshape ): raise InputParameterError( f"Value for parameter {attr} does not match shape or size\nexpected" f" by model ({vshape}, {np.size(value)}) vs ({eshape}, {esize})" ) if param.unit is None: if isinstance(value, Quantity): param._unit = value.unit param.value = value.value else: param.value = value else: if not isinstance(value, Quantity): raise UnitsError( f"The '{param.name}' parameter should be given as a" " Quantity because it was originally " "initialized as a Quantity" ) param._unit = value.unit param.value = value.value else: if attr in ["fittable", "linear"]: self.__dict__[attr] = value else: super().__setattr__(attr, value)
(self, attr, value)
44,157
astropy.modeling.core
__str__
null
def __str__(self): return self._format_str()
(self)
44,160
astropy.modeling.core
_array_to_parameters
null
def _array_to_parameters(self): param_metrics = self._param_metrics for name in self.param_names: param = getattr(self, name) value = self._parameters[param_metrics[name]["slice"]] value.shape = param_metrics[name]["shape"] param.value = value
(self)
44,161
astropy.modeling.core
_calculate_separability_matrix
This is a hook which customises the behavior of modeling.separable. This allows complex subclasses to customise the separability matrix. If it returns `NotImplemented` the default behavior is used.
def _calculate_separability_matrix(self): """ This is a hook which customises the behavior of modeling.separable. This allows complex subclasses to customise the separability matrix. If it returns `NotImplemented` the default behavior is used. """ return NotImplemented
(self)
44,162
astropy.modeling.core
_check_param_broadcast
This subroutine checks that all parameter arrays can be broadcast against each other, and determines the shapes parameters must have in order to broadcast correctly. If model_set_axis is None this merely checks that the parameters broadcast and returns an empty dict if so. This mode is only used for single model sets.
def _check_param_broadcast(self, max_ndim): """ This subroutine checks that all parameter arrays can be broadcast against each other, and determines the shapes parameters must have in order to broadcast correctly. If model_set_axis is None this merely checks that the parameters broadcast and returns an empty dict if so. This mode is only used for single model sets. """ all_shapes = [] model_set_axis = self._model_set_axis for name in self.param_names: param = getattr(self, name) value = param.value param_shape = np.shape(value) param_ndim = len(param_shape) if max_ndim is not None and param_ndim < max_ndim: # All arrays have the same number of dimensions up to the # model_set_axis dimension, but after that they may have a # different number of trailing axes. The number of trailing # axes must be extended for mutual compatibility. For example # if max_ndim = 3 and model_set_axis = 0, an array with the # shape (2, 2) must be extended to (2, 1, 2). However, an # array with shape (2,) is extended to (2, 1). new_axes = (1,) * (max_ndim - param_ndim) if model_set_axis < 0: # Just need to prepend axes to make up the difference broadcast_shape = new_axes + param_shape else: broadcast_shape = ( param_shape[: model_set_axis + 1] + new_axes + param_shape[model_set_axis + 1 :] ) self._param_metrics[name]["broadcast_shape"] = broadcast_shape all_shapes.append(broadcast_shape) else: all_shapes.append(param_shape) # Now check mutual broadcastability of all shapes try: check_broadcast(*all_shapes) except IncompatibleShapeError as exc: shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args param_a = self.param_names[shape_a_idx] param_b = self.param_names[shape_b_idx] raise InputParameterError( f"Parameter {param_a!r} of shape {shape_a!r} cannot be broadcast with " f"parameter {param_b!r} of shape {shape_b!r}. All parameter arrays " "must have shapes that are mutually compatible according " "to the broadcasting rules." )
(self, max_ndim)
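A worked illustration of the shape extension described above, using nothing beyond NumPy; ``extend_shape`` is a hypothetical helper written only to mirror the logic, not part of astropy:

import numpy as np

def extend_shape(param_shape, max_ndim, model_set_axis):
    # Pad with length-1 trailing axes after the model_set_axis dimension,
    # mirroring the broadcast_shape computation above.
    new_axes = (1,) * (max_ndim - len(param_shape))
    if model_set_axis < 0:
        return new_axes + param_shape
    return (
        param_shape[: model_set_axis + 1]
        + new_axes
        + param_shape[model_set_axis + 1 :]
    )

# With max_ndim=3 and model_set_axis=0, as in the docstring's example:
print(extend_shape((2, 2), 3, 0))                  # (2, 1, 2)
print(extend_shape((2,), 3, 0))                    # (2, 1, 1)
print(np.broadcast_shapes((2, 1, 2), (2, 1, 1)))   # (2, 1, 2): mutually compatible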
44,163
astropy.modeling.core
_default_inputs_outputs
null
def _default_inputs_outputs(self):
    if self.n_inputs == 1 and self.n_outputs == 1:
        self._inputs = ("x",)
        self._outputs = ("y",)
    elif self.n_inputs == 2 and self.n_outputs == 1:
        self._inputs = ("x", "y")
        self._outputs = ("z",)
    else:
        try:
            self._inputs = tuple("x" + str(idx) for idx in range(self.n_inputs))
            self._outputs = tuple("x" + str(idx) for idx in range(self.n_outputs))
        except TypeError:
            # self.n_inputs and self.n_outputs are properties
            # This is the case when subclasses of Model do not define
            # ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``.
            self._inputs = ()
            self._outputs = ()
(self)
44,165
astropy.modeling.core
_format_repr
Internal implementation of ``__repr__``. This is separated out for ease of use by subclasses that wish to override the default ``__repr__`` while keeping the same basic formatting.
def _format_repr(self, args=[], kwargs={}, defaults={}):
    """
    Internal implementation of ``__repr__``.

    This is separated out for ease of use by subclasses that wish to
    override the default ``__repr__`` while keeping the same basic
    formatting.
    """
    parts = [repr(a) for a in args]
    parts.extend(
        f"{name}={param_repr_oneline(getattr(self, name))}"
        for name in self.param_names
    )
    if self.name is not None:
        parts.append(f"name={self.name!r}")
    for kwarg, value in kwargs.items():
        if kwarg in defaults and defaults[kwarg] == value:
            continue
        parts.append(f"{kwarg}={value!r}")
    if len(self) > 1:
        parts.append(f"n_models={len(self)}")
    return f"<{self.__class__.__name__}({', '.join(parts)})>"
(self, args=[], kwargs={}, defaults={})
44,166
astropy.modeling.core
_format_str
Internal implementation of ``__str__``. This is separated out for ease of use by subclasses that wish to override the default ``__str__`` while keeping the same basic formatting.
def _format_str(self, keywords=[], defaults={}): """ Internal implementation of ``__str__``. This is separated out for ease of use by subclasses that wish to override the default ``__str__`` while keeping the same basic formatting. """ default_keywords = [ ("Model", self.__class__.__name__), ("Name", self.name), ("Inputs", self.inputs), ("Outputs", self.outputs), ("Model set size", len(self)), ] parts = [ f"{keyword}: {value}" for keyword, value in default_keywords if value is not None ] for keyword, value in keywords: if keyword.lower() in defaults and defaults[keyword.lower()] == value: continue parts.append(f"{keyword}: {value}") parts.append("Parameters:") if len(self) == 1: columns = [[getattr(self, name).value] for name in self.param_names] else: columns = [getattr(self, name).value for name in self.param_names] if columns: param_table = Table(columns, names=self.param_names) # Set units on the columns for name in self.param_names: param_table[name].unit = getattr(self, name).unit parts.append(indent(str(param_table), 4 * " ")) return "\n".join(parts)
(self, keywords=[], defaults={})
44,167
astropy.modeling.core
_generic_evaluate
Generic model evaluation routine. Selects and evaluates model with or without bounding_box enforcement.
def _generic_evaluate(self, evaluate, _inputs, fill_value, with_bbox):
    """Generic model evaluation routine.

    Selects and evaluates model with or without bounding_box enforcement.
    """
    # Evaluate the model using the prepared evaluation method either
    # enforcing the bounding_box or not.
    bbox = self.get_bounding_box(with_bbox)
    if (not isinstance(with_bbox, bool) or with_bbox) and bbox is not None:
        outputs = bbox.evaluate(evaluate, _inputs, fill_value)
    else:
        outputs = evaluate(_inputs)
    return outputs
(self, evaluate, _inputs, fill_value, with_bbox)
44,168
astropy.modeling.core
_get_renamed_inputs_as_positional
null
def _get_renamed_inputs_as_positional(self, *args, **kwargs): def _keyword2positional(kwargs): # Inputs were passed as keyword (not positional) arguments. # Because the signature of the ``__call__`` is defined at # the class level, the name of the inputs cannot be changed at # the instance level and the old names are always present in the # signature of the method. In order to use the new names of the # inputs, the old names are taken out of ``kwargs``, the input # values are sorted in the order of self.inputs and passed as # positional arguments to ``__call__``. # These are the keys that are always present as keyword arguments. keys = [ "model_set_axis", "with_bounding_box", "fill_value", "equivalencies", "inputs_map", ] new_inputs = {} # kwargs contain the names of the new inputs + ``keys`` allkeys = list(kwargs.keys()) # Remove the names of the new inputs from kwargs and save them # to a dict ``new_inputs``. for key in allkeys: if key not in keys: new_inputs[key] = kwargs[key] del kwargs[key] return new_inputs, kwargs n_args = len(args) new_inputs, kwargs = _keyword2positional(kwargs) n_all_args = n_args + len(new_inputs) if n_all_args < self.n_inputs: raise ValueError( f"Missing input arguments - expected {self.n_inputs}, got {n_all_args}" ) elif n_all_args > self.n_inputs: raise ValueError( f"Too many input arguments - expected {self.n_inputs}, got {n_all_args}" ) if n_args == 0: # Create positional arguments from the keyword arguments in ``new_inputs``. new_args = [] for k in self.inputs: new_args.append(new_inputs[k]) elif n_args != self.n_inputs: # Some inputs are passed as positional, others as keyword arguments. args = list(args) # Create positional arguments from the keyword arguments in ``new_inputs``. new_args = [] for k in self.inputs: if k in new_inputs: new_args.append(new_inputs[k]) else: new_args.append(args[0]) del args[0] else: new_args = args return new_args, kwargs
(self, *args, **kwargs)
44,169
astropy.modeling.core
_initialize_constraints
Pop parameter constraint values off the keyword arguments passed to `Model.__init__` and store them in private instance attributes.
def _initialize_constraints(self, kwargs):
    """
    Pop parameter constraint values off the keyword arguments passed to
    `Model.__init__` and store them in private instance attributes.
    """
    # Pop any constraints off the keyword arguments
    for constraint in self.parameter_constraints:
        values = kwargs.pop(constraint, {})
        for ckey, cvalue in values.items():
            param = getattr(self, ckey)
            setattr(param, constraint, cvalue)
    self._mconstraints = {}
    for constraint in self.model_constraints:
        values = kwargs.pop(constraint, [])
        self._mconstraints[constraint] = values
(self, kwargs)
44,170
astropy.modeling.core
_initialize_parameter_value
Mostly deals with consistency checks and determining unit issues.
def _initialize_parameter_value(self, param_name, value): """Mostly deals with consistency checks and determining unit issues.""" if isinstance(value, Parameter): self.__dict__[param_name] = value return param = getattr(self, param_name) # Use default if value is not provided if value is None: default = param.default if default is None: # No value was supplied for the parameter and the # parameter does not have a default, therefore the model # is underspecified raise TypeError( f"{self.__class__.__name__}.__init__() requires a value for " f"parameter {param_name!r}" ) value = default unit = param.unit else: if isinstance(value, Quantity): unit = value.unit value = value.value else: unit = None if unit is None and param.unit is not None: raise InputParameterError( f"{self.__class__.__name__}.__init__() requires a Quantity for" f" parameter {param_name!r}" ) param._unit = unit param._set_unit(unit, force=True) param.internal_unit = None if param._setter is not None: if unit is not None: _val = param._setter(value * unit) else: _val = param._setter(value) if isinstance(_val, Quantity): param.internal_unit = _val.unit param._internal_value = np.array(_val.value) else: param.internal_unit = None param._internal_value = np.array(_val) else: param._value = np.array(value)
(self, param_name, value)
44,171
astropy.modeling.core
_initialize_parameters
Initialize the _parameters array that stores raw parameter values for all parameter sets for use with vectorized fitting algorithms; on FittableModels the _param_name attributes actually just reference slices of this array.
def _initialize_parameters(self, args, kwargs): """ Initialize the _parameters array that stores raw parameter values for all parameter sets for use with vectorized fitting algorithms; on FittableModels the _param_name attributes actually just reference slices of this array. """ n_models = kwargs.pop("n_models", None) if not ( n_models is None or (isinstance(n_models, (int, np.integer)) and n_models >= 1) ): raise ValueError( "n_models must be either None (in which case it is " "determined from the model_set_axis of the parameter initial " "values) or it must be a positive integer " f"(got {n_models!r})" ) model_set_axis = kwargs.pop("model_set_axis", None) if model_set_axis is None: if n_models is not None and n_models > 1: # Default to zero model_set_axis = 0 else: # Otherwise disable model_set_axis = False else: if not ( model_set_axis is False or np.issubdtype(type(model_set_axis), np.integer) ): raise ValueError( "model_set_axis must be either False or an integer " "specifying the parameter array axis to map to each " f"model in a set of models (got {model_set_axis!r})." ) # Process positional arguments by matching them up with the # corresponding parameters in self.param_names--if any also appear as # keyword arguments this presents a conflict params = set() if len(args) > len(self.param_names): raise TypeError( f"{self.__class__.__name__}.__init__() takes at most " f"{len(self.param_names)} positional arguments ({len(args)} given)" ) self._model_set_axis = model_set_axis self._param_metrics = defaultdict(dict) for idx, arg in enumerate(args): if arg is None: # A value of None implies using the default value, if exists continue # We use quantity_asanyarray here instead of np.asanyarray because # if any of the arguments are quantities, we need to return a # Quantity object not a plain Numpy array. param_name = self.param_names[idx] params.add(param_name) if not isinstance(arg, Parameter): value = quantity_asanyarray(arg, dtype=float) else: value = arg self._initialize_parameter_value(param_name, value) # At this point the only remaining keyword arguments should be # parameter names; any others are in error. for param_name in self.param_names: if param_name in kwargs: if param_name in params: raise TypeError( f"{self.__class__.__name__}.__init__() got multiple values for" f" parameter {param_name!r}" ) value = kwargs.pop(param_name) if value is None: continue # We use quantity_asanyarray here instead of np.asanyarray # because if any of the arguments are quantities, we need # to return a Quantity object not a plain Numpy array. 
value = quantity_asanyarray(value, dtype=float) params.add(param_name) self._initialize_parameter_value(param_name, value) # Now deal with case where param_name is not supplied by args or kwargs for param_name in self.param_names: if param_name not in params: self._initialize_parameter_value(param_name, None) if kwargs: # If any keyword arguments were left over at this point they are # invalid--the base class should only be passed the parameter # values, constraints, and param_dim for kwarg in kwargs: # Just raise an error on the first unrecognized argument raise TypeError( f"{self.__class__.__name__}.__init__() got an unrecognized" f" parameter {kwarg!r}" ) # Determine the number of model sets: If the model_set_axis is # None then there is just one parameter set; otherwise it is determined # by the size of that axis on the first parameter--if the other # parameters don't have the right number of axes or the sizes of their # model_set_axis don't match an error is raised if model_set_axis is not False and n_models != 1 and params: max_ndim = 0 if model_set_axis < 0: min_ndim = abs(model_set_axis) else: min_ndim = model_set_axis + 1 for name in self.param_names: value = getattr(self, name) param_ndim = np.ndim(value) if param_ndim < min_ndim: raise InputParameterError( "All parameter values must be arrays of dimension at least" f" {min_ndim} for model_set_axis={model_set_axis} (the value" f" given for {name!r} is only {param_ndim}-dimensional)" ) max_ndim = max(max_ndim, param_ndim) if n_models is None: # Use the dimensions of the first parameter to determine # the number of model sets n_models = value.shape[model_set_axis] elif value.shape[model_set_axis] != n_models: raise InputParameterError( f"Inconsistent dimensions for parameter {name!r} for" f" {n_models} model sets. The length of axis" f" {model_set_axis} must be the same for all input parameter" " values" ) self._check_param_broadcast(max_ndim) else: if n_models is None: n_models = 1 self._check_param_broadcast(None) self._n_models = n_models # now validate parameters for name in params: param = getattr(self, name) if param._validator is not None: param._validator(self, param.value)
(self, args, kwargs)
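A brief usage sketch of model sets, which the initialization above supports, assuming the public astropy.modeling API:

import numpy as np
from astropy.modeling.models import Gaussian1D

# Two models in one set: parameter arrays are stacked along model_set_axis=0.
g = Gaussian1D(amplitude=[1.0, 2.0], mean=[0.0, 0.5], stddev=[1.0, 1.0], n_models=2)
print(len(g))                               # 2
print(g.param_sets.shape)                   # (3, 2): parameters x models
x = np.linspace(-1.0, 1.0, 5)
print(g(x, model_set_axis=False).shape)     # (2, 5): same x evaluated by both models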
44,172
astropy.modeling.core
_initialize_setters
This exists to inject defaults for settable properties for models originating from `custom_model`.
def _initialize_setters(self, kwargs):
    """
    This exists to inject defaults for settable properties for models
    originating from `custom_model`.
    """
    if hasattr(self, "_settable_properties"):
        setters = {
            name: kwargs.pop(name, default)
            for name, default in self._settable_properties.items()
        }
        for name, value in setters.items():
            setattr(self, name, value)
    return kwargs
(self, kwargs)
44,173
astropy.modeling.core
_initialize_slices
null
def _initialize_slices(self):
    param_metrics = self._param_metrics
    total_size = 0
    for name in self.param_names:
        param = getattr(self, name)
        value = param.value
        param_size = np.size(value)
        param_shape = np.shape(value)
        param_slice = slice(total_size, total_size + param_size)
        param_metrics[name]["slice"] = param_slice
        param_metrics[name]["shape"] = param_shape
        param_metrics[name]["size"] = param_size
        total_size += param_size
    self._parameters = np.empty(total_size, dtype=np.float64)
(self)
44,174
astropy.modeling.core
_initialize_unit_support
Convert self._input_units_strict and self.input_units_allow_dimensionless to dictionaries mapping input name to a boolean value.
def _initialize_unit_support(self):
    """
    Convert self._input_units_strict and
    self.input_units_allow_dimensionless to dictionaries mapping
    input name to a boolean value.
    """
    if isinstance(self._input_units_strict, bool):
        self._input_units_strict = {
            key: self._input_units_strict for key in self.inputs
        }
    if isinstance(self._input_units_allow_dimensionless, bool):
        self._input_units_allow_dimensionless = {
            key: self._input_units_allow_dimensionless for key in self.inputs
        }
(self)
44,175
astropy.modeling.core
_param_sets
Implementation of the Model.param_sets property. This internal implementation has a ``raw`` argument which controls whether or not to return the raw parameter values (i.e. the values that are actually stored in the ``._parameters`` array), as opposed to the values displayed to users. In most cases these are one and the same, but there are currently a few exceptions. Note: This is notably an overcomplicated device and may be removed entirely in the near future.
def _param_sets(self, raw=False, units=False): """ Implementation of the Model.param_sets property. This internal implementation has a ``raw`` argument which controls whether or not to return the raw parameter values (i.e. the values that are actually stored in the ._parameters array, as opposed to the values displayed to users. In most cases these are one in the same but there are currently a few exceptions. Note: This is notably an overcomplicated device and may be removed entirely in the near future. """ values = [] shapes = [] for name in self.param_names: param = getattr(self, name) if raw and param._setter: value = param._internal_value else: value = param.value broadcast_shape = self._param_metrics[name].get("broadcast_shape") if broadcast_shape is not None: value = value.reshape(broadcast_shape) shapes.append(np.shape(value)) if len(self) == 1: # Add a single param set axis to the parameter's value (thus # converting scalars to shape (1,) array values) for # consistency value = np.array([value]) if units: if raw and param.internal_unit is not None: unit = param.internal_unit else: unit = param.unit if unit is not None: value = Quantity(value, unit, subok=True) values.append(value) if len(set(shapes)) != 1 or units: # If the parameters are not all the same shape, converting to an # array is going to produce an object array # However the way Numpy creates object arrays is tricky in that it # will recurse into array objects in the list and break them up # into separate objects. Doing things this way ensures a 1-D # object array the elements of which are the individual parameter # arrays. There's not much reason to do this over returning a list # except for consistency psets = np.empty(len(values), dtype=object) psets[:] = values return psets return np.array(values)
(self, raw=False, units=False)
44,176
astropy.modeling.core
_parameters_to_array
null
def _parameters_to_array(self):
    # Now set the parameter values (this will also fill
    # self._parameters)
    param_metrics = self._param_metrics
    for name in self.param_names:
        param = getattr(self, name)
        value = param.value
        if not isinstance(value, np.ndarray):
            value = np.array([value])
        self._parameters[param_metrics[name]["slice"]] = value.ravel()
    # Finally validate all the parameters; we do this last so that
    # validators that depend on one of the other parameters' values will
    # work
(self)
44,177
astropy.modeling.core
_post_evaluate
Model specific post evaluation processing of outputs.
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
    """
    Model specific post evaluation processing of outputs.
    """
    if self.get_bounding_box(with_bbox) is None and self.n_outputs == 1:
        outputs = (outputs,)
    outputs = self.prepare_outputs(broadcasted_shapes, *outputs, **kwargs)
    outputs = self._process_output_units(inputs, outputs)
    if self.n_outputs == 1:
        return outputs[0]
    return outputs
(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs)
44,178
astropy.modeling.core
_pre_evaluate
Model specific input setup that needs to occur prior to model evaluation.
def _pre_evaluate(self, *args, **kwargs):
    """
    Model specific input setup that needs to occur prior to model evaluation.
    """
    # Broadcast inputs into common size
    inputs, broadcasted_shapes = self.prepare_inputs(*args, **kwargs)

    # Setup actual model evaluation method
    parameters = self._param_sets(raw=True, units=True)

    def evaluate(_inputs):
        return self.evaluate(*_inputs, *parameters)

    return evaluate, inputs, broadcasted_shapes, kwargs
(self, *args, **kwargs)
44,179
astropy.modeling.core
_prepare_inputs_model_set
null
def _prepare_inputs_model_set(self, params, inputs, model_set_axis_input, **kwargs): reshaped = [] pivots = [] model_set_axis_param = self.model_set_axis # needed to reshape param for idx, _input in enumerate(inputs): max_param_shape = () if self._n_models > 1 and model_set_axis_input is not False: # Use the shape of the input *excluding* the model axis input_shape = ( _input.shape[:model_set_axis_input] + _input.shape[model_set_axis_input + 1 :] ) else: input_shape = _input.shape for param in params: try: check_broadcast( input_shape, self._remove_axes_from_shape(param.shape, model_set_axis_param), ) except IncompatibleShapeError: raise ValueError( f"Model input argument {self.inputs[idx]!r} of shape" f" {input_shape!r} " f"cannot be broadcast with parameter {param.name!r} of shape " f"{self._remove_axes_from_shape(param.shape, model_set_axis_param)!r}." ) if len(param.shape) - 1 > len(max_param_shape): max_param_shape = self._remove_axes_from_shape( param.shape, model_set_axis_param ) # We've now determined that, excluding the model_set_axis, the # input can broadcast with all the parameters input_ndim = len(input_shape) if model_set_axis_input is False: if len(max_param_shape) > input_ndim: # Just needs to prepend new axes to the input n_new_axes = 1 + len(max_param_shape) - input_ndim new_axes = (1,) * n_new_axes new_shape = new_axes + _input.shape pivot = model_set_axis_param else: pivot = input_ndim - len(max_param_shape) new_shape = _input.shape[:pivot] + (1,) + _input.shape[pivot:] new_input = _input.reshape(new_shape) else: if len(max_param_shape) >= input_ndim: n_new_axes = len(max_param_shape) - input_ndim pivot = self.model_set_axis new_axes = (1,) * n_new_axes new_shape = ( _input.shape[: pivot + 1] + new_axes + _input.shape[pivot + 1 :] ) new_input = _input.reshape(new_shape) else: pivot = _input.ndim - len(max_param_shape) - 1 new_input = np.rollaxis(_input, model_set_axis_input, pivot + 1) pivots.append(pivot) reshaped.append(new_input) if self.n_inputs < self.n_outputs: pivots.extend([model_set_axis_input] * (self.n_outputs - self.n_inputs)) return reshaped, (pivots,)
(self, params, inputs, model_set_axis_input, **kwargs)
44,180
astropy.modeling.core
_prepare_inputs_single_model
null
def _prepare_inputs_single_model(self, params, inputs, **kwargs): broadcasts = [] for idx, _input in enumerate(inputs): input_shape = _input.shape # Ensure that array scalars are always upgrade to 1-D arrays for the # sake of consistency with how parameters work. They will be cast back # to scalars at the end if not input_shape: inputs[idx] = _input.reshape((1,)) if not params: max_broadcast = input_shape else: max_broadcast = () for param in params: try: if self.standard_broadcasting: broadcast = check_broadcast(input_shape, param.shape) else: broadcast = input_shape except IncompatibleShapeError: raise ValueError( f"self input argument {self.inputs[idx]!r} of shape" f" {input_shape!r} cannot be broadcast with parameter" f" {param.name!r} of shape {param.shape!r}." ) if len(broadcast) > len(max_broadcast): max_broadcast = broadcast elif len(broadcast) == len(max_broadcast): max_broadcast = max(max_broadcast, broadcast) broadcasts.append(max_broadcast) if self.n_outputs > self.n_inputs: extra_outputs = self.n_outputs - self.n_inputs if not broadcasts: # If there were no inputs then the broadcasts list is empty # just add a None since there is no broadcasting of outputs and # inputs necessary (see _prepare_outputs_single_self) broadcasts.append(None) broadcasts.extend([broadcasts[0]] * extra_outputs) return inputs, (broadcasts,)
(self, params, inputs, **kwargs)
44,181
astropy.modeling.core
_prepare_output_single_model
null
@staticmethod
def _prepare_output_single_model(output, broadcast_shape):
    if broadcast_shape is not None:
        if not broadcast_shape:
            return output.item()
        else:
            try:
                return output.reshape(broadcast_shape)
            except ValueError:
                try:
                    return output.item()
                except ValueError:
                    return output
    return output
(output, broadcast_shape)
44,182
astropy.modeling.core
_prepare_outputs_model_set
null
def _prepare_outputs_model_set(self, outputs, broadcasted_shapes, model_set_axis):
    pivots = broadcasted_shapes[0]
    # If model_set_axis = False was passed then use
    # self._model_set_axis to format the output.
    if model_set_axis is None or model_set_axis is False:
        model_set_axis = self.model_set_axis
    outputs = list(outputs)
    for idx, output in enumerate(outputs):
        pivot = pivots[idx]
        if pivot < output.ndim and pivot != model_set_axis:
            outputs[idx] = np.rollaxis(output, pivot, model_set_axis)
    return tuple(outputs)
(self, outputs, broadcasted_shapes, model_set_axis)
44,183
astropy.modeling.core
_prepare_outputs_single_model
null
def _prepare_outputs_single_model(self, outputs, broadcasted_shapes):
    outputs = list(outputs)
    for idx, output in enumerate(outputs):
        try:
            broadcast_shape = check_broadcast(*broadcasted_shapes[0])
        except (IndexError, TypeError):
            broadcast_shape = broadcasted_shapes[0][idx]
        outputs[idx] = self._prepare_output_single_model(output, broadcast_shape)
    return tuple(outputs)
(self, outputs, broadcasted_shapes)
44,184
astropy.modeling.core
_process_output_units
null
def _process_output_units(self, inputs, outputs):
    inputs_are_quantity = any(isinstance(i, Quantity) for i in inputs)
    if self.return_units and inputs_are_quantity:
        # We allow a non-iterable unit only if there is one output
        if self.n_outputs == 1 and not isiterable(self.return_units):
            return_units = {self.outputs[0]: self.return_units}
        else:
            return_units = self.return_units
        outputs = tuple(
            Quantity(out, return_units.get(out_name, None), subok=True)
            for out, out_name in zip(outputs, self.outputs)
        )
    return outputs
(self, inputs, outputs)
44,185
astropy.modeling.core
_remove_axes_from_shape
Given a shape tuple as the first input, construct a new one by removing that particular axis from the shape and all preceding axes. Negative axis numbers are permitted, where the axis is relative to the last axis.
@staticmethod
def _remove_axes_from_shape(shape, axis):
    """
    Given a shape tuple as the first input, construct a new one by
    removing that particular axis from the shape and all preceding axes.
    Negative axis numbers are permitted, where the axis is relative to
    the last axis.
    """
    if len(shape) == 0:
        return shape
    if axis < 0:
        axis = len(shape) + axis
        return shape[:axis] + shape[axis + 1 :]
    if axis >= len(shape):
        axis = len(shape) - 1
    shape = shape[axis + 1 :]
    return shape
(shape, axis)
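Worked examples of the helper above; a standalone re-implementation is shown so the snippet is self-contained, with the behavior following the docstring:

def remove_axes_from_shape(shape, axis):
    # Positive axis: drop that axis and all preceding ones.
    # Negative axis: drop only that axis.
    if len(shape) == 0:
        return shape
    if axis < 0:
        axis = len(shape) + axis
        return shape[:axis] + shape[axis + 1 :]
    if axis >= len(shape):
        axis = len(shape) - 1
    return shape[axis + 1 :]

print(remove_axes_from_shape((3, 4, 5), 1))     # (5,)
print(remove_axes_from_shape((3, 4, 5), -2))    # (3, 5)
print(remove_axes_from_shape((3, 4, 5), 10))    # (): axis clipped to the last one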
44,186
astropy.modeling.core
_strip_ones
null
@staticmethod
def _strip_ones(intup):
    return tuple(item for item in intup if item != 1)
(intup)
44,187
astropy.modeling.core
_validate_input_shape
Perform basic validation of a single model input's shape: check that it has the minimum dimensions for the given model_set_axis. Returns the shape of the input if validation succeeds.
def _validate_input_shape( self, _input, idx, argnames, model_set_axis, check_model_set_axis ): """Perform basic validation of a single model input's shape. The shape has the minimum dimensions for the given model_set_axis. Returns the shape of the input if validation succeeds. """ input_shape = np.shape(_input) # Ensure that the input's model_set_axis matches the model's # n_models if input_shape and check_model_set_axis: # Note: Scalar inputs *only* get a pass on this if len(input_shape) < model_set_axis + 1: raise ValueError( f"For model_set_axis={model_set_axis}, all inputs must be at " f"least {model_set_axis + 1}-dimensional." ) if input_shape[model_set_axis] != self._n_models: try: argname = argnames[idx] except IndexError: # the case of model.inputs = () argname = str(idx) raise ValueError( f"Input argument '{argname}' does not have the correct dimensions" f" in model_set_axis={model_set_axis} for a model set with" f" n_models={self._n_models}." ) return input_shape
(self, _input, idx, argnames, model_set_axis, check_model_set_axis)
44,188
astropy.modeling.core
_validate_input_shapes
Perform basic validation of model inputs --that they are mutually broadcastable and that they have the minimum dimensions for the given model_set_axis. If validation succeeds, returns the total shape that will result from broadcasting the input arrays with each other.
def _validate_input_shapes(self, inputs, argnames, model_set_axis): """ Perform basic validation of model inputs --that they are mutually broadcastable and that they have the minimum dimensions for the given model_set_axis. If validation succeeds, returns the total shape that will result from broadcasting the input arrays with each other. """ check_model_set_axis = self._n_models > 1 and model_set_axis is not False all_shapes = [] for idx, _input in enumerate(inputs): all_shapes.append( self._validate_input_shape( _input, idx, argnames, model_set_axis, check_model_set_axis ) ) try: input_shape = check_broadcast(*all_shapes) except IncompatibleShapeError as e: raise ValueError( "All inputs must have identical shapes or must be scalars." ) from e return input_shape
(self, inputs, argnames, model_set_axis)
44,189
astropy.modeling.core
_validate_input_units
null
def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None): inputs = list(inputs) name = self.name or self.__class__.__name__ # Check that the units are correct, if applicable if self.input_units is not None: # If a leaflist is provided that means this is in the context of # a compound model and it is necessary to create the appropriate # alias for the input coordinate name for the equivalencies dict if inputs_map: edict = {} for mod, mapping in inputs_map: if self is mod: edict[mapping[0]] = equivalencies[mapping[1]] else: edict = equivalencies # We combine any instance-level input equivalencies with user # specified ones at call-time. input_units_equivalencies = _combine_equivalency_dict( self.inputs, edict, self.input_units_equivalencies ) # We now iterate over the different inputs and make sure that their # units are consistent with those specified in input_units. for i in range(len(inputs)): input_name = self.inputs[i] input_unit = self.input_units.get(input_name, None) if input_unit is None: continue if isinstance(inputs[i], Quantity): # We check for consistency of the units with input_units, # taking into account any equivalencies if inputs[i].unit.is_equivalent( input_unit, equivalencies=input_units_equivalencies[input_name] ): # If equivalencies have been specified, we need to # convert the input to the input units - this is # because some equivalencies are non-linear, and # we need to be sure that we evaluate the model in # its own frame of reference. If input_units_strict # is set, we also need to convert to the input units. if ( len(input_units_equivalencies) > 0 or self.input_units_strict[input_name] ): inputs[i] = inputs[i].to( input_unit, equivalencies=input_units_equivalencies[input_name], ) else: # We consider the following two cases separately so as # to be able to raise more appropriate/nicer exceptions if input_unit is dimensionless_unscaled: raise UnitsError( f"{name}: Units of input '{self.inputs[i]}', " f"{inputs[i].unit} ({inputs[i].unit.physical_type})," "could not be converted to " "required dimensionless " "input" ) else: raise UnitsError( f"{name}: Units of input '{self.inputs[i]}', " f"{inputs[i].unit} ({inputs[i].unit.physical_type})," " could not be " "converted to required input" f" units of {input_unit} ({input_unit.physical_type})" ) else: # If we allow dimensionless input, we add the units to the # input values without conversion, otherwise we raise an # exception. if ( not self.input_units_allow_dimensionless[input_name] and input_unit is not dimensionless_unscaled and input_unit is not None ): if np.any(inputs[i] != 0): raise UnitsError( f"{name}: Units of input '{self.inputs[i]}'," " (dimensionless), could not be converted to required " f"input units of {input_unit} " f"({input_unit.physical_type})" ) return inputs
(self, inputs, equivalencies=None, inputs_map=None)
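A usage sketch of the unit handling above, assuming the public astropy.modeling API; both the equivalent-unit conversion and the failure mode are shown:

from astropy import units as u
from astropy.modeling.models import Gaussian1D

g = Gaussian1D(amplitude=1 * u.Jy, mean=1 * u.um, stddev=0.1 * u.um)
print(g.input_units)       # {'x': Unit("micron")}
print(g(1000 * u.nm))      # nm is equivalent to um, so the input is accepted: 1.0 Jy
try:
    g(1 * u.s)             # inconvertible unit
except u.UnitsError as err:
    print(err)             # "... could not be converted to required input units ..."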
44,190
astropy.modeling.core
coerce_units
Attach units to this (unitless) model. Parameters ---------- input_units : dict or tuple, optional Input units to attach. If dict, each key is the name of a model input, and the value is the unit to attach. If tuple, the elements are units to attach in order corresponding to `Model.inputs`. return_units : dict or tuple, optional Output units to attach. If dict, each key is the name of a model output, and the value is the unit to attach. If tuple, the elements are units to attach in order corresponding to `Model.outputs`. input_units_equivalencies : dict, optional Default equivalencies to apply to input values. If set, this should be a dictionary where each key is a string that corresponds to one of the model inputs. input_units_allow_dimensionless : bool or dict, optional Allow dimensionless input. If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. Returns ------- `CompoundModel` A `CompoundModel` composed of the current model plus `~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units. Raises ------ ValueError If the current model already has units. Examples -------- Wrapping a unitless model to require and convert units: >>> from astropy.modeling.models import Polynomial1D >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = poly.coerce_units((u.m,), (u.s,)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP <Quantity 1.2 s> Wrapping a unitless model but still permitting unitless input: >>> from astropy.modeling.models import Polynomial1D >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(10) # doctest: +FLOAT_CMP <Quantity 21. s>
def coerce_units( self, input_units=None, return_units=None, input_units_equivalencies=None, input_units_allow_dimensionless=False, ): """ Attach units to this (unitless) model. Parameters ---------- input_units : dict or tuple, optional Input units to attach. If dict, each key is the name of a model input, and the value is the unit to attach. If tuple, the elements are units to attach in order corresponding to `Model.inputs`. return_units : dict or tuple, optional Output units to attach. If dict, each key is the name of a model output, and the value is the unit to attach. If tuple, the elements are units to attach in order corresponding to `Model.outputs`. input_units_equivalencies : dict, optional Default equivalencies to apply to input values. If set, this should be a dictionary where each key is a string that corresponds to one of the model inputs. input_units_allow_dimensionless : bool or dict, optional Allow dimensionless input. If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. Returns ------- `CompoundModel` A `CompoundModel` composed of the current model plus `~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units. Raises ------ ValueError If the current model already has units. Examples -------- Wrapping a unitless model to require and convert units: >>> from astropy.modeling.models import Polynomial1D >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = poly.coerce_units((u.m,), (u.s,)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP <Quantity 1.2 s> Wrapping a unitless model but still permitting unitless input: >>> from astropy.modeling.models import Polynomial1D >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(10) # doctest: +FLOAT_CMP <Quantity 21. 
s> """ from .mappings import UnitsMapping result = self if input_units is not None: if self.input_units is not None: model_units = self.input_units else: model_units = {} for unit in [model_units.get(i) for i in self.inputs]: if unit is not None and unit != dimensionless_unscaled: raise ValueError( "Cannot specify input_units for model with existing input units" ) if isinstance(input_units, dict): if input_units.keys() != set(self.inputs): message = ( f"""input_units keys ({", ".join(input_units.keys())}) """ f"""do not match model inputs ({", ".join(self.inputs)})""" ) raise ValueError(message) input_units = [input_units[i] for i in self.inputs] if len(input_units) != self.n_inputs: message = ( "input_units length does not match n_inputs: " f"expected {self.n_inputs}, received {len(input_units)}" ) raise ValueError(message) mapping = tuple( (unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units) ) input_mapping = UnitsMapping( mapping, input_units_equivalencies=input_units_equivalencies, input_units_allow_dimensionless=input_units_allow_dimensionless, ) input_mapping.inputs = self.inputs input_mapping.outputs = self.inputs result = input_mapping | result if return_units is not None: if self.return_units is not None: model_units = self.return_units else: model_units = {} for unit in [model_units.get(i) for i in self.outputs]: if unit is not None and unit != dimensionless_unscaled: raise ValueError( "Cannot specify return_units for model " "with existing output units" ) if isinstance(return_units, dict): if return_units.keys() != set(self.outputs): message = ( f"""return_units keys ({", ".join(return_units.keys())}) """ f"""do not match model outputs ({", ".join(self.outputs)})""" ) raise ValueError(message) return_units = [return_units[i] for i in self.outputs] if len(return_units) != self.n_outputs: message = ( "return_units length does not match n_outputs: " f"expected {self.n_outputs}, received {len(return_units)}" ) raise ValueError(message) mapping = tuple( (model_units.get(i), unit) for i, unit in zip(self.outputs, return_units) ) return_mapping = UnitsMapping(mapping) return_mapping.inputs = self.outputs return_mapping.outputs = self.outputs result = result | return_mapping return result
(self, input_units=None, return_units=None, input_units_equivalencies=None, input_units_allow_dimensionless=False)
44,191
astropy.modeling.core
copy
Return a copy of this model. Uses a deep copy so that all model attributes, including parameter values, are copied as well.
def copy(self):
    """
    Return a copy of this model.

    Uses a deep copy so that all model attributes, including parameter
    values, are copied as well.
    """
    return copy.deepcopy(self)
(self)
44,192
astropy.modeling.core
deepcopy
Return a deep copy of this model.
def deepcopy(self):
    """
    Return a deep copy of this model.
    """
    return self.copy()
(self)
44,193
gwcs.selector
evaluate
null
def evaluate(self, *args):
    if self.inputs_mapping is not None:
        args = self.inputs_mapping(*args)
    if self.n_outputs == 1:
        args = [args]
    res = self.mapper(*args)
    if np.isscalar(res):
        res = np.array([res])
    return np.array(res)
(self, *args)
44,194
astropy.modeling.core
get_bounding_box
Return the ``bounding_box`` of a model if it exists or ``None`` otherwise. Parameters ---------- with_bbox : The value of the ``with_bounding_box`` keyword argument when calling the model. Default is `True` for usage when looking up the model's ``bounding_box`` without risk of error.
def get_bounding_box(self, with_bbox=True):
    """
    Return the ``bounding_box`` of a model if it exists or ``None``
    otherwise.

    Parameters
    ----------
    with_bbox :
        The value of the ``with_bounding_box`` keyword argument when
        calling the model. Default is `True` for usage when looking up the
        model's ``bounding_box`` without risk of error.
    """
    bbox = None
    if not isinstance(with_bbox, bool) or with_bbox:
        try:
            bbox = self.bounding_box
        except NotImplementedError:
            pass

        if isinstance(bbox, CompoundBoundingBox) and not isinstance(
            with_bbox, bool
        ):
            bbox = bbox[with_bbox]
    return bbox
(self, with_bbox=True)
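Usage sketch, assuming the public astropy.modeling API; Gaussian1D defines a default bounding_box around its mean, while Shift defines none:

from astropy.modeling.models import Gaussian1D, Shift

g = Gaussian1D(mean=0.0, stddev=1.0)
print(g.get_bounding_box())           # interval around the mean, derived from stddev
print(g.get_bounding_box(False))      # None: the look-up is explicitly disabled
print(Shift(1.0).get_bounding_box())  # None: Shift has no bounding_box defined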
44,195
astropy.modeling.core
has_inverse
Returns True if the model has an analytic or user inverse defined.
def has_inverse(self):
    """
    Returns True if the model has an analytic or user inverse defined.
    """
    try:
        self.inverse  # noqa: B018
    except NotImplementedError:
        return False
    return True
(self)
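A quick check of the behavior above, assuming the public astropy.modeling API:

from astropy.modeling.models import Gaussian1D, Shift

print(Shift(2.0).has_inverse())      # True: Shift provides an analytic inverse
g = Gaussian1D()
print(g.has_inverse())               # False: no analytic inverse is defined
g.inverse = Shift(0.0)               # a user-assigned inverse also counts
print(g.has_inverse())               # True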
44,196
astropy.modeling.core
input_shape
Get input shape for bounding_box evaluation.
def input_shape(self, inputs):
    """Get input shape for bounding_box evaluation."""
    return self._validate_input_shapes(inputs, self._argnames, self.model_set_axis)
(self, inputs)
44,197
astropy.modeling.core
output_units
Return a dictionary of output units for this model given a dictionary of fitting inputs and outputs. The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). This method will force extra model evaluations, which may be computationally expensive. To avoid this, one can add a return_units property to the model, see :ref:`astropy:models_return_units`.
def output_units(self, **kwargs):
    """
    Return a dictionary of output units for this model given a dictionary
    of fitting inputs and outputs.

    The input and output Quantity objects should be given as keyword
    arguments.

    Notes
    -----
    This method is needed in order to be able to fit models with units in
    the parameters, since we need to temporarily strip away the units from
    the model during the fitting (which might be done by e.g. scipy
    functions).

    This method will force extra model evaluations, which may be
    computationally expensive. To avoid this, one can add a return_units
    property to the model, see :ref:`astropy:models_return_units`.
    """
    units = self.return_units

    if units is None or units == {}:
        inputs = {inp: kwargs[inp] for inp in self.inputs}

        values = self(**inputs)
        if self.n_outputs == 1:
            values = (values,)

        units = {
            out: getattr(values[index], "unit", dimensionless_unscaled)
            for index, out in enumerate(self.outputs)
        }

    return units
(self, **kwargs)
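A minimal sketch, assuming the public astropy.modeling API; the keyword name must match the model's input ('x' for Gaussian1D):

from astropy import units as u
from astropy.modeling.models import Gaussian1D

g = Gaussian1D(amplitude=2 * u.Jy, mean=1 * u.um, stddev=0.1 * u.um)
# No return_units are defined, so the model is evaluated once to infer the units.
print(g.output_units(x=1 * u.um))    # {'y': Unit("Jy")}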
44,198
astropy.modeling.core
prepare_inputs
This method is used in `~astropy.modeling.Model.__call__` to ensure that all the inputs to the model can be broadcast into compatible shapes (if one or both of them are input as arrays), particularly if there is more than one parameter set. This also makes sure that (if applicable) the units of the input will be compatible with the evaluate method.
def prepare_inputs( self, *inputs, model_set_axis=None, equivalencies=None, **kwargs ): """ This method is used in `~astropy.modeling.Model.__call__` to ensure that all the inputs to the model can be broadcast into compatible shapes (if one or both of them are input as arrays), particularly if there are more than one parameter sets. This also makes sure that (if applicable) the units of the input will be compatible with the evaluate method. """ # When we instantiate the model class, we make sure that __call__ can # take the following two keyword arguments: model_set_axis and # equivalencies. if model_set_axis is None: # By default the model_set_axis for the input is assumed to be the # same as that for the parameters the model was defined with # TODO: Ensure that negative model_set_axis arguments are respected model_set_axis = self.model_set_axis params = [getattr(self, name) for name in self.param_names] inputs = [np.asanyarray(_input, dtype=float) for _input in inputs] self._validate_input_shapes(inputs, self.inputs, model_set_axis) inputs_map = kwargs.get("inputs_map", None) inputs = self._validate_input_units(inputs, equivalencies, inputs_map) # The input formatting required for single models versus a multiple # model set are different enough that they've been split into separate # subroutines if self._n_models == 1: return self._prepare_inputs_single_model(params, inputs, **kwargs) else: return self._prepare_inputs_model_set( params, inputs, model_set_axis, **kwargs )
(self, *inputs, model_set_axis=None, equivalencies=None, **kwargs)
44,199
astropy.modeling.core
prepare_outputs
null
def prepare_outputs(self, broadcasted_shapes, *outputs, **kwargs):
    model_set_axis = kwargs.get("model_set_axis", None)

    if len(self) == 1:
        return self._prepare_outputs_single_model(outputs, broadcasted_shapes)
    else:
        return self._prepare_outputs_model_set(
            outputs, broadcasted_shapes, model_set_axis
        )
(self, broadcasted_shapes, *outputs, **kwargs)
44,200
astropy.modeling.core
render
Evaluate a model at fixed positions, respecting the ``bounding_box``. The key difference relative to evaluating the model directly is that this method is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- out : `numpy.ndarray`, optional An array that the evaluated model will be added to. If this is not given (or given as ``None``), a new array will be created. coords : array-like, optional An array to be used to translate from the model's input coordinates to the ``out`` array. It should have the property that ``self(coords)`` yields the same shape as ``out``. If ``out`` is not specified, ``coords`` will be used to determine the shape of the returned array. If this is not provided (or None), the model will be evaluated on a grid determined by `Model.bounding_box`. Returns ------- out : `numpy.ndarray` The model added to ``out`` if ``out`` is not ``None``, or else a new array from evaluating the model over ``coords``. If ``out`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``out`` or ``coords`` must be passed. Raises ------ ValueError If ``coords`` are not given and the `Model.bounding_box` of this model is not set. Examples -------- :ref:`astropy:bounding-boxes`
def render(self, out=None, coords=None): """ Evaluate a model at fixed positions, respecting the ``bounding_box``. The key difference relative to evaluating the model directly is that this method is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- out : `numpy.ndarray`, optional An array that the evaluated model will be added to. If this is not given (or given as ``None``), a new array will be created. coords : array-like, optional An array to be used to translate from the model's input coordinates to the ``out`` array. It should have the property that ``self(coords)`` yields the same shape as ``out``. If ``out`` is not specified, ``coords`` will be used to determine the shape of the returned array. If this is not provided (or None), the model will be evaluated on a grid determined by `Model.bounding_box`. Returns ------- out : `numpy.ndarray` The model added to ``out`` if ``out`` is not ``None``, or else a new array from evaluating the model over ``coords``. If ``out`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. Raises ------ ValueError If ``coords`` are not given and the `Model.bounding_box` of this model is not set. Examples -------- :ref:`astropy:bounding-boxes` """ try: bbox = self.bounding_box except NotImplementedError: bbox = None if isinstance(bbox, ModelBoundingBox): bbox = bbox.bounding_box() ndim = self.n_inputs if (coords is None) and (out is None) and (bbox is None): raise ValueError("If no bounding_box is set, coords or out must be input.") # for consistent indexing if ndim == 1: if coords is not None: coords = [coords] if bbox is not None: bbox = [bbox] if coords is not None: coords = np.asanyarray(coords, dtype=float) # Check dimensions match out and model assert len(coords) == ndim if out is not None: if coords[0].shape != out.shape: raise ValueError("inconsistent shape of the output.") else: out = np.zeros(coords[0].shape) if out is not None: out = np.asanyarray(out) if out.ndim != ndim: raise ValueError( "the array and model must have the same number of dimensions." ) if bbox is not None: # Assures position is at center pixel, # important when using add_array. pd = ( np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox]) .astype(int) .T ) pos, delta = pd if coords is not None: sub_shape = tuple(delta * 2 + 1) sub_coords = np.array( [extract_array(c, sub_shape, pos) for c in coords] ) else: limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T] sub_coords = np.mgrid[limits] sub_coords = sub_coords[::-1] if out is None: out = self(*sub_coords) else: try: out = add_array(out, self(*sub_coords), pos) except ValueError: raise ValueError( "The `bounding_box` is larger than the input out in " "one or more dimensions. Set " "`model.bounding_box = None`." ) else: if coords is None: im_shape = out.shape limits = [slice(i) for i in im_shape] coords = np.mgrid[limits] coords = coords[::-1] out += self(*coords) return out
(self, out=None, coords=None)
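Sketch of rendering into a target array, assuming the public astropy API; the bounding box chosen below is illustrative:

import numpy as np
from astropy.modeling.models import Gaussian2D

g = Gaussian2D(amplitude=1.0, x_mean=25, y_mean=25, x_stddev=2.0, y_stddev=2.0)
g.bounding_box = ((20, 30), (20, 30))   # ((y_low, y_high), (x_low, x_high))
out = np.zeros((50, 50))
out = g.render(out)                     # model added only inside the bounding box
print(out[25, 25], out[0, 0])           # ~1.0 at the peak, 0.0 outside the box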
44,201
astropy.modeling.core
set_slice_args
null
def set_slice_args(self, *args):
    if isinstance(self._user_bounding_box, CompoundBoundingBox):
        self._user_bounding_box.slice_args = args
    else:
        raise RuntimeError("The bounding_box for this model is not compound")
(self, *args)
44,202
astropy.modeling.core
strip_units_from_tree
null
def strip_units_from_tree(self):
    for item in self._leaflist:
        for parname in item.param_names:
            par = getattr(item, parname)
            par._set_unit(None, force=True)
(self)
44,203
astropy.modeling.core
sum_of_implicit_terms
Evaluate the sum of any implicit model terms on some input variables. This includes any fixed terms used in evaluating a linear model that do not have corresponding parameters exposed to the user. The prototypical case is `astropy.modeling.functional_models.Shift`, which corresponds to a function y = a + bx, where b=1 is intrinsically fixed by the type of model, such that sum_of_implicit_terms(x) == x. This method is needed by linear fitters to correct the dependent variable for the implicit term(s) when solving for the remaining terms (i.e. a = y - bx).
def sum_of_implicit_terms(self, *args, **kwargs):
    """
    Evaluate the sum of any implicit model terms on some input variables.

    This includes any fixed terms used in evaluating a linear model that
    do not have corresponding parameters exposed to the user. The
    prototypical case is `astropy.modeling.functional_models.Shift`, which
    corresponds to a function y = a + bx, where b=1 is intrinsically fixed
    by the type of model, such that sum_of_implicit_terms(x) == x. This
    method is needed by linear fitters to correct the dependent variable
    for the implicit term(s) when solving for the remaining terms
    (i.e. a = y - bx).
    """
(self, *args, **kwargs)
44,204
astropy.modeling.core
with_units_from_data
Return an instance of the model which has units for which the parameter values are compatible with the data units specified. The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). The units that the parameters will gain are not necessarily the units of the input data, but are derived from them. Model subclasses that want fitting to work in the presence of quantities need to define a ``_parameter_units_for_data_units`` method that takes the input and output units (as two dictionaries) and returns a dictionary giving the target units for each parameter.
def with_units_from_data(self, **kwargs): """ Return an instance of the model which has units for which the parameter values are compatible with the data units specified. The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). The units that the parameters will gain are not necessarily the units of the input data, but are derived from them. Model subclasses that want fitting to work in the presence of quantities need to define a ``_parameter_units_for_data_units`` method that takes the input and output units (as two dictionaries) and returns a dictionary giving the target units for each parameter. """ model = self.copy() inputs_unit = { inp: getattr(kwargs[inp], "unit", dimensionless_unscaled) for inp in self.inputs if kwargs[inp] is not None } outputs_unit = { out: getattr(kwargs[out], "unit", dimensionless_unscaled) for out in self.outputs if kwargs[out] is not None } parameter_units = self._parameter_units_for_data_units( inputs_unit, outputs_unit ) # We are adding units to parameters that already have a value, but we # don't want to convert the parameter, just add the unit directly, # hence the call to ``_set_unit``. for name, unit in parameter_units.items(): parameter = getattr(model, name) parameter._set_unit(unit, force=True) return model
(self, **kwargs)
44,205
astropy.modeling.core
without_units_for_data
Return an instance of the model for which the parameter values have been converted to the right units for the data, then the units have been stripped away. The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). The units that the parameters should be converted to are not necessarily the units of the input data, but are derived from them. Model subclasses that want fitting to work in the presence of quantities need to define a ``_parameter_units_for_data_units`` method that takes the input and output units (as two dictionaries) and returns a dictionary giving the target units for each parameter.
def without_units_for_data(self, **kwargs): """ Return an instance of the model for which the parameter values have been converted to the right units for the data, then the units have been stripped away. The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). The units that the parameters should be converted to are not necessarily the units of the input data, but are derived from them. Model subclasses that want fitting to work in the presence of quantities need to define a ``_parameter_units_for_data_units`` method that takes the input and output units (as two dictionaries) and returns a dictionary giving the target units for each parameter. """ model = self.copy() inputs_unit = { inp: getattr(kwargs[inp], "unit", dimensionless_unscaled) for inp in self.inputs if kwargs[inp] is not None } outputs_unit = { out: getattr(kwargs[out], "unit", dimensionless_unscaled) for out in self.outputs if kwargs[out] is not None } parameter_units = self._parameter_units_for_data_units( inputs_unit, outputs_unit ) for name, unit in parameter_units.items(): parameter = getattr(model, name) if parameter.unit is not None: parameter.value = parameter.quantity.to(unit).value parameter._set_unit(None, force=True) if isinstance(model, CompoundModel): model.strip_units_from_tree() return model
(self, **kwargs)
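A sketch of the strip/restore round trip these two methods provide for the fitters, assuming the public astropy.modeling API; the data values are illustrative:

import numpy as np
from astropy import units as u
from astropy.modeling.models import Gaussian1D

g = Gaussian1D(amplitude=1 * u.Jy, mean=1 * u.um, stddev=0.1 * u.um)
x = np.linspace(0.5, 1.5, 11) * u.um
y = g(x)

unitless = g.without_units_for_data(x=x, y=y)   # parameters converted, units stripped
print(unitless.mean.unit)                       # None
restored = unitless.with_units_from_data(x=x, y=y)
print(restored.mean.quantity)                   # 1.0 micron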