index (int64, 0–731k) | package (string, 2–98 chars, nullable) | name (string, 1–76 chars) | docstring (string, 0–281k chars, nullable) | code (string, 4–1.07M chars, nullable) | signature (string, 2–42.8k chars, nullable) |
---|---|---|---|---|---|
61,883 |
collections
|
_replace
|
Return a new Connection object replacing specified fields with new values
|
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = ', '.join(field_names)
if num_fields == 1:
arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
namespace = {
'_tuple_new': tuple_new,
'__builtins__': {},
'__name__': f'namedtuple_{typename}',
}
code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
__new__ = eval(code, namespace)
__new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(self, /, **kwds):
result = self._make(_map(kwds.pop, field_names, self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new dict which maps field names to their values.'
return _dict(_zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return _tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (
__new__,
_make.__func__,
_replace,
__repr__,
_asdict,
__getnewargs__,
):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
'__match_args__': field_names,
}
for index, name in enumerate(field_names):
doc = _sys.intern(f'Alias for field number {index}')
class_namespace[name] = _tuplegetter(index, doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
|
(self, /, **kwds)
|
61,884 |
graphql_relay.connection.connection
|
ConnectionConstructor
| null |
class ConnectionConstructor(Protocol):
def __call__(
self,
*,
edges: List[EdgeType],
pageInfo: PageInfoType,
) -> ConnectionType:
...
|
(*args, **kwargs)
|
61,885 |
graphql_relay.connection.connection
|
__call__
| null |
def __call__(
self,
*,
edges: List[EdgeType],
pageInfo: PageInfoType,
) -> ConnectionType:
...
|
(self, *, edges: List[graphql_relay.connection.connection.EdgeType], pageInfo: graphql_relay.connection.connection.PageInfoType) -> graphql_relay.connection.connection.ConnectionType
|
61,889 |
graphql_relay.connection.connection
|
ConnectionType
| null |
class ConnectionType(Protocol):
@property
def edges(self) -> List[EdgeType]:
...
@property
def pageInfo(self) -> PageInfoType:
...
|
(*args, **kwargs)
|
61,892 |
graphql_relay.connection.connection
|
Edge
|
A type designed to be exposed as an `Edge` over GraphQL.
|
class Edge(NamedTuple):
    """A type designed to be exposed as an `Edge` over GraphQL."""
node: Any
cursor: ConnectionCursor
|
(node: Any, cursor: str)
|
61,894 |
namedtuple_Edge
|
__new__
|
Create new instance of Edge(node, cursor)
|
from builtins import function
|
(_cls, node: Any, cursor: str)
|
61,897 |
collections
|
_replace
|
Return a new Edge object replacing specified fields with new values
|
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
    ...  # full source identical to the `namedtuple` listing in row 61,883 above
|
(self, /, **kwds)
|
61,898 |
graphql_relay.connection.connection
|
EdgeConstructor
| null |
class EdgeConstructor(Protocol):
def __call__(self, *, node: Any, cursor: ConnectionCursor) -> EdgeType:
...
|
(*args, **kwargs)
|
61,899 |
graphql_relay.connection.connection
|
__call__
| null |
def __call__(self, *, node: Any, cursor: ConnectionCursor) -> EdgeType:
...
|
(self, *, node: Any, cursor: str) -> graphql_relay.connection.connection.EdgeType
|
61,902 |
graphql_relay.connection.connection
|
EdgeType
| null |
class EdgeType(Protocol):
@property
def node(self) -> Any:
...
@property
def cursor(self) -> ConnectionCursor:
...
|
(*args, **kwargs)
|
61,905 |
graphql_relay.connection.connection
|
GraphQLConnectionDefinitions
|
GraphQLConnectionDefinitions(edge_type, connection_type)
|
class GraphQLConnectionDefinitions(NamedTuple):
edge_type: GraphQLObjectType
connection_type: GraphQLObjectType
|
(edge_type: graphql.type.definition.GraphQLObjectType, connection_type: graphql.type.definition.GraphQLObjectType)
|
61,907 |
namedtuple_GraphQLConnectionDefinitions
|
__new__
|
Create new instance of GraphQLConnectionDefinitions(edge_type, connection_type)
|
from builtins import function
|
(_cls, edge_type: graphql.type.definition.GraphQLObjectType, connection_type: graphql.type.definition.GraphQLObjectType)
|
61,910 |
collections
|
_replace
|
Return a new GraphQLConnectionDefinitions object replacing specified fields with new values
|
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
    ...  # full source identical to the `namedtuple` listing in row 61,883 above
|
(self, /, **kwds)
|
61,911 |
graphql_relay.node.node
|
GraphQLNodeDefinitions
|
GraphQLNodeDefinitions(node_interface, node_field, nodes_field)
|
class GraphQLNodeDefinitions(NamedTuple):
node_interface: GraphQLInterfaceType
node_field: GraphQLField
nodes_field: GraphQLField
|
(node_interface: graphql.type.definition.GraphQLInterfaceType, node_field: graphql.type.definition.GraphQLField, nodes_field: graphql.type.definition.GraphQLField)
|
61,913 |
namedtuple_GraphQLNodeDefinitions
|
__new__
|
Create new instance of GraphQLNodeDefinitions(node_interface, node_field, nodes_field)
|
from builtins import function
|
(_cls, node_interface: graphql.type.definition.GraphQLInterfaceType, node_field: graphql.type.definition.GraphQLField, nodes_field: graphql.type.definition.GraphQLField)
|
61,916 |
collections
|
_replace
|
Return a new GraphQLNodeDefinitions object replacing specified fields with new values
|
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
    ...  # full source identical to the `namedtuple` listing in row 61,883 above
|
(self, /, **kwds)
|
61,917 |
graphql_relay.mutation.mutation
|
NullResult
| null |
class NullResult:
def __init__(self, clientMutationId: Optional[str] = None) -> None:
self.clientMutationId = clientMutationId
|
(clientMutationId: Optional[str] = None) -> None
|
61,918 |
graphql_relay.mutation.mutation
|
__init__
| null |
def __init__(self, clientMutationId: Optional[str] = None) -> None:
self.clientMutationId = clientMutationId
|
(self, clientMutationId: Optional[str] = None) -> NoneType
|
61,919 |
graphql_relay.connection.connection
|
PageInfo
|
A type designed to be exposed as `PageInfo` over GraphQL.
|
class PageInfo(NamedTuple):
"""A type designed to be exposed as `PageInfo` over GraphQL."""
startCursor: Optional[ConnectionCursor]
endCursor: Optional[ConnectionCursor]
hasPreviousPage: bool
hasNextPage: bool
|
(startCursor: Optional[str], endCursor: Optional[str], hasPreviousPage: bool, hasNextPage: bool)
|
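The `Edge` and `PageInfo` named tuples above (together with the `Connection` type referenced in row 61,883) can also be combined by hand when a connection object is needed outside the helper functions. A minimal sketch, assuming the top-level `graphql_relay` exports:

from graphql_relay import Connection, Edge, PageInfo, offset_to_cursor

# Build a one-item connection manually from the named tuples.
edges = [Edge(node="A", cursor=offset_to_cursor(0))]
page_info = PageInfo(
    startCursor=edges[0].cursor,
    endCursor=edges[-1].cursor,
    hasPreviousPage=False,
    hasNextPage=False,
)
connection = Connection(edges=edges, pageInfo=page_info)
print(connection.pageInfo.hasNextPage)  # False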
61,921 |
namedtuple_PageInfo
|
__new__
|
Create new instance of PageInfo(startCursor, endCursor, hasPreviousPage, hasNextPage)
|
from builtins import function
|
(_cls, startCursor: Optional[str], endCursor: Optional[str], hasPreviousPage: bool, hasNextPage: bool)
|
61,924 |
collections
|
_replace
|
Return a new PageInfo object replacing specified fields with new values
|
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
    ...  # full source identical to the `namedtuple` listing in row 61,883 above
|
(self, /, **kwds)
|
61,925 |
graphql_relay.connection.connection
|
PageInfoConstructor
| null |
class PageInfoConstructor(Protocol):
def __call__(
self,
*,
startCursor: Optional[ConnectionCursor],
endCursor: Optional[ConnectionCursor],
hasPreviousPage: bool,
hasNextPage: bool,
) -> PageInfoType:
...
|
(*args, **kwargs)
|
61,926 |
graphql_relay.connection.connection
|
__call__
| null |
def __call__(
self,
*,
startCursor: Optional[ConnectionCursor],
endCursor: Optional[ConnectionCursor],
hasPreviousPage: bool,
hasNextPage: bool,
) -> PageInfoType:
...
|
(self, *, startCursor: Optional[str], endCursor: Optional[str], hasPreviousPage: bool, hasNextPage: bool) -> graphql_relay.connection.connection.PageInfoType
|
61,929 |
graphql_relay.connection.connection
|
PageInfoType
| null |
class PageInfoType(Protocol):
    @property
    def startCursor(self) -> Optional[ConnectionCursor]:
        ...
    @property
    def endCursor(self) -> Optional[ConnectionCursor]:
        ...
    @property
    def hasPreviousPage(self) -> bool:
        ...
    @property
    def hasNextPage(self) -> bool:
        ...
|
(*args, **kwargs)
|
61,932 |
graphql_relay.connection.connection
|
endCursor
| null |
def endCursor(self) -> Optional[ConnectionCursor]:
...
|
(self) -> Optional[str]
|
61,933 |
graphql_relay.connection.connection
|
hasNextPage
| null |
def hasNextPage(self) -> bool:
...
|
(self) -> bool
|
61,934 |
graphql_relay.connection.connection
|
hasPreviousPage
| null |
def hasPreviousPage(self) -> bool:
...
|
(self) -> bool
|
61,935 |
graphql_relay.node.node
|
ResolvedGlobalId
|
ResolvedGlobalId(type, id)
|
class ResolvedGlobalId(NamedTuple):
type: str
id: str
|
(type: str, id: str)
|
61,937 |
namedtuple_ResolvedGlobalId
|
__new__
|
Create new instance of ResolvedGlobalId(type, id)
|
from builtins import function
|
(_cls, type: str, id: str)
|
61,940 |
collections
|
_replace
|
Return a new ResolvedGlobalId object replacing specified fields with new values
|
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
    ...  # full source identical to the `namedtuple` listing in row 61,883 above
|
(self, /, **kwds)
|
61,941 |
graphql_relay.connection.array_connection
|
SizedSliceable
| null |
class SizedSliceable(Protocol):
def __getitem__(self, index: slice) -> Any:
...
def __iter__(self) -> Iterator:
...
def __len__(self) -> int:
...
|
(*args, **kwargs)
|
61,942 |
graphql_relay.connection.array_connection
|
__getitem__
| null |
def __getitem__(self, index: slice) -> Any:
...
|
(self, index: slice) -> Any
|
61,944 |
graphql_relay.connection.array_connection
|
__iter__
| null |
def __iter__(self) -> Iterator:
...
|
(self) -> Iterator
|
61,945 |
graphql_relay.connection.array_connection
|
__len__
| null |
def __len__(self) -> int:
...
|
(self) -> int
|
61,948 |
graphql_relay.connection.connection
|
connection_definitions
|
Return GraphQLObjectTypes for a connection with the given name.
The nodes of the returned object types will be of the specified type.
|
def connection_definitions(
node_type: Union[GraphQLNamedOutputType, GraphQLNonNull[GraphQLNamedOutputType]],
name: Optional[str] = None,
resolve_node: Optional[GraphQLFieldResolver] = None,
resolve_cursor: Optional[GraphQLFieldResolver] = None,
edge_fields: Optional[ThunkMapping[GraphQLField]] = None,
connection_fields: Optional[ThunkMapping[GraphQLField]] = None,
) -> GraphQLConnectionDefinitions:
"""Return GraphQLObjectTypes for a connection with the given name.
The nodes of the returned object types will be of the specified type.
"""
name = name or get_named_type(node_type).name
edge_type = GraphQLObjectType(
name + "Edge",
description="An edge in a connection.",
fields=lambda: {
"node": GraphQLField(
node_type,
resolve=resolve_node,
description="The item at the end of the edge",
),
"cursor": GraphQLField(
GraphQLNonNull(GraphQLString),
resolve=resolve_cursor,
description="A cursor for use in pagination",
),
**resolve_thunk(edge_fields or {}),
},
)
connection_type = GraphQLObjectType(
name + "Connection",
description="A connection to a list of items.",
fields=lambda: {
"pageInfo": GraphQLField(
GraphQLNonNull(page_info_type),
description="Information to aid in pagination.",
),
"edges": GraphQLField(
GraphQLList(edge_type), description="A list of edges."
),
**resolve_thunk(connection_fields or {}),
},
)
return GraphQLConnectionDefinitions(edge_type, connection_type)
|
(node_type: Union[graphql.type.definition.GraphQLScalarType, graphql.type.definition.GraphQLObjectType, graphql.type.definition.GraphQLInterfaceType, graphql.type.definition.GraphQLUnionType, graphql.type.definition.GraphQLEnumType, graphql.type.definition.GraphQLNonNull[Union[graphql.type.definition.GraphQLScalarType, graphql.type.definition.GraphQLObjectType, graphql.type.definition.GraphQLInterfaceType, graphql.type.definition.GraphQLUnionType, graphql.type.definition.GraphQLEnumType]]], name: Optional[str] = None, resolve_node: Optional[Callable[..., Any]] = None, resolve_cursor: Optional[Callable[..., Any]] = None, edge_fields: Union[Callable[[], Mapping[str, graphql.type.definition.GraphQLField]], Mapping[str, graphql.type.definition.GraphQLField], NoneType] = None, connection_fields: Union[Callable[[], Mapping[str, graphql.type.definition.GraphQLField]], Mapping[str, graphql.type.definition.GraphQLField], NoneType] = None) -> graphql_relay.connection.connection.GraphQLConnectionDefinitions
|
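A short usage sketch for `connection_definitions`, assuming a hypothetical `Ship` node type; the returned `GraphQLConnectionDefinitions` named tuple unpacks into the edge and connection object types:

from graphql import GraphQLField, GraphQLObjectType, GraphQLString
from graphql_relay import connection_definitions

# Hypothetical node type; any named output type works.
ship_type = GraphQLObjectType("Ship", fields={"name": GraphQLField(GraphQLString)})
ship_edge, ship_connection = connection_definitions(ship_type)
print(ship_edge.name, ship_connection.name)  # ShipEdge ShipConnection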
61,949 |
graphql_relay.connection.array_connection
|
connection_from_array
|
Create a connection object from a sequence of objects.
Note that, unlike its JavaScript counterpart, which expects an array,
this function accepts any kind of sliceable object with a length.
Given this `data` object representing the result set, and connection arguments,
this simple function returns a connection object for use in GraphQL. It uses
offsets as pagination, so pagination will only work if the data is static.
The result will use the default types provided in the `connectiontypes` module
if you don't pass custom types as arguments.
|
def connection_from_array(
data: SizedSliceable,
args: Optional[ConnectionArguments] = None,
connection_type: ConnectionConstructor = Connection,
edge_type: EdgeConstructor = Edge,
page_info_type: PageInfoConstructor = PageInfo,
) -> ConnectionType:
"""Create a connection object from a sequence of objects.
    Note that, unlike its JavaScript counterpart, which expects an array,
this function accepts any kind of sliceable object with a length.
Given this `data` object representing the result set, and connection arguments,
this simple function returns a connection object for use in GraphQL. It uses
offsets as pagination, so pagination will only work if the data is static.
The result will use the default types provided in the `connectiontypes` module
if you don't pass custom types as arguments.
"""
return connection_from_array_slice(
data,
args,
slice_start=0,
array_length=len(data),
connection_type=connection_type,
edge_type=edge_type,
page_info_type=page_info_type,
)
|
(data: graphql_relay.connection.array_connection.SizedSliceable, args: Optional[Dict[str, Any]] = None, connection_type: graphql_relay.connection.connection.ConnectionConstructor = <class 'graphql_relay.connection.connection.Connection'>, edge_type: graphql_relay.connection.connection.EdgeConstructor = <class 'graphql_relay.connection.connection.Edge'>, page_info_type: graphql_relay.connection.connection.PageInfoConstructor = <class 'graphql_relay.connection.connection.PageInfo'>) -> graphql_relay.connection.connection.ConnectionType
|
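A minimal offset-pagination sketch with `connection_from_array`; the `args` dict mirrors the standard Relay connection arguments (`first`, `last`, `before`, `after`):

from graphql_relay import connection_from_array

letters = ["A", "B", "C", "D", "E"]
connection = connection_from_array(letters, {"first": 2})
print([edge.node for edge in connection.edges])  # ['A', 'B']
print(connection.pageInfo.hasNextPage)           # True

# Resume from the last cursor of the previous page.
next_page = connection_from_array(
    letters, {"first": 2, "after": connection.pageInfo.endCursor}
)
print([edge.node for edge in next_page.edges])   # ['C', 'D']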
61,950 |
graphql_relay.connection.array_connection
|
connection_from_array_slice
|
Create a connection object from a slice of the result set.
Note that, unlike its JavaScript counterpart, which expects an array,
this function accepts any kind of sliceable object. This object represents
a slice of the full result set. You need to pass the start position of the
slice as `slice_start` and the length of the full result set as `array_length`.
If the `array_slice` does not have a length, you need to provide it separately
in `array_slice_length` as well.
This function is similar to `connection_from_array`, but is intended for use
cases where you know the cardinality of the connection, consider it too large
to materialize the entire result set, and instead wish to pass in only a slice
of the total result large enough to cover the range specified in `args`.
If you do not provide a `slice_start`, we assume that the slice starts at
the beginning of the result set, and if you do not provide an `array_length`,
we assume that the slice ends at the end of the result set.
|
def connection_from_array_slice(
array_slice: SizedSliceable,
args: Optional[ConnectionArguments] = None,
slice_start: int = 0,
array_length: Optional[int] = None,
array_slice_length: Optional[int] = None,
connection_type: ConnectionConstructor = Connection,
edge_type: EdgeConstructor = Edge,
page_info_type: PageInfoConstructor = PageInfo,
) -> ConnectionType:
"""Create a connection object from a slice of the result set.
    Note that, unlike its JavaScript counterpart, which expects an array,
    this function accepts any kind of sliceable object. This object represents
    a slice of the full result set. You need to pass the start position of the
    slice as `slice_start` and the length of the full result set as `array_length`.
If the `array_slice` does not have a length, you need to provide it separately
in `array_slice_length` as well.
This function is similar to `connection_from_array`, but is intended for use
cases where you know the cardinality of the connection, consider it too large
to materialize the entire result set, and instead wish to pass in only a slice
of the total result large enough to cover the range specified in `args`.
If you do not provide a `slice_start`, we assume that the slice starts at
the beginning of the result set, and if you do not provide an `array_length`,
we assume that the slice ends at the end of the result set.
"""
args = args or {}
before = args.get("before")
after = args.get("after")
first = args.get("first")
last = args.get("last")
if array_slice_length is None:
array_slice_length = len(array_slice)
slice_end = slice_start + array_slice_length
if array_length is None:
array_length = slice_end
start_offset = max(slice_start, 0)
end_offset = min(slice_end, array_length)
after_offset = get_offset_with_default(after, -1)
if 0 <= after_offset < array_length:
start_offset = max(start_offset, after_offset + 1)
before_offset = get_offset_with_default(before, end_offset)
if 0 <= before_offset < array_length:
end_offset = min(end_offset, before_offset)
if isinstance(first, int):
if first < 0:
raise ValueError("Argument 'first' must be a non-negative integer.")
end_offset = min(end_offset, start_offset + first)
if isinstance(last, int):
if last < 0:
raise ValueError("Argument 'last' must be a non-negative integer.")
start_offset = max(start_offset, end_offset - last)
# If supplied slice is too large, trim it down before mapping over it.
trimmed_slice = array_slice[start_offset - slice_start : end_offset - slice_start]
edges = [
edge_type(node=value, cursor=offset_to_cursor(start_offset + index))
for index, value in enumerate(trimmed_slice)
]
first_edge_cursor = edges[0].cursor if edges else None
last_edge_cursor = edges[-1].cursor if edges else None
lower_bound = after_offset + 1 if after else 0
upper_bound = before_offset if before else array_length
return connection_type(
edges=edges,
pageInfo=page_info_type(
startCursor=first_edge_cursor,
endCursor=last_edge_cursor,
hasPreviousPage=isinstance(last, int) and start_offset > lower_bound,
hasNextPage=isinstance(first, int) and end_offset < upper_bound,
),
)
|
(array_slice: graphql_relay.connection.array_connection.SizedSliceable, args: Optional[Dict[str, Any]] = None, slice_start: int = 0, array_length: Optional[int] = None, array_slice_length: Optional[int] = None, connection_type: graphql_relay.connection.connection.ConnectionConstructor = <class 'graphql_relay.connection.connection.Connection'>, edge_type: graphql_relay.connection.connection.EdgeConstructor = <class 'graphql_relay.connection.connection.Edge'>, page_info_type: graphql_relay.connection.connection.PageInfoConstructor = <class 'graphql_relay.connection.connection.PageInfo'>) -> graphql_relay.connection.connection.ConnectionType
|
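A sketch of the windowed use case the docstring describes: only a slice of a larger result set is materialized, and `slice_start`/`array_length` anchor it within the full set (the numbers here are hypothetical):

from graphql_relay import connection_from_array_slice

# Pretend the full result set has 100 rows but only rows 40-49 were fetched.
window = list(range(40, 50))
connection = connection_from_array_slice(
    window,
    {"first": 3},
    slice_start=40,
    array_length=100,
)
print([edge.node for edge in connection.edges])  # [40, 41, 42]
print(connection.pageInfo.hasNextPage)           # True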
61,951 |
graphql_relay.connection.array_connection
|
cursor_for_object_in_connection
|
Return the cursor associated with an object in a sequence.
This function uses the `index` method of the sequence if it exists,
otherwise searches for the object by iterating via the `__getitem__` method.
|
def cursor_for_object_in_connection(
data: Sequence, obj: Any
) -> Optional[ConnectionCursor]:
"""Return the cursor associated with an object in a sequence.
This function uses the `index` method of the sequence if it exists,
    otherwise searches for the object by iterating via the `__getitem__` method.
"""
try:
offset = data.index(obj)
except AttributeError:
# data does not have an index method
offset = 0
try:
while True:
if data[offset] == obj:
break
offset += 1
except IndexError:
return None
else:
return offset_to_cursor(offset)
except ValueError:
return None
else:
return offset_to_cursor(offset)
|
(data: Sequence, obj: Any) -> Optional[str]
|
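For example, a small sketch exercising the list `index` path described above:

from graphql_relay import cursor_for_object_in_connection, cursor_to_offset

letters = ["A", "B", "C"]
cursor = cursor_for_object_in_connection(letters, "B")
print(cursor_to_offset(cursor))                       # 1
print(cursor_for_object_in_connection(letters, "Z"))  # None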
61,952 |
graphql_relay.connection.array_connection
|
cursor_to_offset
|
Extract the offset from the cursor string.
|
def cursor_to_offset(cursor: ConnectionCursor) -> Optional[int]:
"""Extract the offset from the cursor string."""
try:
return int(unbase64(cursor)[len(PREFIX) :])
except ValueError:
return None
|
(cursor: str) -> Optional[int]
|
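A round-trip sketch with `offset_to_cursor` (row 61,960 below); cursors are base64-encoded `'arrayconnection:<offset>'` strings, and anything that does not decode to a valid offset yields `None`:

from graphql_relay import cursor_to_offset, offset_to_cursor

cursor = offset_to_cursor(3)
print(cursor)                    # base64 of 'arrayconnection:3'
print(cursor_to_offset(cursor))  # 3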
61,953 |
graphql_relay.node.node
|
from_global_id
|
Takes the "global ID" created by to_global_id, and returns the type name and ID
used to create it.
|
def from_global_id(global_id: str) -> ResolvedGlobalId:
"""
Takes the "global ID" created by to_global_id, and returns the type name and ID
used to create it.
"""
global_id = unbase64(global_id)
if ":" not in global_id:
return ResolvedGlobalId("", global_id)
return ResolvedGlobalId(*global_id.split(":", 1))
|
(global_id: str) -> graphql_relay.node.node.ResolvedGlobalId
|
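A round-trip sketch with `to_global_id` (row 61,962 below); the global ID is the base64 encoding of `'<type>:<id>'`:

from graphql_relay import from_global_id, to_global_id

global_id = to_global_id("Ship", 1)
print(global_id)                  # 'U2hpcDox' (base64 of 'Ship:1')
print(from_global_id(global_id))  # ResolvedGlobalId(type='Ship', id='1')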
61,954 |
graphql_relay.connection.array_connection
|
get_offset_with_default
|
Get offset from a given cursor and a default.
Given an optional cursor and a default offset, return the offset to use;
if the cursor contains a valid offset, that will be used,
otherwise it will be the default.
|
def get_offset_with_default(
cursor: Optional[ConnectionCursor] = None, default_offset: int = 0
) -> int:
"""Get offset from a given cursor and a default.
Given an optional cursor and a default offset, return the offset to use;
if the cursor contains a valid offset, that will be used,
otherwise it will be the default.
"""
if not isinstance(cursor, str):
return default_offset
offset = cursor_to_offset(cursor)
return default_offset if offset is None else offset
|
(cursor: Optional[str] = None, default_offset: int = 0) -> int
|
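For example:

from graphql_relay import get_offset_with_default, offset_to_cursor

print(get_offset_with_default(offset_to_cursor(7), 0))  # 7
print(get_offset_with_default(None, 5))                 # 5
print(get_offset_with_default("Zm9v", 5))               # base64 of 'foo' is not a cursor, so 5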
61,955 |
graphql_relay.node.node
|
global_id_field
|
Creates the configuration for an id field on a node, using `to_global_id` to
construct the ID from the provided typename. The type-specific ID is fetched
by calling id_fetcher on the object, or if not provided, by accessing the `id`
attribute of the object, or the `id` key if the object is a dict.
|
def global_id_field(
type_name: Optional[str] = None,
id_fetcher: Optional[Callable[[Any, GraphQLResolveInfo], str]] = None,
) -> GraphQLField:
"""
Creates the configuration for an id field on a node, using `to_global_id` to
construct the ID from the provided typename. The type-specific ID is fetched
by calling id_fetcher on the object, or if not provided, by accessing the `id`
    attribute of the object, or the `id` key if the object is a dict.
"""
def resolve(obj: Any, info: GraphQLResolveInfo, **_args: Any) -> str:
type_ = type_name or info.parent_type.name
id_ = (
id_fetcher(obj, info)
if id_fetcher
else (obj["id"] if isinstance(obj, dict) else obj.id)
)
return to_global_id(type_, id_)
return GraphQLField(
GraphQLNonNull(GraphQLID), description="The ID of an object", resolve=resolve
)
|
(type_name: Optional[str] = None, id_fetcher: Optional[Callable[[Any, graphql.type.definition.GraphQLResolveInfo], str]] = None) -> graphql.type.definition.GraphQLField
|
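A sketch wiring `global_id_field` into a hypothetical `Ship` type whose objects carry a plain `id` attribute or key:

from graphql import GraphQLField, GraphQLObjectType, GraphQLString
from graphql_relay import global_id_field

ship_type = GraphQLObjectType(
    "Ship",
    fields=lambda: {
        # Resolves to to_global_id('Ship', obj['id']) for dict objects.
        "id": global_id_field("Ship"),
        "name": GraphQLField(GraphQLString),
    },
)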
61,957 |
graphql_relay.mutation.mutation
|
mutation_with_client_mutation_id
|
Returns a GraphQLField for the specified mutation.
The input_fields and output_fields should not include `clientMutationId`,
as this will be provided automatically.
An input object will be created containing the input fields, and an
object will be created containing the output fields.
mutate_and_get_payload will receive a GraphQLResolveInfo as first argument,
and the input fields as keyword arguments, and it should return an object
(or a dict) with an attribute (or a key) for each output field.
It may return synchronously or asynchronously.
|
def mutation_with_client_mutation_id(
name: str,
input_fields: ThunkMapping[GraphQLInputField],
output_fields: ThunkMapping[GraphQLField],
mutate_and_get_payload: MutationFn,
description: Optional[str] = None,
deprecation_reason: Optional[str] = None,
extensions: Optional[Dict[str, Any]] = None,
) -> GraphQLField:
"""
    Returns a GraphQLField for the specified mutation.
The input_fields and output_fields should not include `clientMutationId`,
as this will be provided automatically.
An input object will be created containing the input fields, and an
object will be created containing the output fields.
mutate_and_get_payload will receive a GraphQLResolveInfo as first argument,
and the input fields as keyword arguments, and it should return an object
(or a dict) with an attribute (or a key) for each output field.
It may return synchronously or asynchronously.
"""
def augmented_input_fields() -> GraphQLInputFieldMap:
return dict(
resolve_thunk(input_fields),
clientMutationId=GraphQLInputField(GraphQLString),
)
def augmented_output_fields() -> GraphQLFieldMap:
return dict(
resolve_thunk(output_fields),
clientMutationId=GraphQLField(GraphQLString),
)
output_type = GraphQLObjectType(name + "Payload", fields=augmented_output_fields)
input_type = GraphQLInputObjectType(name + "Input", fields=augmented_input_fields)
if iscoroutinefunction(mutate_and_get_payload):
# noinspection PyShadowingBuiltins
async def resolve(_root: Any, info: GraphQLResolveInfo, input: Dict) -> Any:
payload = await mutate_and_get_payload(info, **input)
clientMutationId = input.get("clientMutationId")
if payload is None:
return NullResult(clientMutationId)
if isinstance(payload, Mapping):
payload["clientMutationId"] = clientMutationId # type: ignore
else:
payload.clientMutationId = clientMutationId
return payload
else:
# noinspection PyShadowingBuiltins
def resolve( # type: ignore
_root: Any, info: GraphQLResolveInfo, input: Dict
) -> Any:
payload = mutate_and_get_payload(info, **input)
clientMutationId = input.get("clientMutationId")
if payload is None:
return NullResult(clientMutationId)
if isinstance(payload, Mapping):
payload["clientMutationId"] = clientMutationId # type: ignore
else:
payload.clientMutationId = clientMutationId # type: ignore
return payload
return GraphQLField(
output_type,
description=description,
deprecation_reason=deprecation_reason,
args={"input": GraphQLArgument(GraphQLNonNull(input_type))},
resolve=resolve,
extensions=extensions,
)
|
(name: str, input_fields: Union[Callable[[], Mapping[str, graphql.type.definition.GraphQLInputField]], Mapping[str, graphql.type.definition.GraphQLInputField]], output_fields: Union[Callable[[], Mapping[str, graphql.type.definition.GraphQLField]], Mapping[str, graphql.type.definition.GraphQLField]], mutate_and_get_payload: Callable[..., Union[Awaitable[Any], Any]], description: Optional[str] = None, deprecation_reason: Optional[str] = None, extensions: Optional[Dict[str, Any]] = None) -> graphql.type.definition.GraphQLField
|
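An end-to-end sketch, assuming a hypothetical `Double` mutation that doubles an integer; note that `clientMutationId` is injected into the input and output types automatically:

from graphql import (
    GraphQLField, GraphQLInputField, GraphQLInt,
    GraphQLObjectType, GraphQLSchema, graphql_sync,
)
from graphql_relay import mutation_with_client_mutation_id

double_mutation = mutation_with_client_mutation_id(
    "Double",
    input_fields={"value": GraphQLInputField(GraphQLInt)},
    output_fields={"result": GraphQLField(GraphQLInt)},
    # Receives the resolve info plus the input fields as keyword arguments.
    mutate_and_get_payload=lambda info, value, **_: {"result": 2 * value},
)
schema = GraphQLSchema(
    query=GraphQLObjectType("Query", {"ok": GraphQLField(GraphQLInt)}),
    mutation=GraphQLObjectType("Mutation", {"double": double_mutation}),
)
result = graphql_sync(schema, "mutation { double(input: {value: 21}) { result } }")
print(result.data)  # {'double': {'result': 42}}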
61,959 |
graphql_relay.node.node
|
node_definitions
|
Given a function to map from an ID to an underlying object, and a function
to map from an underlying object to the concrete GraphQLObjectType it
corresponds to, constructs a `Node` interface that objects can implement,
and a field object to be used as a `node` root field.
If the type_resolver is omitted, object resolution on the interface will be
handled with the `is_type_of` method on object types, as with any GraphQL
interface without a provided `resolve_type` method.
|
def node_definitions(
fetch_by_id: Callable[[str, GraphQLResolveInfo], Any],
type_resolver: Optional[GraphQLTypeResolver] = None,
) -> GraphQLNodeDefinitions:
"""
Given a function to map from an ID to an underlying object, and a function
to map from an underlying object to the concrete GraphQLObjectType it
corresponds to, constructs a `Node` interface that objects can implement,
and a field object to be used as a `node` root field.
If the type_resolver is omitted, object resolution on the interface will be
handled with the `is_type_of` method on object types, as with any GraphQL
interface without a provided `resolve_type` method.
"""
node_interface = GraphQLInterfaceType(
"Node",
description="An object with an ID",
fields=lambda: {
"id": GraphQLField(
GraphQLNonNull(GraphQLID), description="The id of the object."
)
},
resolve_type=type_resolver,
)
# noinspection PyShadowingBuiltins
node_field = GraphQLField(
node_interface,
description="Fetches an object given its ID",
args={
"id": GraphQLArgument(
GraphQLNonNull(GraphQLID), description="The ID of an object"
)
},
resolve=lambda _obj, info, id: fetch_by_id(id, info),
)
nodes_field = GraphQLField(
GraphQLNonNull(GraphQLList(node_interface)),
description="Fetches objects given their IDs",
args={
"ids": GraphQLArgument(
GraphQLNonNull(GraphQLList(GraphQLNonNull(GraphQLID))),
description="The IDs of objects",
)
},
resolve=lambda _obj, info, ids: [fetch_by_id(id_, info) for id_ in ids],
)
return GraphQLNodeDefinitions(node_interface, node_field, nodes_field)
|
(fetch_by_id: Callable[[str, graphql.type.definition.GraphQLResolveInfo], Any], type_resolver: Optional[Callable[[Any, graphql.type.definition.GraphQLResolveInfo, ForwardRef('GraphQLAbstractType')], Union[Awaitable[Optional[str]], str, NoneType]]] = None) -> graphql_relay.node.node.GraphQLNodeDefinitions
|
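A short sketch of wiring these definitions into a schema; the photos store and the fetch function are hypothetical:

from graphql import GraphQLObjectType, GraphQLSchema
from graphql_relay import node_definitions

photos = {"1": {"id": "1", "width": 300}}  # hypothetical store keyed by raw ID

def fetch_by_id(id_, info):
    return photos.get(id_)

node_interface, node_field, nodes_field = node_definitions(fetch_by_id)
# With type_resolver omitted, concrete object types implementing node_interface
# must provide is_type_of, as noted in the docstring above.
query_type = GraphQLObjectType(
    "Query", fields=lambda: {"node": node_field, "nodes": nodes_field}
)
schema = GraphQLSchema(query_type)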
61,960 |
graphql_relay.connection.array_connection
|
offset_to_cursor
|
Create the cursor string from an offset.
|
def offset_to_cursor(offset: int) -> ConnectionCursor:
"""Create the cursor string from an offset."""
return base64(f"{PREFIX}{offset}")
|
(offset: int) -> str
|
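The resulting cursors are opaque base64 strings; assuming the package's standard "arrayconnection:" prefix:

from graphql_relay import offset_to_cursor

cursor = offset_to_cursor(0)
# base64("arrayconnection:0")
assert cursor == "YXJyYXljb25uZWN0aW9uOjA="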
61,961 |
graphql_relay.node.plural
|
plural_identifying_root_field
| null |
def plural_identifying_root_field(
arg_name: str,
input_type: GraphQLInputType,
output_type: GraphQLOutputType,
resolve_single_input: Callable[[GraphQLResolveInfo, str], Any],
description: Optional[str] = None,
) -> GraphQLField:
def resolve(_obj: Any, info: GraphQLResolveInfo, **args: Any) -> List:
inputs = args[arg_name]
return [resolve_single_input(info, input_) for input_ in inputs]
return GraphQLField(
GraphQLList(output_type),
description=description,
args={
arg_name: GraphQLArgument(
GraphQLNonNull(
GraphQLList(
GraphQLNonNull(get_nullable_type(input_type)) # type: ignore
)
)
)
},
resolve=resolve,
)
|
(arg_name: str, input_type: Union[graphql.type.definition.GraphQLScalarType, graphql.type.definition.GraphQLEnumType, graphql.type.definition.GraphQLInputObjectType, graphql.type.definition.GraphQLWrappingType], output_type: Union[graphql.type.definition.GraphQLScalarType, graphql.type.definition.GraphQLObjectType, graphql.type.definition.GraphQLInterfaceType, graphql.type.definition.GraphQLUnionType, graphql.type.definition.GraphQLEnumType, graphql.type.definition.GraphQLWrappingType], resolve_single_input: Callable[[graphql.type.definition.GraphQLResolveInfo, str], Any], description: Optional[str] = None) -> graphql.type.definition.GraphQLField
|
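A usage sketch for the plural root field; the User type and the lookup lambda are hypothetical:

from graphql import GraphQLField, GraphQLObjectType, GraphQLString
from graphql_relay import plural_identifying_root_field

user_type = GraphQLObjectType(
    "User", fields={"username": GraphQLField(GraphQLString)}
)
users_field = plural_identifying_root_field(
    arg_name="usernames",
    input_type=GraphQLString,
    output_type=user_type,
    resolve_single_input=lambda info, username: {"username": username},
    description="Map from a username to the user",
)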
61,962 |
graphql_relay.node.node
|
to_global_id
|
Takes a type name and an ID specific to that type name, and returns a
"global ID" that is unique among all types.
|
def to_global_id(type_: str, id_: Union[str, int]) -> str:
"""
Takes a type name and an ID specific to that type name, and returns a
"global ID" that is unique among all types.
"""
return base64(f"{type_}:{GraphQLID.serialize(id_)}")
|
(type_: str, id_: Union[str, int]) -> str
|
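For example (its counterpart from_global_id, not shown in this section, reverses the encoding):

from graphql_relay import from_global_id, to_global_id

gid = to_global_id("User", 1)  # base64("User:1")
assert gid == "VXNlcjox"
assert from_global_id(gid) == ("User", "1")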
61,964 |
paos.classes.abcd
|
ABCD
|
ABCD matrix class for paraxial ray tracing.
Attributes
----------
thickness: scalar
optical thickness
power: scalar
optical power
M: scalar
optical magnification
n1n2: scalar
ratio of refractive indices n1/n2 for light propagating
from a medium with refractive index n1, into a medium
with refractive index n2
c : scalar
sign of the speed of light: +1 for light travelling left-to-right (+Z),
and -1 for light travelling right-to-left (-Z)
Note
----------
The class properties can differ from the values of the parameters used at
class instantiation. This is because the ABCD matrix is decomposed into four primitives,
multiplied together as discussed in :ref:`Optical system equivalent`.
Examples
--------
>>> from paos.classes.abcd import ABCD
>>> thickness = 2.695 # mm
>>> radius = 31.850 # mm
>>> n1, n2 = 1.0, 1.5
>>> abcd = ABCD(thickness=thickness, curvature=1.0/radius, n1=n1, n2=n2)
>>> (A, B), (C, D) = abcd.ABCD
|
class ABCD:
"""
ABCD matrix class for paraxial ray tracing.
Attributes
----------
thickness: scalar
optical thickness
power: scalar
optical power
M: scalar
optical magnification
n1n2: scalar
ratio of refractive indices n1/n2 for light propagating
from a medium with refractive index n1, into a medium
with refractive index n2
    c : scalar
        sign of the speed of light: +1 for light travelling left-to-right (+Z),
        and -1 for light travelling right-to-left (-Z)
Note
----------
    The class properties can differ from the values of the parameters used at
    class instantiation. This is because the ABCD matrix is decomposed into four primitives,
    multiplied together as discussed in :ref:`Optical system equivalent`.
Examples
--------
>>> from paos.classes.abcd import ABCD
>>> thickness = 2.695 # mm
>>> radius = 31.850 # mm
>>> n1, n2 = 1.0, 1.5
>>> abcd = ABCD(thickness=thickness, curvature=1.0/radius, n1=n1, n2=n2)
>>> (A, B), (C, D) = abcd.ABCD
"""
def __init__(self, thickness=0.0, curvature=0.0, n1=1.0, n2=1.0, M=1.0):
"""
Initialize the ABCD matrix.
Parameters
----------
thickness: scalar
optical thickness. It is positive from left to right. Default is 0.0
curvature: scalar
inverse of the radius of curvature: it is positive if the center of curvature
            lies on the right. If n1=n2, the parameter is assumed to describe
            a thin lens of focal length fl=1/curvature. Default is 0.0
n1: scalar
refractive index of the first medium. Default is 1.0
n2: scalar
refractive index of the second medium. Default is 1.0
M: scalar
optical magnification. Default is 1.0
Note
-----
Light is assumed to be propagating from a medium with refractive index n1
into a medium with refractive index n2.
Note
-----
The refractive indices are assumed to be positive when light propagates
from left to right (+Z), and negative when light propagates from right
to left (-Z)
"""
if n1 == 0 or n2 == 0 or M == 0:
logger.error(
"Refractive index and magnification shall not be zero"
)
raise ValueError(
"Refractive index and magnification shall not be zero"
)
T = np.array([[1.0, thickness], [0, 1.0]])
if n1 == n2:
# Assume a thin lens
D = np.array([[1.0, 0.0], [-curvature, 1.0]])
else:
# Assume dioptre or mirror
D = np.array([[1.0, 0.0], [-(1 - n1 / n2) * curvature, n1 / n2]])
M = np.array([[M, 0.0], [0.0, 1.0 / M]])
self._ABCD = T @ D @ M
# Remove because not needed and would break ABCD surface type when defined in lens.ini file
# self._n1 = n1
# self._n2 = n2
self._cin = np.sign(n1)
self._cout = np.sign(n2)
@property
def thickness(self):
(A, B), (C, D) = self._ABCD
return B / D
@property
def M(self):
(A, B), (C, D) = self._ABCD
return (A * D - B * C) / D
@property
def n1n2(self):
(A, B), (C, D) = self._ABCD
return D * self.M
@property
def power(self):
(A, B), (C, D) = self._ABCD
return -C / self.M
# @property
# def n1(self):
# return self._n1
# @property
# def n2(self):
# return self._n2
@property
def cin(self):
return self._cin
@cin.setter
def cin(self, c):
self._cin = c
@property
def cout(self):
return self._cout
@cout.setter
def cout(self, c):
self._cout = c
@property
def f_eff(self):
return 1 / (self.power * self.M)
@property
def ABCD(self):
return self._ABCD
@ABCD.setter
def ABCD(self, ABCD):
self._ABCD = ABCD.copy()
def __call__(self):
return self._ABCD
def __mul__(self, other):
ABCD_new = self._ABCD @ other()
out = ABCD()
out.ABCD = ABCD_new
out.cin = other.cin
out.cout = other.cout
return out
|
(thickness=0.0, curvature=0.0, n1=1.0, n2=1.0, M=1.0)
|
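Since __mul__ (documented in a later record) composes matrices, an optical train can be built by multiplying surfaces; a short sketch with illustrative values:

from paos.classes.abcd import ABCD

# Thin lens with focal length fl = 50 mm (n1 == n2), then 100 mm of free space.
lens = ABCD(curvature=1.0 / 50.0)
space = ABCD(thickness=100.0)
# Left-multiplication: the right-most element is traversed first.
system = space * lens
(A, B), (C, D) = system.ABCD
print(system.power)  # 0.02 mm^-1, i.e. an effective focal length of 50 mm
print(system.f_eff)  # 50.0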
61,965 |
paos.classes.abcd
|
__call__
| null |
def __call__(self):
return self._ABCD
|
(self)
|
61,966 |
paos.classes.abcd
|
__init__
|
Initialize the ABCD matrix.
Parameters
----------
thickness: scalar
optical thickness. It is positive from left to right. Default is 0.0
curvature: scalar
inverse of the radius of curvature: it is positive if the center of curvature
lies on the right. If n1=n2, the parameter is assumed to describe
a thin lens of focal length fl=1/curvature. Default is 0.0
n1: scalar
refractive index of the first medium. Default is 1.0
n2: scalar
refractive index of the second medium. Default is 1.0
M: scalar
optical magnification. Default is 1.0
Note
-----
Light is assumed to be propagating from a medium with refractive index n1
into a medium with refractive index n2.
Note
-----
The refractive indices are assumed to be positive when light propagates
from left to right (+Z), and negative when light propagates from right
to left (-Z)
|
def __init__(self, thickness=0.0, curvature=0.0, n1=1.0, n2=1.0, M=1.0):
"""
Initialize the ABCD matrix.
Parameters
----------
thickness: scalar
optical thickness. It is positive from left to right. Default is 0.0
curvature: scalar
inverse of the radius of curvature: it is positive if the center of curvature
        lies on the right. If n1=n2, the parameter is assumed to describe
        a thin lens of focal length fl=1/curvature. Default is 0.0
n1: scalar
refractive index of the first medium. Default is 1.0
n2: scalar
refractive index of the second medium. Default is 1.0
M: scalar
optical magnification. Default is 1.0
Note
-----
Light is assumed to be propagating from a medium with refractive index n1
into a medium with refractive index n2.
Note
-----
The refractive indices are assumed to be positive when light propagates
from left to right (+Z), and negative when light propagates from right
to left (-Z)
"""
if n1 == 0 or n2 == 0 or M == 0:
logger.error(
"Refractive index and magnification shall not be zero"
)
raise ValueError(
"Refractive index and magnification shall not be zero"
)
T = np.array([[1.0, thickness], [0, 1.0]])
if n1 == n2:
# Assume a thin lens
D = np.array([[1.0, 0.0], [-curvature, 1.0]])
else:
# Assume dioptre or mirror
D = np.array([[1.0, 0.0], [-(1 - n1 / n2) * curvature, n1 / n2]])
M = np.array([[M, 0.0], [0.0, 1.0 / M]])
self._ABCD = T @ D @ M
# Remove because not needed and would break ABCD surface type when defined in lens.ini file
# self._n1 = n1
# self._n2 = n2
self._cin = np.sign(n1)
self._cout = np.sign(n2)
|
(self, thickness=0.0, curvature=0.0, n1=1.0, n2=1.0, M=1.0)
|
61,967 |
paos.classes.abcd
|
__mul__
| null |
def __mul__(self, other):
ABCD_new = self._ABCD @ other()
out = ABCD()
out.ABCD = ABCD_new
out.cin = other.cin
out.cout = other.cout
return out
|
(self, other)
|
61,968 |
paos.classes.wfo
|
WFO
|
Physical optics wavefront propagation.
Implements the paraxial theory described in
`Lawrence et al., Applied Optics and Optical Engineering, Volume XI (1992) <https://ui.adsabs.harvard.edu/abs/1992aooe...11..125L>`_
All units are meters.
Parameters
----------
beam_diameter: scalar
the input beam diameter. Note that the input beam is always circular, regardless of
whatever non-circular apodization the input pupil might apply.
wl: scalar
the wavelength
grid_size: scalar
grid size must be a power of 2
zoom: scalar
linear scaling factor of input beam.
Attributes
----------
wl: scalar
the wavelength
z: scalar
current beam position along the z-axis (propagation axis).
Initial value is 0
w0: scalar
pilot Gaussian beam waist.
Initial value is beam_diameter/2
zw0: scalar
z-coordinate of the Gaussian beam waist.
initial value is 0
zr: scalar
Rayleigh distance: :math:`\pi w_{0}^{2} / \lambda`
rayleigh_factor: scalar
Scale factor multiplying zr to determine 'I' and 'O' regions.
Built in value is 2
dx: scalar
pixel sampling interval along x-axis
dy: scalar
pixel sampling interval along y-axis
C: scalar
curvature of the reference surface at beam position
fratio: scalar
pilot Gaussian beam f-ratio
wfo: array [gridsize, gridsize], complex128
the wavefront complex array
amplitude: array [gridsize, gridsize], float64
the wavefront amplitude array
phase: array [gridsize, gridsize], float64
the wavefront phase array in radians
wz: scalar
the Gaussian beam waist w(z) at current beam position
distancetofocus: scalar
the distance to focus from current beam position
extent: tuple
the physical coordinates of the wavefront bounding box (xmin, xmax, ymin, ymax).
Can be used directly in im.set_extent.
Returns
-------
out: an instance of WFO
Example
-------
>>> import paos
>>> import matplotlib.pyplot as plt
>>> beam_diameter = 1.0 # m
>>> wavelength = 3.0 # micron
>>> grid_size = 512
>>> zoom = 4
>>> xdec, ydec = 0.0, 0.0
>>> fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
>>> wfo = paos.WFO(beam_diameter, 1.0e-6 * wavelength, grid_size, zoom)
>>> wfo.aperture(xc=xdec, yc=ydec, r=beam_diameter/2, shape='circular')
>>> wfo.make_stop()
>>> ax0.imshow(wfo.amplitude)
>>> wfo.lens(lens_fl=1.0)
>>> wfo.propagate(dz=1.0)
>>> ax1.imshow(wfo.amplitude)
>>> plt.show()
|
class WFO:
"""
Physical optics wavefront propagation.
Implements the paraxial theory described in
`Lawrence et al., Applied Optics and Optical Engineering, Volume XI (1992) <https://ui.adsabs.harvard.edu/abs/1992aooe...11..125L>`_
All units are meters.
Parameters
----------
beam_diameter: scalar
the input beam diameter. Note that the input beam is always circular, regardless of
whatever non-circular apodization the input pupil might apply.
wl: scalar
the wavelength
grid_size: scalar
grid size must be a power of 2
zoom: scalar
linear scaling factor of input beam.
Attributes
----------
wl: scalar
the wavelength
z: scalar
current beam position along the z-axis (propagation axis).
Initial value is 0
w0: scalar
pilot Gaussian beam waist.
Initial value is beam_diameter/2
zw0: scalar
z-coordinate of the Gaussian beam waist.
initial value is 0
zr: scalar
Rayleigh distance: :math:`\\pi w_{0}^{2} / \\lambda`
rayleigh_factor: scalar
Scale factor multiplying zr to determine 'I' and 'O' regions.
Built in value is 2
dx: scalar
pixel sampling interval along x-axis
dy: scalar
pixel sampling interval along y-axis
C: scalar
curvature of the reference surface at beam position
fratio: scalar
pilot Gaussian beam f-ratio
wfo: array [gridsize, gridsize], complex128
the wavefront complex array
amplitude: array [gridsize, gridsize], float64
the wavefront amplitude array
phase: array [gridsize, gridsize], float64
the wavefront phase array in radians
wz: scalar
the Gaussian beam waist w(z) at current beam position
distancetofocus: scalar
the distance to focus from current beam position
extent: tuple
the physical coordinates of the wavefront bounding box (xmin, xmax, ymin, ymax).
Can be used directly in im.set_extent.
Returns
-------
    out: an instance of WFO
Example
-------
>>> import paos
>>> import matplotlib.pyplot as plt
>>> beam_diameter = 1.0 # m
>>> wavelength = 3.0 # micron
>>> grid_size = 512
>>> zoom = 4
>>> xdec, ydec = 0.0, 0.0
>>> fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
>>> wfo = paos.WFO(beam_diameter, 1.0e-6 * wavelength, grid_size, zoom)
>>> wfo.aperture(xc=xdec, yc=ydec, r=beam_diameter/2, shape='circular')
>>> wfo.make_stop()
>>> ax0.imshow(wfo.amplitude)
>>> wfo.lens(lens_fl=1.0)
>>> wfo.propagate(dz=1.0)
>>> ax1.imshow(wfo.amplitude)
>>> plt.show()
"""
def __init__(self, beam_diameter, wl, grid_size, zoom):
assert np.log2(grid_size).is_integer(), "Grid size should be 2**n"
assert zoom > 0, "zoom factor should be positive"
assert beam_diameter > 0, "beam diameter should be positive"
assert wl > 0, "a wavelength should be positive"
self._wl = wl
self._z = 0.0 # current beam z coordinate
self._w0 = beam_diameter / 2.0 # beam waist
self._zw0 = 0.0 # beam waist z coordinate
self._zr = np.pi * self.w0**2 / wl # Rayleigh distance
self._rayleigh_factor = 2.0
self._dx = beam_diameter * zoom / grid_size # pixel size
self._dy = beam_diameter * zoom / grid_size # pixel size
self._C = 0.0 # beam curvature, start with a planar wf
self._fratio = np.inf # Gaussian beam f-ratio
grid_size = np.uint(grid_size)
self._wfo = np.ones((grid_size, grid_size), dtype=np.complex128)
@property
def wl(self):
return self._wl
@property
def z(self):
return self._z
@property
def w0(self):
return self._w0
@property
def zw0(self):
return self._zw0
@property
def zr(self):
return self._zr
@property
def rayleigh_factor(self):
return self._rayleigh_factor
@property
def dx(self):
return self._dx
@property
def dy(self):
return self._dy
@property
def C(self):
return self._C
@property
def fratio(self):
return self._fratio
@property
def wfo(self):
return self._wfo.copy()
@property
def amplitude(self):
return np.abs(self._wfo)
@property
def phase(self):
return np.angle(self._wfo)
@property
def wz(self):
return self.w0 * np.sqrt(1.0 + ((self.z - self.zw0) / self.zr) ** 2)
@property
def distancetofocus(self):
return self.zw0 - self.z
@property
def extent(self):
return (
-self._wfo.shape[1] // 2 * self.dx,
(self._wfo.shape[1] // 2 - 1) * self.dx,
-self._wfo.shape[0] // 2 * self.dy,
(self._wfo.shape[0] // 2 - 1) * self.dy,
)
def make_stop(self):
"""
Make current surface a stop.
Stop here just means that the wf at current position is normalised to unit energy.
"""
norm2 = np.sum(np.abs(self._wfo) ** 2)
self._wfo /= np.sqrt(norm2)
def aperture(
self,
xc,
yc,
hx=None,
hy=None,
r=None,
shape="elliptical",
tilt=None,
obscuration=False,
):
"""
Apply aperture mask
Parameters
----------
xc: scalar
x-centre of the aperture
yc: scalar
y-centre of the aperture
hx, hy: scalars
semi-axes of shape 'elliptical' aperture, or full dimension of shape 'rectangular' aperture
r: scalar
radius of shape 'circular' aperture
shape: string
defines aperture shape. Can be 'elliptical', 'circular', 'rectangular'
tilt: scalar
tilt angle in degrees. Applies to shapes 'elliptical' and 'rectangular'.
obscuration: boolean
if True, aperture mask is converted into obscuration mask.
"""
ixc = xc / self.dx + self._wfo.shape[1] / 2
iyc = yc / self.dy + self._wfo.shape[0] / 2
if shape == "elliptical":
if hx is None or hy is None:
logger.error("Semi major/minor axes not defined")
raise AssertionError("Semi major/minor axes not defined")
ihx = hx / self.dx
ihy = hy / self.dy
theta = 0.0 if tilt is None else np.deg2rad(tilt)
aperture = photutils.aperture.EllipticalAperture(
(ixc, iyc), ihx, ihy, theta=theta
)
mask = aperture.to_mask(method="exact").to_image(self._wfo.shape)
elif shape == "circular":
if r is None:
logger.error("Radius not defined")
raise AssertionError("Radius not defined")
ihx = r / self.dx
ihy = r / self.dy
theta = 0.0
aperture = photutils.aperture.EllipticalAperture(
(ixc, iyc), ihx, ihy, theta=theta
)
mask = aperture.to_mask(method="exact").to_image(self._wfo.shape)
elif shape == "rectangular":
if hx is None or hy is None:
logger.error("Semi major/minor axes not defined")
raise AssertionError("Semi major/minor axes not defined")
ihx = hx / self.dx
ihy = hy / self.dy
theta = 0.0 if tilt is None else np.deg2rad(tilt)
aperture = photutils.aperture.RectangularAperture(
(ixc, iyc), ihx, ihy, theta=theta
)
# Exact method not implemented in photutils 1.0.2
mask = aperture.to_mask(method="subpixel", subpixels=32).to_image(
self._wfo.shape
)
else:
logger.error("Aperture {:s} not defined yet.".format(shape))
raise ValueError("Aperture {:s} not defined yet.".format(shape))
if obscuration:
self._wfo *= 1 - mask
else:
self._wfo *= mask
return aperture
def insideout(self, z=None):
"""
Check if z position is within the Rayleigh distance
Parameters
----------
z: scalar
            beam coordinate along the propagation axis
Returns
-------
out: string
            'I' if :math:`|z - z_{w0}|` < rayleigh_factor * :math:`z_{r}`, else 'O'
"""
if z is None:
delta_z = self.z - self.zw0
else:
delta_z = z - self.zw0
if np.abs(delta_z) < self.rayleigh_factor * self.zr:
return "I"
else:
return "O"
def lens(self, lens_fl):
"""
Apply wavefront phase from paraxial lens
Parameters
----------
lens_fl: scalar
Lens focal length. Positive for converging lenses. Negative for diverging lenses.
Note
----------
A paraxial lens imposes a quadratic phase shift.
"""
wz = self.w0 * np.sqrt(1.0 + ((self.z - self.zw0) / self.zr) ** 2)
delta_z = self.z - self.zw0
propagator = self.insideout()
# estimate Gaussian beam curvature after lens
gCobj = delta_z / (
delta_z**2 + self.zr**2
) # Gaussian beam curvature before lens
gCima = gCobj - 1.0 / lens_fl # Gaussian beam curvature after lens
# update Gaussian beam parameters
self._w0 = wz / np.sqrt(1.0 + (np.pi * wz**2 * gCima / self.wl) ** 2)
self._zw0 = (
-gCima / (gCima**2 + (self.wl / (np.pi * wz**2)) ** 2) + self.z
)
self._zr = np.pi * self.w0**2 / self.wl
propagator = propagator + self.insideout()
if propagator[0] == "I" or self.C == 0.0:
Cobj = 0.0
else:
Cobj = 1 / delta_z
delta_z = self.z - self.zw0
if propagator[1] == "I":
Cima = 0.0
else:
Cima = 1 / delta_z
self._C = Cima
if propagator == "II":
lens_phase = 1.0 / lens_fl
elif propagator == "IO":
lens_phase = 1 / lens_fl + Cima
elif propagator == "OI":
lens_phase = 1.0 / lens_fl - Cobj
elif propagator == "OO":
lens_phase = 1.0 / lens_fl - Cobj + Cima
x = (np.arange(self._wfo.shape[1]) - self._wfo.shape[1] // 2) * self.dx
y = (np.arange(self._wfo.shape[0]) - self._wfo.shape[0] // 2) * self.dy
xx, yy = np.meshgrid(x, y)
qphase = -(xx**2 + yy**2) * (0.5 * lens_phase / self.wl)
self._fratio = np.abs(delta_z) / (2 * wz)
self._wfo = self._wfo * np.exp(2.0j * np.pi * qphase)
def Magnification(self, My, Mx=None):
"""
Given the optical magnification along one or both directions, updates the sampling along both directions,
the beam semi-diameter, the Rayleigh distance, the distance to focus, and the beam focal ratio
Parameters
----------
My: scalar
optical magnification along tangential direction
Mx: scalar
optical magnification along sagittal direction
Returns
-------
out: None
updates the wfo parameters
"""
if Mx is None:
Mx = My
assert Mx > 0.0, "Negative magnification not implemented yet."
assert My > 0.0, "Negative magnification not implemented yet."
self._dx *= Mx
self._dy *= My
if np.abs(Mx - 1.0) < 1.0e-8 or Mx is None:
logger.trace(
"Does not do anything if magnification x is close to unity."
)
return
logger.warning(
"Gaussian beam magnification is implemented, but has not been tested."
)
# Current distance to focus (before magnification)
delta_z = self.z - self.zw0
# Current w(z) (before magnification)
wz = self.w0 * np.sqrt(1.0 + ((self.z - self.zw0) / self.zr) ** 2)
# Apply magnification following ABCD Gaussian beam prescription
# i.e. w'(z) = Mx*w(z), R'(z) = Mx**2 * R(z)
delta_z *= Mx**2
wz *= Mx
self._w0 *= Mx # From Eq 56, Lawrence (1992)
self._zr *= Mx**2
self._zw0 = self.z - delta_z
self._fratio = np.abs(delta_z) / (2 * wz)
def ChangeMedium(self, n1n2):
"""
Given the ratio of refractive indices n1/n2 for light propagating from a medium with refractive index n1,
into a medium with refractive index n2, updates the Rayleigh distance, the wavelength, the distance to focus,
and the beam focal ratio
Parameters
----------
        n1n2: scalar
            ratio of refractive indices n1/n2
Returns
-------
out: None
updates the wfo parameters
"""
        _n1n2 = np.abs(n1n2)  # magnitude of the index ratio (currently unused)
        # Current distance to focus (before the medium change)
delta_z = self.z - self.zw0
delta_z /= n1n2
self._zr /= n1n2
self._wl *= n1n2
self._zw0 = self.z - delta_z
self._fratio /= n1n2
def ptp(self, dz):
"""
Plane-to-plane (far field) wavefront propagator
Parameters
----------
dz: scalar
propagation distance
"""
if np.abs(dz) < 0.001 * self.wl:
logger.debug(
"Thickness smaller than 1/1000 wavelength. Returning.."
)
return
if self.C != 0:
logger.error("PTP wavefront should be planar")
raise ValueError("PTP wavefront should be planar")
wf = np.fft.ifftshift(self._wfo)
wf = np.fft.fft2(wf, norm="ortho")
fx = np.fft.fftfreq(wf.shape[1], d=self.dx)
fy = np.fft.fftfreq(wf.shape[0], d=self.dy)
fxx, fyy = np.meshgrid(fx, fy)
qphase = (np.pi * self.wl * dz) * (fxx**2 + fyy**2)
wf = np.fft.ifft2(np.exp(-1.0j * qphase) * wf, norm="ortho")
self._z = self._z + dz
self._wfo = np.fft.fftshift(wf)
def stw(self, dz):
"""
Spherical-to-waist (near field to far field) wavefront propagator
Parameters
----------
dz: scalar
propagation distance
"""
if np.abs(dz) < 0.001 * self.wl:
logger.debug(
"Thickness smaller than 1/1000 wavelength. Returning.."
)
return
if self.C == 0.0:
logger.error("STW wavefront should not be planar")
raise ValueError("STW wavefront should not be planar")
s = "forward" if dz >= 0 else "reverse"
wf = np.fft.ifftshift(self._wfo)
if s == "forward":
wf = np.fft.fft2(wf, norm="ortho")
elif s == "reverse":
wf = np.fft.ifft2(wf, norm="ortho")
fx = np.fft.fftfreq(wf.shape[1], d=self.dx)
fy = np.fft.fftfreq(wf.shape[0], d=self.dy)
fxx, fyy = np.meshgrid(fx, fy)
qphase = (np.pi * self.wl * dz) * (fxx**2 + fyy**2)
self._z = self._z + dz
self._C = 0.0
self._dx = (fx[1] - fx[0]) * self.wl * np.abs(dz)
self._dy = (fy[1] - fy[0]) * self.wl * np.abs(dz)
self._wfo = np.fft.fftshift(np.exp(1.0j * qphase) * wf)
def wts(self, dz):
"""
Waist-to-spherical (far field to near field) wavefront propagator
Parameters
----------
dz: scalar
propagation distance
"""
if np.abs(dz) < 0.001 * self.wl:
logger.debug(
"Thickness smaller than 1/1000 wavelength. Returning.."
)
return
if self.C != 0.0:
logger.error("WTS wavefront should be planar")
raise ValueError("WTS wavefront should be planar")
s = "forward" if dz >= 0 else "reverse"
x = (np.arange(self._wfo.shape[1]) - self._wfo.shape[1] // 2) * self.dx
y = (np.arange(self._wfo.shape[0]) - self._wfo.shape[0] // 2) * self.dy
xx, yy = np.meshgrid(x, y)
qphase = (np.pi / (dz * self.wl)) * (xx**2 + yy**2)
wf = np.fft.ifftshift(np.exp(1.0j * qphase) * self._wfo)
if s == "forward":
wf = np.fft.fft2(wf, norm="ortho")
elif s == "reverse":
wf = np.fft.ifft2(wf, norm="ortho")
self._z = self._z + dz
self._C = 1 / (self.z - self.zw0)
        self._dx = self.wl * np.abs(dz) / (wf.shape[1] * self.dx)
        self._dy = self.wl * np.abs(dz) / (wf.shape[0] * self.dy)  # axis 0 runs along y
self._wfo = np.fft.fftshift(wf)
def propagate(self, dz):
"""
Wavefront propagator. Selects the appropriate propagation primitive and applies the wf propagation
Parameters
----------
dz: scalar
propagation distance
"""
propagator = self.insideout() + self.insideout(self.z + dz)
z1 = self.z
z2 = self.z + dz
if propagator == "II":
self.ptp(dz)
elif propagator == "OI":
self.stw(self.zw0 - z1)
self.ptp(z2 - self.zw0)
elif propagator == "IO":
self.ptp(self.zw0 - z1)
self.wts(z2 - self.zw0)
elif propagator == "OO":
self.stw(self.zw0 - z1)
self.wts(z2 - self.zw0)
def zernikes(
self, index, Z, ordering, normalize, radius, offset=0.0, origin="x"
):
"""
Add a WFE represented by a Zernike expansion
Parameters
----------
index: array of integers
Sequence of zernikes to use. It should be a continuous sequence.
Z : array of floats
The coefficients of the Zernike polynomials in meters.
ordering: string
Can be 'ansi', 'noll', 'fringe', or 'standard'.
normalize: bool
Polynomials are normalised to RMS=1 if True, or to unity at radius if False.
radius: float
The radius of the circular aperture over which the polynomials are calculated.
offset: float
Angular offset in degrees.
origin: string
Angles measured counter-clockwise positive from x axis by default (origin='x').
Set origin='y' for angles measured clockwise-positive from the y-axis.
Returns
-------
out: masked array
the WFE
"""
assert not np.any(
np.diff(index) - 1
), "Zernike sequence should be continuous"
x = (np.arange(self._wfo.shape[1]) - self._wfo.shape[1] // 2) * self.dx
y = (np.arange(self._wfo.shape[0]) - self._wfo.shape[0] // 2) * self.dy
xx, yy = np.meshgrid(x, y)
rho = np.sqrt(xx**2 + yy**2) / radius
if origin == "x":
phi = np.arctan2(yy, xx) + np.deg2rad(offset)
elif origin == "y":
phi = np.arctan2(xx, yy) + np.deg2rad(offset)
else:
logger.error(
"Origin {} not recognised. Origin shall be either x or y".format(
origin
)
)
raise ValueError(
"Origin {} not recognised. Origin shall be either x or y".format(
origin
)
)
zernike = Zernike(
len(index), rho, phi, ordering=ordering, normalize=normalize
)
zer = zernike()
wfe = (zer.T * Z).T.sum(axis=0)
self._wfo = self._wfo * np.exp(
2.0 * np.pi * 1j * wfe / self._wl
).filled(0)
return wfe
def psd(
self,
A=10.0,
B=0.0,
C=0.0,
fknee=1.0,
fmin=None,
fmax=None,
SR=0.0,
units=u.m,
):
"""
Add a WFE represented by a power spectral density (PSD) and surface roughness (SR) specification.
Parameters
----------
A : float
The amplitude of the PSD.
B : float
PSD parameter. If B = 0, the PSD is a power law.
C : float
PSD parameter. It sets the slope of the PSD.
fknee : float
The knee frequency of the PSD.
fmin : float
The minimum frequency of the PSD.
fmax : float
The maximum frequency of the PSD.
SR : float
The rms of the surface roughness.
units : astropy.units
The units of the SFE. Default is meters.
Returns
-------
out: masked array
the WFE
"""
# compute 2D frequency grid
        fx = np.fft.fftfreq(self._wfo.shape[1], self.dx)  # x runs along axis 1
        fy = np.fft.fftfreq(self._wfo.shape[0], self.dy)  # y runs along axis 0
fxx, fyy = np.meshgrid(fx, fy)
f = np.sqrt(fxx**2 + fyy**2)
f[f == 0] = 1e-100
if fmax is None:
print("WARNING: fmax not provided, using f_Nyq")
fmax = 0.5 * np.sqrt(self.dx**-2 + self.dy**-2)
else:
f_Nyq = 0.5 * np.sqrt(self.dx**-2 + self.dy**-2)
assert fmax <= f_Nyq, f"fmax must be less than or equal to f_Nyq ({f_Nyq})"
if fmin is None:
print("WARNING: fmin not provided, using 1 / D")
fmin = 1 / (self._wfo.shape[0] * np.max([self.dx, self.dy]))
# compute 2D PSD
psd = PSD(
pupil=self._wfo.copy().real,
A=A,
B=B,
C=C,
f=f,
fknee=fknee,
fmin=fmin,
fmax=fmax,
SR=SR,
units=units,
)
wfe = psd()
# update wfo
self._wfo = self._wfo * np.exp(2.0 * np.pi * 1j * wfe / self._wl)
return wfe
|
(beam_diameter, wl, grid_size, zoom)
|
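Putting the pieces together, a minimal end-to-end sketch that forms a PSF at the focus of an ideal lens (the values are illustrative):

import paos

wfo = paos.WFO(beam_diameter=1.0, wl=3.0e-6, grid_size=512, zoom=4)
wfo.aperture(xc=0.0, yc=0.0, r=0.5, shape="circular")
wfo.make_stop()          # normalise the pupil to unit energy
wfo.lens(lens_fl=1.0)    # ideal paraxial lens, 1 m focal length
wfo.propagate(dz=1.0)    # move to the focal plane
psf = wfo.amplitude**2
print(wfo.z, wfo.distancetofocus)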
61,969 |
paos.classes.wfo
|
ChangeMedium
|
Given the ratio of refractive indices n1/n2 for light propagating from a medium with refractive index n1,
into a medium with refractive index n2, updates the Rayleigh distance, the wavelength, the distance to focus,
and the beam focal ratio
Parameters
----------
n1n2: scalar
    ratio of refractive indices n1/n2
Returns
-------
out: None
updates the wfo parameters
|
def ChangeMedium(self, n1n2):
"""
Given the ratio of refractive indices n1/n2 for light propagating from a medium with refractive index n1,
into a medium with refractive index n2, updates the Rayleigh distance, the wavelength, the distance to focus,
and the beam focal ratio
Parameters
----------
    n1n2: scalar
        ratio of refractive indices n1/n2
Returns
-------
out: None
updates the wfo parameters
"""
    _n1n2 = np.abs(n1n2)  # magnitude of the index ratio (currently unused)
    # Current distance to focus (before the medium change)
delta_z = self.z - self.zw0
delta_z /= n1n2
self._zr /= n1n2
self._wl *= n1n2
self._zw0 = self.z - delta_z
self._fratio /= n1n2
|
(self, n1n2)
|
61,970 |
paos.classes.wfo
|
Magnification
|
Given the optical magnification along one or both directions, updates the sampling along both directions,
the beam semi-diameter, the Rayleigh distance, the distance to focus, and the beam focal ratio
Parameters
----------
My: scalar
optical magnification along tangential direction
Mx: scalar
optical magnification along sagittal direction
Returns
-------
out: None
updates the wfo parameters
|
def Magnification(self, My, Mx=None):
"""
Given the optical magnification along one or both directions, updates the sampling along both directions,
the beam semi-diameter, the Rayleigh distance, the distance to focus, and the beam focal ratio
Parameters
----------
My: scalar
optical magnification along tangential direction
Mx: scalar
optical magnification along sagittal direction
Returns
-------
out: None
updates the wfo parameters
"""
if Mx is None:
Mx = My
assert Mx > 0.0, "Negative magnification not implemented yet."
assert My > 0.0, "Negative magnification not implemented yet."
self._dx *= Mx
self._dy *= My
if np.abs(Mx - 1.0) < 1.0e-8 or Mx is None:
logger.trace(
"Does not do anything if magnification x is close to unity."
)
return
logger.warning(
"Gaussian beam magnification is implemented, but has not been tested."
)
# Current distance to focus (before magnification)
delta_z = self.z - self.zw0
# Current w(z) (before magnification)
wz = self.w0 * np.sqrt(1.0 + ((self.z - self.zw0) / self.zr) ** 2)
# Apply magnification following ABCD Gaussian beam prescription
# i.e. w'(z) = Mx*w(z), R'(z) = Mx**2 * R(z)
delta_z *= Mx**2
wz *= Mx
self._w0 *= Mx # From Eq 56, Lawrence (1992)
self._zr *= Mx**2
self._zw0 = self.z - delta_z
self._fratio = np.abs(delta_z) / (2 * wz)
|
(self, My, Mx=None)
|
61,971 |
paos.classes.wfo
|
__init__
| null |
def __init__(self, beam_diameter, wl, grid_size, zoom):
assert np.log2(grid_size).is_integer(), "Grid size should be 2**n"
assert zoom > 0, "zoom factor should be positive"
assert beam_diameter > 0, "beam diameter should be positive"
assert wl > 0, "a wavelength should be positive"
self._wl = wl
self._z = 0.0 # current beam z coordinate
self._w0 = beam_diameter / 2.0 # beam waist
self._zw0 = 0.0 # beam waist z coordinate
self._zr = np.pi * self.w0**2 / wl # Rayleigh distance
self._rayleigh_factor = 2.0
self._dx = beam_diameter * zoom / grid_size # pixel size
self._dy = beam_diameter * zoom / grid_size # pixel size
self._C = 0.0 # beam curvature, start with a planar wf
self._fratio = np.inf # Gaussian beam f-ratio
grid_size = np.uint(grid_size)
self._wfo = np.ones((grid_size, grid_size), dtype=np.complex128)
|
(self, beam_diameter, wl, grid_size, zoom)
|
61,972 |
paos.classes.wfo
|
aperture
|
Apply aperture mask
Parameters
----------
xc: scalar
x-centre of the aperture
yc: scalar
y-centre of the aperture
hx, hy: scalars
semi-axes of shape 'elliptical' aperture, or full dimension of shape 'rectangular' aperture
r: scalar
radius of shape 'circular' aperture
shape: string
defines aperture shape. Can be 'elliptical', 'circular', 'rectangular'
tilt: scalar
tilt angle in degrees. Applies to shapes 'elliptical' and 'rectangular'.
obscuration: boolean
if True, aperture mask is converted into obscuration mask.
|
def aperture(
self,
xc,
yc,
hx=None,
hy=None,
r=None,
shape="elliptical",
tilt=None,
obscuration=False,
):
"""
Apply aperture mask
Parameters
----------
xc: scalar
x-centre of the aperture
yc: scalar
y-centre of the aperture
hx, hy: scalars
semi-axes of shape 'elliptical' aperture, or full dimension of shape 'rectangular' aperture
r: scalar
radius of shape 'circular' aperture
shape: string
defines aperture shape. Can be 'elliptical', 'circular', 'rectangular'
tilt: scalar
tilt angle in degrees. Applies to shapes 'elliptical' and 'rectangular'.
obscuration: boolean
if True, aperture mask is converted into obscuration mask.
"""
ixc = xc / self.dx + self._wfo.shape[1] / 2
iyc = yc / self.dy + self._wfo.shape[0] / 2
if shape == "elliptical":
if hx is None or hy is None:
logger.error("Semi major/minor axes not defined")
raise AssertionError("Semi major/minor axes not defined")
ihx = hx / self.dx
ihy = hy / self.dy
theta = 0.0 if tilt is None else np.deg2rad(tilt)
aperture = photutils.aperture.EllipticalAperture(
(ixc, iyc), ihx, ihy, theta=theta
)
mask = aperture.to_mask(method="exact").to_image(self._wfo.shape)
elif shape == "circular":
if r is None:
logger.error("Radius not defined")
raise AssertionError("Radius not defined")
ihx = r / self.dx
ihy = r / self.dy
theta = 0.0
aperture = photutils.aperture.EllipticalAperture(
(ixc, iyc), ihx, ihy, theta=theta
)
mask = aperture.to_mask(method="exact").to_image(self._wfo.shape)
elif shape == "rectangular":
if hx is None or hy is None:
logger.error("Semi major/minor axes not defined")
raise AssertionError("Semi major/minor axes not defined")
ihx = hx / self.dx
ihy = hy / self.dy
theta = 0.0 if tilt is None else np.deg2rad(tilt)
aperture = photutils.aperture.RectangularAperture(
(ixc, iyc), ihx, ihy, theta=theta
)
# Exact method not implemented in photutils 1.0.2
mask = aperture.to_mask(method="subpixel", subpixels=32).to_image(
self._wfo.shape
)
else:
logger.error("Aperture {:s} not defined yet.".format(shape))
raise ValueError("Aperture {:s} not defined yet.".format(shape))
if obscuration:
self._wfo *= 1 - mask
else:
self._wfo *= mask
return aperture
|
(self, xc, yc, hx=None, hy=None, r=None, shape='elliptical', tilt=None, obscuration=False)
|
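For instance, a circular pupil with a central obscuration (a simple two-mirror telescope mask) can be built with two calls; the radii are illustrative:

import paos

wfo = paos.WFO(1.0, 3.0e-6, 512, 4)
wfo.aperture(xc=0.0, yc=0.0, r=0.5, shape="circular")                    # pupil
wfo.aperture(xc=0.0, yc=0.0, r=0.1, shape="circular", obscuration=True)  # secondary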
61,973 |
paos.classes.wfo
|
insideout
|
Check if z position is within the Rayleigh distance
Parameters
----------
z: scalar
beam coordinate along the propagation axis
Returns
-------
out: string
'I' if :math:`|z - z_{w0}|` < rayleigh_factor * :math:`z_{r}`, else 'O'
|
def insideout(self, z=None):
"""
Check if z position is within the Rayleigh distance
Parameters
----------
z: scalar
        beam coordinate along the propagation axis
Returns
-------
out: string
        'I' if :math:`|z - z_{w0}|` < rayleigh_factor * :math:`z_{r}`, else 'O'
"""
if z is None:
delta_z = self.z - self.zw0
else:
delta_z = z - self.zw0
if np.abs(delta_z) < self.rayleigh_factor * self.zr:
return "I"
else:
return "O"
|
(self, z=None)
|
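The returned letter drives the propagator selection in propagate (see the later record); for example, a freshly created beam sits at its waist, well inside the Rayleigh range:

import paos

wfo = paos.WFO(1.0, 3.0e-6, 512, 4)
print(wfo.insideout())               # 'I': at the waist
print(wfo.insideout(z=10 * wfo.zr))  # 'O': beyond rayleigh_factor * zr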
61,974 |
paos.classes.wfo
|
lens
|
Apply wavefront phase from paraxial lens
Parameters
----------
lens_fl: scalar
Lens focal length. Positive for converging lenses. Negative for diverging lenses.
Note
----------
A paraxial lens imposes a quadratic phase shift.
|
def lens(self, lens_fl):
"""
Apply wavefront phase from paraxial lens
Parameters
----------
lens_fl: scalar
Lens focal length. Positive for converging lenses. Negative for diverging lenses.
Note
----------
A paraxial lens imposes a quadratic phase shift.
"""
wz = self.w0 * np.sqrt(1.0 + ((self.z - self.zw0) / self.zr) ** 2)
delta_z = self.z - self.zw0
propagator = self.insideout()
# estimate Gaussian beam curvature after lens
gCobj = delta_z / (
delta_z**2 + self.zr**2
) # Gaussian beam curvature before lens
gCima = gCobj - 1.0 / lens_fl # Gaussian beam curvature after lens
# update Gaussian beam parameters
self._w0 = wz / np.sqrt(1.0 + (np.pi * wz**2 * gCima / self.wl) ** 2)
self._zw0 = (
-gCima / (gCima**2 + (self.wl / (np.pi * wz**2)) ** 2) + self.z
)
self._zr = np.pi * self.w0**2 / self.wl
propagator = propagator + self.insideout()
if propagator[0] == "I" or self.C == 0.0:
Cobj = 0.0
else:
Cobj = 1 / delta_z
delta_z = self.z - self.zw0
if propagator[1] == "I":
Cima = 0.0
else:
Cima = 1 / delta_z
self._C = Cima
if propagator == "II":
lens_phase = 1.0 / lens_fl
elif propagator == "IO":
lens_phase = 1 / lens_fl + Cima
elif propagator == "OI":
lens_phase = 1.0 / lens_fl - Cobj
elif propagator == "OO":
lens_phase = 1.0 / lens_fl - Cobj + Cima
x = (np.arange(self._wfo.shape[1]) - self._wfo.shape[1] // 2) * self.dx
y = (np.arange(self._wfo.shape[0]) - self._wfo.shape[0] // 2) * self.dy
xx, yy = np.meshgrid(x, y)
qphase = -(xx**2 + yy**2) * (0.5 * lens_phase / self.wl)
self._fratio = np.abs(delta_z) / (2 * wz)
self._wfo = self._wfo * np.exp(2.0j * np.pi * qphase)
|
(self, lens_fl)
|
61,975 |
paos.classes.wfo
|
make_stop
|
Make current surface a stop.
Stop here just means that the wf at current position is normalised to unit energy.
|
def make_stop(self):
"""
Make current surface a stop.
Stop here just means that the wf at current position is normalised to unit energy.
"""
norm2 = np.sum(np.abs(self._wfo) ** 2)
self._wfo /= np.sqrt(norm2)
|
(self)
|
61,976 |
paos.classes.wfo
|
propagate
|
Wavefront propagator. Selects the appropriate propagation primitive and applies the wf propagation
Parameters
----------
dz: scalar
propagation distance
|
def propagate(self, dz):
"""
Wavefront propagator. Selects the appropriate propagation primitive and applies the wf propagation
Parameters
----------
dz: scalar
propagation distance
"""
propagator = self.insideout() + self.insideout(self.z + dz)
z1 = self.z
z2 = self.z + dz
if propagator == "II":
self.ptp(dz)
elif propagator == "OI":
self.stw(self.zw0 - z1)
self.ptp(z2 - self.zw0)
elif propagator == "IO":
self.ptp(self.zw0 - z1)
self.wts(z2 - self.zw0)
elif propagator == "OO":
self.stw(self.zw0 - z1)
self.wts(z2 - self.zw0)
|
(self, dz)
|
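For illustration, the two-letter code built from insideout selects the primitive chain; after a lens the beam typically starts in the 'O' region and focuses into the 'I' region:

import paos

wfo = paos.WFO(1.0, 3.0e-6, 512, 4)
wfo.lens(lens_fl=1.0)
code = wfo.insideout() + wfo.insideout(wfo.z + 1.0)
print(code)            # 'OI' here: spherical-to-waist, then plane-to-plane
wfo.propagate(dz=1.0)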
61,977 |
paos.classes.wfo
|
psd
|
Add a WFE represented by a power spectral density (PSD) and surface roughness (SR) specification.
Parameters
----------
A : float
The amplitude of the PSD.
B : float
PSD parameter. If B = 0, the PSD is a power law.
C : float
PSD parameter. It sets the slope of the PSD.
fknee : float
The knee frequency of the PSD.
fmin : float
The minimum frequency of the PSD.
fmax : float
The maximum frequency of the PSD.
SR : float
The rms of the surface roughness.
units : astropy.units
The units of the SFE. Default is meters.
Returns
-------
out: masked array
the WFE
|
def psd(
self,
A=10.0,
B=0.0,
C=0.0,
fknee=1.0,
fmin=None,
fmax=None,
SR=0.0,
units=u.m,
):
"""
Add a WFE represented by a power spectral density (PSD) and surface roughness (SR) specification.
Parameters
----------
A : float
The amplitude of the PSD.
B : float
PSD parameter. If B = 0, the PSD is a power law.
C : float
PSD parameter. It sets the slope of the PSD.
fknee : float
The knee frequency of the PSD.
fmin : float
The minimum frequency of the PSD.
fmax : float
The maximum frequency of the PSD.
SR : float
The rms of the surface roughness.
units : astropy.units
The units of the SFE. Default is meters.
Returns
-------
out: masked array
the WFE
"""
# compute 2D frequency grid
    fx = np.fft.fftfreq(self._wfo.shape[1], self.dx)  # x runs along axis 1
    fy = np.fft.fftfreq(self._wfo.shape[0], self.dy)  # y runs along axis 0
fxx, fyy = np.meshgrid(fx, fy)
f = np.sqrt(fxx**2 + fyy**2)
f[f == 0] = 1e-100
if fmax is None:
print("WARNING: fmax not provided, using f_Nyq")
fmax = 0.5 * np.sqrt(self.dx**-2 + self.dy**-2)
else:
f_Nyq = 0.5 * np.sqrt(self.dx**-2 + self.dy**-2)
assert fmax <= f_Nyq, f"fmax must be less than or equal to f_Nyq ({f_Nyq})"
if fmin is None:
print("WARNING: fmin not provided, using 1 / D")
fmin = 1 / (self._wfo.shape[0] * np.max([self.dx, self.dy]))
# compute 2D PSD
psd = PSD(
pupil=self._wfo.copy().real,
A=A,
B=B,
C=C,
f=f,
fknee=fknee,
fmin=fmin,
fmax=fmax,
SR=SR,
units=units,
)
wfe = psd()
# update wfo
self._wfo = self._wfo * np.exp(2.0 * np.pi * 1j * wfe / self._wl)
return wfe
|
(self, A=10.0, B=0.0, C=0.0, fknee=1.0, fmin=None, fmax=None, SR=0.0, units=Unit("m"))
|
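A usage sketch with the documented defaults and astropy units; the numeric values are illustrative only, not a recommendation:

import astropy.units as u
import paos

wfo = paos.WFO(1.0, 3.0e-6, 512, 4)
wfo.aperture(xc=0.0, yc=0.0, r=0.5, shape="circular")
# Power-law PSD (B = 0) with 20 nm rms surface roughness; fmin/fmax fall back
# to 1/D and the Nyquist frequency, with a warning, as coded above.
wfe = wfo.psd(A=10.0, B=0.0, C=1.0, fknee=1.0, SR=20.0e-9, units=u.m)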
61,978 |
paos.classes.wfo
|
ptp
|
Plane-to-plane (far field) wavefront propagator
Parameters
----------
dz: scalar
propagation distance
|
def ptp(self, dz):
"""
Plane-to-plane (far field) wavefront propagator
Parameters
----------
dz: scalar
propagation distance
"""
if np.abs(dz) < 0.001 * self.wl:
logger.debug(
"Thickness smaller than 1/1000 wavelength. Returning.."
)
return
if self.C != 0:
logger.error("PTP wavefront should be planar")
raise ValueError("PTP wavefront should be planar")
wf = np.fft.ifftshift(self._wfo)
wf = np.fft.fft2(wf, norm="ortho")
fx = np.fft.fftfreq(wf.shape[1], d=self.dx)
fy = np.fft.fftfreq(wf.shape[0], d=self.dy)
fxx, fyy = np.meshgrid(fx, fy)
qphase = (np.pi * self.wl * dz) * (fxx**2 + fyy**2)
wf = np.fft.ifft2(np.exp(-1.0j * qphase) * wf, norm="ortho")
self._z = self._z + dz
self._wfo = np.fft.fftshift(wf)
|
(self, dz)
|
61,979 |
paos.classes.wfo
|
stw
|
Spherical-to-waist (near field to far field) wavefront propagator
Parameters
----------
dz: scalar
propagation distance
|
def stw(self, dz):
"""
Spherical-to-waist (near field to far field) wavefront propagator
Parameters
----------
dz: scalar
propagation distance
"""
if np.abs(dz) < 0.001 * self.wl:
logger.debug(
"Thickness smaller than 1/1000 wavelength. Returning.."
)
return
if self.C == 0.0:
logger.error("STW wavefront should not be planar")
raise ValueError("STW wavefront should not be planar")
s = "forward" if dz >= 0 else "reverse"
wf = np.fft.ifftshift(self._wfo)
if s == "forward":
wf = np.fft.fft2(wf, norm="ortho")
elif s == "reverse":
wf = np.fft.ifft2(wf, norm="ortho")
fx = np.fft.fftfreq(wf.shape[1], d=self.dx)
fy = np.fft.fftfreq(wf.shape[0], d=self.dy)
fxx, fyy = np.meshgrid(fx, fy)
qphase = (np.pi * self.wl * dz) * (fxx**2 + fyy**2)
self._z = self._z + dz
self._C = 0.0
self._dx = (fx[1] - fx[0]) * self.wl * np.abs(dz)
self._dy = (fy[1] - fy[0]) * self.wl * np.abs(dz)
self._wfo = np.fft.fftshift(np.exp(1.0j * qphase) * wf)
|
(self, dz)
|
61,980 |
paos.classes.wfo
|
wts
|
Waist-to-spherical (far field to near field) wavefront propagator
Parameters
----------
dz: scalar
propagation distance
|
def wts(self, dz):
"""
Waist-to-spherical (far field to near field) wavefront propagator
Parameters
----------
dz: scalar
propagation distance
"""
if np.abs(dz) < 0.001 * self.wl:
logger.debug(
"Thickness smaller than 1/1000 wavelength. Returning.."
)
return
if self.C != 0.0:
logger.error("WTS wavefront should be planar")
raise ValueError("WTS wavefront should be planar")
s = "forward" if dz >= 0 else "reverse"
x = (np.arange(self._wfo.shape[1]) - self._wfo.shape[1] // 2) * self.dx
y = (np.arange(self._wfo.shape[0]) - self._wfo.shape[0] // 2) * self.dy
xx, yy = np.meshgrid(x, y)
qphase = (np.pi / (dz * self.wl)) * (xx**2 + yy**2)
wf = np.fft.ifftshift(np.exp(1.0j * qphase) * self._wfo)
if s == "forward":
wf = np.fft.fft2(wf, norm="ortho")
elif s == "reverse":
wf = np.fft.ifft2(wf, norm="ortho")
self._z = self._z + dz
self._C = 1 / (self.z - self.zw0)
    self._dx = self.wl * np.abs(dz) / (wf.shape[1] * self.dx)
    self._dy = self.wl * np.abs(dz) / (wf.shape[0] * self.dy)  # axis 0 runs along y
self._wfo = np.fft.fftshift(wf)
|
(self, dz)
|
61,981 |
paos.classes.wfo
|
zernikes
|
Add a WFE represented by a Zernike expansion
Parameters
----------
index: array of integers
Sequence of zernikes to use. It should be a continuous sequence.
Z : array of floats
The coefficients of the Zernike polynomials in meters.
ordering: string
Can be 'ansi', 'noll', 'fringe', or 'standard'.
normalize: bool
Polynomials are normalised to RMS=1 if True, or to unity at radius if False.
radius: float
The radius of the circular aperture over which the polynomials are calculated.
offset: float
Angular offset in degrees.
origin: string
Angles measured counter-clockwise positive from x axis by default (origin='x').
Set origin='y' for angles measured clockwise-positive from the y-axis.
Returns
-------
out: masked array
the WFE
|
def zernikes(
self, index, Z, ordering, normalize, radius, offset=0.0, origin="x"
):
"""
Add a WFE represented by a Zernike expansion
Parameters
----------
index: array of integers
Sequence of zernikes to use. It should be a continuous sequence.
Z : array of floats
The coefficients of the Zernike polynomials in meters.
ordering: string
Can be 'ansi', 'noll', 'fringe', or 'standard'.
normalize: bool
Polynomials are normalised to RMS=1 if True, or to unity at radius if False.
radius: float
The radius of the circular aperture over which the polynomials are calculated.
offset: float
Angular offset in degrees.
origin: string
Angles measured counter-clockwise positive from x axis by default (origin='x').
Set origin='y' for angles measured clockwise-positive from the y-axis.
Returns
-------
out: masked array
the WFE
"""
assert not np.any(
np.diff(index) - 1
), "Zernike sequence should be continuous"
x = (np.arange(self._wfo.shape[1]) - self._wfo.shape[1] // 2) * self.dx
y = (np.arange(self._wfo.shape[0]) - self._wfo.shape[0] // 2) * self.dy
xx, yy = np.meshgrid(x, y)
rho = np.sqrt(xx**2 + yy**2) / radius
if origin == "x":
phi = np.arctan2(yy, xx) + np.deg2rad(offset)
elif origin == "y":
phi = np.arctan2(xx, yy) + np.deg2rad(offset)
else:
logger.error(
"Origin {} not recognised. Origin shall be either x or y".format(
origin
)
)
raise ValueError(
"Origin {} not recognised. Origin shall be either x or y".format(
origin
)
)
zernike = Zernike(
len(index), rho, phi, ordering=ordering, normalize=normalize
)
zer = zernike()
wfe = (zer.T * Z).T.sum(axis=0)
self._wfo = self._wfo * np.exp(
2.0 * np.pi * 1j * wfe / self._wl
).filled(0)
return wfe
|
(self, index, Z, ordering, normalize, radius, offset=0.0, origin='x')
|
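For example, adding 50 nm of defocus (Noll index 4, so a continuous sequence 1..4 is passed); the beam parameters are illustrative:

import numpy as np
import paos

wfo = paos.WFO(1.0, 3.0e-6, 512, 4)
wfo.aperture(xc=0.0, yc=0.0, r=0.5, shape="circular")
index = np.arange(1, 5)                 # continuous Noll sequence 1..4
Z = np.array([0.0, 0.0, 0.0, 50.0e-9])  # only the defocus coefficient is non-zero
wfe = wfo.zernikes(index, Z, ordering="noll", normalize=True, radius=0.5)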
61,982 |
paos.classes.zernike
|
Zernike
|
Generates Zernike polynomials
Parameters
----------
N : integer
Number of polynomials to generate in a sequence following the defined 'ordering'
rho : array like
the radial coordinate normalised to the interval [0, 1]
phi : array like
Azimuthal coordinate in radians. Has same shape as rho.
ordering : string
Can either be:
ANSI (ordering='ansi', this is the default);
Noll (ordering='noll'). Used in Zemax as "Zernike Standard Coefficients",
R. Noll, "Zernike polynomials and atmospheric turbulence", J. Opt. Soc. Am., Vol. 66, No. 3, p207 (1976);
Fringe (ordering='fringe'), AKA the "Fringe" or "University of Arizona" notation;
Standard (ordering='standard'). Used in CodeV, Born and Wolf, Principles of Optics (Pergamon Press, New York, 1989).
normalize : bool
Set to True generates ortho-normal polynomials. Set to False generates orthogonal polynomials
as described in `Lakshminarayanan & Fleck, Journal of Modern Optics (2011) <https://doi.org/10.1080/09500340.2011.633763>`_.
The radial polynomial is estimated using the Jacobi polynomial expression as in their Equation 14.
Returns
-------
out : masked array
An instance of Zernike.
Example
-------
>>> import numpy as np
>>> from matplotlib import pyplot as plt
>>> x = np.linspace(-1.0, 1.0, 1024)
>>> xx, yy = np.meshgrid(x, x)
>>> rho = np.sqrt(xx**2 + yy**2)
>>> phi = np.arctan2(yy, xx)
>>> zernike = Zernike(36, rho, phi, ordering='noll', normalize=True)
>>> zer = zernike() # zer contains a list of polynomials, noll-ordered
>>> # Plot the defocus zernike polynomial
>>> plt.imshow(zer[3])
>>> plt.show()
>>> # Plot the defocus zernike polynomial
>>> plt.imshow(zernike(3))
>>> plt.show()
Note
----
In the example, the polar angle is counted counter-clockwise positive from the
x axis. To have a polar angle that is clockwise positive from the y axis
(as in figure 2 of `Lakshminarayanan & Fleck, Journal of Modern Optics (2011) <https://doi.org/10.1080/09500340.2011.633763>`_) use
>>> phi = 0.5*np.pi - np.arctan2(yy, xx)
|
class Zernike:
"""
Generates Zernike polynomials
Parameters
----------
N : integer
Number of polynomials to generate in a sequence following the defined 'ordering'
rho : array like
the radial coordinate normalised to the interval [0, 1]
phi : array like
Azimuthal coordinate in radians. Has same shape as rho.
ordering : string
Can either be:
ANSI (ordering='ansi', this is the default);
Noll (ordering='noll'). Used in Zemax as "Zernike Standard Coefficients",
R. Noll, "Zernike polynomials and atmospheric turbulence", J. Opt. Soc. Am., Vol. 66, No. 3, p207 (1976);
Fringe (ordering='fringe'), AKA the "Fringe" or "University of Arizona" notation;
Standard (ordering='standard'). Used in CodeV, Born and Wolf, Principles of Optics (Pergamon Press, New York, 1989).
normalize : bool
Set to True generates ortho-normal polynomials. Set to False generates orthogonal polynomials
    as described in `Lakshminarayanan & Fleck, Journal of Modern Optics (2011) <https://doi.org/10.1080/09500340.2011.633763>`_.
    The radial polynomial is estimated using the Jacobi polynomial expression as in their Equation 14.
Returns
-------
out : masked array
An instance of Zernike.
Example
-------
>>> import numpy as np
>>> from matplotlib import pyplot as plt
>>> x = np.linspace(-1.0, 1.0, 1024)
>>> xx, yy = np.meshgrid(x, x)
>>> rho = np.sqrt(xx**2 + yy**2)
>>> phi = np.arctan2(yy, xx)
>>> zernike = Zernike(36, rho, phi, ordering='noll', normalize=True)
>>> zer = zernike() # zer contains a list of polynomials, noll-ordered
>>> # Plot the defocus zernike polynomial
>>> plt.imshow(zer[3])
>>> plt.show()
>>> # Plot the defocus zernike polynomial
>>> plt.imshow(zernike(3))
>>> plt.show()
Note
----
In the example, the polar angle is counted counter-clockwise positive from the
x axis. To have a polar angle that is clockwise positive from the y axis
    (as in figure 2 of `Lakshminarayanan & Fleck, Journal of Modern Optics (2011) <https://doi.org/10.1080/09500340.2011.633763>`_) use
>>> phi = 0.5*np.pi - np.arctan2(yy, xx)
"""
def __init__(self, N, rho, phi, ordering="ansi", normalize=False):
assert ordering in (
"ansi",
"noll",
"fringe",
"standard",
), "Unrecognised ordering scheme."
assert N > 0, "N shall be a positive integer"
self.ordering = ordering
self.N = N
self.m, self.n = self.j2mn(N, ordering)
if normalize:
self.norm = [
np.sqrt(n + 1) if m == 0 else np.sqrt(2.0 * (n + 1))
for m, n in zip(self.m, self.n)
]
else:
self.norm = np.ones(self.N, dtype=np.float64)
mask = rho > 1.0
if isinstance(rho, np.ma.MaskedArray):
rho.mask |= mask
else:
rho = np.ma.MaskedArray(data=rho, mask=mask, fill_value=0.0)
Z = {}
for n in range(max(self.n) + 1):
Z[n] = {}
for m in range(-n, 1, 2):
Z[n][m] = data = self.__ZradJacobi__(m, n, rho)
Z[n][-m] = Z[n][m].view()
self.Zrad = [Z[n][m].view() for m, n in zip(self.m, self.n)]
Z = {0: np.ones_like(phi)}
for m in range(1, self.m.max() + 1):
Z[m] = np.cos(m * phi)
Z[-m] = np.sin(m * phi)
self.Zphi = [Z[m].view() for m in self.m]
self.Z = np.ma.MaskedArray(
[
self.norm[k] * self.Zrad[k] * self.Zphi[k]
for k in range(self.N)
],
fill_value=0.0,
)
def __call__(self, j=None):
"""
Parameters
----------
j : integer
Polynomial to return. If set to None, returns all polynomial requested at
instantiation
Returns
-------
out : masked array
if j is set to None, the output is a masked array where the first dimension
has the size of the number of polynomials requested.
When j is set to an integer, returns the j-th polynomial as a masked array.
"""
if j is None:
return self.Z
else:
return self.Z[j]
@staticmethod
def j2mn(N, ordering):
"""
Convert index j into azimuthal number, m, and radial number, n
for the first N Zernikes
Parameters
----------
N: integer
Number of polynomials (starting from Piston)
ordering: string
can take values 'ansi', 'standard', 'noll', 'fringe'
Returns
-------
m, n: array
"""
j = np.arange(N, dtype=int)
if ordering == "ansi":
n = np.ceil((-3.0 + np.sqrt(9.0 + 8.0 * j)) / 2.0).astype(int)
m = 2 * j - n * (n + 2)
elif ordering == "standard":
n = np.ceil((-3.0 + np.sqrt(9.0 + 8.0 * j)) / 2.0).astype(int)
m = -2 * j + n * (n + 2)
elif ordering == "noll":
index = j + 1
n = ((0.5 * (np.sqrt(8 * index - 7) - 3)) + 1).astype(int)
cn = n * (n + 1) / 2 + 1
m = np.empty(N, dtype=int)
idx = n % 2 == 0
m[idx] = (index[idx] - cn[idx] + 1) // 2 * 2
m[~idx] = (index[~idx] - cn[~idx]) // 2 * 2 + 1
m = (-1) ** (index % 2) * m
elif ordering == "fringe":
index = j + 1
m_n = 2 * (np.ceil(np.sqrt(index)) - 1)
g_s = (m_n / 2) ** 2 + 1
n = m_n / 2 + np.floor((index - g_s) / 2)
m = (m_n - n) * (1 - np.mod(index - g_s, 2) * 2)
return m.astype(int), n.astype(int)
else:
raise NameError("Ordering not supported.")
return m, n
@staticmethod
def mn2j(m, n, ordering):
"""
Convert radial and azimuthal numbers, respectively n and m, into index j
"""
if ordering == "ansi":
return (n * (n + 2) + m) // 2
elif ordering == "standard":
return (n * (n + 2) - m) // 2
elif ordering == "fringe":
a = (1 + (n + np.abs(m)) / 2) ** 2
b = 2 * np.abs(m)
c = (1 + np.sign(m)) / 2
return (a - b - c).astype(int) + 1
elif ordering == "noll":
_p = np.zeros(n.size, dtype=np.int64)
for idx, (_m, _n) in enumerate(zip(m, n)):
if _m > 0.0 and (_n % 4 in [0, 1]):
_p[idx] = 0
elif _m < 0.0 and (_n % 4 in [2, 3]):
_p[idx] = 0
elif _m >= 0.0 and (_n % 4 in [2, 3]):
_p[idx] = 1
elif _m <= 0.0 and (_n % 4 in [0, 1]):
_p[idx] = 1
else:
raise ValueError("Invalid (m,n) in Noll indexing.")
return (n * (n + 1) / 2 + np.abs(m) + _p).astype(np.int64)
else:
raise NameError("Ordering not supported.")
@staticmethod
def __ZradJacobi__(m, n, rho):
"""
Computes the radial Zernike polynomial
Parameters
----------
m : integer
azimuthal number
n : integer
            radial number
rho : array like
Pupil semi-diameter normalised radial coordinates
Returns
-------
R_mn : array like
the radial Zernike polynomial with shape identical to rho
"""
m = np.abs(m)
if n < 0:
raise ValueError(
"Invalid parameter: n={:d} should be > 0".format(n)
)
if m > n:
raise ValueError(
"Invalid parameter: n={:d} should be larger than m={:d}".format(
n, m
)
)
if (n - m) % 2:
raise ValueError(
"Invalid parameter: n-m={:d} should be a positive even number.".format(
n - m
)
)
jpoly = jacobi((n - m) // 2, m, 0.0, (1.0 - 2.0 * rho**2))
return (-1) ** ((n - m) // 2) * rho**m * jpoly
@staticmethod
def __ZradFactorial__(m, n, rho):
"""
CURRENTLY NOT USED
Computes the radial Zernike polynomial
Parameters
----------
m : integer
azimuthal number
n : integer
            radial number
rho : array like
Pupil semi-diameter normalised radial coordinates
Returns
-------
R_mn : array like
the radial Zernike polynomial with shape identical to rho
"""
m = np.abs(m)
if n < 0:
raise ValueError(
"Invalid parameter: n={:d} should be > 0".format(n)
)
if m > n:
raise ValueError(
"Invalid parameter: n={:d} should be larger than m={:d}".format(
n, m
)
)
if (n - m) % 2:
raise ValueError(
"Invalid parameter: n-m={:d} should be a positive even number.".format(
n - m
)
)
pre_fac = (
lambda k: (-1.0) ** k
* fac(n - k)
/ (fac(k) * fac((n + m) // 2 - k) * fac((n - m) // 2 - k))
)
return sum(
pre_fac(k) * rho ** (n - 2 * k) for k in range((n - m) // 2 + 1)
)
def cov(self):
"""
Computes the covariance matrix M defined as
>>> M[i, j] = np.mean(Z[i, ...]*Z[j, ...])
When a pupil is defined as :math:`\\Phi = \\sum c[k] Z[k, ...]`, the pupil RMS can be calculated as
>>> RMS = np.sqrt( np.dot(c, np.dot(M, c)) )
This works also on a non-circular pupil, provided that the polynomials are masked over the pupil.
Returns
-------
M : array
the covariance matrix
"""
cov = np.empty((self.Z.shape[0], self.Z.shape[0]))
for i in range(self.Z.shape[0]):
for j in range(i, self.Z.shape[0]):
cov[i, j] = cov[j, i] = np.ma.mean(self.Z[i] * self.Z[j])
cov[cov < 1e-10] = 0.0
return cov
|
(N, rho, phi, ordering='ansi', normalize=False)
|
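Following the cov docstring, the wavefront RMS of a coefficient vector can be estimated without building the pupil explicitly; a short sketch with illustrative coefficients:

import numpy as np
from paos.classes.zernike import Zernike

x = np.linspace(-1.0, 1.0, 256)
xx, yy = np.meshgrid(x, x)
rho, phi = np.hypot(xx, yy), np.arctan2(yy, xx)
zernike = Zernike(10, rho, phi, ordering="noll", normalize=True)
M = zernike.cov()  # close to the identity for ortho-normal polynomials
c = np.random.default_rng(0).normal(0.0, 10.0e-9, 10)  # coefficients in meters
rms = np.sqrt(c @ M @ c)
print(rms)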
61,983 |
paos.classes.zernike
|
__ZradFactorial__
|
CURRENTLY NOT USED
Computes the radial Zernike polynomial
Parameters
----------
m : integer
azimuthal number
n : integer
radial number
rho : array like
Pupil semi-diameter normalised radial coordinates
Returns
-------
R_mn : array like
the radial Zernike polynomial with shape identical to rho
|
@staticmethod
def __ZradFactorial__(m, n, rho):
"""
CURRENTLY NOT USED
Computes the radial Zernike polynomial
Parameters
----------
m : integer
azimuthal number
n : integer
radial number
rho : array like
Pupil semi-diameter normalised radial coordinates
Returns
-------
R_mn : array like
the radial Zernike polynomial with shape identical to rho
"""
m = np.abs(m)
if n < 0:
raise ValueError(
"Invalid parameter: n={:d} should be > 0".format(n)
)
if m > n:
raise ValueError(
"Invalid parameter: n={:d} should be larger than m={:d}".format(
n, m
)
)
if (n - m) % 2:
raise ValueError(
"Invalid parameter: n-m={:d} should be a positive even number.".format(
n - m
)
)
pre_fac = (
lambda k: (-1.0) ** k
* fac(n - k)
/ (fac(k) * fac((n + m) // 2 - k) * fac((n - m) // 2 - k))
)
return sum(
pre_fac(k) * rho ** (n - 2 * k) for k in range((n - m) // 2 + 1)
)
|
(m, n, rho)
|
61,984 |
paos.classes.zernike
|
__ZradJacobi__
|
Computes the radial Zernike polynomial
Parameters
----------
m : integer
azimuthal number
n : integer
radial number
rho : array like
Pupil semi-diameter normalised radial coordinates
Returns
-------
R_mn : array like
the radial Zernike polynomial with shape identical to rho
|
@staticmethod
def __ZradJacobi__(m, n, rho):
"""
Computes the radial Zernike polynomial
Parameters
----------
m : integer
azimuthal number
n : integer
radial number
rho : array like
Pupil semi-diameter normalised radial coordinates
Returns
-------
R_mn : array like
the radial Zernike polynomial with shape identical to rho
"""
m = np.abs(m)
if n < 0:
raise ValueError(
"Invalid parameter: n={:d} should be > 0".format(n)
)
if m > n:
raise ValueError(
"Invalid parameter: n={:d} should be larger than m={:d}".format(
n, m
)
)
if (n - m) % 2:
raise ValueError(
"Invalid parameter: n-m={:d} should be a positive even number.".format(
n - m
)
)
jpoly = jacobi((n - m) // 2, m, 0.0, (1.0 - 2.0 * rho**2))
return (-1) ** ((n - m) // 2) * rho**m * jpoly
|
(m, n, rho)
|
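The Jacobi-based routine above and the factorial fallback implement the same radial polynomial R_n^m. A minimal cross-check, as a sketch that assumes scipy.special.eval_jacobi can stand in for the package-internal jacobi helper:

import numpy as np
from math import factorial as fac
from scipy.special import eval_jacobi

def zrad_jacobi(m, n, rho):
    # R_n^m(rho) = (-1)^((n-m)/2) * rho^m * P_{(n-m)/2}^{(m,0)}(1 - 2*rho^2)
    m = abs(m)
    x = 1.0 - 2.0 * rho**2
    return (-1) ** ((n - m) // 2) * rho**m * eval_jacobi((n - m) // 2, m, 0.0, x)

def zrad_factorial(m, n, rho):
    # Direct factorial expansion, as in __ZradFactorial__ above
    m = abs(m)
    return sum(
        (-1.0) ** k * fac(n - k)
        / (fac(k) * fac((n + m) // 2 - k) * fac((n - m) // 2 - k))
        * rho ** (n - 2 * k)
        for k in range((n - m) // 2 + 1)
    )

rho = np.linspace(0.0, 1.0, 101)
assert np.allclose(zrad_jacobi(2, 4, rho), zrad_factorial(2, 4, rho))  # R_4^2 = 4*rho^4 - 3*rho^2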
61,985 |
paos.classes.zernike
|
__call__
|
Parameters
----------
j : integer
Polynomial to return. If set to None, returns all polynomials requested at
instantiation
Returns
-------
out : masked array
if j is set to None, the output is a masked array where the first dimension
has the size of the number of polynomials requested.
When j is set to an integer, returns the j-th polynomial as a masked array.
|
def __call__(self, j=None):
"""
Parameters
----------
j : integer
Polynomial to return. If set to None, returns all polynomials requested at
instantiation
Returns
-------
out : masked array
if j is set to None, the output is a masked array where the first dimension
has the size of the number of polynomials requested.
When j is set to an integer, returns the j-th polynomial as a masked array.
"""
if j is None:
return self.Z
else:
return self.Z[j]
|
(self, j=None)
|
61,986 |
paos.classes.zernike
|
__init__
| null |
def __init__(self, N, rho, phi, ordering="ansi", normalize=False):
assert ordering in (
"ansi",
"noll",
"fringe",
"standard",
), "Unrecognised ordering scheme."
assert N > 0, "N shall be a positive integer"
self.ordering = ordering
self.N = N
self.m, self.n = self.j2mn(N, ordering)
if normalize:
self.norm = [
np.sqrt(n + 1) if m == 0 else np.sqrt(2.0 * (n + 1))
for m, n in zip(self.m, self.n)
]
else:
self.norm = np.ones(self.N, dtype=np.float64)
mask = rho > 1.0
if isinstance(rho, np.ma.MaskedArray):
rho.mask |= mask
else:
rho = np.ma.MaskedArray(data=rho, mask=mask, fill_value=0.0)
Z = {}
for n in range(max(self.n) + 1):
Z[n] = {}
for m in range(-n, 1, 2):
Z[n][m] = data = self.__ZradJacobi__(m, n, rho)
Z[n][-m] = Z[n][m].view()
self.Zrad = [Z[n][m].view() for m, n in zip(self.m, self.n)]
Z = {0: np.ones_like(phi)}
for m in range(1, self.m.max() + 1):
Z[m] = np.cos(m * phi)
Z[-m] = np.sin(m * phi)
self.Zphi = [Z[m].view() for m in self.m]
self.Z = np.ma.MaskedArray(
[
self.norm[k] * self.Zrad[k] * self.Zphi[k]
for k in range(self.N)
],
fill_value=0.0,
)
|
(self, N, rho, phi, ordering='ansi', normalize=False)
|
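A minimal usage sketch of the constructor above; the class name Zernike is an assumption (the module path paos.classes.zernike is taken from this entry):

import numpy as np
from paos.classes.zernike import Zernike  # class name assumed

# Polar coordinates on a square grid; points with rho > 1 are masked internally.
x = np.linspace(-1.0, 1.0, 256)
xx, yy = np.meshgrid(x, x)
rho, phi = np.hypot(xx, yy), np.arctan2(yy, xx)

zern = Zernike(6, rho, phi, ordering="ansi", normalize=True)
all_modes = zern()  # masked array of shape (6, 256, 256)
defocus = zern(4)   # ANSI j=4 is (m, n) = (0, 2), i.e. defocus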
61,987 |
paos.classes.zernike
|
cov
|
Computes the covariance matrix M defined as
>>> M[i, j] = np.mean(Z[i, ...]*Z[j, ...])
When a pupil is defined as :math:`\Phi = \sum c[k] Z[k, ...]`, the pupil RMS can be calculated as
>>> RMS = np.sqrt( np.dot(c, np.dot(M, c)) )
This also works on a non-circular pupil, provided that the polynomials are masked over the pupil.
Returns
-------
M : array
the covariance matrix
|
def cov(self):
"""
Computes the covariance matrix M defined as
>>> M[i, j] = np.mean(Z[i, ...]*Z[j, ...])
When a pupil is defined as :math:`\\Phi = \\sum c[k] Z[k, ...]`, the pupil RMS can be calculated as
>>> RMS = np.sqrt( np.dot(c, np.dot(M, c)) )
This also works on a non-circular pupil, provided that the polynomials are masked over the pupil.
Returns
-------
M : array
the covariance matrix
"""
cov = np.empty((self.Z.shape[0], self.Z.shape[0]))
for i in range(self.Z.shape[0]):
for j in range(i, self.Z.shape[0]):
cov[i, j] = cov[j, i] = np.ma.mean(self.Z[i] * self.Z[j])
cov[cov < 1e-10] = 0.0
return cov
|
(self)
|
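The quadratic form quoted in the docstring is easy to exercise on a toy polynomial stack; a self-contained sketch of RMS = sqrt(c^T M c) on a masked pupil:

import numpy as np

yy, xx = np.mgrid[-1:1:64j, -1:1:64j]
mask = np.hypot(xx, yy) > 1.0
# Two toy "polynomials" masked over the unit pupil
Z = np.ma.MaskedArray([np.ones_like(xx), 2.0 * xx], mask=[mask, mask])

M = np.empty((2, 2))
for i in range(2):
    for j in range(i, 2):
        M[i, j] = M[j, i] = np.ma.mean(Z[i] * Z[j])

c = np.array([0.0, 1.5])   # expansion coefficients of Phi = sum_k c[k] Z[k]
rms = np.sqrt(c @ M @ c)   # pupil RMS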
61,988 |
paos.classes.zernike
|
j2mn
|
Convert index j into azimuthal number, m, and radial number, n
for the first N Zernikes
Parameters
----------
N: integer
Number of polynomials (starting from Piston)
ordering: string
can take values 'ansi', 'standard', 'noll', 'fringe'
Returns
-------
m, n: array
|
@staticmethod
def j2mn(N, ordering):
"""
Convert index j into azimuthal number, m, and radial number, n
for the first N Zernikes
Parameters
----------
N: integer
Number of polynomials (starting from Piston)
ordering: string
can take values 'ansi', 'standard', 'noll', 'fringe'
Returns
-------
m, n: array
"""
j = np.arange(N, dtype=int)
if ordering == "ansi":
n = np.ceil((-3.0 + np.sqrt(9.0 + 8.0 * j)) / 2.0).astype(int)
m = 2 * j - n * (n + 2)
elif ordering == "standard":
n = np.ceil((-3.0 + np.sqrt(9.0 + 8.0 * j)) / 2.0).astype(int)
m = -2 * j + n * (n + 2)
elif ordering == "noll":
index = j + 1
n = ((0.5 * (np.sqrt(8 * index - 7) - 3)) + 1).astype(int)
cn = n * (n + 1) / 2 + 1
m = np.empty(N, dtype=int)
idx = n % 2 == 0
m[idx] = (index[idx] - cn[idx] + 1) // 2 * 2
m[~idx] = (index[~idx] - cn[~idx]) // 2 * 2 + 1
m = (-1) ** (index % 2) * m
elif ordering == "fringe":
index = j + 1
m_n = 2 * (np.ceil(np.sqrt(index)) - 1)
g_s = (m_n / 2) ** 2 + 1
n = m_n / 2 + np.floor((index - g_s) / 2)
m = (m_n - n) * (1 - np.mod(index - g_s, 2) * 2)
return m.astype(int), n.astype(int)
else:
raise ValueError("Ordering not supported.")
return m, n
|
(N, ordering)
|
61,989 |
paos.classes.zernike
|
mn2j
|
Convert radial and azimuthal numbers, respectively n and m, into index j
|
@staticmethod
def mn2j(m, n, ordering):
"""
Convert radial and azimuthal numbers, respectively n and m, into index j
"""
if ordering == "ansi":
return (n * (n + 2) + m) // 2
elif ordering == "standard":
return (n * (n + 2) - m) // 2
elif ordering == "fringe":
a = (1 + (n + np.abs(m)) / 2) ** 2
b = 2 * np.abs(m)
c = (1 + np.sign(m)) / 2
return (a - b - c).astype(int) + 1
elif ordering == "noll":
_p = np.zeros(n.size, dtype=np.int64)
for idx, (_m, _n) in enumerate(zip(m, n)):
if _m > 0.0 and (_n % 4 in [0, 1]):
_p[idx] = 0
elif _m < 0.0 and (_n % 4 in [2, 3]):
_p[idx] = 0
elif _m >= 0.0 and (_n % 4 in [2, 3]):
_p[idx] = 1
elif _m <= 0.0 and (_n % 4 in [0, 1]):
_p[idx] = 1
else:
raise ValueError("Invalid (m,n) in Noll indexing.")
return (n * (n + 1) / 2 + np.abs(m) + _p).astype(np.int64)
else:
raise ValueError("Ordering not supported.")
|
(m, n, ordering)
|
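j2mn and mn2j are inverses of one another; a self-contained round-trip check for the ANSI ordering, with both formulas transcribed from the code above:

import numpy as np

j = np.arange(21)
n = np.ceil((-3.0 + np.sqrt(9.0 + 8.0 * j)) / 2.0).astype(int)  # j2mn, 'ansi'
m = 2 * j - n * (n + 2)
j_back = (n * (n + 2) + m) // 2                                 # mn2j, 'ansi'
assert np.array_equal(j, j_back)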
61,991 |
paos.core.coordinateBreak
|
coordinate_break
|
Performs a coordinate break and estimates the new :math:`\vec{v_{t}}=(y, u_{y})`
and :math:`\vec{v_{s}}=(x, u_{x})`.
Parameters
----------
vt: array
vector :math:`\vec{v_{t}}=(y, u_{y})` describing a ray propagating in the tangential plane
vs: array
vector :math:`\vec{v_{s}}=(x, u_{x})` describing a ray propagating in the sagittal plane
xdec: float
x coordinate of the decenter to be applied
ydec: float
y coordinate of the decenter to be applied
xrot: float
tilt angle around the X axis to be applied
yrot: float
tilt angle around the Y axis to be applied
zrot: float
tilt angle around the Z axis to be applied
order: int
order of the coordinate break, defaults to 0.
Returns
-------
tuple
two arrays representing the new :math:`\vec{v_{t}}=(y, u_{y})`
and :math:`\vec{v_{s}}=(x, u_{x})`.
Note
----
When order=0, first a coordinate decenter is applied, followed by an XYZ rotation.
Coordinate break orders other than 0 not implemented yet.
|
def coordinate_break(vt, vs, xdec, ydec, xrot, yrot, zrot, order=0):
"""
Performs a coordinate break and estimates the new :math:`\\vec{v_{t}}=(y, u_{y})`
and :math:`\\vec{v_{s}}=(x, u_{x})`.
Parameters
----------
vt: array
vector :math:`\\vec{v_{t}}=(y, u_{y})` describing a ray propagating in the tangential plane
vs: array
vector :math:`\\vec{v_{s}}=(x, u_{x})` describing a ray propagating in the sagittal plane
xdec: float
x coordinate of the decenter to be applied
ydec: float
y coordinate of the decenter to be applied
xrot: float
tilt angle around the X axis to be applied
yrot: float
tilt angle around the Y axis to be applied
zrot: float
tilt angle around the Z axis to be applied
order: int
order of the coordinate break, defaults to 0.
Returns
-------
tuple
two arrays representing the new :math:`\\vec{v_{t}}=(y, u_{y})`
and :math:`\\vec{v_{s}}=(x, u_{x})`.
Note
----
When order=0, first a coordinate decenter is applied, followed by an XYZ rotation.
Coordinate break orders other than 0 not implemented yet.
"""
if order != 0:
logger.error(
"Coordinate break orders other than 0 not implemented yet"
)
raise ValueError(
"Coordinate break orders other than 0 not implemented yet"
)
if not np.isfinite(xdec):
xdec = 0.0
if not np.isfinite(ydec):
ydec = 0.0
if not np.isfinite(xrot):
xrot = 0.0
if not np.isfinite(yrot):
yrot = 0.0
if not np.isfinite(zrot):
zrot = 0.0
# Rotation matrix, intrinsic
U = R.from_euler("xyz", [xrot, yrot, zrot], degrees=True)
r0 = [vs[0] - xdec, vt[0] - ydec, 0.0]
n0 = [vs[1], vt[1], 1]
n1 = U.inv().apply(n0)
n1 /= n1[2]
r1_ln1 = U.inv().apply(r0)
r1 = r1_ln1 - n1 * r1_ln1[2] / n1[2]
vt1 = np.array([r1[1], n1[1]])
vs1 = np.array([r1[0], n1[0]])
return vt1, vs1
|
(vt, vs, xdec, ydec, xrot, yrot, zrot, order=0)
|
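A short usage sketch: with all rotations at zero the coordinate break reduces to a pure decenter of the ray coordinates, which can be checked directly (import path taken from this entry):

import numpy as np
from paos.core.coordinateBreak import coordinate_break

vt = np.array([0.010, 0.0])  # y = 10 mm, zero slope
vs = np.array([0.000, 0.0])  # x = 0, zero slope

vt1, vs1 = coordinate_break(vt, vs, xdec=0.0, ydec=0.004,
                            xrot=0.0, yrot=0.0, zrot=0.0)
assert np.isclose(vt1[0], 0.006)  # y shifted by the decenter, slope unchanged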
61,998 |
paos.core.parseConfig
|
parse_config
|
Parse an ini lens file
Parameters
----------
filename: string
full path to ini file
Returns
-------
pup_diameter: float
pupil diameter in lens units
parameters: dict
Dictionary with parameters defined in the section 'general' of the ini file
wavelengths: List
list of wavelengths
fields: List
list of fields
opt_chain_list: List
Each list entry is a dictionary of the optical surfaces in the .ini file, estimated at the given wavelength.
(Relevant only for diffractive components)
Examples
--------
>>> from paos.core.parseConfig import parse_config
>>> pup_diameter, parameters, wavelengths, fields, opt_chains = parse_config('path/to/ini/file')
|
def parse_config(filename):
"""
Parse an ini lens file
Parameters
----------
filename: string
full path to ini file
Returns
-------
pup_diameter: float
pupil diameter in lens units
parameters: dict
Dictionary with parameters defined in the section 'general' of the ini file
wavelengths: List
list of wavelengths
fields: List
list of fields
opt_chain_list: List
Each list entry is a dictionary of the optical surfaces in the .ini file, estimated at the given wavelength.
(Relevant only for diffractive components)
Examples
--------
>>> from paos.core.parseConfig import parse_config
>>> pup_diameter, parameters, wavelengths, fields, opt_chains = parse_config('path/to/ini/file')
"""
config = configparser.ConfigParser()
filename = os.path.expanduser(filename)
if not os.path.exists(filename) or not os.path.isfile(filename):
logger.error(
"Input file {} does not exist or is not a file. Quitting..".format(
filename
)
)
sys.exit()
config.read(filename)
# Parse parameters in section 'general'
allowed_grid_size = [64, 128, 256, 512, 1024]
allowed_zoom_val = [1, 2, 4, 8, 16]
parameters = {
"project": config["general"]["project"],
"version": config["general"]["version"],
}
dtmp = config["general"].getint("grid_size")
if dtmp not in allowed_grid_size:
raise ValueError(
"Grid size not allowed. Allowed values are", allowed_grid_size
)
parameters["grid_size"] = dtmp
dtmp = config["general"].getint("zoom")
if dtmp not in allowed_zoom_val:
raise ValueError(
"Zoom value not allowed. Allowed values are", allowed_zoom_val
)
elif dtmp == 1:
logger.warning(
"Zoom value is 1, i.e. the beam width occupies the whole of the grid width. "
"This will result a PSF that is not Nyquist sampled."
)
parameters["zoom"] = dtmp
lens_unit = config["general"].get("lens_unit", "")
if lens_unit != "m":
raise ValueError("Verify lens_unit=m in ini file")
Tambient = config["general"].getfloat("Tambient")
parameters["Tambient"] = Tambient
Pambient = config["general"].getfloat("Pambient")
parameters["Pambient"] = Pambient
# Parse section 'wavelengths'
wavelengths = []
num = 1
while True:
_wl_ = config["wavelengths"].getfloat("w{:d}".format(num))
if _wl_:
wavelengths.append(_wl_)
else:
break
num += 1
# Parse section 'fields'
fields = []
num = 1
while True:
_fld_ = config["fields"].get("f{:d}".format(num))
if _fld_:
_fld_ = np.fromstring(_fld_, sep=",")
_fld_ = np.tan(np.deg2rad(_fld_))
fields.append({"us": _fld_[0], "ut": _fld_[1]})
else:
break
num += 1
# Parse sections 'lens_??'
opt_chain_list = []
pup_diameter = None # input pupil pup_diameter
for _wl_ in wavelengths:
n1, n2 = None, None # Refractive index
glasslib = Material(_wl_, Tambient=Tambient, Pambient=Pambient)
opt_chain = {}
lens_num = 1
while "lens_{:02d}".format(lens_num) in config:
_data_ = {"num": lens_num}
element = config["lens_{:02d}".format(lens_num)]
lens_num += 1
if element.getboolean("Ignore"):
continue
_data_["type"] = element.get("SurfaceType", None)
_data_["R"] = getfloat(element.get("Radius", ""))
_data_["T"] = getfloat(element.get("Thickness", ""))
_data_["material"] = element.get("Material", None)
_data_["is_stop"] = element.getboolean("Stop", False)
_data_["save"] = element.getboolean("Save", False)
_data_["name"] = element.get("Comment", "")
if _data_["type"] == "INIT":
n1 = 1.0
aperture = element.get("aperture", "").split(",")
aperture_shape, aperture_type = aperture[0].split()
if (
aperture_shape == "elliptical"
and aperture_type == "aperture"
):
xpup = getfloat(aperture[2])
ypup = getfloat(aperture[3])
pup_diameter = 2.0 * max(xpup, ypup)
continue
if n1 is None or pup_diameter is None:
# logger.error('INIT is not the first surface in Lens Data.')
raise ValueError("INIT is not the first surface in Lens Data.")
if _data_["type"] == "Zernike":
thickness = 0.0
curvature = 0.0
n2 = n1
wave = 1.0e-6 * getfloat(element.get("Par1", ""))
_data_["Zordering"] = element.get("Par2", "").lower()
_data_["Znormalize"] = element.getboolean("Par3")
_data_["Zradius"] = getfloat(element.get("Par4", ""))
_data_["Zorigin"] = element.get("Par5", "x")
_data_["Zindex"] = np.fromstring(
element.get("Zindex", ""), sep=",", dtype=np.int64
)
_data_["Z"] = (
np.fromstring(
element.get("Z", ""), sep=",", dtype=np.float64
)
* wave
)
_data_["ABCDt"] = ABCD(
thickness=thickness,
curvature=curvature,
n1=n1,
n2=n2,
M=1.0,
)
_data_["ABCDs"] = ABCD(
thickness=thickness,
curvature=curvature,
n1=n1,
n2=n2,
M=1.0,
)
elif _data_["type"] == "PSD":
thickness = 0.0
curvature = 0.0
n2 = n1
_data_["A"] = getfloat(element.get("Par1", ""))
_data_["B"] = getfloat(element.get("Par2", ""))
_data_["C"] = getfloat(element.get("Par3", ""))
_data_["fknee"] = getfloat(element.get("Par4", ""))
_data_["fmin"] = getfloat(element.get("Par5", ""))
_data_["fmax"] = getfloat(element.get("Par6", ""))
_data_["SR"] = getfloat(element.get("Par7", ""))
_data_["units"] = u.Unit(element.get("Par8", ""))
_data_["ABCDt"] = ABCD(
thickness=thickness,
curvature=curvature,
n1=n1,
n2=n2,
M=1.0,
)
_data_["ABCDs"] = ABCD(
thickness=thickness,
curvature=curvature,
n1=n1,
n2=n2,
M=1.0,
)
elif _data_["type"] == "Coordinate Break":
thickness = _data_["T"] if np.isfinite(_data_["T"]) else 0.0
curvature = 0.0
n2 = n1
_data_["xdec"] = getfloat(element.get("Par1", ""))
_data_["ydec"] = getfloat(element.get("Par2", ""))
_data_["xrot"] = getfloat(element.get("Par3", ""))
_data_["yrot"] = getfloat(element.get("Par4", ""))
_data_["ABCDt"] = ABCD(
thickness=thickness,
curvature=curvature,
n1=n1,
n2=n2,
M=1.0,
)
_data_["ABCDs"] = ABCD(
thickness=thickness,
curvature=curvature,
n1=n1,
n2=n2,
M=1.0,
)
elif _data_["type"] == "Paraxial Lens":
focal_length = getfloat(element.get("Par1", ""))
thickness = _data_["T"] if np.isfinite(_data_["T"]) else 0.0
curvature = (
1 / focal_length if np.isfinite(focal_length) else 0.0
)
n2 = n1
aperture = element.get("aperture", "")
if aperture:
aperture = aperture.split(",")
aperture_shape, aperture_type = aperture[0].split()
_data_["aperture"] = {
"shape": aperture_shape,
"type": aperture_type,
"xrad": getfloat(aperture[1]),
"yrad": getfloat(aperture[2]),
"xc": getfloat(aperture[3]),
"yc": getfloat(aperture[4]),
}
_data_["ABCDt"] = ABCD(
thickness=thickness,
curvature=curvature,
n1=n1,
n2=n2,
M=1.0,
)
_data_["ABCDs"] = ABCD(
thickness=thickness,
curvature=curvature,
n1=n1,
n2=n2,
M=1.0,
)
elif _data_["type"] == "ABCD":
thickness = _data_["T"] if np.isfinite(_data_["T"]) else 0.0
Ax = getfloat(element.get("Par1", ""))
Bx = getfloat(element.get("Par2", ""))
Cx = getfloat(element.get("Par3", ""))
Dx = getfloat(element.get("Par4", ""))
Ay = getfloat(element.get("Par5", ""))
By = getfloat(element.get("Par6", ""))
Cy = getfloat(element.get("Par7", ""))
Dy = getfloat(element.get("Par8", ""))
ABCDs = ABCD(
thickness=thickness, curvature=0.0, n1=n1, n2=n1, M=1.0
)
ABCDt = ABCD(
thickness=thickness, curvature=0.0, n1=n1, n2=n1, M=1.0
)
_ABCDs = np.array([[Ax, Bx], [Cx, Dx]])
_ABCDt = np.array([[Ay, By], [Cy, Dy]])
ABCDs.ABCD = ABCDs() @ _ABCDs
ABCDt.ABCD = ABCDt() @ _ABCDt
aperture = element.get("aperture", "")
if aperture:
aperture = aperture.split(",")
aperture_shape, aperture_type = aperture[0].split()
_data_["aperture"] = {
"shape": aperture_shape,
"type": aperture_type,
"xrad": getfloat(aperture[1]),
"yrad": getfloat(aperture[2]),
"xc": getfloat(aperture[3]),
"yc": getfloat(aperture[4]),
}
_data_["ABCDt"] = ABCDt
_data_["ABCDs"] = ABCDs
elif _data_["type"] == "Standard":
thickness = _data_["T"] if np.isfinite(_data_["T"]) else 0.0
curvature = (
1 / _data_["R"] if np.isfinite(_data_["R"]) else 0.0
)
aperture = element.get("aperture", "")
if aperture:
aperture = aperture.split(",")
aperture_shape, aperture_type = aperture[0].split()
_data_["aperture"] = {
"shape": aperture_shape,
"type": aperture_type,
"xrad": getfloat(aperture[1]),
"yrad": getfloat(aperture[2]),
"xc": getfloat(aperture[3]),
"yc": getfloat(aperture[4]),
}
if _data_["material"] == "MIRROR":
n2 = -n1
elif _data_["material"] in glasslib.materials.keys():
n2 = glasslib.nmat(_data_["material"])[1] * np.sign(n1)
else:
n2 = 1.0 * np.sign(n1)
_data_["ABCDt"] = ABCD(
thickness=thickness,
curvature=curvature,
n1=n1,
n2=n2,
M=1.0,
)
_data_["ABCDs"] = ABCD(
thickness=thickness,
curvature=curvature,
n1=n1,
n2=n2,
M=1.0,
)
else:
logger.error(
"Surface Type not recognised: {:s}".format(
str(_data_["type"])
)
)
raise ValueError(
"Surface Type not recognised: {:s}".format(
str(_data_["type"])
)
)
opt_chain[_data_["num"]] = _data_
n1 = n2
opt_chain_list.append(opt_chain)
return pup_diameter, parameters, wavelengths, fields, opt_chain_list
|
(filename)
|
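For orientation, a minimal sketch of the lens-file layout that parse_config expects, assembled from the parsing logic above; all values are illustrative, not from a real design. Note that for the INIT surface the parser reads the pupil semi-axes from the third and fourth comma-separated aperture values:

[general]
project = example
version = 1.0
grid_size = 512
zoom = 4
lens_unit = m
Tambient = 20.0
Pambient = 1.0

[wavelengths]
w1 = 0.55

[fields]
f1 = 0.0, 0.0

[lens_01]
SurfaceType = INIT
Comment = input pupil
aperture = elliptical aperture, 0.0, 0.5, 0.5

[lens_02]
SurfaceType = Paraxial Lens
Par1 = 1.0
Thickness = 1.0
Stop = True
Save = True
Comment = ideal focusing lens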
61,999 |
paos.core.plot
|
plot_pop
|
Given the POP simulation output dict, plots the squared amplitude of the
wavefront at all the optical surfaces.
Parameters
----------
retval: dict
simulation output dictionary
ima_scale: str
plot color map scale, can be either 'linear' or 'log'
ncols: int
number of columns for the subplots
figname: str
name of figure to save
options: dict
dict containing the options to display the plot: axis scale, axis unit, zoom scale and color scale.
Examples:
0) options={4: {'ima_scale':'linear'}}
1) options={4: {'surface_scale':60, 'ima_scale':'linear'}}
2) options={4: {'surface_scale':21, 'pixel_units':True, 'ima_scale':'linear'}}
3) options={4: {'surface_zoom':2, 'ima_scale':'log'}}
Returns
-------
out: None
displays the plot output or stores it to the indicated plot path
Examples
--------
>>> from paos.core.parseConfig import parse_config
>>> from paos.core.run import run
>>> from paos.core.plot import plot_pop
>>> pup_diameter, parameters, wavelengths, fields, opt_chains = parse_config('path/to/ini/file')
>>> ret_val = run(pup_diameter, 1.0e-6 * wavelengths[0], parameters['grid_size'],
>>> parameters['zoom'], fields[0], opt_chains[0])
>>> plot_pop(ret_val, ima_scale='log', ncols=3, figname='path/to/output/plot')
|
def plot_pop(retval, ima_scale="log", ncols=2, figname=None, options={}):
"""
Given the POP simulation output dict, plots the squared amplitude of the
wavefront at all the optical surfaces.
Parameters
----------
retval: dict
simulation output dictionary
ima_scale: str
plot color map scale, can be either 'linear' or 'log'
ncols: int
number of columns for the subplots
figname: str
name of figure to save
options: dict
dict containing the options to display the plot: axis scale, axis unit, zoom scale and color scale.
Examples:
0) options={4: {'ima_scale':'linear'}}
1) options={4: {'surface_scale':60, 'ima_scale':'linear'}}
2) options={4: {'surface_scale':21, 'pixel_units':True, 'ima_scale':'linear'}}
3) options={4: {'surface_zoom':2, 'ima_scale':'log'}}
Returns
-------
out: None
displays the plot output or stores it to the indicated plot path
Examples
--------
>>> from paos.core.parseConfig import parse_config
>>> from paos.core.run import run
>>> from paos.core.plot import plot_pop
>>> pup_diameter, parameters, wavelengths, fields, opt_chains = parse_config('path/to/ini/file')
>>> ret_val = run(pup_diameter, 1.0e-6 * wavelengths[0], parameters['grid_size'],
>>> parameters['zoom'], fields[0], opt_chains[0])
>>> plot_pop(ret_val, ima_scale='log', ncols=3, figname='path/to/output/plot')
"""
i, j = None, None
n_subplots = len(retval)
if ncols > n_subplots:
ncols = n_subplots
nrows = n_subplots // ncols
if n_subplots % ncols:
nrows += 1
figsize = (8 * ncols, 6 * nrows)
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
fig.patch.set_facecolor("white")
plt.subplots_adjust(hspace=0.3, wspace=0.5)
for k, (key, item) in enumerate(retval.items()):
if n_subplots == 1:
axis = ax
elif n_subplots <= ncols:
axis = ax[k]
else:
i = k % ncols
j = k // ncols
axis = ax[j, i]
simple_plot(
fig=fig,
axis=axis,
key=key,
item=item,
ima_scale=ima_scale,
options=options,
)
if n_subplots % ncols and k == n_subplots - 1:
for col in range(i + 1, ncols):
ax[j, col].set_visible(False)
if figname is not None:
fig.savefig(figname, bbox_inches="tight", dpi=150)
plt.close()
else:
fig.tight_layout()
plt.show()
return
|
(retval, ima_scale='log', ncols=2, figname=None, options={})
|
62,001 |
paos.core.raytrace
|
raytrace
|
Diagnostic function that implements paraxial ray-tracing and prints the output for each surface of the optical
chain as the ray positions and slopes in the tangential and sagittal planes.
Parameters
----------
field: dict
contains the slopes in the tangential and sagittal planes as field={'ut': slopey, 'us': slopex}
opt_chain: dict
the dict of the optical elements returned by paos.parse_config
x: float
X-coordinate of the initial ray position
y: float
Y-coordinate of the initial ray position
Returns
-----
out: list[str]
A list of strings where each list item is the raytrace at a given surface.
Examples
--------
>>> from paos.core.parseConfig import parse_config
>>> from paos.core.raytrace import raytrace
>>> pup_diameter, parameters, wavelengths, fields, opt_chains = parse_config('path/to/conf/file')
>>> raytrace(fields[0], opt_chains[0])
|
def raytrace(field, opt_chain, x=0.0, y=0.0):
"""
Diagnostic function that implements paraxial ray-tracing and prints the output for each surface of the optical
chain as the ray positions and slopes in the tangential and sagittal planes.
Parameters
----------
field: dict
contains the slopes in the tangential and sagittal planes as field={'ut': slopey, 'us': slopex}
opt_chain: dict
the dict of the optical elements returned by paos.parse_config
x: float
X-coordinate of the initial ray position
y: float
Y-coordinate of the initial ray position
Returns
-------
out: list[str]
A list of strings where each list item is the raytrace at a given surface.
Examples
--------
>>> from paos.core.parseConfig import parse_config
>>> from paos.core.raytrace import raytrace
>>> pup_diameter, parameters, wavelengths, fields, opt_chains = parse_config('path/to/conf/file')
>>> raytrace(fields[0], opt_chains[0])
"""
vt = np.array([y, field["ut"]])
vs = np.array([x, field["us"]])
ostr = []
for key, item in opt_chain.items():
if item["type"] == "Coordinate Break":
vt, vs = coordinate_break(
vt,
vs,
item["xdec"],
item["ydec"],
item["xrot"],
item["yrot"],
0.0,
)
vt = item["ABCDt"]() @ vt
vs = item["ABCDs"]() @ vs
_ostr_ = "S{:02d} - {:15s} y:{:7.3f}mm ut:{:10.3e} rad x:{:7.3f}mm us:{:10.3e} rad".format(
key, item["name"], 1000 * vt[0], vt[1], 1000 * vs[0], vs[1]
)
logger.debug(_ostr_)
ostr.append(_ostr_)
return ostr
|
(field, opt_chain, x=0.0, y=0.0)
|
62,002 |
paos.core.run
|
run
|
Run the POP.
Parameters
----------
pupil_diameter: scalar
input pupil diameter in meters
wavelength: scalar
wavelength in meters
gridsize: scalar
the size of the simulation grid. It has to be a power of 2
zoom: scalar
zoom factor
field: dictionary
contains the slopes in the tangential and sagittal planes as field={'ut': slopey, 'us': slopex}
opt_chain: dict
the dict of the optical elements parsed by paos.core.parseConfig.parse_config (one entry of opt_chain_list)
Returns
-------
out: dict
dictionary containing the results of the POP
Examples
--------
>>> from paos.core.parseConfig import parse_config
>>> from paos.core.run import run
>>> from paos.core.plot import simple_plot
>>> pup_diameter, parameters, wavelengths, fields, opt_chains = parse_config('path/to/conf/file')
>>> ret_val = run(pup_diameter, 1.0e-6 * wavelengths[0], parameters['grid_size'], parameters['zoom'], fields[0], opt_chains[0])
|
def run(pupil_diameter, wavelength, gridsize, zoom, field, opt_chain):
"""
Run the POP.
Parameters
----------
pupil_diameter: scalar
input pupil diameter in meters
wavelength: scalar
wavelength in meters
gridsize: scalar
the size of the simulation grid. It has to be a power of 2
zoom: scalar
zoom factor
field: dictionary
contains the slopes in the tangential and sagittal planes as field={'ut': slopey, 'us': slopex}
opt_chain: dict
the dict of the optical elements parsed by paos.core.parseConfig.parse_config (one entry of opt_chain_list)
Returns
-------
out: dict
dictionary containing the results of the POP
Examples
--------
>>> from paos.core.parseConfig import parse_config
>>> from paos.core.run import run
>>> from paos.core.plot import simple_plot
>>> pup_diameter, parameters, wavelengths, fields, opt_chains = parse_config('path/to/conf/file')
>>> ret_val = run(pup_diameter, 1.0e-6 * wavelengths[0], parameters['grid_size'], parameters['zoom'], fields[0], opt_chains[0])
"""
assert isinstance(opt_chain, dict), "opt_chain must be a dict"
retval = {}
vt = np.array([0.0, field["ut"]])
vs = np.array([0.0, field["us"]])
ABCDt = ABCD()
ABCDs = ABCD()
wfo = WFO(pupil_diameter, wavelength, gridsize, zoom)
for index, item in opt_chain.items():
logger.trace("Surface: {}".format(item["name"]))
if item["type"] == "Coordinate Break":
logger.trace("Apply coordinate break.")
vt, vs = coordinate_break(
vt,
vs,
item["xdec"],
item["ydec"],
item["xrot"],
item["yrot"],
0.0,
)
_retval_ = {"aperture": None}
# Check if aperture needs to be applied
if "aperture" in item:
xdec = (
item["aperture"]["xc"]
if np.isfinite(item["aperture"]["xc"])
else vs[0]
)
ydec = (
item["aperture"]["yc"]
if np.isfinite(item["aperture"]["yc"])
else vt[0]
)
xrad = item["aperture"]["xrad"]
yrad = item["aperture"]["yrad"]
xrad *= np.sqrt(1 / (vs[1] ** 2 + 1))
yrad *= np.sqrt(1 / (vt[1] ** 2 + 1))
xaper = xdec - vs[0]
yaper = ydec - vt[0]
obscuration = (
False if item["aperture"]["type"] == "aperture" else True
)
if np.all(np.isfinite([xrad, yrad])):
logger.trace("Apply aperture")
aper = wfo.aperture(
xaper,
yaper,
hx=xrad,
hy=yrad,
shape=item["aperture"]["shape"],
obscuration=obscuration,
)
_retval_["aperture"] = aper
# if item['type'] in ['Standard', 'Paraxial Lens', 'Slit', 'Obscuration']:
# xdec = item['xdec'] if np.isfinite(item['xdec']) else vs[0]
# ydec = item['ydec'] if np.isfinite(item['ydec']) else vt[0]
# xrad = item['xrad']
# yrad = item['yrad']
# xrad *= np.sqrt(1 / (vs[1] ** 2 + 1))
# yrad *= np.sqrt(1 / (vt[1] ** 2 + 1))
# xaper = xdec - vs[0]
# yaper = ydec - vt[0]
#
# aperture_shape = 'rectangular' if item['type'] == 'Slit' else 'elliptical'
# obscuration = True if item['type'] == 'Obscuration' else False
# if np.all(np.isfinite([xrad, yrad])):
# logger.trace('Apply aperture')
# aper = wfo.aperture(xaper, yaper, hx=xrad, hy=yrad,
# shape=aperture_shape, obscuration=obscuration)
# _retval_['aperture'] = aper
# Check if this is a stop surface
if item["is_stop"]:
logger.trace("Apply stop")
wfo.make_stop()
if item["type"] == "Zernike":
radius = (
item["Zradius"] if np.isfinite(item["Zradius"]) else wfo.wz
)
wfo.zernikes(
item["Zindex"],
item["Z"],
item["Zordering"],
item["Znormalize"],
radius,
origin=item["Zorigin"],
)
if item["type"] == "PSD":
wfo.psd(
item["A"],
item["B"],
item["C"],
item["fknee"],
item["fmin"],
item["fmax"],
item["SR"],
item["units"],
)
_retval_.update(push_results(wfo))
Ms = item["ABCDs"].M
Mt = item["ABCDt"].M
fl = (
np.inf
if (item["ABCDt"].power == 0)
else item["ABCDt"].cout / item["ABCDt"].power
)
T = item["ABCDt"].cout * item["ABCDt"].thickness
n1n2 = item["ABCDt"].n1n2
logger.trace("n1n2: {:.4f}".format(n1n2))
if Mt != 1.0 or Ms != 1.0:
logger.trace("Apply magnification")
wfo.Magnification(Mt, Ms)
if np.abs(n1n2) != 1.0:
logger.trace("Apply medium change")
wfo.ChangeMedium(n1n2)
if np.isfinite(fl):
logger.trace("Apply lens")
wfo.lens(fl)
if np.isfinite(T) and np.abs(T) > 1e-10:
logger.trace("Apply propagation thickness: T: {:.4f}".format(T))
wfo.propagate(T)
vt = item["ABCDt"]() @ vt
vs = item["ABCDs"]() @ vs
ABCDt = item["ABCDt"] * ABCDt
ABCDs = item["ABCDs"] * ABCDs
logger.debug(
"F num: {:2f}, distance to focus: {:.6f}".format(
_retval_["fratio"], wfo.distancetofocus
)
)
_retval_["ABCDt"] = ABCDt
_retval_["ABCDs"] = ABCDs
if item["save"]:
logger.trace("Save optical surface to output dict")
retval[item["num"]] = deepcopy(_retval_)
del _retval_
_ = gc.collect()
return retval
|
(pupil_diameter, wavelength, gridsize, zoom, field, opt_chain)
|
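The returned dictionary is keyed by surface number and holds only the surfaces flagged with Save=True. A short inspection sketch; the 'fratio' key is taken from the logging above, while the remaining per-surface keys come from push_results and are not shown here:

# assumes ret_val from the run(...) example above
for num, surface in ret_val.items():
    print(num, surface["fratio"])  # working focal ratio at this surface
    print(surface["ABCDt"]())      # cumulative tangential ABCD matrix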
62,003 |
paos.core.saveOutput
|
save_datacube
|
Given a list of dictionaries with POP simulation output, a hdf5 file name, a list of
identifiers to tag each simulation and the keys to store at each surface, it saves the
outputs to a data cube along with the paos package information to the hdf5 output file.
If indicated, overwrites past output file.
Parameters
----------
retval_list: list
list of dictionaries with POP simulation outputs to be saved into a single hdf5 file
file_name: str
the hdf5 file name for saving the POP simulation
group_names: list
list of strings with unique identifiers for each POP simulation. example: for one
optical chain run at different wavelengths, use each wavelength as identifier.
keys_to_keep: list
dictionary keys to store at each surface. example: ['amplitude', 'dx', 'dy']
overwrite: bool
if True, overwrites past output file
Returns
-------
None
Saves a list of dictionaries with the POP simulation outputs to a single hdf5 file
as a datacube with group tags (e.g. the wavelengths) to identify each simulation,
along with the paos package information.
Examples
--------
>>> from paos.core.parseConfig import parse_config
>>> from paos.core.run import run
>>> from paos.core.saveOutput import save_datacube
>>> from joblib import Parallel, delayed
>>> from tqdm import tqdm
>>> pup_diameter, parameters, wavelengths, fields, opt_chains = parse_config('path/to/ini/file')
>>> ret_val_list = Parallel(n_jobs=2)(delayed(run)(pup_diameter, 1.0e-6 * wl, parameters['grid_size'],
>>> parameters['zoom'], fields[0], opt_chains[0]) for wl in tqdm(wavelengths))
>>> group_tags = list(map(str, wavelengths))
>>> save_datacube(ret_val_list, 'path/to/hdf5/file', group_tags,
>>> keys_to_keep=['amplitude', 'dx', 'dy'], overwrite=True)
|
def save_datacube(
retval_list, file_name, group_names, keys_to_keep=None, overwrite=True
):
"""
Given a list of dictionaries with POP simulation output, a hdf5 file name, a list of
identifiers to tag each simulation and the keys to store at each surface, it saves the
outputs to a data cube along with the paos package information to the hdf5 output file.
If indicated, overwrites past output file.
Parameters
----------
retval_list: list
list of dictionaries with POP simulation outputs to be saved into a single hdf5 file
file_name: str
the hdf5 file name for saving the POP simulation
group_names: list
list of strings with unique identifiers for each POP simulation. example: for one
optical chain run at different wavelengths, use each wavelength as identifier.
keys_to_keep: list
dictionary keys to store at each surface. example: ['amplitude', 'dx', 'dy']
overwrite: bool
if True, overwrites past output file
Returns
-------
None
Saves a list of dictionaries with the POP simulation outputs to a single hdf5 file
as a datacube with group tags (e.g. the wavelengths) to identify each simulation,
along with the paos package information.
Examples
--------
>>> from paos.core.parseConfig import parse_config
>>> from paos.core.run import run
>>> from paos.core.saveOutput import save_datacube
>>> from joblib import Parallel, delayed
>>> from tqdm import tqdm
>>> pup_diameter, parameters, wavelengths, fields, opt_chains = parse_config('path/to/ini/file')
>>> ret_val_list = Parallel(n_jobs=2)(delayed(run)(pup_diameter, 1.0e-6 * wl, parameters['grid_size'],
>>> parameters['zoom'], fields[0], opt_chains[0]) for wl in tqdm(wavelengths))
>>> group_tags = list(map(str, wavelengths))
>>> save_datacube(ret_val_list, 'path/to/hdf5/file', group_tags,
>>> keys_to_keep=['amplitude', 'dx', 'dy'], overwrite=True)
"""
assert isinstance(
retval_list, list
), "parameter retval_list must be a list"
assert isinstance(file_name, str), "parameter file_name must be a string"
assert isinstance(
group_names, list
), "parameter group_names must be a list of strings"
if keys_to_keep is not None:
assert isinstance(
keys_to_keep, list
), "parameter keys_to_keep must be a list of strings"
logger.info("Saving {} started...".format(file_name))
if overwrite:
logger.info("Remove old file")
if os.path.exists(file_name) and os.path.isfile(file_name):
os.remove(file_name)
with h5py.File(file_name, "a") as cube:
save_info(file_name, cube)
for group_name, retval in zip(group_names, retval_list):
out = cube.create_group(group_name)
logger.trace("saving group {}".format(out))
save_retval(retval, keys_to_keep, out)
logger.info("Saving ended.")
return
|
(retval_list, file_name, group_names, keys_to_keep=None, overwrite=True)
|
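Reading the cube back is plain h5py traversal over the group tags; a minimal sketch (the per-surface dataset layout inside each group is written by save_retval and is assumed, not shown here):

import h5py

with h5py.File("path/to/hdf5/file", "r") as cube:
    for tag in cube:  # one group per simulation, e.g. one per wavelength
        print(tag, list(cube[tag].keys()))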
62,004 |
paos.core.saveOutput
|
save_output
|
Given the POP simulation output dictionary, a hdf5 file name and the keys to store
at each surface, it saves the output dictionary along with the paos package information
to the hdf5 output file. If indicated, overwrites past output file.
Parameters
----------
retval: dict
POP simulation output dictionary to be saved into hdf5 file
file_name: str
the hdf5 file name for saving the POP simulation
keys_to_keep: list
dictionary keys to store at each surface. example: ['amplitude', 'dx', 'dy']
overwrite: bool
if True, overwrites past output file
Returns
-------
None
Saves the POP simulation output dictionary along with the paos package information
to the hdf5 output file
Examples
--------
>>> from paos.core.parseConfig import parse_config
>>> from paos.core.run import run
>>> from paos.core.saveOutput import save_output
>>> pup_diameter, parameters, wavelengths, fields, opt_chains = parse_config('path/to/ini/file')
>>> ret_val = run(pup_diameter, 1.0e-6 * wavelengths[0], parameters['grid_size'],
>>> parameters['zoom'], fields[0], opt_chains[0])
>>> save_output(ret_val, 'path/to/hdf5/file', keys_to_keep=['wfo', 'dx', 'dy'], overwrite=True)
|
def save_output(retval, file_name, keys_to_keep=None, overwrite=True):
"""
Given the POP simulation output dictionary, a hdf5 file name and the keys to store
at each surface, it saves the output dictionary along with the paos package information
to the hdf5 output file. If indicated, overwrites past output file.
Parameters
----------
retval: dict
POP simulation output dictionary to be saved into hdf5 file
file_name: str
the hdf5 file name for saving the POP simulation
keys_to_keep: list
dictionary keys to store at each surface. example: ['amplitude', 'dx', 'dy']
overwrite: bool
if True, overwrites past output file
Returns
-------
None
Saves the POP simulation output dictionary along with the paos package information
to the hdf5 output file
Examples
--------
>>> from paos.core.parseConfig import parse_config
>>> from paos.core.run import run
>>> from paos.core.saveOutput import save_output
>>> pup_diameter, parameters, wavelengths, fields, opt_chains = parse_config('path/to/ini/file')
>>> ret_val = run(pup_diameter, 1.0e-6 * wavelengths[0], parameters['grid_size'],
>>> parameters['zoom'], fields[0], opt_chains[0])
>>> save_output(ret_val, 'path/to/hdf5/file', keys_to_keep=['wfo', 'dx', 'dy'], overwrite=True)
"""
assert isinstance(retval, dict), "parameter retval must be a dict"
assert isinstance(file_name, str), "parameter file_name must be a string"
if keys_to_keep is not None:
assert isinstance(
keys_to_keep, list
), "parameter keys_to_keep must be a list of strings"
logger.info("saving {} started...".format(file_name))
if overwrite:
logger.info("removing old file")
if os.path.isfile(file_name):
os.remove(file_name)
with h5py.File(file_name, "a") as out:
save_info(file_name, out)
save_retval(retval, keys_to_keep, out)
logger.info("saving ended.")
return
|
(retval, file_name, keys_to_keep=None, overwrite=True)
|
62,005 |
paos.log
|
setLogLevel
|
Simple function to set the logger level
Parameters
----------
level: logging level
log_id: int
this is the index of the handler to edit. The basic handler index is 0.
Every added handler is appended to the list. Default is 0.
|
def setLogLevel(level, log_id=0):
"""
Simple function to set the logger level
Parameters
----------
level: logging level
log_id: int
this is the index of the handler to edit. The basic handler index is 0.
Every added handler is appended to the list. Default is 0.
"""
global last_log
from .logger import root_logger
root_logger.handlers[log_id].setLevel(level)
last_log = level
|
(level, log_id=0)
|
62,007 |
fixtures._fixtures.streams
|
ByteStream
|
Provide a file-like object that accepts bytes and exposes it as a detail.
:param detail_name: The name of the detail.
:return: A fixture which has an attribute `stream` containing the file-like
object.
|
def ByteStream(detail_name):
"""Provide a file-like object that accepts bytes and expose as a detail.
:param detail_name: The name of the detail.
:return: A fixture which has an attribute `stream` containing the file-like
object.
"""
return Stream(detail_name, _byte_stream_factory)
|
(detail_name)
|
62,008 |
fixtures.fixture
|
CompoundFixture
|
A fixture that combines many fixtures.
:ivar fixtures: The list of fixtures that make up this one. (read only).
|
class CompoundFixture(Fixture):
"""A fixture that combines many fixtures.
:ivar fixtures: The list of fixtures that make up this one. (read only).
"""
def __init__(self, fixtures):
"""Construct a fixture made of many fixtures.
:param fixtures: An iterable of fixtures.
"""
super(CompoundFixture, self).__init__()
self.fixtures = list(fixtures)
def _setUp(self):
for fixture in self.fixtures:
self.useFixture(fixture)
|
(fixtures)
|
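A short composition sketch, assuming (as is conventional for this library) that CompoundFixture and EnvironmentVariable are re-exported at the package top level:

import os
import fixtures

compound = fixtures.CompoundFixture([
    fixtures.EnvironmentVariable("DEMO_A", "1"),
    fixtures.EnvironmentVariable("DEMO_B", "2"),
])
with compound:  # sets up both children via useFixture
    assert os.environ["DEMO_A"] == "1" and os.environ["DEMO_B"] == "2"
# both variables are restored on exit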
62,009 |
fixtures.fixture
|
__enter__
| null |
def __enter__(self):
self.setUp()
return self
|
(self)
|
62,010 |
fixtures.fixture
|
__exit__
| null |
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self._cleanups()
finally:
self._remove_state()
return False # propagate exceptions from the with body.
|
(self, exc_type, exc_val, exc_tb)
|
62,011 |
fixtures.fixture
|
__init__
|
Construct a fixture made of many fixtures.
:param fixtures: An iterable of fixtures.
|
def __init__(self, fixtures):
"""Construct a fixture made of many fixtures.
:param fixtures: An iterable of fixtures.
"""
super(CompoundFixture, self).__init__()
self.fixtures = list(fixtures)
|
(self, fixtures)
|
62,012 |
fixtures.fixture
|
_clear_cleanups
|
Clear the cleanup queue without running the cleanups.
This is a helper that can be useful for subclasses which define
reset(): they may perform something equivalent to a typical cleanUp
without actually calling the cleanups.
This also clears the details dict.
|
def _clear_cleanups(self):
"""Clean the cleanup queue without running them.
This is a helper that can be useful for subclasses which define
reset(): they may perform something equivalent to a typical cleanUp
without actually calling the cleanups.
This also clears the details dict.
"""
self._cleanups = CallMany()
self._details = {}
self._detail_sources = []
|
(self)
|
62,013 |
fixtures.fixture
|
_remove_state
|
Remove the internal state.
Called from cleanUp to put the fixture back into a not-ready state.
|
def _remove_state(self):
"""Remove the internal state.
Called from cleanUp to put the fixture back into a not-ready state.
"""
self._cleanups = None
self._details = None
self._detail_sources = None
|
(self)
|
62,014 |
fixtures.fixture
|
_setUp
| null |
def _setUp(self):
for fixture in self.fixtures:
self.useFixture(fixture)
|
(self)
|
62,015 |
fixtures.fixture
|
addCleanup
|
Add a clean function to be called from cleanUp.
All cleanup functions are called - see cleanUp for details on how
multiple exceptions are handled.
If for some reason you need to cancel cleanups, call
self._clear_cleanups.
:param cleanup: A callable to call during cleanUp.
:param *args: Positional args for cleanup.
:param kwargs: Keyword args for cleanup.
:return: None
|
def addCleanup(self, cleanup, *args, **kwargs):
"""Add a clean function to be called from cleanUp.
All cleanup functions are called - see cleanUp for details on how
multiple exceptions are handled.
If for some reason you need to cancel cleanups, call
self._clear_cleanups.
:param cleanup: A callable to call during cleanUp.
:param *args: Positional args for cleanup.
:param kwargs: Keyword args for cleanup.
:return: None
"""
self._cleanups.push(cleanup, *args, **kwargs)
|
(self, cleanup, *args, **kwargs)
|
62,016 |
fixtures.fixture
|
addDetail
|
Add a detail to the Fixture.
This may only be called after setUp has been called.
:param name: The name for the detail being added. Overrides existing
identically named details.
:param content_object: The content object (meeting the
testtools.content.Content protocol) being added.
|
def addDetail(self, name, content_object):
"""Add a detail to the Fixture.
This may only be called after setUp has been called.
:param name: The name for the detail being added. Overrides existing
identically named details.
:param content_object: The content object (meeting the
testtools.content.Content protocol) being added.
"""
self._details[name] = content_object
|
(self, name, content_object)
|
62,017 |
fixtures.fixture
|
cleanUp
|
Clean up the fixture.
This function will free all resources managed by the Fixture, restoring
it (and any external facilities such as databases, temporary
directories and so forth) to their original state.
This should not typically be overridden, see addCleanup instead.
cleanUp may be called once and only once after setUp() has been called.
The base implementation of setUp will automatically call cleanUp if
an exception occurs within setUp itself.
:param raise_first: Deprecated parameter from before testtools gained
MultipleExceptions. raise_first defaults to True. When True
if a single exception is raised, it is reraised after all the
cleanUps have run. If multiple exceptions are raised, they are
all wrapped into a MultipleExceptions object, and that is reraised.
Thus, to catch a specific exception from cleanUp, you need to catch
both the exception and MultipleExceptions, and then check within
a MultipleExceptions instance for the type you're catching.
:return: A list of the exc_info() for each exception that occurred if
raise_first was False
|
def cleanUp(self, raise_first=True):
"""Cleanup the fixture.
This function will free all resources managed by the Fixture, restoring
it (and any external facilities such as databases, temporary
directories and so forth) to their original state.
This should not typically be overridden, see addCleanup instead.
cleanUp may be called once and only once after setUp() has been called.
The base implementation of setUp will automatically call cleanUp if
an exception occurs within setUp itself.
:param raise_first: Deprecated parameter from before testtools gained
MultipleExceptions. raise_first defaults to True. When True
if a single exception is raised, it is reraised after all the
cleanUps have run. If multiple exceptions are raised, they are
all wrapped into a MultipleExceptions object, and that is reraised.
Thus, to catch a specific exception from cleanUp, you need to catch
both the exception and MultipleExceptions, and then check within
a MultipleExceptions instance for the type you're catching.
:return: A list of the exc_info() for each exception that occurred if
raise_first was False
"""
try:
return self._cleanups(raise_errors=raise_first)
finally:
self._remove_state()
|
(self, raise_first=True)
|
62,018 |
fixtures.fixture
|
getDetails
|
Get the current details registered with the fixture.
This does not return the internal dictionary: mutating it will have no
effect. If you need to mutate it, just do so directly.
:return: Dict from name -> content_object.
|
def getDetails(self):
"""Get the current details registered with the fixture.
This does not return the internal dictionary: mutating it will have no
effect. If you need to mutate it, just do so directly.
:return: Dict from name -> content_object.
"""
result = dict(self._details)
for source in self._detail_sources:
combine_details(source.getDetails(), result)
return result
|
(self)
|
62,019 |
fixtures.fixture
|
reset
|
Reset a setUp Fixture to the 'just setUp' state again.
The default implementation calls
self.cleanUp()
self.setUp()
but this function may be overridden to provide an optimised routine to
achieve the same result.
:return: None.
|
def reset(self):
"""Reset a setUp Fixture to the 'just setUp' state again.
The default implementation calls
self.cleanUp()
self.setUp()
but this function may be overridden to provide an optimised routine to
achieve the same result.
:return: None.
"""
self.cleanUp()
self.setUp()
|
(self)
|
62,020 |
fixtures.fixture
|
setUp
|
Prepare the Fixture for use.
This should not be overridden. Concrete fixtures should implement
_setUp. Overriding of setUp is still supported, just not recommended.
After setUp has completed, the fixture will have one or more attributes
which can be used (these depend totally on the concrete subclass).
:raises: MultipleExceptions if _setUp fails. The last exception
captured within the MultipleExceptions will be a SetupError
exception.
:return: None.
:changed in 1.3: The recommendation to override setUp has been
reversed - before 1.3, setUp() should be overridden, now it should
not be.
:changed in 1.3.1: BaseException is now caught, and only subclasses of
Exception are wrapped in MultipleExceptions.
|
def setUp(self):
"""Prepare the Fixture for use.
This should not be overridden. Concrete fixtures should implement
_setUp. Overriding of setUp is still supported, just not recommended.
After setUp has completed, the fixture will have one or more attributes
which can be used (these depend totally on the concrete subclass).
:raises: MultipleExceptions if _setUp fails. The last exception
captured within the MultipleExceptions will be a SetupError
exception.
:return: None.
:changed in 1.3: The recommendation to override setUp has been
reversed - before 1.3, setUp() should be overridden, now it should
not be.
:changed in 1.3.1: BaseException is now caught, and only subclasses of
Exception are wrapped in MultipleExceptions.
"""
self._clear_cleanups()
try:
self._setUp()
except:
err = sys.exc_info()
details = {}
if gather_details is not None:
# Materialise all details since we're about to cleanup.
gather_details(self.getDetails(), details)
else:
details = self.getDetails()
errors = [err] + self.cleanUp(raise_first=False)
try:
raise SetupError(details)
except SetupError:
errors.append(sys.exc_info())
if issubclass(err[0], Exception):
raise MultipleExceptions(*errors)
else:
raise err[1].with_traceback(err[2])
|
(self)
|
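The intended extension point is _setUp rather than setUp; a minimal sketch of a concrete fixture that registers its own cleanup and is driven through the context-manager protocol shown earlier:

import fixtures

class TempAttribute(fixtures.Fixture):
    def _setUp(self):
        self.value = 42
        self.addCleanup(delattr, self, "value")  # undone during cleanUp

with TempAttribute() as fx:  # __enter__ calls setUp
    assert fx.value == 42
# __exit__ ran the cleanups, so fx.value no longer exists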
62,021 |
fixtures.fixture
|
useFixture
|
Use another fixture.
The fixture will be setUp, and self.addCleanup(fixture.cleanUp) called.
If the fixture fails to set up, useFixture will attempt to gather its
details into this fixture's details to aid in debugging.
:param fixture: The fixture to use.
:return: The fixture, after setting it up and scheduling a cleanup for
it.
:raises: Any errors raised by the fixture's setUp method.
|
def useFixture(self, fixture):
"""Use another fixture.
The fixture will be setUp, and self.addCleanup(fixture.cleanUp) called.
If the fixture fails to set up, useFixture will attempt to gather its
details into this fixture's details to aid in debugging.
:param fixture: The fixture to use.
:return: The fixture, after setting it up and scheduling a cleanup for
it.
:raises: Any errors raised by the fixture's setUp method.
"""
try:
fixture.setUp()
except MultipleExceptions as e:
if e.args[-1][0] is SetupError:
combine_details(e.args[-1][1].args[0], self._details)
raise
except:
# The child failed to come up and didn't raise MultipleExceptions
# which we can understand... capture any details it has (copying
# the content, it may go away anytime).
if gather_details is not None:
gather_details(fixture.getDetails(), self._details)
raise
else:
self.addCleanup(fixture.cleanUp)
# Calls to getDetails while this fixture is setup will return
# details from the child fixture.
self._detail_sources.append(fixture)
return fixture
|
(self, fixture)
|
62,022 |
fixtures._fixtures.streams
|
DetailStream
|
Deprecated alias for ByteStream.
|
def DetailStream(detail_name):
"""Deprecated alias for ByteStream."""
return ByteStream(detail_name)
|
(detail_name)
|
62,023 |
fixtures._fixtures.environ
|
EnvironmentVariable
|
Isolate a specific environment variable.
|
class EnvironmentVariable(Fixture):
"""Isolate a specific environment variable."""
def __init__(self, varname, newvalue=None):
"""Create an EnvironmentVariable fixture.
:param varname: the name of the variable to isolate.
:param newvalue: A value to set the variable to. If None, the variable
will be deleted.
During setup the variable will be deleted or assigned the requested
value, and this will be restored in cleanUp.
"""
super(EnvironmentVariable, self).__init__()
self.varname = varname
self.newvalue = newvalue
def _setUp(self):
varname = self.varname
orig_value = os.environ.get(varname)
if orig_value is not None:
self.addCleanup(os.environ.__setitem__, varname, orig_value)
del os.environ[varname]
else:
self.addCleanup(os.environ.pop, varname, '')
if self.newvalue is not None:
os.environ[varname] = self.newvalue
else:
os.environ.pop(varname, '')
|
(varname, newvalue=None)
|
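A short usage sketch of the fixture above (top-level re-export assumed; DEMO_VAR is assumed unset beforehand):

import os
import fixtures

with fixtures.EnvironmentVariable("DEMO_VAR", "on"):
    assert os.environ["DEMO_VAR"] == "on"
assert "DEMO_VAR" not in os.environ  # the variable is removed again on cleanUp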
62,026 |
fixtures._fixtures.environ
|
__init__
|
Create an EnvironmentVariable fixture.
:param varname: the name of the variable to isolate.
:param newvalue: A value to set the variable to. If None, the variable
will be deleted.
During setup the variable will be deleted or assigned the requested
value, and this will be restored in cleanUp.
|
def __init__(self, varname, newvalue=None):
"""Create an EnvironmentVariable fixture.
:param varname: the name of the variable to isolate.
:param newvalue: A value to set the variable to. If None, the variable
will be deleted.
During setup the variable will be deleted or assigned the requested
value, and this will be restored in cleanUp.
"""
super(EnvironmentVariable, self).__init__()
self.varname = varname
self.newvalue = newvalue
|
(self, varname, newvalue=None)
|
62,029 |
fixtures._fixtures.environ
|
_setUp
| null |
def _setUp(self):
varname = self.varname
orig_value = os.environ.get(varname)
if orig_value is not None:
self.addCleanup(os.environ.__setitem__, varname, orig_value)
del os.environ[varname]
else:
self.addCleanup(os.environ.pop, varname, '')
if self.newvalue is not None:
os.environ[varname] = self.newvalue
else:
os.environ.pop(varname, '')
|
(self)
|