code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
if model:
resource_key = get_model_table(model)
elif instance:
resource_key = instance._meta.db_table
elif resource_name:
resource_key = resource_name_map[resource_name]
if resource_key not in resource_map:
return None
return resource_map[resource_key]['viewset'].serializer_class
|
def get_canonical_serializer(
resource_key,
model=None,
instance=None,
resource_name=None
)
|
Return canonical serializer for a given resource name.
Arguments:
resource_key - Resource key, usually DB table for model-based
resources, otherwise the plural name.
model - (Optional) Model class to look up by.
instance - (Optional) Model object instance.
Returns: serializer class
| 3.157029 | 3.09143 | 1.021219 |
routes = super(DynamicRouter, self).get_routes(viewset)
routes += self.get_relation_routes(viewset)
return routes
|
def get_routes(self, viewset)
|
DREST routes injection, overrides DRF's get_routes() method, which
gets called for each registered viewset.
| 3.97238 | 3.836855 | 1.035322 |
routes = []
if not hasattr(viewset, 'serializer_class'):
return routes
if not hasattr(viewset, 'list_related'):
return routes
serializer = viewset.serializer_class()
fields = getattr(serializer, 'get_link_fields', lambda: [])()
route_name = '{basename}-{methodnamehyphen}'
for field_name, field in six.iteritems(fields):
methodname = 'list_related'
url = (
r'^{prefix}/{lookup}/(?P<field_name>%s)'
'{trailing_slash}$' % field_name
)
routes.append(Route(
url=url,
mapping={'get': methodname},
name=replace_methodname(route_name, field_name),
initkwargs={}
))
return routes
|
def get_relation_routes(self, viewset)
|
Generate routes to serve relational objects. This method will add
a sub-URL for each relational field.
e.g.
A viewset for the following serializer:
class UserSerializer(..):
events = DynamicRelationField(EventSerializer, many=True)
groups = DynamicRelationField(GroupSerializer, many=True)
location = DynamicRelationField(LocationSerializer)
will have the following URLs added:
/users/<pk>/events/
/users/<pk>/groups/
/users/<pk>/location/
| 3.841315 | 4.105567 | 0.935636 |
paths = []
for key, child in six.iteritems(self):
if isinstance(child, TreeMap) and child:
# current child is an intermediate node
for path in child.get_paths():
path.insert(0, key)
paths.append(path)
else:
# current child is an endpoint
paths.append([key])
return paths
|
def get_paths(self)
|
Get all paths from the root to the leaves.
For example, given a chain like `{'a':{'b':{'c':None}}}`,
this method would return `[['a', 'b', 'c']]`.
Returns:
A list of paths, where each path is a list of keys.
| 3.789695 | 3.894013 | 0.973211 |
tree = self
if not parts:
return tree
cur = tree
last = len(parts) - 1
for i, part in enumerate(parts):
if part not in cur:
cur[part] = TreeMap() if i != last else leaf_value
elif i == last: # found leaf
if update:
cur[part].update(leaf_value)
else:
cur[part] = leaf_value
cur = cur[part]
return self
|
def insert(self, parts, leaf_value, update=False)
|
Add a list of nodes into the tree.
The list will be converted into a TreeMap (chain) and then
merged with the current TreeMap.
For example, this method would insert `['a','b','c']` as
`{'a':{'b':{'c':{}}}}`.
Arguments:
parts: List of nodes representing a chain.
leaf_value: Value to insert into the leaf of the chain.
update: Whether or not to update the leaf with the given value or
to replace the value.
Returns:
self
| 2.859587 | 2.926147 | 0.977253 |
if isinstance(obj, OrderedDict):
return _TaggedOrderedDict(obj, *args, **kwargs)
else:
return _TaggedPlainDict(obj, *args, **kwargs)
|
def tag_dict(obj, *args, **kwargs)
|
Create a TaggedDict instance. Will either be a TaggedOrderedDict
or TaggedPlainDict depending on the type of `obj`.
| 3.524932 | 2.042351 | 1.725919 |
for join in six.itervalues(queryset.query.alias_map):
if join.join_type:
return True
return False
|
def has_joins(queryset)
|
Return True if and only if the queryset includes joins.
If this is the case, it is possible for the queryset
to return duplicate results.
| 4.105365 | 4.559932 | 0.900313 |
rewritten = []
last = len(self.field) - 1
s = serializer
field = None
for i, field_name in enumerate(self.field):
# Note: .fields can be empty for related serializers that aren't
# sideloaded. Fields that are deferred also won't be present.
# If field name isn't in serializer.fields, get full list from
# get_all_fields() method. This is somewhat expensive, so only do
# this if we have to.
fields = s.fields
if field_name not in fields:
fields = getattr(s, 'get_all_fields', lambda: {})()
if field_name == 'pk':
rewritten.append('pk')
continue
if field_name not in fields:
raise ValidationError(
"Invalid filter field: %s" % field_name
)
field = fields[field_name]
# For remote fields, strip off '_set' for filtering. This is a
# weird Django inconsistency.
model_field_name = field.source or field_name
model_field = get_model_field(s.get_model(), model_field_name)
if isinstance(model_field, RelatedObject):
model_field_name = model_field.field.related_query_name()
# If get_all_fields() was used above, field could be unbound,
# and field.source would be None
rewritten.append(model_field_name)
if i == last:
break
# Recurse into nested field
s = getattr(field, 'serializer', None)
if isinstance(s, serializers.ListSerializer):
s = s.child
if not s:
raise ValidationError(
"Invalid nested filter field: %s" % field_name
)
if self.operator:
rewritten.append(self.operator)
return ('__'.join(rewritten), field)
|
def generate_query_key(self, serializer)
|
Get the key that can be passed to Django's filter method.
To account for serializer field name rewrites, this method
translates serializer field names to model field names
by inspecting `serializer`.
For example, a query like `filter{users.events}` would be
returned as `users__events`.
Arguments:
serializer: A DRF serializer
Returns:
A filter key.
| 3.901089 | 3.838785 | 1.01623 |
self.request = request
self.view = view
# enable addition of extra filters (i.e., a Q())
# so custom filters can be added to the queryset without
# running into https://code.djangoproject.com/ticket/18437
# which, without this, would mean that filters added to the queryset
# after this is called may not behave as expected
extra_filters = self.view.get_extra_filters(request)
disable_prefetches = self.view.is_update()
self.DEBUG = settings.DEBUG
return self._build_queryset(
queryset=queryset,
extra_filters=extra_filters,
disable_prefetches=disable_prefetches,
)
|
def filter_queryset(self, request, queryset, view)
|
Filter the queryset.
This is the main entry-point to this class, and
is called by DRF's list handler.
| 6.121601 | 6.200184 | 0.987326 |
filters_map = (
kwargs.get('filters_map') or
self.view.get_request_feature(self.view.FILTER)
)
out = TreeMap()
for spec, value in six.iteritems(filters_map):
# Inclusion or exclusion?
if spec[0] == '-':
spec = spec[1:]
inex = '_exclude'
else:
inex = '_include'
# for relational filters, separate out relation path part
if '|' in spec:
rel, spec = spec.split('|')
rel = rel.split('.')
else:
rel = None
parts = spec.split('.')
# Last part could be operator, e.g. "events.capacity.gte"
if len(parts) > 1 and parts[-1] in self.VALID_FILTER_OPERATORS:
operator = parts.pop()
else:
operator = None
# All operators except 'range' and 'in' should have one value
if operator == 'range':
value = value[:2]
elif operator == 'in':
# no-op: i.e. accept `value` as an arbitrarily long list
pass
elif operator in self.VALID_FILTER_OPERATORS:
value = value[0]
if (
operator == 'isnull' and
isinstance(value, six.string_types)
):
value = is_truthy(value)
elif operator == 'eq':
operator = None
node = FilterNode(parts, operator, value)
# insert into output tree
path = rel if rel else []
path += [inex, node.key]
out.insert(path, node)
return out
|
def _get_requested_filters(self, **kwargs)
|
Convert 'filters' query params into a dict that can be passed
to Q. Returns a dict with two fields, 'include' and 'exclude',
which can be used like:
result = self._get_requested_filters()
q = Q(**result['include']) & ~Q(**result['exclude'])
| 4.284481 | 4.249173 | 1.008309 |
def rewrite_filters(filters, serializer):
out = {}
for k, node in six.iteritems(filters):
filter_key, field = node.generate_query_key(serializer)
if isinstance(field, (BooleanField, NullBooleanField)):
node.value = is_truthy(node.value)
out[filter_key] = node.value
return out
q = q or Q()
if not includes and not excludes:
return None
if includes:
includes = rewrite_filters(includes, serializer)
q &= Q(**includes)
if excludes:
excludes = rewrite_filters(excludes, serializer)
for k, v in six.iteritems(excludes):
q &= ~Q(**{k: v})
return q
|
def _filters_to_query(self, includes, excludes, serializer, q=None)
|
Construct Django Query object from request.
Arguments are dictionaries, which will be passed to Q() as kwargs.
e.g.
includes = { 'foo' : 'bar', 'baz__in' : [1, 2] }
produces:
Q(foo='bar', baz__in=[1, 2])
Arguments:
includes: TreeMap representing inclusion filters.
excludes: TreeMap representing exclusion filters.
serializer: serializer instance of top-level object
q: Q() object (optional)
Returns:
Q() instance or None if no inclusion or exclusion filters
were specified.
| 2.903503 | 2.964891 | 0.979295 |
for source, remainder in six.iteritems(requirements):
if not remainder or isinstance(remainder, six.string_types):
# no further requirements to prefetch
continue
related_field = get_model_field(model, source)
related_model = get_related_model(related_field)
queryset = self._build_implicit_queryset(
related_model,
remainder
) if related_model else None
prefetches[source] = self._create_prefetch(
source,
queryset
)
return prefetches
|
def _build_implicit_prefetches(
self,
model,
prefetches,
requirements
)
|
Build a prefetch dictionary based on internal requirements.
| 3.824718 | 3.720181 | 1.0281 |
queryset = self._make_model_queryset(model)
prefetches = {}
self._build_implicit_prefetches(
model,
prefetches,
requirements
)
prefetch = prefetches.values()
queryset = queryset.prefetch_related(*prefetch).distinct()
if self.DEBUG:
queryset._using_prefetches = prefetches
return queryset
|
def _build_implicit_queryset(self, model, requirements)
|
Build a queryset based on implicit requirements.
| 5.247133 | 5.054183 | 1.038176 |
for name, field in six.iteritems(fields):
original_field = field
if isinstance(field, DynamicRelationField):
field = field.serializer
if isinstance(field, serializers.ListSerializer):
field = field.child
if not isinstance(field, serializers.ModelSerializer):
continue
source = field.source or name
if '.' in source:
raise ValidationError(
'nested relationship values '
'are not supported'
)
if source in prefetches:
# ignore duplicated sources
continue
is_remote = is_field_remote(model, source)
is_id_only = getattr(field, 'id_only', lambda: False)()
if is_id_only and not is_remote:
continue
related_queryset = getattr(original_field, 'queryset', None)
if callable(related_queryset):
related_queryset = related_queryset(field)
source = field.source or name
# Popping the source here (during explicit prefetch construction)
# guarantees that implicitly required prefetches that follow will
# not conflict.
required = requirements.pop(source, None)
prefetch_queryset = self._build_queryset(
serializer=field,
filters=filters.get(name, {}),
queryset=related_queryset,
requirements=required
)
# Note: There can only be one prefetch per source, even
# though there can be multiple fields pointing to
# the same source. This could break in some cases,
# but is mostly an issue on writes when we use all
# fields by default.
prefetches[source] = self._create_prefetch(
source,
prefetch_queryset
)
return prefetches
|
def _build_requested_prefetches(
self,
prefetches,
requirements,
model,
fields,
filters
)
|
Build a prefetch dictionary based on request requirements.
| 4.801482 | 4.716257 | 1.01807 |
for name, field in six.iteritems(fields):
source = field.source
# Requires may be manually set on the field -- if not,
# assume the field requires only its source.
requires = getattr(field, 'requires', None) or [source]
for require in requires:
if not require:
# ignore fields with empty source
continue
requirement = require.split('.')
if requirement[-1] == '':
# Change 'a.b.' -> 'a.b.*',
# supporting 'a.b.' for backwards compatibility.
requirement[-1] = '*'
requirements.insert(requirement, TreeMap(), update=True)
|
def _get_implicit_requirements(
self,
fields,
requirements
)
|
Extract internal prefetch requirements from serializer fields.
| 7.83876 | 7.1955 | 1.089398 |
is_root_level = False
if not serializer:
serializer = self.view.get_serializer()
is_root_level = True
queryset = self._get_queryset(queryset=queryset, serializer=serializer)
model = getattr(serializer.Meta, 'model', None)
if not model:
return queryset
prefetches = {}
# build a nested Prefetch queryset
# based on request parameters and serializer fields
fields = serializer.fields
if requirements is None:
requirements = TreeMap()
self._get_implicit_requirements(
fields,
requirements
)
if filters is None:
filters = self._get_requested_filters()
# build nested Prefetch queryset
self._build_requested_prefetches(
prefetches,
requirements,
model,
fields,
filters
)
# build remaining prefetches out of internal requirements
# that are not already covered by request requirements
self._build_implicit_prefetches(
model,
prefetches,
requirements
)
# use requirements at this level to limit fields selected
# only do this for GET requests where we are not requesting the
# entire fieldset
if (
'*' not in requirements and
not self.view.is_update() and
not self.view.is_delete()
):
id_fields = getattr(serializer, 'get_id_fields', lambda: [])()
# only include local model fields
only = [
field for field in set(
id_fields + list(requirements.keys())
) if is_model_field(model, field) and
not is_field_remote(model, field)
]
queryset = queryset.only(*only)
# add request filters
query = self._filters_to_query(
includes=filters.get('_include'),
excludes=filters.get('_exclude'),
serializer=serializer
)
# add additional filters specified by calling view
if extra_filters:
query = extra_filters if not query else extra_filters & query
if query:
# Convert internal django ValidationError to
# APIException-based one in order to resolve validation error
# from 500 status code to 400.
try:
queryset = queryset.filter(query)
except InternalValidationError as e:
raise ValidationError(
dict(e) if hasattr(e, 'error_dict') else list(e)
)
except Exception as e:
# Some other Django error in parsing the filter.
# Very likely a bad query, so throw a ValidationError.
err_msg = getattr(e, 'message', '')
raise ValidationError(err_msg)
# A serializer can have this optional function
# to dynamically apply additional filters on
# any queries that will use that serializer
# You could use this to have (for example) different
# serializers for different subsets of a model or to
# implement permissions which work even in sideloads
if hasattr(serializer, 'filter_queryset'):
queryset = self._serializer_filter(
serializer=serializer,
queryset=queryset
)
# add prefetches and remove duplicates if necessary
prefetch = prefetches.values()
if prefetch and not disable_prefetches:
queryset = queryset.prefetch_related(*prefetch)
elif isinstance(queryset, Manager):
queryset = queryset.all()
if has_joins(queryset) or not is_root_level:
queryset = queryset.distinct()
if self.DEBUG:
queryset._using_prefetches = prefetches
return queryset
|
def _build_queryset(
self,
serializer=None,
filters=None,
queryset=None,
requirements=None,
extra_filters=None,
disable_prefetches=False,
)
|
Build a queryset that pulls in all data required by this request.
Handles nested prefetching of related data and deferring fields
at the queryset level.
Arguments:
serializer: An optional serializer to use as a base for the queryset.
If no serializer is passed, the `get_serializer` method will
be used to initialize the base serializer for the viewset.
filters: An optional TreeMap of nested filters.
queryset: An optional base queryset.
requirements: An optional TreeMap of nested requirements.
| 5.128579 | 5.210115 | 0.98435 |
self.ordering_param = view.SORT
ordering = self.get_ordering(request, queryset, view)
if ordering:
return queryset.order_by(*ordering)
return queryset
|
def filter_queryset(self, request, queryset, view)
|
Filter the queryset, applying the ordering.
The `ordering_param` can be overwritten here.
In DRF, the ordering_param is 'ordering', but we support changing it
to allow the viewset to control the parameter.
| 4.064722 | 3.342917 | 1.21592 |
params = view.get_request_feature(view.SORT)
if params:
fields = [param.strip() for param in params]
valid_ordering, invalid_ordering = self.remove_invalid_fields(
queryset, fields, view
)
# if any of the sort fields are invalid, throw an error.
# else return the ordering
if invalid_ordering:
raise ValidationError(
"Invalid filter field: %s" % invalid_ordering
)
else:
return valid_ordering
# No sorting was included
return self.get_default_ordering(view)
|
def get_ordering(self, request, queryset, view)
|
Return an ordering for a given request.
DRF expects a comma separated list, while DREST expects an array.
This method overwrites the DRF default so it can parse the array.
| 5.04222 | 4.589079 | 1.098743 |
valid_orderings = []
invalid_orderings = []
# for each field sent down from the query param,
# determine if it's valid or invalid
for term in fields:
stripped_term = term.lstrip('-')
# add back the '-' at the end if necessary
reverse_sort_term = '' if len(stripped_term) is len(term) else '-'
ordering = self.ordering_for(stripped_term, view)
if ordering:
valid_orderings.append(reverse_sort_term + ordering)
else:
invalid_orderings.append(term)
return valid_orderings, invalid_orderings
|
def remove_invalid_fields(self, queryset, fields, view)
|
Remove invalid fields from an ordering.
Overwrites the DRF default remove_invalid_fields method to return
both the valid orderings and any invalid orderings.
| 4.744948 | 4.196001 | 1.130826 |
if not self._is_allowed_term(term, view):
return None
serializer = self._get_serializer_class(view)()
serializer_chain = term.split('.')
model_chain = []
for segment in serializer_chain[:-1]:
field = serializer.get_all_fields().get(segment)
if not (field and field.source != '*' and
isinstance(field, DynamicRelationField)):
return None
model_chain.append(field.source or segment)
serializer = field.serializer_class()
last_segment = serializer_chain[-1]
last_field = serializer.get_all_fields().get(last_segment)
if not last_field or last_field.source == '*':
return None
model_chain.append(last_field.source or last_segment)
return '__'.join(model_chain)
|
def ordering_for(self, term, view)
|
Return ordering (model field chain) for term (serializer field chain)
or None if invalid
Raise ImproperlyConfigured if serializer_class not set on view
| 3.060701 | 2.834386 | 1.079846 |
retval = ""
if encoder is None:
encoder = TomlEncoder(o.__class__)
addtoretval, sections = encoder.dump_sections(o, "")
retval += addtoretval
outer_objs = [id(o)]
while sections:
section_ids = [id(section) for section in sections]
for outer_obj in outer_objs:
if outer_obj in section_ids:
raise ValueError("Circular reference detected")
outer_objs += section_ids
newsections = encoder.get_empty_table()
for section in sections:
addtoretval, addtosections = encoder.dump_sections(
sections[section], section)
if addtoretval or (not addtoretval and not addtosections):
if retval and retval[-2:] != "\n\n":
retval += "\n"
retval += "[" + section + "]\n"
if addtoretval:
retval += addtoretval
for s in addtosections:
newsections[section + "." + s] = addtosections[s]
sections = newsections
return retval
|
def dumps(o, encoder=None)
|
Stringifies input dict as toml
Args:
o: Object to dump into toml
encoder: (optional) TomlEncoder instance to use; defaults to TomlEncoder(o.__class__).
Returns:
String containing the toml corresponding to dict
| 3.550509 | 3.714633 | 0.955817 |
# Yield left border.
if left:
yield left
# Yield items with intersect characters.
if intersect:
try:
for j, i in enumerate(line, start=-len(line) + 1):
yield i
if j:
yield intersect
except TypeError: # Generator.
try:
item = next(line)
except StopIteration: # Was empty all along.
pass
else:
while True:
yield item
try:
peek = next(line)
except StopIteration:
break
yield intersect
item = peek
else:
for i in line:
yield i
# Yield right border.
if right:
yield right
|
def combine(line, left, intersect, right)
|
Zip borders between items in `line`.
e.g. ('l', '1', 'c', '2', 'c', '3', 'r')
:param iter line: List to iterate.
:param left: Left border.
:param intersect: Column separator.
:param right: Right border.
:return: Yields combined objects.
| 4.110333 | 3.697084 | 1.111777 |
length = 0
# Hide title if it doesn't fit.
if title is not None and outer_widths:
try:
length = visible_width(title)
except TypeError:
title = str(title)
length = visible_width(title)
if length > sum(outer_widths) + len(intersect) * (len(outer_widths) - 1):
title = None
# Handle no title.
if title is None or not outer_widths or not horizontal:
return combine((horizontal * c for c in outer_widths), left, intersect, right)
# Handle title fitting in the first column.
if length == outer_widths[0]:
return combine([title] + [horizontal * c for c in outer_widths[1:]], left, intersect, right)
if length < outer_widths[0]:
columns = [title + horizontal * (outer_widths[0] - length)] + [horizontal * c for c in outer_widths[1:]]
return combine(columns, left, intersect, right)
# Handle wide titles/narrow columns.
columns_and_intersects = [title]
for width in combine(outer_widths, None, bool(intersect), None):
# If title is taken care of.
if length < 1:
columns_and_intersects.append(intersect if width is True else horizontal * width)
# If title's last character overrides an intersect character.
elif width is True and length == 1:
length = 0
# If this is an intersect character that is overridden by the title.
elif width is True:
length -= 1
# If title's last character is within a column.
elif width >= length:
columns_and_intersects[0] += horizontal * (width - length) # Append horizontal chars to title.
length = 0
# If remainder of title won't fit in a column.
else:
length -= width
return combine(columns_and_intersects, left, None, right)
|
def build_border(outer_widths, horizontal, left, intersect, right, title=None)
|
Build the top/bottom/middle row. Optionally embed the table title within the border.
Title is hidden if it doesn't fit between the left/right characters/edges.
Example return value:
('<', '-----', '+', '------', '+', '-------', '>')
('<', 'My Table', '----', '+', '------->')
:param iter outer_widths: List of widths (with padding) for each column.
:param str horizontal: Character to stretch across each column.
:param str left: Left border.
:param str intersect: Column separator.
:param str right: Right border.
:param title: Overlay the title on the border between the left and right characters.
:return: Returns a generator of strings representing a border.
:rtype: iter
| 3.661193 | 3.656146 | 1.001381 |
if not row or not row[0]:
yield combine((), left, center, right)
return
for row_index in range(len(row[0])):
yield combine((c[row_index] for c in row), left, center, right)
|
def build_row(row, left, center, right)
|
Combine single or multi-lined cells into a single row of list of lists including borders.
Row must already be padded and extended so each cell has the same number of lines.
Example return value:
[
['>', 'Left ', '|', 'Center', '|', 'Right', '<'],
['>', 'Cell1', '|', 'Cell2 ', '|', 'Cell3', '<'],
]
:param iter row: List of cells for one row.
:param str left: Left border.
:param str center: Column separator.
:param str right: Right border.
:return: Yields other generators that yield strings.
:rtype: iter
| 4.060206 | 4.656195 | 0.872001 |
project = __import__(IMPORT, fromlist=[''])
for expected, var in [('@Robpol86', '__author__'), (LICENSE, '__license__'), (VERSION, '__version__')]:
if getattr(project, var) != expected:
raise SystemExit('Mismatch: {0}'.format(var))
# Check changelog.
if not re.compile(r'^%s - \d{4}-\d{2}-\d{2}[\r\n]' % VERSION, re.MULTILINE).search(readme()):
raise SystemExit('Version not found in readme/changelog file.')
# Check tox.
if INSTALL_REQUIRES:
contents = readme('tox.ini')
section = re.compile(r'[\r\n]+install_requires =[\r\n]+(.+?)[\r\n]+\w', re.DOTALL).findall(contents)
if not section:
raise SystemExit('Missing install_requires section in tox.ini.')
in_tox = re.findall(r' ([^=]+)==[\w\d.-]+', section[0])
if INSTALL_REQUIRES != in_tox:
raise SystemExit('Missing/unordered pinned dependencies in tox.ini.')
|
def run(cls)
|
Check variables.
| 5.243244 | 5.054718 | 1.037297 |
if handle == INVALID_HANDLE_VALUE:
raise OSError('Invalid handle.')
# Query Win32 API.
lpcsbi = ctypes.create_string_buffer(22) # Populated by GetConsoleScreenBufferInfo.
if not kernel32.GetConsoleScreenBufferInfo(handle, lpcsbi):
raise ctypes.WinError() # Subclass of OSError.
# Parse data.
left, top, right, bottom = struct.unpack('hhhhHhhhhhh', lpcsbi.raw)[5:-2]
width, height = right - left, bottom - top
return width, height
|
def get_console_info(kernel32, handle)
|
Get information about this current console window (Windows only).
https://github.com/Robpol86/colorclass/blob/ab42da59/colorclass/windows.py#L111
:raise OSError: When handle is invalid or GetConsoleScreenBufferInfo API call fails.
:param ctypes.windll.kernel32 kernel32: Loaded kernel32 instance.
:param int handle: stderr or stdout handle.
:return: Width (number of characters) and height (number of lines) of the terminal.
:rtype: tuple
| 3.399062 | 3.519606 | 0.965751 |
if IS_WINDOWS:
kernel32 = kernel32 or ctypes.windll.kernel32
try:
return get_console_info(kernel32, kernel32.GetStdHandle(STD_ERROR_HANDLE))
except OSError:
try:
return get_console_info(kernel32, kernel32.GetStdHandle(STD_OUTPUT_HANDLE))
except OSError:
return DEFAULT_WIDTH, DEFAULT_HEIGHT
try:
device = __import__('fcntl').ioctl(0, __import__('termios').TIOCGWINSZ, '\0\0\0\0\0\0\0\0')
except IOError:
return DEFAULT_WIDTH, DEFAULT_HEIGHT
height, width = struct.unpack('hhhh', device)[:2]
return width, height
|
def terminal_size(kernel32=None)
|
Get the width and height of the terminal.
http://code.activestate.com/recipes/440694-determine-size-of-console-window-on-windows/
http://stackoverflow.com/questions/17993814/why-the-irrelevant-code-made-a-difference
:param kernel32: Optional mock kernel32 object. For testing.
:return: Width (number of characters) and height (number of lines) of the terminal.
:rtype: tuple
| 2.187274 | 2.321512 | 0.942176 |
try:
title_bytes = title.encode('utf-8')
except AttributeError:
title_bytes = title
if IS_WINDOWS:
kernel32 = kernel32 or ctypes.windll.kernel32
try:
is_ascii = all(ord(c) < 128 for c in title) # str/unicode.
except TypeError:
is_ascii = all(c < 128 for c in title) # bytes.
if is_ascii:
return kernel32.SetConsoleTitleA(title_bytes) != 0
else:
return kernel32.SetConsoleTitleW(title) != 0
# Linux/OSX.
sys.stdout.write(b'\033]0;' + title_bytes + b'\007')
return True
|
def set_terminal_title(title, kernel32=None)
|
Set the terminal title.
:param title: The title to set (string, unicode, bytes accepted).
:param kernel32: Optional mock kernel32 object. For testing.
:return: If title changed successfully (Windows only, always True on Linux/OSX).
:rtype: bool
| 2.409054 | 2.301673 | 1.046653 |
table_data = [
[Color('{autogreen}<10ms{/autogreen}'), '192.168.0.100, 192.168.0.101'],
[Color('{autoyellow}10ms <= 100ms{/autoyellow}'), '192.168.0.102, 192.168.0.103'],
[Color('{autored}>100ms{/autored}'), '192.168.0.105'],
]
table_instance = SingleTable(table_data)
table_instance.inner_heading_row_border = False
return table_instance.table
|
def table_server_timings()
|
Return table string to be printed.
| 2.505861 | 2.496695 | 1.003671 |
table_data = [
[Color('Low Space'), Color('{autocyan}Nominal Space{/autocyan}'), Color('Excessive Space')],
[Color('Low Load'), Color('Nominal Load'), Color('{autored}High Load{/autored}')],
[Color('{autocyan}Low Free RAM{/autocyan}'), Color('Nominal Free RAM'), Color('High Free RAM')],
]
table_instance = SingleTable(table_data, '192.168.0.105')
table_instance.inner_heading_row_border = False
table_instance.inner_row_border = True
table_instance.justify_columns = {0: 'center', 1: 'center', 2: 'center'}
return table_instance.table
|
def table_server_status()
|
Return table string to be printed.
| 3.381498 | 3.335776 | 1.013706 |
table_instance = SingleTable([['A', 'B'], ['C', 'D']])
# Get first table lines.
table_instance.outer_border = False
table_inner_borders = table_instance.table.splitlines()
# Get second table lines.
table_instance.outer_border = True
table_instance.inner_heading_row_border = False
table_instance.inner_column_border = False
table_outer_borders = table_instance.table.splitlines()
# Combine.
smallest, largest = sorted([table_inner_borders, table_outer_borders], key=len)
smallest += [''] * (len(largest) - len(smallest)) # Make both same size.
combined = list()
for i, row in enumerate(largest):
combined.append(row.ljust(10) + ' ' + smallest[i])
return '\n'.join(combined)
|
def table_abcd()
|
Return table string to be printed. Two tables on one line.
| 3.325281 | 3.121938 | 1.065134 |
Windows.enable(auto_colors=True, reset_atexit=True) # Does nothing if not on Windows.
# Server timings.
print(table_server_timings())
print()
# Server status.
print(table_server_status())
print()
# Two A B C D tables.
print(table_abcd())
print()
# Instructions.
table_instance = SingleTable([['Obey Obey Obey Obey']], 'Instructions')
print(table_instance.table)
print()
|
def main()
|
Main function.
| 10.221087 | 9.997876 | 1.022326 |
inner_widths = max_dimensions(self.table_data)[0]
outer_border = 2 if self.outer_border else 0
inner_border = 1 if self.inner_column_border else 0
padding = self.padding_left + self.padding_right
return column_max_width(inner_widths, column_number, outer_border, inner_border, padding)
|
def column_max_width(self, column_number)
|
Return the maximum width of a column based on the current terminal width.
:param int column_number: The column number to query.
:return: The max width of the column.
:rtype: int
| 3.654681 | 4.123455 | 0.886315 |
outer_widths = max_dimensions(self.table_data, self.padding_left, self.padding_right)[2]
outer_border = 2 if self.outer_border else 0
inner_border = 1 if self.inner_column_border else 0
return table_width(outer_widths, outer_border, inner_border)
|
def table_width(self)
|
Return the width of the table including padding and borders.
| 4.390187 | 4.016578 | 1.093017 |
horizontal = str(self.CHAR_INNER_HORIZONTAL)
left = self.CHAR_OUTER_LEFT_VERTICAL
intersect = self.CHAR_INNER_VERTICAL
right = self.CHAR_OUTER_RIGHT_VERTICAL
columns = list()
for i, width in enumerate(outer_widths):
justify = self.justify_columns.get(i)
width = max(3, width) # Width should be at least 3 so justification can be applied.
if justify == 'left':
columns.append(':' + horizontal * (width - 1))
elif justify == 'right':
columns.append(horizontal * (width - 1) + ':')
elif justify == 'center':
columns.append(':' + horizontal * (width - 2) + ':')
else:
columns.append(horizontal * width)
return combine(columns, left, intersect, right)
|
def horizontal_border(self, _, outer_widths)
|
Handle the GitHub heading border.
E.g.:
|:---|:---:|---:|----|
:param _: Unused.
:param iter outer_widths: List of widths (with padding) for each column.
:return: Prepared border strings in a generator.
:rtype: iter
| 3.177056 | 3.239721 | 0.980657 |
for i, row in enumerate(self.table_data):
# Yield the row line by line (e.g. multi-line rows).
for line in self.gen_row_lines(row, 'row', inner_widths, inner_heights[i]):
yield line
# Yield heading separator.
if i == 0:
yield self.horizontal_border(None, outer_widths)
|
def gen_table(self, inner_widths, inner_heights, outer_widths)
|
Combine everything and yield every line of the entire table with borders.
:param iter inner_widths: List of widths (no padding) for each column.
:param iter inner_heights: List of heights (no padding) for each row.
:param iter outer_widths: List of widths (with padding) for each column.
:return:
| 5.258654 | 5.593904 | 0.940069 |
def visible_width(string):
    """Get the visible (terminal-column) width of a unicode string.

    ANSI color escape sequences are stripped before measuring, and East
    Asian Fullwidth/Wide characters are counted as two columns.

    From: https://github.com/Robpol86/terminaltables/pull/9

    :param str string: String to measure.
    :return: String's visible width.
    :rtype: int
    """
    if '\033' in string:
        # Strip ANSI color codes; RE_COLOR_ANSI is a module-level pattern.
        string = RE_COLOR_ANSI.sub('', string)
    # Convert Python 2 byte strings to unicode; on a str this raises
    # AttributeError and is deliberately ignored.
    try:
        string = string.decode('u8')
    except (AttributeError, UnicodeEncodeError):
        pass
    width = 0
    for char in string:
        if unicodedata.east_asian_width(char) in ('F', 'W'):
            width += 2  # Fullwidth/Wide (CJK) characters occupy two columns.
        else:
            width += 1
    return width
|
Get the visible width of a unicode string.
Some CJK unicode characters are more than one byte unlike ASCII and latin unicode characters.
From: https://github.com/Robpol86/terminaltables/pull/9
:param str string: String to measure.
:return: String's width.
:rtype: int
| 2.794967 | 3.064211 | 0.912132 |
# align_and_pad_cell body: split the cell into lines, vertically pad to the
# target inner height per the align tuple, then horizontally justify each
# line and add left/right padding.
if not hasattr(string, 'splitlines'):
    string = str(string)
# Handle trailing newlines or empty strings, str.splitlines() does not satisfy.
lines = string.splitlines() or ['']
if string.endswith('\n'):
    lines.append('')
# Vertically align and pad.
if 'bottom' in align:
    lines = ([''] * (inner_dimensions[1] - len(lines) + padding[2])) + lines + ([''] * padding[3])
elif 'middle' in align:
    delta = inner_dimensions[1] - len(lines)
    lines = ([''] * (delta // 2 + delta % 2 + padding[2])) + lines + ([''] * (delta // 2 + padding[3]))
else:
    lines = ([''] * padding[2]) + lines + ([''] * (inner_dimensions[1] - len(lines) + padding[3]))
# Horizontally align and pad.
for i, line in enumerate(lines):
    # new_width widens the target by the count of invisible characters
    # (len(line) - visible_width(line), e.g. ANSI codes) so that
    # rjust/center/ljust pad to the correct *visible* width.
    new_width = inner_dimensions[0] + len(line) - visible_width(line)
    if 'right' in align:
        lines[i] = line.rjust(padding[0] + new_width, space) + (space * padding[1])
    elif 'center' in align:
        lines[i] = (space * padding[0]) + line.center(new_width, space) + (space * padding[1])
    else:
        lines[i] = (space * padding[0]) + line.ljust(new_width + padding[1], space)
return lines
|
def align_and_pad_cell(string, align, inner_dimensions, padding, space=' ')
|
Align a string horizontally and vertically. Also add additional padding in both dimensions.
:param str string: Input string to operate on.
:param tuple align: Tuple that contains one of left/center/right and/or top/middle/bottom.
:param tuple inner_dimensions: Width and height ints to expand string to without padding.
:param iter padding: Number of space chars for left, right, top, and bottom (4 ints).
:param str space: Character to use as white space for resizing/padding (use single visible chars only).
:return: Padded cell split into lines.
:rtype: list
| 2.331966 | 2.266461 | 1.028902 |
# max_dimensions body: one pass over the table collecting the max visible
# width of each column and max line-count of each row, then the same values
# with padding applied.
inner_widths = [0] * (max(len(r) for r in table_data) if table_data else 0)
inner_heights = [0] * len(table_data)
# Find max width and heights.
for j, row in enumerate(table_data):
    for i, cell in enumerate(row):
        if not hasattr(cell, 'count') or not hasattr(cell, 'splitlines'):
            cell = str(cell)
        if not cell:
            continue
        inner_heights[j] = max(inner_heights[j], cell.count('\n') + 1)
        inner_widths[i] = max(inner_widths[i], *[visible_width(l) for l in cell.splitlines()])
# Calculate with padding.
outer_widths = [padding_left + i + padding_right for i in inner_widths]
outer_heights = [padding_top + i + padding_bottom for i in inner_heights]
return inner_widths, inner_heights, outer_widths, outer_heights
|
def max_dimensions(table_data, padding_left=0, padding_right=0, padding_top=0, padding_bottom=0)
|
Get maximum widths of each column and maximum height of each row.
:param iter table_data: List of list of strings (unmodified table data).
:param int padding_left: Number of space chars on left side of cell.
:param int padding_right: Number of space chars on right side of cell.
:param int padding_top: Number of empty lines on top side of cell.
:param int padding_bottom: Number of empty lines on bottom side of cell.
:return: 4-item tuple of n-item lists. Inner column widths and row heights, outer column widths and row heights.
:rtype: tuple
| 2.308759 | 2.309049 | 0.999874 |
# Module-level column_max_width body: how wide one column may grow before
# the rendered table exceeds the current terminal width.
column_count = len(inner_widths)
terminal_width = terminal_size()[0]
# Count how much space padding, outer, and inner borders take up.
non_data_space = outer_border
non_data_space += inner_border * (column_count - 1)
non_data_space += column_count * padding
# Exclude selected column's width.
data_space = sum(inner_widths) - inner_widths[column_number]
return terminal_width - data_space - non_data_space
|
def column_max_width(inner_widths, column_number, outer_border, inner_border, padding)
|
Determine the maximum width of a column based on the current terminal width.
:param iter inner_widths: List of widths (no padding) for each column.
:param int column_number: The column number to query.
:param int outer_border: Sum of left and right outer border visible widths.
:param int inner_border: Visible width of the inner border character.
:param int padding: Total padding per cell (left + right padding).
:return: The maximum width the column can be without causing line wrapping.
| 3.80507 | 4.389714 | 0.866815 |
def table_width(outer_widths, outer_border, inner_border):
    """Determine the width of the entire table including borders and padding.

    :param iter outer_widths: List of widths (with padding) for each column.
    :param int outer_border: Sum of left and right outer border visible widths.
    :param int inner_border: Visible width of the inner border character.
    :return: The width of the table.
    :rtype: int
    """
    column_count = len(outer_widths)
    # Frame cost: the outer edges plus one inner border between each pair
    # of adjacent columns (nothing when the table has no columns).
    non_data_space = outer_border
    if column_count:
        non_data_space += inner_border * (column_count - 1)
    # Space of all columns and their padding.
    data_space = sum(outer_widths)
    return data_space + non_data_space
|
Determine the width of the entire table including borders and padding.
:param iter outer_widths: List of widths (with padding) for each column.
:param int outer_border: Sum of left and right outer border visible widths.
:param int inner_border: Visible width of the inner border character.
:return: The width of the table.
:rtype: int
| 4.613008 | 4.863552 | 0.948485 |
# AsciiTable.horizontal_border body: select the character set for the
# requested border style ('top', 'bottom', 'heading', 'footing', or the
# default row separator) and delegate assembly to build_border(). Only the
# top border may carry the table title.
if style == 'top':
    horizontal = self.CHAR_OUTER_TOP_HORIZONTAL
    left = self.CHAR_OUTER_TOP_LEFT
    intersect = self.CHAR_OUTER_TOP_INTERSECT if self.inner_column_border else ''
    right = self.CHAR_OUTER_TOP_RIGHT
    title = self.title
elif style == 'bottom':
    horizontal = self.CHAR_OUTER_BOTTOM_HORIZONTAL
    left = self.CHAR_OUTER_BOTTOM_LEFT
    intersect = self.CHAR_OUTER_BOTTOM_INTERSECT if self.inner_column_border else ''
    right = self.CHAR_OUTER_BOTTOM_RIGHT
    title = None
elif style == 'heading':
    horizontal = self.CHAR_H_INNER_HORIZONTAL
    left = self.CHAR_H_OUTER_LEFT_INTERSECT if self.outer_border else ''
    intersect = self.CHAR_H_INNER_INTERSECT if self.inner_column_border else ''
    right = self.CHAR_H_OUTER_RIGHT_INTERSECT if self.outer_border else ''
    title = None
elif style == 'footing':
    horizontal = self.CHAR_F_INNER_HORIZONTAL
    left = self.CHAR_F_OUTER_LEFT_INTERSECT if self.outer_border else ''
    intersect = self.CHAR_F_INNER_INTERSECT if self.inner_column_border else ''
    right = self.CHAR_F_OUTER_RIGHT_INTERSECT if self.outer_border else ''
    title = None
else:
    horizontal = self.CHAR_INNER_HORIZONTAL
    left = self.CHAR_OUTER_LEFT_INTERSECT if self.outer_border else ''
    intersect = self.CHAR_INNER_INTERSECT if self.inner_column_border else ''
    right = self.CHAR_OUTER_RIGHT_INTERSECT if self.outer_border else ''
    title = None
return build_border(outer_widths, horizontal, left, intersect, right, title)
|
def horizontal_border(self, style, outer_widths)
|
Build any kind of horizontal border for the table.
:param str style: Type of border to return.
:param iter outer_widths: List of widths (with padding) for each column.
:return: Prepared border as a tuple of strings.
:rtype: tuple
| 1.59751 | 1.589352 | 1.005133 |
# NOTE(review): the bare "r" below looks like residue of an r""" raw
# docstring prefix lost when this row was extracted -- confirm upstream.
r
# gen_row_lines body: pad/align every cell in the row, pick the vertical
# border characters for the style, then yield the row one line at a time
# (multi-line cells produce several lines).
cells_in_row = list()
# Resize row if it doesn't have enough cells.
if len(row) != len(inner_widths):
    row = row + [''] * (len(inner_widths) - len(row))
# Pad and align each cell. Split each cell into lines to support multi-line cells.
for i, cell in enumerate(row):
    align = (self.justify_columns.get(i),)
    inner_dimensions = (inner_widths[i], height)
    padding = (self.padding_left, self.padding_right, 0, 0)
    cells_in_row.append(align_and_pad_cell(cell, align, inner_dimensions, padding))
# Determine border characters.
if style == 'heading':
    left = self.CHAR_H_OUTER_LEFT_VERTICAL if self.outer_border else ''
    center = self.CHAR_H_INNER_VERTICAL if self.inner_column_border else ''
    right = self.CHAR_H_OUTER_RIGHT_VERTICAL if self.outer_border else ''
elif style == 'footing':
    left = self.CHAR_F_OUTER_LEFT_VERTICAL if self.outer_border else ''
    center = self.CHAR_F_INNER_VERTICAL if self.inner_column_border else ''
    right = self.CHAR_F_OUTER_RIGHT_VERTICAL if self.outer_border else ''
else:
    left = self.CHAR_OUTER_LEFT_VERTICAL if self.outer_border else ''
    center = self.CHAR_INNER_VERTICAL if self.inner_column_border else ''
    right = self.CHAR_OUTER_RIGHT_VERTICAL if self.outer_border else ''
# Yield each line.
for line in build_row(cells_in_row, left, center, right):
    yield line
|
def gen_row_lines(self, row, style, inner_widths, height)
|
r"""Combine cells in row and group them into lines with vertical borders.
Caller is expected to pass yielded lines to ''.join() to combine them into a printable line. Caller must append
newline character to the end of joined line.
In:
['Row One Column One', 'Two', 'Three']
Out:
[
('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|'),
]
In:
['Row One\nColumn One', 'Two', 'Three'],
Out:
[
('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'),
('|', ' Column One ', '|', ' ', '|', ' ', '|'),
]
:param iter row: One row in the table. List of cells.
:param str style: Type of border characters to use.
:param iter inner_widths: List of widths (no padding) for each column.
:param int height: Inner height (no padding) (number of lines) to expand row to.
:return: Yields lines split into components in a list. Caller must ''.join() line.
| 2.380077 | 2.376388 | 1.001552 |
# AsciiTable.gen_table body: emit the whole table -- optional top border,
# each row (with heading/footing styling for first/last rows), the
# appropriate separator after each non-final row, and optional bottom border.
# Yield top border.
if self.outer_border:
    yield self.horizontal_border('top', outer_widths)
# Yield table body.
row_count = len(self.table_data)
last_row_index, before_last_row_index = row_count - 1, row_count - 2
for i, row in enumerate(self.table_data):
    # Yield the row line by line (e.g. multi-line rows).
    if self.inner_heading_row_border and i == 0:
        style = 'heading'
    elif self.inner_footing_row_border and i == last_row_index:
        style = 'footing'
    else:
        style = 'row'
    for line in self.gen_row_lines(row, style, inner_widths, inner_heights[i]):
        yield line
    # If this is the last row then break. No separator needed.
    if i == last_row_index:
        break
    # Yield heading separator.
    if self.inner_heading_row_border and i == 0:
        yield self.horizontal_border('heading', outer_widths)
    # Yield footing separator.
    elif self.inner_footing_row_border and i == before_last_row_index:
        yield self.horizontal_border('footing', outer_widths)
    # Yield row separator.
    elif self.inner_row_border:
        yield self.horizontal_border('row', outer_widths)
# Yield bottom border.
if self.outer_border:
    yield self.horizontal_border('bottom', outer_widths)
|
def gen_table(self, inner_widths, inner_heights, outer_widths)
|
Combine everything and yield every line of the entire table with borders.
:param iter inner_widths: List of widths (no padding) for each column.
:param iter inner_heights: List of heights (no padding) for each row.
:param iter outer_widths: List of widths (with padding) for each column.
:return:
| 2.210457 | 2.250359 | 0.982268 |
# AsciiTable.table property body: measure the table (inner widths, inner
# heights, outer widths) then render all generated lines into one string.
dimensions = max_dimensions(self.table_data, self.padding_left, self.padding_right)[:3]
return flatten(self.gen_table(*dimensions))
|
def table(self)
|
Return a large string of the entire table ready to be printed to the terminal.
| 11.655906 | 9.653984 | 1.207367 |
# UnixTable.table body: render via the parent class, then drop redundant
# "\033(B\033(0" escape pairs between adjacent box-drawing segments
# (presumably exit/re-enter of the VT100 alternate charset -- confirm).
ascii_table = super(UnixTable, self).table
optimized = ascii_table.replace('\033(B\033(0', '')
return optimized
|
def table(self)
|
Return a large string of the entire table ready to be printed to the terminal.
| 11.92993 | 9.790643 | 1.218503 |
# Demo entry point: render the module-level TABLE_DATA with three border
# styles (Ascii, Single, Double), right-justifying the third column.
title = 'Jetta SportWagen'
# AsciiTable.
table_instance = AsciiTable(TABLE_DATA, title)
table_instance.justify_columns[2] = 'right'
print(table_instance.table)
print()
# SingleTable.
table_instance = SingleTable(TABLE_DATA, title)
table_instance.justify_columns[2] = 'right'
print(table_instance.table)
print()
# DoubleTable.
table_instance = DoubleTable(TABLE_DATA, title)
table_instance.justify_columns[2] = 'right'
print(table_instance.table)
print()
|
def main()
|
Main function.
| 2.563854 | 2.521467 | 1.01681 |
# Demo entry point: word-wrap the module-level LONG_STRING to the maximum
# width the second column can occupy in the current terminal.
table_data = [
    ['Long String', ''],  # One row. Two columns. Long string will replace this empty string.
]
table = SingleTable(table_data)
# Calculate newlines.
max_width = table.column_max_width(1)
wrapped_string = '\n'.join(wrap(LONG_STRING, max_width))
table.table_data[0][1] = wrapped_string
print(table.table)
|
def main()
|
Main function.
| 6.306911 | 6.047026 | 1.042977 |
def sigmoidal(arr, contrast, bias):
    r"""Apply sigmoidal contrast without saturating highlights or shadows.

    The scaled sigmoid ``(g(u) - g(0)) / (g(1) - g(0))`` with
    ``g(u) = 1 / (1 + exp(beta * (alpha - u)))`` keeps the output bound to
    [0, 1]; ``alpha`` is the threshold level and ``beta`` the contrast.

    Parameters
    ----------
    arr : ndarray, float, 0 .. 1
        Array of color values to adjust.
    contrast : number
        Enhances intensity differences between lighter and darker elements;
        0 is none, 3 typical, 20 a lot. Negative values apply the inverse
        sigmoidal function.
    bias : float, between 0 and 1
        Threshold level for the contrast function to center on
        (typically 0.5).

    Returns
    -------
    ndarray with values in [0, 1].

    Raises
    ------
    ValueError
        If ``arr`` values or ``bias`` fall outside [0, 1].
    """
    # Float-comparison tolerance; previously a module-level constant.
    epsilon = np.finfo(np.float64).eps
    if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
        raise ValueError("Input array must have float values between 0 and 1")
    if (bias > 1.0 + epsilon) or (bias < 0 - epsilon):
        raise ValueError("bias must be a scalar float between 0 and 1")
    # We use the names alpha (threshold) and beta (contrast) to match docs.
    alpha, beta = bias, contrast
    if alpha == 0:
        alpha = epsilon  # avoid zero threshold in the exponent terms
    if beta == 0:
        return arr  # zero contrast: identity
    # Bug fix: use a context manager rather than np.seterr so the
    # process-wide floating point error state is restored afterwards.
    with np.errstate(divide="ignore", invalid="ignore"):
        if beta > 0:
            numerator = 1 / (1 + np.exp(beta * (alpha - arr))) - 1 / (
                1 + np.exp(beta * alpha)
            )
            denominator = 1 / (1 + np.exp(beta * (alpha - 1))) - 1 / (
                1 + np.exp(beta * alpha)
            )
            output = numerator / denominator
        else:
            # Inverse sigmoidal function (beta < 0):
            # todo: account for 0s
            output = (
                (beta * alpha)
                - np.log(
                    (
                        1
                        / (
                            (arr / (1 + np.exp(beta * alpha - beta)))
                            - (arr / (1 + np.exp(beta * alpha)))
                            + (1 / (1 + np.exp(beta * alpha)))
                        )
                    )
                    - 1
                )
            ) / beta
    return output
|
r"""
Sigmoidal contrast is type of contrast control that
adjusts the contrast without saturating highlights or shadows.
It allows control over two factors:
the contrast range from light to dark, and where the middle value
of the mid-tones falls. The result is a non-linear and smooth
contrast change.
Parameters
----------
arr : ndarray, float, 0 .. 1
Array of color values to adjust
contrast : integer
Enhances the intensity differences between the lighter and darker
elements of the image. For example, 0 is none, 3 is typical and
20 is a lot.
bias : float, between 0 and 1
Threshold level for the contrast function to center on
(typically centered at 0.5)
Notes
----------
Sigmoidal contrast is based on the sigmoidal transfer function:
.. math:: g(u) = ( 1/(1 + e^{- \alpha * u + \beta)})
This sigmoid function is scaled so that the output is bound by
the interval [0, 1].
.. math:: ( 1/(1 + e^(\beta * (\alpha - u))) - 1/(1 + e^(\beta * \alpha)))/
( 1/(1 + e^(\beta*(\alpha - 1))) - 1/(1 + e^(\beta * \alpha)) )
Where :math: `\alpha` is the threshold level, and :math: `\beta` the
contrast factor to be applied.
References
----------
.. [CT] Hany Farid "Fundamentals of Image Processing"
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf
| 3.230598 | 3.141237 | 1.028448 |
def gamma(arr, g):
    r"""Gamma-correct an array scaled 0..1: ``out = arr ** (1 / g)``.

    Setting ``g`` less than 1.0 darkens the image; greater than 1.0
    lightens it. Reasonable values range from 0.8 to 2.4.

    :param arr: float ndarray with values in [0, 1].
    :param float g: gamma value; must be > 0 and not NaN.
    :return: gamma-corrected ndarray in [0, 1].
    :raises ValueError: on out-of-range input or non-positive/NaN gamma.
    """
    # Float-comparison tolerance; previously a module-level constant.
    epsilon = np.finfo(np.float64).eps
    if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
        raise ValueError("Input array must have float values between 0 and 1")
    if g <= 0 or np.isnan(g):
        raise ValueError("gamma must be greater than 0")
    return arr ** (1.0 / g)
|
r"""
Gamma correction is a nonlinear operation that
adjusts the image's channel values pixel-by-pixel according
to a power-law:
.. math:: pixel_{out} = pixel_{in} ^ {\gamma}
Setting gamma (:math:`\gamma`) to be less than 1.0 darkens the image and
setting gamma to be greater than 1.0 lightens it.
Parameters
----------
gamma (:math:`\gamma`): float
Reasonable values range from 0.8 to 2.4.
| 3.589374 | 3.633906 | 0.987745 |
def simple_atmo_opstring(haze, contrast, bias):
    """Make a simple atmospheric correction rio-color operations string.

    Green is gamma-corrected by one third of the haze amount, blue by the
    full amount, then a sigmoidal contrast stretch is applied to all bands.

    :param float haze: amount of haze to adjust for, e.g. 0.03.
    :param contrast: sigmoidal contrast amount.
    :param float bias: sigmoidal bias threshold, between 0 and 1.
    :return: ordered rio color operations string.
    """
    gamma_b = 1 - haze
    gamma_g = 1 - (haze / 3.0)
    ops = (
        "gamma g {gamma_g}, " "gamma b {gamma_b}, " "sigmoidal rgb {contrast} {bias}"
    ).format(gamma_g=gamma_g, gamma_b=gamma_b, contrast=contrast, bias=bias)
    return ops
|
Make a simple atmospheric correction formula.
| 3.818087 | 3.895514 | 0.980124 |
# simple_atmo body: static (non-adaptive) atmospheric correction. Blue gets
# the full haze gamma, green one third of it (mirrors simple_atmo_opstring);
# red passes through. A sigmoidal contrast stretch is then applied to the
# first three bands; extra bands in rgb are preserved untouched.
gamma_b = 1 - haze
gamma_g = 1 - (haze / 3.0)
arr = np.empty(shape=(3, rgb.shape[1], rgb.shape[2]))
arr[0] = rgb[0]
arr[1] = gamma(rgb[1], gamma_g)
arr[2] = gamma(rgb[2], gamma_b)
output = rgb.copy()
output[0:3] = sigmoidal(arr, contrast, bias)
return output
|
def simple_atmo(rgb, haze, contrast, bias)
|
A simple, static (non-adaptive) atmospheric correction function.
Parameters
----------
haze: float
Amount of haze to adjust for. For example, 0.03
contrast : integer
Enhances the intensity differences between the lighter and darker
elements of the image. For example, 0 is none, 3 is typical and
20 is a lot.
bias : float, between 0 and 1
Threshold level for the contrast function to center on
(typically centered at 0.5 or 50%)
| 3.095839 | 3.494291 | 0.885971 |
def f(arr):
# Avoid mutation by copying
newarr = arr.copy()
if rgb_op:
# apply func to array's first 3 bands, assumed r,g,b
# additional band(s) are untouched
newarr[0:3] = func(newarr[0:3], **kwargs)
else:
# apply func to array band at a time
for b in bands:
newarr[b - 1] = func(arr[b - 1], **kwargs)
return newarr
f.__name__ = str(opname)
return f
|
def _op_factory(func, kwargs, opname, bands, rgb_op=False)
|
create an operation function closure
don't call directly, use parse_operations
returns a function which itself takes and returns ndarrays
| 4.120519 | 4.145517 | 0.99397 |
# parse_operations body: parse the rio-color DSL
# "OPNAME BANDS ARG1 ARG2 OPNAME BANDS ARG" into a list of closures (via
# _op_factory), each of which takes and returns ndarrays.
band_lookup = {"r": 1, "g": 2, "b": 3}
count = len(band_lookup)
opfuncs = {"saturation": saturation, "sigmoidal": sigmoidal, "gamma": gamma}
opkwargs = {
    "saturation": ("proportion",),
    "sigmoidal": ("contrast", "bias"),
    "gamma": ("g",),
}
# Operations that assume RGB colorspace
rgb_ops = ("saturation",)
# split into tokens, commas are optional whitespace
tokens = [x.strip() for x in ops_string.replace(",", "").split(" ")]
operations = []
current = []
# Group tokens: each op name starts a new [opname, bands, *args] group.
for token in tokens:
    if token.lower() in opfuncs.keys():
        if len(current) > 0:
            operations.append(current)
            current = []
    current.append(token.lower())
if len(current) > 0:
    operations.append(current)
result = []
for parts in operations:
    opname = parts[0]
    bandstr = parts[1]
    args = parts[2:]
    try:
        func = opfuncs[opname]
    except KeyError:
        raise ValueError("{} is not a valid operation".format(opname))
    if opname in rgb_ops:
        # ignore bands, assumed to be in rgb
        # push 2nd arg into args
        args = [bandstr] + args
        bands = (1, 2, 3)
    else:
        # 2nd arg is bands
        # parse r,g,b ~= 1,2,3
        bands = set()
        for bs in bandstr:
            try:
                band = int(bs)
            except ValueError:
                band = band_lookup[bs.lower()]
            if band < 1 or band > count:
                raise ValueError(
                    "{} BAND must be between 1 and {}".format(opname, count)
                )
            bands.add(band)
    # assume all args are float
    args = [float(arg) for arg in args]
    kwargs = dict(zip(opkwargs[opname], args))
    # Create opperation function
    f = _op_factory(
        func=func,
        kwargs=kwargs,
        opname=opname,
        bands=bands,
        rgb_op=(opname in rgb_ops),
    )
    result.append(f)
return result
|
def parse_operations(ops_string)
|
Takes a string of operations written with a handy DSL
"OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG"
And returns a list of functions, each of which take and return ndarrays
| 3.260816 | 3.288648 | 0.991537 |
def check_jobs(jobs):
    """Validate and normalize the number of parallel jobs.

    :param int jobs: requested job count; any negative value means
        "use all CPUs", zero is invalid.
    :return: a positive job count.
    :raises click.UsageError: if ``jobs`` is 0.
    """
    if jobs == 0:
        raise click.UsageError("Jobs must be >= 1 or == -1")
    elif jobs < 0:
        # Lazy import: only needed to discover the CPU count.
        import multiprocessing
        jobs = multiprocessing.cpu_count()
    return jobs
|
Validate number of jobs.
| 5.936637 | 4.999826 | 1.187369 |
# `rio color` CLI body: copy the source profile, validate the operations
# string up front (workers re-parse it), then process the raster
# window-by-window -- in parallel via riomucho when jobs > 1, otherwise
# sequentially in-process.
with rasterio.open(src_path) as src:
    opts = src.profile.copy()
    windows = [(window, ij) for ij, window in src.block_windows()]
opts.update(**creation_options)
opts["transform"] = guard_transform(opts["transform"])
out_dtype = out_dtype if out_dtype else opts["dtype"]
opts["dtype"] = out_dtype
args = {"ops_string": " ".join(operations), "out_dtype": out_dtype}
# Just run this for validation this time
# parsing will be run again within the worker
# where its returned value will be used
try:
    parse_operations(args["ops_string"])
except ValueError as e:
    raise click.UsageError(str(e))
jobs = check_jobs(jobs)
if jobs > 1:
    with riomucho.RioMucho(
        [src_path],
        dst_path,
        color_worker,
        windows=windows,
        options=opts,
        global_args=args,
        mode="manual_read",
    ) as mucho:
        mucho.run(jobs)
else:
    with rasterio.open(dst_path, "w", **opts) as dest:
        with rasterio.open(src_path) as src:
            rasters = [src]
            for window, ij in windows:
                arr = color_worker(rasters, window, ij, args)
                dest.write(arr, window=window)
        # Preserve the source's band color interpretation on the output.
        dest.colorinterp = src.colorinterp
|
def color(ctx, jobs, out_dtype, src_path, dst_path, operations, creation_options)
|
Color correction
Operations will be applied to the src image in the specified order.
Available OPERATIONS include:
\b
"gamma BANDS VALUE"
Applies a gamma curve, brightening or darkening midtones.
VALUE > 1 brightens the image.
\b
"sigmoidal BANDS CONTRAST BIAS"
Adjusts the contrast and brightness of midtones.
BIAS > 0.5 darkens the image.
\b
"saturation PROPORTION"
Controls the saturation in LCH color space.
PROPORTION = 0 results in a grayscale image
PROPORTION = 1 results in an identical image
PROPORTION = 2 is likely way too saturated
BANDS are specified as a single arg, no delimiters
\b
`123` or `RGB` or `rgb` are all equivalent
Example:
\b
rio color -d uint8 -j 4 input.tif output.tif \\
gamma 3 0.95, sigmoidal rgb 35 0.13
| 4.03118 | 4.357277 | 0.92516 |
# `rio atmos` CLI body: with --as-color, just print the equivalent
# `rio color` command and exit. Otherwise process the raster window-by-window
# with the simple atmospheric correction, in parallel when jobs > 1.
if as_color:
    click.echo(
        "rio color {} {} {}".format(
            src_path, dst_path, simple_atmo_opstring(atmo, contrast, bias)
        )
    )
    exit(0)
with rasterio.open(src_path) as src:
    opts = src.profile.copy()
    windows = [(window, ij) for ij, window in src.block_windows()]
opts.update(**creation_options)
opts["transform"] = guard_transform(opts["transform"])
out_dtype = out_dtype if out_dtype else opts["dtype"]
opts["dtype"] = out_dtype
args = {"atmo": atmo, "contrast": contrast, "bias": bias, "out_dtype": out_dtype}
jobs = check_jobs(jobs)
if jobs > 1:
    with riomucho.RioMucho(
        [src_path],
        dst_path,
        atmos_worker,
        windows=windows,
        options=opts,
        global_args=args,
        mode="manual_read",
    ) as mucho:
        mucho.run(jobs)
else:
    with rasterio.open(dst_path, "w", **opts) as dest:
        with rasterio.open(src_path) as src:
            rasters = [src]
            for window, ij in windows:
                arr = atmos_worker(rasters, window, ij, args)
                dest.write(arr, window=window)
|
def atmos(
ctx,
atmo,
contrast,
bias,
jobs,
out_dtype,
src_path,
dst_path,
creation_options,
as_color,
)
|
Atmospheric correction
| 3.115274 | 3.100151 | 1.004878 |
def to_math_type(arr):
    """Convert an array from its native integer dtype range to 0..1 floats.

    Scales down linearly by the dtype's maximum value.

    :param arr: ndarray with a native integer dtype (e.g. uint8, uint16).
    :return: float64 ndarray with values in [0, 1].
    """
    # math_type was a module-level constant (np.float64); defined locally
    # so the function is self-contained.
    math_type = np.float64
    max_int = np.iinfo(arr.dtype).max
    return arr.astype(math_type) / max_int
|
Convert an array from native integer dtype range to 0..1
scaling down linearly
| 4.301767 | 4.772754 | 0.901318 |
def scale_dtype(arr, dtype):
    """Convert an array scaled 0..1 to *dtype*, scaling up linearly.

    :param arr: ndarray with float values in [0, 1].
    :param dtype: target integer dtype (e.g. "uint8").
    :return: ndarray of *dtype* scaled to its full positive range.
    """
    max_int = np.iinfo(dtype).max
    return (arr * max_int).astype(dtype)
|
Convert an array from 0..1 to dtype, scaling up linearly
| 3.412909 | 3.467562 | 0.984239 |
def magick_to_rio(convert_opts):
    """Translate a limited subset of ImageMagick convert commands
    to rio color operations.

    Bug fix: the original declared ``global bands`` inside the nested
    helpers, leaking the band selection into (and depending on) module
    globals while the local ``bands = None`` was dead. A one-element
    list cell keeps the state local to this call.

    Parameters
    ----------
    convert_opts: String, imagemagick convert options

    Returns
    -------
    operations string, ordered rio color operations
    """
    ops = []
    bands = ["RGB"]  # current band selection; mutable cell for the helpers

    def set_band(x):
        if x.upper() == "RGB":
            x = "RGB"
        bands[0] = x.upper()

    def append_sig(arg):
        args = list(filter(None, re.split("[,x]+", arg)))
        if len(args) == 1:
            args.append(0.5)  # default bias of 50%
        elif len(args) == 2:
            # Percent notation, e.g. "20%" -> 0.2
            args[1] = float(args[1].replace("%", "")) / 100.0
        ops.append("sigmoidal {} {} {}".format(bands[0], *args))

    def append_gamma(arg):
        ops.append("gamma {} {}".format(bands[0], arg))

    def append_sat(arg):
        args = list(filter(None, re.split("[,x]+", arg)))
        # ignore args[0] (brightness); convert saturation % to proportion
        prop = float(args[1]) / 100
        ops.append("saturation {}".format(prop))

    # Walk tokens: flag tokens select the handler for the next value token.
    nextf = None
    for part in convert_opts.strip().split(" "):
        if part == "-channel":
            nextf = set_band
        elif part == "+channel":
            set_band("RGB")
            nextf = None
        elif part == "-sigmoidal-contrast":
            nextf = append_sig
        elif part == "-gamma":
            nextf = append_gamma
        elif part == "-modulate":
            nextf = append_sat
        else:
            if nextf:
                nextf(part)
            nextf = None
    return " ".join(ops)
|
Translate a limited subset of imagemagick convert commands
to rio color operations
Parameters
----------
convert_opts: String, imagemagick convert options
Returns
-------
operations string, ordered rio color operations
| 2.92764 | 3.00491 | 0.974285 |
def time_string(seconds):
    """Return time in seconds as a string formatted H:MM:SS.

    Hours are right-aligned to a minimum width of 2; minutes and seconds
    are zero-padded.

    :param seconds: elapsed time in seconds (rounded to nearest second).
    :return: formatted string, e.g. " 1:01:05".
    """
    s = int(round(seconds))  # round to nearest second
    h, s = divmod(s, 3600)  # get hours and remainder
    m, s = divmod(s, 60)  # split remainder into minutes and seconds
    return "%2i:%02i:%02i" % (h, m, s)
|
Returns time in seconds as a string formatted HHHH:MM:SS.
| 2.452749 | 2.301557 | 1.065691 |
def progress_report(
    curr, best, curr_score, best_score, step, totalsteps, accept, improv, elaps, remain
):
    """Report annealing progress as a human-readable block of text.

    NOTE(review): the original template literal was destroyed during
    extraction (leaving ``text = .format(**locals())``, a syntax error);
    this template is a reconstruction that uses every parameter -- confirm
    the exact wording against upstream if it matters.
    """
    text = """
 step {step} of {totalsteps}
 acceptance rate {accept}%, improvement rate {improv}%
 time elapsed {elaps}, est. remaining {remain}

 current formula: {curr}
   score (lower is better): {curr_score}
 best formula: {best}
   score (lower is better): {best_score}
""".format(
        **locals()
    )
    return text
|
Report progress
| 27.649345 | 20.009493 | 1.381811 |
def histogram_distance(arr1, arr2, bins=None):
    """Sum of squared differences between two normalized histograms.

    :param arr1: ndarray with values constrained to 0..1.
    :param arr2: ndarray with values constrained to 0..1.
    :param bins: optional bin edges; defaults to 10 equal bins over [0, 1].
    :return: sum of the squared error between the histograms.
    """
    eps = 1e-6
    # Guard the 0..1 contract on both inputs.
    assert arr1.min() > 0 - eps
    assert arr1.max() < 1 + eps
    assert arr2.min() > 0 - eps
    assert arr2.max() < 1 + eps
    if not bins:
        bins = [x / 10 for x in range(11)]
    # Normalize counts so each histogram sums to 1 (a distribution).
    hist1 = np.histogram(arr1, bins=bins)[0] / arr1.size
    hist2 = np.histogram(arr2, bins=bins)[0] / arr2.size
    assert abs(hist1.sum() - 1.0) < eps
    assert abs(hist2.sum() - 1.0) < eps
    sqerr = (hist1 - hist2) ** 2
    return sqerr.sum()
|
This function returns the sum of the squared error
Parameters:
two arrays constrained to 0..1
Returns:
sum of the squared error between the histograms
| 1.852405 | 1.928688 | 0.960448 |
def calc_downsample(w, h, target=400):
    """Calculate the downsampling ratio relative to *target*.

    Both branches divide the smaller of the two dimensions by ``target``
    (equivalent to ``min(w, h) / target``). NOTE(review): for images whose
    smaller dimension is below ``target`` this yields a ratio < 1, i.e. an
    upsample -- confirm that is the intended behavior.

    :param w: image width in pixels.
    :param h: image height in pixels.
    :param int target: desired size of the smaller dimension.
    :return: downsampling ratio (float under true division).
    """
    if w > h:
        return h / target
    elif h >= w:
        return w / target
|
Calculate downsampling value.
| 4.927862 | 4.24332 | 1.161322 |
# Optimizer entry point: read downsampled source and reference rasters,
# then simulated-anneal a rio color formula whose output histogram matches
# the reference. Prints the resulting `rio color` command.
global fig, txt, imgs
click.echo("Reading source data...", err=True)
with rasterio.open(source) as src:
    if downsample is None:
        ratio = calc_downsample(src.width, src.height)
    else:
        ratio = downsample
    w = int(src.width // ratio)
    h = int(src.height // ratio)
    rgb = src.read((1, 2, 3), out_shape=(3, h, w))
    orig_rgb = to_math_type(rgb)
click.echo("Reading reference data...", err=True)
with rasterio.open(reference) as ref:
    if downsample is None:
        ratio = calc_downsample(ref.width, ref.height)
    else:
        ratio = downsample
    w = int(ref.width / ratio)
    h = int(ref.height / ratio)
    rgb = ref.read((1, 2, 3), out_shape=(3, h, w))
    ref_rgb = to_math_type(rgb)
click.echo("Annealing...", err=True)
est = ColorEstimator(orig_rgb, ref_rgb)
if plot:
    # Optional live visualization: 4 panels updated by ColorEstimator.update.
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(20, 10))
    fig.suptitle("Color Formula Optimization", fontsize=18, fontweight="bold")
    txt = fig.text(0.02, 0.05, "foo", family="monospace", fontsize=16)
    # NOTE(review): no-op expression; presumably silences an unused-variable
    # warning -- confirm.
    type(txt)
    axs = (
        fig.add_subplot(1, 4, 1),
        fig.add_subplot(1, 4, 2),
        fig.add_subplot(1, 4, 3),
        fig.add_subplot(1, 4, 4),
    )
    fig.tight_layout()
    axs[0].set_title("Source")
    axs[1].set_title("Current Formula")
    axs[2].set_title("Best Formula")
    axs[3].set_title("Reference")
    imgs.append(axs[0].imshow(reshape_as_image(est.src)))
    imgs.append(axs[1].imshow(reshape_as_image(est.src)))
    imgs.append(axs[2].imshow(reshape_as_image(est.src)))
    imgs.append(axs[3].imshow(reshape_as_image(est.ref)))
    fig.show()
schedule = dict(
    tmax=25.0, # Max (starting) temperature
    tmin=1e-4, # Min (ending) temperature
    steps=steps, # Number of iterations
    updates=steps / 20, # Number of updates
)
est.set_schedule(schedule)
est.save_state_on_exit = False
optimal, score = est.anneal()
optimal["energy"] = score
ops = est.cmd(optimal)
click.echo("rio color -j4 {} {} {}".format(source, "/tmp/output.tif", ops))
|
def main(source, reference, downsample, steps, plot)
|
Given a source image and a reference image,
Find the rio color formula which results in an
output with similar histogram to the reference image.
Uses simulated annealing to determine optimal settings.
Increase the --downsample option to speed things up.
Increase the --steps to get better results (longer runtime).
| 2.670158 | 2.57964 | 1.035089 |
# ColorEstimator.move body: random annealing state change -- multiply one
# randomly chosen parameter by 0.95 or 1.05, re-picking the key while
# "bias" is already near its 1.0 ceiling.
k = random.choice(self.keys)
multiplier = random.choice((0.95, 1.05))
invalid_key = True
while invalid_key:
    # make sure bias doesn't exceed 1.0
    if k == "bias":
        if self.state[k] > 0.909:
            k = random.choice(self.keys)
            continue
    invalid_key = False
newval = self.state[k] * multiplier
self.state[k] = newval
|
def move(self)
|
Create a state change.
| 4.401003 | 4.202369 | 1.047267 |
# ColorEstimator.apply_color body: render the state dict to a rio color
# ops string, then apply each parsed operation to the array in order.
ops = self.cmd(state)
for func in parse_operations(ops):
    arr = func(arr)
return arr
|
def apply_color(self, arr, state)
|
Apply color formula to an array.
| 12.344332 | 10.635267 | 1.160698 |
# ColorEstimator.energy body: apply the current formula to a copy of the
# source image and score it against the reference with a per-band
# histogram distance (lower is better).
arr = self.src.copy()
arr = self.apply_color(arr, self.state)
scores = [histogram_distance(self.ref[i], arr[i]) for i in range(3)]
# Important: scale by 100 for readability
return sum(scores) * 100
|
def energy(self)
|
Calculate state's energy.
| 8.502007 | 8.54991 | 0.994397 |
# ColorEstimator.update body: print an annealing progress report and, when
# the optional matplotlib figure exists, refresh the current/best preview
# panels and the status text.
if acceptance is None:
    acceptance = 0
if improvement is None:
    improvement = 0
if step > 0:
    # Estimate remaining time from the average time per completed step.
    elapsed = time.time() - self.start
    remain = (self.steps - step) * (elapsed / step)
    # print('Time {} ({} Remaing)'.format(time_string(elapsed), time_string(remain)))
else:
    elapsed = 0
    remain = 0
curr = self.cmd(self.state)
curr_score = float(E)
best = self.cmd(self.best_state)
best_score = self.best_energy
report = progress_report(
    curr,
    best,
    curr_score,
    best_score,
    step,
    self.steps,
    acceptance * 100,
    improvement * 100,
    time_string(elapsed),
    time_string(remain),
)
print(report)
if fig:
    imgs[1].set_data(
        reshape_as_image(self.apply_color(self.src.copy(), self.state))
    )
    imgs[2].set_data(
        reshape_as_image(self.apply_color(self.src.copy(), self.best_state))
    )
    if txt:
        txt.set_text(report)
    fig.canvas.draw()
|
def update(self, step, T, E, acceptance, improvement)
|
Print progress.
| 3.262991 | 3.213576 | 1.015377 |
# riomucho worker: read one window, scale to 0..1 floats, apply the simple
# atmospheric correction, then scale back up to the output dtype.
src = srcs[0]
rgb = src.read(window=window)
rgb = to_math_type(rgb)
atmos = simple_atmo(rgb, args["atmo"], args["contrast"], args["bias"])
# should be scaled 0 to 1, scale to outtype
return scale_dtype(atmos, args["out_dtype"])
|
def atmos_worker(srcs, window, ij, args)
|
A simple atmospheric correction user function.
| 8.387779 | 8.461586 | 0.991277 |
# riomucho worker: read one window, scale to 0..1 floats, apply each parsed
# color operation in order, then scale back up to the output dtype.
src = srcs[0]
arr = src.read(window=window)
arr = to_math_type(arr)
for func in parse_operations(args["ops_string"]):
    arr = func(arr)
# scaled 0 to 1, now scale to outtype
return scale_dtype(arr, args["out_dtype"])
|
def color_worker(srcs, window, ij, args)
|
A user function.
| 8.6116 | 8.936687 | 0.963623 |
# add_circle body: place num_sections points on a circle of given radius
# around x0 in the x-y plane (optionally transformed by R), connect them
# with Gmsh circle arcs, form a line loop and (optionally) a plane surface,
# and return a record object describing the construction.
if holes is None:
    holes = []
else:
    assert make_surface
# Define points that make the circle (midpoint and the four cardinal
# directions).
X = numpy.zeros((num_sections + 1, len(x0)))
if num_sections == 4:
    # For accuracy, the points are provided explicitly.
    X[1:, [0, 1]] = numpy.array(
        [[radius, 0.0], [0.0, radius], [-radius, 0.0], [0.0, -radius]]
    )
else:
    X[1:, [0, 1]] = numpy.array(
        [
            [
                radius * numpy.cos(2 * numpy.pi * k / num_sections),
                radius * numpy.sin(2 * numpy.pi * k / num_sections),
            ]
            for k in range(num_sections)
        ]
    )
if R is not None:
    assert numpy.allclose(
        abs(numpy.linalg.eigvals(R)), numpy.ones(X.shape[1])
    ), "The transformation matrix doesn't preserve circles; at least one eigenvalue lies off the unit circle."
    X = numpy.dot(X, R.T)
X += x0
# Add Gmsh Points.
p = [self.add_point(x, lcar=lcar) for x in X]
# Define the circle arcs.
arcs = [self.add_circle_arc(p[k], p[0], p[k + 1]) for k in range(1, len(p) - 1)]
arcs.append(self.add_circle_arc(p[-1], p[0], p[1]))
if compound:
    # Compound syntax differs between Gmsh major versions 3 and 4.
    if self._gmsh_major() == 3:
        arcs = [self.add_compound_line(arcs)]
    elif self._gmsh_major() == 4:
        self.add_raw_code(
            "Compound Curve{{{}}};".format(",".join([arc.id for arc in arcs]))
        )
line_loop = self.add_line_loop(arcs)
if make_surface:
    plane_surface = self.add_plane_surface(line_loop, holes)
    if compound and self._gmsh_major() == 4:
        self.add_raw_code("Compound Surface{{{}}};".format(plane_surface.id))
else:
    plane_surface = None
# Lightweight record of the construction, returned to the caller.
class Circle(object):
    def __init__(
        self,
        x0,
        radius,
        R,
        compound,
        num_sections,
        holes,
        line_loop,
        plane_surface,
        lcar=None,
    ):
        self.x0 = x0
        self.radius = radius
        self.lcar = lcar
        self.R = R
        self.compound = compound
        self.num_sections = num_sections
        self.holes = holes
        self.line_loop = line_loop
        self.plane_surface = plane_surface
        return
return Circle(
    x0,
    radius,
    R,
    compound,
    num_sections,
    holes,
    line_loop,
    plane_surface,
    lcar=lcar,
)
|
def add_circle(
self,
x0,
radius,
lcar=None,
R=None,
compound=False,
num_sections=3,
holes=None,
make_surface=True,
)
|
Add circle in the :math:`x`-:math:`y`-plane.
| 2.416158 | 2.410197 | 1.002473 |
if _is_string(string_or_list):
self._GMSH_CODE.append(string_or_list)
else:
assert isinstance(string_or_list, list)
for string in string_or_list:
self._GMSH_CODE.append(string)
return
|
def add_raw_code(self, string_or_list)
|
Add raw Gmsh code.
| 2.714393 | 2.139814 | 1.268518 |
self.add_comment("Torus")
# Add circle
x0t = numpy.dot(R, numpy.array([0.0, orad, 0.0]))
# Get circles in y-z plane
Rc = numpy.array([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
c = self.add_circle(x0 + x0t, irad, lcar=lcar, R=numpy.dot(R, Rc))
rot_axis = [0.0, 0.0, 1.0]
rot_axis = numpy.dot(R, rot_axis)
point_on_rot_axis = [0.0, 0.0, 0.0]
point_on_rot_axis = numpy.dot(R, point_on_rot_axis) + x0
# Form the torus by extruding the circle three times by 2/3*pi. This
# works around the inability of Gmsh to extrude by pi or more. The
# Extrude() macro returns an array; the first [0] entry in the array is
# the entity that has been extruded at the far end. This can be used
# for the following Extrude() step. The second [1] entry of the array
# is the surface that was created by the extrusion.
previous = c.line_loop.lines
angle = "2*Pi/3"
all_surfaces = []
for i in range(3):
self.add_comment("Round no. {}".format(i + 1))
for k, p in enumerate(previous):
# ts1[] = Extrude {{0,0,1}, {0,0,0}, 2*Pi/3}{Line{tc1};};
# ...
top, surf, _ = self.extrude(
p,
rotation_axis=rot_axis,
point_on_axis=point_on_rot_axis,
angle=angle,
)
all_surfaces.append(surf)
previous[k] = top
# compound_surface = CompoundSurface(all_surfaces)
surface_loop = self.add_surface_loop(all_surfaces)
vol = self.add_volume(surface_loop)
# The newline at the end is essential:
# If a GEO file doesn't end in a newline, Gmsh will report a syntax
# error.
self.add_comment("\n")
return vol
|
def _add_torus_extrude_lines(
self, irad, orad, lcar=None, R=numpy.eye(3), x0=numpy.array([0.0, 0.0, 0.0])
)
|
Create Gmsh code for the torus in the x-y plane under the coordinate
transformation
.. math::
\\hat{x} = R x + x_0.
:param irad: inner radius of the torus
:param orad: outer radius of the torus
| 4.413547 | 4.49758 | 0.981316 |
self.add_comment(76 * "-")
self.add_comment("Torus")
# Add circle
x0t = numpy.dot(R, numpy.array([0.0, orad, 0.0]))
Rc = numpy.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
c = self.add_circle(x0 + x0t, irad, lcar=lcar, R=numpy.dot(R, Rc))
rot_axis = [0.0, 0.0, 1.0]
rot_axis = numpy.dot(R, rot_axis)
point_on_rot_axis = [0.0, 0.0, 0.0]
point_on_rot_axis = numpy.dot(R, point_on_rot_axis) + x0
# Form the torus by extruding the circle three times by 2/3*pi. This
# works around the inability of Gmsh to extrude by pi or more. The
# Extrude() macro returns an array; the first [0] entry in the array is
# the entity that has been extruded at the far end. This can be used
# for the following Extrude() step. The second [1] entry of the array
# is the surface that was created by the extrusion. The third [2-end]
# is a list of all the planes of the lateral surface.
previous = c.plane_surface
all_volumes = []
num_steps = 3
for _ in range(num_steps):
top, vol, _ = self.extrude(
previous,
rotation_axis=rot_axis,
point_on_axis=point_on_rot_axis,
angle="2*Pi/{}".format(num_steps),
)
previous = top
all_volumes.append(vol)
if self._gmsh_major() == 3:
# This actually returns the volume, but the gmsh 4 version doesn't have that
# feature. Hence, for compatibility, also ditch it here.
self.add_compound_volume(all_volumes)
else:
assert self._gmsh_major() == 4
self.add_raw_code(
"Compound Volume{{{}}};".format(",".join(v.id for v in all_volumes))
)
self.add_comment(76 * "-" + "\n")
return
|
def _add_torus_extrude_circle(
self, irad, orad, lcar=None, R=numpy.eye(3), x0=numpy.array([0.0, 0.0, 0.0])
)
|
Create Gmsh code for the torus under the coordinate transformation
.. math::
\\hat{x} = R x + x_0.
:param irad: inner radius of the torus
:param orad: outer radius of the torus
| 4.128109 | 4.216258 | 0.979093 |
self.add_comment("Define rectangle.")
X = numpy.array(
[
[0.0, outer_radius, -0.5 * length],
[0.0, outer_radius, +0.5 * length],
[0.0, inner_radius, +0.5 * length],
[0.0, inner_radius, -0.5 * length],
]
)
# Apply transformation.
X = [numpy.dot(R, x) + x0 for x in X]
# Create points set.
p = [self.add_point(x, lcar=lcar) for x in X]
# Define edges.
e = [
self.add_line(p[0], p[1]),
self.add_line(p[1], p[2]),
self.add_line(p[2], p[3]),
self.add_line(p[3], p[0]),
]
rot_axis = [0.0, 0.0, 1.0]
rot_axis = numpy.dot(R, rot_axis)
point_on_rot_axis = [0.0, 0.0, 0.0]
point_on_rot_axis = numpy.dot(R, point_on_rot_axis) + x0
# Extrude all edges three times by 2*Pi/3.
previous = e
angle = "2*Pi/3"
all_surfaces = []
# com = []
self.add_comment("Extrude in 3 steps.")
for i in range(3):
self.add_comment("Step {}".format(i + 1))
for k, p in enumerate(previous):
# ts1[] = Extrude {{0,0,1}, {0,0,0}, 2*Pi/3}{Line{tc1};};
top, surf, _ = self.extrude(
p,
rotation_axis=rot_axis,
point_on_axis=point_on_rot_axis,
angle=angle,
)
# if k==0:
# com.append(surf)
# else:
# all_names.appends(surf)
all_surfaces.append(surf)
previous[k] = top
#
# cs = CompoundSurface(com)
# Now just add surface loop and volume.
# all_surfaces = all_names + [cs]
surface_loop = self.add_surface_loop(all_surfaces)
vol = self.add_volume(surface_loop)
return vol
|
def _add_pipe_by_rectangle_rotation(
self,
outer_radius,
inner_radius,
length,
R=numpy.eye(3),
x0=numpy.array([0.0, 0.0, 0.0]),
lcar=None,
)
|
Hollow cylinder.
Define a rectangle, extrude it by rotation.
| 3.118899 | 3.100935 | 1.005793 |
# Define ring which to Extrude by translation.
Rc = numpy.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
c_inner = self.add_circle(
x0, inner_radius, lcar=lcar, R=numpy.dot(R, Rc), make_surface=False
)
circ = self.add_circle(
x0, outer_radius, lcar=lcar, R=numpy.dot(R, Rc), holes=[c_inner.line_loop]
)
# Now Extrude the ring surface.
_, vol, _ = self.extrude(
circ.plane_surface, translation_axis=numpy.dot(R, [length, 0, 0])
)
return vol
|
def _add_pipe_by_circle_extrusion(
self,
outer_radius,
inner_radius,
length,
R=numpy.eye(3),
x0=numpy.array([0.0, 0.0, 0.0]),
lcar=None,
)
|
Hollow cylinder.
Define a ring, extrude it by translation.
| 3.489988 | 3.362623 | 1.037877 |
d = {1: "Line", 2: "Surface", 3: "Volume"}
self._GMSH_CODE.append(
"Translate {{{}}} {{ {}{{{}}}; }}".format(
", ".join([str(co) for co in vector]),
d[input_entity.dimension],
input_entity.id,
)
)
return
|
def translate(self, input_entity, vector)
|
Translates input_entity itself by vector.
Changes the input object.
| 6.621375 | 6.793502 | 0.974663 |
d = {1: "Line", 2: "Surface", 3: "Volume"}
entity = "{}{{{}}};".format(d[input_entity.dimension], input_entity.id)
if duplicate:
entity = "Duplicata{{{}}}".format(entity)
self._GMSH_CODE.append(
"Symmetry {{{}}} {{{}}}".format(
", ".join([str(co) for co in coefficients]), entity
)
)
return
|
def symmetry(self, input_entity, coefficients, duplicate=True)
|
Transforms all elementary entities symmetrically to a plane. The vector
should contain four expressions giving the coefficients of the plane's equation.
| 5.261308 | 5.768439 | 0.912085 |
self._BOOLEAN_ID += 1
# assert that all entities are of the same dimensionality
dim = None
legal_dim_types = {1: "Line", 2: "Surface", 3: "Volume"}
for ldt in legal_dim_types:
if input_entities[0].dimension == ldt:
dim = ldt
break
assert dim is not None, "Illegal input type '{}' for Boolean operation.".format(
type(input_entities[0])
)
for e in input_entities[1:] + tool_entities:
assert (
e.dimension == dim
), "Incompatible input type '{}' for Boolean operation.".format(type(e))
name = "bo{}".format(self._BOOLEAN_ID)
input_delete = "Delete;" if delete_first else ""
tool_delete = "Delete;" if delete_other else ""
legal_dim_type = legal_dim_types[dim]
if input_entities:
formatted_input_entities = (
";".join(["%s{%s}" % (legal_dim_type, e.id) for e in input_entities])
+ ";"
)
else:
formatted_input_entities = ""
if tool_entities:
formatted_tool_entities = (
";".join(["%s{%s}" % (legal_dim_type, e.id) for e in tool_entities])
+ ";"
)
else:
formatted_tool_entities = ""
self._GMSH_CODE.append(
# I wonder what this line does in Lisp. ;)
# '{}[] = {}{{{} {{{}}}; {}}} {{{} {{{}}}; {}}};'
# .format(
# name,
# operation,
# legal_dim_types[dim],
# ';'.join(e.id for e in input_entities),
# 'Delete;' if delete_first else '',
# legal_dim_types[dim],
# ';'.join(e.id for e in tool_entities),
# 'Delete;' if delete_other else ''
# ))
"%(name)s[] = %(op)s{ %(ientities)s %(idelete)s } { %(tentities)s %(tdelete)s};"
% {
"name": name,
"op": operation,
"ientities": formatted_input_entities,
"idelete": input_delete,
"tentities": formatted_tool_entities,
"tdelete": tool_delete,
}
)
mapping = {"Line": None, "Surface": SurfaceBase, "Volume": VolumeBase}
return mapping[legal_dim_types[dim]](id0=name, is_list=True)
|
def _boolean_operation(
self,
operation,
input_entities,
tool_entities,
delete_first=True,
delete_other=True,
)
|
Boolean operations, see
https://gmsh.info/doc/texinfo/gmsh.html#Boolean-operations input_entity
and tool_entity are called object and tool in gmsh documentation.
| 3.076843 | 2.958345 | 1.040056 |
assert len(entities) > 1
return self._boolean_operation(
"BooleanIntersection",
[entities[0]],
entities[1:],
delete_first=delete_first,
delete_other=delete_other,
)
|
def boolean_intersection(self, entities, delete_first=True, delete_other=True)
|
Boolean intersection, see
https://gmsh.info/doc/texinfo/gmsh.html#Boolean-operations input_entity
and tool_entity are called object and tool in gmsh documentation.
| 3.218185 | 3.045224 | 1.056798 |
out = self._boolean_operation(
"BooleanUnion",
[entities[0]],
entities[1:],
delete_first=delete_first,
delete_other=delete_other,
)
# Cannot add Compound Surface yet; see
# <https://gitlab.onelab.info/gmsh/gmsh/issues/525>.
# if compound:
# self._GMSH_CODE.append("Compound Surface {{{}}};".format(out.id))
return out
|
def boolean_union(self, entities, delete_first=True, delete_other=True)
|
Boolean union, see
https://gmsh.info/doc/texinfo/gmsh.html#Boolean-operations input_entity
and tool_entity are called object and tool in gmsh documentation.
| 5.83848 | 5.727369 | 1.0194 |
# Body of rotation_matrix(u, theta): Rodrigues' rotation formula,
# R = cos(t) I + sin(t) [u]_x + (1 - cos(t)) u u^T.
# The formula assumes a unit-length rotation axis, hence the guard.
assert numpy.isclose(numpy.inner(u, u), 1.0), "the rotation axis must be unitary"
# Cross-product matrix.
cpm = numpy.array([[0.0, -u[2], u[1]], [u[2], 0.0, -u[0]], [-u[1], u[0], 0.0]])
c = numpy.cos(theta)
s = numpy.sin(theta)
R = numpy.eye(3) * c + s * cpm + (1.0 - c) * numpy.outer(u, u)
return R
|
def rotation_matrix(u, theta)
|
Return matrix that implements the rotation around the vector :math:`u`
by the angle :math:`\\theta`, cf.
https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle.
:param u: rotation vector
:param theta: rotation angle
| 2.158355 | 2.328215 | 0.927043 |
def getexistingdirectory(parent=None, caption='', basedir='',
                         options=QFileDialog.ShowDirsOnly):
    """Wrapper around QFileDialog.getExistingDirectory static method.

    On Windows, stdout/stderr are temporarily silenced to suppress spurious
    output from the native dialog.  The result is always returned as a text
    string (PyQt API #1 returns a QString otherwise).
    """
    # Calling QFileDialog static method
    if sys.platform == "win32":
        # On Windows platforms: redirect standard outputs
        _temp1, _temp2 = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = None, None
    try:
        result = QFileDialog.getExistingDirectory(parent, caption, basedir,
                                                  options)
    finally:
        if sys.platform == "win32":
            # On Windows platforms: restore standard outputs
            sys.stdout, sys.stderr = _temp1, _temp2
    if not is_text_string(result):
        # PyQt API #1
        result = to_text_string(result)
    return result
|
Wrapper around QtGui.QFileDialog.getExistingDirectory static method
Compatible with PyQt >=v4.4 (API #1 and #2) and PySide >=v1.0
| null | null | null |
|
def getopenfilename(parent=None, caption='', basedir='', filters='',
                    selectedfilter='', options=None):
    """Wrapper around QFileDialog.getOpenFileName static method.

    Returns a (filename, selectedfilter) tuple; both are empty strings when
    the dialog is canceled.
    """
    return _qfiledialog_wrapper('getOpenFileName', parent=parent,
                                caption=caption, basedir=basedir,
                                filters=filters, selectedfilter=selectedfilter,
                                options=options)
|
Wrapper around QtGui.QFileDialog.getOpenFileName static method
Returns a tuple (filename, selectedfilter) -- when dialog box is canceled,
returns a tuple of empty strings
Compatible with PyQt >=v4.4 (API #1 and #2) and PySide >=v1.0
| null | null | null |
|
def getopenfilenames(parent=None, caption='', basedir='', filters='',
                     selectedfilter='', options=None):
    """Wrapper around QFileDialog.getOpenFileNames static method.

    Returns a (filenames, selectedfilter) tuple; (empty list, empty string)
    when the dialog is canceled.
    """
    return _qfiledialog_wrapper('getOpenFileNames', parent=parent,
                                caption=caption, basedir=basedir,
                                filters=filters, selectedfilter=selectedfilter,
                                options=options)
|
Wrapper around QtGui.QFileDialog.getOpenFileNames static method
Returns a tuple (filenames, selectedfilter) -- when dialog box is canceled,
returns a tuple (empty list, empty string)
Compatible with PyQt >=v4.4 (API #1 and #2) and PySide >=v1.0
| null | null | null |
|
def getsavefilename(parent=None, caption='', basedir='', filters='',
                    selectedfilter='', options=None):
    """Wrapper around QFileDialog.getSaveFileName static method.

    Returns a (filename, selectedfilter) tuple; both are empty strings when
    the dialog is canceled.
    """
    return _qfiledialog_wrapper('getSaveFileName', parent=parent,
                                caption=caption, basedir=basedir,
                                filters=filters, selectedfilter=selectedfilter,
                                options=options)
|
Wrapper around QtGui.QFileDialog.getSaveFileName static method
Returns a tuple (filename, selectedfilter) -- when dialog box is canceled,
returns a tuple of empty strings
Compatible with PyQt >=v4.4 (API #1 and #2) and PySide >=v1.0
| null | null | null |
|
# Body of patch_qcombobox(QComboBox): monkey-patch QComboBox so that
# arbitrary Python objects can be used as userData.  Each object is wrapped
# in userDataWrapper on the way in and unwrapped on the way out.
# Indentation reconstructed.
from ..QtGui import QIcon
from ..QtCore import Qt, QObject

class userDataWrapper():
    # Holds a Python object so Qt stores a QObject-safe wrapper.
    def __init__(self, data):
        self.data = data

_addItem = QComboBox.addItem

def addItem(self, *args, **kwargs):
    # A trailing positional argument (after text, or icon+text) is userData.
    if len(args) == 3 or (not isinstance(args[0], QIcon)
                          and len(args) == 2):
        args, kwargs['userData'] = args[:-1], args[-1]
    if 'userData' in kwargs:
        kwargs['userData'] = userDataWrapper(kwargs['userData'])
    _addItem(self, *args, **kwargs)

_insertItem = QComboBox.insertItem

def insertItem(self, *args, **kwargs):
    # Same trailing-argument convention, shifted by the index argument.
    if len(args) == 4 or (not isinstance(args[1], QIcon)
                          and len(args) == 3):
        args, kwargs['userData'] = args[:-1], args[-1]
    if 'userData' in kwargs:
        kwargs['userData'] = userDataWrapper(kwargs['userData'])
    _insertItem(self, *args, **kwargs)

_setItemData = QComboBox.setItemData

def setItemData(self, index, value, role=Qt.UserRole):
    value = userDataWrapper(value)
    _setItemData(self, index, value, role=role)

_itemData = QComboBox.itemData

def itemData(self, index, role=Qt.UserRole):
    userData = _itemData(self, index, role=role)
    if isinstance(userData, userDataWrapper):
        userData = userData.data
    return userData

def findData(self, value):
    # Linear scan using the unwrapping itemData above.
    for i in range(self.count()):
        if self.itemData(i) == value:
            return i
    return -1

QComboBox.addItem = addItem
QComboBox.insertItem = insertItem
QComboBox.setItemData = setItemData
QComboBox.itemData = itemData
QComboBox.findData = findData
|
def patch_qcombobox(QComboBox)
|
In PySide, using Python objects as userData in QComboBox causes
Segmentation faults under certain conditions. Even in cases where it
doesn't, findData does not work correctly. Likewise, findData also does not
work correctly with Python objects when using PyQt4. On the other hand,
PyQt5 deals with this case correctly. We therefore patch QComboBox when
using PyQt4 and PySide to avoid issues.
| 1.727875 | 1.702693 | 1.01479 |
# Body of to_czml: write a CesiumJS scenario file for the given traffic.
# Indentation reconstructed.  Bug fix: the final log line was an f-string
# with no placeholder and never reported the actual filename.
if isinstance(traffic, Traffic):
    # Drop rows with NaN altitude ("x == x" is False for NaN).
    if "baro_altitude" in traffic.data.columns:
        traffic = traffic.query("baro_altitude == baro_altitude")
    elif "altitude" in traffic.data.columns:
        traffic = traffic.query("altitude == altitude")

if minimum_time is not None:
    minimum_time = to_datetime(minimum_time)
    traffic = cast(Traffic, traffic.query(f"timestamp >= '{minimum_time}'"))

if isinstance(filename, str):
    filename = Path(filename)
if not filename.parent.exists():
    filename.parent.mkdir(parents=True)

start = format_ts(traffic.start_time)
availability = f"{start}/{format_ts(traffic.end_time)}"
# CZML document header packet.
export = [
    {
        "id": "document",
        "name": f"Traffic_{start}",
        "version": "1.0",
        "author": getpass.getuser(),
        "clock": {
            "interval": availability,
            "currentTime": start,
            "multiplier": _CZML_Params.default_time_multiplier,
        },
    }
]
for flight in traffic:
    for elt in export_flight(flight):
        export.append(elt)

with filename.open("w") as fh:
    json.dump(export, fh, indent=2)

logging.info("Scenario file %s written", filename)
|
def to_czml(
traffic: Union[Traffic, SO6],
filename: Union[str, Path],
minimum_time: Optional[timelike] = None,
) -> None
|
Generates a CesiumJS scenario file.
| 2.974452 | 2.864708 | 1.038309 |
return ax.scatter(
self.df.longitude,
self.df.latitude,
s=s,
transform=PlateCarree(),
c=-self.df.altitude,
cmap=cmap,
**kwargs,
)
|
def plot(
self, ax: GeoAxesSubplot, cmap: str = "inferno", s: int = 5, **kwargs
) -> Artist
|
Plotting function. All arguments are passed to ax.scatter
| 2.781139 | 2.711514 | 1.025677 |
if "facecolor" not in kwargs:
kwargs["facecolor"] = "None"
if "edgecolor" not in kwargs:
kwargs["edgecolor"] = ax._get_lines.get_next_color()
if "projection" in ax.__dict__:
return ax.add_geometries([self.shape], crs=PlateCarree(), **kwargs)
else:
return ax.add_patch(
MplPolygon(list(self.shape.exterior.coords), **kwargs)
)
|
def plot(self, ax: GeoAxesSubplot, **kwargs) -> Artist
|
Plotting function. All arguments are passed to the geometry
| 2.960919 | 3.099264 | 0.955362 |
what = "own" if (own and self.auth is not None) else "all"
if bounds is not None:
try:
# thinking of shapely bounds attribute (in this order)
# I just don't want to add the shapely dependency here
west, south, east, north = bounds.bounds # type: ignore
except AttributeError:
west, south, east, north = bounds
what += f"?lamin={south}&lamax={north}&lomin={west}&lomax={east}"
c = requests.get(
f"https://opensky-network.org/api/states/{what}", auth=self.auth
)
if c.status_code != 200:
raise ValueError(c.content.decode())
r = pd.DataFrame.from_records(
c.json()["states"], columns=self._json_columns
)
r = r.drop(["origin_country", "spi", "sensors"], axis=1)
r = r.dropna()
return StateVectors(
self._format_dataframe(r, nautical_units=True), self
)
|
def api_states(
self,
own: bool = False,
bounds: Union[
BaseGeometry, Tuple[float, float, float, float], None
] = None,
) -> StateVectors
|
Returns the current state vectors from OpenSky REST API.
If own parameter is set to True, returns only the state vectors
associated to own sensors (requires authentication)
bounds parameter can be a shape or a tuple of float.
Official documentation
----------------------
Limitations for anonymous (unauthenticated) users
Anonymous are those users who access the API without using credentials.
The limitations for anonymous users are:
Anonymous users can only get the most recent state vectors, i.e. the
time parameter will be ignored. Anonymous users can only retrieve data
with a time resolution of 10 seconds. That means, the API will return
state vectors for time now − (now mod 10)
Limitations for OpenSky users
An OpenSky user is anybody who uses a valid OpenSky account (see below)
to access the API. The rate limitations for OpenSky users are:
- OpenSky users can retrieve data of up to 1 hour in the past. If the
time parameter has a value t < now−3600 the API will return
400 Bad Request.
- OpenSky users can retrieve data with a time resolution of 5 seconds.
That means, if the time parameter was set to t , the API will return
state vectors for time t−(t mod 5).
| 4.418771 | 4.562349 | 0.96853 |
c = requests.get(
f"https://opensky-network.org/api/tracks/?icao24={icao24}"
)
if c.status_code != 200:
raise ValueError(c.content.decode())
json = c.json()
df = pd.DataFrame.from_records(
json["path"],
columns=[
"timestamp",
"latitude",
"longitude",
"altitude",
"track",
"onground",
],
).assign(icao24=json["icao24"], callsign=json["callsign"])
return Flight(self._format_dataframe(df, nautical_units=True))
|
def api_tracks(self, icao24: str) -> Flight
|
Returns a Flight corresponding to a given aircraft.
Official documentation
----------------------
Retrieve the trajectory for a certain aircraft at a given time. The
trajectory is a list of waypoints containing position, barometric
altitude, true track and an on-ground flag.
In contrast to state vectors, trajectories do not contain all
information we have about the flight, but rather show the aircraft’s
general movement pattern. For this reason, waypoints are selected among
available state vectors given the following set of rules:
- The first point is set immediately after the aircraft’s expected
departure, or after the network received the first position when the
aircraft entered its reception range.
- The last point is set right before the aircraft’s expected arrival, or
the aircraft left the networks reception range.
- There is a waypoint at least every 15 minutes when the aircraft is
in-flight.
- A waypoint is added if the aircraft changes its track more than 2.5°.
- A waypoint is added if the aircraft changes altitude by more than 100m
(~330ft).
- A waypoint is added if the on-ground state changes.
Tracks are strongly related to flights. Internally, we compute flights
and tracks within the same processing step. As such, it may be
beneficial to retrieve a list of flights with the API methods from
above, and use these results with the given time stamps to retrieve
detailed track information.
| 3.099209 | 3.276657 | 0.945845 |
from .. import airports
c = requests.get(
f"https://opensky-network.org/api/routes?callsign={callsign}"
)
if c.status_code == 404:
raise ValueError("Unknown callsign")
if c.status_code != 200:
raise ValueError(c.content.decode())
json = c.json()
return tuple(airports[a] for a in json["route"])
|
def api_routes(self, callsign: str) -> Tuple[Airport, ...]
|
Returns the route associated to a callsign.
| 2.971398 | 2.808924 | 1.057842 |
if begin is None:
begin = round_time(datetime.now(timezone.utc), by=timedelta(days=1))
begin = to_datetime(begin)
if end is None:
end = begin + timedelta(days=1)
else:
end = to_datetime(end)
begin = int(begin.timestamp())
end = int(end.timestamp())
c = requests.get(
f"https://opensky-network.org/api/flights/aircraft"
f"?icao24={icao24}&begin={begin}&end={end}"
)
if c.status_code != 200:
raise ValueError(c.content.decode())
return (
pd.DataFrame.from_records(c.json())[
[
"firstSeen",
"lastSeen",
"icao24",
"callsign",
"estDepartureAirport",
"estArrivalAirport",
]
]
.assign(
firstSeen=lambda df: pd.to_datetime(
df.firstSeen * 1e9
).dt.tz_localize("utc"),
lastSeen=lambda df: pd.to_datetime(
df.lastSeen * 1e9
).dt.tz_localize("utc"),
)
.sort_values("lastSeen")
)
|
def api_aircraft(
self,
icao24: str,
begin: Optional[timelike] = None,
end: Optional[timelike] = None,
) -> pd.DataFrame
|
Returns a flight table associated to an aircraft.
Official documentation
----------------------
This API call retrieves flights for a particular aircraft within a
certain time interval. Resulting flights departed and arrived within
[begin, end]. If no flights are found for the given period, HTTP status
404 - Not found is returned with an empty response body.
| 2.086445 | 2.20181 | 0.947604 |
today = round_time(datetime.now(timezone.utc), by=timedelta(days=1))
c = requests.get(
f"https://opensky-network.org/api/sensor/myStats"
f"?days={int(today.timestamp())}",
auth=self.auth,
)
if c.status_code != 200:
raise ValueError(c.content.decode())
return set(c.json()[0]["stats"].keys())
|
def api_sensors(self) -> Set[str]
|
The set of sensors serials you own (require authentication).
| 4.290527 | 4.109511 | 1.044048 |
if date is None:
date = round_time(datetime.now(timezone.utc), by=timedelta(days=1))
else:
date = to_datetime(date)
date = int(date.timestamp())
c = requests.get(
f"https://opensky-network.org/api/range/days"
f"?days={date}&serials={serial}"
)
if c.status_code != 200:
raise ValueError(c.content.decode())
return SensorRange(c.json())
|
def api_range(
self, serial: str, date: Optional[timelike] = None
) -> SensorRange
|
Wraps a polygon representing a sensor's range.
By default, returns the current range. Otherwise, you may enter a
specific day (as a string, as an epoch or as a datetime)
| 3.428815 | 3.583991 | 0.956703 |
if isinstance(airport, str):
from .. import airports
airport_code = airports[airport].icao
else:
airport_code = airport.icao
if begin is None:
begin = round_time(datetime.now(timezone.utc), by=timedelta(days=1))
begin = to_datetime(begin)
if end is None:
end = begin + timedelta(days=1)
else:
end = to_datetime(end)
begin = int(begin.timestamp())
end = int(end.timestamp())
c = requests.get(
f"https://opensky-network.org/api/flights/arrival"
f"?begin={begin}&airport={airport_code}&end={end}"
)
if c.status_code != 200:
raise ValueError(c.content.decode())
return (
pd.DataFrame.from_records(c.json())[
[
"firstSeen",
"lastSeen",
"icao24",
"callsign",
"estDepartureAirport",
"estArrivalAirport",
]
]
.assign(
firstSeen=lambda df: pd.to_datetime(
df.firstSeen * 1e9
).dt.tz_localize("utc"),
lastSeen=lambda df: pd.to_datetime(
df.lastSeen * 1e9
).dt.tz_localize("utc"),
)
.sort_values("lastSeen")
)
|
def api_arrival(
self,
airport: Union[str, Airport],
begin: Optional[timelike] = None,
end: Optional[timelike] = None,
) -> pd.DataFrame
|
Returns a flight table associated to an airport.
By default, returns the current table. Otherwise, you may enter a
specific date (as a string, as an epoch or as a datetime)
Official documentation
----------------------
Retrieve flights for a certain airport which arrived within a given time
interval [begin, end]. If no flights are found for the given period,
HTTP status 404 - Not found is returned with an empty response body.
| 2.159962 | 2.233968 | 0.966872 |
# Body of start: minimum timestamp over all rows; also cached into a
# `start` column so later operations can read it without recomputing.
start = self.data.timestamp.min()
self.data = self.data.assign(start=start)
return start
|
def start(self) -> pd.Timestamp
|
Returns the minimum timestamp value of the DataFrame, caching it in a `start` column.
| 5.059557 | 3.97067 | 1.274233 |
if "number" not in self.data.columns:
return None
if all(self.data.number.isna()):
return None
tmp = set(self.data.number)
if len(tmp) == 1:
return tmp.pop()
logging.warn("Several numbers for one flight, consider splitting")
return tmp
|
def number(self) -> Optional[Union[str, Set[str]]]
|
Returns the unique number value(s) of the DataFrame.
| 4.163469 | 4.188702 | 0.993976 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.