code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if self._resolved_time is None or self.source_clock_overflow_period is None: self._resolved_time = decimal.Decimal(source_clock_sample) self._prev_source_sample = source_clock_sample self._prev_target_sample = target_clock_sample else: # Time between updates in the target clock domain tgt_delta = target_clock_sample - self._prev_target_sample self._prev_target_sample = target_clock_sample assert tgt_delta >= 0 # Time between updates in the source clock domain src_delta = source_clock_sample - self._prev_source_sample self._prev_source_sample = source_clock_sample # Using the target clock we can resolve the integer ambiguity (number of overflows) full_cycles = int(round((tgt_delta - src_delta) / float(self.source_clock_overflow_period), 0)) # Updating the source clock now; in two steps, in order to avoid error accumulation in floats self._resolved_time += decimal.Decimal(full_cycles * self.source_clock_overflow_period) self._resolved_time += decimal.Decimal(src_delta) return self._resolved_time
def update(self, source_clock_sample, target_clock_sample)
Args: source_clock_sample: Sample of the source clock, in seconds target_clock_sample: Sample of the target clock, in seconds Returns: Resolved absolute source clock value
3.338574
3.33796
1.000184
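For illustration, a minimal standalone sketch of the overflow-resolution arithmetic in update() above; the wrap period and sample values are assumptions, not taken from the original code.

# Illustrative sketch: the target clock advances monotonically while the source
# clock wraps, so the number of missed wrap-arounds falls out of the delta difference.
SOURCE_CLOCK_OVERFLOW_PERIOD = 65.536   # assumed wrap period, for demonstration only

def resolve_full_cycles(src_delta, tgt_delta):
    # Same rounding rule as in update(): (tgt_delta - src_delta) / period
    return int(round((tgt_delta - src_delta) / SOURCE_CLOCK_OVERFLOW_PERIOD))

# The source wrapped once between samples: it shows 1.0 s while 66.5 s really passed.
print(resolve_full_cycles(src_delta=1.0, tgt_delta=66.5))   # -> 1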
pi = float(self._source_time_resolver.update(source_clock_sample, target_clock_sample)) qi = target_clock_sample # Initialization if self._p is None: self._p = pi self._q = qi # Sync error - refer to the reference implementation of the algorithm self._estimated_delay = abs((pi - self._p) - (qi - self._q)) # Resynchronization (discarding known state) if self._estimated_delay > self.max_phase_error_to_resync: self._source_time_resolver.reset() self._resync_count += 1 self._p = pi = float(self._source_time_resolver.update(source_clock_sample, target_clock_sample)) self._q = qi # Offset options assert pi >= self._p offset = self._p - self._q - self.max_rate_error * (pi - self._p) - self.fixed_delay new_offset = pi - qi - self.fixed_delay # Updating p/q if the new offset is lower by magnitude if new_offset >= offset: offset = new_offset self._p = pi self._q = qi ti = pi - offset return ti
def update(self, source_clock_sample, target_clock_sample)
Args: source_clock_sample: E.g. value received from the source system, in seconds target_clock_sample: E.g. target time sampled when the data arrived to the local system, in seconds Returns: Event timestamp converted to the target time domain.
4.881685
4.905222
0.995202
if not isinstance(obj, CompoundValue) and hasattr(obj, 'transfer'): if hasattr(obj, 'message'): payload = obj.message header = 'Message' elif hasattr(obj, 'request'): payload = obj.request header = 'Request' elif hasattr(obj, 'response'): payload = obj.response header = 'Response' else: raise ValueError('Cannot generate YAML representation for %r' % type(obj)) prefix = '### %s from %s to %s ts_mono=%.6f ts_real=%.6f\n' % \ (header, obj.transfer.source_node_id or 'Anon', obj.transfer.dest_node_id or 'All', obj.transfer.ts_monotonic, obj.transfer.ts_real) return prefix + _to_yaml_impl(payload) else: return _to_yaml_impl(obj)
def to_yaml(obj)
This function returns a correct YAML representation of a UAVCAN structure (message, request, or response), a DSDL entity (array or primitive), or a UAVCAN transfer, with comments for human benefit. Args: obj: Object to convert. Returns: Unicode string containing the YAML representation of the object.
3.599547
3.12235
1.152833
# Extracting constants uavcan_type = uavcan.get_uavcan_data_type(struct) if uavcan.is_request(struct): consts = uavcan_type.request_constants fields = uavcan_type.request_fields elif uavcan.is_response(struct): consts = uavcan_type.response_constants fields = uavcan_type.response_fields else: consts = uavcan_type.constants fields = uavcan_type.fields assert len(fields) > 0 # noinspection PyShadowingNames def format_output(name, value, remove_common_prefix): if remove_common_prefix: num_seps = len(field_name.split('_')) parts = name.split('_')[num_seps:] name = '_'.join(parts) return ('%s (%r)' % (name, value)) if keep_literal else name # noinspection PyShadowingNames def match_one_prefix(prefix, value): matches = [] for cname, cval in [(x.name, x.value) for x in consts if x.name.lower().startswith(prefix.lower())]: if cval == value: matches.append(cname) # Making sure we found exactly one match, otherwise it's not a correct result if len(matches) == 1: return matches[0] # noinspection PyShadowingNames def match_value(value): # Trying direct match match = match_one_prefix(field_name + '_', value) if match: return format_output(match, value, True) # Trying direct match without the terminal letter if it is 's' (plural). This works for 'flags'. # TODO: this is sketchy. if field_name[-1] == 's': match = match_one_prefix(field_name[:-1] + '_', value) if match: return format_output(match, value, True) # Trying match without prefix, only if there's just one field if len(fields) == 1: match = match_one_prefix('', value) if match: return format_output(match, value, False) # Trying single value first value = getattr(struct, field_name) match = match_value(value) if match: return match # Trying bit masks def extract_powers_of_2(x): i = 1 while i <= x: if i & x: yield i i <<= 1 matches = [] for pow2 in extract_powers_of_2(value): match = match_value(pow2) if match: matches.append(match) else: matches = [] break # If at least one couldn't be matched, we're on a wrong track, stop if len(matches) > 0: return ' | '.join(matches) # No match could be found, returning the value as is return value
def value_to_constant_name(struct, field_name, keep_literal=False)
This function accepts a UAVCAN struct (message, request, or response) and a field name, and returns the name of the constant or bit mask that matches the value. If no match can be established, the literal value is returned as is. Args: struct: UAVCAN struct to work with field_name: Name of the field to work with keep_literal: Whether to include the input integer value in the output string Returns: Name of the constant or flags if a match could be detected, otherwise the integer value as is.
2.87944
2.736615
1.05219
windows_com_port = device_name.replace('\\', '').replace('.', '').lower().startswith('com') unix_tty = device_name.startswith('/dev/') if windows_com_port or unix_tty: return SLCAN(device_name, **kwargs) elif SocketCAN is not None: return SocketCAN(device_name, **kwargs) else: raise DriverError('Unrecognized device name: %r' % device_name)
def make_driver(device_name, **kwargs)
Creates an instance of CAN driver. The right driver class will be selected automatically based on the device_name. :param device_name: This parameter is used to select driver class. E.g. "/dev/ttyACM0", "COM9", "can0". :param kwargs: Passed directly to the constructor.
4.651406
4.39569
1.058174
global DATATYPES, TYPENAMES paths = list(paths) # Try to prepend the built-in DSDL files # TODO: why do we need try/except here? # noinspection PyBroadException try: if not args.get("exclude_dist", None): dsdl_path = pkg_resources.resource_filename(__name__, "dsdl_files") # @UndefinedVariable paths = [os.path.join(dsdl_path, "uavcan")] + paths custom_path = os.path.join(os.path.expanduser("~"), "uavcan_vendor_specific_types") if os.path.isdir(custom_path): paths += [f for f in [os.path.join(custom_path, f) for f in os.listdir(custom_path)] if os.path.isdir(f)] except Exception: pass root_namespace = Namespace() dtypes = dsdl.parse_namespaces(paths) for dtype in dtypes: namespace, _, typename = dtype.full_name.rpartition(".") root_namespace._path(namespace).__dict__[typename] = dtype TYPENAMES[dtype.full_name] = dtype if dtype.default_dtid: DATATYPES[(dtype.default_dtid, dtype.kind)] = dtype # Add the base CRC to each data type capable of being transmitted dtype.base_crc = dsdl.crc16_from_bytes(struct.pack("<Q", dtype.get_data_type_signature())) logger.debug("DSDL Load {: >30} DTID: {: >4} base_crc:{: >8}" .format(typename, dtype.default_dtid, hex(dtype.base_crc))) def create_instance_closure(closure_type, _mode=None): # noinspection PyShadowingNames def create_instance(*args, **kwargs): if _mode: assert '_mode' not in kwargs, 'Mode cannot be supplied to service type instantiation helper' kwargs['_mode'] = _mode return transport.CompoundValue(closure_type, *args, **kwargs) return create_instance dtype._instantiate = create_instance_closure(dtype) if dtype.kind == dtype.KIND_SERVICE: dtype.Request = create_instance_closure(dtype, _mode='request') dtype.Response = create_instance_closure(dtype, _mode='response') namespace = root_namespace._path("uavcan") for top_namespace in namespace._namespaces(): MODULE.__dict__[str(top_namespace)] = namespace.__dict__[top_namespace] MODULE.__dict__["thirdparty"] = Namespace() for ext_namespace in root_namespace._namespaces(): if str(ext_namespace) != "uavcan": # noinspection PyUnresolvedReferences MODULE.thirdparty.__dict__[str(ext_namespace)] = root_namespace.__dict__[ext_namespace]
def load_dsdl(*paths, **args)
Loads the DSDL files under the given directory/directories, and creates types for each of them in the current module's namespace. If the exclude_dist argument is not present, or False, the DSDL definitions installed with this package will be loaded first. Also adds entries for all datatype (ID, kind)s to the DATATYPES dictionary, which maps datatype (ID, kind)s to their respective type classes.
4.040499
3.824323
1.056527
attr, _, subpath = attrpath.partition(".") if attr not in self.__dict__: self.__dict__[attr] = Namespace() self.__namespaces.add(attr) if subpath: return self.__dict__[attr]._path(subpath) else: return self.__dict__[attr]
def _path(self, attrpath)
Returns the namespace object at the given .-separated path, creating any namespaces in the path that don't already exist.
3.37566
2.826932
1.194107
can = driver.make_driver(can_device_name, **kwargs) return Node(can, **kwargs)
def make_node(can_device_name, **kwargs)
Constructs a node instance with specified CAN device. :param can_device_name: CAN device name, e.g. "/dev/ttyACM0", "COM9", "can0". :param kwargs: These arguments will be supplied to the CAN driver factory and to the node constructor.
5.194514
5.703785
0.910713
priority = 1 event = self._scheduler.enter(timeout_seconds, priority, callback, ()) return self._make_sched_handle(lambda: event)
def defer(self, timeout_seconds, callback)
This method invokes the callback once after the specified amount of time elapses. :returns: EventHandle object. Call .remove() on it to cancel the event.
10.028252
11.65363
0.860526
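A hedged usage sketch of the same one-shot idea with the standard-library sched module; the scheduler instance, timeout, and callback below are illustrative assumptions rather than part of the original API.

import sched
import time

scheduler = sched.scheduler(time.monotonic, time.sleep)

# Priority 1, as in defer(); the callback fires once after roughly 1.5 s.
event = scheduler.enter(1.5, 1, lambda: print('deferred callback fired'))
# scheduler.cancel(event) would cancel it, which is what the returned handle wraps.
scheduler.run()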
priority = 0 def caller(scheduled_deadline): # Event MUST be re-registered first in order to ensure that it can be cancelled from the callback scheduled_deadline += period_seconds event_holder[0] = self._scheduler.enterabs(scheduled_deadline, priority, caller, (scheduled_deadline,)) callback() first_deadline = self._scheduler.timefunc() + period_seconds event_holder = [self._scheduler.enterabs(first_deadline, priority, caller, (first_deadline,))] return self._make_sched_handle(lambda: event_holder[0])
def periodic(self, period_seconds, callback)
This method invokes the callback periodically at the specified time interval. Note that the scheduler features zero phase drift. :returns: EventHandle object. Call .remove() on it to cancel the event.
5.463672
5.934519
0.92066
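A standalone sketch of the zero-drift re-registration pattern used above: each invocation schedules the next one against the previous scheduled deadline (via sched.enterabs), not the current time, so callback latency never accumulates. The period, stop condition, and names are illustrative assumptions.

import sched
import time

scheduler = sched.scheduler(time.monotonic, time.sleep)
PERIOD = 0.5
ticks = []

def caller(scheduled_deadline):
    # Re-register against the *scheduled* deadline so jitter does not shift later ticks.
    next_deadline = scheduled_deadline + PERIOD
    if len(ticks) < 2:                      # stop after three ticks in this demo
        scheduler.enterabs(next_deadline, 0, caller, (next_deadline,))
    ticks.append(time.monotonic())

first_deadline = scheduler.timefunc() + PERIOD
scheduler.enterabs(first_deadline, 0, caller, (first_deadline,))
scheduler.run()
print(['%.3f' % (t - ticks[0]) for t in ticks])   # roughly 0.0, 0.5, 1.0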
return self._handler_dispatcher.add_handler(uavcan_type, handler, **kwargs)
def add_handler(self, uavcan_type, handler, **kwargs)
Adds a handler for the specified data type. :param uavcan_type: DSDL data type. Only transfers of this type will be accepted for this handler. :param handler: The handler. This must be either a callable or a class. :param **kwargs: Extra arguments for the handler. :return: A remover object that can be used to unregister the handler as follows: x = node.add_handler(...) # Remove the handler like this: x.remove() # Or like this: if x.try_remove(): print('The handler has been removed successfully') else: print('There is no such handler')
3.749048
4.978752
0.753009
if timeout != 0: deadline = (time.monotonic() + timeout) if timeout is not None else sys.float_info.max def execute_once(): next_event_at = self._poll_scheduler_and_get_next_deadline() if next_event_at is None: next_event_at = sys.float_info.max read_timeout = min(next_event_at, deadline) - time.monotonic() read_timeout = max(read_timeout, 0) read_timeout = min(read_timeout, 1) frame = self._can_driver.receive(read_timeout) if frame: self._recv_frame(frame) execute_once() while time.monotonic() < deadline: execute_once() else: while True: frame = self._can_driver.receive(0) if frame: self._recv_frame(frame) else: break self._poll_scheduler_and_get_next_deadline()
def spin(self, timeout=None)
Runs background processes until timeout expires. Note that all processing is implemented in one thread. :param timeout: The method will return once this amount of time expires. If None, the method will never return. If zero, the method will handle only those events that are ready, then return immediately.
2.79848
2.847117
0.982917
'''Feed ASCII string or bytes to the signature function''' try: if isinstance(data_bytes, basestring): # Python 2.7 compatibility data_bytes = map(ord, data_bytes) except NameError: if isinstance(data_bytes, str): # This branch will be taken on Python 3 data_bytes = map(ord, data_bytes) for b in data_bytes: self._crc ^= (b << 56) & Signature.MASK64 for _ in range(8): if self._crc & (1 << 63): self._crc = ((self._crc << 1) & Signature.MASK64) ^ Signature.POLY else: self._crc <<= 1
def add(self, data_bytes)
Feed ASCII string or bytes to the signature function
3.540259
2.955063
1.198032
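For context, a self-contained sketch of the same MSB-first bit-by-bit CRC-64 update. The polynomial below is the CRC-64-WE value commonly cited for UAVCAN DSDL signatures; it is an assumption here, since the snippet above references Signature.POLY and Signature.MASK64 without showing them.

MASK64 = 0xFFFFFFFFFFFFFFFF
POLY = 0x42F0E1EBA9EA3693      # assumed CRC-64-WE polynomial, not shown in the snippet above

def crc64_add(crc, data_bytes):
    # Same bitwise update loop as Signature.add() above.
    for b in data_bytes:
        crc ^= (b << 56) & MASK64
        for _ in range(8):
            if crc & (1 << 63):
                crc = ((crc << 1) & MASK64) ^ POLY
            else:
                crc = (crc << 1) & MASK64
    return crc

print(hex(crc64_add(0xFFFFFFFFFFFFFFFF, b'uavcan')))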
# noinspection PyShadowingNames def walk(): import fnmatch from functools import partial def on_walk_error(directory, ex): raise DsdlException('OS error in [%s]: %s' % (directory, str(ex))) for source_dir in source_dirs: walker = os.walk(source_dir, onerror=partial(on_walk_error, source_dir), followlinks=True) for root, _dirnames, filenames in walker: for filename in fnmatch.filter(filenames, '*.uavcan'): filename = os.path.join(root, filename) yield filename all_default_dtid = {} # (kind, dtid) : filename # noinspection PyShadowingNames def ensure_unique_dtid(t, filename): if t.default_dtid is None: return key = t.kind, t.default_dtid if key in all_default_dtid: first = pretty_filename(all_default_dtid[key]) second = pretty_filename(filename) error('Default data type ID collision: [%s] [%s]', first, second) all_default_dtid[key] = filename parser = Parser(source_dirs + (search_dirs or [])) output_types = [] for filename in walk(): t = parser.parse(filename) ensure_unique_dtid(t, filename) output_types.append(t) return output_types
def parse_namespaces(source_dirs, search_dirs=None)
Use only this function to parse DSDL definitions. This function takes a list of root namespace directories (containing DSDL definition files to parse) and an optional list of search directories (containing DSDL definition files that can be referenced from the types that are going to be parsed). Returns the list of parsed type definitions, where type of each element is CompoundType. Args: source_dirs: List of root namespace directories to parse. search_dirs: List of root namespace directories with referenced types (optional). This list is automatically extended with source_dirs. Example: >>> import uavcan >>> a = uavcan.dsdl.parse_namespaces(['../dsdl/uavcan']) >>> len(a) 77 >>> a[0] uavcan.Timestamp >>> a[0].fields [truncated uint48 husec] >>> a[0].constants [saturated uint48 UNKNOWN = 0, saturated uint48 USEC_PER_LSB = 100]
3.305669
3.171792
1.042209
cast_mode = 'saturated' if self.cast_mode == PrimitiveType.CAST_MODE_SATURATED else 'truncated' primary_type = { PrimitiveType.KIND_BOOLEAN: 'bool', PrimitiveType.KIND_UNSIGNED_INT: 'uint' + str(self.bitlen), PrimitiveType.KIND_SIGNED_INT: 'int' + str(self.bitlen), PrimitiveType.KIND_FLOAT: 'float' + str(self.bitlen) }[self.kind] return cast_mode + ' ' + primary_type
def get_normalized_definition(self)
Please refer to the specification for details about normalized definitions.
3.341041
3.092815
1.080259
low, high = self.value_range if not low <= value <= high: error('Value [%s] is out of range %s', value, self.value_range)
def validate_value_range(self, value)
Args: value: Value to validate. Raises DsdlException if the value cannot be represented by this type.
4.374103
4.591408
0.952671
typedef = self.value_type.get_normalized_definition() return ('%s[<=%d]' if self.mode == ArrayType.MODE_DYNAMIC else '%s[%d]') % (typedef, self.max_size)
def get_normalized_definition(self)
Please refer to the specification for details about normalized definitions.
9.578068
8.70352
1.100482
payload_max_bitlen = self.max_size * self.value_type.get_max_bitlen() return { self.MODE_DYNAMIC: payload_max_bitlen + self.max_size.bit_length(), self.MODE_STATIC: payload_max_bitlen }[self.mode]
def get_max_bitlen(self)
Returns total maximum bit length of the array, including length field if applicable.
3.852377
3.489232
1.104076
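A worked example of the arithmetic above for a hypothetical dynamic array of uint8 with max_size 15; the numbers are illustrative only.

# Hypothetical dynamic array: saturated uint8[<=15]
max_size = 15
element_max_bitlen = 8
payload_max_bitlen = max_size * element_max_bitlen      # 120 bits of payload
length_field_bitlen = max_size.bit_length()             # 4 bits to encode lengths 0..15
print(payload_max_bitlen + length_field_bitlen)          # -> 124 for MODE_DYNAMIC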
txt = StringIO() txt.write(self.full_name + '\n') def adjoin(attrs): return txt.write('\n'.join(x.get_normalized_definition() for x in attrs) + '\n') if self.kind == CompoundType.KIND_SERVICE: if self.request_union: txt.write('\n@union\n') adjoin(self.request_fields) txt.write('\n---\n') if self.response_union: txt.write('\n@union\n') adjoin(self.response_fields) elif self.kind == CompoundType.KIND_MESSAGE: if self.union: txt.write('\n@union\n') adjoin(self.fields) else: error('Compound type of unknown kind [%s]', self.kind) return txt.getvalue().strip().replace('\n\n\n', '\n').replace('\n\n', '\n')
def get_dsdl_signature_source_definition(self)
Returns normalized DSDL definition text. Please refer to the specification for details about normalized DSDL definitions.
3.278012
3.092102
1.060124
if self._data_type_signature is None: sig = Signature(self.get_dsdl_signature()) fields = self.request_fields + self.response_fields if self.kind == CompoundType.KIND_SERVICE else self.fields for field in fields: field_sig = field.type.get_data_type_signature() if field_sig is not None: sig_value = sig.get_value() sig.add(bytes_from_crc64(field_sig)) sig.add(bytes_from_crc64(sig_value)) self._data_type_signature = sig.get_value() return self._data_type_signature
def get_data_type_signature(self)
Computes data type signature of this type. The data type signature is guaranteed to match only if all nested data structures are compatible. Please refer to the specification for details about signatures.
3.63905
3.431532
1.060474
self._handle.remove() self._node_monitor_event_handle.remove() self._allocation_table.close()
def close(self)
Stops the instance and closes the allocation table storage.
14.933288
9.574369
1.559715
'''Returns a nice human readable path to 'filename'.''' try: a = os.path.abspath(filename) r = os.path.relpath(filename) except ValueError: # Catch relpath exception. Happens because relpath cannot produce a relative path # if the working directory is on a different drive. a = r = filename return a if '..' in r else r
def pretty_filename(filename)
Returns a nice human readable path to 'filename'.
8.605169
7.210204
1.193471
# Check if event is of type event_pb2.Event proto. if not isinstance(event, event_pb2.Event): raise TypeError("expected an event_pb2.Event proto, " " but got %s" % type(event)) return self._write_serialized_event(event.SerializeToString())
def write_event(self, event)
Appends event to the file.
3.888622
3.771599
1.031028
if self._num_outstanding_events == 0 or self._recordio_writer is None: return self._recordio_writer.flush() if self._logger is not None: self._logger.info('wrote %d %s to disk', self._num_outstanding_events, 'event' if self._num_outstanding_events == 1 else 'events') self._num_outstanding_events = 0
def flush(self)
Flushes the event file to disk.
2.798269
2.634749
1.062063
self.flush() if self._recordio_writer is not None: self._recordio_writer.close() self._recordio_writer = None
def close(self)
Flushes the pending events and closes the writer after it is done.
3.899686
3.083695
1.264615
if self._closed: self._worker = _EventLoggerThread(self._event_queue, self._ev_writer, self._flush_secs, self._sentinel_event) self._worker.start() self._closed = False
def reopen(self)
Reopens the EventFileWriter. Can be called after `close()` to add more events in the same directory. The events will go into a new events file. Does nothing if the `EventFileWriter` was not closed.
8.048051
6.410844
1.255381
if not self._closed: self.add_event(self._sentinel_event) self.flush() self._worker.join() self._ev_writer.close() self._closed = True
def close(self)
Flushes the event file to disk and closes the file. Call this method when you do not need the summary writer anymore.
5.802923
4.713627
1.231095
header = struct.pack('Q', len(event_str)) header += struct.pack('I', masked_crc32c(header)) footer = struct.pack('I', masked_crc32c(event_str)) self._writer.write(header + event_str + footer)
def write_record(self, event_str)
Writes a serialized event to file.
3.243515
3.029092
1.070788
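The layout written above is the standard TFRecord framing: an 8-byte length, a masked CRC32C of the length, the payload, and a masked CRC32C of the payload. A hedged reader sketch, assuming a masked_crc32c() callable with the same semantics as the helper referenced above.

import struct

def read_record(stream, masked_crc32c):
    # Mirrors write_record(): 8-byte length, 4-byte length CRC, payload, 4-byte payload CRC.
    header = stream.read(8)
    if not header:
        return None
    (length,) = struct.unpack('Q', header)
    (length_crc,) = struct.unpack('I', stream.read(4))
    assert length_crc == masked_crc32c(header), 'corrupt length header'
    payload = stream.read(length)
    (payload_crc,) = struct.unpack('I', stream.read(4))
    assert payload_crc == masked_crc32c(payload), 'corrupt payload'
    return payload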
if self._writer is not None: self.flush() self._writer.close() self._writer = None
def close(self)
Closes the record writer.
3.440955
2.724323
1.26305
if isinstance(summary, bytes): summ = summary_pb2.Summary() summ.ParseFromString(summary) summary = summ # We strip metadata from values with tags that we have seen before in order # to save space - we just store the metadata on the first value with a # specific tag. for value in summary.value: if not value.metadata: continue if value.tag in self._seen_summary_tags: # This tag has been encountered before. Strip the metadata. value.ClearField("metadata") continue # We encounter a value with a tag we have not encountered previously. And # it has metadata. Remember to strip metadata from future values with this # tag string. self._seen_summary_tags.add(value.tag) event = event_pb2.Event(summary=summary) self._add_event(event, global_step)
def add_summary(self, summary, global_step=None)
Adds a `Summary` protocol buffer to the event file. This method wraps the provided summary in an `Event` protocol buffer and adds it to the event file. Parameters ---------- summary : A `Summary` protocol buffer Optionally serialized as a string. global_step: Number Optional global step value to record with the summary.
4.284337
4.346186
0.985769
event = event_pb2.Event(graph_def=graph.SerializeToString()) self._add_event(event, None)
def add_graph(self, graph)
Adds a `Graph` protocol buffer to the event file.
5.892663
4.112292
1.432939
if self._default_bins is None: v = 1E-12 buckets = [] neg_buckets = [] while v < 1E20: buckets.append(v) neg_buckets.append(-v) v *= 1.1 self._default_bins = neg_buckets[::-1] + [0] + buckets return self._default_bins
def _get_default_bins(self)
Ported from the C++ function InitDefaultBucketsInner() in the following file. https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/histogram/histogram.cc See the following tutorial for more details on how TensorFlow initializes the bin distribution. https://www.tensorflow.org/programmers_guide/tensorboard_histograms
3.028434
2.6545
1.140868
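A quick standalone check of what those default bucket edges look like; this is just the generation loop from above lifted out of the class.

v = 1e-12
buckets = []
while v < 1e20:
    buckets.append(v)
    v *= 1.1
edges = [-b for b in reversed(buckets)] + [0] + buckets
print(len(edges))               # on the order of 1500 symmetric, geometrically spaced edges
print(edges[:3], edges[-3:])    # tiny negative edges ... huge positive edges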
if tag not in self._scalar_dict.keys(): self._scalar_dict[tag] = [] self._scalar_dict[tag].append([timestamp, global_step, float(scalar_value)])
def _append_to_scalar_dict(self, tag, scalar_value, global_step, timestamp)
Adds a list [timestamp, step, value] to the value of `self._scalar_dict[tag]`. This allows users to store scalars in memory and dump them to a json file later.
2.126711
1.775644
1.197713
if isinstance(value, (tuple, list, dict)): if isinstance(value, (tuple, list)): if len(value) != 2: raise ValueError('expected two elements in value, while received %d' % len(value)) value = {value[0]: value[1]} self._add_scalars(tag, value, global_step) else: self._file_writer.add_summary(scalar_summary(tag, value), global_step) self._append_to_scalar_dict(self.get_logdir() + '/' + tag, value, global_step, time.time())
def add_scalar(self, tag, value, global_step=None)
Adds scalar data to the event file. Parameters ---------- tag : str Name for the scalar plot. value : float, tuple, list, or dict If value is a float, the corresponding curve would have no name attached in the plot. If value is a tuple or list, it must have two elements with the first one representing the name of the value and the second one as the float value. The name of the value will be attached to the corresponding curve in the plot. This is useful when users want to draw multiple curves in the same plot. It internally calls `_add_scalars`. If value is a dict, it's a mapping from strs to float values, with strs representing the names of the float values. This is convenient when users want to log a collection of float values with different names for visualizing them in the same plot without repeatedly calling `add_scalar` for each value. It internally calls `_add_scalars`. global_step : int Global step value to record. Examples -------- >>> import numpy as np >>> from mxboard import SummaryWriter >>> xs = np.arange(start=0, stop=2 * np.pi, step=0.01) >>> y_sin = np.sin(xs) >>> y_cos = np.cos(xs) >>> y_exp_sin = np.exp(y_sin) >>> y_exp_cos = np.exp(y_cos) >>> y_sin2 = y_sin * y_sin >>> with SummaryWriter(logdir='./logs') as sw: >>> for x, y1, y2, y3, y4, y5 in zip(xs, y_sin, y_cos, y_exp_sin, y_exp_cos, y_sin2): >>> sw.add_scalar('curves', {'sin': y1, 'cos': y2}, x * 100) >>> sw.add_scalar('curves', ('exp(sin)', y3), x * 100) >>> sw.add_scalar('curves', ['exp(cos)', y4], x * 100) >>> sw.add_scalar('curves', y5, x * 100)
3.700557
3.549107
1.042673
timestamp = time.time() fw_logdir = self._file_writer.get_logdir() for scalar_name, scalar_value in scalar_dict.items(): fw_tag = fw_logdir + '/' + tag + '/' + scalar_name if fw_tag in self._all_writers.keys(): fw = self._all_writers[fw_tag] else: fw = FileWriter(logdir=fw_tag, max_queue=self._max_queue, flush_secs=self._flush_secs, filename_suffix=self._filename_suffix, verbose=self._verbose) self._all_writers[fw_tag] = fw fw.add_summary(scalar_summary(tag, scalar_value), global_step) self._append_to_scalar_dict(fw_tag, scalar_value, global_step, timestamp)
def _add_scalars(self, tag, scalar_dict, global_step=None)
Adds multiple scalars to summary. This enables drawing multiple curves in one plot. Parameters ---------- tag : str Name for the plot. scalar_dict : dict Values to be saved. global_step : int Global step value to record.
2.38797
2.473337
0.965485
if os.path.exists(path) and os.path.isfile(path): logging.warning('%s already exists and will be overwritten by scalar dict', path) with open(path, "w") as f: json.dump(self._scalar_dict, f)
def export_scalars(self, path)
Exports to the given path an ASCII file containing all the scalars written so far by this instance, with the following format: {writer_id : [[timestamp, step, value], ...], ...}
3.301036
3.369184
0.979773
if bins == 'default': bins = self._get_default_bins() self._file_writer.add_summary(histogram_summary(tag, values, bins), global_step)
def add_histogram(self, tag, values, global_step=None, bins='default')
Add histogram data to the event file. Note: This function internally calls `asnumpy()` if `values` is an MXNet NDArray. Since `asnumpy()` is a blocking function call, this function would block the main thread till it returns. It may consequently affect the performance of async execution of the MXNet engine. Parameters ---------- tag : str Name for the `values`. values : MXNet `NDArray` or `numpy.ndarray` Values for building the histogram. global_step : int Global step value to record. bins : int or sequence of scalars or str If `bins` is an int, it defines the number of equal-width bins in the range `(values.min(), values.max())`. If `bins` is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin width. If `bins` is a str equal to 'default', it will use the bin distribution defined in TensorFlow for building the histogram. Ref: https://www.tensorflow.org/programmers_guide/tensorboard_histograms The rest of the supported strings for `bins` are 'auto', 'fd', 'doane', 'scott', 'rice', 'sturges', and 'sqrt'. See the documentation of `numpy.histogram` for detailed definitions of those strings. https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
3.098844
4.673509
0.663066
self._file_writer.add_summary(image_summary(tag, image), global_step)
def add_image(self, tag, image, global_step=None)
Add image data to the event file. This function supports input as a 2D, 3D, or 4D image. If the input image is 2D, a channel axis is prepended as the first dimension and the image will be replicated three times and concatenated along the channel axis. If the input image is 3D, it will be replicated three times and concatenated along the channel axis. If the input image is 4D, which is a batch of images, all the images will be spliced as a sprite image for display. Note: This function requires the ``pillow`` package. Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs. Since `asnumpy()` is a blocking function call, this function would block the main thread till it returns. It may consequently affect the performance of async execution of the MXNet engine. Parameters ---------- tag : str Name for the `image`. image : MXNet `NDArray` or `numpy.ndarray` Image in one of the following formats: (H, W), (C, H, W), (N, C, H, W). If the input is a batch of images, a grid of images is made by stitching them together. If data type is float, values must be in the range [0, 1], and then they are rescaled to the range [0, 255]. Note that this does not change the values of the input `image`. A copy of the input `image` is created instead. If data type is `uint8`, values are unchanged. global_step : int Global step value to record.
4.51201
7.896509
0.571393
self._file_writer.add_summary(audio_summary(tag, audio, sample_rate=sample_rate), global_step)
def add_audio(self, tag, audio, sample_rate=44100, global_step=None)
Add audio data to the event file. Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs. Since `asnumpy()` is a blocking function call, this function would block the main thread till it returns. It may consequently affect the performance of async execution of the MXNet engine. Parameters ---------- tag : str Name for the `audio`. audio : MXNet `NDArray` or `numpy.ndarray` Audio data squeezable to a 1D tensor. The values of the tensor are in the range `[-1, 1]`. sample_rate : int Sample rate in Hz. global_step : int Global step value to record.
3.734565
5.866628
0.636578
self._file_writer.add_summary(text_summary(tag, text), global_step) if tag not in self._text_tags: self._text_tags.append(tag) extension_dir = self.get_logdir() + '/plugins/tensorboard_text/' if not os.path.exists(extension_dir): os.makedirs(extension_dir) with open(extension_dir + 'tensors.json', 'w') as fp: json.dump(self._text_tags, fp)
def add_text(self, tag, text, global_step=None)
Add text data to the event file. Parameters ---------- tag : str Name for the `text`. text : str Text to be saved to the event file. global_step : int Global step value to record.
2.814092
3.054695
0.921235
embedding_shape = embedding.shape if len(embedding_shape) != 2: raise ValueError('expected 2D NDArray as embedding data, while received an array with' ' ndim=%d' % len(embedding_shape)) data_dir = _get_embedding_dir(tag, global_step) save_path = os.path.join(self.get_logdir(), data_dir) try: os.makedirs(save_path) except OSError: logging.warning('embedding dir %s exists, files under this dir will be overwritten', save_path) if labels is not None: if (embedding_shape[0] != len(labels) and (not _is_2D_matrix(labels) or len(labels) != embedding_shape[0] + 1)): raise ValueError('expected equal values of embedding first dim and length of ' 'labels or embedding first dim + 1 for 2d labels ' ', while received %d and %d for each' % (embedding_shape[0], len(labels))) if self._logger is not None: self._logger.info('saved embedding labels to %s', save_path) _make_metadata_tsv(labels, save_path) if images is not None: img_labels_shape = images.shape if embedding_shape[0] != img_labels_shape[0]: raise ValueError('expected equal first dim size of embedding and images,' ' while received %d and %d for each' % (embedding_shape[0], img_labels_shape[0])) if self._logger is not None: self._logger.info('saved embedding images to %s', save_path) _make_sprite_image(images, save_path) if self._logger is not None: self._logger.info('saved embedding data to %s', save_path) _save_embedding_tsv(embedding, save_path) _add_embedding_config(self.get_logdir(), data_dir, labels is not None, images.shape if images is not None else None)
def add_embedding(self, tag, embedding, labels=None, images=None, global_step=None)
Adds embedding projector data to the event file. It will also create a config file used by the embedding projector in TensorBoard. The folder containing the embedding data is named using the following rule: If global_step is not None, the folder name is `tag + '_' + str(global_step).zfill(6)`; else, the folder name is `tag`. For example, if tag = 'mnist' and global_step = 12, the folder's name is 'mnist_000012'; when global_step = None, the folder's name is 'mnist'. See the following reference for the meanings of labels and images. Ref: https://www.tensorflow.org/versions/r1.2/get_started/embedding_viz Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs. Since `asnumpy()` is a blocking function call, this function would block the main thread till it returns. It may consequently affect the performance of async execution of the MXNet engine. Parameters ---------- tag : str Name for the `embedding`. embedding : MXNet `NDArray` or `numpy.ndarray` A matrix in which each row is the feature vector of a data point. labels : MXNet `NDArray` or `numpy.ndarray` or a list of elements convertible to str. Labels corresponding to the data points in the `embedding`. If the labels are 2D, the first row is considered the column names. images : MXNet `NDArray` or `numpy.ndarray` Images of format NCHW corresponding to the data points in the `embedding`. global_step : int Global step value to record. If not set, defaults to zero.
2.808191
2.854127
0.983905
if num_thresholds < 2: raise ValueError('num_thresholds must be >= 2') labels = _make_numpy_array(labels) predictions = _make_numpy_array(predictions) self._file_writer.add_summary(pr_curve_summary(tag, labels, predictions, num_thresholds, weights), global_step)
def add_pr_curve(self, tag, labels, predictions, num_thresholds, global_step=None, weights=None)
Adds precision-recall curve. Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs. Since `asnumpy()` is a blocking function call, this function would block the main thread till it returns. It may consequently affect the performance of async execution of the MXNet engine. Parameters ---------- tag : str A tag attached to the summary. Used by TensorBoard for organization. labels : MXNet `NDArray` or `numpy.ndarray`. The ground truth values. A tensor of 0/1 values with arbitrary shape. predictions : MXNet `NDArray` or `numpy.ndarray`. A float32 tensor whose values are in the range `[0, 1]`. Dimensions must match those of `labels`. num_thresholds : int Number of thresholds, evenly distributed in `[0, 1]`, to compute PR metrics for. Should be `>= 2`. This value should be a constant integer value, not a tensor that stores an integer. The thresholds for computing the pr curves are calculated in the following way: `width = 1.0 / (num_thresholds - 1), thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]`. global_step : int Global step value to record. weights : MXNet `NDArray` or `numpy.ndarray`. Optional float32 tensor. Individual counts are multiplied by this value. This tensor must be either the same shape as or broadcastable to the `labels` tensor.
2.319356
3.061311
0.757635
for i in n: if len(i) != len(n[0]): return False return True
def _rectangular(n)
Checks to see if a 2D list is a valid 2D matrix
3.772285
2.420287
1.558611
return ((isinstance(matrix[0], list) and _rectangular(matrix) and not isinstance(matrix[0][0], list)) or (not isinstance(matrix, list) and len(matrix.shape) == 2))
def _is_2D_matrix(matrix)
Checks to see if an ndarray or a list of lists is 2D
4.763127
4.292598
1.109614
if not isinstance(image, NDArray): raise TypeError('MXNet NDArray expected, received {}'.format(str(type(image)))) image = _prepare_image(image, nrow=nrow, padding=padding, square_image=square_image) if Image is None: raise ImportError('saving image failed because PIL is not found') im = Image.fromarray(image.asnumpy()) im.save(filename)
def _save_image(image, filename, nrow=8, padding=2, square_image=True)
Saves a given Tensor into an image file. If the input tensor contains multiple images, a grid of images will be saved. Parameters ---------- image : `NDArray` Input image(s) in the format of HW, CHW, or NCHW. filename : str Filename of the saved image(s). nrow : int Number of images displayed in each row of the grid. The final grid size is (batch_size / `nrow`, `nrow`) when square_image is False; otherwise, (`nrow`, `nrow`). padding : int Padding value for each image in the grid. square_image : bool If True, force the image grid to be strictly square.
3.334473
3.675867
0.907126
if isinstance(img, np.ndarray): img = nd.array(img, dtype=img.dtype, ctx=current_context()) if not isinstance(img, NDArray): raise TypeError('expected MXNet NDArray or numpy.ndarray, ' 'while received type {}'.format(str(type(img)))) assert img.ndim == 2 or img.ndim == 3 or img.ndim == 4 if img.dtype == np.uint8: return make_image_grid( img, nrow=nrow, padding=padding, square_image=square_image).transpose((1, 2, 0)) elif img.dtype == np.float32 or img.dtype == np.float64: min_val = img.min().asscalar() max_val = img.max().asscalar() if min_val < 0.0: raise ValueError('expected non-negative min value from img, ' 'while received {}'.format(min_val)) if max_val > 1.0: raise ValueError('expected max value from img not greater than 1, ' 'while received {}'.format(max_val)) img = make_image_grid(img, nrow=nrow, padding=padding, square_image=square_image) * 255.0 return img.astype(np.uint8).transpose((1, 2, 0)) else: raise ValueError('expected input image dtype is one of uint8, float32, ' 'and float64, received dtype {}'.format(str(img.dtype)))
def _prepare_image(img, nrow=8, padding=2, square_image=False)
Given an image of format HW, CHW, or NCHW, returns an image of format HWC. If the input is a batch of images, a grid of images is made by stitching them together. If data type is float, values must be in the range [0, 1], and then they are rescaled to the range [0, 255]. If data type is `uint8`, values are unchanged.
2.167143
2.179743
0.994219
if isinstance(metadata, NDArray): metadata = metadata.asnumpy() elif isinstance(metadata, list): metadata = np.array(metadata) elif not isinstance(metadata, np.ndarray): raise TypeError('expected NDArray or np.ndarray or 1D/2D list, while received ' 'type {}'.format(str(type(metadata)))) if len(metadata.shape) > 2: raise TypeError('expected a 1D/2D NDArray, np.ndarray or list, while received ' 'shape {}'.format(str(metadata.shape))) if len(metadata.shape) == 1: metadata = metadata.reshape(-1, 1) with open(os.path.join(save_path, 'metadata.tsv'), 'w') as f: for row in metadata: f.write('\t'.join([str(x) for x in row]) + '\n')
def _make_metadata_tsv(metadata, save_path)
Given an `NDArray` or a `numpy.ndarray` or a list as metadata e.g. labels, save the flattened array into the file metadata.tsv under the path provided by the user. The labels can be 1D or 2D with multiple labels per data point. Made to satisfy the requirement in the following link: https://www.tensorflow.org/programmers_guide/embedding#metadata
2.18149
2.022094
1.078827
if isinstance(images, np.ndarray): images = nd.array(images, dtype=images.dtype, ctx=current_context()) elif not isinstance(images, (NDArray, np.ndarray)): raise TypeError('images must be an MXNet NDArray or numpy.ndarray,' ' while received type {}'.format(str(type(images)))) assert isinstance(images, NDArray) shape = images.shape nrow = int(np.ceil(np.sqrt(shape[0]))) _save_image( images, os.path.join(save_path, 'sprite.png'), nrow=nrow, padding=0, square_image=True)
def _make_sprite_image(images, save_path)
Given an NDArray as a batch images, make a sprite image out of it following the rule defined in https://www.tensorflow.org/programmers_guide/embedding and save it in sprite.png under the path provided by the user.
3.436628
3.246198
1.058663
with open(os.path.join(file_path, 'projector_config.pbtxt'), 'a') as f: s = 'embeddings {\n' s += 'tensor_name: "{}"\n'.format(data_dir) s += 'tensor_path: "{}"\n'.format(os.path.join(data_dir, 'tensors.tsv')) if has_metadata: s += 'metadata_path: "{}"\n'.format(os.path.join(data_dir, 'metadata.tsv')) if label_img_shape is not None: if len(label_img_shape) != 4: logging.warning('expected 4D sprite image in the format NCHW, while received image' ' ndim=%d, skipping saving sprite' ' image info', len(label_img_shape)) else: s += 'sprite {\n' s += 'image_path: "{}"\n'.format(os.path.join(data_dir, 'sprite.png')) s += 'single_image_dim: {}\n'.format(label_img_shape[3]) s += 'single_image_dim: {}\n'.format(label_img_shape[2]) s += '}\n' s += '}\n' f.write(s)
def _add_embedding_config(file_path, data_dir, has_metadata=False, label_img_shape=None)
Creates a config file used by the embedding projector. Adapted from the TensorFlow function `visualize_embeddings()` at https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/tensorboard/plugins/projector/__init__.py
2.403511
2.275091
1.056446
if isinstance(data, np.ndarray): data_list = data.tolist() elif isinstance(data, NDArray): data_list = data.asnumpy().tolist() else: raise TypeError('expected NDArray or np.ndarray, while received type {}'.format( str(type(data)))) with open(os.path.join(file_path, 'tensors.tsv'), 'w') as f: for x in data_list: x = [str(i) for i in x] f.write('\t'.join(x) + '\n')
def _save_embedding_tsv(data, file_path)
Given a 2D `NDArray` or a `numpy.ndarray` as the embedding, save it in tensors.tsv under the path provided by the user.
2.527216
2.103978
1.201161
# In the past, the first argument to summary ops was a tag, which allowed # arbitrary characters. Now we are changing the first argument to be the node # name. This has a number of advantages (users of summary ops now can # take advantage of the tf name scope system) but risks breaking existing # usage, because a much smaller set of characters are allowed in node names. # This function replaces all illegal characters with _s, and logs a warning. # It also strips leading slashes from the name. if name is not None: new_name = _INVALID_TAG_CHARACTERS.sub('_', name) new_name = new_name.lstrip('/') # Remove leading slashes if new_name != name: logging.warning('Summary name %s is illegal; using %s instead.', name, new_name) name = new_name return name
def _clean_tag(name)
Cleans a tag. Removes illegal characters for instance. Adapted from the TensorFlow function `clean_tag()` at https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/summary_op_util.py Parameters ---------- name : str The original tag name to be processed. Returns ------- The cleaned tag name.
6.506063
6.482557
1.003626
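A small sketch of the same cleaning rule. The character class below mirrors what TensorFlow's summary_op_util is generally described as allowing and is an assumption here, since _INVALID_TAG_CHARACTERS is not shown in the snippet above.

import re

# Assumed definition: TensorFlow node names allow roughly [A-Za-z0-9_.\-/]
_INVALID_TAG_CHARACTERS = re.compile(r'[^-/\w\.]+')

def clean_tag(name):
    # Replace disallowed characters and strip leading slashes, as in _clean_tag() above.
    return _INVALID_TAG_CHARACTERS.sub('_', name).lstrip('/')

print(clean_tag('images/img 1'))   # -> 'images/img_1'
print(clean_tag('/loss'))          # -> 'loss'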
tag = _clean_tag(tag) scalar = _make_numpy_array(scalar) assert(scalar.squeeze().ndim == 0), 'scalar should be 0D' scalar = float(scalar) return Summary(value=[Summary.Value(tag=tag, simple_value=scalar)])
def scalar_summary(tag, scalar)
Outputs a `Summary` protocol buffer containing a single scalar value. The generated Summary has a Tensor.proto containing the input Tensor. Adapted from the TensorFlow function `scalar()` at https://github.com/tensorflow/tensorflow/blob/r1.6/tensorflow/python/summary/summary.py Parameters ---------- tag : str A name for the generated summary. Will also serve as the series name in TensorBoard. scalar : int, MXNet `NDArray`, or `numpy.ndarray` A scalar value or an ndarray of shape (1,). Returns ------- A `Summary` protobuf of the `scalar` value. Raises ------ ValueError: If the scalar has the wrong shape or type.
3.452307
4.608978
0.74904
tag = _clean_tag(tag) values = _make_numpy_array(values) hist = _make_histogram(values.astype(float), bins) return Summary(value=[Summary.Value(tag=tag, histo=hist)])
def histogram_summary(tag, values, bins)
Outputs a `Summary` protocol buffer with a histogram. Adding a histogram summary makes it possible to visualize the data's distribution in TensorBoard. See detailed explanation of the TensorBoard histogram dashboard at https://www.tensorflow.org/get_started/tensorboard_histograms This op reports an `InvalidArgument` error if any value is not finite. Adapted from the TensorFlow function `histogram()` at https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/summary/summary.py Parameters ---------- tag : str A name for the summary of the histogram. Will also serve as a series name in TensorBoard. values : MXNet `NDArray` or `numpy.ndarray` Values for building the histogram. Returns ------- A `Summary` protobuf of the histogram.
3.477409
4.649336
0.747937
values = values.reshape(-1) counts, limits = np.histogram(values, bins=bins) limits = limits[1:] sum_sq = values.dot(values) return HistogramProto(min=values.min(), max=values.max(), num=len(values), sum=values.sum(), sum_squares=sum_sq, bucket_limit=limits, bucket=counts)
def _make_histogram(values, bins)
Converts values into a histogram proto using logic from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/histogram/histogram.cc
2.92291
2.467951
1.184347
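A short numpy-only illustration of the fields that end up in the HistogramProto above; the sample values are made up.

import numpy as np

values = np.array([0.1, 0.4, 0.4, 0.9])
counts, limits = np.histogram(values, bins=3)
print(counts)              # [1 2 1] -> the bucket field
print(limits[1:])          # upper edges only, as stored in bucket_limit
print(values.dot(values))  # 1.14 -> the sum_squares field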
tag = _clean_tag(tag) image = _prepare_image(image) image = _make_image(image) return Summary(value=[Summary.Value(tag=tag, image=image)])
def image_summary(tag, image)
Outputs a `Summary` protocol buffer with image(s). Parameters ---------- tag : str A name for the generated summary. Will also serve as a series name in TensorBoard. image : MXNet `NDArray` or `numpy.ndarray` Image data that is one of the following layout: (H, W), (C, H, W), (N, C, H, W). The pixel values of the image are assumed to be normalized in the range [0, 1]. The image will be rescaled to the range [0, 255] and cast to `np.uint8` before creating the image protobuf. Returns ------- A `Summary` protobuf of the image.
3.538652
4.650449
0.760927
assert isinstance(tensor, NDArray) if Image is None: raise ImportError('need to install PIL for visualizing images') height, width, channel = tensor.shape tensor = _make_numpy_array(tensor) image = Image.fromarray(tensor) output = io.BytesIO() image.save(output, format='PNG') image_string = output.getvalue() output.close() return Summary.Image(height=height, width=width, colorspace=channel, encoded_image_string=image_string)
def _make_image(tensor)
Converts an NDArray type image to Image protobuf
2.740776
2.393902
1.144899
audio = audio.squeeze() if audio.ndim != 1: raise ValueError('input audio must be squeezable to 1D, input audio squeezed ' 'shape is {}'.format(audio.shape)) audio = _make_numpy_array(audio) tensor_list = [int(32767.0 * x) for x in audio] fio = io.BytesIO() wave_writer = wave.open(fio, 'wb') wave_writer.setnchannels(1) wave_writer.setsampwidth(2) wave_writer.setframerate(sample_rate) tensor_enc = b'' for v in tensor_list: # pylint: disable=invalid-name tensor_enc += struct.pack('<h', v) wave_writer.writeframes(tensor_enc) wave_writer.close() audio_string = fio.getvalue() fio.close() audio = Summary.Audio(sample_rate=sample_rate, num_channels=1, length_frames=len(tensor_list), encoded_audio_string=audio_string, content_type='audio/wav') return Summary(value=[Summary.Value(tag=tag, audio=audio)])
def audio_summary(tag, audio, sample_rate=44100)
Outputs a `Summary` protocol buffer with audio data. Parameters ---------- tag : str A name for the generated summary. Will also serve as a series name in TensorBoard. audio : MXNet `NDArray` or `numpy.ndarray` Audio data that can be squeezed into 1D array. The values are in the range [-1, 1]. sample_rate : int Sampling frequency. 44,100Hz is a common sampling frequency. Returns ------- A `Summary` protobuf of the audio data.
2.516223
2.492583
1.009484
plugin_data = [SummaryMetadata.PluginData(plugin_name='text')] smd = SummaryMetadata(plugin_data=plugin_data) tensor = TensorProto(dtype='DT_STRING', string_val=[text.encode(encoding='utf_8')], tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)])) return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
def text_summary(tag, text)
Outputs a `Summary` protocol buffer with text data. Parameters ---------- tag : str A name for the generated summary. Will also serve as a series name in TensorBoard. text : str Text data. Returns ------- A `Summary` protobuf of the text data.
3.350461
3.182843
1.052663
# num_thresholds > 127 results in failure of creating protobuf, # probably a bug of protobuf if num_thresholds > 127: logging.warning('num_thresholds>127 would result in failure of creating pr_curve protobuf,' ' clipping it at 127') num_thresholds = 127 labels = _make_numpy_array(labels) predictions = _make_numpy_array(predictions) if weights is not None: weights = _make_numpy_array(weights) data = _compute_curve(labels, predictions, num_thresholds=num_thresholds, weights=weights) pr_curve_plugin_data = PrCurvePluginData(version=0, num_thresholds=num_thresholds).SerializeToString() plugin_data = [SummaryMetadata.PluginData(plugin_name='pr_curves', content=pr_curve_plugin_data)] smd = SummaryMetadata(plugin_data=plugin_data) tensor = TensorProto(dtype='DT_FLOAT', float_val=data.reshape(-1).tolist(), tensor_shape=TensorShapeProto( dim=[TensorShapeProto.Dim(size=data.shape[0]), TensorShapeProto.Dim(size=data.shape[1])])) return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
def pr_curve_summary(tag, labels, predictions, num_thresholds, weights=None)
Outputs a precision-recall curve `Summary` protocol buffer. Parameters ---------- tag : str A tag attached to the summary. Used by TensorBoard for organization. labels : MXNet `NDArray` or `numpy.ndarray`. The ground truth values. A tensor of 0/1 values with arbitrary shape. predictions : MXNet `NDArray` or `numpy.ndarray`. A float32 tensor whose values are in the range `[0, 1]`. Dimensions must match those of `labels`. num_thresholds : int Number of thresholds, evenly distributed in `[0, 1]`, to compute PR metrics for. Should be `>= 2`. This value should be a constant integer value, not a tensor that stores an integer. The thresholds for computing the pr curves are calculated in the following way: `width = 1.0 / (num_thresholds - 1), thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]`. weights : MXNet `NDArray` or `numpy.ndarray`. Optional float32 tensor. Individual counts are multiplied by this value. This tensor must be either the same shape as or broadcastable to the `labels` tensor. Returns ------- A `Summary` protobuf of the pr_curve.
2.974266
3.127773
0.950921
if weights is None: weights = 1.0 # Compute bins of true positives and false positives. bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1))) float_labels = labels.astype(np.float) histogram_range = (0, num_thresholds - 1) tp_buckets, _ = np.histogram( bucket_indices, bins=num_thresholds, range=histogram_range, weights=float_labels * weights) fp_buckets, _ = np.histogram( bucket_indices, bins=num_thresholds, range=histogram_range, weights=(1.0 - float_labels) * weights) # Obtain the reverse cumulative sum. tp = np.cumsum(tp_buckets[::-1])[::-1] fp = np.cumsum(fp_buckets[::-1])[::-1] tn = fp[0] - fp fn = tp[0] - tp precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp) recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn) return np.stack((tp, fp, tn, fn, precision, recall))
def _compute_curve(labels, predictions, num_thresholds, weights=None)
This function is another implementation of functions in https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py
2.256516
2.253552
1.001315
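A minimal numpy sketch of the bucketing step performed by _compute_curve() above: each prediction is assigned to one of num_thresholds evenly spaced buckets over [0, 1], and the label-weighted histogram gives per-bucket true-positive counts. The labels and predictions are made up.

import numpy as np

labels = np.array([0, 1, 1, 1], dtype=float)
predictions = np.array([0.1, 0.4, 0.6, 0.9])
num_thresholds = 5

bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
tp_buckets, _ = np.histogram(bucket_indices, bins=num_thresholds,
                             range=(0, num_thresholds - 1), weights=labels)
print(bucket_indices)   # [0 1 2 3]
print(tp_buckets)       # per-bucket true-positive counts: [0. 1. 1. 1. 0.]
# The full function then takes reverse cumulative sums to get tp/fp/tn/fn per threshold
# and stacks (tp, fp, tn, fn, precision, recall) into a (6, num_thresholds) array.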
if not isinstance(sym, Symbol): raise TypeError('sym must be an `mxnet.symbol.Symbol`,' ' received type {}'.format(str(type(sym)))) conf = json.loads(sym.tojson()) nodes = conf['nodes'] data2op = {} # key: data id, value: list of ops to whom data is an input for i, node in enumerate(nodes): if node['op'] != 'null': # node is an operator input_list = node['inputs'] for idx in input_list: if idx[0] == 0: # do not include 'data' node in the op scope continue if idx[0] in data2op: # nodes[idx[0]] is a data as an input to op nodes[i] data2op[idx[0]].append(i) else: data2op[idx[0]] = [i] # In the following, we group data with operators they belong to # by attaching them with operator names as scope names. # The parameters with the operator name as the prefix will be # assigned with the scope name of that operator. For example, # a convolution op has name 'conv', while its weight and bias # have name 'conv_weight' and 'conv_bias'. In the end, the operator # has scope name 'conv' prepended to its name, i.e. 'conv/conv'. # The parameters are named 'conv/conv_weight' and 'conv/conv_bias'. node_defs = [] for i, node in enumerate(nodes): node_name = node['name'] op_name = node['op'] kwargs = {'op': op_name, 'name': node_name} if op_name != 'null': # node is an operator inputs = [] input_list = node['inputs'] for idx in input_list: input_node = nodes[idx[0]] input_node_name = input_node['name'] if input_node['op'] != 'null': inputs.append(_scoped_name(input_node_name, input_node_name)) elif idx[0] in data2op and len(data2op[idx[0]]) == 1 and data2op[idx[0]][0] == i: # the data is only as an input to nodes[i], no else inputs.append(_scoped_name(node_name, input_node_name)) else: # the data node has no scope name, e.g. 'data' as the input node inputs.append(input_node_name) kwargs['input'] = inputs kwargs['name'] = _scoped_name(node_name, node_name) elif i in data2op and len(data2op[i]) == 1: # node is a data node belonging to one op, find out which operator this node belongs to op_node_name = nodes[data2op[i][0]]['name'] kwargs['name'] = _scoped_name(op_node_name, node_name) if 'attrs' in node: # TensorBoard would escape quotation marks, replace it with space attr = json.dumps(node['attrs'], sort_keys=True).replace("\"", ' ') attr = {'param': AttrValue(s=attr.encode(encoding='utf-8'))} kwargs['attr'] = attr node_def = NodeDef(**kwargs) node_defs.append(node_def) return node_defs
def _get_nodes_from_symbol(sym)
Given a symbol, return a list of `NodeDef`s for visualizing the graph in TensorBoard.
3.214533
3.12561
1.02845
mask = {} indices = range(data.shape[0]) lags = lags or [0] criteria = criteria or {'framewise_displacement': ('>', 0.5), 'std_dvars': ('>', 1.5)} for metric, (criterion, threshold) in criteria.items(): if criterion == '<': mask[metric] = set(np.where(data[metric] < threshold)[0]) elif criterion == '>': mask[metric] = set(np.where(data[metric] > threshold)[0]) mask = reduce((lambda x, y: x | y), mask.values()) for lag in lags: mask = set([m + lag for m in mask]) | mask mask = mask.intersection(indices) if minimum_contiguous is not None: post_final = data.shape[0] + 1 epoch_length = np.diff(sorted(mask | set([-1, post_final]))) - 1 epoch_end = sorted(mask | set([post_final])) for end, length in zip(epoch_end, epoch_length): if length < minimum_contiguous: mask = mask | set(range(end - length, end)) mask = mask.intersection(indices) if output == 'mask': spikes = np.zeros(data.shape[0]) spikes[list(mask)] = 1 spikes = pd.DataFrame(data=spikes, columns=[header_prefix]) else: spikes = np.zeros((max(indices)+1, len(mask))) for i, m in enumerate(sorted(mask)): spikes[m, i] = 1 header = ['{:s}{:02d}'.format(header_prefix, vol) for vol in range(len(mask))] spikes = pd.DataFrame(data=spikes, columns=header) if concatenate: return pd.concat((data, spikes), axis=1) else: return spikes
def spike_regressors(data, criteria=None, header_prefix='motion_outlier', lags=None, minimum_contiguous=None, concatenate=True, output='spikes')
Add spike regressors to a confound/nuisance matrix. Parameters ---------- data: pandas DataFrame object A tabulation of observations from which spike regressors should be estimated. criteria: dict{str: ('>' or '<', float)} Criteria for generating a spike regressor. If, for a given frame, the value of the variable corresponding to the key exceeds the threshold indicated by the value, then a spike regressor is created for that frame. By default, the strategy from Power 2014 is implemented: any frames with FD greater than 0.5 or standardised DV greater than 1.5 are flagged for censoring. header_prefix: str The prefix used to indicate spike regressors in the output data table. lags: list(int) A list indicating the frames to be censored relative to each flag. For instance, [0] censors the flagged frame, while [0, 1] censors both the flagged frame and the following frame. minimum_contiguous: int or None The minimum number of contiguous frames that must be unflagged for spike regression. If any series of contiguous unflagged frames is shorter than the specified minimum, then all of those frames will additionally have spike regressors implemented. concatenate: bool Indicates whether the returned object should include only spikes (if false) or all input time series and spikes (if true, default). output: str Indicates whether the output should be formatted as spike regressors ('spikes', a separate column for each outlier) or as a temporal mask ('mask', a single output column indicating the locations of outliers). Returns ------- data: pandas DataFrame object The input DataFrame with a column for each spike regressor. References ---------- Power JD, Mitra A, Laumann TO, Snyder AZ, Schlaggar BL, Petersen SE (2014) Methods to detect, characterize, and remove motion artifact in resting state fMRI. NeuroImage.
2.74229
2.676166
1.024709
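A small usage sketch for `spike_regressors` on toy confound values; the column names follow the defaults documented above:

import pandas as pd

confounds = pd.DataFrame({
    'framewise_displacement': [0.1, 0.7, 0.2, 0.1, 0.9, 0.1],
    'std_dvars': [1.0, 1.2, 1.8, 1.1, 1.0, 1.0],
})
# Default criteria flag frames 1 and 4 (FD > 0.5) and frame 2 (std DVARS > 1.5),
# producing one motion_outlier column per flagged frame.
spikes = spike_regressors(confounds, lags=[0], concatenate=False)
print(spikes)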
variables_deriv = OrderedDict()
data_deriv = OrderedDict()
if 0 in order:
    data_deriv[0] = data[variables]
    variables_deriv[0] = variables
    order = set(order) - set([0])
for o in order:
    variables_deriv[o] = ['{}_derivative{}'.format(v, o) for v in variables]
    data_deriv[o] = np.tile(np.nan, data[variables].shape)
    data_deriv[o][o:, :] = np.diff(data[variables], n=o, axis=0)
variables_deriv = reduce((lambda x, y: x + y), variables_deriv.values())
data_deriv = pd.DataFrame(columns=variables_deriv,
                          data=np.concatenate([*data_deriv.values()], axis=1))
return (variables_deriv, data_deriv)
def temporal_derivatives(order, variables, data)
Compute temporal derivative terms by the method of backwards differences. Parameters ---------- order: range or list(int) A list of temporal derivative terms to include. For instance, [1, 2] indicates that the first and second derivative terms should be added. To retain the original terms, 0 *must* be included in the list. variables: list(str) List of variables for which temporal derivative terms should be computed. data: pandas DataFrame object Table of values of all observations of all variables. Returns ------- variables_deriv: list A list of variables to include in the final data frame after adding the specified derivative terms. data_deriv: pandas DataFrame object Table of values of all observations of all variables, including any specified derivative terms.
2.682291
2.585475
1.037446
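A small sketch of the backwards-difference expansion on a single toy regressor:

import pandas as pd

df = pd.DataFrame({'x': [1.0, 2.0, 4.0, 7.0]})
names, expanded = temporal_derivatives([0, 1], ['x'], df)
# names -> ['x', 'x_derivative1']; the first row of the derivative column is NaN.
print(expanded)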
variables_exp = OrderedDict()
data_exp = OrderedDict()
if 1 in order:
    data_exp[1] = data[variables]
    variables_exp[1] = variables
    order = set(order) - set([1])
for o in order:
    variables_exp[o] = ['{}_power{}'.format(v, o) for v in variables]
    data_exp[o] = data[variables]**o
variables_exp = reduce((lambda x, y: x + y), variables_exp.values())
data_exp = pd.DataFrame(columns=variables_exp,
                        data=np.concatenate([*data_exp.values()], axis=1))
return (variables_exp, data_exp)
def exponential_terms(order, variables, data)
Compute exponential expansions. Parameters ---------- order: range or list(int) A list of exponential terms to include. For instance, [1, 2] indicates that the first and second exponential terms should be added. To retain the original terms, 1 *must* be included in the list. variables: list(str) List of variables for which exponential terms should be computed. data: pandas DataFrame object Table of values of all observations of all variables. Returns ------- variables_exp: list A list of variables to include in the final data frame after adding the specified exponential terms. data_exp: pandas DataFrame object Table of values of all observations of all variables, including any specified exponential terms.
2.787845
2.694917
1.034482
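A small sketch of the power expansion on a toy regressor:

import pandas as pd

df = pd.DataFrame({'x': [1.0, 2.0, 3.0]})
names, expanded = exponential_terms([1, 2], ['x'], df)
# names -> ['x', 'x_power2']
print(expanded)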
order = order.split('-')
order = [int(o) for o in order]
if len(order) > 1:
    order = range(order[0], (order[-1] + 1))
return order
def _order_as_range(order)
Convert a hyphenated string representing order for derivative or exponential terms into a range object that can be passed as input to the appropriate expansion function.
2.567503
2.677654
0.958863
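For illustration, the hyphenated form expands to an inclusive range, while a single number stays a one-element list:

print(list(_order_as_range('2-4')))  # [2, 3, 4]
print(_order_as_range('3'))          # [3]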
if re.search(r'\^\^[0-9]+$', expr):
    order = re.compile(r'\^\^([0-9]+)$').findall(expr)
    order = range(1, int(*order) + 1)
    variables, data = exponential_terms(order, variables, data)
elif re.search(r'\^[0-9]+[\-]?[0-9]*$', expr):
    order = re.compile(r'\^([0-9]+[\-]?[0-9]*)').findall(expr)
    order = _order_as_range(*order)
    variables, data = exponential_terms(order, variables, data)
return variables, data
def _check_and_expand_exponential(expr, variables, data)
Check if the current operation specifies exponential expansion. ^^6 specifies all powers up to the 6th, ^5-6 the 5th and 6th powers, ^6 the 6th only.
2.887493
2.860055
1.009593
if re.search(r'^dd[0-9]+', expr):
    order = re.compile(r'^dd([0-9]+)').findall(expr)
    order = range(0, int(*order) + 1)
    (variables, data) = temporal_derivatives(order, variables, data)
elif re.search(r'^d[0-9]+[\-]?[0-9]*', expr):
    order = re.compile(r'^d([0-9]+[\-]?[0-9]*)').findall(expr)
    order = _order_as_range(*order)
    (variables, data) = temporal_derivatives(order, variables, data)
return variables, data
def _check_and_expand_derivative(expr, variables, data)
Check if the current operation specifies a temporal derivative. dd6x specifies all derivatives up to the 6th, d5-6x the 5th and 6th, d6x the 6th only.
3.034133
2.704741
1.121783
grouping_depth = 0
for i, char in enumerate(expression):
    if char == '(':
        if grouping_depth == 0:
            formula_delimiter = i + 1
        grouping_depth += 1
    elif char == ')':
        grouping_depth -= 1
        if grouping_depth == 0:
            expr = expression[formula_delimiter:i].strip()
            return parse_formula(expr, parent_data)
return variables, data
def _check_and_expand_subformula(expression, parent_data, variables, data)
Check if the current operation contains a suboperation, and parse it where appropriate.
2.815257
2.767668
1.017195
variables = None
data = None
variables, data = _check_and_expand_subformula(expression, parent_data, variables, data)
variables, data = _check_and_expand_exponential(expression, variables, data)
variables, data = _check_and_expand_derivative(expression, variables, data)
if variables is None:
    expr = expression.strip()
    variables = [expr]
    data = parent_data[expr]
return variables, data
def parse_expression(expression, parent_data)
Parse an expression in a model formula. Parameters ---------- expression: str Formula expression: either a single variable or a variable group paired with an operation (exponentiation or differentiation). parent_data: pandas DataFrame The source data for the model expansion. Returns ------- variables: list A list of variables in the provided formula expression. data: pandas DataFrame A tabulation of all terms in the provided formula expression.
3.104138
2.849263
1.089453
wm = 'white_matter'
gsr = 'global_signal'
rps = 'trans_x + trans_y + trans_z + rot_x + rot_y + rot_z'
fd = 'framewise_displacement'
acc = _get_matches_from_data('a_comp_cor_[0-9]+', variables)
tcc = _get_matches_from_data('t_comp_cor_[0-9]+', variables)
dv = _get_matches_from_data('^std_dvars$', variables)
dvall = _get_matches_from_data('.*dvars', variables)
nss = _get_matches_from_data('non_steady_state_outlier[0-9]+', variables)
spikes = _get_matches_from_data('motion_outlier[0-9]+', variables)

model_formula = re.sub('wm', wm, model_formula)
model_formula = re.sub('gsr', gsr, model_formula)
model_formula = re.sub('rps', rps, model_formula)
model_formula = re.sub('fd', fd, model_formula)
model_formula = re.sub('acc', acc, model_formula)
model_formula = re.sub('tcc', tcc, model_formula)
model_formula = re.sub('dv', dv, model_formula)
model_formula = re.sub('dvall', dvall, model_formula)
model_formula = re.sub('nss', nss, model_formula)
model_formula = re.sub('spikes', spikes, model_formula)

formula_variables = _get_variables_from_formula(model_formula)
others = ' + '.join(set(variables) - set(formula_variables))
model_formula = re.sub('others', others, model_formula)
return model_formula
def _expand_shorthand(model_formula, variables)
Expand shorthand terms in the model formula.
2.400448
2.388506
1.005
matches = ['_power[0-9]+', '_derivative[0-9]+']
var = OrderedDict((c, deque()) for c in parent_data.columns)
for c in data.columns:
    col = c
    for m in matches:
        col = re.sub(m, '', col)
    if col == c:
        var[col].appendleft(c)
    else:
        var[col].append(c)
unscrambled = reduce((lambda x, y: x + y), var.values())
return data[[*unscrambled]]
def _unscramble_regressor_columns(parent_data, data)
Reorder the columns of a confound matrix such that the columns are in the same order as the input data with any expansion columns inserted immediately after the originals.
3.94802
3.860067
1.022785
variables = {}
data = {}
expr_delimiter = 0
grouping_depth = 0
model_formula = _expand_shorthand(model_formula, parent_data.columns)
for i, char in enumerate(model_formula):
    if char == '(':
        grouping_depth += 1
    elif char == ')':
        grouping_depth -= 1
    elif grouping_depth == 0 and char == '+':
        expression = model_formula[expr_delimiter:i].strip()
        variables[expression] = None
        data[expression] = None
        expr_delimiter = i + 1
expression = model_formula[expr_delimiter:].strip()
variables[expression] = None
data[expression] = None
for expression in list(variables):
    if expression[0] == '(' and expression[-1] == ')':
        (variables[expression],
         data[expression]) = parse_formula(expression[1:-1], parent_data)
    else:
        (variables[expression],
         data[expression]) = parse_expression(expression, parent_data)
variables = list(set(reduce((lambda x, y: x + y), variables.values())))
data = pd.concat((data.values()), axis=1)

if unscramble:
    data = _unscramble_regressor_columns(parent_data, data)

return variables, data
def parse_formula(model_formula, parent_data, unscramble=False)
Recursively parse a model formula by breaking it into additive atoms and tracking grouping symbol depth. Parameters ---------- model_formula: str Expression for the model formula, e.g. '(a + b)^^2 + dd1(c + (d + e)^3) + f' Note that any expressions to be expanded *must* be in parentheses, even if they include only a single variable (e.g., (x)^2, not x^2). parent_data: pandas DataFrame A tabulation of all values usable in the model formula. Each additive term in `model_formula` should correspond either to a variable in this data frame or to instructions for operating on a variable (for instance, computing temporal derivatives or exponential terms). Temporal derivative options: * d6(variable) for the 6th temporal derivative * dd6(variable) for all temporal derivatives up to the 6th * d4-6(variable) for the 4th through 6th temporal derivatives * 0 must be included in the temporal derivative range for the original term to be returned when temporal derivatives are computed. Exponential options: * (variable)^6 for the 6th power * (variable)^^6 for all powers up to the 6th * (variable)^4-6 for the 4th through 6th powers * 1 must be included in the powers range for the original term to be returned when exponential terms are computed. Temporal derivatives and exponential terms are computed for all terms in the grouping symbols that they adjoin. Returns ------- variables: list(str) A list of variables included in the model parsed from the provided formula. data: pandas DataFrame All values in the complete model.
2.510636
2.557268
0.981765
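A usage sketch of the formula mini-language, assuming the module's private helpers (e.g. _get_matches_from_data) are importable alongside parse_formula; the column names are illustrative:

import pandas as pd

df = pd.DataFrame({
    'a': [1.0, 2.0, 3.0, 4.0],
    'b': [0.5, 0.4, 0.3, 0.2],
    'c': [2.0, 2.5, 3.0, 3.5],
})
# Quadratic expansion of a and b, plus c and its first temporal derivative.
variables, expanded = parse_formula('(a + b)^^2 + dd1(c)', df)
print(sorted(variables))
print(expanded.columns.tolist())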
import nibabel as nb
import os

# Load the input image
in_nii = nb.load(in_file)
# Load the mask image
mask_nii = nb.load(mask_file)
# Set all non-mask voxels in the input file to zero.
data = in_nii.get_data()
data[mask_nii.get_data() == 0] = 0
# Save the new masked image.
new_nii = nb.Nifti1Image(data, in_nii.affine, in_nii.header)
new_nii.to_filename(new_name)
return os.path.abspath(new_name)
def mask(in_file, mask_file, new_name)
Apply a binary mask to an image. Parameters ---------- in_file : str Path to a NIfTI file to mask mask_file : str Path to a binary mask new_name : str Path/filename for the masked output image. Returns ------- str Absolute path of the masked output image. Notes ----- in_file and mask_file must be in the same image space and have the same dimensions.
1.915724
1.988675
0.963317
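A one-line usage sketch; the file names are illustrative:

masked = mask('sub-01_T1w.nii.gz', 'sub-01_brainmask.nii.gz',
              'sub-01_T1w_masked.nii.gz')
print(masked)  # absolute path of the masked image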
import os import numpy as np import nibabel as nb from nipype.utils.filemanip import fname_presuffix if out_path is None: out_path = fname_presuffix(in_file, suffix='_cfm', newpath=os.getcwd()) else: out_path = os.path.abspath(out_path) if not global_mask and not lesion_mask: NIWORKFLOWS_LOG.warning( 'No lesion mask was provided and global_mask not requested, ' 'therefore the original mask will not be modified.') # Load the input image in_img = nb.load(in_file) # If we want a global mask, create one based on the input image. data = np.ones(in_img.shape, dtype=np.uint8) if global_mask else in_img.get_data() if set(np.unique(data)) - {0, 1}: raise ValueError("`global_mask` must be true if `in_file` is not a binary mask") # If a lesion mask was provided, combine it with the secondary mask. if lesion_mask is not None: # Reorient the lesion mask and get the data. lm_img = nb.as_closest_canonical(nb.load(lesion_mask)) # Subtract lesion mask from secondary mask, set negatives to 0 data = np.fmax(data - lm_img.get_data(), 0) # Cost function mask will be created from subtraction # Otherwise, CFM will be created from global mask cfm_img = nb.Nifti1Image(data, in_img.affine, in_img.header) # Save the cost function mask. cfm_img.set_data_dtype(np.uint8) cfm_img.to_filename(out_path) return out_path
def create_cfm(in_file, lesion_mask=None, global_mask=True, out_path=None)
Create a mask to constrain registration. Parameters ---------- in_file : str Path to an existing image (usually a mask). If global_mask = True, this is used as a size/dimension reference. out_path : str Path/filename for the new cost function mask. lesion_mask : str, optional Path to an existing binary lesion mask. global_mask : bool Create a whole-image mask (True) or limit to reference mask (False) A whole image-mask is 1 everywhere Returns ------- str Absolute path of the new cost function mask. Notes ----- in_file and lesion_mask must be in the same image space and have the same dimensions
3.012302
2.869976
1.049591
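A usage sketch with illustrative paths, excluding a lesion from an otherwise whole-image cost-function mask:

cfm_path = create_cfm('sub-01_T1w_brainmask.nii.gz',
                      lesion_mask='sub-01_lesion_roi.nii.gz',
                      global_mask=True)
print(cfm_path)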
# If user-defined settings exist...
if isdefined(self.inputs.settings):
    # Note this in the log and return those settings.
    NIWORKFLOWS_LOG.info('User-defined settings, overriding defaults')
    return self.inputs.settings

# Define a prefix for output files based on the modality of the moving image.
filestart = '{}-mni_registration_{}_'.format(
    self.inputs.moving.lower(), self.inputs.flavor)

# Get a list of settings files that match the flavor.
filenames = [i for i in pkgr.resource_listdir('niworkflows', 'data')
             if i.startswith(filestart) and i.endswith('.json')]
# Return the settings files.
return [pkgr.resource_filename('niworkflows.data', f) for f in sorted(filenames)]
def _get_settings(self)
Return any settings defined by the user, as well as any pre-defined settings files that exist for the image modalities to be registered.
6.934263
6.146121
1.128234
r s = "\n".join(source) if s.find("$") == -1: return # This searches for "$blah$" inside a pair of curly braces -- # don't change these, since they're probably coming from a nested # math environment. So for each match, we replace it with a temporary # string, and later on we substitute the original back. global _data _data = {} def repl(matchobj): global _data s = matchobj.group(0) t = "___XXX_REPL_%d___" % len(_data) _data[t] = s return t s = re.sub(r"({[^{}$]*\$[^{}$]*\$[^{}]*})", repl, s) # matches $...$ dollars = re.compile(r"(?<!\$)(?<!\\)\$([^\$]+?)\$") # regular expression for \$ slashdollar = re.compile(r"\\\$") s = dollars.sub(r":math:`\1`", s) s = slashdollar.sub(r"$", s) # change the original {...} things in: for r in _data: s = s.replace(r, _data[r]) # now save results in "source" source[:] = [s]
def dollars_to_math(source)
r""" Replace dollar signs with backticks. More precisely, do a regular expression search. Replace a plain dollar sign ($) by a backtick (`). Replace an escaped dollar sign (\$) by a dollar sign ($). Don't change a dollar sign preceded or followed by a backtick (`$ or $`), because of strings like "``$HOME``". Don't make any changes on lines starting with spaces, because those are indented and hence part of a block of code or examples. This also doesn't replaces dollar signs enclosed in curly braces, to avoid nested math environments, such as :: $f(n) = 0 \text{ if $n$ is prime}$ Thus the above line would get changed to `f(n) = 0 \text{ if $n$ is prime}`
5.486075
5.605058
0.978772
self._fixed_image = self.inputs.after
self._moving_image = self.inputs.before
self._contour = self.inputs.wm_seg if isdefined(self.inputs.wm_seg) else None
NIWORKFLOWS_LOG.info(
    'Report - setting before (%s) and after (%s) images',
    self._fixed_image, self._moving_image)

return super(SimpleBeforeAfterRPT, self)._post_run_hook(runtime)
def _post_run_hook(self, runtime)
There is no inner interface to run.
6.514266
6.304049
1.033346
data_dir = data_dir or ''
default_dirs = [Path(d).expanduser().resolve()
                for d in os.getenv('CRN_SHARED_DATA', '').split(os.pathsep)
                if d.strip()]
default_dirs += [Path(d).expanduser().resolve()
                 for d in os.getenv('CRN_DATA', '').split(os.pathsep)
                 if d.strip()]
default_dirs += [NIWORKFLOWS_CACHE_DIR]
return [Path(d).expanduser()
        for d in data_dir.split(os.pathsep) if d.strip()] or default_dirs
def _get_data_path(data_dir=None)
Get data storage directory data_dir: str Path of the data directory. Used to force data storage in a specified location. :returns: a list of paths where the dataset could be stored, ordered by priority
3.111761
3.351086
0.928583
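A sketch of the lookup order; the environment variable value and paths are illustrative:

import os

os.environ['CRN_SHARED_DATA'] = '/opt/crn_data'
print(_get_data_path())                     # env-var paths, then the default cache dir
print(_get_data_path('/scratch/datasets'))  # an explicit data_dir takes precedence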
dataset_folder = dataset_name if not dataset_prefix \ else '%s%s' % (dataset_prefix, dataset_name) default_paths = default_paths or '' paths = [p / dataset_folder for p in _get_data_path(data_dir)] all_paths = [Path(p) / dataset_folder for p in default_paths.split(os.pathsep)] + paths # Check if the dataset folder exists somewhere and is not empty for path in all_paths: if path.is_dir() and list(path.iterdir()): if verbose > 1: NIWORKFLOWS_LOG.info( 'Dataset "%s" already cached in %s', dataset_name, path) return path, True for path in paths: if verbose > 0: NIWORKFLOWS_LOG.info( 'Dataset "%s" not cached, downloading to %s', dataset_name, path) path.mkdir(parents=True, exist_ok=True) return path, False
def _get_dataset(dataset_name, dataset_prefix=None, data_dir=None, default_paths=None, verbose=1)
Create if necessary and returns data directory of given dataset. data_dir: str Path of the data directory. Used to force data storage in a specified location. default_paths: list(str) Default system paths in which the dataset may already have been installed by a third party software. They will be checked first. verbose: int verbosity level (0 means no message). :returns: the path of the given dataset directory. :rtype: str .. note:: This function retrieves the datasets directory (or data directory) using the following priority : 1. defaults system paths 2. the keyword argument data_dir 3. the global environment variable CRN_SHARED_DATA 4. the user environment variable CRN_DATA 5. ~/.cache/stanford-crn in the user home folder
2.975024
3.267021
0.910623
path = os.readlink(link)
if op.isabs(path):
    return path
return op.join(op.dirname(link), path)
def readlinkabs(link)
Return an absolute path for the destination of a symlink
2.582408
2.809552
0.919153
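For example, with an illustrative symlink whose target is relative:

# If /tmp/link.txt points at 'data/file.txt', the result is resolved
# relative to the directory containing the link.
print(readlinkabs('/tmp/link.txt'))  # /tmp/data/file.txt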
with Path(path).open('rb') as fhandle:
    md5sum = hashlib.md5()
    while True:
        data = fhandle.read(8192)
        if not data:
            break
        md5sum.update(data)
return md5sum.hexdigest()
def _md5_sum_file(path)
Calculates the MD5 sum of a file.
1.808751
1.785344
1.013111
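A usage sketch verifying a download against a known digest (the file name and expected value are placeholders):

expected = 'd41d8cd98f00b204e9800998ecf8427e'  # placeholder digest
if _md5_sum_file('archive.tar.gz') != expected:
    raise IOError('MD5 mismatch for archive.tar.gz')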
try: if total_size is None: total_size = response.info().get('Content-Length').strip() total_size = int(total_size) + initial_size except Exception as exc: if verbose > 2: NIWORKFLOWS_LOG.warn('Total size of chunk could not be determined') if verbose > 3: NIWORKFLOWS_LOG.warn("Full stack trace: %s", str(exc)) total_size = None bytes_so_far = initial_size t_0 = time_last_display = time.time() while True: chunk = response.read(chunk_size) bytes_so_far += len(chunk) time_last_read = time.time() if (report_hook and # Refresh report every half second or when download is # finished. (time_last_read > time_last_display + 0.5 or not chunk)): _chunk_report_(bytes_so_far, total_size, initial_size, t_0) time_last_display = time_last_read if chunk: local_file.write(chunk) else: break return
def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, initial_size=0, total_size=None, verbose=1)
Download a file chunk by chunk and show advancement :param urllib.response.addinfourl response: response to the download request in order to get file size :param str local_file: hard disk file where data should be written :param int chunk_size: size of downloaded chunks. Default: 8192 :param bool report_hook: whether or not to show downloading advancement :param int initial_size: if resuming, indicate the initial size of the file :param int total_size: Expected final size of download (None means it is unknown). :param int verbose: verbosity level (0 means no message). :returns: the downloaded file path. :rtype: string
3.005439
3.110784
0.966136
if not total_size: sys.stderr.write("\rDownloaded {0:d} of ? bytes.".format(bytes_so_far)) else: # Estimate remaining download time total_percent = float(bytes_so_far) / total_size current_download_size = bytes_so_far - initial_size bytes_remaining = total_size - bytes_so_far delta_t = time.time() - t_0 download_rate = current_download_size / max(1e-8, float(delta_t)) # Minimum rate of 0.01 bytes/s, to avoid dividing by zero. time_remaining = bytes_remaining / max(0.01, download_rate) # Trailing whitespace is to erase extra char when message length # varies sys.stderr.write( "\rDownloaded {0:d} of {1:d} bytes ({2:.1f}%, {3!s} remaining)".format( bytes_so_far, total_size, total_percent * 100, _format_time(time_remaining)))
def _chunk_report_(bytes_so_far, total_size, initial_size, t_0)
Show downloading percentage. :param int bytes_so_far: number of downloaded bytes :param int total_size: total size of the file (may be 0/None, depending on download method). :param int t_0: the time in seconds (as returned by time.time()) at which the download was resumed / started. :param int initial_size: if resuming, indicate the initial size of the file. If not resuming, set to zero.
3.329981
3.32917
1.000244
# Read aseg data
bmask = aseg.copy()
bmask[bmask > 0] = 1
bmask = bmask.astype(np.uint8)

# Morphological operations
selem = sim.ball(ball_size)
newmask = sim.binary_closing(bmask, selem)
newmask = binary_fill_holes(newmask.astype(np.uint8), selem).astype(np.uint8)

return newmask.astype(np.uint8)
def refine_aseg(aseg, ball_size=4)
First step to reconcile ANTs' and FreeSurfer's brain masks. Here, the ``aseg.mgz`` mask from FreeSurfer is refined in two steps, using binary morphological operations: 1. With a binary closing operation the sulci are included into the mask. This results in a smoother brain mask that does not exclude deep, wide sulci. 2. Fill any holes (typically, there could be a hole next to the pineal gland and the corpora quadrigemina if the great cerebral vein is segmented out).
2.815914
3.11144
0.90502
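A usage sketch with illustrative paths, refining an aseg-derived mask and saving it as NIfTI:

import nibabel as nb

aseg_img = nb.load('aseg.nii.gz')
refined = refine_aseg(aseg_img.get_data(), ball_size=4)
nb.Nifti1Image(refined, aseg_img.affine,
               aseg_img.header).to_filename('brainmask_refined.nii.gz')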
selem = sim.ball(bw) if ants_segs is None: ants_segs = np.zeros_like(aseg, dtype=np.uint8) aseg[aseg == 42] = 3 # Collapse both hemispheres gm = anat.copy() gm[aseg != 3] = 0 refined = refine_aseg(aseg) newrefmask = sim.binary_dilation(refined, selem) - refined indices = np.argwhere(newrefmask > 0) for pixel in indices: # When ATROPOS identified the pixel as GM, set and carry on if ants_segs[tuple(pixel)] == 2: refined[tuple(pixel)] = 1 continue window = gm[ pixel[0] - ww:pixel[0] + ww, pixel[1] - ww:pixel[1] + ww, pixel[2] - ww:pixel[2] + ww ] if np.any(window > 0): mu = window[window > 0].mean() sigma = max(window[window > 0].std(), 1.e-5) zstat = abs(anat[tuple(pixel)] - mu) / sigma refined[tuple(pixel)] = int(zstat < zval) refined = sim.binary_opening(refined, selem) return refined
def grow_mask(anat, aseg, ants_segs=None, ww=7, zval=2.0, bw=4)
Grow the mask to include pixels that have a high likelihood. GM tissue parameters are sampled in image patches of ``ww`` size. This is inspired by mindboggle's solution to the problem: https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py#L1660
3.529007
3.544843
0.995533
import nibabel as nb
import numpy as np
import os

fn = os.path.basename(in_file)
if not target_subject.startswith('fs'):
    return in_file

cortex = nb.freesurfer.read_label(os.path.join(
    subjects_dir, target_subject, 'label', '{}.cortex.label'.format(fn[:2])))
func = nb.load(in_file)
medial = np.delete(np.arange(len(func.darrays[0].data)), cortex)
for darray in func.darrays:
    darray.data[medial] = np.nan

out_file = os.path.join(newpath or os.getcwd(), fn)
func.to_filename(out_file)
return out_file
def medial_wall_to_nan(in_file, subjects_dir, target_subject, newpath=None)
Convert values on medial wall to NaNs
2.432442
2.371516
1.025691
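A usage sketch with illustrative paths for a left-hemisphere GIFTI resampled to fsaverage5:

out = medial_wall_to_nan('lh.task_bold.func.gii',
                         subjects_dir='/opt/freesurfer/subjects',
                         target_subject='fsaverage5')
print(out)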
if self._is_at_section(): return # If several signatures present, take the last one while True: summary = self._doc.read_to_next_empty_line() summary_str = " ".join([s.strip() for s in summary]).strip() if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): self['Signature'] = summary_str if not self._is_at_section(): continue break if summary is not None: self['Summary'] = summary if not self._is_at_section(): self['Extended Summary'] = self._read_to_next_section()
def _parse_summary(self)
Grab signature (if given) and summary
5.290898
4.821434
1.09737
''' generates a report showing nine slices, three per axis, of an
arbitrary volume of `in_files`, with the resulting segmentation
overlaid '''
self._anat_file = self.inputs.in_files[0]
outputs = self.aggregate_outputs(runtime=runtime)
self._mask_file = outputs.tissue_class_map
# We skip the CSF class because, in combination with the others,
# it only shows the skull-stripping mask
self._seg_files = outputs.tissue_class_files[1:]
self._masked = False

NIWORKFLOWS_LOG.info('Generating report for FAST (in_files %s, '
                     'segmentation %s, individual tissue classes %s).',
                     self.inputs.in_files,
                     outputs.tissue_class_map,
                     outputs.tissue_class_files)

return super(FASTRPT, self)._post_run_hook(runtime)
def _post_run_hook(self, runtime)
generates a report showing nine slices, three per axis, of an arbitrary volume of `in_files`, with the resulting segmentation overlaid
10.092503
5.730394
1.761223
''' generates a report showing nine slices, three per axis, of an
arbitrary volume of `in_files`, with the resulting segmentation
overlaid '''
outputs = self.aggregate_outputs(runtime=runtime)
self._anat_file = os.path.join(outputs.subjects_dir,
                               outputs.subject_id,
                               'mri', 'brain.mgz')
self._contour = os.path.join(outputs.subjects_dir,
                             outputs.subject_id,
                             'mri', 'ribbon.mgz')
self._masked = False

NIWORKFLOWS_LOG.info('Generating report for ReconAll (subject %s)',
                     outputs.subject_id)

return super(ReconAllRPT, self)._post_run_hook(runtime)
def _post_run_hook(self, runtime)
generates a report showing nine slices, three per axis, of an arbitrary volume of `in_files`, with the resulting segmentation overlaid
7.364975
3.642952
2.021705
''' generates a report showing nine slices, three per axis, of an
arbitrary volume of `in_files`, with the resulting segmentation
overlaid '''
outputs = self.aggregate_outputs(runtime=runtime)
self._melodic_dir = outputs.out_dir
NIWORKFLOWS_LOG.info('Generating report for MELODIC')

return super(MELODICRPT, self)._post_run_hook(runtime)
def _post_run_hook(self, runtime)
generates a report showing nine slices, three per axis, of an arbitrary volume of `in_files`, with the resulting segmentation overlaid
17.795265
5.13713
3.464048
try: base = app.config.github_project_url if not base: raise AttributeError if not base.endswith('/'): base += '/' except AttributeError as err: raise ValueError('github_project_url configuration value is not set (%s)' % str(err)) ref = base + type + '/' + slug + '/' set_classes(options) prefix = "#" if type == 'pull': prefix = "PR " + prefix node = nodes.reference(rawtext, prefix + utils.unescape(slug), refuri=ref, **options) return node
def make_link_node(rawtext, app, type, slug, options)
Create a link to a github resource. :param rawtext: Text being replaced with link node. :param app: Sphinx application context :param type: Link type (issues, changeset, etc.) :param slug: ID of the thing to link to :param options: Options dictionary passed to role func.
4.080116
3.781775
1.078889
try: issue_num = int(text) if issue_num <= 0: raise ValueError except ValueError: msg = inliner.reporter.error( 'GitHub issue number must be a number greater than or equal to 1; ' '"%s" is invalid.' % text, line=lineno) prb = inliner.problematic(rawtext, rawtext, msg) return [prb], [msg] app = inliner.document.settings.env.app #app.info('issue %r' % text) if 'pull' in name.lower(): category = 'pull' elif 'issue' in name.lower(): category = 'issues' else: msg = inliner.reporter.error( 'GitHub roles include "ghpull" and "ghissue", ' '"%s" is invalid.' % name, line=lineno) prb = inliner.problematic(rawtext, rawtext, msg) return [prb], [msg] node = make_link_node(rawtext, app, category, str(issue_num), options) return [node], []
def ghissue_role(name, rawtext, text, lineno, inliner, options={}, content=[])
Link to a GitHub issue. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. :param name: The role name used in the document. :param rawtext: The entire markup snippet, with role. :param text: The text marked with the role. :param lineno: The line number where rawtext appears in the input. :param inliner: The inliner instance that called us. :param options: Directive options for customization. :param content: The directive content for customization.
2.26643
2.255896
1.00467
app = inliner.document.settings.env.app
# app.info('user link %r' % text)
ref = 'https://www.github.com/' + text
node = nodes.reference(rawtext, text, refuri=ref, **options)
return [node], []
def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[])
Link to a GitHub user. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. :param name: The role name used in the document. :param rawtext: The entire markup snippet, with role. :param text: The text marked with the role. :param lineno: The line number where rawtext appears in the input. :param inliner: The inliner instance that called us. :param options: Directive options for customization. :param content: The directive content for customization.
3.34164
3.798531
0.879719
app = inliner.document.settings.env.app
# app.info('user link %r' % text)
try:
    base = app.config.github_project_url
    if not base:
        raise AttributeError
    if not base.endswith('/'):
        base += '/'
except AttributeError as err:
    raise ValueError('github_project_url configuration value is not set (%s)' % str(err))

ref = base + text
node = nodes.reference(rawtext, text[:6], refuri=ref, **options)
return [node], []
def ghcommit_role(name, rawtext, text, lineno, inliner, options={}, content=[])
Link to a GitHub commit. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. :param name: The role name used in the document. :param rawtext: The entire markup snippet, with role. :param text: The text marked with the role. :param lineno: The line number where rawtext appears in the input. :param inliner: The inliner instance that called us. :param options: Directive options for customization. :param content: The directive content for customization.
3.804386
4.149671
0.916792
app.info('Initializing GitHub plugin')
app.add_role('ghissue', ghissue_role)
app.add_role('ghpull', ghissue_role)
app.add_role('ghuser', ghuser_role)
app.add_role('ghcommit', ghcommit_role)
app.add_config_value('github_project_url', None, 'env')
return
def setup(app)
Install the plugin. :param app: Sphinx application context.
2.650626
2.782974
0.952444
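A Sphinx conf.py sketch; the extension module name and project URL are illustrative, not taken from the source:

# conf.py
extensions = ['github']  # the module providing the setup() above
github_project_url = 'https://github.com/your-org/your-project/'

# In reST source, the roles then resolve to repository links:
#   :ghissue:`42`  :ghpull:`128`  :ghuser:`someone`  :ghcommit:`abcdef1234`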
''' Set package_name >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> docwriter.root_path == sphinx.__path__[0] True >>> docwriter.package_name = 'docutils' >>> import docutils >>> docwriter.root_path == docutils.__path__[0] True ''' # It's also possible to imagine caching the module parsing here self._package_name = package_name root_module = self._import(package_name) self.root_path = root_module.__path__[-1] self.written_modules = None
def set_package_name(self, package_name)
Set package_name >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> docwriter.root_path == sphinx.__path__[0] True >>> docwriter.package_name = 'docutils' >>> import docutils >>> docwriter.root_path == docutils.__path__[0] True
5.231213
3.012271
1.736634
''' Import namespace package '''
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
    mod = getattr(mod, comp)
return mod
def _import(self, name)
Import namespace package
2.934889
3.007656
0.975806
''' Return module sequence discovered from ``self.package_name`` Parameters ---------- None Returns ------- mods : sequence Sequence of module names within ``self.package_name`` Examples -------- >>> dw = ApiDocWriter('sphinx') >>> mods = dw.discover_modules() >>> 'sphinx.util' in mods True >>> dw.package_skip_patterns.append('\.util$') >>> 'sphinx.util' in dw.discover_modules() False >>> ''' modules = [self.package_name] # raw directory parsing for dirpath, dirnames, filenames in os.walk(self.root_path): # Check directory names for packages root_uri = self._path2uri(os.path.join(self.root_path, dirpath)) # Normally, we'd only iterate over dirnames, but since # dipy does not import a whole bunch of modules we'll # include those here as well (the *.py filenames). filenames = [f[:-3] for f in filenames if f.endswith('.py') and not f.startswith('__init__')] for filename in filenames: package_uri = '/'.join((dirpath, filename)) for subpkg_name in dirnames + filenames: package_uri = '.'.join((root_uri, subpkg_name)) package_path = self._uri2path(package_uri) if (package_path and self._survives_exclude(package_uri, 'package')): modules.append(package_uri) return sorted(modules)
def discover_modules(self)
Return module sequence discovered from ``self.package_name`` Parameters ---------- None Returns ------- mods : sequence Sequence of module names within ``self.package_name`` Examples -------- >>> dw = ApiDocWriter('sphinx') >>> mods = dw.discover_modules() >>> 'sphinx.util' in mods True >>> dw.package_skip_patterns.append('\.util$') >>> 'sphinx.util' in dw.discover_modules() False >>>
5.03092
3.18964
1.577269
if isinstance(bids_dir, BIDSLayout): layout = bids_dir else: layout = BIDSLayout(str(bids_dir), validate=bids_validate) all_participants = set(layout.get_subjects()) # Error: bids_dir does not contain subjects if not all_participants: raise BIDSError( 'Could not find participants. Please make sure the BIDS data ' 'structure is present and correct. Datasets can be validated online ' 'using the BIDS Validator (http://bids-standard.github.io/bids-validator/).\n' 'If you are using Docker for Mac or Docker for Windows, you ' 'may need to adjust your "File sharing" preferences.', bids_dir) # No --participant-label was set, return all if not participant_label: return sorted(all_participants) if isinstance(participant_label, str): participant_label = [participant_label] # Drop sub- prefixes participant_label = [sub[4:] if sub.startswith('sub-') else sub for sub in participant_label] # Remove duplicates participant_label = sorted(set(participant_label)) # Remove labels not found found_label = sorted(set(participant_label) & all_participants) if not found_label: raise BIDSError('Could not find participants [{}]'.format( ', '.join(participant_label)), bids_dir) # Warn if some IDs were not found notfound_label = sorted(set(participant_label) - all_participants) if notfound_label: exc = BIDSError('Some participants were not found: {}'.format( ', '.join(notfound_label)), bids_dir) if strict: raise exc warnings.warn(exc.msg, BIDSWarning) return found_label
def collect_participants(bids_dir, participant_label=None, strict=False, bids_validate=True)
List the participants under the BIDS root and checks that participants designated with the participant_label argument exist in that folder. Returns the list of participants to be finally processed. Requesting all subjects in a BIDS directory root: >>> collect_participants(str(datadir / 'ds114'), bids_validate=False) ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10'] Requesting two subjects, given their IDs: >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '04'], ... bids_validate=False) ['02', '04'] Requesting two subjects, given their IDs (works with 'sub-' prefixes): >>> collect_participants(str(datadir / 'ds114'), participant_label=['sub-02', 'sub-04'], ... bids_validate=False) ['02', '04'] Requesting two subjects, but one does not exist: >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '14'], ... bids_validate=False) ['02'] >>> collect_participants( ... str(datadir / 'ds114'), participant_label=['02', '14'], ... strict=True, bids_validate=False) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): fmriprep.utils.bids.BIDSError: ...
2.872303
2.726972
1.053294
return _init_layout(in_file, bids_dir, validate).get_metadata( str(in_file))
def get_metadata_for_nifti(in_file, bids_dir=None, validate=True)
Fetch metadata for a given nifti file >>> metadata = get_metadata_for_nifti( ... datadir / 'ds054' / 'sub-100185' / 'fmap' / 'sub-100185_phasediff.nii.gz', ... validate=False) >>> metadata['Manufacturer'] 'SIEMENS' >>>
8.314244
12.616259
0.65901