code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
b = event.current_buffer # When already navigating through completions, select the next one. if b.complete_state: b.complete_next() else: event.cli.start_completion(insert_common_part=True, select_first=False)
def generate_completions(event)
r""" Tab-completion: where the first tab completes the common suffix and the second tab lists all the completions.
9.285877
9.267842
1.001946
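A minimal usage sketch for the handler above, assuming the prompt_toolkit 1.x registry API that later records in this table also use:

from prompt_toolkit.key_binding.registry import Registry
from prompt_toolkit.keys import Keys

registry = Registry()
# First Tab completes the common part; the next Tab cycles candidates.
registry.add_binding(Keys.ControlI)(generate_completions)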
# Request completions. b = event.current_buffer if b.completer is None: return complete_event = CompleteEvent(completion_requested=True) completions = list(b.completer.get_completions(b.document, complete_event)) # Calculate the common suffix. common_suffix = get_common_complete_suffix(b.document, completions) # One completion: insert it. if len(completions) == 1: b.delete_before_cursor(-completions[0].start_position) b.insert_text(completions[0].text) # Multiple completions with common part. elif common_suffix: b.insert_text(common_suffix) # Otherwise: display all completions. elif completions: _display_completions_like_readline(event.cli, completions)
def display_completions_like_readline(event)
Key binding handler for readline-style tab completion. This is meant to be as similar as possible to the way readline displays completions. Generate the completions immediately (blocking) and display them above the prompt in columns. Usage:: # Call this handler when 'Tab' has been pressed. registry.add_binding(Keys.ControlI)(display_completions_like_readline)
3.375026
3.586486
0.94104
from prompt_toolkit.shortcuts import create_confirm_application assert isinstance(completions, list) # Get terminal dimensions. term_size = cli.output.get_size() term_width = term_size.columns term_height = term_size.rows # Calculate amount of required columns/rows for displaying the # completions. (Keep in mind that completions are displayed # alphabetically column-wise.) max_compl_width = min(term_width, max(get_cwidth(c.text) for c in completions) + 1) column_count = max(1, term_width // max_compl_width) completions_per_page = column_count * (term_height - 1) page_count = int(math.ceil(len(completions) / float(completions_per_page))) # Note: math.ceil can return float on Python2. def display(page): # Display completions. page_completions = completions[page * completions_per_page: (page+1) * completions_per_page] page_row_count = int(math.ceil(len(page_completions) / float(column_count))) page_columns = [page_completions[i * page_row_count:(i+1) * page_row_count] for i in range(column_count)] result = [] for r in range(page_row_count): for c in range(column_count): try: result.append(page_columns[c][r].text.ljust(max_compl_width)) except IndexError: pass result.append('\n') cli.output.write(''.join(result)) cli.output.flush() # User interaction through an application generator function. def run(): if len(completions) > completions_per_page: # Ask confirmation if it doesn't fit on the screen. message = 'Display all {} possibilities? (y or n) '.format(len(completions)) confirm = yield create_confirm_application(message) if confirm: # Display pages. for page in range(page_count): display(page) if page != page_count - 1: # Display --MORE-- and go to the next page. show_more = yield _create_more_application() if not show_more: return else: cli.output.write('\n'); cli.output.flush() else: # Display all completions. display(0) cli.run_application_generator(run, render_cli_done=True)
def _display_completions_like_readline(cli, completions)
Display the list of completions in columns above the prompt. This will ask for a confirmation if there are too many completions to fit on a single page and provide a paginator to walk through them.
3.247398
3.216888
1.009485
from prompt_toolkit.shortcuts import create_prompt_application registry = Registry() @registry.add_binding(' ') @registry.add_binding('y') @registry.add_binding('Y') @registry.add_binding(Keys.ControlJ) @registry.add_binding(Keys.ControlI) # Tab. def _(event): event.cli.set_return_value(True) @registry.add_binding('n') @registry.add_binding('N') @registry.add_binding('q') @registry.add_binding('Q') @registry.add_binding(Keys.ControlC) def _(event): event.cli.set_return_value(False) return create_prompt_application( '--MORE--', key_bindings_registry=registry, erase_when_done=True)
def _create_more_application()
Create an `Application` instance that displays the "--MORE--".
2.959003
2.676876
1.105394
assert isinstance(callbacks, EventLoopCallbacks) # Create reader class. stdin_reader = PosixStdinReader(stdin.fileno()) if self.closed: raise Exception('Event loop already closed.') inputstream = InputStream(callbacks.feed_key) try: # Create a new Future every time. self._stopped_f = asyncio.Future(loop=self.loop) # Handle input timeouts def timeout_handler(): inputstream.flush() callbacks.input_timeout() timeout = AsyncioTimeout(INPUT_TIMEOUT, timeout_handler, self.loop) # Catch sigwinch def received_winch(): self.call_from_executor(callbacks.terminal_size_changed) self.loop.add_signal_handler(signal.SIGWINCH, received_winch) # Read input data. def stdin_ready(): data = stdin_reader.read() inputstream.feed(data) timeout.reset() # Quit when the input stream was closed. if stdin_reader.closed: self.stop() self.loop.add_reader(stdin.fileno(), stdin_ready) # Block this coroutine until stop() has been called. for f in self._stopped_f: yield f finally: # Clean up. self.loop.remove_reader(stdin.fileno()) self.loop.remove_signal_handler(signal.SIGWINCH) # Don't trigger any timeout events anymore. timeout.stop()
def run_as_coroutine(self, stdin, callbacks)
The input 'event loop'.
4.468292
4.351898
1.026746
if not isinstance(bool_or_filter, (bool, SimpleFilter)): raise TypeError('Expecting a bool or a SimpleFilter instance. Got %r' % bool_or_filter) return { True: _always, False: _never, }.get(bool_or_filter, bool_or_filter)
def to_simple_filter(bool_or_filter)
Accept both booleans and SimpleFilters as input and turn them into a SimpleFilter.
3.231131
3.394107
0.951983
if not isinstance(bool_or_filter, (bool, CLIFilter)): raise TypeError('Expecting a bool or a CLIFilter instance. Got %r' % bool_or_filter) return { True: _always, False: _never, }.get(bool_or_filter, bool_or_filter)
def to_cli_filter(bool_or_filter)
Accept both booleans and CLIFilters as input and turn it into a CLIFilter.
3.533571
3.293222
1.072983
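A quick sketch of the normalization behaviour (import path assumed from the prompt_toolkit 1.x layout):

from prompt_toolkit.filters import to_cli_filter, Condition

always_on = to_cli_filter(True)       # plain bools map to the _always/_never singletons
cond = Condition(lambda cli: True)
assert to_cli_filter(cond) is cond    # filter instances pass through untouched
try:
    to_cli_filter('yes')              # anything else raises
except TypeError as exc:
    print(exc)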
# By default we choose a rather small chunk size, because reading # big amounts of input at once causes the event loop to process # all these key bindings also at once without going back to the # loop. This will make the application feel unresponsive. if self.closed: return '' # Note: the following works better than wrapping `self.stdin` like # `codecs.getreader('utf-8')(stdin)` and doing `read(1)`. # Somehow that causes some latency when the escape # character is pressed. (Especially in combination with `select`.) try: data = os.read(self.stdin_fd, count) # Nothing more to read, stream is closed. if data == b'': self.closed = True return '' except OSError: # In case of SIGWINCH data = b'' return self._stdin_decoder.decode(data)
def read(self, count=1024)
Read the input and return it as a string. Note that this can return an empty string even when the input stream is not yet closed; this means that something went wrong during decoding.
10.745376
11.226556
0.957139
lines = [] while data: match = self._line_end_re.search(data) if match is None: chunk = data else: chunk = data[:match.end()] data = data[len(chunk):] if self._buf and self._buf[-1].endswith(b('\r')) and not chunk.startswith(b('\n')): # if we get a carriage return followed by something other than # a newline then we assume that we're overwriting the current # line (ie. a progress bar) # # We don't terminate lines that end with a carriage return until # we see what's coming next so we can distinguish between a # progress bar situation and a Windows line terminator. # # TODO(adrian): some day these hacks should be replaced with # real terminal emulation lines.append(self._finish_line()) self._buf.append(chunk) if chunk.endswith(b('\n')): lines.append(self._finish_line()) return lines
def add_string(self, data)
Process some data, splitting it into complete lines and buffering the rest. Args: data: A `str` in Python 2 or `bytes` in Python 3 Returns: list of complete lines ending with a carriage return (e.g. a progress bar) or a newline.
4.750025
4.340554
1.094336
if cur_time is None: cur_time = time.time() lines = self._line_buffer.add_string(message) for line in lines: #print('ts line', repr(line)) timestamp = '' if self._prepend_timestamp: timestamp = datetime.datetime.utcfromtimestamp( cur_time).isoformat() + ' ' line = u'{}{}{}'.format(self._line_prepend, timestamp, line) self._fsapi.push(self._filename, line)
def write(self, message, cur_time=None)
Write some text to the pusher. Args: message: a string to push for this file. cur_time: used for unit testing. override line timestamp.
4.576999
4.394819
1.041453
last_step = 0 row = {} buffer = [] last_row = {} for summary in tf.train.summary_iterator(path): parsed = tf_summary_to_dict(summary) if last_step != parsed["tensorflow_step"]: step += 1 last_step = parsed["tensorflow_step"] # TODO: handle time if len(row) > 0: last_row = to_json(row) buffer.append(Chunk("wandb-history.jsonl", util.json_dumps_safer_history(to_json(row)))) row.update(parsed) file_api._send(buffer) return last_row
def stream_tfevents(path, file_api, step=0)
Parses and streams a tfevents file to the server
5.744522
5.910687
0.971887
user_process_pid = args['pid'] stdout_master_fd = args['stdout_master_fd'] stderr_master_fd = args['stderr_master_fd'] try: run = wandb.wandb_run.Run.from_environment_or_defaults() run.enable_logging() api = wandb.apis.InternalApi() api.set_current_run_id(run.id) rm = wandb.run_manager.RunManager( api, run, cloud=args['cloud'], port=args['port']) rm.wrap_existing_process( user_process_pid, stdout_master_fd, stderr_master_fd) except Exception as e: util.sentry_reraise(e)
def headless(args)
Headless mode is where we start a monitoring / syncing process to watch an already-running user process. It's like `wandb run` for a user process that has already started. The user process that calls this waits for a signal that everything is ready, which is sent at the end of rm.wrap_existing_process
4.297045
3.590833
1.196671
run = wandb.wandb_run.Run.from_environment_or_defaults() run.enable_logging() api = wandb.apis.InternalApi() api.set_current_run_id(run.id) # TODO: better failure handling root = api.git.root # handle non-git directories if not root: root = os.path.abspath(os.getcwd()) host = socket.gethostname() remote_url = 'file://{}{}'.format(host, root) run.save(program=args['program'], api=api) env = dict(os.environ) run.set_environment(env) try: rm = wandb.run_manager.RunManager(api, run) except wandb.run_manager.Error: exc_type, exc_value, exc_traceback = sys.exc_info() wandb.termerror('An Exception was raised during setup, see %s for full traceback.' % util.get_log_file_path()) wandb.termerror(exc_value) if 'permission' in str(exc_value): wandb.termerror( 'Are you sure you provided the correct API key to "wandb login"?') lines = traceback.format_exception( exc_type, exc_value, exc_traceback) logging.error('\n'.join(lines)) else: rm.run_user_process(args['program'], args['args'], env)
def agent_run(args)
A version of `wandb run` that the agent uses to run things.
4.430546
4.21315
1.0516
global Summary, Event if tensorboardX: tensorboard_module = "tensorboardX.writer" if tensorflow_loaded: wandb.termlog( "Found TensorboardX and tensorflow, pass tensorboardX=False to patch regular tensorboard.") from tensorboardX.proto.summary_pb2 import Summary from tensorboardX.proto.event_pb2 import Event else: tensorboard_module = "tensorflow.python.summary.writer.writer" from tensorflow.summary import Summary, Event writers = set() def _add_event(self, event, step, walltime=None): event.wall_time = time.time() if walltime is None else walltime if step is not None: event.step = int(step) try: # TensorboardX uses _file_name if hasattr(self.event_writer._ev_writer, "_file_name"): name = self.event_writer._ev_writer._file_name else: name = self.event_writer._ev_writer.FileName().decode("utf-8") writers.add(name) # This is a little hacky, there is a case where the log_dir changes. # Because the events files will have the same names in sub directories # we simply overwrite the previous symlink in wandb.save if the log_dir # changes. log_dir = os.path.dirname(os.path.commonprefix(list(writers))) filename = os.path.basename(name) # Tensorboard loads all tfevents files in a directory and prepends # their values with the path. Passing namespace to log allows us # to nest the values in wandb namespace = name.replace(filename, "").replace( log_dir, "").strip(os.sep) if save: wandb.save(name, base_path=log_dir) wandb.save(os.path.join(log_dir, "*.pbtxt"), base_path=log_dir) log(event, namespace=namespace, step=event.step) except Exception as e: wandb.termerror("Unable to log event %s" % e) # six.reraise(type(e), e, sys.exc_info()[2]) self.event_writer.add_event(event) writer = wandb.util.get_module(tensorboard_module) writer.SummaryToEventTransformer._add_event = _add_event
def patch(save=True, tensorboardX=tensorboardX_loaded)
Monkeypatches tensorboard or tensorboardX so that all events are logged to tfevents files and wandb. We save the tfevents files and graphs to wandb by default. Arguments: save, default: True - Passing False will skip sending events. tensorboardX, default: True if module can be imported - You can override this when calling patch
4.352513
4.206951
1.0346
values = {} if isinstance(tf_summary_str_or_pb, Summary): summary_pb = tf_summary_str_or_pb elif isinstance(tf_summary_str_or_pb, Event): summary_pb = tf_summary_str_or_pb.summary values["global_step"] = tf_summary_str_or_pb.step values["_timestamp"] = tf_summary_str_or_pb.wall_time else: summary_pb = Summary() summary_pb.ParseFromString(tf_summary_str_or_pb) for value in summary_pb.value: kind = value.WhichOneof("value") if kind == "simple_value": values[namespaced_tag(value.tag, namespace)] = value.simple_value elif kind == "image": from PIL import Image image = wandb.Image(Image.open( six.BytesIO(value.image.encoded_image_string))) tag_idx = value.tag.rsplit('/', 1) if len(tag_idx) > 1 and tag_idx[1].isdigit(): tag, idx = tag_idx values.setdefault(history_image_key(tag), []).append(image) else: values[history_image_key(value.tag)] = image # Coming soon... # elif kind == "audio": # audio = wandb.Audio(six.BytesIO(value.audio.encoded_audio_string), # sample_rate=value.audio.sample_rate, content_type=value.audio.content_type) elif kind == "histo": first = value.histo.bucket_limit[0] + \ value.histo.bucket_limit[0] - value.histo.bucket_limit[1] last = value.histo.bucket_limit[-2] + \ value.histo.bucket_limit[-2] - value.histo.bucket_limit[-3] np_histogram = (list(value.histo.bucket), [ first] + value.histo.bucket_limit[:-1] + [last]) values[namespaced_tag(value.tag)] = wandb.Histogram( np_histogram=np_histogram) return values
def tf_summary_to_dict(tf_summary_str_or_pb, namespace="")
Convert a Tensorboard Summary to a dictionary. Accepts either a tensorflow.summary.Summary or one encoded as a string.
2.138667
2.143591
0.997703
# When the search buffer has focus, take that text. if self.preview_search(cli) and cli.buffers[self.search_buffer_name].text: return cli.buffers[self.search_buffer_name].text # Otherwise, take the text of the last active search. else: return self.get_search_state(cli).text
def _get_search_text(self, cli)
The text we are searching for.
5.508532
5.161973
1.067137
# Try for the character under the cursor. if document.current_char and document.current_char in self.chars: pos = document.find_matching_bracket_position( start_pos=document.cursor_position - self.max_cursor_distance, end_pos=document.cursor_position + self.max_cursor_distance) # Try for the character before the cursor. elif (document.char_before_cursor and document.char_before_cursor in self._closing_braces and document.char_before_cursor in self.chars): document = Document(document.text, document.cursor_position - 1) pos = document.find_matching_bracket_position( start_pos=document.cursor_position - self.max_cursor_distance, end_pos=document.cursor_position + self.max_cursor_distance) else: pos = None # Return a list of (row, col) tuples that need to be highlighted. if pos: pos += document.cursor_position # pos is relative. row, col = document.translate_index_to_position(pos) return [(row, col), (document.cursor_position_row, document.cursor_position_col)] else: return []
def _get_positions_to_highlight(self, document)
Return a list of (row, col) tuples that need to be highlighted.
2.738892
2.568279
1.066431
def get_static_tokens(cli): return [(token, text)] return cls(get_static_tokens)
def static(cls, text, token=Token)
Create a :class:`.BeforeInput` instance that always inserts the same text.
8.175682
8.085131
1.0112
if not self.connection: self.connect() start = time.time() conn, _, err = select([self.connection], [], [ self.connection], max_seconds) try: if len(err) > 0: raise socket.error("Couldn't open socket") message = b'' while True: if time.time() - start > max_seconds: raise socket.error( "Timeout of %s seconds waiting for W&B process" % max_seconds) res = self.connection.recv(1024) term = res.find(b'\0') if term != -1: message += res[:term] break else: message += res message = json.loads(message.decode('utf8')) if message['status'] == 'done': return True, None elif message['status'] == 'ready': return True, message elif message['status'] == 'launch_error': return False, None else: raise socket.error("Invalid status: %s" % message['status']) except (socket.error, ValueError) as e: util.sentry_exc(e) return False, None
def listen(self, max_seconds=30)
Waits up to max_seconds to receive a status message from the W&B process.
2.943886
2.938873
1.001706
res = super()._calc_adu() self.ensure_one() dafs_to_apply = self.env['ddmrp.adjustment'].search( self._daf_to_apply_domain()) if dafs_to_apply: daf = 1 values = dafs_to_apply.mapped('value') for val in values: daf *= val prev = self.adu self.adu *= daf _logger.debug( "DAF=%s applied to %s. ADU: %s -> %s" % (daf, self.name, prev, self.adu)) # Compute generated demand to be applied to components: dafs_to_explode = self.env['ddmrp.adjustment'].search( self._daf_to_apply_domain(False)) for daf in dafs_to_explode: prev = self.adu increased_demand = prev * daf.value - prev self.explode_demand_to_components( daf, increased_demand, self.product_uom) return res
def _calc_adu(self)
Apply DAFs if existing for the buffer.
4.407499
3.980065
1.107394
self.env['ddmrp.adjustment.demand'].search([]).unlink() super().cron_ddmrp_adu(automatic) today = fields.Date.today() for op in self.search([]).filtered('extra_demand_ids'): to_add = sum(op.extra_demand_ids.filtered( lambda r: r.date_start <= today <= r.date_end ).mapped('extra_demand')) if to_add: op.adu += to_add _logger.debug( "DAFs-originated demand applied. %s: ADU += %s" % (op.name, to_add))
def cron_ddmrp_adu(self, automatic=False)
Apply extra demand originated by Demand Adjustment Factors to components after the cron update of all the buffers.
5.325033
4.698447
1.13336
res = super()._compute_dlt() for rec in self: ltaf_to_apply = self.env['ddmrp.adjustment'].search( rec._ltaf_to_apply_domain()) if ltaf_to_apply: ltaf = 1 values = ltaf_to_apply.mapped('value') for val in values: ltaf *= val prev = rec.dlt rec.dlt *= ltaf _logger.debug( "LTAF=%s applied to %s. DLT: %s -> %s" % (ltaf, rec.name, prev, rec.dlt)) return res
def _compute_dlt(self)
Apply the Lead Time Adjustment Factor if one exists.
4.618146
3.819719
1.209028
if previous_key: return u"{}{}{}".format(previous_key, separator, new_key) else: return new_key
def _construct_key(previous_key, separator, new_key)
Returns new_key if no previous key exists; otherwise concatenates previous_key, separator, and new_key. :param previous_key: :param separator: :param new_key: :return: the joined string if previous_key exists, otherwise new_key passed through unchanged
2.579778
3.015475
0.855513
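Tiny checks covering both branches of the helper:

assert _construct_key(None, '_', 'a') == 'a'        # no previous key: pass through
assert _construct_key('a', '_', 'b') == 'a_b'       # otherwise join with the separator
assert _construct_key('a_b', '_', 0) == 'a_b_0'     # list indices are formatted the same way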
assert isinstance(nested_dict, dict), "flatten requires a dictionary input" assert isinstance(separator, six.string_types), "separator must be string" # This global dictionary stores the flattened keys and values and is # ultimately returned flattened_dict = dict() def _flatten(object_, key): # Empty object can't be iterated, take as is if not object_: flattened_dict[key] = object_ # These object types support iteration elif isinstance(object_, dict): for object_key in object_: if not (not key and object_key in root_keys_to_ignore): _flatten(object_[object_key], _construct_key(key, separator, object_key)) elif isinstance(object_, (list, set, tuple)): for index, item in enumerate(object_): _flatten(item, _construct_key(key, separator, index)) # Anything left take as is else: flattened_dict[key] = object_ _flatten(nested_dict, None) return flattened_dict
def flatten(nested_dict, separator="_", root_keys_to_ignore=set())
Flattens a dictionary with nested structure to a dictionary with no hierarchy Consider ignoring keys that you are not interested in to prevent unnecessary processing This is especially true for very deep objects :param nested_dict: dictionary we want to flatten :param separator: string to separate dictionary keys by :param root_keys_to_ignore: set of root keys to ignore from flattening :return: flattened dictionary
3.101415
3.144879
0.986179
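A worked example of the default behaviour (dicts recurse, lists/tuples get indexed keys, leaves are taken as-is):

nested = {'a': {'b': 1, 'c': [2, 3]}, 'keep': 'me'}
flatten(nested)
# -> {'a_b': 1, 'a_c_0': 2, 'a_c_1': 3, 'keep': 'me'}
flatten(nested, root_keys_to_ignore={'a'})
# -> {'keep': 'me'}    # the whole 'a' subtree is skipped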
_unflatten_asserts(flat_dict, separator) # This global dictionary is mutated and returned unflattened_dict = dict() def _unflatten(dic, keys, value): for key in keys[:-1]: dic = dic.setdefault(key, {}) dic[keys[-1]] = value for item in flat_dict: _unflatten(unflattened_dict, item.split(separator), flat_dict[item]) return unflattened_dict
def unflatten(flat_dict, separator='_')
Creates a hierarchical dictionary from a flattened dictionary Assumes no lists are present :param flat_dict: a dictionary with no hierarchy :param separator: a string that separates keys :return: a dictionary with hierarchy
2.815125
3.084166
0.912767
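The inverse direction for the simple, list-free case:

unflatten({'a_b': 1, 'a_c': 2, 'd': 3})
# -> {'a': {'b': 1, 'c': 2}, 'd': 3}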
_unflatten_asserts(flat_dict, separator) # First unflatten the dictionary assuming no lists exist unflattened_dict = unflatten(flat_dict, separator) def _convert_dict_to_list(object_, parent_object, parent_object_key): if isinstance(object_, dict): for key in object_: if isinstance(object_[key], dict): _convert_dict_to_list(object_[key], object_, key) try: keys = [int(key) for key in object_] keys.sort() except (ValueError, TypeError): keys = [] keys_len = len(keys) if (keys_len > 0 and sum(keys) == int(((keys_len - 1) * keys_len) / 2) and keys[0] == 0 and keys[-1] == keys_len - 1 and check_if_numbers_are_consecutive(keys)): # The dictionary looks like a list so we're going to replace it parent_object[parent_object_key] = [] for key_index, key in enumerate(keys): parent_object[parent_object_key].append(object_[str(key)]) # The list item we just added might be a list itself # https://github.com/amirziai/flatten/issues/15 _convert_dict_to_list(parent_object[parent_object_key][-1], parent_object[parent_object_key], key_index) _convert_dict_to_list(unflattened_dict, None, None) return unflattened_dict
def unflatten_list(flat_dict, separator='_')
Unflattens a dictionary, first assuming no lists exist, then trying to identify lists and replace them. This is probably not very efficient and has not been tested extensively. Feel free to add test cases or rewrite the logic. Issues that stand out to me: - Sorting all the keys in the dictionary, which especially for the root dictionary can be a lot of keys - Checking that numbers are consecutive is O(N) in number of keys :param flat_dict: dictionary with no hierarchy :param separator: a string that separates keys :return: a dictionary with hierarchy
2.919977
2.842197
1.027366
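When sibling keys look like consecutive zero-based indices, the reconstructed dict is rewritten as a list:

unflatten_list({'a_0': 1, 'a_1': 2, 'b': 'x'})
# -> {'a': [1, 2], 'b': 'x'}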
return all(second - first == 1 for first, second in zip(list_[:-1], list_[1:]))
def check_if_numbers_are_consecutive(list_)
Returns True if numbers in the list are consecutive :param list_: list of integers :return: Boolean
4.753451
6.548018
0.725937
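Tiny checks of the predicate (an empty or single-element list is vacuously consecutive):

assert check_if_numbers_are_consecutive([0, 1, 2]) is True
assert check_if_numbers_are_consecutive([0, 2, 3]) is False
assert check_if_numbers_are_consecutive([]) is True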
''' Strips all color codes from a text. ''' members = [attr for attr in Colors.__dict__.keys() if not attr.startswith( "__" ) and not attr == 'strip'] for c in members: text = text.replace( vars( Colors )[c], '' ) return text
def strip( text )
Strips all color codes from a text.
6.439471
4.884866
1.318249
''' get/set the verbosity level. The verbosity level filters messages that are output to the console. Only messages tagged with a verbosity less-than-or-equal-to the class verbosity are output. This does not affect output to non-console devices such as files or remote sockets. verbosity(): returns the current level verbosity(<N>): sets the verbosity to <N> ''' if len(args): self._verbosity = args[0] else: return self._verbosity
def verbosity(self, *args)
get/set the verbosity level. The verbosity level filters messages that are output to the console. Only messages tagged with a verbosity less-than-or-equal-to the class verbosity are output. This does not affect output to non-console devices such as files or remote sockets. verbosity(): returns the current level verbosity(<N>): sets the verbosity to <N>
7.755634
1.886897
4.110259
''' get/set the tag string itself. If called with non-zero length argument, will toggle the internal b_tag flag to True. The tagstring, if flagged TRUE and non-zero length, will prepend each output log line. In this manner, it's possible to post-filter log files for specific tags. tagstring(): returns the current tagstring tagstring(<string>): sets the tagstring to <string> ''' if len(args): self._str_tag = args[0] self._b_tag = True else: return self._str_tag
def tagstring(self, *args)
get/set the tag string itself. If called with non-zero length argument, will toggle the internal b_tag flag to True. The tagstring, if flagged TRUE and non-zero length, will prepend each output log line. In this manner, it's possible to post-filter log files for specific tags. tagstring(): returns the current tagstring tagstring(<string>): sets the tagstring to <string>
8.420909
1.646601
5.114117
''' get/set the tag flag. The tag flag toggles the most basic prepending to each log output. The idea with the tagging text is to provide a simple mechanism by which a log output can be filtered/parsed for specific outputs. tag(): returns the current syslog flag tag(True|False): sets the flag to True|False ''' if len(args): self._b_tag = args[0] else: return self._b_tag
def tag(self, *args)
get/set the tag flag. The tag flag toggles the most basic prepending to each log output. The idea with the tagging text is to provide a simple mechanism by which a log output can be filtered/parsed for specific outputs. tag(): returns the current syslog flag tag(True|False): sets the flag to True|False
12.07385
1.983671
6.086619
''' get/set the syslog flag. The syslog flag toggles prepending each message with a syslog-style prefix. syslog(): returns the current syslog flag syslog(True|False): sets the flag to True|False ''' if len(args): self._b_syslog = args[0] else: return self._b_syslog
def syslog(self, *args)
get/set the syslog flag. The syslog flag toggles prepending each message with a syslog-style prefix. syslog(): returns the current syslog flag syslog(True|False): sets the flag to True|False
6.742705
2.386015
2.825927
''' get/set the str_syslog, i.e. the current value of the syslog prepend string. str_syslog(): returns the current syslog string str_syslog(<astr>): sets the syslog string to <astr> ''' if len(args): self._str_syslog = args[0] else: return self._str_syslog
def str_syslog(self, *args)
get/set the str_syslog, i.e. the current value of the syslog prepend string. str_syslog(): returns the current syslog string str_syslog(<astr>): sets the syslog string to <astr>
5.447064
2.233185
2.439146
''' get/set the tee flag. The tee flag toggles any output that is directed to non-console destinations to also appear on the console. Tee'd console output is still verbosity filtered tee(): returns the current syslog flag tee(True|False): sets the flag to True|False ''' if len(args): self._b_tee = args[0] else: return self._b_tee
def tee(self, *args)
get/set the tee flag. The tee flag toggles any output that is directed to non-console destinations to also appear on the console. Tee'd console output is still verbosity filtered tee(): returns the current syslog flag tee(True|False): sets the flag to True|False
11.746576
2.160275
5.437539
''' Examines <astr_destination> and if of form <str1>:<str2> assumes that <str1> is a host to send datagram comms to over port <str2>. Returns True or False. ''' t_socketInfo = astr_destination.partition(':') if len(t_socketInfo[1]): self._b_isSocket = True self._socketRemote = t_socketInfo[0] self._socketPort = t_socketInfo[2] else: self._b_isSocket = False return self._b_isSocket
def socket_parse(self, astr_destination)
Examines <astr_destination> and if of form <str1>:<str2> assumes that <str1> is a host to send datagram comms to over port <str2>. Returns True or False.
5.841474
2.552677
2.288372
''' get/set the 'device' to which messages are sent. Valid targets are: string filenames: '/tmp/test.log' remote hosts: 'pretoria:1701' system devices: sys.stdout, sys.stderr special names: 'stdout' file handles: open('/tmp/test.log') ''' if len(args): self._logFile = args[0] if self._logHandle and self._logHandle != sys.stdout: self._logHandle.close() # if type(self._logFile) is types.FileType: if isinstance(self._logFile, IOBase): self._logHandle = self._logFile elif self._logFile == 'stdout': self._logHandle = sys.stdout elif self.socket_parse(self._logFile): self._logHandle = C_dgmsocket( self._socketRemote, int(self._socketPort)) else: self._logHandle = open(self._logFile, "a") self._sys_stdout = self._logHandle else: return self._logFile
def to(self, *args)
get/set the 'device' to which messages are sent. Valid targets are: string filenames: '/tmp/test.log' remote hosts: 'pretoria:1701' system devices: sys.stdout, sys.stderr special names: 'stdout' file handles: open('/tmp/test.log')
5.587109
2.804555
1.992155
''' A verbosity-aware printf. ''' if self._verbosity and self._verbosity >= alevel: sys.stdout.write(format % args)
def vprintf(self, alevel, format, *args)
A verbosity-aware printf.
7.337173
4.215007
1.740726
''' Returns a string similar to: Tue Oct 9 10:49:53 2012 pretoria message.py[26873]: where 'pretoria' is the hostname, 'message.py' is the current process name and 26873 is the current process id. ''' localtime = time.asctime( time.localtime(time.time()) ) hostname = os.uname()[1] syslog = '%s %s %s[%s]' % (localtime, hostname, str_processName, str_pid) return syslog
def syslog_generate(str_processName, str_pid)
Returns a string similar to: Tue Oct 9 10:49:53 2012 pretoria message.py[26873]: where 'pretoria' is the hostname, 'message.py' is the current process name and 26873 is the current process id.
5.81348
1.957021
2.970576
rows, cols = a_M.shape a_Mmask = ones( (rows, cols) ) if len(args): a_Mmask = args[0] a_M *= a_Mmask # The "binary" density determines the density of nonzero elements, # irrespective of their actual value f_binaryMass = float(size(nonzero(a_M)[0])) f_actualMass = a_M.sum() f_area = float(size(nonzero(a_Mmask)[0])) f_binaryDensity = f_binaryMass / f_area; f_actualDensity = f_actualMass / f_area; return f_actualDensity, f_binaryDensity
def density(a_M, *args, **kwargs)
ARGS a_M matrix to analyze *args[0] optional mask matrix; if passed, calculate density of a_M using non-zero elements of args[0] as a mask. DESC Determine the "density" of a passed matrix. Two densities are returned: o f_actualDensity -- density of the matrix using matrix values as "mass" o f_binaryDensity -- density of the matrix irrespective of actual matrix values If the passed matrix contains only "ones", the f_binaryDensity will be equal to the f_actualDensity.
4.626609
3.529955
1.310671
counts, bin_edges = histogram(arr, **kwargs) cdf = cumsum(counts) return cdf
def cdf(arr, **kwargs)
ARGS arr array to calculate cumulative distribution function **kwargs Passed directly to numpy.histogram. Typical options include: bins = <num_bins> normed = True|False DESC Determines the cumulative distribution function.
4.025932
5.066495
0.794619
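A small sketch of how the cumulative counts come out (numpy assumed, since the function already leans on histogram/cumsum):

import numpy as np

arr = np.random.randn(1000)
counts = cdf(arr, bins=50)
# Monotone non-decreasing; with the default range the last entry is the sample count.
assert counts[-1] == 1000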
f_range = a_cdf[-1] - a_cdf[0] f_rangePart = f_range / a_partitions lowerBound = a_cdf[0] vl = [] for part in arange(0, a_partitions): # Due to possible cumulative rounding errors, relax the tolerance # on the final partition: if part == a_partitions - 1: subset = (a_cdf > lowerBound) else: subset = (a_cdf > lowerBound) & (a_cdf <= lowerBound + f_rangePart) indices = where(subset, 1, 0) v = where(indices == 1) vl.append(v) lowerBound += f_rangePart return vl
def cdf_distribution(a_cdf, a_partitions)
ARGS a_cdf vector a vector of values/observations a_partitions int the number of partitions DESC This function returns the indices of a passed cdf such that the range of values across the indices is uniform across the number of partitions. Imagine you have a range of observations/values, and you'd like to partition the observations over 3 ranges. If you simply partition the range of values into three evenly spaced groups across the domain, you will most likely find that the dynamic range of values in each partition is non-uniform. By partitioning the cdf, however, the range of values in each partition is uniform. The "size" of each partition, however, is not.
3.813404
3.779436
1.008988
f_x = 0 f_y = 0 f_m = 0 for i in range(len(ar_grid)): for j in range(len(ar_grid[i])): if ar_grid[i][j]: # Since python arrays are zero indexed, we need to offset # the loop counters by 1 to account for mass in the 1st # column. f_x += (j+1) * ar_grid[i][j] f_y += (i+1) * ar_grid[i][j] f_m += ar_grid[i][j] f_com = array( (float(f_x)/f_m , float(f_y)/f_m) ) return f_com
def com_find(ar_grid)
Find the center of mass in array grid <ar_grid>. Mass elements are grid index values. Return an array, in format (x, y), i.e. col, row!
3.040549
2.788892
1.090235
b_reorder = True b_oneOffset = True for key, value in kwargs.iteritems(): if key == 'ordering' and value == 'rc': b_reorder = False if key == 'ordering' and value == 'xy': b_reorder = True if key == 'indexing' and value == 'zero': b_oneOffset = False if key == 'indexing' and value == 'one': b_oneOffset = True f_Smass = ar_grid.sum() f_comX = (ar_grid[nonzero(ar_grid)] * (nonzero(ar_grid)[1] + 1)).sum() / f_Smass f_comY = (ar_grid[nonzero(ar_grid)] * (nonzero(ar_grid)[0] + 1)).sum() / f_Smass if b_reorder: ar_ret = array( (f_comX, f_comY) ) if not b_reorder: ar_ret = array( (f_comY, f_comX) ) if not b_oneOffset: ar_ret -= 1.0 return ar_ret
def com_find2D(ar_grid, **kwargs)
ARGS **kwargs ordering = 'rc' or 'xy' order the return either in (x,y) or (row, col) order. indexing = 'zero' or 'one' return positions relative to zero (i.e. python addressing) or one (i.e. MatLAB addressing) DESC Find the center of mass in 2D array grid <ar_grid>. Mass elements are grid index values. By using python idioms, this version is MUCH faster than com_find()
2.906177
2.405606
1.208085
rows = arr[0] cols = arr[1] arr_index = zeros((rows * cols, 2)) count = 0 for row in arange(0, rows): for col in arange(0, cols): arr_index[count] = array([row, col]) count = count + 1 return arr_index
def array2DIndices_enumerate(arr)
DESC Given a 2D array defined by arr, prepare an explicit list of the indices. ARGS arr in 2 element array with the first element the rows and the second the cols RET A list of explicit 2D coordinates.
2.665558
2.708696
0.984074
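Example of the enumeration, assuming numpy's array as elsewhere in this module (the result is a float array because it is built with zeros()):

array2DIndices_enumerate(array([2, 2]))
# -> array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])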
i = 0; k = 0; # Cycle up in powers of radix until the largest exponent is found. # This is required to determine the word length while (pow(aradix, i)) <= anum10: i = i + 1; forcelength = i # Optionally, allow user to specify word length if len(args): forcelength = args[0] # Check that word length is valid if(forcelength and (forcelength < i)): error_exit('b10_convertFrom', 'checking on requested return array', 'specified length is too small', 1) numm = anum10; num_r = zeros((forcelength)); if(i): k = forcelength - i; else: k = forcelength - 1; if(anum10 == 1): num_r[(k)] = 1; return num_r; for j in arange(i, 0, -1): num_r[(k)] = fix(numm / pow(aradix, (j - 1))); numm = numm % pow(aradix, (j - 1)); k = k + 1; return num_r
def b10_convertFrom(anum10, aradix, *args)
ARGS anum10 in number in base 10 aradix in convert <anum10> to number in base + <aradix> OPTIONAL forcelength in if nonzero, indicates the length + of the return array. Useful if + array needs to be zero padded. DESC Converts a scalar from base 10 to base radix. Return an array. NOTE: "Translated" from a MatLAB script of the same name.
4.939049
4.603121
1.072978
global Gtic_start f_elapsedTime = time.time() - Gtic_start for key, value in kwargs.items(): if key == 'sysprint': return value % f_elapsedTime if key == 'default': return "Elapsed time = %f seconds." % f_elapsedTime return f_elapsedTime
def toc(*args, **kwargs)
Port of the MatLAB function of the same name. Behaviour is controllable to some extent by the keyword args: 'sysprint' formats the elapsed time with the given format string; 'default' returns a canned "Elapsed time" message; otherwise the elapsed seconds are returned as a float.
7.769345
8.858425
0.877057
b_wrapGridEdges = False # If True, wrap around edges of grid if len(args): b_wrapGridEdges = args[0] # Check for points "less than" grid space if b_wrapGridEdges: W = where(A_point < 0) A_point[W] += a_gridSize[W[1]] Wbool = A_point >= 0 W = Wbool.prod(axis=1) A_point = A_point[where(W > 0)] # Check for points "more than" grid space A_inGrid = a_gridSize - A_point if b_wrapGridEdges: W = where(A_inGrid <= 0) A_point[W] = -A_inGrid[W] A_inGrid = a_gridSize - A_point Wbool = A_inGrid > 0 W = Wbool.prod(axis=1) A_point = A_point[where(W > 0)] # A_point = abs(A_point) return A_point
def pointInGrid(A_point, a_gridSize, *args)
SYNOPSIS [A_point] = pointInGrid(A_point, a_gridSize [, ab_wrapGridEdges]) ARGS INPUT A_point array of N-D points points in grid space a_gridSize array the size (rows, cols) of + the grid space OPTIONAL ab_wrapGridEdges bool if True, wrap "external" points back into grid OUTPUT A_point array of N-D points points that are within the grid. DESC Determines if set of N-dimensionals <A_point> is within a grid <a_gridSize>. PRECONDITIONS o Assumes strictly positive domains, i.e. points with negative locations are by definition out-of-range. If negative domains are valid in a particular problem space, the caller will need to offset <a_point> to be strictly positive first. POSTCONDITIONS o if <ab_wrapGridEdges> is False, returns only the subset of points in A_point that are within the <a_gridSize>. o if <ab_wrapGridEdges> is True, "wraps" any points in A_point back into a_gridSize first, and then checks for those that are still within <a_gridSize>.
3.064423
2.963299
1.034125
new_num_arr = array(()) current = anum10 while current != 0: remainder = current % aradix new_num_arr = r_[remainder, new_num_arr] current = current / aradix forcelength = new_num_arr.size # Optionally, allow user to specify word length if len(args): forcelength = args[0] while new_num_arr.size < forcelength: new_num_arr = r_[0, new_num_arr] return new_num_arr
def arr_base10toN(anum10, aradix, *args)
ARGS anum10 in number in base 10 aradix in convert <anum10> to number in base + <aradix> OPTIONAL forcelength in if nonzero, indicates the length + of the return array. Useful if + array needs to be zero padded. DESC Converts a scalar from base 10 to base radix. Return an array.
3.647186
3.669863
0.993821
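With the integer-division fix above, a couple of quick checks (the result is a float array because it is built with numpy's r_):

arr_base10toN(13, 2)       # -> array([1., 1., 0., 1.])   i.e. 0b1101
arr_base10toN(13, 2, 8)    # -> array([0., 0., 0., 0., 1., 1., 0., 1.])   zero-padded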
num_rep = {10:'a', 11:'b', 12:'c', 13:'d', 14:'e', 15:'f', 16:'g', 17:'h', 18:'i', 19:'j', 20:'k', 21:'l', 22:'m', 23:'n', 24:'o', 25:'p', 26:'q', 27:'r', 28:'s', 29:'t', 30:'u', 31:'v', 32:'w', 33:'x', 34:'y', 35:'z'} new_num_string = '' new_num_arr = array(()) current = num while current != 0: remainder = current % n if 36 > remainder > 9: remainder_string = num_rep[remainder] elif remainder >= 36: remainder_string = '(' + str(remainder) + ')' else: remainder_string = str(remainder) new_num_string = remainder_string + new_num_string new_num_arr = r_[remainder, new_num_arr] current = current // n print(new_num_arr) return new_num_string
def base10toN(num, n)
Change a num to a base-n number. Up to base-36 is supported without special notation.
1.794718
1.775069
1.011069
slist = [] for el in ilist: slist.append(str(el)) return slist
def list_i2str(ilist)
Convert an integer list into a string list.
2.80398
2.479683
1.130782
adict = {} alist = str2lst(astr_attributes, astr_separator) for str_pair in alist: alistTuple = str2lst(str_pair, "=") adict.setdefault(alistTuple[0], alistTuple[1].strip(chr(0x22) + chr(0x27))) return adict
def attributes_strToDict(astr_attributes, astr_separator=" ")
This is logical inverse of the dictToStr method. The <astr_attributes> string *MUST* have <key>=<value> tuples separated by <astr_separator>.
3.666313
3.614267
1.0144
str_tabBoundary = " " for key, value in kwargs.iteritems(): if key == 'tabBoundary': str_tabBoundary = value b_trailN = False length = len(astr_buf) ch_trailN = astr_buf[length - 1] if ch_trailN == '\n': b_trailN = True astr_buf = astr_buf[0:length - 1] str_ret = astr_buf str_tab = '' str_Indent = '' for i in range(a_tabLength): str_tab = '%s ' % str_tab str_tab = "%s%s" % (str_tab, str_tabBoundary) for i in range(a_tabs): str_Indent = '%s%s' % (str_Indent, str_tab) str_ret = re.sub('\n', '\n%s' % str_Indent, astr_buf) str_ret = '%s%s' % (str_Indent, str_ret) if b_trailN: str_ret = str_ret + '\n' return str_ret
def str_blockIndent(astr_buf, a_tabs=1, a_tabLength=4, **kwargs)
For the input string <astr_buf>, replace each '\n' with '\n<tab>' where the number of tabs is indicated by <a_tabs> and the length of the tab by <a_tabLength> Trailing '\n' are *not* replaced.
2.337188
2.461261
0.94959
if b_echoCommand: printf('<p>str_command = %s</p>', str_command) # Open the pipe once so the command is not executed twice. child = os.popen(str_command) str_stdout = child.read() retcode = child.close() return retcode, str_stdout
def system_procRet(str_command, b_echoCommand=0)
Run the <str_command> on the underlying shell. Any stderr stream is lost. RETURN Tuple (retcode, str_stdout) o retcode: the system return code o str_stdout: the standard output stream
3.382447
3.915612
0.863836
child = os.popen(command) data = child.read() err = child.close() if err: raise RuntimeError('%s failed w/ exit code %d' % (command, err)) return data
def shellne(command)
Runs 'commands' on the underlying shell; any stderr is echo'd to the console. Raises a RuntimeException on any shell exec errors.
3.509491
4.004444
0.876399
'''Helper around 'locate' ''' hits = '' for F in locate(pattern, root): hits = hits + F + '\n' l = hits.split('\n') if(not len(l[-1])): l.pop() if len(l) == 1 and not len(l[0]): return None else: return l
def find(pattern, root=os.curdir)
Helper around 'locate'
5.856971
4.583839
1.277744
try: index = astr_datestr.index(astr_sep) except: return astr_datestr.encode('ascii') # Month is %m; %M would parse minutes. try: tm = time.strptime(astr_datestr, '%d/%m/%Y') except: try: tm = time.strptime(astr_datestr, '%d/%m/%y') except: error_exit('str_dateStrip', 'parsing date string', 'no conversion was possible', 1) tstr = time.strftime("%d%m%Y", tm) return tstr.encode('ascii')
def str_dateStrip(astr_datestr, astr_sep='/')
Simple date strip method. Checks if the <astr_datestr> contains <astr_sep>. If so, strips these from the string and returns result. The actual stripping entails falling through two layers of exception handling... so it is something of a hack.
3.211587
3.181625
1.009417
alistI = astr_input.split(astr_separator) alistJ = [] for i in range(0, len(alistI)): alistI[i] = alistI[i].strip() alistI[i] = alistI[i].encode('ascii') if len(alistI[i]): alistJ.append(alistI[i]) return alistJ
def str2lst(astr_input, astr_separator=" ")
Breaks a string at <astr_separator> and joins into a list. Steps along all list elements and strips white space. The list elements are explicitly ascii encoded.
2.250395
2.154263
1.044624
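Note the explicit ascii encoding: on Python 3 the elements come back as bytes.

str2lst('  one two   three ')
# -> [b'one', b'two', b'three'] on Python 3 (plain str on Python 2)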
if self.fall or not args: return True elif self.value in args: # changed for v1.5, see below self.fall = True return True else: return False
def match(self, *args)
Indicate whether or not to enter a case suite
4.341633
6.371057
0.681462
''' Error handling. Based on the <astr_key>, error information is extracted from _dictErr and sent to log object. If <ab_exitToOs> is False, error is considered non-fatal and processing can continue, otherwise processing terminates. ''' log = callingClass.log() b_syslog = log.syslog() log.syslog(False) if ab_exitToOs: log( Colors.RED + "\n:: FATAL ERROR :: " + Colors.NO_COLOUR ) else: log( Colors.YELLOW + "\n:: WARNING :: " + Colors.NO_COLOUR ) if len(astr_header): log( Colors.BROWN + astr_header + Colors.NO_COLOUR ) log( "\n" ) log( "\tSorry, some error seems to have occurred in:\n\t<" ) log( Colors.LIGHT_GREEN + ("%s" % callingClass.name()) + Colors.NO_COLOUR + "::") log( Colors.LIGHT_CYAN + ("%s" % inspect.stack()[2][4][0].strip()) + Colors.NO_COLOUR) log( "> called by <") try: caller = inspect.stack()[3][4][0].strip() except: caller = '__main__' log( Colors.LIGHT_GREEN + ("%s" % callingClass.name()) + Colors.NO_COLOUR + "::") log( Colors.LIGHT_CYAN + ("%s" % caller) + Colors.NO_COLOUR) log( ">\n") log( "\tWhile %s\n" % callingClass._dictErr[astr_key]['action'] ) log( "\t%s\n" % callingClass._dictErr[astr_key]['error'] ) log( "\n" ) if ab_exitToOs: log( "Returning to system with error code %d\n" % \ callingClass._dictErr[astr_key]['exitCode'] ) sys.exit( callingClass._dictErr[astr_key]['exitCode'] ) log.syslog(b_syslog) return callingClass._dictErr[astr_key]['exitCode']
def report( callingClass, astr_key, ab_exitToOs=1, astr_header="" )
Error handling. Based on the <astr_key>, error information is extracted from _dictErr and sent to log object. If <ab_exitToOs> is False, error is considered non-fatal and processing can continue, otherwise processing terminates.
3.393534
2.676954
1.267685
''' Convenience dispatcher to the error_exit() method. Will raise "fatal" error, i.e. terminate script. ''' b_exitToOS = True report( callingClass, astr_key, b_exitToOS, astr_extraMsg )
def fatal( callingClass, astr_key, astr_extraMsg="" )
Convenience dispatcher to the error_exit() method. Will raise "fatal" error, i.e. terminate script.
11.74705
4.270719
2.750602
''' Convenience dispatcher to the error_exit() method. Will raise "warning" error, i.e. script processing continues. ''' b_exitToOS = False report( callingClass, astr_key, b_exitToOS, astr_extraMsg )
def warn( callingClass, astr_key, astr_extraMsg="" )
Convenience dispatcher to the error_exit() method. Will raise "warning" error, i.e. script processing continues.
15.134341
4.101436
3.690011
''' get/set the internal pipeline log message object. Caller can further manipulate the log object with object-specific calls. ''' if len(args): self._log = args[0] else: return self._log
def log(self, *args)
get/set the internal pipeline log message object. Caller can further manipulate the log object with object-specific calls.
10.041615
2.454341
4.091369
''' get/set the descriptive name text of this object. ''' if len(args): self.__name = args[0] else: return self.__name
def name(self, *args)
get/set the descriptive name text of this object.
6.035336
3.387951
1.781412
''' Get / set internal object description. ''' if len(args): self._str_desc = args[0] else: return self._str_desc
def description(self, *args)
Get / set internal object description.
5.876301
3.827057
1.535462
''' Processes a single slice. ''' if b_rot90: self._Mnp_2Dslice = np.rot90(self._Mnp_2Dslice) if self.func == 'invertIntensities': self.invert_slice_intensities()
def process_slice(self, b_rot90=None)
Processes a single slice.
6.906815
6.269041
1.101734
''' Saves a single slice. ARGS o astr_outputFile The output filename to save the slice to. ''' self._log('Outputfile = %s\n' % astr_outputFile) fformat = astr_outputFile.split('.')[-1] if fformat == 'dcm': if self._dcm: self._dcm.pixel_array.flat = self._Mnp_2Dslice.flat self._dcm.PixelData = self._dcm.pixel_array.tostring() self._dcm.save_as(astr_outputFile) else: raise ValueError('dcm output format only available for DICOM files') else: pylab.imsave(astr_outputFile, self._Mnp_2Dslice, format=fformat, cmap = cm.Greys_r)
def slice_save(self, astr_outputFile)
Saves a single slice. ARGS o astr_outputFile The output filename to save the slice to.
3.932694
2.968267
1.324912
''' Runs the DICOM conversion based on internal state. ''' self._log('Converting DICOM image.\n') try: self._log('PatientName: %s\n' % self._dcm.PatientName) except AttributeError: self._log('PatientName: %s\n' % 'PatientName not found in DCM header.') error.warn(self, 'PatientNameTag') try: self._log('PatientAge: %s\n' % self._dcm.PatientAge) except AttributeError: self._log('PatientAge: %s\n' % 'PatientAge not found in DCM header.') error.warn(self, 'PatientAgeTag') try: self._log('PatientSex: %s\n' % self._dcm.PatientSex) except AttributeError: self._log('PatientSex: %s\n' % 'PatientSex not found in DCM header.') error.warn(self, 'PatientSexTag') try: self._log('PatientID: %s\n' % self._dcm.PatientID) except AttributeError: self._log('PatientID: %s\n' % 'PatientID not found in DCM header.') error.warn(self, 'PatientIDTag') try: self._log('SeriesDescription: %s\n' % self._dcm.SeriesDescription) except AttributeError: self._log('SeriesDescription: %s\n' % 'SeriesDescription not found in DCM header.') error.warn(self, 'SeriesDescriptionTag') try: self._log('ProtocolName: %s\n' % self._dcm.ProtocolName) except AttributeError: self._log('ProtocolName: %s\n' % 'ProtocolName not found in DCM header.') error.warn(self, 'ProtocolNameTag') if self._b_convertMiddleSlice: self._log('Converting middle slice in DICOM series: %d\n' % self._sliceToConvert) l_rot90 = [ True, True, False ] misc.mkdir(self._str_outputDir) if not self._b_3D: str_outputFile = '%s/%s.%s' % (self._str_outputDir, self._str_outputFileStem, self._str_outputFileType) self.process_slice() self.slice_save(str_outputFile) if self._b_3D: rotCount = 0 if self._b_reslice: for dim in ['x', 'y', 'z']: self.dim_save(dimension = dim, makeSubDir = True, rot90 = l_rot90[rotCount], indexStart = 0, indexStop = -1) rotCount += 1 else: self.dim_save(dimension = 'z', makeSubDir = False, rot90 = False, indexStart = 0, indexStop = -1)
def run(self)
Runs the DICOM conversion based on internal state.
2.253279
2.13516
1.055321
''' Runs the NIfTI conversion based on internal state. ''' self._log('About to perform NifTI to %s conversion...\n' % self._str_outputFileType) frames = 1 frameStart = 0 frameEnd = 0 sliceStart = 0 sliceEnd = 0 if self._b_4D: self._log('4D volume detected.\n') frames = self._Vnp_4DVol.shape[3] if self._b_3D: self._log('3D volume detected.\n') if self._b_convertMiddleFrame: self._frameToConvert = int(frames/2) if self._frameToConvert == -1: frameEnd = frames else: frameStart = self._frameToConvert frameEnd = self._frameToConvert + 1 for f in range(frameStart, frameEnd): if self._b_4D: self._Vnp_3DVol = self._Vnp_4DVol[:,:,:,f] slices = self._Vnp_3DVol.shape[2] if self._b_convertMiddleSlice: self._sliceToConvert = int(slices/2) if self._sliceToConvert == -1: sliceEnd = -1 else: sliceStart = self._sliceToConvert sliceEnd = self._sliceToConvert + 1 misc.mkdir(self._str_outputDir) if self._b_reslice: for dim in ['x', 'y', 'z']: self.dim_save(dimension = dim, makeSubDir = True, indexStart = sliceStart, indexStop = sliceEnd, rot90 = True) else: self.dim_save(dimension = 'z', makeSubDir = False, indexStart = sliceStart, indexStop = sliceEnd, rot90 = True)
def run(self)
Runs the NIfTI conversion based on internal state.
3.279036
2.992054
1.095915
logger = logging.getLogger(name) logger.setLevel(logging.INFO) # File output handler file_handler = logging.FileHandler(log_path) file_handler.setLevel(logging.INFO) formatter = logging.Formatter( '%(asctime)s %(name)12s %(levelname)8s %(lineno)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') file_handler.setFormatter(formatter) logger.addHandler(file_handler) return logger
def get_logger(name)
Return a logger with a file handler.
1.872282
1.788211
1.047014
def wrapper(*args, **kwargs): start = time.time() result = method(*args, **kwargs) end = time.time() click.echo('Cost {}s'.format(int(end-start))) return result return wrapper
def timeit(method)
Compute the download time.
2.812256
2.699779
1.041661
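Typical decorator usage; click.echo reports the rounded elapsed seconds once the call returns:

@timeit
def download_all():
    ...  # body elided

download_all()   # echoes e.g. 'Cost 3s'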
def wrapper(*args, **kwargs): crawler = args[0].crawler # args[0] is a NetEase object try: if os.path.isfile(cookie_path): with open(cookie_path, 'r') as cookie_file: cookie = cookie_file.read() expire_time = re.compile(r'\d{4}-\d{2}-\d{2}').findall(cookie) now = time.strftime('%Y-%m-%d', time.localtime(time.time())) if expire_time[0] > now: crawler.session.cookies.load() else: crawler.login() else: crawler.login() except RequestException: click.echo('Maybe password error, please try again.') sys.exit(1) result = method(*args, **kwargs) return result return wrapper
def login(method)
Require user to login.
3.157192
3.104947
1.016827
try: song = self.crawler.search_song(song_name, self.quiet) except RequestException as exception: click.echo(exception) else: self.download_song_by_id(song.song_id, song.song_name, self.folder)
def download_song_by_search(self, song_name)
Download a song by its name. :params song_name: song name.
4.354397
4.319128
1.008166
try: url = self.crawler.get_song_url(song_id) if self.lyric: # use old api lyric_info = self.crawler.get_song_lyric(song_id) else: lyric_info = None song_name = song_name.replace('/', '') song_name = song_name.replace('.', '') self.crawler.get_song_by_url(url, song_name, folder, lyric_info) except RequestException as exception: click.echo(exception)
def download_song_by_id(self, song_id, song_name, folder='.')
Download a song by id and save it to disk. :params song_id: song id. :params song_name: song name. :params folder: storage path.
3.32609
3.215645
1.034346
try: album = self.crawler.search_album(album_name, self.quiet) except RequestException as exception: click.echo(exception) else: self.download_album_by_id(album.album_id, album.album_name)
def download_album_by_search(self, album_name)
Download an album by its name. :params album_name: album name.
4.002076
4.063632
0.984852
try: # use old api songs = self.crawler.get_album_songs(album_id) except RequestException as exception: click.echo(exception) else: folder = os.path.join(self.folder, album_name) for song in songs: self.download_song_by_id(song.song_id, song.song_name, folder)
def download_album_by_id(self, album_id, album_name)
Download an album's songs by its id. :params album_id: album id. :params album_name: album name.
3.543796
3.627349
0.976966
try: artist = self.crawler.search_artist(artist_name, self.quiet) except RequestException as exception: click.echo(exception) else: self.download_artist_by_id(artist.artist_id, artist.artist_name)
def download_artist_by_search(self, artist_name)
Download an artist's top 50 songs by his/her name. :params artist_name: artist name.
4.028801
4.352056
0.925724
try: # use old api songs = self.crawler.get_artists_hot_songs(artist_id) except RequestException as exception: click.echo(exception) else: folder = os.path.join(self.folder, artist_name) for song in songs: self.download_song_by_id(song.song_id, song.song_name, folder)
def download_artist_by_id(self, artist_id, artist_name)
Download an artist's top 50 songs by his/her id. :params artist_id: artist id. :params artist_name: artist name.
3.887652
3.896846
0.997641
try: playlist = self.crawler.search_playlist( playlist_name, self.quiet) except RequestException as exception: click.echo(exception) else: self.download_playlist_by_id( playlist.playlist_id, playlist.playlist_name)
def download_playlist_by_search(self, playlist_name)
Download a playlist's songs by its name. :params playlist_name: playlist name.
4.325251
4.380013
0.987497
try: songs = self.crawler.get_playlist_songs( playlist_id) except RequestException as exception: click.echo(exception) else: folder = os.path.join(self.folder, playlist_name) for song in songs: self.download_song_by_id(song.song_id, song.song_name, folder)
def download_playlist_by_id(self, playlist_id, playlist_name)
Download a playlist's songs by its id. :params playlist_id: playlist id. :params playlist_name: playlist name.
3.218007
3.157982
1.019007
try: user = self.crawler.search_user(user_name, self.quiet) except RequestException as exception: click.echo(exception) else: self.download_user_playlists_by_id(user.user_id)
def download_user_playlists_by_search(self, user_name)
Download user's playlists by his/her name. :params user_name: user name.
4.362395
4.584716
0.951508
try: playlist = self.crawler.get_user_playlists(user_id) except RequestException as exception: click.echo(exception) else: self.download_playlist_by_id( playlist.playlist_id, playlist.playlist_name)
def download_user_playlists_by_id(self, user_id)
Download a user's playlists by the user's id.

:param user_id: user id.
4.057256
4.072624
0.996227
with open(person_info_path, 'r') as person_info:
    user_id = int(person_info.read())
self.download_user_playlists_by_id(user_id)
def download_person_playlists(self)
Download the logged-in user's playlists, including private playlists.

Note: login required.
4.006613
3.86455
1.036761
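The method above reads a cached user id from person_info_path, presumably written at login; a minimal sketch of seeding that file for testing, assuming person_info_path is the same module-level path the code already uses:

# Store a NetEase user id where download_person_playlists expects it;
# the id is an illustrative placeholder.
with open(person_info_path, 'w') as person_info:
    person_info.write(str(12345678))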
LOG.info('%s => %s', sign, frame)
click.echo('Bye')
sys.exit(0)
def signal_handler(sign, frame)
Capture Ctrl+C (SIGINT) and exit gracefully.
6.455691
6.217091
1.038378
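For context, a handler like this is typically registered at startup with the standard library's signal module; a minimal, self-contained sketch (the print-based body stands in for the LOG/click calls above):

import signal
import sys

def signal_handler(sign, frame):
    # Stand-in for the handler above: announce and exit cleanly.
    print('Bye')
    sys.exit(0)

# Route Ctrl+C (SIGINT) to the handler instead of raising
# KeyboardInterrupt.
signal.signal(signal.SIGINT, signal_handler)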
ctx.obj = NetEase(timeout, proxy, output, quiet, lyric, again)
def cli(ctx, timeout, proxy, output, quiet, lyric, again)
A command-line tool to download songs from NetEase Music.
4.137251
3.787402
1.092372
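The body above shows that cli is a click group whose options build a shared NetEase instance; a hedged sketch of the likely decorator wiring (the option declarations and defaults are assumptions inferred from the parameter names, not shown in the source):

import click

@click.group()
@click.option('--timeout', type=int, default=60)
@click.option('--proxy', default='')
@click.option('--output', default='.')
@click.option('--quiet', is_flag=True)
@click.option('--lyric', is_flag=True)
@click.option('--again', is_flag=True)
@click.pass_context
def cli(ctx, timeout, proxy, output, quiet, lyric, again):
    """A command-line tool to download songs from NetEase Music."""
    # Share one configured NetEase instance with every subcommand.
    ctx.obj = NetEase(timeout, proxy, output, quiet, lyric, again)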
if name:
    netease.download_song_by_search(name)
if id:
    netease.download_song_by_id(id, 'song' + str(id))
def song(netease, name, id)
Download a song by name or id.
4.251218
4.075033
1.043235
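Each subcommand receives that shared instance as its first argument, which points to @click.pass_obj; a hedged sketch of how the song command above is probably declared (the option help texts and defaults are assumptions):

@cli.command()
@click.option('--name', default='', help='song name')
@click.option('--id', type=int, default=0, help='song id')
@click.pass_obj
def song(netease, name, id):
    """Download a song by name or id."""
    if name:
        netease.download_song_by_search(name)
    if id:
        netease.download_song_by_id(id, 'song' + str(id))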
if name:
    netease.download_album_by_search(name)
if id:
    netease.download_album_by_id(id, 'album' + str(id))
def album(netease, name, id)
Download an album's songs by name or id.
4.390244
4.228622
1.038221
if name:
    netease.download_artist_by_search(name)
if id:
    netease.download_artist_by_id(id, 'artist' + str(id))
def artist(netease, name, id)
Download an artist's hot songs by name or id.
4.507348
4.399415
1.024534
if name:
    netease.download_playlist_by_search(name)
if id:
    netease.download_playlist_by_id(id, 'playlist' + str(id))
def playlist(netease, name, id)
Download a playlist's songs by name or id.
4.311046
4.434225
0.972221
if name:
    netease.download_user_playlists_by_search(name)
if id:
    netease.download_user_playlists_by_id(id)
def user(netease, name, id)
Download a user's playlists by name or id.
3.479958
3.219727
1.080824
if len(songs) == 1:
    select_i = 0
else:
    table = PrettyTable(['Sequence', 'Song Name', 'Artist Name'])
    for i, song in enumerate(songs, 1):
        table.add_row([i, song['name'], song['ar'][0]['name']])
    click.echo(table)
    select_i = click.prompt('Select one song', type=int, default=1)
    while select_i < 1 or select_i > len(songs):
        select_i = click.prompt('Error Select! Select Again', type=int)
song_id, song_name = songs[select_i - 1]['id'], songs[select_i - 1]['name']
song = Song(song_id, song_name)
return song
def select_one_song(songs)
Display the songs returned by the search api.

:param songs: API['result']['songs']
:return: a Song object.
2.48166
2.363941
1.049798
if len(albums) == 1:
    select_i = 0
else:
    table = PrettyTable(['Sequence', 'Album Name', 'Artist Name'])
    for i, album in enumerate(albums, 1):
        table.add_row([i, album['name'], album['artist']['name']])
    click.echo(table)
    select_i = click.prompt('Select one album', type=int, default=1)
    while select_i < 1 or select_i > len(albums):
        select_i = click.prompt('Error Select! Select Again', type=int)
album_id = albums[select_i - 1]['id']
album_name = albums[select_i - 1]['name']
album = Album(album_id, album_name)
return album
def select_one_album(albums)
Display the albums returned by the search api.

:param albums: API['result']['albums']
:return: an Album object.
2.344725
2.312371
1.013992
if len(artists) == 1:
    select_i = 0
else:
    table = PrettyTable(['Sequence', 'Artist Name'])
    for i, artist in enumerate(artists, 1):
        table.add_row([i, artist['name']])
    click.echo(table)
    select_i = click.prompt('Select one artist', type=int, default=1)
    while select_i < 1 or select_i > len(artists):
        select_i = click.prompt('Error Select! Select Again', type=int)
artist_id = artists[select_i - 1]['id']
artist_name = artists[select_i - 1]['name']
artist = Artist(artist_id, artist_name)
return artist
def select_one_artist(artists)
Display the artists returned by the search api.

:param artists: API['result']['artists']
:return: an Artist object.
2.424428
2.368116
1.023779
if len(playlists) == 1:
    select_i = 0
else:
    table = PrettyTable(['Sequence', 'Name'])
    for i, playlist in enumerate(playlists, 1):
        table.add_row([i, playlist['name']])
    click.echo(table)
    select_i = click.prompt('Select one playlist', type=int, default=1)
    while select_i < 1 or select_i > len(playlists):
        select_i = click.prompt('Error Select! Select Again', type=int)
playlist_id = playlists[select_i - 1]['id']
playlist_name = playlists[select_i - 1]['name']
playlist = Playlist(playlist_id, playlist_name)
return playlist
def select_one_playlist(playlists)
Display the playlists returned by the search api or the user playlist api.

:param playlists: API['result']['playlists'] or API['playlist']
:return: a Playlist object.
2.320439
2.282408
1.016663
if len(users) == 1:
    select_i = 0
else:
    table = PrettyTable(['Sequence', 'Name'])
    for i, user in enumerate(users, 1):
        table.add_row([i, user['nickname']])
    click.echo(table)
    select_i = click.prompt('Select one user', type=int, default=1)
    while select_i < 1 or select_i > len(users):
        select_i = click.prompt('Error Select! Select Again', type=int)
user_id = users[select_i - 1]['userId']
user_name = users[select_i - 1]['nickname']
user = User(user_id, user_name)
return user
def select_one_user(users)
Display the users returned by the search api.

:param users: API['result']['userprofiles']
:return: a User object.
2.566177
2.52407
1.016682
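The five select_one_* helpers above repeat one table-and-prompt loop; as an editorial sketch (not in the source), the pattern could be factored into a single generic helper, assuming click and prettytable are available:

import click
from prettytable import PrettyTable

def select_one(items, columns, row_of, label):
    """Show items in a numbered table and return the chosen one.

    columns are the headers after 'Sequence', row_of(item) yields
    one row's cells, and label names what is being selected.
    """
    if len(items) == 1:
        return items[0]
    table = PrettyTable(['Sequence'] + columns)
    for i, item in enumerate(items, 1):
        table.add_row([i] + row_of(item))
    click.echo(table)
    select_i = click.prompt('Select one ' + label, type=int, default=1)
    while select_i < 1 or select_i > len(items):
        select_i = click.prompt('Error Select! Select Again', type=int)
    return items[select_i - 1]

select_one_song, for example, would then reduce to wrapping the chosen dict in a Song.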
def wrapper(*args, **kwargs):
    try:
        result = method(*args, **kwargs)
        return result
    except ProxyError:
        LOG.exception('ProxyError when trying to get %s.', args)
        raise ProxyError('A proxy error occurred.')
    except ConnectionException:
        LOG.exception('ConnectionError when trying to get %s.', args)
        raise ConnectionException('DNS failure, refused connection, etc.')
    except Timeout:
        LOG.exception('Timeout when trying to get %s.', args)
        raise Timeout('The request timed out.')
    except RequestException:
        LOG.exception('RequestException when trying to get %s.', args)
        raise RequestException('Please check your network.')

return wrapper
def exception_handle(method)
Handle exceptions raised by the requests library.
3.166923
2.962548
1.068986
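A hedged usage sketch for the decorator above; the fetch function and its URL are hypothetical, and exception_handle is assumed to be in scope:

import requests

@exception_handle
def fetch(url):
    # Any requests-level failure raised here is logged and re-raised
    # as one of the normalized exceptions in the wrapper above.
    return requests.get(url, timeout=10).json()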
resp = self.session.get(url, timeout=self.timeout, proxies=self.proxies)
result = resp.json()
if result['code'] != 200:
    LOG.error('Return %s when trying to get %s', result, url)
    raise GetRequestIllegal(result)
else:
    return result
def get_request(self, url)
Send a GET request (warning: old api).

:return: a dict, or raise an exception on a non-200 code.
4.214883
3.940905
1.069522
data = encrypted_request(params)
resp = self.session.post(url, data=data, timeout=self.timeout,
                         proxies=self.proxies)
result = resp.json()
if result['code'] != 200:
    LOG.error('Return %s when trying to post %s => %s', result, url, params)
    raise PostRequestIllegal(result)
else:
    return result
def post_request(self, url, params)
Send a POST request with encrypted parameters.

:return: a dict, or raise an exception on a non-200 code.
4.362993
4.213763
1.035415
url = 'http://music.163.com/weapi/cloudsearch/get/web?csrf_token='
params = {'s': search_content, 'type': search_type, 'offset': 0,
          'sub': 'false', 'limit': limit}
result = self.post_request(url, params)
return result
def search(self, search_content, search_type, limit=9)
Search entry point.

:param search_content: search content.
:param search_type: search type.
:param limit: result count returned by weapi.
:return: a dict.
2.58178
2.322362
1.111704
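From the callers that follow, search_type selects the result category: search_song passes 1 and search_album passes 10 (other categories presumably use their own codes, which this section does not show). A minimal sketch, assuming crawler is an instance of the class these methods belong to:

# Song search (type 1) and album search (type 10), mirroring the
# search_song / search_album helpers below.
songs = crawler.search('some title', search_type=1, limit=9)
albums = crawler.search('some title', search_type=10, limit=9)
print(songs['result']['songCount'], albums['result']['albumCount'])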
result = self.search(song_name, search_type=1, limit=limit)
if result['result']['songCount'] <= 0:
    LOG.warning('Song %s does not exist!', song_name)
    raise SearchNotFound('Song {} does not exist.'.format(song_name))
else:
    songs = result['result']['songs']
    if quiet:
        song_id, song_name = songs[0]['id'], songs[0]['name']
        song = Song(song_id, song_name)
        return song
    else:
        return self.display.select_one_song(songs)
def search_song(self, song_name, quiet=False, limit=9)
Search for a song by name.

:param song_name: song name.
:param quiet: automatically select the best match.
:param limit: song count returned by weapi.
:return: a Song object.
3.120448
3.034938
1.028175
result = self.search(album_name, search_type=10, limit=limit)
if result['result']['albumCount'] <= 0:
    LOG.warning('Album %s does not exist!', album_name)
    raise SearchNotFound('Album {} does not exist.'.format(album_name))
else:
    albums = result['result']['albums']
    if quiet:
        album_id, album_name = albums[0]['id'], albums[0]['name']
        album = Album(album_id, album_name)
        return album
    else:
        return self.display.select_one_album(albums)
def search_album(self, album_name, quiet=False, limit=9)
Search for an album by name.

:param album_name: album name.
:param quiet: automatically select the best match.
:param limit: album count returned by weapi.
:return: an Album object.
3.280183
3.135848
1.046028