code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
'''Fail if max element in ``sequence`` is separated from the present by more than ``lag`` as determined by the '<=' operator. If the max element is a datetime, "present" is defined as ``datetime.now()``; if the max element is a date, "present" is defined as ``date.today()``. This is equivalent to ``self.assertLessEqual(present - max(sequence), lag)``. Parameters ---------- sequence : iterable lag : timedelta msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``sequence`` is not iterable. TypeError If ``lag`` is not a timedelta object. TypeError If max element in ``sequence`` is not a datetime or date object. ''' if not isinstance(sequence, collections.Iterable): raise TypeError('First argument is not iterable') if not isinstance(lag, timedelta): raise TypeError('Second argument is not a timedelta object') # Cannot compare datetime to date, so if dates are provided use # date.today(), if datetimes are provided use datetime.today() if isinstance(max(sequence), datetime): target = datetime.today() elif isinstance(max(sequence), date): target = date.today() else: raise TypeError('Expected iterable of datetime or date objects') self.assertLessEqual(target - max(sequence), lag, msg=msg)
def assertDateTimesLagLessEqual(self, sequence, lag, msg=None)
Fail if max element in ``sequence`` is separated from the present by more than ``lag`` as determined by the '<=' operator. If the max element is a datetime, "present" is defined as ``datetime.now()``; if the max element is a date, "present" is defined as ``date.today()``. This is equivalent to ``self.assertLessEqual(present - max(sequence), lag)``. Parameters ---------- sequence : iterable lag : timedelta msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``sequence`` is not iterable. TypeError If ``lag`` is not a timedelta object. TypeError If max element in ``sequence`` is not a datetime or date object.
3.568411
1.695366
2.104802
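A usage sketch for the assertion above, assuming it is exposed through a mixin class such as marbles.mixins.DateTimeMixins (the test-case and attribute names here are hypothetical):

import unittest
from datetime import datetime, timedelta

from marbles import mixins

class DataFreshnessTestCase(unittest.TestCase, mixins.DateTimeMixins):

    def test_data_is_recent(self):
        timestamps = [datetime(2019, 1, 1), datetime.now() - timedelta(hours=1)]
        # Passes because the newest timestamp lags the present by less than a day.
        self.assertDateTimesLagLessEqual(timestamps, timedelta(days=1))

if __name__ == '__main__':
    unittest.main()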
'''Fail if ``dt`` has a non-null ``tzinfo`` attribute. Parameters ---------- dt : datetime msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``dt`` is not a datetime object. ''' if not isinstance(dt, datetime): raise TypeError('First argument is not a datetime object') self.assertIsNone(dt.tzinfo, msg=msg)
def assertTimeZoneIsNone(self, dt, msg=None)
Fail if ``dt`` has a non-null ``tzinfo`` attribute. Parameters ---------- dt : datetime msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``dt`` is not a datetime object.
3.997305
1.87699
2.129636
'''Fail unless ``dt`` has a non-null ``tzinfo`` attribute. Parameters ---------- dt : datetime msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``dt`` is not a datetime object. ''' if not isinstance(dt, datetime): raise TypeError('First argument is not a datetime object') self.assertIsNotNone(dt.tzinfo, msg=msg)
def assertTimeZoneIsNotNone(self, dt, msg=None)
Fail unless ``dt`` has a non-null ``tzinfo`` attribute. Parameters ---------- dt : datetime msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``dt`` is not a datetime object.
3.972677
1.90901
2.081015
'''Fail unless ``dt``'s ``tzinfo`` attribute equals ``tz`` as determined by the '==' operator. Parameters ---------- dt : datetime tz : timezone msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``dt`` is not a datetime object. TypeError If ``tz`` is not a timezone object. ''' if not isinstance(dt, datetime): raise TypeError('First argument is not a datetime object') if not isinstance(tz, timezone): raise TypeError('Second argument is not a timezone object') self.assertEqual(dt.tzinfo, tz, msg=msg)
def assertTimeZoneEqual(self, dt, tz, msg=None)
Fail unless ``dt``'s ``tzinfo`` attribute equals ``tz`` as determined by the '==' operator. Parameters ---------- dt : datetime tz : timezone msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``dt`` is not a datetime object. TypeError If ``tz`` is not a timezone object.
3.339013
1.525038
2.189463
'''Fail if ``dt``'s ``tzinfo`` attribute equals ``tz`` as determined by the '!=' operator. Parameters ---------- dt : datetime tz : timezone msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``dt`` is not a datetime object. TypeError If ``tz`` is not a timezone object. ''' if not isinstance(dt, datetime): raise TypeError('First argument is not a datetime object') if not isinstance(tz, timezone): raise TypeError('Second argument is not a timezone object') self.assertNotEqual(dt.tzinfo, tz, msg=msg)
def assertTimeZoneNotEqual(self, dt, tz, msg=None)
Fail if ``dt``'s ``tzinfo`` attribute equals ``tz`` as determined by the '!=' operator. Parameters ---------- dt : datetime tz : timezone msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``dt`` is not a datetime object. TypeError If ``tz`` is not a timezone object.
3.293122
1.549477
2.125312
for cls in inspect.getmro(meth.__self__.__class__): if cls.__dict__.get(meth.__name__) is meth: return '{}.{}'.format(cls.__module__, cls.__name__) meth = meth.__func__ if inspect.isfunction(meth): module = meth.__qualname__.split('.<locals>', 1)[0] cls = getattr(inspect.getmodule(meth), module.rsplit('.', 1)[0]) if isinstance(cls, type): return '{}.{}'.format(cls.__module__, cls.__name__)
def _class_defining_method(meth): # pragma: no cover '''Gets the name of the class that defines meth. Adapted from http://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3/25959545#25959545. ''' if inspect.ismethod(meth)
Gets the name of the class that defines meth. Adapted from http://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3/25959545#25959545.
2.060213
2.15862
0.954412
'''Configure what assertion logging is done. Settings configured with this method are overridden by environment variables. Parameters ---------- logfile : str or bytes or file object If a string or bytes object, we write to that filename. If an open file object, we just write to it. If None, disable logging. If we open the file, we open it in ``'w'`` mode, so any contents will be overwritten. attrs : list of str Capture these attributes on the TestCase being run when logging an assertion. For example, if you are testing multiple resources, make sure the resource name is a member of your TestCase, and configure marbles logging with that name. These are only captured on failure. verbose_attrs : list of str Similar to attrs, but these attrs are captured even on success. verbose : bool or list of str Fields (within the set {msg, note, locals}) to capture even when the test is successful. By default, those three fields are only captured on failure. ''' if 'logfile' in kwargs: # Note that kwargs['logfile'] might be an open file # object, not a string. We deal with this in # _open_if_needed, but refactoring it so that in that case # it gets set on another attribute would be tricky to # handle the lazy opening semantics that let us override # it with MARBLES_LOGFILE, so instead we choose to let # self._logfilename do double-duty: sometimes it's a name, # sometimes it's sneakily a file object. self._logfilename = kwargs['logfile'] if 'attrs' in kwargs: self._attrs = kwargs['attrs'] if 'verbose_attrs' in kwargs: self._verbose_attrs = kwargs['verbose_attrs'] if 'verbose' in kwargs: self._verbose = kwargs['verbose']
def configure(self, **kwargs)
Configure what assertion logging is done. Settings configured with this method are overridden by environment variables. Parameters ---------- logfile : str or bytes or file object If a string or bytes object, we write to that filename. If an open file object, we just write to it. If None, disable logging. If we open the file, we open it in ``'w'`` mode, so any contents will be overwritten. attrs : list of str Capture these attributes on the TestCase being run when logging an assertion. For example, if you are testing multiple resources, make sure the resource name is a member of your TestCase, and configure marbles logging with that name. These are only captured on failure. verbose_attrs : list of str Similar to attrs, but these attrs are captured even on success. verbose : bool or list of str Fields (within the set {msg, note, locals}) to capture even when the test is successful. By default, those three fields are only captured on failure.
8.629747
2.424955
3.558725
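A minimal sketch of calling configure(); it assumes the method lives on a module-level logger object (marbles.core.log.logger is an assumption here), and the logfile path and attribute names are placeholders:

from marbles.core import log

log.logger.configure(
    logfile='/tmp/marbles.log',   # str, bytes, open file object, or None to disable
    attrs=['resource_name'],      # captured from the TestCase on failure
    verbose=['msg', 'locals'],    # also capture these fields on success
)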
'''Locates the ``msg`` argument in a function signature. We need to determine where we expect to find ``msg`` if it's passed positionally, so we can extract it if the user passed it. Returns ------- tuple The index of the ``msg`` param, the default value for it, and the number of non-``msg`` positional parameters we expect. ''' names = signature.parameters.keys() try: msg_idx = list(names).index('msg') default_msg = signature.parameters['msg'].default except ValueError: # 'msg' is not in list # It's likely that this is a custom assertion that's just # passing all remaining args and kwargs through # (e.g. tests.marbles.ReversingTestCaseMixin). Unfortunately, # we can't inspect its code to find the assert it's wrapping, # so we just have to assume it's of the standard form with msg # in the last position with a default of None. msg_idx = -1 default_msg = None # We also don't want to steal any actually positional arguments if # we can help it. Therefore, we leave the default msg if there are # fewer than this many args passed. We stop counting at a # parameter named 'msg' or when we hit a varargs or keyword-only # parameter. kinds = (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD) non_msg_params = itertools.takewhile( lambda param: param.name != 'msg' and param.kind in kinds, signature.parameters.values()) non_msg_params = sum(1 for _ in non_msg_params) return msg_idx, default_msg, non_msg_params
def _find_msg_argument(signature)
Locates the ``msg`` argument in a function signature. We need to determine where we expect to find ``msg`` if it's passed positionally, so we can extract it if the user passed it. Returns ------- tuple The index of the ``msg`` param, the default value for it, and the number of non-``msg`` positional parameters we expect.
5.73397
4.086349
1.403201
'''Extracts the ``msg`` argument from the passed ``args``. Returns ------- tuple The found ``msg``, the args and kwargs with that ``msg`` removed, and any remaining positional args after ``msg``. ''' rem_args = [] if 'msg' in kwargs: msg = kwargs.pop('msg') elif len(args) > non_msg_params and msg_idx < len(args): msg = args[msg_idx] if 0 <= msg_idx: rem_args = args[msg_idx + 1:] args = args[:msg_idx] else: msg = default_msg return msg, args, rem_args, kwargs
def _extract_msg(args, kwargs, msg_idx, default_msg, non_msg_params)
Extracts the ``msg`` argument from the passed ``args``. Returns ------- tuple The found ``msg``, the args and kwargs with that ``msg`` removed, and any remaining positional args after ``msg``.
3.334835
2.05833
1.620166
'''Wraps each paragraph in ``text`` individually. Parameters ---------- text : str Returns ------- str Single string containing the wrapped paragraphs. ''' pilcrow = re.compile(r'(\n\s*\n)', re.MULTILINE) list_prefix = re.compile(r'\s*(?:\w|[0-9]+)[\.\)]\s+') paragraphs = pilcrow.split(text) wrapped_lines = [] for paragraph in paragraphs: if paragraph.isspace(): wrapped_lines.append('') else: wrapper = textwrap.TextWrapper(**vars(self)) list_item = re.match(list_prefix, paragraph) if list_item: wrapper.subsequent_indent += ' ' * len(list_item.group(0)) wrapped_lines.extend(wrapper.wrap(paragraph)) return wrapped_lines
def wrap(self, text, **kwargs)
Wraps each paragraph in ``text`` individually. Parameters ---------- text : str Returns ------- str Single string containing the wrapped paragraphs.
3.651909
3.161325
1.155183
'''Returns a string displaying the whole statement that failed, with a '>' indicator on the line starting the expression. ''' # This will be used by linecache to read the source of this # module. See the docstring for _find_assert_stmt below which # explains how. # We don't have a test for this because automating the # creation of an egg, installation into an environment, # running of tests, and verification that marbles found the # right source and was able to print it is a lot of # automation. We have tested manually, and marbles works with # all check installation mechanisms we know of right now # (setup.py install, setup.py develop, pip install, bdist_egg, # bdist_wheel). module_globals = vars(sys.modules[self.module]) line_range, lineno = self._find_assert_stmt( self.filename, self.linenumber, module_globals=module_globals) source = [linecache.getline(self.filename, x, module_globals=module_globals) for x in line_range] # Dedent the source, removing the final newline added by dedent dedented_lines = textwrap.dedent(''.join(source)).split('\n')[:-1] formatted_lines = [] for i, line in zip(line_range, dedented_lines): prefix = '>' if i == lineno else ' ' formatted_lines.append(' {0} {1:4d} {2}'.format(prefix, i, line)) return '\n'.join(formatted_lines)
def assert_stmt(self)
Returns a string displaying the whole statement that failed, with a '>' indicator on the line starting the expression.
6.895773
5.554096
1.241565
'''Given a Python module name, filename and line number, find the lines that are part of the statement containing that line. Python stacktraces, when reporting which line they're on, always show the last line of the statement. This can be confusing if the statement spans multiple lines. This function helps reconstruct the whole statement, and is used by :meth:`marbles.core.ContextualAssertionError.assert_stmt`. Returns a tuple of the range of lines spanned by the source being returned, the number of the line on which the interesting statement starts. We may need the ``module_globals`` in order to tell :mod:`linecache` how to find the file, if it comes from inside an egg. In that case, ``module_globals`` should contain a key ``__loader__`` which knows how to read from that file. ''' lines = linecache.getlines( filename, module_globals=module_globals) _source = ''.join(lines) _tree = ast.parse(_source) finder = _StatementFinder(linenumber) finder.visit(_tree) line_range = range(finder.found - leading, linenumber + following) return line_range, finder.found
def _find_assert_stmt(filename, linenumber, leading=1, following=2, module_globals=None)
Given a Python module name, filename and line number, find the lines that are part of the statement containing that line. Python stacktraces, when reporting which line they're on, always show the last line of the statement. This can be confusing if the statement spans multiple lines. This function helps reconstruct the whole statement, and is used by :meth:`marbles.core.ContextualAssertionError.assert_stmt`. Returns a tuple of the range of lines spanned by the source being returned, the number of the line on which the interesting statement starts. We may need the ``module_globals`` in order to tell :mod:`linecache` how to find the file, if it comes from inside an egg. In that case, ``module_globals`` should contain a key ``__loader__`` which knows how to read from that file.
8.751089
1.662191
5.264792
'''Ensures that the annotation has the right fields.''' required_keys = set(self._required_keys) keys = set(key for key, val in annotation.items() if val) missing_keys = required_keys.difference(keys) if missing_keys: error = 'Annotation missing required fields: {0}'.format( missing_keys) raise AnnotationError(error)
def _validate_annotation(self, annotation)
Ensures that the annotation has the right fields.
3.572977
2.993361
1.193634
for e in evals: if e in tc_deps: # we've already included it continue else: if e in deps: # has additional dependencies tc_deps[e]=deps[e] # add to tc_deps the dependencies of the dependencies _tchelper(tc_deps,deps[e],deps) return tc_deps
def _tchelper(tc_deps,evals,deps)
modifies graph in place
3.296195
3.261885
1.010518
if not isinstance(evals,list): evals=[evals] if feed_dict is None: feed_dict={} if breakpoints is None: breakpoints=[] self.state=RUNNING self._original_evals=evals self._original_feed_dict=feed_dict self._exe_order=op_store.compute_exe_order(evals) self._init_evals_bps(evals, breakpoints) # convert cache keys to strings for k,v in feed_dict.items(): if not isinstance(k,str): k=k.name self._cache[k]=v op_store.register_dbsession(self) if break_immediately: return self._break() else: return self.c()
def run(self, evals, feed_dict=None, breakpoints=None, break_immediately=False)
starts the debug session
3.650884
3.549871
1.028456
next_node=self._exe_order[self.step] self._eval(next_node) self.step+=1 if self.step==len(self._exe_order): return self._finish() else: # if stepping, return the value of the node we just # evaled return self._break(value=self._cache.get(next_node.name))
def s(self)
step to the next node in the execution order
5.774628
4.501087
1.282941
i,node=self._get_next_eval() if node.name in self._bpset: if self.state == RUNNING: return self._break() self.state = RUNNING self._eval(node) # increment to next node self.step=i+1 if self.step < len(self._exe_order): return self.c() else: return self._finish()
def c(self)
continue
6.004079
5.715412
1.050507
return [self._cache.get(i.name,None) for i in self._original_evals]
def get_values(self)
returns final values (same result as tf.Session.run())
11.833019
9.402774
1.25846
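A rough end-to-end sketch of the debug session methods above (run, s, c, get_values), assuming the class is importable as tdb.debug_session.DebugSession (as suggested by the debug() wrapper further down) and a TensorFlow 1.x-era graph API:

import tensorflow as tf
from tdb import debug_session

a = tf.constant(2.0)
b = tf.constant(3.0)
c = tf.add(a, b)

dbsession = debug_session.DebugSession(tf.Session())
dbsession.run([c], break_immediately=True)  # break before evaluating any node
dbsession.s()                               # evaluate the next node in the execution order
dbsession.c()                               # continue to the end (or to the next breakpoint)
print(dbsession.get_values())               # same result as tf.Session.run([c])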
if isinstance(node,tf.Tensor): return self._cache.get(node.name,None) elif isinstance(node,tf.Operation): return None else: # handle ascii, unicode strings return self._cache.get(node,None)
def get_value(self, node)
retrieve a node value from the cache
4.457179
3.859277
1.154926
# If an eval or bp is the tf.Placeholder output of a tdb.PythonOp, replace it with its respective PythonOp node evals2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in evals] breakpoints2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in breakpoints] # compute execution order self._exe_order=op_store.compute_exe_order(evals2) # list of nodes # compute evaluation set self._evalset=set([e.name for e in evals2]) for e in self._exe_order: if isinstance(e,HTOp): self._evalset.add(e.name) for t in e.inputs: if not op_store.is_htop_out(t): self._evalset.add(t.name) # compute breakpoint set self._bpset=set([bp.name for bp in breakpoints2])
def _init_evals_bps(self, evals, breakpoints)
HTOps may depend on tf.Tensors that are not in eval. We need to have all inputs to HTOps ready upon evaluation. 1. all evals that were originally specified are added 2. each HTOp in the execution closure needs to be in eval (they won't be eval'ed automatically by Session.run) 3. if an input to an HTOp is a tf.Tensor (not a HT placeholder tensor), it needs to be in eval as well (it's not tensorflow so we'll have to manually evaluate it). Remember, we don't track Placeholders because we instead run the HTOps that generate their values.
3.871294
3.705955
1.044614
# if node.name == 'Momentum': # pdb.set_trace() if isinstance(node,HTOp): # All Tensors MUST be in the cache. feed_dict=dict((t,self._cache[t.name]) for t in node.inputs) node.run(feed_dict) # this will populate self._cache on its own else: # is a TensorFlow node if isinstance(node,tf.Tensor): result=self.session.run(node,self._cache) self._cache[node.name]=result else: # is an operation if node.type =='Assign' or node.type == 'AssignAdd' or node.type == 'AssignSub': # special operation that takes in a tensor ref and mutates it # unfortunately, we end up having to execute nearly the full graph? # alternatively, find a way to pass the tensor_ref thru the feed_dict # rather than the tensor values. self.session.run(node,self._original_feed_dict)
def _eval(self, node)
node is a TensorFlow Op or Tensor from self._exe_order
6.199906
5.78776
1.07121
print('Extracting', filename) with gzip.open(filename) as bytestream: bytestream.read(16) buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images) data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32) data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1) return data
def extract_data(filename, num_images)
Extract the images into a 4D tensor [image index, y, x, channels]. Values are rescaled from [0, 255] down to [-0.5, 0.5].
1.528633
1.43772
1.063234
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training # session. This helps the clarity of presentation on tensorboard. tf.histogram_summary(x.name + '/activations', x) tf.scalar_summary(x.name + '/sparsity', tf.nn.zero_fraction(x))
def _activation_summary(x)
Helper to create summaries for activations. Creates a summary that provides a histogram of activations. Creates a summary that measures the sparsity of activations. Args: x: Tensor Returns: nothing
1.801473
1.972836
0.913139
return 100.0 - ( 100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])
def error_rate(predictions, labels)
Return the error rate based on dense predictions and 1-hot labels.
2.148639
1.993687
1.077721
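A self-contained check of the error_rate formula above: two predictions, one of which picks the wrong class, give a 50% error rate.

import numpy as np

predictions = np.array([[0.1, 0.8, 0.1],
                        [0.6, 0.3, 0.1]])
labels = np.array([[0, 1, 0],
                   [0, 0, 1]])
rate = 100.0 - (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
                / predictions.shape[0])
print(rate)  # 50.0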
if name in _ops: return _ops[name] else: g=tf.get_default_graph() return g.as_graph_element(name)
def get_node(name)
returns HTOp or tf graph element corresponding to requested node name
3.515758
2.362924
1.487884
deps={} g=tf.get_default_graph() for op in g.get_operations(): d=set([i.name for i in op.control_inputs]) for t in op.inputs: if is_htop_out(t): d.add(get_op(t).name) else: d.add(t.name) deps[op.name]=d for t in op.outputs: deps[t.name]=set([op.name]) # do the same thing with HTOps for op in _ops.values(): d=set() for t in op.inputs: if is_htop_out(t): d.add(get_op(t).name) else: d.add(t.name) deps[op.name]=d return deps
def compute_node_deps()
- returns the full dependency graph of ALL ops and ALL tensors Map<string,list<string>> where key=node name, values=list of dependency names If an Op takes in a placeholder tensor that is the output of a PythonOp, we need to replace that Placeholder with the PythonOp.
2.409353
2.361466
1.020278
# construct a PythonOp and return its TensorNode outputs, if it has one global COUNT # check outputs if not isinstance(outputs,list): outputs=[outputs] for tensor in outputs: if tensor.op.type != 'Placeholder': raise TypeError('Output nodes must be Placeholders') op=PythonOp('Python', fn, COUNT, inputs, outputs) op_store.add_op(op) COUNT+=1 if outputs: return outputs[0] else: return op
def python_op(fn, inputs=None, outputs=None)
User-exposed api method for constructing a python_node Args: fn: python function that computes some np.ndarrays given np.ndarrays as inputs. it can have arbitrary side effects. inputs: array of tf.Tensors (optional). These are where fn derives its values from outputs: tf.Placeholder nodes (optional). These are constructed by the user (which allows the user to plug them into other ht.Ops or tf.Ops). The outputs of fn are mapped to each of the output placeholders. raises an Error if fn cannot map
5.728452
5.893338
0.972022
if results is None: # self.fn was probably only used to compute side effects. return elif isinstance(results,np.ndarray): # fn returns single np.ndarray. # re-format it into a list results=[results] # check validity of fn output elif isinstance(results,list): if len(results) is not len(self.outputs): raise ValueError('Number of output tensors does not match number of outputs produced by function') elif isinstance(results,np.number): if len(self.outputs) != 1: raise ValueError('Fn produces scalar but %d outputs expected' % (len(self.outputs))) results=[results] # assign each element in ndarrays to corresponding output tensor for i,ndarray in enumerate(results): self.session._cache_value(self.outputs[i], ndarray)
def cache_values(self, results)
loads into DebugSession cache
5.023601
4.873234
1.030856
global _dbsession _dbsession=debug_session.DebugSession(session) return _dbsession.run(evals,feed_dict,breakpoints,break_immediately)
def debug(evals,feed_dict=None,breakpoints=None,break_immediately=False,session=None)
spawns a new debug session
4.243829
3.854843
1.100908
if not is_notebook(): print('Python session is not running in a Notebook Kernel') return global _comm kernel=get_ipython().kernel kernel.comm_manager.register_target('tdb',handle_comm_opened) # initiate connection to frontend. _comm=Comm(target_name='tdb',data={}) # bind recv handler _comm.on_msg(None)
def connect()
establish connection to frontend notebook
8.084593
7.215376
1.120467
data={"msg_type":"action", "action":action} if params is not None: data['params']=params _comm.send(data)
def send_action(action, params=None)
helper method for sending actions
4.304204
4.322321
0.995808
imgdata = StringIO.StringIO() fig.savefig(imgdata, format='png') imgdata.seek(0) # rewind the data uri = 'data:image/png;base64,' + urllib.quote(b64encode(imgdata.buf)) send_action("update_plot",params={"src":uri, "name":name})
def send_fig(fig,name)
sends figure to frontend
3.179082
3.036345
1.04701
# normalize to 0-1 range if normalize: data -= data.min() data /= data.max() n = int(np.ceil(np.sqrt(data.shape[0]))) # force square padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3) data = np.pad(data, padding, mode='constant', constant_values=(padval, padval)) # tile the filters into an image data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1))) data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:]) plt.matshow(data,cmap=cmap)
def viz_square(data, normalize=True, cmap=plt.cm.gray, padsize=1, padval=0)
takes a np.ndarray of shape (n, height, width) or (n, height, width, channels) visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n) However, this only draws first input channel
1.532021
1.517733
1.009414
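A quick sketch of calling viz_square, assuming the function above is in scope (it is not part of a published package here) and that matplotlib is available:

import numpy as np
import matplotlib.pyplot as plt

filters = np.random.rand(16, 8, 8)  # 16 random 8x8 "filters"
viz_square(filters)                 # tiles them into a 4x4 grid
plt.show()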
global COUNT, ht # check outputs if not isinstance(outputs,list): outputs=[outputs] for tensor in outputs: if tensor.op.type != 'Placeholder': raise TypeError('Output nodes must be Placeholders') op=PlotOp(fn, COUNT, inputs, outputs) op_store.add_op(op) COUNT+=1 # if node has output, return value for python_op is the first output (placeholder) tensor # otherwise, return the op if outputs: return outputs[0] else: return op
def plot_op(fn, inputs=[], outputs=[])
User-exposed api method for constructing a python_node Args: fn: python function that computes some np.ndarrays given np.ndarrays as inputs. it can have arbitrary side effects. inputs: array of tf.Tensors (optional). These are where fn derives its values from outputs: tf.Placeholder nodes (optional). These are constructed by the user (which allows the user to plug them into other ht.Ops or tf.Ops). The outputs of fn are mapped to each of the output placeholders. raises an Error if fn cannot map
6.414007
6.369558
1.006978
'''Create a handler for query engine based on a URL. The following environment variables are used for default connection: TD_API_KEY API key TD_API_SERVER API server (default: api.treasuredata.com) HTTP_PROXY HTTP proxy (optional) Parameters ---------- url : string Engine descriptor in the form "type://apikey@host/database?params..." Use shorthand notation "type:database?params..." for the default connection. con : Connection, optional Handler returned by connect. If not given, default connection is used. header : string or boolean, default True Prepend comment strings, in the form "-- comment", as a header of queries. Set False to disable header. show_progress : double or boolean, default 5.0 Number of seconds to wait before printing progress. Set False to disable progress entirely. clear_progress : boolean, default True If True, clear progress when query completed. Returns ------- QueryEngine ''' url = urlparse(url) engine_type = url.scheme if url.scheme else 'presto' if con is None: if url.netloc: # create connection apikey, host = url.netloc.split('@') con = Connection(apikey=apikey, endpoint="https://{0}/".format(host)) else: # default connection con = Connection() database = url.path[1:] if url.path.startswith('/') else url.path params = { 'type': engine_type, } params.update(parse_qsl(url.query)) return QueryEngine(con, database, params, header=header, show_progress=show_progress, clear_progress=clear_progress)
def create_engine(url, con=None, header=True, show_progress=5.0, clear_progress=True)
Create a handler for query engine based on a URL. The following environment variables are used for default connection: TD_API_KEY API key TD_API_SERVER API server (default: api.treasuredata.com) HTTP_PROXY HTTP proxy (optional) Parameters ---------- url : string Engine descriptor in the form "type://apikey@host/database?params..." Use shorthand notation "type:database?params..." for the default connection. con : Connection, optional Handler returned by connect. If not given, default connection is used. header : string or boolean, default True Prepend comment strings, in the form "-- comment", as a header of queries. Set False to disable header. show_progress : double or boolean, default 5.0 Number of seconds to wait before printing progress. Set False to disable progress entirely. clear_progress : boolean, default True If True, clear progress when query completed. Returns ------- QueryEngine
3.945218
1.541693
2.559017
'''Read Treasure Data query into a DataFrame. Returns a DataFrame corresponding to the result set of the query string. Optionally provide an index_col parameter to use one of the columns as the index, otherwise default integer index will be used. Parameters ---------- query : string Query string to be executed. engine : QueryEngine Handler returned by create_engine. index_col : string, optional Column name to use as index for the returned DataFrame object. parse_dates : list or dict, optional - List of column names to parse as dates - Dict of {column_name: format string} where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps distributed_join : boolean, default False (Presto only) If True, distributed join is enabled. If False, broadcast join is used. See https://prestodb.io/docs/current/release/release-0.77.html params : dict, optional Parameters to pass to execute method. Available parameters: - result_url (str): result output URL - priority (int or str): priority (e.g. "NORMAL", "HIGH", etc.) - retry_limit (int): retry limit Returns ------- DataFrame ''' if params is None: params = {} # header header = engine.create_header("read_td_query") if engine.type == 'presto' and distributed_join is not None: header += "-- set session distributed_join = '{0}'\n".format('true' if distributed_join else 'false') # execute r = engine.execute(header + query, **params) return r.to_dataframe(index_col=index_col, parse_dates=parse_dates)
def read_td_query(query, engine, index_col=None, parse_dates=None, distributed_join=False, params=None)
Read Treasure Data query into a DataFrame. Returns a DataFrame corresponding to the result set of the query string. Optionally provide an index_col parameter to use one of the columns as the index, otherwise default integer index will be used. Parameters ---------- query : string Query string to be executed. engine : QueryEngine Handler returned by create_engine. index_col : string, optional Column name to use as index for the returned DataFrame object. parse_dates : list or dict, optional - List of column names to parse as dates - Dict of {column_name: format string} where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps distributed_join : boolean, default False (Presto only) If True, distributed join is enabled. If False, broadcast join is used. See https://prestodb.io/docs/current/release/release-0.77.html params : dict, optional Parameters to pass to execute method. Available parameters: - result_url (str): result output URL - priority (int or str): priority (e.g. "NORMAL", "HIGH", etc.) - retry_limit (int): retry limit Returns ------- DataFrame
4.290289
1.556361
2.756616
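A short usage sketch combining create_engine and read_td_query, assuming both are exposed by the pandas-td package; the database, table, and environment setup (TD_API_KEY) are placeholders:

import pandas_td as td

engine = td.create_engine('presto:sample_datasets')   # shorthand form: default connection
df = td.read_td_query(
    'SELECT symbol, COUNT(1) AS cnt FROM nasdaq GROUP BY symbol',
    engine,
    index_col='symbol',
)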
'''Read Treasure Data job result into a DataFrame. Returns a DataFrame corresponding to the result set of the job. This method waits for job completion if the specified job is still running. Optionally provide an index_col parameter to use one of the columns as the index, otherwise default integer index will be used. Parameters ---------- job_id : integer Job ID. engine : QueryEngine Handler returned by create_engine. index_col : string, optional Column name to use as index for the returned DataFrame object. parse_dates : list or dict, optional - List of column names to parse as dates - Dict of {column_name: format string} where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps Returns ------- DataFrame ''' # get job job = engine.connection.client.job(job_id) # result r = engine.get_result(job, wait=True) return r.to_dataframe(index_col=index_col, parse_dates=parse_dates)
def read_td_job(job_id, engine, index_col=None, parse_dates=None)
Read Treasure Data job result into a DataFrame. Returns a DataFrame corresponding to the result set of the job. This method waits for job completion if the specified job is still running. Optionally provide an index_col parameter to use one of the columns as the index, otherwise default integer index will be used. Parameters ---------- job_id : integer Job ID. engine : QueryEngine Handler returned by create_engine. index_col : string, optional Column name to use as index for the returned DataFrame object. parse_dates : list or dict, optional - List of column names to parse as dates - Dict of {column_name: format string} where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps Returns ------- DataFrame
4.265103
1.540112
2.769346
'''Read Treasure Data table into a DataFrame. The number of returned rows is limited by "limit" (default 10,000). Setting limit=None means all rows. Be careful when you set limit=None because your table might be very large and the result does not fit into memory. Parameters ---------- table_name : string Name of Treasure Data table in database. engine : QueryEngine Handler returned by create_engine. index_col : string, optional Column name to use as index for the returned DataFrame object. parse_dates : list or dict, optional - List of column names to parse as dates - Dict of {column_name: format string} where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps columns : list, optional List of column names to select from table. time_range : tuple (start, end), optional Limit time range to select. "start" and "end" are one of None, integers, strings or datetime objects. "end" is exclusive, not included in the result. limit : int, default 10,000 Maximum number of rows to select. Returns ------- DataFrame ''' # header query = engine.create_header("read_td_table('{0}')".format(table_name)) # SELECT query += "SELECT {0}\n".format('*' if columns is None else ', '.join(columns)) # FROM query += "FROM {0}\n".format(table_name) # WHERE if time_range is not None: start, end = time_range query += "WHERE td_time_range(time, {0}, {1})\n".format(_convert_time(start), _convert_time(end)) # LIMIT if limit is not None: query += "LIMIT {0}\n".format(limit) # execute r = engine.execute(query) return r.to_dataframe(index_col=index_col, parse_dates=parse_dates)
def read_td_table(table_name, engine, index_col=None, parse_dates=None, columns=None, time_range=None, limit=10000)
Read Treasure Data table into a DataFrame. The number of returned rows is limited by "limit" (default 10,000). Setting limit=None means all rows. Be careful when you set limit=None because your table might be very large and the result does not fit into memory. Parameters ---------- table_name : string Name of Treasure Data table in database. engine : QueryEngine Handler returned by create_engine. index_col : string, optional Column name to use as index for the returned DataFrame object. parse_dates : list or dict, optional - List of column names to parse as dates - Dict of {column_name: format string} where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps columns : list, optional List of column names to select from table. time_range : tuple (start, end), optional Limit time range to select. "start" and "end" are one of None, integers, strings or datetime objects. "end" is exclusive, not included in the result. limit : int, default 10,000 Maximum number of rows to select. Returns ------- DataFrame
3.193934
1.506725
2.119786
primary_key = self.__current_descriptor.get('primaryKey', []) if not isinstance(primary_key, list): primary_key = [primary_key] return primary_key
def primary_key(self)
https://github.com/frictionlessdata/tableschema-py#schema
3.851867
3.117869
1.235417
foreign_keys = self.__current_descriptor.get('foreignKeys', []) for key in foreign_keys: key.setdefault('fields', []) key.setdefault('reference', {}) key['reference'].setdefault('resource', '') key['reference'].setdefault('fields', []) if not isinstance(key['fields'], list): key['fields'] = [key['fields']] if not isinstance(key['reference']['fields'], list): key['reference']['fields'] = [key['reference']['fields']] return foreign_keys
def foreign_keys(self)
https://github.com/frictionlessdata/tableschema-py#schema
2.341384
2.078893
1.126265
self.__current_descriptor.setdefault('fields', []) self.__current_descriptor['fields'].append(descriptor) self.__build() return self.__fields[-1]
def add_field(self, descriptor)
https://github.com/frictionlessdata/tableschema-py#schema
5.179802
4.883097
1.060762
for field in self.__next_descriptor['fields']: if field['name'] == name: field.update(update) return True return False
def update_field(self, name, update)
https://github.com/frictionlessdata/tableschema-py#schema
4.945371
4.669451
1.05909
field = self.get_field(name) if field: predicat = lambda field: field.get('name') != name self.__current_descriptor['fields'] = filter( predicat, self.__current_descriptor['fields']) self.__build() return field
def remove_field(self, name)
https://github.com/frictionlessdata/tableschema-py#schema
4.672669
4.413386
1.058749
# Prepare result = [] errors = [] # Check row length if len(row) != len(self.fields): message = 'Row length %s doesn\'t match fields count %s' message = message % (len(row), len(self.fields)) raise exceptions.CastError(message) # Cast row for field, value in zip(self.fields, row): try: result.append(field.cast_value(value)) except exceptions.CastError as exception: if fail_fast: raise errors.append(exception) # Raise errors if errors: message = 'There are %s cast errors (see exception.errors)' % len(errors) raise exceptions.CastError(message, errors=errors) return result
def cast_row(self, row, fail_fast=False)
https://github.com/frictionlessdata/tableschema-py#schema
2.446368
2.303007
1.062249
# Get headers if isinstance(headers, int): headers_row = headers while True: headers_row -= 1 headers = rows.pop(0) if not headers_row: break elif not isinstance(headers, list): headers = [] # Get descriptor guesser = _TypeGuesser() resolver = _TypeResolver() descriptor = {'fields': []} type_matches = {} for header in headers: descriptor['fields'].append({'name': header}) for index, row in enumerate(rows): # Normalize rows with invalid dimensions for sanity row_length = len(row) headers_length = len(headers) if row_length > headers_length: row = row[:len(headers)] if row_length < headers_length: diff = headers_length - row_length fill = [''] * diff row = row + fill # build a column-wise lookup of type matches for index, value in enumerate(row): rv = guesser.cast(value) if type_matches.get(index): type_matches[index].extend(rv) else: type_matches[index] = list(rv) # choose a type/format for each column based on the matches for index, results in type_matches.items(): rv = resolver.get(results, confidence) descriptor['fields'][index].update(**rv) # Save descriptor self.__current_descriptor = descriptor self.__build() return descriptor
def infer(self, rows, headers=1, confidence=0.75)
https://github.com/frictionlessdata/tableschema-py#schema
3.87143
3.750562
1.032227
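A minimal sketch of the Schema methods above (infer, cast_row, and the descriptor properties), assuming the public tableschema package and that a Schema can be created without a descriptor:

from tableschema import Schema

rows = [['1', 'alice'], ['2', 'bob']]
schema = Schema()
schema.infer(rows, headers=['id', 'name'])   # guesses a type/format per column
print(schema.field_names)                    # ['id', 'name']
print(schema.cast_row(['3', 'carol']))       # [3, 'carol'] once 'id' is inferred as integer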
mode = 'w' encoding = 'utf-8' if six.PY2: mode = 'wb' encoding = None helpers.ensure_dir(target) with io.open(target, mode=mode, encoding=encoding) as file: json.dump(self.__current_descriptor, file, indent=4, ensure_ascii=ensure_ascii)
def save(self, target, ensure_ascii=True)
https://github.com/frictionlessdata/tableschema-py#schema
3.005085
2.88119
1.043001
dirpath = os.path.dirname(path) if dirpath and not os.path.exists(dirpath): os.makedirs(dirpath)
def ensure_dir(path)
Ensure directory exists. Args: path(str): dir path
1.880435
2.693665
0.698095
cast = str if six.PY2: cast = unicode # noqa return cast(value).lower()
def normalize_value(value)
Convert value to string and make it lower cased.
9.858271
6.382795
1.544507
# Null value if value in self.__missing_values: value = None # Cast value cast_value = value if value is not None: cast_value = self.__cast_function(value) if cast_value == config.ERROR: raise exceptions.CastError(( 'Field "{field.name}" can\'t cast value "{value}" ' 'for type "{field.type}" with format "{field.format}"' ).format(field=self, value=value)) # Check value if constraints: for name, check in self.__check_functions.items(): if isinstance(constraints, list): if name not in constraints: continue passed = check(cast_value) if not passed: raise exceptions.CastError(( 'Field "{field.name}" has constraint "{name}" ' 'which is not satisfied for value "{value}"' ).format(field=self, name=name, value=value)) return cast_value
def cast_value(self, value, constraints=True)
https://github.com/frictionlessdata/tableschema-py#field
3.059644
2.867739
1.066918
# Prepare unique checks if cast: unique_fields_cache = {} if self.schema: unique_fields_cache = _create_unique_fields_cache(self.schema) # Open/iterate stream self.__stream.open() iterator = self.__stream.iter(extended=True) iterator = self.__apply_processors(iterator, cast=cast) for row_number, headers, row in iterator: # Get headers if not self.__headers: self.__headers = headers # Check headers if cast: if self.schema and self.headers: if self.headers != self.schema.field_names: self.__stream.close() message = 'Table headers don\'t match schema field names' raise exceptions.CastError(message) # Check unique if cast: for indexes, cache in unique_fields_cache.items(): values = tuple(value for i, value in enumerate(row) if i in indexes) if not all(map(lambda value: value is None, values)): if values in cache['data']: self.__stream.close() message = 'Field(s) "%s" duplicates in row "%s"' message = message % (cache['name'], row_number) raise exceptions.CastError(message) cache['data'].add(values) # Resolve relations if relations: if self.schema: for foreign_key in self.schema.foreign_keys: row = _resolve_relations(row, headers, relations, foreign_key) if row is None: self.__stream.close() message = 'Foreign key "%s" violation in row "%s"' message = message % (foreign_key['fields'], row_number) raise exceptions.RelationError(message) # Form row if extended: yield (row_number, headers, row) elif keyed: yield dict(zip(headers, row)) else: yield row # Close stream self.__stream.close()
def iter(self, keyed=False, extended=False, cast=True, relations=False)
https://github.com/frictionlessdata/tableschema-py#schema
3.258951
3.150461
1.034436
result = [] rows = self.iter(keyed=keyed, extended=extended, cast=cast, relations=relations) for count, row in enumerate(rows, start=1): result.append(row) if count == limit: break return result
def read(self, keyed=False, extended=False, cast=True, relations=False, limit=None)
https://github.com/frictionlessdata/tableschema-py#schema
2.527716
2.379247
1.062402
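A hedged sketch of Table.infer, Table.iter, and Table.read, assuming the tableschema Table class; 'data.csv' is a placeholder for a local CSV file with a header row:

from tableschema import Table

table = Table('data.csv')
table.infer()                       # builds a schema from a sample of the rows
for row in table.iter(keyed=True):  # dicts keyed by the CSV headers
    print(row)
print(table.read(limit=5))          # first five rows, cast against the schema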
if self.__schema is None or self.__headers is None: # Infer (tabulator) if not self.__storage: with self.__stream as stream: if self.__schema is None: self.__schema = Schema() self.__schema.infer(stream.sample[:limit], headers=stream.headers, confidence=confidence) if self.__headers is None: self.__headers = stream.headers # Infer (storage) else: descriptor = self.__storage.describe(self.__source) if self.__schema is None: self.__schema = Schema(descriptor) if self.__headers is None: self.__headers = self.__schema.field_names return self.__schema.descriptor
def infer(self, limit=100, confidence=0.75)
https://github.com/frictionlessdata/tableschema-py#schema
4.080125
3.584669
1.138215
# Save (tabulator) if storage is None: with Stream(self.iter, headers=self.__schema.headers) as stream: stream.save(target, **options) return True # Save (storage) else: if not isinstance(storage, Storage): storage = Storage.connect(storage, **options) storage.create(target, self.__schema.descriptor, force=True) storage.write(target, self.iter(cast=False)) return storage
def save(self, target, storage=None, **options)
https://github.com/frictionlessdata/tableschema-py#schema
5.690811
4.687294
1.214093
descriptor = tableschema.infer(data, encoding=encoding, limit=row_limit, confidence=confidence) if to_file: with io.open(to_file, mode='w+t', encoding='utf-8') as dest: dest.write(json.dumps(descriptor, ensure_ascii=False, indent=4)) click.echo(descriptor)
def infer(data, row_limit, confidence, encoding, to_file)
Infer a schema from data. * data must be a local filepath * data must be CSV * the file encoding is assumed to be UTF-8 unless an encoding is passed with --encoding * the first line of data must be headers * these constraints are just for the CLI
3.065584
3.402426
0.901
try: tableschema.validate(schema) click.echo("Schema is valid") sys.exit(0) except tableschema.exceptions.ValidationError as exception: click.echo("Schema is not valid") click.echo(exception.errors) sys.exit(1)
def validate(schema)
Validate that a supposed schema is in fact a Table Schema.
2.844811
2.6506
1.073271
# Deprecated arguments order is_string = lambda value: isinstance(value, six.string_types) if isinstance(source, list) and all(map(is_string, source)): warnings.warn('Correct arguments order infer(source, headers)', UserWarning) source, headers = headers, source table = Table(source, headers=headers, **options) descriptor = table.infer(limit=limit, confidence=confidence) return descriptor
def infer(source, headers=1, limit=100, confidence=0.75, **options)
https://github.com/frictionlessdata/tableschema-py#schema
4.732825
4.74698
0.997018
if self._key_path_segments is None: return False if search_depth < 0 or search_depth > self._number_of_key_path_segments: return False # Note that the root has no entry in the key path segments and # no name to match. if search_depth == 0: segment_name = '' else: segment_name = self._key_path_segments[search_depth - 1] if self._is_regex: if isinstance(segment_name, py2to3.STRING_TYPES): # Allow '\n' to be matched by '.' and make '\w', '\W', '\b', '\B', # '\d', '\D', '\s' and '\S' Unicode safe. flags = re.DOTALL | re.IGNORECASE | re.UNICODE try: segment_name = r'^{0:s}$'.format(segment_name) segment_name = re.compile(segment_name, flags=flags) except sre_constants.error: # TODO: set self._key_path_segments[search_depth - 1] to None ? return False self._key_path_segments[search_depth - 1] = segment_name else: segment_name = segment_name.lower() self._key_path_segments[search_depth - 1] = segment_name if search_depth > 0: if self._is_regex: # pylint: disable=no-member if not segment_name.match(registry_key.name): return False elif segment_name != registry_key.name.lower(): return False return True
def _CheckKeyPath(self, registry_key, search_depth)
Checks the key path find specification. Args: registry_key (WinRegistryKey): Windows Registry key. search_depth (int): number of key path segments to compare. Returns: bool: True if the Windows Registry key matches the find specification, False if not.
2.688198
2.666643
1.008083
if self._key_path_segments is not None: if search_depth >= self._number_of_key_path_segments: return True return False
def AtMaximumDepth(self, search_depth)
Determines if the find specification is at maximum depth. Args: search_depth (int): number of key path segments to compare. Returns: bool: True if at maximum depth, False if not.
6.893075
4.371861
1.576691
if self._key_path_segments is None: key_path_match = None else: key_path_match = self._CheckKeyPath(registry_key, search_depth) if not key_path_match: return False, key_path_match if search_depth != self._number_of_key_path_segments: return False, key_path_match return True, key_path_match
def Matches(self, registry_key, search_depth)
Determines if the Windows Registry key matches the find specification. Args: registry_key (WinRegistryKey): Windows Registry key. search_depth (int): number of key path segments to compare. Returns: tuple: contains: bool: True if the Windows Registry key matches the find specification, False otherwise. bool: True if the key path matches, False if not or None if no key path specified.
3.267658
2.617879
1.248208
sub_find_specs = [] for find_spec in find_specs: match, key_path_match = find_spec.Matches(registry_key, search_depth) if match: yield registry_key.path # pylint: disable=singleton-comparison if key_path_match != False and not find_spec.AtMaximumDepth(search_depth): sub_find_specs.append(find_spec) if sub_find_specs: search_depth += 1 for sub_registry_key in registry_key.GetSubkeys(): for matching_path in self._FindInKey( sub_registry_key, sub_find_specs, search_depth): yield matching_path
def _FindInKey(self, registry_key, find_specs, search_depth)
Searches for matching keys within the Windows Registry key. Args: registry_key (WinRegistryKey): Windows Registry key. find_specs (list[FindSpec]): find specifications. search_depth (int): number of key path segments to compare. Yields: str: key path of a matching Windows Registry key.
2.655441
2.941489
0.902754
if not find_specs: find_specs = [FindSpec()] registry_key = self._win_registry.GetRootKey() for matching_path in self._FindInKey(registry_key, find_specs, 0): yield matching_path
def Find(self, find_specs=None)
Searches for matching keys within the Windows Registry. Args: find_specs (list[FindSpec]): find specifications, where None will return all allocated Windows Registry keys. Yields: str: key path of a matching Windows Registry key.
4.999172
5.164927
0.967908
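A hedged sketch of driving the searcher above, assuming dfwinreg's module layout (dfwinreg.regf, dfwinreg.registry, dfwinreg.registry_searcher), that WinRegistry.MapFile and FindSpec(key_path=...) exist as used here, and that 'SYSTEM' is a placeholder path to a registry hive file:

from dfwinreg import regf, registry, registry_searcher

registry_file = regf.REGFWinRegistryFile()
with open('SYSTEM', 'rb') as file_object:
    registry_file.Open(file_object)
    win_registry = registry.WinRegistry()
    win_registry.MapFile('HKEY_LOCAL_MACHINE\\System', registry_file)

    searcher = registry_searcher.WinRegistrySearcher(win_registry)
    find_spec = registry_searcher.FindSpec(
        key_path='HKEY_LOCAL_MACHINE\\System\\Select')
    for key_path in searcher.Find(find_specs=[find_spec]):
        print(key_path)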
root_key = self.GetRootKey() if root_key: for registry_key in root_key.RecurseKeys(): yield registry_key
def RecurseKeys(self)
Recurses the Windows Registry keys starting with the root key. Yields: WinRegistryKey: Windows Registry key.
5.268549
4.191793
1.256872
self._key_path_prefix = key_path_prefix self._key_path_prefix_length = len(key_path_prefix) self._key_path_prefix_upper = key_path_prefix.upper()
def SetKeyPathPrefix(self, key_path_prefix)
Sets the Windows Registry key path prefix. Args: key_path_prefix (str): Windows Registry key path prefix.
2.258729
2.548381
0.886339
yield self for subkey in self.GetSubkeys(): for key in subkey.RecurseKeys(): yield key
def RecurseKeys(self)
Recurses the subkeys starting with the key. Yields: WinRegistryKey: Windows Registry key.
4.082162
4.184082
0.975641
return self.data_type in ( definitions.REG_DWORD, definitions.REG_DWORD_BIG_ENDIAN, definitions.REG_QWORD)
def DataIsInteger(self)
Determines, based on the data type, if the data is an integer. The data types considered integers are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN), REG_DWORD_BIG_ENDIAN and REG_QWORD. Returns: bool: True if the data is an integer, False otherwise.
6.168316
5.998096
1.028379
if not key_path.startswith(definitions.KEY_PATH_SEPARATOR): raise ValueError('Key path does not start with: {0:s}'.format( definitions.KEY_PATH_SEPARATOR)) if not self._root_key: self._root_key = FakeWinRegistryKey(self._key_path_prefix) path_segments = key_paths.SplitKeyPath(key_path) parent_key = self._root_key for path_segment in path_segments: try: subkey = FakeWinRegistryKey(path_segment) parent_key.AddSubkey(subkey) except KeyError: subkey = parent_key.GetSubkeyByName(path_segment) parent_key = subkey parent_key.AddSubkey(registry_key)
def AddKeyByPath(self, key_path, registry_key)
Adds a Windows Registry key for a specific key path. Args: key_path (str): Windows Registry key path to add the key. registry_key (WinRegistryKey): Windows Registry key. Raises: KeyError: if the subkey already exists. ValueError: if the Windows Registry key cannot be added.
2.317108
2.344035
0.988512
key_path_upper = key_path.upper() if key_path_upper.startswith(self._key_path_prefix_upper): relative_key_path = key_path[self._key_path_prefix_length:] elif key_path.startswith(definitions.KEY_PATH_SEPARATOR): relative_key_path = key_path key_path = ''.join([self._key_path_prefix, key_path]) else: return None path_segments = key_paths.SplitKeyPath(relative_key_path) registry_key = self._root_key if not registry_key: return None for path_segment in path_segments: registry_key = registry_key.GetSubkeyByName(path_segment) if not registry_key: return None return registry_key
def GetKeyByPath(self, key_path)
Retrieves the key for a specific path. Args: key_path (str): Windows Registry key path. Returns: WinRegistryKey: Windows Registry key or None if not available.
2.371898
2.345495
1.011257
if self._last_written_time is None: return dfdatetime_semantic_time.SemanticTime('Not set') return dfdatetime_filetime.Filetime(timestamp=self._last_written_time)
def last_written_time(self)
dfdatetime.DateTimeValues: last written time.
3.268301
2.411891
1.355078
if subkeys: for registry_key in subkeys: name = registry_key.name.upper() if name in self._subkeys: continue self._subkeys[name] = registry_key # pylint: disable=protected-access registry_key._key_path = key_paths.JoinKeyPath([ self._key_path, registry_key.name]) if values: for registry_value in values: name = registry_value.name.upper() if name in self._values: continue self._values[name] = registry_value
def _BuildKeyHierarchy(self, subkeys, values)
Builds the Windows Registry key hierarchy. Args: subkeys (list[FakeWinRegistryKey]): list of subkeys. values (list[FakeWinRegistryValue]): list of values.
2.391968
2.363059
1.012234
name = registry_key.name.upper() if name in self._subkeys: raise KeyError( 'Subkey: {0:s} already exists.'.format(registry_key.name)) self._subkeys[name] = registry_key key_path = key_paths.JoinKeyPath([self._key_path, registry_key.name]) registry_key._key_path = key_path
def AddSubkey(self, registry_key)
Adds a subkey. Args: registry_key (WinRegistryKey): Windows Registry subkey. Raises: KeyError: if the subkey already exists.
2.951398
3.018591
0.97774
name = registry_value.name.upper() if name in self._values: raise KeyError( 'Value: {0:s} already exists.'.format(registry_value.name)) self._values[name] = registry_value
def AddValue(self, registry_value)
Adds a value. Args: registry_value (WinRegistryValue): Windows Registry value. Raises: KeyError: if the value already exists.
3.445001
3.271774
1.052946
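A small sketch of building an in-memory key hierarchy with the fake classes above; it assumes they live in a dfwinreg.fake module, that FakeWinRegistryValue takes name/data/data_type arguments, and that AddSubkey has the single-argument signature shown in the snippet above:

from dfwinreg import definitions, fake

software_key = fake.FakeWinRegistryKey('Software')
run_key = fake.FakeWinRegistryKey('Run')
software_key.AddSubkey(run_key)

value = fake.FakeWinRegistryValue(
    'Updater', data='updater.exe'.encode('utf-16-le'),
    data_type=definitions.REG_SZ)
run_key.AddValue(value)

print(software_key.GetSubkeyByPath('Run').GetValueByName('Updater').GetDataAsObject())
# 'updater.exe'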
subkeys = list(self._subkeys.values()) if index < 0 or index >= len(subkeys): raise IndexError('Index out of bounds.') return subkeys[index]
def GetSubkeyByIndex(self, index)
Retrieves a subkey by index. Args: index (int): index of the subkey. Returns: WinRegistryKey: Windows Registry subkey or None if not found. Raises: IndexError: if the index is out of bounds.
3.274297
5.06998
0.645821
subkey = self for path_segment in key_paths.SplitKeyPath(key_path): subkey = subkey.GetSubkeyByName(path_segment) if not subkey: break return subkey
def GetSubkeyByPath(self, key_path)
Retrieves a subkey by path. Args: key_path (str): path of the subkey. Returns: WinRegistryKey: Windows Registry subkey or None if not found.
3.519525
4.880693
0.721112
if not self._data:
  return None

if self._data_type in self._STRING_VALUE_TYPES:
  try:
    return self._data.decode('utf-16-le')

  # AttributeError is raised when self._data has no decode method.
  except AttributeError as exception:
    raise errors.WinRegistryValueError((
        'Unsupported data type: {0!s} of value: {1!s} with error: '
        '{2!s}').format(type(self._data), self._name, exception))

  except UnicodeError as exception:
    raise errors.WinRegistryValueError(
        'Unable to decode data of value: {0!s} with error: {1!s}'.format(
            self._name, exception))

elif (self._data_type == definitions.REG_DWORD and
      self._data_size == 4):
  return self._INT32_LITTLE_ENDIAN.MapByteStream(self._data)

elif (self._data_type == definitions.REG_DWORD_BIG_ENDIAN and
      self._data_size == 4):
  return self._INT32_BIG_ENDIAN.MapByteStream(self._data)

elif (self._data_type == definitions.REG_QWORD and
      self._data_size == 8):
  return self._INT64_LITTLE_ENDIAN.MapByteStream(self._data)

elif self._data_type == definitions.REG_MULTI_SZ:
  try:
    utf16_string = self._data.decode('utf-16-le')
    # TODO: evaluate whether the use of filter here is appropriate behavior.
    return list(filter(None, utf16_string.split('\x00')))

  # AttributeError is raised when self._data has no decode method.
  except AttributeError as exception:
    raise errors.WinRegistryValueError((
        'Unsupported data type: {0!s} of value: {1!s} with error: '
        '{2!s}').format(type(self._data), self._name, exception))

  except UnicodeError as exception:
    raise errors.WinRegistryValueError(
        'Unable to read data from value: {0!s} with error: {1!s}'.format(
            self._name, exception))

return self._data
def GetDataAsObject(self)
Retrieves the data as an object.

Returns:
  object: data as a Python type or None if not available.

Raises:
  WinRegistryValueError: if the value data cannot be read.
1.985064
1.895385
1.047314
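The decoding performed above can be mirrored with plain Python; the byte strings below are made-up sample data, and int.from_bytes only illustrates the same byte ordering that the dtfabric integer maps (_INT32_LITTLE_ENDIAN and friends) apply:

# REG_SZ / REG_EXPAND_SZ data is UTF-16 little-endian.
b'V\x00a\x00l\x00u\x00e\x00'.decode('utf-16-le')          # 'Value'

# REG_DWORD is a 4-byte little-endian integer; REG_DWORD_BIG_ENDIAN is
# the big-endian variant; REG_QWORD is an 8-byte little-endian integer.
int.from_bytes(b'\x01\x00\x00\x00', byteorder='little')    # 1
int.from_bytes(b'\x00\x00\x00\x01', byteorder='big')       # 1

# REG_MULTI_SZ is a UTF-16-LE blob of NUL-separated strings.
b'a\x00\x00\x00b\x00\x00\x00\x00\x00'.decode('utf-16-le').split('\x00')
# ['a', 'b', '', ''], and filter(None, ...) keeps ['a', 'b']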
longest_key_path_prefix_upper = ''
longest_key_path_prefix_length = len(longest_key_path_prefix_upper)
for key_path_prefix_upper in self._registry_files:
  if key_path_upper.startswith(key_path_prefix_upper):
    key_path_prefix_length = len(key_path_prefix_upper)
    if key_path_prefix_length > longest_key_path_prefix_length:
      longest_key_path_prefix_upper = key_path_prefix_upper
      longest_key_path_prefix_length = key_path_prefix_length

if not longest_key_path_prefix_upper:
  return None, None

registry_file = self._registry_files.get(
    longest_key_path_prefix_upper, None)
return longest_key_path_prefix_upper, registry_file
def _GetCachedFileByPath(self, key_path_upper)
Retrieves a cached Windows Registry file for a key path.

Args:
  key_path_upper (str): Windows Registry key path, in upper case with
      a resolved root key alias.

Returns:
  tuple: consists of:

    str: key path prefix
    WinRegistryFile: corresponding Windows Registry file or None if not
        available.
1.719597
1.678251
1.024636
select_key_path = 'HKEY_LOCAL_MACHINE\\System\\Select'
select_key = self.GetKeyByPath(select_key_path)
if not select_key:
  return None

# To determine the current control set check:
# 1. The "Current" value.
# 2. The "Default" value.
# 3. The "LastKnownGood" value.
control_set = None
for value_name in ('Current', 'Default', 'LastKnownGood'):
  value = select_key.GetValueByName(value_name)
  if not value or not value.DataIsInteger():
    continue

  control_set = value.GetDataAsObject()
  # If the control set is 0 then we need to check the other values.
  if control_set > 0 and control_set <= 999:
    break

if not control_set or control_set <= 0 or control_set > 999:
  return None

control_set_path = 'HKEY_LOCAL_MACHINE\\System\\ControlSet{0:03d}'.format(
    control_set)

key_path = ''.join([control_set_path, key_path_suffix])
return self.GetKeyByPath(key_path)
def _GetCurrentControlSet(self, key_path_suffix)
Virtual key callback to determine the current control set.

Args:
  key_path_suffix (str): current control set Windows Registry key path
      suffix with leading path separator.

Returns:
  WinRegistryKey: the current control set Windows Registry key or None
      if not available.
2.531908
2.422453
1.045184
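Because GetKeyByPath (further below) routes virtual key paths through callbacks such as this one, a caller never has to read the Select key directly. A hedged sketch, assuming the registry instance registers this callback for the HKEY_LOCAL_MACHINE\System\CurrentControlSet virtual key and has a SYSTEM hive mapped:

# win_registry is an assumed, already configured WinRegistry instance.
services_key = win_registry.GetKeyByPath(
    'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Services')
# If the Select key's "Current" value is 1, this resolves to
# HKEY_LOCAL_MACHINE\System\ControlSet001\Services.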
user_key_name, _, key_path_suffix = key_path_suffix.partition(
    definitions.KEY_PATH_SEPARATOR)

# HKEY_USERS\.DEFAULT is an alias for HKEY_USERS\S-1-5-18 which is
# the Local System account.
if user_key_name == '.DEFAULT':
  search_key_name = 'S-1-5-18'
else:
  search_key_name = user_key_name

user_profile_list_key = self.GetKeyByPath(self._USER_PROFILE_LIST_KEY_PATH)
if not user_profile_list_key:
  return None

for user_profile_key in user_profile_list_key.GetSubkeys():
  if search_key_name == user_profile_key.name:
    profile_path_value = user_profile_key.GetValueByName('ProfileImagePath')
    if not profile_path_value:
      break

    profile_path = profile_path_value.GetDataAsObject()
    if not profile_path:
      break

    key_name_upper = user_profile_key.name.upper()
    if key_name_upper.endswith('_CLASSES'):
      profile_path = '\\'.join([
          profile_path, 'AppData', 'Local', 'Microsoft', 'Windows',
          'UsrClass.dat'])
    else:
      profile_path = '\\'.join([profile_path, 'NTUSER.DAT'])

    profile_path_upper = profile_path.upper()
    registry_file = self._GetCachedUserFileByPath(profile_path_upper)
    if not registry_file:
      break

    key_path_prefix = definitions.KEY_PATH_SEPARATOR.join([
        'HKEY_USERS', user_key_name])
    key_path = ''.join([key_path_prefix, key_path_suffix])

    registry_file.SetKeyPathPrefix(key_path_prefix)
    return registry_file.GetKeyByPath(key_path)

return None
def _GetUsers(self, key_path_suffix)
Virtual key callback to determine the users sub keys.

Args:
  key_path_suffix (str): users Windows Registry key path suffix with
      leading path separator.

Returns:
  WinRegistryKey: the users Windows Registry key or None if not available.
2.691654
2.686841
1.001791
# TODO: handle HKEY_USERS in both 9X and NT.

key_path_prefix, registry_file = self._GetCachedFileByPath(key_path_upper)
if not registry_file:
  for mapping in self._GetFileMappingsByPath(key_path_upper):
    try:
      registry_file = self._OpenFile(mapping.windows_path)
    except IOError:
      registry_file = None

    if not registry_file:
      continue

    if not key_path_prefix:
      key_path_prefix = mapping.key_path_prefix

    self.MapFile(key_path_prefix, registry_file)
    key_path_prefix = key_path_prefix.upper()
    break

return key_path_prefix, registry_file
def _GetFileByPath(self, key_path_upper)
Retrieves a Windows Registry file for a specific path.

Args:
  key_path_upper (str): Windows Registry key path, in upper case with
      a resolved root key alias.

Returns:
  tuple: consists of:

    str: upper case key path prefix
    WinRegistryFile: corresponding Windows Registry file or None if not
        available.
4.053668
3.438808
1.1788
candidate_mappings = []
for mapping in self._REGISTRY_FILE_MAPPINGS_NT:
  if key_path_upper.startswith(mapping.key_path_prefix.upper()):
    candidate_mappings.append(mapping)

# Sort the candidate mappings by longest (most specific) match first.
candidate_mappings.sort(
    key=lambda mapping: len(mapping.key_path_prefix), reverse=True)

for mapping in candidate_mappings:
  yield mapping
def _GetFileMappingsByPath(self, key_path_upper)
Retrieves the Windows Registry file mappings for a specific path.

Args:
  key_path_upper (str): Windows Registry key path, in upper case with
      a resolved root key alias.

Yields:
  WinRegistryFileMapping: Windows Registry file mapping.
3.269513
3.205891
1.019845
if not self._registry_file_reader:
  return None

return self._registry_file_reader.Open(
    path, ascii_codepage=self._ascii_codepage)
def _OpenFile(self, path)
Opens a Windows Registry file.

Args:
  path (str): path of the Windows Registry file.

Returns:
  WinRegistryFile: Windows Registry file or None if not available.
5.892574
5.249054
1.122597
root_key_path, _, key_path = key_path.partition(
    definitions.KEY_PATH_SEPARATOR)

# Resolve a root key alias.
root_key_path = root_key_path.upper()
root_key_path = self._ROOT_KEY_ALIASES.get(root_key_path, root_key_path)

if root_key_path not in self._ROOT_KEYS:
  raise RuntimeError('Unsupported root key: {0:s}'.format(root_key_path))

key_path = definitions.KEY_PATH_SEPARATOR.join([root_key_path, key_path])
key_path_upper = key_path.upper()

for virtual_key_path, virtual_key_callback in self._VIRTUAL_KEYS:
  virtual_key_path_upper = virtual_key_path.upper()
  if key_path_upper.startswith(virtual_key_path_upper):
    key_path_suffix = key_path[len(virtual_key_path):]
    callback_function = getattr(self, virtual_key_callback)
    virtual_key = callback_function(key_path_suffix)
    if not virtual_key:
      raise RuntimeError('Unable to resolve virtual key: {0:s}.'.format(
          virtual_key_path))

    return virtual_key

key_path_prefix_upper, registry_file = self._GetFileByPath(key_path_upper)
if not registry_file:
  return None

if not key_path_upper.startswith(key_path_prefix_upper):
  raise RuntimeError('Key path prefix mismatch.')

key_path_suffix = key_path[len(key_path_prefix_upper):]
key_path = key_path_suffix or definitions.KEY_PATH_SEPARATOR

return registry_file.GetKeyByPath(key_path)
def GetKeyByPath(self, key_path)
Retrieves the key for a specific path.

Args:
  key_path (str): Windows Registry key path.

Returns:
  WinRegistryKey: Windows Registry key or None if not available.

Raises:
  RuntimeError: if the root key is not supported.
2.223164
2.279402
0.975328
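A brief usage sketch of the resolution above; that 'HKLM' maps to 'HKEY_LOCAL_MACHINE' is an assumption about the contents of _ROOT_KEY_ALIASES, which are not shown here:

# Both calls should address the same key once the alias is resolved.
key = win_registry.GetKeyByPath('HKEY_LOCAL_MACHINE\\Software\\Microsoft')
key = win_registry.GetKeyByPath('HKLM\\Software\\Microsoft')

# A root key outside _ROOT_KEYS raises RuntimeError, for example:
# win_registry.GetKeyByPath('HKEY_BOGUS\\Software')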
if not registry_file:
  return ''

candidate_mappings = []
for mapping in self._REGISTRY_FILE_MAPPINGS_NT:
  if not mapping.unique_key_paths:
    continue

  # If all unique key paths are found consider the file to match.
  match = True
  for key_path in mapping.unique_key_paths:
    registry_key = registry_file.GetKeyByPath(key_path)
    if not registry_key:
      match = False

  if match:
    candidate_mappings.append(mapping)

if not candidate_mappings:
  return ''

if len(candidate_mappings) == 1:
  return candidate_mappings[0].key_path_prefix

key_path_prefixes = frozenset([
    mapping.key_path_prefix for mapping in candidate_mappings])

expected_key_path_prefixes = frozenset([
    'HKEY_CURRENT_USER',
    'HKEY_CURRENT_USER\\Software\\Classes'])

if key_path_prefixes == expected_key_path_prefixes:
  return 'HKEY_CURRENT_USER'

raise RuntimeError('Unable to resolve Windows Registry file mapping.')
def GetRegistryFileMapping(self, registry_file)
Determines the Registry file mapping based on the content of the file.

Args:
  registry_file (WinRegistryFile): Windows Registry file.

Returns:
  str: key path prefix or an empty string.

Raises:
  RuntimeError: if there are multiple matching mappings and the correct
      mapping cannot be resolved.
2.641607
2.328562
1.134437
root_registry_key = virtual.VirtualWinRegistryKey('')

for mapped_key in self._MAPPED_KEYS:
  key_path_segments = key_paths.SplitKeyPath(mapped_key)
  if not key_path_segments:
    continue

  registry_key = root_registry_key
  for name in key_path_segments[:-1]:
    sub_registry_key = registry_key.GetSubkeyByName(name)
    if not sub_registry_key:
      sub_registry_key = virtual.VirtualWinRegistryKey(name)
      registry_key.AddSubkey(sub_registry_key)

    registry_key = sub_registry_key

  sub_registry_key = registry_key.GetSubkeyByName(key_path_segments[-1])
  if (not sub_registry_key and
      isinstance(registry_key, virtual.VirtualWinRegistryKey)):
    sub_registry_key = virtual.VirtualWinRegistryKey(
        key_path_segments[-1], registry=self)
    registry_key.AddSubkey(sub_registry_key)

return root_registry_key
def GetRootKey(self)
Retrieves the Windows Registry root key.

Returns:
  WinRegistryKey: Windows Registry root key.

Raises:
  RuntimeError: if there are multiple matching mappings and the correct
      mapping cannot be resolved.
2.0789
2.349161
0.884954
self._registry_files[key_path_prefix.upper()] = registry_file
registry_file.SetKeyPathPrefix(key_path_prefix)
def MapFile(self, key_path_prefix, registry_file)
Maps the Windows Registry file to a specific key path prefix.

Args:
  key_path_prefix (str): key path prefix.
  registry_file (WinRegistryFile): Windows Registry file.
3.623301
4.517895
0.801989
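GetRegistryFileMapping and MapFile are typically used together: the prefix determined from the file content becomes the key path prefix under which the file is mapped. A sketch, assuming registry_file is an already opened WinRegistryFile and win_registry is a WinRegistry instance:

key_path_prefix = win_registry.GetRegistryFileMapping(registry_file)
if key_path_prefix:
  win_registry.MapFile(key_path_prefix, registry_file)
  # Keys under the mapped prefix can now be resolved, for example:
  # win_registry.GetKeyByPath(key_path_prefix + '\\Microsoft')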
key_path_upper = key_path.upper()
if key_path_upper.startswith(self._key_path_prefix_upper):
  relative_key_path = key_path[self._key_path_prefix_length:]
elif key_path.startswith(definitions.KEY_PATH_SEPARATOR):
  relative_key_path = key_path
  key_path = ''.join([self._key_path_prefix, key_path])
else:
  return None

try:
  regf_key = self._regf_file.get_key_by_path(relative_key_path)
except IOError:
  regf_key = None
if not regf_key:
  return None

return REGFWinRegistryKey(regf_key, key_path=key_path)
def GetKeyByPath(self, key_path)
Retrieves the key for a specific path.

Args:
  key_path (str): Windows Registry key path.

Returns:
  WinRegistryKey: Registry key or None if not available.
2.765845
2.645877
1.045341
regf_key = self._regf_file.get_root_key()
if not regf_key:
  return None

return REGFWinRegistryKey(regf_key, key_path=self._key_path_prefix)
def GetRootKey(self)
Retrieves the root key.

Returns:
  WinRegistryKey: Windows Registry root key or None if not available.
6.488935
4.620709
1.404316
self._file_object = file_object
self._regf_file.open_file_object(self._file_object)
return True
def Open(self, file_object)
Opens the Windows Registry file using a file-like object.

Args:
  file_object (file): file-like object.

Returns:
  bool: True if successful or False if not.
5.237211
6.253977
0.837421
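A sketch of opening a REGF hive through a file-like object and listing the subkeys of its root key. The module path dfwinreg.regf, the no-argument constructor, and the use of a plain built-in open() as the file-like object are assumptions; 'SOFTWARE' is a hypothetical file name:

from dfwinreg import regf

registry_file = regf.REGFWinRegistryFile()
with open('SOFTWARE', 'rb') as file_object:
  registry_file.Open(file_object)
  root_key = registry_file.GetRootKey()
  if root_key:
    for registry_key in root_key.GetSubkeys():
      print(registry_key.name)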
timestamp = self._pyregf_key.get_last_written_time_as_integer()
if timestamp == 0:
  return dfdatetime_semantic_time.SemanticTime('Not set')

return dfdatetime_filetime.Filetime(timestamp=timestamp)
def last_written_time(self)
dfdatetime.DateTimeValues: last written time.
4.477695
3.008699
1.48825
if index < 0 or index >= self._pyregf_key.number_of_sub_keys:
  raise IndexError('Index out of bounds.')

pyregf_key = self._pyregf_key.get_sub_key(index)
if not pyregf_key:
  return None

key_path = key_paths.JoinKeyPath([self._key_path, pyregf_key.name])
return REGFWinRegistryKey(pyregf_key, key_path=key_path)
def GetSubkeyByIndex(self, index)
Retrieves a subkey by index.

Args:
  index (int): index of the subkey.

Returns:
  WinRegistryKey: Windows Registry subkey or None if not found.

Raises:
  IndexError: if the index is out of bounds.
3.409837
2.981103
1.143817
pyregf_key = self._pyregf_key.get_sub_key_by_name(name)
if not pyregf_key:
  return None

key_path = key_paths.JoinKeyPath([self._key_path, pyregf_key.name])
return REGFWinRegistryKey(pyregf_key, key_path=key_path)
def GetSubkeyByName(self, name)
Retrieves a subkey by name.

Args:
  name (str): name of the subkey.

Returns:
  WinRegistryKey: Windows Registry subkey or None if not found.
4.265815
3.811141
1.119301
pyregf_key = self._pyregf_key.get_sub_key_by_path(key_path)
if not pyregf_key:
  return None

key_path = key_paths.JoinKeyPath([self._key_path, key_path])
return REGFWinRegistryKey(pyregf_key, key_path=key_path)
def GetSubkeyByPath(self, key_path)
Retrieves a subkey by path.

Args:
  key_path (str): path of the subkey.

Returns:
  WinRegistryKey: Windows Registry subkey or None if not found.
4.20545
3.845116
1.093712
for pyregf_key in self._pyregf_key.sub_keys:
  key_path = key_paths.JoinKeyPath([self._key_path, pyregf_key.name])
  yield REGFWinRegistryKey(pyregf_key, key_path=key_path)
def GetSubkeys(self)
Retrieves all subkeys within the key.

Yields:
  WinRegistryKey: Windows Registry subkey.
5.269849
4.020091
1.310878
pyregf_value = self._pyregf_key.get_value_by_name(name)
if not pyregf_value:
  return None

return REGFWinRegistryValue(pyregf_value)
def GetValueByName(self, name)
Retrieves a value by name.

Value names are not unique and pyregf provides the first match for
the value.

Args:
  name (str): name of the value or an empty string for the default value.

Returns:
  WinRegistryValue: Windows Registry value if a corresponding value was
      found or None if not.
6.13134
3.996077
1.53434
try:
  return self._pyregf_value.data
except IOError as exception:
  raise errors.WinRegistryValueError(
      'Unable to read data from value: {0:s} with error: {1!s}'.format(
          self._pyregf_value.name, exception))
def data(self)
bytes: value data as a byte string.

Raises:
  WinRegistryValueError: if the value data cannot be read.
4.460923
3.207673
1.390704
if self._pyregf_value.type in self._STRING_VALUE_TYPES:
  try:
    return self._pyregf_value.get_data_as_string()
  except IOError as exception:
    raise errors.WinRegistryValueError(
        'Unable to read data from value: {0:s} with error: {1!s}'.format(
            self._pyregf_value.name, exception))

if self._pyregf_value.type in self._INTEGER_VALUE_TYPES:
  try:
    return self._pyregf_value.get_data_as_integer()
  except (IOError, OverflowError) as exception:
    raise errors.WinRegistryValueError(
        'Unable to read data from value: {0:s} with error: {1!s}'.format(
            self._pyregf_value.name, exception))

try:
  value_data = self._pyregf_value.data
except IOError as exception:
  raise errors.WinRegistryValueError(
      'Unable to read data from value: {0:s} with error: {1!s}'.format(
          self._pyregf_value.name, exception))

if self._pyregf_value.type == definitions.REG_MULTI_SZ:
  # TODO: Add support for REG_MULTI_SZ to pyregf.
  if value_data is None:
    return []

  try:
    utf16_string = value_data.decode('utf-16-le')
    return list(filter(None, utf16_string.split('\x00')))
  except UnicodeError as exception:
    raise errors.WinRegistryValueError(
        'Unable to read data from value: {0:s} with error: {1!s}'.format(
            self._pyregf_value.name, exception))

return value_data
def GetDataAsObject(self)
Retrieves the data as an object.

Returns:
  object: data as a Python type.

Raises:
  WinRegistryValueError: if the value data cannot be read.
1.710347
1.625185
1.052402
# This is an optimized way to combine the path segments into a single path
# and combine multiple successive path separators to one.

# Split all the path segments based on the path (segment) separator.
path_segments = [
    segment.split(definitions.KEY_PATH_SEPARATOR)
    for segment in path_segments]

# Flatten the sublists into one list.
path_segments = [
    element for sublist in path_segments for element in sublist]

# Remove empty path segments.
path_segments = filter(None, path_segments)

key_path = definitions.KEY_PATH_SEPARATOR.join(path_segments)
if not key_path.startswith('HKEY_'):
  key_path = '{0:s}{1:s}'.format(definitions.KEY_PATH_SEPARATOR, key_path)

return key_path
def JoinKeyPath(path_segments)
Joins the path segments into key path.

Args:
  path_segments (list[str]): Windows Registry key path segments.

Returns:
  str: key path.
3.392616
3.379468
1.003891
# Split the path with the path separator and remove empty path segments.
return list(filter(None, key_path.split(path_separator)))
def SplitKeyPath(key_path, path_separator=definitions.KEY_PATH_SEPARATOR)
Splits the key path into path segments.

Args:
  key_path (str): key path.
  path_separator (Optional[str]): path separator.

Returns:
  list[str]: key path segments without the root path segment, which is
      an empty string.
4.826132
7.903938
0.610598
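Illustrative outputs for the two helpers above, derived from the splitting and joining logic shown (the leading separator follows from the 'HKEY_' check in JoinKeyPath); treat the exact values as a sketch rather than documented behavior:

from dfwinreg import key_paths

key_paths.JoinKeyPath(['HKEY_LOCAL_MACHINE\\Software', 'Microsoft'])
# 'HKEY_LOCAL_MACHINE\\Software\\Microsoft'

key_paths.JoinKeyPath(['Software\\\\Microsoft', 'Windows'])
# '\\Software\\Microsoft\\Windows': no HKEY_ prefix, so a leading
# separator is added; duplicate separators are collapsed.

key_paths.SplitKeyPath('\\Software\\Microsoft\\Windows')
# ['Software', 'Microsoft', 'Windows']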
if not self._registry_key and self._registry:
  self._GetKeyFromRegistry()

if not self._registry_key:
  return None

return self._registry_key.class_name
def class_name(self)
str: class name of the key or None if not available.
6.265044
4.648295
1.347816
if not self._registry_key and self._registry:
  self._GetKeyFromRegistry()

if not self._registry_key:
  return None

return self._registry_key.last_written_time
def last_written_time(self)
dfdatetime.DateTimeValues: last written time or None.
5.739492
3.901955
1.470927
if not self._registry_key and self._registry:
  self._GetKeyFromRegistry()

return len(self._subkeys)
def number_of_subkeys(self)
int: number of subkeys within the key.
11.671484
9.97985
1.169505