code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
needs_closing = False  # convenience for working with sqlite3
if isinstance(dbo, string_types):
    import sqlite3
    dbo = sqlite3.connect(dbo)
    needs_closing = True
try:
    _todb(table, dbo, tablename, schema=schema, commit=commit,
          truncate=False)
finally:
    if needs_closing:
        dbo.close()
def appenddb(table, dbo, tablename, schema=None, commit=True)
Load data into an existing database table via a DB-API 2.0 connection or cursor. As :func:`petl.io.db.todb` except that the database table will be appended to, i.e., the new data will be inserted into the table, and any existing rows will remain.
4.062488
3.674285
1.105654
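The appenddb docstring gives no worked example, so here is a minimal sketch against an in-memory sqlite3 database (the table name 'foobar' and the rows are illustrative only)::

    >>> import petl as etl
    >>> import sqlite3
    >>> conn = sqlite3.connect(':memory:')
    >>> _ = conn.execute('CREATE TABLE foobar (foo TEXT, bar INT)')
    >>> table = [['foo', 'bar'], ['a', 1], ['b', 2]]
    >>> etl.todb(table, conn, 'foobar')       # initial load replaces contents
    >>> etl.appenddb(table, conn, 'foobar')   # append leaves existing rows in place
    >>> conn.execute('SELECT COUNT(*) FROM foobar').fetchone()
    (4,)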
cols = OrderedDict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
for f in flds:
    cols[f] = list()
for row in it:
    for f, v in izip_longest(flds, row, fillvalue=missing):
        if f in cols:
            cols[f].append(v)
return cols
def columns(table, missing=None)
Construct a :class:`dict` mapping field names to lists of values. E.g.:: >>> import petl as etl >>> table = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] >>> cols = etl.columns(table) >>> cols['foo'] ['a', 'b', 'b'] >>> cols['bar'] [1, 2, 3] See also :func:`petl.util.materialise.facetcolumns`.
2.953169
3.208403
0.920448
fct = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
indices = asindices(hdr, key)
assert len(indices) > 0, 'no key field selected'
getkey = operator.itemgetter(*indices)
for row in it:
    kv = getkey(row)
    if kv not in fct:
        cols = dict()
        for f in flds:
            cols[f] = list()
        fct[kv] = cols
    else:
        cols = fct[kv]
    for f, v in izip_longest(flds, row, fillvalue=missing):
        if f in cols:
            cols[f].append(v)
return fct
def facetcolumns(table, key, missing=None)
Like :func:`petl.util.materialise.columns` but stratified by values of the given key field. E.g.:: >>> import petl as etl >>> table = [['foo', 'bar', 'baz'], ... ['a', 1, True], ... ['b', 2, True], ... ['b', 3]] >>> fc = etl.facetcolumns(table, 'foo') >>> fc['a'] {'foo': ['a'], 'bar': [1], 'baz': [True]} >>> fc['b'] {'foo': ['b', 'b'], 'bar': [2, 3], 'baz': [True, None]}
3.177962
3.594729
0.884061
_writepickle(table, source=source, mode='wb', protocol=protocol, write_header=write_header)
def topickle(table, source=None, protocol=-1, write_header=True)
Write the table to a pickle file. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['a', 1], ... ['b', 2], ... ['c', 2]] >>> etl.topickle(table1, 'example.p') >>> # look what it did ... table2 = etl.frompickle('example.p') >>> table2 +-----+-----+ | foo | bar | +=====+=====+ | 'a' | 1 | +-----+-----+ | 'b' | 2 | +-----+-----+ | 'c' | 2 | +-----+-----+ Note that if a file already exists at the given location, it will be overwritten. The pickle file format preserves type information, i.e., reading and writing is round-trippable for tables with non-string data values.
4.334139
6.512033
0.665559
_writepickle(table, source=source, mode='ab', protocol=protocol, write_header=write_header)
def appendpickle(table, source=None, protocol=-1, write_header=False)
Append data to an existing pickle file. I.e., as :func:`petl.io.pickle.topickle` but the file is opened in append mode. Note that no attempt is made to check that the fields or row lengths are consistent with the existing data; the data rows from the table are simply appended to the file.
4.139475
6.143468
0.673801
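A minimal sketch of appending to a pickle file created with topickle (the file name 'example.p' is illustrative); note the header is not re-written by default::

    >>> import petl as etl
    >>> etl.topickle([['foo', 'bar'], ['a', 1]], 'example.p')
    >>> etl.appendpickle([['foo', 'bar'], ['b', 2]], 'example.p')
    >>> table = etl.frompickle('example.p')
    >>> etl.nrows(table)
    2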
return TeePickleView(table, source=source, protocol=protocol, write_header=write_header)
def teepickle(table, source=None, protocol=-1, write_header=True)
Return a table that writes rows to a pickle file as they are iterated over.
3.925545
4.192108
0.936413
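teepickle has no example above; the sketch below shows the side effect of iteration, with 'copy.p' an illustrative file name::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'], ['a', 1], ['b', 2]]
    >>> table2 = etl.teepickle(table1, 'copy.p')
    >>> etl.nrows(table2)   # iterating the tee view also writes copy.p
    2
    >>> etl.nrows(etl.frompickle('copy.p'))
    2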
source = read_source_from_arg(source)
return JsonView(source, *args, **kwargs)
def fromjson(source, *args, **kwargs)
Extract data from a JSON file. The file must contain a JSON array as the top level object, and each member of the array will be treated as a row of data. E.g.:: >>> import petl as etl >>> data = ''' ... [{"foo": "a", "bar": 1}, ... {"foo": "b", "bar": 2}, ... {"foo": "c", "bar": 2}] ... ''' >>> with open('example.json', 'w') as f: ... f.write(data) ... 74 >>> table1 = etl.fromjson('example.json', header=['foo', 'bar']) >>> table1 +-----+-----+ | foo | bar | +=====+=====+ | 'a' | 1 | +-----+-----+ | 'b' | 2 | +-----+-----+ | 'c' | 2 | +-----+-----+ If your JSON file does not fit this structure, you will need to parse it via :func:`json.load` and select the array to treat as the data, see also :func:`petl.io.json.fromdicts`. .. versionchanged:: 1.1.0 If no `header` is specified, fields will be discovered by sampling keys from the first `sample` objects in `source`. The header will be constructed from keys in the order discovered. Note that this ordering may not be stable, and therefore it may be advisable to specify an explicit `header` or to use another function like :func:`petl.transform.headers.sortheader` on the resulting table to guarantee stability.
9.291181
21.76199
0.426945
return DictsView(dicts, header=header, sample=sample, missing=missing)
def fromdicts(dicts, header=None, sample=1000, missing=None)
View a sequence of Python :class:`dict` as a table. E.g.:: >>> import petl as etl >>> dicts = [{"foo": "a", "bar": 1}, ... {"foo": "b", "bar": 2}, ... {"foo": "c", "bar": 2}] >>> table1 = etl.fromdicts(dicts, header=['foo', 'bar']) >>> table1 +-----+-----+ | foo | bar | +=====+=====+ | 'a' | 1 | +-----+-----+ | 'b' | 2 | +-----+-----+ | 'c' | 2 | +-----+-----+ If `header` is not specified, `sample` items from `dicts` will be inspected to discover dictionary keys. Note that the order in which dictionary keys are discovered may not be stable. See also :func:`petl.io.json.fromjson`. .. versionchanged:: 1.1.0 If no `header` is specified, fields will be discovered by sampling keys from the first `sample` dictionaries in `dicts`. The header will be constructed from keys in the order discovered. Note that this ordering may not be stable, and therefore it may be advisable to specify an explicit `header` or to use another function like :func:`petl.transform.headers.sortheader` on the resulting table to guarantee stability.
5.030895
9.200877
0.546784
obj = list(_dicts(table))
_writejson(source, obj, prefix, suffix, *args, **kwargs)
def tojson(table, source=None, prefix=None, suffix=None, *args, **kwargs)
Write a table in JSON format, with rows output as JSON objects. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['a', 1], ... ['b', 2], ... ['c', 2]] >>> etl.tojson(table1, 'example.json', sort_keys=True) >>> # check what it did ... print(open('example.json').read()) [{"bar": 1, "foo": "a"}, {"bar": 2, "foo": "b"}, {"bar": 2, "foo": "c"}] Note that this is currently not streaming; all data is loaded into memory before being written to the file.
7.73622
12.401957
0.62379
if output_header:
    obj = list(table)
else:
    obj = list(data(table))
_writejson(source, obj, prefix, suffix, *args, **kwargs)
def tojsonarrays(table, source=None, prefix=None, suffix=None, output_header=False, *args, **kwargs)
Write a table in JSON format, with rows output as JSON arrays. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['a', 1], ... ['b', 2], ... ['c', 2]] >>> etl.tojsonarrays(table1, 'example.json') >>> # check what it did ... print(open('example.json').read()) [["a", 1], ["b", 2], ["c", 2]] Note that this is currently not streaming; all data is loaded into memory before being written to the file.
4.582346
7.241315
0.632806
import numpy as np
it = iter(table)
peek, it = iterpeek(it, sample)
hdr = next(it)
flds = list(map(str, hdr))
dtype = construct_dtype(flds, peek, dtype)
# numpy is fussy about having tuples, need to make sure
it = (tuple(row) for row in it)
sa = np.fromiter(it, dtype=dtype, count=count)
return sa
def toarray(table, dtype=None, count=-1, sample=1000)
Load data from the given `table` into a `numpy <http://www.numpy.org/>`_ structured array. E.g.:: >>> import petl as etl >>> table = [('foo', 'bar', 'baz'), ... ('apples', 1, 2.5), ... ('oranges', 3, 4.4), ... ('pears', 7, .1)] >>> a = etl.toarray(table) >>> a array([('apples', 1, 2.5), ('oranges', 3, 4.4), ('pears', 7, 0.1)], dtype=(numpy.record, [('foo', '<U7'), ('bar', '<i8'), ('baz', '<f8')])) >>> # the dtype can be specified as a string ... a = etl.toarray(table, dtype='a4, i2, f4') >>> a array([(b'appl', 1, 2.5), (b'oran', 3, 4.4), (b'pear', 7, 0.1)], dtype=[('foo', 'S4'), ('bar', '<i2'), ('baz', '<f4')]) >>> # the dtype can also be partially specified ... a = etl.toarray(table, dtype={'foo': 'a4'}) >>> a array([(b'appl', 1, 2.5), (b'oran', 3, 4.4), (b'pear', 7, 0.1)], dtype=[('foo', 'S4'), ('bar', '<i8'), ('baz', '<f8')]) If the dtype is not completely specified, `sample` rows will be examined to infer an appropriate dtype.
5.608231
7.05303
0.795152
import numpy as np
return toarray(*args, **kwargs).view(np.recarray)
def torecarray(*args, **kwargs)
Convenient shorthand for ``toarray(*args, **kwargs).view(np.recarray)``.
4.430295
2.974329
1.489511
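As a brief illustration of why the recarray view is convenient, fields become attributes (the data values are illustrative; exact numpy reprs vary by version, so print is used)::

    >>> import petl as etl
    >>> table = [('foo', 'bar'), ('apples', 1), ('oranges', 3)]
    >>> ra = etl.torecarray(table)
    >>> print(ra.bar.sum())   # attribute access instead of ra['bar']
    4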
import numpy as np
it = iter(vals)
if dtype is None:
    peek, it = iterpeek(it, sample)
    dtype = np.array(peek).dtype
a = np.fromiter(it, dtype=dtype, count=count)
return a
def valuestoarray(vals, dtype=None, count=-1, sample=1000)
Load values from a table column into a `numpy <http://www.numpy.org/>`_ array, e.g.:: >>> import petl as etl >>> table = [('foo', 'bar', 'baz'), ... ('apples', 1, 2.5), ... ('oranges', 3, 4.4), ... ('pears', 7, .1)] >>> table = etl.wrap(table) >>> table.values('bar').array() array([1, 3, 7]) >>> # specify dtype ... table.values('bar').array(dtype='i4') array([1, 3, 7], dtype=int32)
3.805537
5.118118
0.743542
source = read_source_from_arg(source)
return XmlView(source, *args, **kwargs)
def fromxml(source, *args, **kwargs)
Extract data from an XML file. E.g.:: >>> import petl as etl >>> # setup a file to demonstrate with ... d = '''<table> ... <tr> ... <td>foo</td><td>bar</td> ... </tr> ... <tr> ... <td>a</td><td>1</td> ... </tr> ... <tr> ... <td>b</td><td>2</td> ... </tr> ... <tr> ... <td>c</td><td>2</td> ... </tr> ... </table>''' >>> with open('example1.xml', 'w') as f: ... f.write(d) ... 212 >>> table1 = etl.fromxml('example1.xml', 'tr', 'td') >>> table1 +-----+-----+ | foo | bar | +=====+=====+ | 'a' | '1' | +-----+-----+ | 'b' | '2' | +-----+-----+ | 'c' | '2' | +-----+-----+ If the data values are stored in an attribute, provide the attribute name as an extra positional argument:: >>> d = '''<table> ... <tr> ... <td v='foo'/><td v='bar'/> ... </tr> ... <tr> ... <td v='a'/><td v='1'/> ... </tr> ... <tr> ... <td v='b'/><td v='2'/> ... </tr> ... <tr> ... <td v='c'/><td v='2'/> ... </tr> ... </table>''' >>> with open('example2.xml', 'w') as f: ... f.write(d) ... 220 >>> table2 = etl.fromxml('example2.xml', 'tr', 'td', 'v') >>> table2 +-----+-----+ | foo | bar | +=====+=====+ | 'a' | '1' | +-----+-----+ | 'b' | '2' | +-----+-----+ | 'c' | '2' | +-----+-----+ Data values can also be extracted by providing a mapping of field names to element paths:: >>> d = '''<table> ... <row> ... <foo>a</foo><baz><bar v='1'/><bar v='3'/></baz> ... </row> ... <row> ... <foo>b</foo><baz><bar v='2'/></baz> ... </row> ... <row> ... <foo>c</foo><baz><bar v='2'/></baz> ... </row> ... </table>''' >>> with open('example3.xml', 'w') as f: ... f.write(d) ... 223 >>> table3 = etl.fromxml('example3.xml', 'row', ... {'foo': 'foo', 'bar': ('baz/bar', 'v')}) >>> table3 +------------+-----+ | bar | foo | +============+=====+ | ('1', '3') | 'a' | +------------+-----+ | '2' | 'b' | +------------+-----+ | '2' | 'c' | +------------+-----+ If `lxml <http://lxml.de/>`_ is installed, full XPath expressions can be used. Note that the implementation is currently **not** streaming, i.e., the whole document is loaded into memory. If multiple elements match a given field, all values are reported as a tuple. If there is more than one element name used for row values, a tuple or list of paths can be provided, e.g., ``fromxml('example.html', './/tr', ('th', 'td'))``.
8.960412
16.145479
0.55498
# TODO don't read the data twice!
return convert(table, header(table), *args, **kwargs)
def convertall(table, *args, **kwargs)
Convenience function to convert all fields in the table using a common function or mapping. See also :func:`convert`. The ``where`` keyword argument can be given with a callable or expression which is evaluated on each row and which should return True if the conversion should be applied on that row, else False.
13.233602
16.99457
0.778696
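A minimal sketch of convertall applying one function to every field (data illustrative)::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'], ['a', 'x'], ['b', 'y']]
    >>> table2 = etl.convertall(table1, lambda v: v.upper())
    >>> list(etl.data(table2))
    [('A', 'X'), ('B', 'Y')]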
return convert(table, field, {a: b}, **kwargs)
def replace(table, field, a, b, **kwargs)
Convenience function to replace all occurrences of `a` with `b` under the given field. See also :func:`convert`. The ``where`` keyword argument can be given with a callable or expression which is evaluated on each row and which should return True if the conversion should be applied on that row, else False.
9.986567
10.513548
0.949876
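A minimal sketch of replace, here substituting 0 for None in one field (data illustrative)::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'], ['a', None], ['b', 2]]
    >>> table2 = etl.replace(table1, 'bar', None, 0)
    >>> list(etl.data(table2))
    [('a', 0), ('b', 2)]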
return convertall(table, {a: b}, **kwargs)
def replaceall(table, a, b, **kwargs)
Convenience function to replace all instances of `a` with `b` under all fields. See also :func:`convertall`. The ``where`` keyword argument can be given with a callable or expression which is evaluated on each row and which should return True if the conversion should be applied on that row, else False.
13.277449
11.696178
1.135195
return convert(table, field, lambda v: value, **kwargs)
def update(table, field, value, **kwargs)
Convenience function to convert a field to a fixed value. Accepts the ``where`` keyword argument. See also :func:`convert`.
12.653923
10.046694
1.259511
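A minimal sketch of update combined with the ``where`` keyword argument (data illustrative)::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'], ['a', 1], ['b', 2]]
    >>> table2 = etl.update(table1, 'bar', 'X', where=lambda r: r['foo'] == 'a')
    >>> list(etl.data(table2))
    [('a', 'X'), ('b', 2)]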
return convertall(table, numparser(strict), **kwargs)
def convertnumbers(table, strict=False, **kwargs)
Convenience function to convert all field values to numbers where possible. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz', 'quux'], ... ['1', '3.0', '9+3j', 'aaa'], ... ['2', '1.3', '7+2j', None]] >>> table2 = etl.convertnumbers(table1) >>> table2 +-----+-----+--------+-------+ | foo | bar | baz | quux | +=====+=====+========+=======+ | 1 | 3.0 | (9+3j) | 'aaa' | +-----+-----+--------+-------+ | 2 | 1.3 | (7+2j) | None | +-----+-----+--------+-------+
18.840839
31.201431
0.603845
conv = lambda v: fmt.format(v)
return convert(table, field, conv, **kwargs)
def format(table, field, fmt, **kwargs)
Convenience function to format all values in the given `field` using the `fmt` format string. The ``where`` keyword argument can be given with a callable or expression which is evaluated on each row and which should return True if the conversion should be applied on that row, else False.
5.665411
6.983341
0.811275
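A minimal sketch of format with a new-style format string (data illustrative)::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'], ['a', 1], ['b', 2]]
    >>> table2 = etl.format(table1, 'bar', '{:03d}')
    >>> list(etl.data(table2))
    [('a', '001'), ('b', '002')]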
conv = lambda v: fmt.format(v)
return convertall(table, conv, **kwargs)
def formatall(table, fmt, **kwargs)
Convenience function to format all values in all fields using the `fmt` format string. The ``where`` keyword argument can be given with a callable or expression which is evaluated on each row and which should return True if the conversion should be applied on that row, else False.
6.917872
7.768019
0.890558
conv = lambda v: fmt % v
return convert(table, field, conv, **kwargs)
def interpolate(table, field, fmt, **kwargs)
Convenience function to interpolate all values in the given `field` using the `fmt` string. The ``where`` keyword argument can be given with a callable or expression which is evaluated on each row and which should return True if the conversion should be applied on that row, else False.
7.671781
8.603663
0.891688
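A minimal sketch of interpolate with an old-style % format string (data illustrative)::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'], ['a', 1], ['b', 2]]
    >>> table2 = etl.interpolate(table1, 'bar', '%.2f')
    >>> list(etl.data(table2))
    [('a', '1.00'), ('b', '2.00')]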
conv = lambda v: fmt % v
return convertall(table, conv, **kwargs)
def interpolateall(table, fmt, **kwargs)
Convenience function to interpolate all values in all fields using the `fmt` string. The ``where`` keyword argument can be given with a callable or expression which is evaluated on each row and which should return True if the conversion should be applied on that row, else False.
8.395133
9.840335
0.853135
return SortView(table, key=key, reverse=reverse, buffersize=buffersize, tempdir=tempdir, cache=cache)
def sort(table, key=None, reverse=False, buffersize=None, tempdir=None, cache=True)
Sort the table. Field names or indices (from zero) can be used to specify the key. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['C', 2], ... ['A', 9], ... ['A', 6], ... ['F', 1], ... ['D', 10]] >>> table2 = etl.sort(table1, 'foo') >>> table2 +-----+-----+ | foo | bar | +=====+=====+ | 'A' | 9 | +-----+-----+ | 'A' | 6 | +-----+-----+ | 'C' | 2 | +-----+-----+ | 'D' | 10 | +-----+-----+ | 'F' | 1 | +-----+-----+ >>> # sorting by compound key is supported ... table3 = etl.sort(table1, key=['foo', 'bar']) >>> table3 +-----+-----+ | foo | bar | +=====+=====+ | 'A' | 6 | +-----+-----+ | 'A' | 9 | +-----+-----+ | 'C' | 2 | +-----+-----+ | 'D' | 10 | +-----+-----+ | 'F' | 1 | +-----+-----+ >>> # if no key is specified, the default is a lexical sort ... table4 = etl.sort(table1) >>> table4 +-----+-----+ | foo | bar | +=====+=====+ | 'A' | 6 | +-----+-----+ | 'A' | 9 | +-----+-----+ | 'C' | 2 | +-----+-----+ | 'D' | 10 | +-----+-----+ | 'F' | 1 | +-----+-----+ The `buffersize` argument should be an `int` or `None`. If the number of rows in the table is less than `buffersize`, the table will be sorted in memory. Otherwise, the table is sorted in chunks of no more than `buffersize` rows, each chunk is written to a temporary file, and then a merge sort is performed on the temporary files. If `buffersize` is `None`, the value of `petl.config.sort_buffersize` will be used. By default this is set to 100000 rows, but can be changed, e.g.:: >>> import petl.config >>> petl.config.sort_buffersize = 500000 If `petl.config.sort_buffersize` is set to `None`, this forces all sorting to be done entirely in memory. By default the results of the sort will be cached, and so a second pass over the sorted table will yield rows from the cache and will not repeat the sort operation. To turn off caching, set the `cache` argument to `False`.
3.189612
4.430291
0.719955
if key is None:
    keyed_iterables = iterables
    for element in heapq.merge(*keyed_iterables):
        yield element
else:
    keyed_iterables = [(_Keyed(key(obj), obj) for obj in iterable)
                       for iterable in iterables]
    for element in heapq.merge(*keyed_iterables):
        yield element.obj
def _heapqmergesorted(key=None, *iterables)
Return a single iterator over the given iterables, sorted by the given `key` function, assuming the input iterables are already sorted by the same function. (I.e., the merge part of a general merge sort.) Uses :func:`heapq.merge` for the underlying implementation.
2.831947
2.869601
0.986878
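A standalone illustration of the merge step this helper performs, using only the standard library (on Python 3.5+ heapq.merge also accepts a key argument directly, which makes the _Keyed wrapper unnecessary there)::

    >>> import heapq
    >>> list(heapq.merge([1, 4, 9], [2, 3, 10]))
    [1, 2, 3, 4, 9, 10]
    >>> # with a key, wrap items so comparison uses the key, then unwrap
    ... merged = heapq.merge(((abs(v), v) for v in [-1, 4]),
    ...                      ((abs(v), v) for v in [2, -3]))
    >>> [v for _, v in merged]
    [-1, 2, -3, 4]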
if reverse:
    op = max
else:
    op = min
if key is not None:
    opkwargs = {'key': key}
else:
    opkwargs = dict()
# populate initial shortlist
# (remember some iterables might be empty)
iterators = list()
shortlist = list()
for iterable in iterables:
    it = iter(iterable)
    try:
        first = next(it)
        iterators.append(it)
        shortlist.append(first)
    except StopIteration:
        pass
# do the mergesort
while iterators:
    nxt = op(shortlist, **opkwargs)
    yield nxt
    nextidx = shortlist.index(nxt)
    try:
        shortlist[nextidx] = next(iterators[nextidx])
    except StopIteration:
        del shortlist[nextidx]
        del iterators[nextidx]
def _shortlistmergesorted(key=None, reverse=False, *iterables)
Return a single iterator over the given iterables, sorted by the given `key` function, assuming the input iterables are already sorted by the same function. (I.e., the merge part of a general merge sort.) Uses :func:`min` (or :func:`max` if ``reverse=True``) for the underlying implementation.
2.788352
2.726825
1.022564
# determine the operator to use when comparing rows
if reverse and strict:
    op = operator.lt
elif reverse and not strict:
    op = operator.le
elif strict:
    op = operator.gt
else:
    op = operator.ge
it = iter(table)
flds = [text_type(f) for f in next(it)]
if key is None:
    prev = next(it)
    for curr in it:
        if not op(curr, prev):
            return False
        prev = curr
else:
    getkey = comparable_itemgetter(*asindices(flds, key))
    prev = next(it)
    prevkey = getkey(prev)
    for curr in it:
        currkey = getkey(curr)
        if not op(currkey, prevkey):
            return False
        prevkey = currkey
return True
def issorted(table, key=None, reverse=False, strict=False)
Return True if the table is ordered (i.e., sorted) by the given key. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz'], ... ['a', 1, True], ... ['b', 3, True], ... ['b', 2]] >>> etl.issorted(table1, key='foo') True >>> etl.issorted(table1, key='bar') False >>> etl.issorted(table1, key='foo', strict=True) False >>> etl.issorted(table1, key='foo', reverse=True) False
2.739974
2.809054
0.975408
if dictionary is None:
    dictionary = dict()
# setup
it, getkey, getvalue = _setup_lookup(table, key, value)
# build lookup dictionary
for row in it:
    k = getkey(row)
    v = getvalue(row)
    if k in dictionary:
        # work properly with shelve
        l = dictionary[k]
        l.append(v)
        dictionary[k] = l
    else:
        dictionary[k] = [v]
return dictionary
def lookup(table, key, value=None, dictionary=None)
Load a dictionary with data from the given table. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['a', 1], ... ['b', 2], ... ['b', 3]] >>> lkp = etl.lookup(table1, 'foo', 'bar') >>> lkp['a'] [1] >>> lkp['b'] [2, 3] >>> # if no value argument is given, defaults to the whole ... # row (as a tuple) ... lkp = etl.lookup(table1, 'foo') >>> lkp['a'] [('a', 1)] >>> lkp['b'] [('b', 2), ('b', 3)] >>> # compound keys are supported ... table2 = [['foo', 'bar', 'baz'], ... ['a', 1, True], ... ['b', 2, False], ... ['b', 3, True], ... ['b', 3, False]] >>> lkp = etl.lookup(table2, ('foo', 'bar'), 'baz') >>> lkp[('a', 1)] [True] >>> lkp[('b', 2)] [False] >>> lkp[('b', 3)] [True, False] >>> # data can be loaded into an existing dictionary-like ... # object, including persistent dictionaries created via the ... # shelve module ... import shelve >>> lkp = shelve.open('example.dat', flag='n') >>> lkp = etl.lookup(table1, 'foo', 'bar', lkp) >>> lkp.close() >>> lkp = shelve.open('example.dat', flag='r') >>> lkp['a'] [1] >>> lkp['b'] [2, 3]
3.539757
3.917043
0.903681
if dictionary is None:
    dictionary = dict()
# setup
it, getkey, getvalue = _setup_lookup(table, key, value)
# build lookup dictionary
for row in it:
    k = getkey(row)
    if strict and k in dictionary:
        raise DuplicateKeyError(k)
    elif k not in dictionary:
        v = getvalue(row)
        dictionary[k] = v
return dictionary
def lookupone(table, key, value=None, dictionary=None, strict=False)
Load a dictionary with data from the given table, assuming there is at most one value for each key. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['a', 1], ... ['b', 2], ... ['b', 3]] >>> # if the specified key is not unique and strict=False (default), ... # the first value wins ... lkp = etl.lookupone(table1, 'foo', 'bar') >>> lkp['a'] 1 >>> lkp['b'] 2 >>> # if the specified key is not unique and strict=True, will raise ... # DuplicateKeyError ... try: ... lkp = etl.lookupone(table1, 'foo', strict=True) ... except etl.errors.DuplicateKeyError as e: ... print(e) ... duplicate key: 'b' >>> # compound keys are supported ... table2 = [['foo', 'bar', 'baz'], ... ['a', 1, True], ... ['b', 2, False], ... ['b', 3, True], ... ['b', 3, False]] >>> lkp = etl.lookupone(table2, ('foo', 'bar'), 'baz') >>> lkp[('a', 1)] True >>> lkp[('b', 2)] False >>> lkp[('b', 3)] True >>> # data can be loaded into an existing dictionary-like ... # object, including persistent dictionaries created via the ... # shelve module ... import shelve >>> lkp = shelve.open('example.dat', flag='n') >>> lkp = etl.lookupone(table1, 'foo', 'bar', lkp) >>> lkp.close() >>> lkp = shelve.open('example.dat', flag='r') >>> lkp['a'] 1 >>> lkp['b'] 2
3.49812
4.073324
0.858788
if dictionary is None:
    dictionary = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
keyindices = asindices(hdr, key)
assert len(keyindices) > 0, 'no key selected'
getkey = operator.itemgetter(*keyindices)
for row in it:
    k = getkey(row)
    if strict and k in dictionary:
        raise DuplicateKeyError(k)
    elif k not in dictionary:
        d = asdict(flds, row)
        dictionary[k] = d
return dictionary
def dictlookupone(table, key, dictionary=None, strict=False)
Load a dictionary with data from the given table, mapping to dicts, assuming there is at most one row for each key. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['a', 1], ... ['b', 2], ... ['b', 3]] >>> # if the specified key is not unique and strict=False (default), ... # the first value wins ... lkp = etl.dictlookupone(table1, 'foo') >>> lkp['a'] {'foo': 'a', 'bar': 1} >>> lkp['b'] {'foo': 'b', 'bar': 2} >>> # if the specified key is not unique and strict=True, will raise ... # DuplicateKeyError ... try: ... lkp = etl.dictlookupone(table1, 'foo', strict=True) ... except etl.errors.DuplicateKeyError as e: ... print(e) ... duplicate key: 'b' >>> # compound keys are supported ... table2 = [['foo', 'bar', 'baz'], ... ['a', 1, True], ... ['b', 2, False], ... ['b', 3, True], ... ['b', 3, False]] >>> lkp = etl.dictlookupone(table2, ('foo', 'bar')) >>> lkp[('a', 1)] {'foo': 'a', 'bar': 1, 'baz': True} >>> lkp[('b', 2)] {'foo': 'b', 'bar': 2, 'baz': False} >>> lkp[('b', 3)] {'foo': 'b', 'bar': 3, 'baz': True} >>> # data can be loaded into an existing dictionary-like ... # object, including persistent dictionaries created via the ... # shelve module ... import shelve >>> lkp = shelve.open('example.dat', flag='n') >>> lkp = etl.dictlookupone(table1, 'foo', lkp) >>> lkp.close() >>> lkp = shelve.open('example.dat', flag='r') >>> lkp['a'] {'foo': 'a', 'bar': 1} >>> lkp['b'] {'foo': 'b', 'bar': 2}
3.42129
3.832933
0.892604
if dictionary is None:
    dictionary = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
keyindices = asindices(hdr, key)
assert len(keyindices) > 0, 'no key selected'
getkey = operator.itemgetter(*keyindices)
for row in it:
    k = getkey(row)
    rec = Record(row, flds)
    if k in dictionary:
        # work properly with shelve
        l = dictionary[k]
        l.append(rec)
        dictionary[k] = l
    else:
        dictionary[k] = [rec]
return dictionary
def recordlookup(table, key, dictionary=None)
Load a dictionary with data from the given table, mapping to record objects.
3.718594
3.51658
1.057446
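recordlookup has no example above; a minimal sketch (data illustrative), showing that the mapped values are Record objects with attribute access::

    >>> import petl as etl
    >>> table = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]]
    >>> lkp = etl.recordlookup(table, 'foo')
    >>> [rec.bar for rec in lkp['b']]
    [2, 3]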
return BcolzView(source, expression=expression, outcols=outcols, limit=limit, skip=skip)
def frombcolz(source, expression=None, outcols=None, limit=None, skip=0)
Extract a table from a bcolz ctable, e.g.:: >>> import petl as etl >>> import bcolz >>> cols = [ ... ['apples', 'oranges', 'pears'], ... [1, 3, 7], ... [2.5, 4.4, .1] ... ] >>> names = ('foo', 'bar', 'baz') >>> ctbl = bcolz.ctable(cols, names=names) >>> tbl = etl.frombcolz(ctbl) >>> tbl +-----------+-----+-----+ | foo | bar | baz | +===========+=====+=====+ | 'apples' | 1 | 2.5 | +-----------+-----+-----+ | 'oranges' | 3 | 4.4 | +-----------+-----+-----+ | 'pears' | 7 | 0.1 | +-----------+-----+-----+ If `expression` is provided it will be executed by bcolz and only matching rows returned, e.g.:: >>> tbl2 = etl.frombcolz(ctbl, expression='bar > 1') >>> tbl2 +-----------+-----+-----+ | foo | bar | baz | +===========+=====+=====+ | 'oranges' | 3 | 4.4 | +-----------+-----+-----+ | 'pears' | 7 | 0.1 | +-----------+-----+-----+ .. versionadded:: 1.1.0
3.608431
5.092095
0.708634
import bcolz
import numpy as np
it = iter(table)
peek, it = iterpeek(it, sample)
hdr = next(it)
# numpy is fussy about having tuples, need to make sure
it = (tuple(row) for row in it)
flds = list(map(text_type, hdr))
dtype = construct_dtype(flds, peek, dtype)
# create ctable
kwargs.setdefault('expectedlen', 1000000)
kwargs.setdefault('mode', 'w')
ctbl = bcolz.ctable(np.array([], dtype=dtype), **kwargs)
# fill chunk-wise
chunklen = sum(ctbl.cols[name].chunklen
               for name in ctbl.names) // len(ctbl.names)
while True:
    data = list(itertools.islice(it, chunklen))
    data = np.array(data, dtype=dtype)
    ctbl.append(data)
    if len(data) < chunklen:
        break
ctbl.flush()
return ctbl
def tobcolz(table, dtype=None, sample=1000, **kwargs)
Load data into a bcolz ctable, e.g.:: >>> import petl as etl >>> table = [('foo', 'bar', 'baz'), ... ('apples', 1, 2.5), ... ('oranges', 3, 4.4), ... ('pears', 7, .1)] >>> ctbl = etl.tobcolz(table) >>> ctbl ctable((3,), [('foo', '<U7'), ('bar', '<i8'), ('baz', '<f8')]) nbytes: 132; cbytes: 1023.98 KB; ratio: 0.00 cparams := cparams(clevel=5, shuffle=1, cname='lz4', quantize=0) [('apples', 1, 2.5) ('oranges', 3, 4.4) ('pears', 7, 0.1)] >>> ctbl.names ['foo', 'bar', 'baz'] >>> ctbl['foo'] carray((3,), <U7) nbytes := 84; cbytes := 511.98 KB; ratio: 0.00 cparams := cparams(clevel=5, shuffle=1, cname='lz4', quantize=0) chunklen := 18724; chunksize: 524272; blocksize: 0 ['apples' 'oranges' 'pears'] Other keyword arguments are passed through to the ctable constructor. .. versionadded:: 1.1.0
3.90693
4.071793
0.959511
import bcolz
import numpy as np
if isinstance(obj, string_types):
    ctbl = bcolz.open(obj, mode='a')
else:
    assert hasattr(obj, 'append') and hasattr(obj, 'names'), \
        'expected rootdir or ctable, found %r' % obj
    ctbl = obj
# setup
dtype = ctbl.dtype
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
# check names match
if check_names:
    assert tuple(flds) == tuple(ctbl.names), 'column names do not match'
# fill chunk-wise
chunklen = sum(ctbl.cols[name].chunklen
               for name in ctbl.names) // len(ctbl.names)
while True:
    data = list(itertools.islice(it, chunklen))
    data = np.array(data, dtype=dtype)
    ctbl.append(data)
    if len(data) < chunklen:
        break
ctbl.flush()
return ctbl
def appendbcolz(table, obj, check_names=True)
Append data into a bcolz ctable. The `obj` argument can be either an existing ctable or the name of a directory where an on-disk ctable is stored. .. versionadded:: 1.1.0
3.251575
3.121821
1.041563
return MeltView(table, key=key, variables=variables, variablefield=variablefield, valuefield=valuefield)
def melt(table, key=None, variables=None, variablefield='variable', valuefield='value')
Reshape a table, melting fields into data. E.g.:: >>> import petl as etl >>> table1 = [['id', 'gender', 'age'], ... [1, 'F', 12], ... [2, 'M', 17], ... [3, 'M', 16]] >>> table2 = etl.melt(table1, 'id') >>> table2.lookall() +----+----------+-------+ | id | variable | value | +====+==========+=======+ | 1 | 'gender' | 'F' | +----+----------+-------+ | 1 | 'age' | 12 | +----+----------+-------+ | 2 | 'gender' | 'M' | +----+----------+-------+ | 2 | 'age' | 17 | +----+----------+-------+ | 3 | 'gender' | 'M' | +----+----------+-------+ | 3 | 'age' | 16 | +----+----------+-------+ >>> # compound keys are supported ... table3 = [['id', 'time', 'height', 'weight'], ... [1, 11, 66.4, 12.2], ... [2, 16, 53.2, 17.3], ... [3, 12, 34.5, 9.4]] >>> table4 = etl.melt(table3, key=['id', 'time']) >>> table4.lookall() +----+------+----------+-------+ | id | time | variable | value | +====+======+==========+=======+ | 1 | 11 | 'height' | 66.4 | +----+------+----------+-------+ | 1 | 11 | 'weight' | 12.2 | +----+------+----------+-------+ | 2 | 16 | 'height' | 53.2 | +----+------+----------+-------+ | 2 | 16 | 'weight' | 17.3 | +----+------+----------+-------+ | 3 | 12 | 'height' | 34.5 | +----+------+----------+-------+ | 3 | 12 | 'weight' | 9.4 | +----+------+----------+-------+ >>> # a subset of variable fields can be selected ... table5 = etl.melt(table3, key=['id', 'time'], ... variables=['height']) >>> table5.lookall() +----+------+----------+-------+ | id | time | variable | value | +====+======+==========+=======+ | 1 | 11 | 'height' | 66.4 | +----+------+----------+-------+ | 2 | 16 | 'height' | 53.2 | +----+------+----------+-------+ | 3 | 12 | 'height' | 34.5 | +----+------+----------+-------+ See also :func:`petl.transform.reshape.recast`.
3.288277
5.040175
0.652413
return PivotView(table, f1, f2, f3, aggfun, missing=missing, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache)
def pivot(table, f1, f2, f3, aggfun, missing=None, presorted=False, buffersize=None, tempdir=None, cache=True)
Construct a pivot table. E.g.:: >>> import petl as etl >>> table1 = [['region', 'gender', 'style', 'units'], ... ['east', 'boy', 'tee', 12], ... ['east', 'boy', 'golf', 14], ... ['east', 'boy', 'fancy', 7], ... ['east', 'girl', 'tee', 3], ... ['east', 'girl', 'golf', 8], ... ['east', 'girl', 'fancy', 18], ... ['west', 'boy', 'tee', 12], ... ['west', 'boy', 'golf', 15], ... ['west', 'boy', 'fancy', 8], ... ['west', 'girl', 'tee', 6], ... ['west', 'girl', 'golf', 16], ... ['west', 'girl', 'fancy', 1]] >>> table2 = etl.pivot(table1, 'region', 'gender', 'units', sum) >>> table2 +--------+-----+------+ | region | boy | girl | +========+=====+======+ | 'east' | 33 | 29 | +--------+-----+------+ | 'west' | 35 | 23 | +--------+-----+------+ >>> table3 = etl.pivot(table1, 'region', 'style', 'units', sum) >>> table3 +--------+-------+------+-----+ | region | fancy | golf | tee | +========+=======+======+=====+ | 'east' | 25 | 22 | 15 | +--------+-------+------+-----+ | 'west' | 9 | 31 | 18 | +--------+-------+------+-----+ >>> table4 = etl.pivot(table1, 'gender', 'style', 'units', sum) >>> table4 +--------+-------+------+-----+ | gender | fancy | golf | tee | +========+=======+======+=====+ | 'boy' | 15 | 29 | 24 | +--------+-------+------+-----+ | 'girl' | 19 | 24 | 9 | +--------+-------+------+-----+ See also :func:`petl.transform.reshape.recast`.
1.94627
2.690331
0.723432
source = read_source_from_arg(source)
return TextView(source, header=header, encoding=encoding, errors=errors, strip=strip)
def fromtext(source=None, encoding=None, errors='strict', strip=None, header=('lines',))
Extract a table from lines in the given text file. E.g.:: >>> import petl as etl >>> # setup example file ... text = 'a,1\\nb,2\\nc,2\\n' >>> with open('example.txt', 'w') as f: ... f.write(text) ... 12 >>> table1 = etl.fromtext('example.txt') >>> table1 +-------+ | lines | +=======+ | 'a,1' | +-------+ | 'b,2' | +-------+ | 'c,2' | +-------+ >>> # post-process, e.g., with capture() ... table2 = table1.capture('lines', '(.*),(.*)$', ['foo', 'bar']) >>> table2 +-----+-----+ | foo | bar | +=====+=====+ | 'a' | '1' | +-----+-----+ | 'b' | '2' | +-----+-----+ | 'c' | '2' | +-----+-----+ Note that the strip() function is called on each line, which by default will remove leading and trailing whitespace, including the end-of-line character - use the `strip` keyword argument to specify alternative characters to strip. Set the `strip` argument to `False` to disable this behaviour and leave line endings in place.
5.511103
8.681862
0.634784
_writetext(table, source=source, mode='wb', encoding=encoding, errors=errors, template=template, prologue=prologue, epilogue=epilogue)
def totext(table, source=None, encoding=None, errors='strict', template=None, prologue=None, epilogue=None)
Write the table to a text file. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['a', 1], ... ['b', 2], ... ['c', 2]] >>> prologue = '''{| class="wikitable" ... |- ... ! foo ... ! bar ... ''' >>> template = '''|- ... | {foo} ... | {bar} ... ''' >>> epilogue = '|}' >>> etl.totext(table1, 'example.txt', template=template, ... prologue=prologue, epilogue=epilogue) >>> # see what we did ... print(open('example.txt').read()) {| class="wikitable" |- ! foo ! bar |- | a | 1 |- | b | 2 |- | c | 2 |} The `template` will be used to format each row via `str.format <http://docs.python.org/library/stdtypes.html#str.format>`_.
2.801955
3.784901
0.740298
assert template is not None, 'template is required'
return TeeTextView(table, source=source, encoding=encoding, errors=errors, template=template, prologue=prologue, epilogue=epilogue)
def teetext(table, source=None, encoding=None, errors='strict', template=None, prologue=None, epilogue=None)
Return a table that writes rows to a text file as they are iterated over.
2.733316
2.878713
0.949492
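teetext has no example above; a minimal sketch (the file name and template are illustrative; a template is required)::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'], ['a', 1], ['b', 2]]
    >>> table2 = etl.teetext(table1, 'copy.txt', template='{foo},{bar}\n')
    >>> etl.nrows(table2)   # iteration writes copy.txt as a side effect
    2
    >>> open('copy.txt').read()
    'a,1\nb,2\n'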
return ProblemsView(table, constraints=constraints, header=header)  # noqa
def validate(table, constraints=None, header=None)
Validate a `table` against a set of `constraints` and/or an expected `header`, e.g.:: >>> import petl as etl >>> # define some validation constraints ... header = ('foo', 'bar', 'baz') >>> constraints = [ ... dict(name='foo_int', field='foo', test=int), ... dict(name='bar_date', field='bar', test=etl.dateparser('%Y-%m-%d')), ... dict(name='baz_enum', field='baz', assertion=lambda v: v in ['Y', 'N']), ... dict(name='not_none', assertion=lambda row: None not in row), ... dict(name='qux_int', field='qux', test=int, optional=True), ... ] >>> # now validate a table ... table = (('foo', 'bar', 'bazzz'), ... (1, '2000-01-01', 'Y'), ... ('x', '2010-10-10', 'N'), ... (2, '2000/01/01', 'Y'), ... (3, '2015-12-12', 'x'), ... (4, None, 'N'), ... ('y', '1999-99-99', 'z'), ... (6, '2000-01-01'), ... (7, '2001-02-02', 'N', True)) >>> problems = etl.validate(table, constraints=constraints, header=header) >>> problems.lookall() +--------------+-----+-------+--------------+------------------+ | name | row | field | value | error | +==============+=====+=======+==============+==================+ | '__header__' | 0 | None | None | 'AssertionError' | +--------------+-----+-------+--------------+------------------+ | 'foo_int' | 2 | 'foo' | 'x' | 'ValueError' | +--------------+-----+-------+--------------+------------------+ | 'bar_date' | 3 | 'bar' | '2000/01/01' | 'ValueError' | +--------------+-----+-------+--------------+------------------+ | 'baz_enum' | 4 | 'baz' | 'x' | 'AssertionError' | +--------------+-----+-------+--------------+------------------+ | 'bar_date' | 5 | 'bar' | None | 'AttributeError' | +--------------+-----+-------+--------------+------------------+ | 'not_none' | 5 | None | None | 'AssertionError' | +--------------+-----+-------+--------------+------------------+ | 'foo_int' | 6 | 'foo' | 'y' | 'ValueError' | +--------------+-----+-------+--------------+------------------+ | 'bar_date' | 6 | 'bar' | '1999-99-99' | 'ValueError' | +--------------+-----+-------+--------------+------------------+ | 'baz_enum' | 6 | 'baz' | 'z' | 'AssertionError' | +--------------+-----+-------+--------------+------------------+ | '__len__' | 7 | None | 2 | 'AssertionError' | +--------------+-----+-------+--------------+------------------+ | 'baz_enum' | 7 | 'baz' | None | 'AssertionError' | +--------------+-----+-------+--------------+------------------+ | '__len__' | 8 | None | 4 | 'AssertionError' | +--------------+-----+-------+--------------+------------------+ Returns a table of validation problems.
15.481842
17.275604
0.896168
local_constraints = constraints or []
local_constraints = [dict(**c) for c in local_constraints]
local_constraints = [
    c for c in local_constraints
    if c.get('field') in flds or not c.get('optional')
]
return local_constraints
def normalize_constraints(constraints, flds)
Normalise the given constraints so that the return value is: * a list, not None * a list of dicts * a list containing only constraints that are non-optional, or optional with a defined field .. note:: We use a new variable 'local_constraints' because the `constraints` parameter may be a mutable collection, and we do not wish to cause side effects by modifying it in place
3.721468
2.812469
1.323203
# TODO don't read data twice (occurs if using natural key)
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return JoinView(left, right, lkey=lkey, rkey=rkey, presorted=presorted,
                buffersize=buffersize, tempdir=tempdir, cache=cache,
                lprefix=lprefix, rprefix=rprefix)
def join(left, right, key=None, lkey=None, rkey=None, presorted=False, buffersize=None, tempdir=None, cache=True, lprefix=None, rprefix=None)
Perform an equi-join on the given tables. E.g.:: >>> import petl as etl >>> table1 = [['id', 'colour'], ... [1, 'blue'], ... [2, 'red'], ... [3, 'purple']] >>> table2 = [['id', 'shape'], ... [1, 'circle'], ... [3, 'square'], ... [4, 'ellipse']] >>> table3 = etl.join(table1, table2, key='id') >>> table3 +----+----------+----------+ | id | colour | shape | +====+==========+==========+ | 1 | 'blue' | 'circle' | +----+----------+----------+ | 3 | 'purple' | 'square' | +----+----------+----------+ >>> # if no key is given, a natural join is tried ... table4 = etl.join(table1, table2) >>> table4 +----+----------+----------+ | id | colour | shape | +====+==========+==========+ | 1 | 'blue' | 'circle' | +----+----------+----------+ | 3 | 'purple' | 'square' | +----+----------+----------+ >>> # note behaviour if the key is not unique in either or both tables ... table5 = [['id', 'colour'], ... [1, 'blue'], ... [1, 'red'], ... [2, 'purple']] >>> table6 = [['id', 'shape'], ... [1, 'circle'], ... [1, 'square'], ... [2, 'ellipse']] >>> table7 = etl.join(table5, table6, key='id') >>> table7 +----+----------+-----------+ | id | colour | shape | +====+==========+===========+ | 1 | 'blue' | 'circle' | +----+----------+-----------+ | 1 | 'blue' | 'square' | +----+----------+-----------+ | 1 | 'red' | 'circle' | +----+----------+-----------+ | 1 | 'red' | 'square' | +----+----------+-----------+ | 2 | 'purple' | 'ellipse' | +----+----------+-----------+ >>> # compound keys are supported ... table8 = [['id', 'time', 'height'], ... [1, 1, 12.3], ... [1, 2, 34.5], ... [2, 1, 56.7]] >>> table9 = [['id', 'time', 'weight'], ... [1, 2, 4.5], ... [2, 1, 6.7], ... [2, 2, 8.9]] >>> table10 = etl.join(table8, table9, key=['id', 'time']) >>> table10 +----+------+--------+--------+ | id | time | height | weight | +====+======+========+========+ | 1 | 2 | 34.5 | 4.5 | +----+------+--------+--------+ | 2 | 1 | 56.7 | 6.7 | +----+------+--------+--------+ If `presorted` is True, it is assumed that the data are already sorted by the given key, and the `buffersize`, `tempdir` and `cache` arguments are ignored. Otherwise, the data are sorted, see also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function. Left and right tables with different key fields can be handled via the `lkey` and `rkey` arguments.
3.505993
4.219221
0.830957
# TODO don't read data twice (occurs if using natural key)
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return JoinView(left, right, lkey=lkey, rkey=rkey, presorted=presorted,
                leftouter=False, rightouter=True, missing=missing,
                buffersize=buffersize, tempdir=tempdir, cache=cache,
                lprefix=lprefix, rprefix=rprefix)
def rightjoin(left, right, key=None, lkey=None, rkey=None, missing=None, presorted=False, buffersize=None, tempdir=None, cache=True, lprefix=None, rprefix=None)
Perform a right outer join on the given tables. E.g.:: >>> import petl as etl >>> table1 = [['id', 'colour'], ... [1, 'blue'], ... [2, 'red'], ... [3, 'purple']] >>> table2 = [['id', 'shape'], ... [1, 'circle'], ... [3, 'square'], ... [4, 'ellipse']] >>> table3 = etl.rightjoin(table1, table2, key='id') >>> table3 +----+----------+-----------+ | id | colour | shape | +====+==========+===========+ | 1 | 'blue' | 'circle' | +----+----------+-----------+ | 3 | 'purple' | 'square' | +----+----------+-----------+ | 4 | None | 'ellipse' | +----+----------+-----------+ If `presorted` is True, it is assumed that the data are already sorted by the given key, and the `buffersize`, `tempdir` and `cache` arguments are ignored. Otherwise, the data are sorted, see also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function. Left and right tables with different key fields can be handled via the `lkey` and `rkey` arguments.
3.627537
4.089162
0.88711
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return AntiJoinView(left=left, right=right, lkey=lkey, rkey=rkey,
                    presorted=presorted, buffersize=buffersize,
                    tempdir=tempdir, cache=cache)
def antijoin(left, right, key=None, lkey=None, rkey=None, presorted=False, buffersize=None, tempdir=None, cache=True)
Return rows from the `left` table where the key value does not occur in the `right` table. E.g.:: >>> import petl as etl >>> table1 = [['id', 'colour'], ... [0, 'black'], ... [1, 'blue'], ... [2, 'red'], ... [4, 'yellow'], ... [5, 'white']] >>> table2 = [['id', 'shape'], ... [1, 'circle'], ... [3, 'square']] >>> table3 = etl.antijoin(table1, table2, key='id') >>> table3 +----+----------+ | id | colour | +====+==========+ | 0 | 'black' | +----+----------+ | 2 | 'red' | +----+----------+ | 4 | 'yellow' | +----+----------+ | 5 | 'white' | +----+----------+ If `presorted` is True, it is assumed that the data are already sorted by the given key, and the `buffersize`, `tempdir` and `cache` arguments are ignored. Otherwise, the data are sorted, see also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function. Left and right tables with different key fields can be handled via the `lkey` and `rkey` arguments.
2.260617
3.30812
0.683354
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return LookupJoinView(left, right, lkey, rkey, presorted=presorted,
                      missing=missing, buffersize=buffersize,
                      tempdir=tempdir, cache=cache,
                      lprefix=lprefix, rprefix=rprefix)
def lookupjoin(left, right, key=None, lkey=None, rkey=None, missing=None, presorted=False, buffersize=None, tempdir=None, cache=True, lprefix=None, rprefix=None)
Perform a left join, but where the key is not unique in the right-hand table, arbitrarily choose the first row and ignore others. E.g.:: >>> import petl as etl >>> table1 = [['id', 'color', 'cost'], ... [1, 'blue', 12], ... [2, 'red', 8], ... [3, 'purple', 4]] >>> table2 = [['id', 'shape', 'size'], ... [1, 'circle', 'big'], ... [1, 'circle', 'small'], ... [2, 'square', 'tiny'], ... [2, 'square', 'big'], ... [3, 'ellipse', 'small'], ... [3, 'ellipse', 'tiny']] >>> table3 = etl.lookupjoin(table1, table2, key='id') >>> table3 +----+----------+------+-----------+---------+ | id | color | cost | shape | size | +====+==========+======+===========+=========+ | 1 | 'blue' | 12 | 'circle' | 'big' | +----+----------+------+-----------+---------+ | 2 | 'red' | 8 | 'square' | 'tiny' | +----+----------+------+-----------+---------+ | 3 | 'purple' | 4 | 'ellipse' | 'small' | +----+----------+------+-----------+---------+ See also :func:`petl.transform.joins.leftjoin`.
2.196508
3.178069
0.691146
if key is None:
    # first sort the table by the value field
    if presorted:
        tbl_sorted = table
    else:
        tbl_sorted = sort(table, value, buffersize=buffersize,
                          tempdir=tempdir, cache=cache)
    # on the left, return the original table but with the value field
    # replaced by an incrementing integer
    left = ConvertToIncrementingCounterView(tbl_sorted, value,
                                            autoincrement)
    # on the right, return a new table with distinct values from the
    # given field
    right = EnumerateDistinctView(tbl_sorted, value, autoincrement)
else:
    # on the left, return distinct rows from the original table
    # with the value field cut out
    left = distinct(cutout(table, value))
    # on the right, return distinct rows from the original table
    # with all fields but the key and value cut out
    right = distinct(cut(table, key, value))
return left, right
def unjoin(table, value, key=None, autoincrement=(1, 1), presorted=False, buffersize=None, tempdir=None, cache=True)
Split a table into two tables by reversing an inner join. E.g.:: >>> import petl as etl >>> # join key is present in the table ... table1 = (('foo', 'bar', 'baz'), ... ('A', 1, 'apple'), ... ('B', 1, 'apple'), ... ('C', 2, 'orange')) >>> table2, table3 = etl.unjoin(table1, 'baz', key='bar') >>> table2 +-----+-----+ | foo | bar | +=====+=====+ | 'A' | 1 | +-----+-----+ | 'B' | 1 | +-----+-----+ | 'C' | 2 | +-----+-----+ >>> table3 +-----+----------+ | bar | baz | +=====+==========+ | 1 | 'apple' | +-----+----------+ | 2 | 'orange' | +-----+----------+ >>> # an integer join key can also be reconstructed ... table4 = (('foo', 'bar'), ... ('A', 'apple'), ... ('B', 'apple'), ... ('C', 'orange')) >>> table5, table6 = etl.unjoin(table4, 'bar') >>> table5 +-----+--------+ | foo | bar_id | +=====+========+ | 'A' | 1 | +-----+--------+ | 'B' | 1 | +-----+--------+ | 'C' | 2 | +-----+--------+ >>> table6 +----+----------+ | id | bar | +====+==========+ | 1 | 'apple' | +----+----------+ | 2 | 'orange' | +----+----------+ The `autoincrement` parameter controls how an integer join key is reconstructed, and should be a tuple of (`start`, `step`).
4.645677
4.638439
1.00156
return RowReduceView(table, key, reducer, header=header, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache)
def rowreduce(table, key, reducer, header=None, presorted=False, buffersize=None, tempdir=None, cache=True)
Group rows under the given key then apply `reducer` to produce a single output row for each input group of rows. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['a', 3], ... ['a', 7], ... ['b', 2], ... ['b', 1], ... ['b', 9], ... ['c', 4]] >>> def sumbar(key, rows): ... return [key, sum(row[1] for row in rows)] ... >>> table2 = etl.rowreduce(table1, key='foo', reducer=sumbar, ... header=['foo', 'barsum']) >>> table2 +-----+--------+ | foo | barsum | +=====+========+ | 'a' | 10 | +-----+--------+ | 'b' | 12 | +-----+--------+ | 'c' | 4 | +-----+--------+ N.B., this is not strictly a "reduce" in the sense of the standard Python :func:`reduce` function, i.e., the `reducer` function is *not* applied recursively to values within a group, rather it is applied once to each row group as a whole. See also :func:`petl.transform.reductions.aggregate` and :func:`petl.transform.reductions.fold`.
2.62105
3.506253
0.747536
s1 = cut(table, key, value)
s2 = distinct(s1)
s3 = aggregate(s2, key, len)
return s3
def groupcountdistinctvalues(table, key, value)
Group by the `key` field then count the number of distinct values in the `value` field.
5.484594
6.73633
0.814181
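A minimal sketch of groupcountdistinctvalues (data illustrative); duplicate (key, value) pairs count only once::

    >>> import petl as etl
    >>> table = [['foo', 'bar'], ['a', 1], ['a', 1], ['a', 2], ['b', 3]]
    >>> counts = etl.groupcountdistinctvalues(table, 'foo', 'bar')
    >>> list(etl.data(counts))
    [('a', 2), ('b', 1)]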
def _reducer(k, rows):
    return next(rows)
return rowreduce(table, key, reducer=_reducer, presorted=presorted,
                 buffersize=buffersize, tempdir=tempdir, cache=cache)
def groupselectfirst(table, key, presorted=False, buffersize=None, tempdir=None, cache=True)
Group by the `key` field then return the first row within each group. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz'], ... ['A', 1, True], ... ['C', 7, False], ... ['B', 2, False], ... ['C', 9, True]] >>> table2 = etl.groupselectfirst(table1, key='foo') >>> table2 +-----+-----+-------+ | foo | bar | baz | +=====+=====+=======+ | 'A' | 1 | True | +-----+-----+-------+ | 'B' | 2 | False | +-----+-----+-------+ | 'C' | 7 | False | +-----+-----+-------+ See also :func:`petl.transform.reductions.groupselectlast`, :func:`petl.transform.dedup.distinct`.
4.045703
5.304965
0.762626
def _reducer(k, rows):
    row = None
    for row in rows:
        pass
    return row
return rowreduce(table, key, reducer=_reducer, presorted=presorted,
                 buffersize=buffersize, tempdir=tempdir, cache=cache)
def groupselectlast(table, key, presorted=False, buffersize=None, tempdir=None, cache=True)
Group by the `key` field then return the last row within each group. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz'], ... ['A', 1, True], ... ['C', 7, False], ... ['B', 2, False], ... ['C', 9, True]] >>> table2 = etl.groupselectlast(table1, key='foo') >>> table2 +-----+-----+-------+ | foo | bar | baz | +=====+=====+=======+ | 'A' | 1 | True | +-----+-----+-------+ | 'B' | 2 | False | +-----+-----+-------+ | 'C' | 9 | True | +-----+-----+-------+ See also :func:`petl.transform.reductions.groupselectfirst`, :func:`petl.transform.dedup.distinct`. .. versionadded:: 1.1.0
3.450268
4.576968
0.753833
return groupselectfirst(sort(table, value, reverse=False), key, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache)
def groupselectmin(table, key, value, presorted=False, buffersize=None, tempdir=None, cache=True)
Group by the `key` field then return the row with the minimum of the `value` field within each group. N.B., will only return one row for each group, even if multiple rows have the same (minimum) value.
3.939404
4.569053
0.862193
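A minimal sketch of groupselectmin (data illustrative); as noted, only one row per group is returned even when values tie::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'], ['a', 9], ['a', 2], ['b', 1], ['b', 7]]
    >>> table2 = etl.groupselectmin(table1, key='foo', value='bar')
    >>> list(etl.data(table2))
    [('a', 2), ('b', 1)]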
return MergeDuplicatesView(table, key, missing=missing, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache)
def mergeduplicates(table, key, missing=None, presorted=False, buffersize=None, tempdir=None, cache=True)
Merge duplicate rows under the given key. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz'], ... ['A', 1, 2.7], ... ['B', 2, None], ... ['D', 3, 9.4], ... ['B', None, 7.8], ... ['E', None, 42.], ... ['D', 3, 12.3], ... ['A', 2, None]] >>> table2 = etl.mergeduplicates(table1, 'foo') >>> table2 +-----+------------------+-----------------------+ | foo | bar | baz | +=====+==================+=======================+ | 'A' | Conflict({1, 2}) | 2.7 | +-----+------------------+-----------------------+ | 'B' | 2 | 7.8 | +-----+------------------+-----------------------+ | 'D' | 3 | Conflict({9.4, 12.3}) | +-----+------------------+-----------------------+ | 'E' | None | 42.0 | +-----+------------------+-----------------------+ Missing values are overridden by non-missing values. Conflicting values are reported as an instance of the Conflict class (sub-class of frozenset). If `presorted` is True, it is assumed that the data are already sorted by the given key, and the `buffersize`, `tempdir` and `cache` arguments are ignored. Otherwise, the data are sorted, see also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function. See also :func:`petl.transform.dedup.conflicts`.
2.617121
4.390842
0.596041
assert 'key' in kwargs, 'keyword argument "key" is required'
key = kwargs['key']
t1 = mergesort(*tables, **kwargs)
t2 = mergeduplicates(t1, key=key, presorted=True)
return t2
def merge(*tables, **kwargs)
Convenience function to combine multiple tables (via :func:`petl.transform.sorts.mergesort`) then combine duplicate rows by merging under the given key (via :func:`petl.transform.reductions.mergeduplicates`). E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz'], ... [1, 'A', True], ... [2, 'B', None], ... [4, 'C', True]] >>> table2 = [['bar', 'baz', 'quux'], ... ['A', True, 42.0], ... ['B', False, 79.3], ... ['C', False, 12.4]] >>> table3 = etl.merge(table1, table2, key='bar') >>> table3 +-----+-----+-------------------------+------+ | bar | foo | baz | quux | +=====+=====+=========================+======+ | 'A' | 1 | True | 42.0 | +-----+-----+-------------------------+------+ | 'B' | 2 | False | 79.3 | +-----+-----+-------------------------+------+ | 'C' | 4 | Conflict({False, True}) | 12.4 | +-----+-----+-------------------------+------+ Keyword arguments are the same as for :func:`petl.transform.sorts.mergesort`, except `key` is required.
5.252374
4.408922
1.191306
return FoldView(table, key, f, value=value, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache)
def fold(table, key, f, value=None, presorted=False, buffersize=None, tempdir=None, cache=True)
Reduce rows recursively via the Python standard :func:`reduce` function. E.g.:: >>> import petl as etl >>> table1 = [['id', 'count'], ... [1, 3], ... [1, 5], ... [2, 4], ... [2, 8]] >>> import operator >>> table2 = etl.fold(table1, 'id', operator.add, 'count', ... presorted=True) >>> table2 +-----+-------+ | key | value | +=====+=======+ | 1 | 8 | +-----+-------+ | 2 | 12 | +-----+-------+ See also :func:`petl.transform.reductions.aggregate`, :func:`petl.transform.reductions.rowreduce`.
2.510911
3.695757
0.679404
return TextIndexView(index_or_dirname, indexname=indexname, docnum_field=docnum_field)
def fromtextindex(index_or_dirname, indexname=None, docnum_field=None)
Extract all documents from a Whoosh index. E.g.:: >>> import petl as etl >>> import os >>> # set up an index and load some documents via the Whoosh API ... from whoosh.index import create_in >>> from whoosh.fields import * >>> schema = Schema(title=TEXT(stored=True), path=ID(stored=True), ... content=TEXT) >>> dirname = 'example.whoosh' >>> if not os.path.exists(dirname): ... os.mkdir(dirname) ... >>> index = create_in(dirname, schema) >>> writer = index.writer() >>> writer.add_document(title=u"First document", path=u"/a", ... content=u"This is the first document we've added!") >>> writer.add_document(title=u"Second document", path=u"/b", ... content=u"The second one is even more interesting!") >>> writer.commit() >>> # extract documents as a table ... table = etl.fromtextindex(dirname) >>> table +------+-------------------+ | path | title | +======+===================+ | '/a' | 'First document' | +------+-------------------+ | '/b' | 'Second document' | +------+-------------------+ Keyword arguments: index_or_dirname Either an instance of `whoosh.index.Index` or a string containing the directory path where the index is stored. indexname String containing the name of the index, if multiple indexes are stored in the same directory. docnum_field If not None, an extra field will be added to the output table containing the internal document number stored in the index. The name of the field will be the value of this argument.
3.353411
5.554796
0.603696
import whoosh.index
import whoosh.writing
# deal with polymorphic argument
if isinstance(index_or_dirname, string_types):
    dirname = index_or_dirname
    index = whoosh.index.create_in(dirname, schema, indexname=indexname)
    needs_closing = True
elif isinstance(index_or_dirname, whoosh.index.Index):
    index = index_or_dirname
    needs_closing = False
else:
    raise ArgumentError('expected string or index, found %r'
                        % index_or_dirname)
writer = index.writer()
try:
    for d in dicts(table):
        writer.add_document(**d)
    writer.commit(merge=merge, optimize=optimize,
                  mergetype=whoosh.writing.CLEAR)
except Exception:
    # avoid a bare except clause; cancel the writer and re-raise
    writer.cancel()
    raise
finally:
    if needs_closing:
        index.close()
def totextindex(table, index_or_dirname, schema=None, indexname=None, merge=False, optimize=False)
Load all rows from `table` into a Whoosh index. N.B., this will clear any existing data in the index before loading. E.g.:: >>> import petl as etl >>> import datetime >>> import os >>> # here is the table we want to load into an index ... table = (('f0', 'f1', 'f2', 'f3', 'f4'), ... ('AAA', 12, 4.3, True, datetime.datetime.now()), ... ('BBB', 6, 3.4, False, datetime.datetime(1900, 1, 31)), ... ('CCC', 42, 7.8, True, datetime.datetime(2100, 12, 25))) >>> # define a schema for the index ... from whoosh.fields import * >>> schema = Schema(f0=TEXT(stored=True), ... f1=NUMERIC(int, stored=True), ... f2=NUMERIC(float, stored=True), ... f3=BOOLEAN(stored=True), ... f4=DATETIME(stored=True)) >>> # load index ... dirname = 'example.whoosh' >>> if not os.path.exists(dirname): ... os.mkdir(dirname) ... >>> etl.totextindex(table, dirname, schema=schema) Keyword arguments: table A table container with the data to be loaded. index_or_dirname Either an instance of `whoosh.index.Index` or a string containing the directory path where the index is to be stored. schema Index schema to use if creating the index. indexname String containing the name of the index, if multiple indexes are stored in the same directory. merge Merge small segments during commit? optimize Merge all segments together?
2.919772
3.130653
0.93264
import whoosh.index
# deal with polymorphic argument
if isinstance(index_or_dirname, string_types):
    dirname = index_or_dirname
    index = whoosh.index.open_dir(dirname, indexname=indexname,
                                  readonly=False)
    needs_closing = True
elif isinstance(index_or_dirname, whoosh.index.Index):
    index = index_or_dirname
    needs_closing = False
else:
    raise ArgumentError('expected string or index, found %r'
                        % index_or_dirname)
writer = index.writer()
try:
    for d in dicts(table):
        writer.add_document(**d)
    writer.commit(merge=merge, optimize=optimize)
except Exception:
    writer.cancel()
    raise
finally:
    if needs_closing:
        index.close()
def appendtextindex(table, index_or_dirname, indexname=None, merge=True, optimize=False)
Load all rows from `table` into a Whoosh index, adding them to any existing data in the index. Keyword arguments: table A table container with the data to be loaded. index_or_dirname Either an instance of `whoosh.index.Index` or a string containing the directory path where the index is to be stored. indexname String containing the name of the index, if multiple indexes are stored in the same directory. merge Merge small segments during commit? optimize Merge all segments together?
2.637564
2.891928
0.912043
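Since appendtextindex ships no inline example, here is a minimal usage sketch, assuming the 'example.whoosh' index and schema created in the totextindex example above already exist:

>>> import petl as etl
>>> import datetime
>>> extra = (('f0', 'f1', 'f2', 'f3', 'f4'),
...          ('DDD', 3, 1.2, False, datetime.datetime(1999, 12, 31)),)
>>> etl.appendtextindex(extra, 'example.whoosh')

Unlike totextindex, this does not clear existing documents; the new rows are added to whatever is already in the index.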
return SearchTextIndexView(index_or_dirname, query, limit=limit, indexname=indexname, docnum_field=docnum_field, score_field=score_field, fieldboosts=fieldboosts, search_kwargs=search_kwargs)
def searchtextindex(index_or_dirname, query, limit=10, indexname=None, docnum_field=None, score_field=None, fieldboosts=None, search_kwargs=None)
Search a Whoosh index using a query. E.g.:: >>> import petl as etl >>> import os >>> # set up an index and load some documents via the Whoosh API ... from whoosh.index import create_in >>> from whoosh.fields import * >>> schema = Schema(title=TEXT(stored=True), path=ID(stored=True), ... content=TEXT) >>> dirname = 'example.whoosh' >>> if not os.path.exists(dirname): ... os.mkdir(dirname) ... >>> index = create_in('example.whoosh', schema) >>> writer = index.writer() >>> writer.add_document(title=u"Oranges", path=u"/a", ... content=u"This is the first document we've added!") >>> writer.add_document(title=u"Apples", path=u"/b", ... content=u"The second document is even more " ... u"interesting!") >>> writer.commit() >>> # demonstrate the use of searchtextindex() ... table1 = etl.searchtextindex('example.whoosh', 'oranges') >>> table1 +------+-----------+ | path | title | +======+===========+ | '/a' | 'Oranges' | +------+-----------+ >>> table2 = etl.searchtextindex('example.whoosh', 'doc*') >>> table2 +------+-----------+ | path | title | +======+===========+ | '/a' | 'Oranges' | +------+-----------+ | '/b' | 'Apples' | +------+-----------+ Keyword arguments: index_or_dirname Either an instance of `whoosh.index.Index` or a string containing the directory path where the index is stored. query Either a string or an instance of `whoosh.query.Query`. If a string, it will be parsed as a multi-field query, i.e., any terms not bound to a specific field will match **any** field. limit Return at most `limit` results. indexname String containing the name of the index, if multiple indexes are stored in the same directory. docnum_field If not None, an extra field will be added to the output table containing the internal document number stored in the index. The name of the field will be the value of this argument. score_field If not None, an extra field will be added to the output table containing the score of the result. The name of the field will be the value of this argument. fieldboosts An optional dictionary mapping field names to boosts. search_kwargs Any extra keyword arguments to be passed through to the Whoosh `search()` method.
1.83547
2.512743
0.730465
return SearchTextIndexView(index_or_dirname, query, pagenum=pagenum, pagelen=pagelen, indexname=indexname, docnum_field=docnum_field, score_field=score_field, fieldboosts=fieldboosts, search_kwargs=search_kwargs)
def searchtextindexpage(index_or_dirname, query, pagenum, pagelen=10, indexname=None, docnum_field=None, score_field=None, fieldboosts=None, search_kwargs=None)
Search an index using a query, returning a result page. Keyword arguments: index_or_dirname Either an instance of `whoosh.index.Index` or a string containing the directory path where the index is stored. query Either a string or an instance of `whoosh.query.Query`. If a string, it will be parsed as a multi-field query, i.e., any terms not bound to a specific field will match **any** field. pagenum Number of the page to return (e.g., 1 = first page). pagelen Number of results per page. indexname String containing the name of the index, if multiple indexes are stored in the same directory. docnum_field If not None, an extra field will be added to the output table containing the internal document number stored in the index. The name of the field will be the value of this argument. score_field If not None, an extra field will be added to the output table containing the score of the result. The name of the field will be the value of this argument. fieldboosts An optional dictionary mapping field names to boosts. search_kwargs Any extra keyword arguments to be passed through to the Whoosh `search()` method.
1.751624
2.165013
0.809059
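A hedged sketch of searchtextindexpage, assuming the 'example.whoosh' index built in the searchtextindex example above:

>>> import petl as etl
>>> # fetch page 1, with at most 5 hits per page
... page1 = etl.searchtextindexpage('example.whoosh', 'doc*',
...                                 pagenum=1, pagelen=5)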
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return HashJoinView(left, right, lkey=lkey, rkey=rkey, cache=cache,
                    lprefix=lprefix, rprefix=rprefix)
def hashjoin(left, right, key=None, lkey=None, rkey=None, cache=True, lprefix=None, rprefix=None)
Alternative implementation of :func:`petl.transform.joins.join`, where the join is executed by constructing an in-memory lookup for the right hand table, then iterating over rows from the left hand table. May be faster and/or more resource efficient where the right table is small and the left table is large. By default data from the right hand table is cached to improve performance (only available when `key` is given). Left and right tables with different key fields can be handled via the `lkey` and `rkey` arguments.
2.417028
3.003709
0.804681
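hashjoin accepts the same tables and key arguments as join, so a small sketch with illustrative data (the result should match join on the same inputs, though output is not shown here):

>>> import petl as etl
>>> left = [['id', 'colour'],
...         [1, 'blue'],
...         [2, 'red'],
...         [3, 'purple']]
>>> right = [['id', 'shape'],
...          [1, 'circle'],
...          [3, 'square'],
...          [4, 'ellipse']]
>>> table = etl.hashjoin(left, right, key='id')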
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return HashLeftJoinView(left, right, lkey, rkey, missing=missing,
                        cache=cache, lprefix=lprefix, rprefix=rprefix)
def hashleftjoin(left, right, key=None, lkey=None, rkey=None, missing=None, cache=True, lprefix=None, rprefix=None)
Alternative implementation of :func:`petl.transform.joins.leftjoin`, where the join is executed by constructing an in-memory lookup for the right hand table, then iterating over rows from the left hand table. May be faster and/or more resource efficient where the right table is small and the left table is large. By default data from the right hand table is cached to improve performance (only available when `key` is given). Left and right tables with different key fields can be handled via the `lkey` and `rkey` arguments.
2.509444
3.13737
0.799856
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return HashRightJoinView(left, right, lkey, rkey, missing=missing,
                         cache=cache, lprefix=lprefix, rprefix=rprefix)
def hashrightjoin(left, right, key=None, lkey=None, rkey=None, missing=None, cache=True, lprefix=None, rprefix=None)
Alternative implementation of :func:`petl.transform.joins.rightjoin`, where the join is executed by constructing an in-memory lookup for the left hand table, then iterating over rows from the right hand table. May be faster and/or more resource efficient where the left table is small and the right table is large. By default data from the left hand table is cached to improve performance (only available when `key` is given). Left and right tables with different key fields can be handled via the `lkey` and `rkey` arguments.
2.616418
3.214598
0.813918
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return HashAntiJoinView(left, right, lkey, rkey)
def hashantijoin(left, right, key=None, lkey=None, rkey=None)
Alternative implementation of :func:`petl.transform.joins.antijoin`, where the join is executed by constructing an in-memory set for all keys found in the right hand table, then iterating over rows from the left hand table. May be faster and/or more resource efficient where the right table is small and the left table is large. Left and right tables with different key fields can be handled via the `lkey` and `rkey` arguments.
3.525857
4.335847
0.813187
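A minimal sketch with illustrative data; only left rows whose key does not appear in the right table should come through:

>>> import petl as etl
>>> left = [['id', 'colour'],
...         [0, 'black'],
...         [1, 'blue'],
...         [2, 'red']]
>>> right = [['id', 'shape'],
...          [1, 'circle'],
...          [3, 'square']]
>>> table = etl.hashantijoin(left, right, key='id')  # rows with id 0 and 2 remain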
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return HashLookupJoinView(left, right, lkey, rkey, missing=missing,
                          lprefix=lprefix, rprefix=rprefix)
def hashlookupjoin(left, right, key=None, lkey=None, rkey=None, missing=None, lprefix=None, rprefix=None)
Alternative implementation of :func:`petl.transform.joins.lookupjoin`, where the join is executed by constructing an in-memory lookup for the right hand table, then iterating over rows from the left hand table. May be faster and/or more resource efficient where the right table is small and the left table is large. Left and right tables with different key fields can be handled via the `lkey` and `rkey` arguments.
2.586198
3.342292
0.77378
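A sketch with illustrative data; as with lookupjoin, at most one row from the right table should be matched per left row, so the duplicate right-hand key below is effectively ignored after the first match:

>>> import petl as etl
>>> left = [['id', 'color', 'cost'],
...         [1, 'blue', 12],
...         [2, 'red', 8]]
>>> right = [['id', 'shape', 'size'],
...          [1, 'circle', 'big'],
...          [1, 'circle', 'small'],
...          [2, 'square', 'tiny']]
>>> table = etl.hashlookupjoin(left, right, key='id')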
return XLSView(filename, sheet=sheet, use_view=use_view)
def fromxls(filename, sheet=None, use_view=True)
Extract a table from a sheet in an Excel .xls file. The sheet is identified by its name or index number. N.B., the sheet name is case sensitive.
3.636334
4.971703
0.731406
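A minimal sketch, assuming a file 'example.xls' with a sheet named 'Sheet1' exists (both names are illustrative):

>>> import petl as etl
>>> table = etl.fromxls('example.xls', 'Sheet1')
>>> # or select the sheet by index instead of name
... table = etl.fromxls('example.xls', 0)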
import xlwt
if encoding is None:
    encoding = locale.getpreferredencoding()
wb = xlwt.Workbook(encoding=encoding, style_compression=style_compression)
ws = wb.add_sheet(sheet)
if styles is None:
    # simple version, don't worry about styles
    for r, row in enumerate(tbl):
        for c, v in enumerate(row):
            ws.write(r, c, label=v)
else:
    # handle styles
    it = iter(tbl)
    hdr = next(it)
    flds = list(map(str, hdr))
    for c, f in enumerate(flds):
        ws.write(0, c, label=f)
        if f not in styles or styles[f] is None:
            styles[f] = xlwt.Style.default_style
    # convert to list for easy zipping
    styles = [styles[f] for f in flds]
    for r, row in enumerate(it):
        for c, (v, style) in enumerate(izip_longest(row, styles,
                                                    fillvalue=None)):
            ws.write(r+1, c, label=v, style=style)
wb.save(filename)
def toxls(tbl, filename, sheet, encoding=None, style_compression=0, styles=None)
Write a table to a new Excel .xls file.
2.330978
2.349209
0.99224
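A minimal sketch of toxls, including an optional per-field style (the number format below is illustrative):

>>> import petl as etl
>>> import xlwt
>>> table = [['name', 'rate'],
...          ['a', 0.12],
...          ['b', 0.53]]
>>> pct = xlwt.easyxf(num_format_str='0.00%')
>>> etl.toxls(table, 'example.xls', 'Sheet1', styles={'rate': pct})

Note that `styles` maps field names to xlwt style objects; as the code above shows, any field without an entry falls back to the xlwt default style.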
flds = list(map(text_type, hdr))
indices = list()
if not isinstance(spec, (list, tuple)):
    spec = (spec,)
for s in spec:
    # spec could be a field index (takes priority)
    if isinstance(s, int) and s < len(hdr):
        indices.append(s)  # index fields from 0
    # spec could be a field
    elif s in flds:
        indices.append(flds.index(s))
    else:
        raise FieldSelectionError(s)
return indices
def asindices(hdr, spec)
Convert the given field `spec` into a list of field indices.
4.070918
3.728864
1.091731
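A short sketch; asindices is an internal helper (importable from petl.util.base in recent versions, an assumption worth checking against your petl release), and spec items may be field names or integer indices:

>>> from petl.util.base import asindices
>>> hdr = ['foo', 'bar', 'baz']
>>> asindices(hdr, 'bar')
[1]
>>> asindices(hdr, ('baz', 0))
[2, 0]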
# use a raw string so the brace escape is not an invalid escape sequence
prog = re.compile(r'\{([^}]+)\}')

def repl(matchobj):
    return "rec['%s']" % matchobj.group(1)

return eval("lambda rec: " + prog.sub(repl, s))
def expr(s)
Construct a function operating on a table record. The expression string is converted into a lambda function by prepending the string with ``'lambda rec: '``, then replacing anything enclosed in curly braces (e.g., ``"{foo}"``) with a lookup on the record (e.g., ``"rec['foo']"``), then finally calling :func:`eval`. So, e.g., the expression string ``"{foo} * {bar}"`` is converted to the function ``lambda rec: rec['foo'] * rec['bar']``
5.154497
3.603914
1.43025
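A small sketch; any mapping supporting ['field'] lookup works as the record here:

>>> from petl import expr
>>> f = expr('{foo} * {bar}')
>>> f({'foo': 2, 'bar': 21})
42

Because the string is passed to eval, expression strings should come from trusted sources only.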
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
# wrap rows as records
it = (Record(row, flds) for row in it)
# determine key function
if callable(key):
    getkey = key
    native_key = True
else:
    kindices = asindices(hdr, key)
    getkey = comparable_itemgetter(*kindices)
    native_key = False
git = groupby(it, key=getkey)
if value is None:
    if native_key:
        return git
    else:
        return ((k.inner, vals) for (k, vals) in git)
else:
    if callable(value):
        getval = value
    else:
        vindices = asindices(hdr, value)
        getval = operator.itemgetter(*vindices)
    if native_key:
        return ((k, (getval(v) for v in vals)) for (k, vals) in git)
    else:
        return ((k.inner, (getval(v) for v in vals))
                for (k, vals) in git)
def rowgroupby(table, key, value=None)
Convenient adapter for :func:`itertools.groupby`. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz'], ... ['a', 1, True], ... ['b', 3, True], ... ['b', 2]] >>> # group entire rows ... for key, group in etl.rowgroupby(table1, 'foo'): ... print(key, list(group)) ... a [('a', 1, True)] b [('b', 3, True), ('b', 2)] >>> # group specific values ... for key, group in etl.rowgroupby(table1, 'foo', 'bar'): ... print(key, list(group)) ... a [1] b [3, 2] N.B., assumes the input table is already sorted by the given key.
2.962157
3.086702
0.959651
source = read_source_from_arg(source)
csvargs.setdefault('dialect', 'excel')
return fromcsv_impl(source=source, encoding=encoding, errors=errors,
                    header=header, **csvargs)
def fromcsv(source=None, encoding=None, errors='strict', header=None, **csvargs)
Extract a table from a delimited file. E.g.:: >>> import petl as etl >>> import csv >>> # set up a CSV file to demonstrate with ... table1 = [['foo', 'bar'], ... ['a', 1], ... ['b', 2], ... ['c', 2]] >>> with open('example.csv', 'w') as f: ... writer = csv.writer(f) ... writer.writerows(table1) ... >>> # now demonstrate the use of fromcsv() ... table2 = etl.fromcsv('example.csv') >>> table2 +-----+-----+ | foo | bar | +=====+=====+ | 'a' | '1' | +-----+-----+ | 'b' | '2' | +-----+-----+ | 'c' | '2' | +-----+-----+ The `source` argument is the path of the delimited file, all other keyword arguments are passed to :func:`csv.reader`. So, e.g., to override the delimiter from the default CSV dialect, provide the `delimiter` keyword argument. Note that all data values are strings, and any intended numeric values will need to be converted, see also :func:`petl.transform.conversions.convert`.
3.293433
5.029729
0.654793
csvargs.setdefault('dialect', 'excel-tab')
# pass header through explicitly, otherwise it would be silently dropped
return fromcsv(source, encoding=encoding, errors=errors, header=header,
               **csvargs)
def fromtsv(source=None, encoding=None, errors='strict', header=None, **csvargs)
Convenience function, as :func:`petl.io.csv.fromcsv` but with different default dialect (tab delimited).
2.969149
3.350176
0.886267
source = write_source_from_arg(source)
csvargs.setdefault('dialect', 'excel')
tocsv_impl(table, source=source, encoding=encoding, errors=errors,
           write_header=write_header, **csvargs)
def tocsv(table, source=None, encoding=None, errors='strict', write_header=True, **csvargs)
Write the table to a CSV file. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['a', 1], ... ['b', 2], ... ['c', 2]] >>> etl.tocsv(table1, 'example.csv') >>> # look what it did ... print(open('example.csv').read()) foo,bar a,1 b,2 c,2 The `source` argument is the path of the delimited file, and the optional `write_header` argument specifies whether to include the field names in the delimited file. All other keyword arguments are passed to :func:`csv.writer`. So, e.g., to override the delimiter from the default CSV dialect, provide the `delimiter` keyword argument. Note that if a file already exists at the given location, it will be overwritten.
3.469223
5.002425
0.693508
source = write_source_from_arg(source)
csvargs.setdefault('dialect', 'excel')
appendcsv_impl(table, source=source, encoding=encoding, errors=errors,
               write_header=write_header, **csvargs)
def appendcsv(table, source=None, encoding=None, errors='strict', write_header=False, **csvargs)
Append data rows to an existing CSV file. As :func:`petl.io.csv.tocsv` but the file is opened in append mode and the table header is not written by default. Note that no attempt is made to check that the fields or row lengths are consistent with the existing data, the data rows from the table are simply appended to the file.
3.384406
4.349571
0.778101
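A minimal sketch, continuing the tocsv example above; with the default write_header=False only the data rows are appended:

>>> import petl as etl
>>> extra = [['foo', 'bar'],
...          ['d', 7]]
>>> etl.appendcsv(extra, 'example.csv')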
csvargs.setdefault('dialect', 'excel-tab')
return tocsv(table, source=source, encoding=encoding, errors=errors,
             write_header=write_header, **csvargs)
def totsv(table, source=None, encoding=None, errors='strict', write_header=True, **csvargs)
Convenience function, as :func:`petl.io.csv.tocsv` but with different default dialect (tab delimited).
2.271929
2.419671
0.938941
csvargs.setdefault('dialect', 'excel-tab')
return appendcsv(table, source=source, encoding=encoding, errors=errors,
                 write_header=write_header, **csvargs)
def appendtsv(table, source=None, encoding=None, errors='strict', write_header=False, **csvargs)
Convenience function, as :func:`petl.io.csv.appendcsv` but with different default dialect (tab delimited).
2.243729
2.44936
0.916047
source = write_source_from_arg(source)
csvargs.setdefault('dialect', 'excel')
return teecsv_impl(table, source=source, encoding=encoding, errors=errors,
                   write_header=write_header, **csvargs)
def teecsv(table, source=None, encoding=None, errors='strict', write_header=True, **csvargs)
Returns a table that writes rows to a CSV file as they are iterated over.
3.317238
3.351697
0.989719
csvargs.setdefault('dialect', 'excel-tab')
return teecsv(table, source=source, encoding=encoding, errors=errors,
              write_header=write_header, **csvargs)
def teetsv(table, source=None, encoding=None, errors='strict', write_header=True, **csvargs)
Convenience function, as :func:`petl.io.csv.teecsv` but with different default dialect (tab delimited).
2.669908
2.387051
1.118496
return CaptureView(table, field, pattern, newfields=newfields, include_original=include_original, flags=flags, fill=fill)
def capture(table, field, pattern, newfields=None, include_original=False, flags=0, fill=None)
Add one or more new fields with values captured from an existing field searched via a regular expression. E.g.:: >>> import petl as etl >>> table1 = [['id', 'variable', 'value'], ... ['1', 'A1', '12'], ... ['2', 'A2', '15'], ... ['3', 'B1', '18'], ... ['4', 'C12', '19']] >>> table2 = etl.capture(table1, 'variable', '(\\w)(\\d+)', ... ['treat', 'time']) >>> table2 +-----+-------+-------+------+ | id | value | treat | time | +=====+=======+=======+======+ | '1' | '12' | 'A' | '1' | +-----+-------+-------+------+ | '2' | '15' | 'A' | '2' | +-----+-------+-------+------+ | '3' | '18' | 'B' | '1' | +-----+-------+-------+------+ | '4' | '19' | 'C' | '12' | +-----+-------+-------+------+ >>> # using the include_original argument ... table3 = etl.capture(table1, 'variable', '(\\w)(\\d+)', ... ['treat', 'time'], ... include_original=True) >>> table3 +-----+----------+-------+-------+------+ | id | variable | value | treat | time | +=====+==========+=======+=======+======+ | '1' | 'A1' | '12' | 'A' | '1' | +-----+----------+-------+-------+------+ | '2' | 'A2' | '15' | 'A' | '2' | +-----+----------+-------+-------+------+ | '3' | 'B1' | '18' | 'B' | '1' | +-----+----------+-------+-------+------+ | '4' | 'C12' | '19' | 'C' | '12' | +-----+----------+-------+-------+------+ By default the field on which the capture is performed is omitted. It can be included using the `include_original` argument. The ``fill`` parameter can be used to provide a list or tuple of values to use if the regular expression does not match. The ``fill`` parameter should contain as many values as there are capturing groups in the regular expression. If ``fill`` is ``None`` (default) then a ``petl.transform.TransformError`` will be raised on the first non-matching value.
2.868335
5.046776
0.56835
return SplitView(table, field, pattern, newfields, include_original, maxsplit, flags)
def split(table, field, pattern, newfields=None, include_original=False, maxsplit=0, flags=0)
Add one or more new fields with values generated by splitting an existing value around occurrences of a regular expression. E.g.:: >>> import petl as etl >>> table1 = [['id', 'variable', 'value'], ... ['1', 'parad1', '12'], ... ['2', 'parad2', '15'], ... ['3', 'tempd1', '18'], ... ['4', 'tempd2', '19']] >>> table2 = etl.split(table1, 'variable', 'd', ['variable', 'day']) >>> table2 +-----+-------+----------+-----+ | id | value | variable | day | +=====+=======+==========+=====+ | '1' | '12' | 'para' | '1' | +-----+-------+----------+-----+ | '2' | '15' | 'para' | '2' | +-----+-------+----------+-----+ | '3' | '18' | 'temp' | '1' | +-----+-------+----------+-----+ | '4' | '19' | 'temp' | '2' | +-----+-------+----------+-----+ By default the field on which the split is performed is omitted. It can be included using the `include_original` argument.
3.828207
7.005208
0.54648
prog = re.compile(pattern, flags)
conv = lambda v: prog.sub(repl, v, count=count)
return convert(table, field, conv)
def sub(table, field, pattern, repl, count=0, flags=0)
Convenience function to convert values under the given field using a regular expression substitution. See also :func:`re.sub`.
4.049287
3.529737
1.147193
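A small sketch with illustrative data, replacing every hyphen or underscore in a field:

>>> import petl as etl
>>> table1 = [['id', 'name'],
...           [1, 'spider-man'],
...           [2, 'iron_man']]
>>> table2 = etl.sub(table1, 'name', '[-_]', ' ')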
if len(args) == 1:
    field = None
    pattern = args[0]
elif len(args) == 2:
    field = args[0]
    pattern = args[1]
else:
    raise ArgumentError('expected 1 or 2 positional arguments')
return SearchView(table, pattern, field=field, **kwargs)
def search(table, *args, **kwargs)
Perform a regular expression search, returning rows that match a given pattern, either anywhere in the row or within a specific field. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz'], ... ['orange', 12, 'oranges are nice fruit'], ... ['mango', 42, 'I like them'], ... ['banana', 74, 'lovely too'], ... ['cucumber', 41, 'better than mango']] >>> # search any field ... table2 = etl.search(table1, '.g.') >>> table2 +------------+-----+--------------------------+ | foo | bar | baz | +============+=====+==========================+ | 'orange' | 12 | 'oranges are nice fruit' | +------------+-----+--------------------------+ | 'mango' | 42 | 'I like them' | +------------+-----+--------------------------+ | 'cucumber' | 41 | 'better than mango' | +------------+-----+--------------------------+ >>> # search a specific field ... table3 = etl.search(table1, 'foo', '.g.') >>> table3 +----------+-----+--------------------------+ | foo | bar | baz | +==========+=====+==========================+ | 'orange' | 12 | 'oranges are nice fruit' | +----------+-----+--------------------------+ | 'mango' | 42 | 'I like them' | +----------+-----+--------------------------+ The complement can be found via :func:`petl.transform.regex.searchcomplement`.
2.898463
2.841654
1.019992
return search(table, *args, complement=True, **kwargs)
def searchcomplement(table, *args, **kwargs)
Perform a regular expression search, returning rows that **do not** match a given pattern, either anywhere in the row or within a specific field. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz'], ... ['orange', 12, 'oranges are nice fruit'], ... ['mango', 42, 'I like them'], ... ['banana', 74, 'lovely too'], ... ['cucumber', 41, 'better than mango']] >>> # search any field ... table2 = etl.searchcomplement(table1, '.g.') >>> table2 +----------+-----+--------------+ | foo | bar | baz | +==========+=====+==============+ | 'banana' | 74 | 'lovely too' | +----------+-----+--------------+ >>> # search a specific field ... table3 = etl.searchcomplement(table1, 'foo', '.g.') >>> table3 +------------+-----+---------------------+ | foo | bar | baz | +============+=====+=====================+ | 'banana' | 74 | 'lovely too' | +------------+-----+---------------------+ | 'cucumber' | 41 | 'better than mango' | +------------+-----+---------------------+ This returns the complement of :func:`petl.transform.regex.search`.
7.32421
16.98101
0.431318
return ColumnsView(cols, header=header, missing=missing)
def fromcolumns(cols, header=None, missing=None)
View a sequence of columns as a table, e.g.:: >>> import petl as etl >>> cols = [[0, 1, 2], ['a', 'b', 'c']] >>> tbl = etl.fromcolumns(cols) >>> tbl +----+-----+ | f0 | f1 | +====+=====+ | 0 | 'a' | +----+-----+ | 1 | 'b' | +----+-----+ | 2 | 'c' | +----+-----+ If columns are not the same length, values will be padded to the length of the longest column with `missing`, which is None by default, e.g.:: >>> cols = [[0, 1, 2], ['a', 'b']] >>> tbl = etl.fromcolumns(cols, missing='NA') >>> tbl +----+------+ | f0 | f1 | +====+======+ | 0 | 'a' | +----+------+ | 1 | 'b' | +----+------+ | 2 | 'NA' | +----+------+ See also :func:`petl.io.json.fromdicts`. .. versionadded:: 1.1.0
8.517364
17.467209
0.48762
# support passing a single list or tuple of fields
if len(args) == 1 and isinstance(args[0], (list, tuple)):
    args = args[0]
return CutView(table, args, **kwargs)
def cut(table, *args, **kwargs)
Choose and/or re-order fields. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz'], ... ['A', 1, 2.7], ... ['B', 2, 3.4], ... ['B', 3, 7.8], ... ['D', 42, 9.0], ... ['E', 12]] >>> table2 = etl.cut(table1, 'foo', 'baz') >>> table2 +-----+------+ | foo | baz | +=====+======+ | 'A' | 2.7 | +-----+------+ | 'B' | 3.4 | +-----+------+ | 'B' | 7.8 | +-----+------+ | 'D' | 9.0 | +-----+------+ | 'E' | None | +-----+------+ >>> # fields can also be specified by index, starting from zero ... table3 = etl.cut(table1, 0, 2) >>> table3 +-----+------+ | foo | baz | +=====+======+ | 'A' | 2.7 | +-----+------+ | 'B' | 3.4 | +-----+------+ | 'B' | 7.8 | +-----+------+ | 'D' | 9.0 | +-----+------+ | 'E' | None | +-----+------+ >>> # field names and indices can be mixed ... table4 = etl.cut(table1, 'bar', 0) >>> table4 +-----+-----+ | bar | foo | +=====+=====+ | 1 | 'A' | +-----+-----+ | 2 | 'B' | +-----+-----+ | 3 | 'B' | +-----+-----+ | 42 | 'D' | +-----+-----+ | 12 | 'E' | +-----+-----+ >>> # select a range of fields ... table5 = etl.cut(table1, *range(0, 2)) >>> table5 +-----+-----+ | foo | bar | +=====+=====+ | 'A' | 1 | +-----+-----+ | 'B' | 2 | +-----+-----+ | 'B' | 3 | +-----+-----+ | 'D' | 42 | +-----+-----+ | 'E' | 12 | +-----+-----+ Note that any short rows will be padded with `None` values (or whatever is provided via the `missing` keyword argument). See also :func:`petl.transform.basics.cutout`.
4.394819
5.268387
0.834187
return AddFieldView(table, field, value=value, index=index, missing=missing)
def addfield(table, field, value=None, index=None, missing=None)
Add a field with a fixed or calculated value. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['M', 12], ... ['F', 34], ... ['-', 56]] >>> # using a fixed value ... table2 = etl.addfield(table1, 'baz', 42) >>> table2 +-----+-----+-----+ | foo | bar | baz | +=====+=====+=====+ | 'M' | 12 | 42 | +-----+-----+-----+ | 'F' | 34 | 42 | +-----+-----+-----+ | '-' | 56 | 42 | +-----+-----+-----+ >>> # calculating the value ... table2 = etl.addfield(table1, 'baz', lambda rec: rec['bar'] * 2) >>> table2 +-----+-----+-----+ | foo | bar | baz | +=====+=====+=====+ | 'M' | 12 | 24 | +-----+-----+-----+ | 'F' | 34 | 68 | +-----+-----+-----+ | '-' | 56 | 112 | +-----+-----+-----+ Use the `index` parameter to control the position of the inserted field.
5.645075
8.106971
0.696324
return AddRowNumbersView(table, start, step, field)
def addrownumbers(table, start=1, step=1, field='row')
Add a field of row numbers. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['A', 9], ... ['C', 2], ... ['F', 1]] >>> table2 = etl.addrownumbers(table1) >>> table2 +-----+-----+-----+ | row | foo | bar | +=====+=====+=====+ | 1 | 'A' | 9 | +-----+-----+-----+ | 2 | 'C' | 2 | +-----+-----+-----+ | 3 | 'F' | 1 | +-----+-----+-----+ Parameters `start` and `step` control the numbering.
9.201872
15.159793
0.606992
return AddColumnView(table, field, col, index=index, missing=missing)
def addcolumn(table, field, col, index=None, missing=None)
Add a column of data to the table. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['A', 1], ... ['B', 2]] >>> col = [True, False] >>> table2 = etl.addcolumn(table1, 'baz', col) >>> table2 +-----+-----+-------+ | foo | bar | baz | +=====+=====+=======+ | 'A' | 1 | True | +-----+-----+-------+ | 'B' | 2 | False | +-----+-----+-------+ Use the `index` parameter to control the position of the new column.
4.974385
8.666939
0.573949
s = set()
for v in values(table, field):
    try:
        s.add(type(v).__name__)
    except IndexError:
        pass  # ignore short rows
return s
def typeset(table, field)
Return a set containing all Python types found for values in the given field. E.g.:: >>> import petl as etl >>> table = [['foo', 'bar', 'baz'], ... ['A', 1, '2'], ... ['B', u'2', '3.4'], ... [u'B', u'3', '7.8', True], ... ['D', u'xyz', 9.0], ... ['E', 42]] >>> sorted(etl.typeset(table, 'foo')) ['str'] >>> sorted(etl.typeset(table, 'bar')) ['int', 'str'] >>> sorted(etl.typeset(table, 'baz')) ['NoneType', 'float', 'str'] The `field` argument can be a field name or index (starting from zero).
7.061031
9.403749
0.750874
t1h = set(header(t1))
t2h = set(header(t2))
return t2h - t1h, t1h - t2h
def diffheaders(t1, t2)
Return the difference between the headers of the two tables as a pair of sets. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz'], ... ['a', 1, .3]] >>> table2 = [['baz', 'bar', 'quux'], ... ['a', 1, .3]] >>> add, sub = etl.diffheaders(table1, table2) >>> add {'quux'} >>> sub {'foo'}
3.237124
4.38946
0.737477
t1v = set(values(t1, f))
t2v = set(values(t2, f))
return t2v - t1v, t1v - t2v
def diffvalues(t1, t2, f)
Return the difference between the values under the given field in the two tables, e.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['a', 1], ... ['b', 3]] >>> table2 = [['bar', 'foo'], ... [1, 'a'], ... [3, 'c']] >>> add, sub = etl.diffvalues(table1, table2, 'foo') >>> add {'c'} >>> sub {'b'}
3.024529
4.175059
0.724428
missing = kwargs.get('missing', None)
default = kwargs.get('default', None)

def _coalesce(row):
    for f in fields:
        v = row[f]
        if v is not missing:
            return v
    return default

return _coalesce
def coalesce(*fields, **kwargs)
Return a function which accepts a row and returns the first non-missing value from the specified fields. Intended for use with :func:`petl.transform.basics.addfield`.
2.42557
2.330233
1.040913
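A minimal sketch, combining coalesce with addfield as the docstring suggests (data illustrative; the new field should take 1 then 2 here):

>>> import petl as etl
>>> table1 = [['a', 'b'],
...           [1, None],
...           [None, 2]]
>>> table2 = etl.addfield(table1, 'c', etl.coalesce('a', 'b'))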
def parser(value):
    try:
        return datetime.datetime.strptime(value.strip(), fmt)
    except Exception as e:
        if strict:
            raise e
        else:
            return value

return parser
def datetimeparser(fmt, strict=True)
Return a function to parse strings as :class:`datetime.datetime` objects using a given format. E.g.:: >>> from petl import datetimeparser >>> isodatetime = datetimeparser('%Y-%m-%dT%H:%M:%S') >>> isodatetime('2002-12-25T00:00:00') datetime.datetime(2002, 12, 25, 0, 0) >>> try: ... isodatetime('2002-12-25T00:00:99') ... except ValueError as e: ... print(e) ... unconverted data remains: 9 If ``strict=False`` then if an error occurs when parsing, the original value will be returned as-is, and no error will be raised.
2.880525
3.27645
0.87916
def parser(value):
    try:
        return datetime.datetime.strptime(value.strip(), fmt).date()
    except Exception as e:
        if strict:
            raise e
        else:
            return value

return parser
def dateparser(fmt, strict=True)
Return a function to parse strings as :class:`datetime.date` objects using a given format. E.g.:: >>> from petl import dateparser >>> isodate = dateparser('%Y-%m-%d') >>> isodate('2002-12-25') datetime.date(2002, 12, 25) >>> try: ... isodate('2002-02-30') ... except ValueError as e: ... print(e) ... day is out of range for month If ``strict=False`` then if an error occurs when parsing, the original value will be returned as-is, and no error will be raised.
3.003235
3.321364
0.904217
def parser(value):
    try:
        return datetime.datetime.strptime(value.strip(), fmt).time()
    except Exception as e:
        if strict:
            raise e
        else:
            return value

return parser
def timeparser(fmt, strict=True)
Return a function to parse strings as :class:`datetime.time` objects using a given format. E.g.:: >>> from petl import timeparser >>> isotime = timeparser('%H:%M:%S') >>> isotime('00:00:00') datetime.time(0, 0) >>> isotime('13:00:00') datetime.time(13, 0) >>> try: ... isotime('12:00:99') ... except ValueError as e: ... print(e) ... unconverted data remains: 9 >>> try: ... isotime('25:00:00') ... except ValueError as e: ... print(e) ... time data '25:00:00' does not match format '%H:%M:%S' If ``strict=False`` then if an error occurs when parsing, the original value will be returned as-is, and no error will be raised.
3.074619
3.370998
0.91208
if not case_sensitive:
    true_strings = [s.lower() for s in true_strings]
    false_strings = [s.lower() for s in false_strings]

def parser(value):
    value = value.strip()
    if not case_sensitive:
        value = value.lower()
    if value in true_strings:
        return True
    elif value in false_strings:
        return False
    elif strict:
        raise ValueError('value is not one of recognised boolean strings: '
                         '%r' % value)
    else:
        return value

return parser
def boolparser(true_strings=('true', 't', 'yes', 'y', '1'), false_strings=('false', 'f', 'no', 'n', '0'), case_sensitive=False, strict=True)
Return a function to parse strings as :class:`bool` objects using a given set of string representations for `True` and `False`. E.g.:: >>> from petl import boolparser >>> mybool = boolparser(true_strings=['yes', 'y'], false_strings=['no', 'n']) >>> mybool('y') True >>> mybool('yes') True >>> mybool('Y') True >>> mybool('No') False >>> try: ... mybool('foo') ... except ValueError as e: ... print(e) ... value is not one of recognised boolean strings: 'foo' >>> try: ... mybool('True') ... except ValueError as e: ... print(e) ... value is not one of recognised boolean strings: 'true' If ``strict=False`` then if an error occurs when parsing, the original value will be returned as-is, and no error will be raised.
1.85796
1.897398
0.979214
def f(v):
    try:
        return int(v)
    except (ValueError, TypeError):
        pass
    try:
        return long(v)
    except (ValueError, TypeError):
        pass
    try:
        return float(v)
    except (ValueError, TypeError):
        pass
    try:
        return complex(v)
    except (ValueError, TypeError) as e:
        if strict:
            raise e
    return v

return f
def numparser(strict=False)
Return a function that will attempt to parse the value as a number, trying :func:`int`, :func:`long`, :func:`float` and :func:`complex` in that order. If all fail, return the value as-is, unless ``strict=True``, in which case raise the underlying exception.
2.077306
1.66331
1.248899
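A small doctest-style sketch of the default, non-strict behaviour:

>>> from petl import numparser
>>> parse = numparser()
>>> parse('42')
42
>>> parse('3.14')
3.14
>>> parse('1+2j')
(1+2j)
>>> parse('abc')
'abc'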
return ProgressView(table, batchsize, prefix, out)
def progress(table, batchsize=1000, prefix="", out=sys.stderr)
Report progress on rows passing through. E.g.:: >>> import petl as etl >>> table = etl.dummytable(100000) >>> table.progress(10000).tocsv('example.csv') 10000 rows in 0.13s (78363 row/s); batch in 0.13s (78363 row/s) 20000 rows in 0.22s (91679 row/s); batch in 0.09s (110448 row/s) 30000 rows in 0.31s (96573 row/s); batch in 0.09s (108114 row/s) 40000 rows in 0.40s (99535 row/s); batch in 0.09s (109625 row/s) 50000 rows in 0.49s (101396 row/s); batch in 0.09s (109591 row/s) 60000 rows in 0.59s (102245 row/s); batch in 0.09s (106709 row/s) 70000 rows in 0.68s (103221 row/s); batch in 0.09s (109498 row/s) 80000 rows in 0.77s (103810 row/s); batch in 0.09s (108126 row/s) 90000 rows in 0.90s (99465 row/s); batch in 0.13s (74516 row/s) 100000 rows in 1.02s (98409 row/s); batch in 0.11s (89821 row/s) 100000 rows in 1.02s (98402 row/s); batches in 0.10 +/- 0.02s [0.09-0.13] (100481 +/- 13340 rows/s [74516-110448]) See also :func:`petl.util.timing.clock`.
10.517169
19.193533
0.547954
return ComplementView(a, b, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache, strict=strict)
def complement(a, b, presorted=False, buffersize=None, tempdir=None, cache=True, strict=False)
Return rows in `a` that are not in `b`. E.g.:: >>> import petl as etl >>> a = [['foo', 'bar', 'baz'], ... ['A', 1, True], ... ['C', 7, False], ... ['B', 2, False], ... ['C', 9, True]] >>> b = [['x', 'y', 'z'], ... ['B', 2, False], ... ['A', 9, False], ... ['B', 3, True], ... ['C', 9, True]] >>> aminusb = etl.complement(a, b) >>> aminusb +-----+-----+-------+ | foo | bar | baz | +=====+=====+=======+ | 'A' | 1 | True | +-----+-----+-------+ | 'C' | 7 | False | +-----+-----+-------+ >>> bminusa = etl.complement(b, a) >>> bminusa +-----+---+-------+ | x | y | z | +=====+===+=======+ | 'A' | 9 | False | +-----+---+-------+ | 'B' | 3 | True | +-----+---+-------+ Note that the field names of each table are ignored - rows are simply compared following a lexical sort. See also the :func:`petl.transform.setops.recordcomplement` function. If `presorted` is True, it is assumed that the data are already sorted by the given key, and the `buffersize`, `tempdir` and `cache` arguments are ignored. Otherwise, the data are sorted, see also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function. Note that the default behaviour is not strictly set-like, because duplicate rows are counted separately, e.g.:: >>> a = [['foo', 'bar'], ... ['A', 1], ... ['B', 2], ... ['B', 2], ... ['C', 7]] >>> b = [['foo', 'bar'], ... ['B', 2]] >>> aminusb = etl.complement(a, b) >>> aminusb +-----+-----+ | foo | bar | +=====+=====+ | 'A' | 1 | +-----+-----+ | 'B' | 2 | +-----+-----+ | 'C' | 7 | +-----+-----+ This behaviour can be changed with the `strict` keyword argument, e.g.:: >>> aminusb = etl.complement(a, b, strict=True) >>> aminusb +-----+-----+ | foo | bar | +=====+=====+ | 'A' | 1 | +-----+-----+ | 'C' | 7 | +-----+-----+ .. versionchanged:: 1.1.0 If `strict` is `True` then strict set-like behaviour is used, i.e., only rows in `a` not found in `b` are returned.
2.709136
3.76983
0.718636
# TODO possible with only one pass?
ha = header(a)
hb = header(b)
assert set(ha) == set(hb), 'both tables must have the same set of fields'
# make sure fields are in the same order
bv = cut(b, *ha)
return complement(a, bv, buffersize=buffersize, tempdir=tempdir,
                  cache=cache, strict=strict)
def recordcomplement(a, b, buffersize=None, tempdir=None, cache=True, strict=False)
Find records in `a` that are not in `b`. E.g.:: >>> import petl as etl >>> a = [['foo', 'bar', 'baz'], ... ['A', 1, True], ... ['C', 7, False], ... ['B', 2, False], ... ['C', 9, True]] >>> b = [['bar', 'foo', 'baz'], ... [2, 'B', False], ... [9, 'A', False], ... [3, 'B', True], ... [9, 'C', True]] >>> aminusb = etl.recordcomplement(a, b) >>> aminusb +-----+-----+-------+ | foo | bar | baz | +=====+=====+=======+ | 'A' | 1 | True | +-----+-----+-------+ | 'C' | 7 | False | +-----+-----+-------+ >>> bminusa = etl.recordcomplement(b, a) >>> bminusa +-----+-----+-------+ | bar | foo | baz | +=====+=====+=======+ | 3 | 'B' | True | +-----+-----+-------+ | 9 | 'A' | False | +-----+-----+-------+ Note that both tables must have the same set of fields, but that the order of the fields does not matter. See also the :func:`petl.transform.setops.complement` function. See also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function.
5.926434
5.523671
1.072916
if not presorted:
    a = sort(a)
    b = sort(b)
added = complement(b, a, presorted=True, buffersize=buffersize,
                   tempdir=tempdir, cache=cache, strict=strict)
subtracted = complement(a, b, presorted=True, buffersize=buffersize,
                        tempdir=tempdir, cache=cache, strict=strict)
return added, subtracted
def diff(a, b, presorted=False, buffersize=None, tempdir=None, cache=True, strict=False)
Find the difference between rows in two tables. Returns a pair of tables. E.g.:: >>> import petl as etl >>> a = [['foo', 'bar', 'baz'], ... ['A', 1, True], ... ['C', 7, False], ... ['B', 2, False], ... ['C', 9, True]] >>> b = [['x', 'y', 'z'], ... ['B', 2, False], ... ['A', 9, False], ... ['B', 3, True], ... ['C', 9, True]] >>> added, subtracted = etl.diff(a, b) >>> # rows in b not in a ... added +-----+---+-------+ | x | y | z | +=====+===+=======+ | 'A' | 9 | False | +-----+---+-------+ | 'B' | 3 | True | +-----+---+-------+ >>> # rows in a not in b ... subtracted +-----+-----+-------+ | foo | bar | baz | +=====+=====+=======+ | 'A' | 1 | True | +-----+-----+-------+ | 'C' | 7 | False | +-----+-----+-------+ Convenient shorthand for ``(complement(b, a), complement(a, b))``. See also :func:`petl.transform.setops.complement`. If `presorted` is True, it is assumed that the data are already sorted by the given key, and the `buffersize`, `tempdir` and `cache` arguments are ignored. Otherwise, the data are sorted, see also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function. .. versionchanged:: 1.1.0 If `strict` is `True` then strict set-like behaviour is used.
2.000455
1.966433
1.017302