code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
added = recordcomplement(b, a, buffersize=buffersize, tempdir=tempdir,
                         cache=cache, strict=strict)
subtracted = recordcomplement(a, b, buffersize=buffersize, tempdir=tempdir,
                              cache=cache, strict=strict)
return added, subtracted
def recorddiff(a, b, buffersize=None, tempdir=None, cache=True, strict=False)
Find the difference between records in two tables. E.g.::

    >>> import petl as etl
    >>> a = [['foo', 'bar', 'baz'],
    ...      ['A', 1, True],
    ...      ['C', 7, False],
    ...      ['B', 2, False],
    ...      ['C', 9, True]]
    >>> b = [['bar', 'foo', 'baz'],
    ...      [2, 'B', False],
    ...      [9, 'A', False],
    ...      [3, 'B', True],
    ...      [9, 'C', True]]
    >>> added, subtracted = etl.recorddiff(a, b)
    >>> added
    +-----+-----+-------+
    | bar | foo | baz   |
    +=====+=====+=======+
    | 3   | 'B' | True  |
    +-----+-----+-------+
    | 9   | 'A' | False |
    +-----+-----+-------+
    >>> subtracted
    +-----+-----+-------+
    | foo | bar | baz   |
    +=====+=====+=======+
    | 'A' | 1   | True  |
    +-----+-----+-------+
    | 'C' | 7   | False |
    +-----+-----+-------+

Convenient shorthand for ``(recordcomplement(b, a), recordcomplement(a, b))``. See also :func:`petl.transform.setops.recordcomplement`.

See also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function.

.. versionchanged:: 1.1.0
    If `strict` is `True` then strict set-like behaviour is used.
2.561276
2.043648
1.253286
return IntersectionView(a, b, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache)
def intersection(a, b, presorted=False, buffersize=None, tempdir=None, cache=True)
Return rows in `a` that are also in `b`. E.g.::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar', 'baz'],
    ...           ['A', 1, True],
    ...           ['C', 7, False],
    ...           ['B', 2, False],
    ...           ['C', 9, True]]
    >>> table2 = [['x', 'y', 'z'],
    ...           ['B', 2, False],
    ...           ['A', 9, False],
    ...           ['B', 3, True],
    ...           ['C', 9, True]]
    >>> table3 = etl.intersection(table1, table2)
    >>> table3
    +-----+-----+-------+
    | foo | bar | baz   |
    +=====+=====+=======+
    | 'B' | 2   | False |
    +-----+-----+-------+
    | 'C' | 9   | True  |
    +-----+-----+-------+

If `presorted` is True, it is assumed that the data are already sorted by the given key, and the `buffersize`, `tempdir` and `cache` arguments are ignored. Otherwise, the data are sorted; see also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function.
3.141496
3.769503
0.833398
return HashComplementView(a, b, strict=strict)
def hashcomplement(a, b, strict=False)
Alternative implementation of :func:`petl.transform.setops.complement`, where the complement is executed by constructing an in-memory set for all rows found in the right hand table, then iterating over rows from the left hand table. May be faster and/or more resource efficient where the right table is small and the left table is large. .. versionchanged:: 1.1.0 If `strict` is `True` then strict set-like behaviour is used, i.e., only rows in `a` not found in `b` are returned.
14.558587
12.37715
1.176247
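The hashcomplement entry above has no worked example, so here is a minimal sketch; the tables and rendered output are illustrative assumptions rather than output copied from the petl docs. Because the right table is loaded into an in-memory set, this suits a small `b` and a large `a`::

    >>> import petl as etl
    >>> a = [['foo', 'bar'],
    ...      ['A', 1],
    ...      ['B', 2],
    ...      ['C', 7]]
    >>> b = [['foo', 'bar'],
    ...      ['B', 2]]
    >>> etl.hashcomplement(a, b)
    +-----+-----+
    | foo | bar |
    +=====+=====+
    | 'A' | 1   |
    +-----+-----+
    | 'C' | 7   |
    +-----+-----+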
return DuplicatesView(table, key=key, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache)
def duplicates(table, key=None, presorted=False, buffersize=None, tempdir=None, cache=True)
Select rows with duplicate values under a given key (or duplicate rows where no key is given). E.g.::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar', 'baz'],
    ...           ['A', 1, 2.0],
    ...           ['B', 2, 3.4],
    ...           ['D', 6, 9.3],
    ...           ['B', 3, 7.8],
    ...           ['B', 2, 12.3],
    ...           ['E', None, 1.3],
    ...           ['D', 4, 14.5]]
    >>> table2 = etl.duplicates(table1, 'foo')
    >>> table2
    +-----+-----+------+
    | foo | bar | baz  |
    +=====+=====+======+
    | 'B' | 2   | 3.4  |
    +-----+-----+------+
    | 'B' | 3   | 7.8  |
    +-----+-----+------+
    | 'B' | 2   | 12.3 |
    +-----+-----+------+
    | 'D' | 6   | 9.3  |
    +-----+-----+------+
    | 'D' | 4   | 14.5 |
    +-----+-----+------+
    >>> # compound keys are supported
    ... table3 = etl.duplicates(table1, key=['foo', 'bar'])
    >>> table3
    +-----+-----+------+
    | foo | bar | baz  |
    +=====+=====+======+
    | 'B' | 2   | 3.4  |
    +-----+-----+------+
    | 'B' | 2   | 12.3 |
    +-----+-----+------+

If `presorted` is True, it is assumed that the data are already sorted by the given key, and the `buffersize`, `tempdir` and `cache` arguments are ignored. Otherwise, the data are sorted; see also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function.

See also :func:`petl.transform.dedup.unique` and :func:`petl.transform.dedup.distinct`.
2.489773
3.482502
0.714938
return UniqueView(table, key=key, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache)
def unique(table, key=None, presorted=False, buffersize=None, tempdir=None, cache=True)
Select rows with unique values under a given key (or unique rows if no key is given). E.g.::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar', 'baz'],
    ...           ['A', 1, 2],
    ...           ['B', '2', '3.4'],
    ...           ['D', 'xyz', 9.0],
    ...           ['B', u'3', u'7.8'],
    ...           ['B', '2', 42],
    ...           ['E', None, None],
    ...           ['D', 4, 12.3],
    ...           ['F', 7, 2.3]]
    >>> table2 = etl.unique(table1, 'foo')
    >>> table2
    +-----+------+------+
    | foo | bar  | baz  |
    +=====+======+======+
    | 'A' | 1    | 2    |
    +-----+------+------+
    | 'E' | None | None |
    +-----+------+------+
    | 'F' | 7    | 2.3  |
    +-----+------+------+

If `presorted` is True, it is assumed that the data are already sorted by the given key, and the `buffersize`, `tempdir` and `cache` arguments are ignored. Otherwise, the data are sorted; see also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function.

See also :func:`petl.transform.dedup.duplicates` and :func:`petl.transform.dedup.distinct`.
2.86164
4.23175
0.676231
return ConflictsView(table, key, missing=missing, exclude=exclude, include=include, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache)
def conflicts(table, key, missing=None, include=None, exclude=None, presorted=False, buffersize=None, tempdir=None, cache=True)
Select rows with the same key value but differing in some other field. E.g.::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar', 'baz'],
    ...           ['A', 1, 2.7],
    ...           ['B', 2, None],
    ...           ['D', 3, 9.4],
    ...           ['B', None, 7.8],
    ...           ['E', None],
    ...           ['D', 3, 12.3],
    ...           ['A', 2, None]]
    >>> table2 = etl.conflicts(table1, 'foo')
    >>> table2
    +-----+-----+------+
    | foo | bar | baz  |
    +=====+=====+======+
    | 'A' | 1   | 2.7  |
    +-----+-----+------+
    | 'A' | 2   | None |
    +-----+-----+------+
    | 'D' | 3   | 9.4  |
    +-----+-----+------+
    | 'D' | 3   | 12.3 |
    +-----+-----+------+

Missing values are not considered conflicts. By default, `None` is treated as the missing value; this can be changed via the `missing` keyword argument.

One or more fields can be ignored when determining conflicts by providing the `exclude` keyword argument. Alternatively, fields to use when determining conflicts can be specified explicitly with the `include` keyword argument. This provides a simple mechanism for analysing the source of conflicting rows from multiple tables, e.g.::

    >>> table1 = [['foo', 'bar'], [1, 'a'], [2, 'b']]
    >>> table2 = [['foo', 'bar'], [1, 'a'], [2, 'c']]
    >>> table3 = etl.cat(etl.addfield(table1, 'source', 1),
    ...                  etl.addfield(table2, 'source', 2))
    >>> table4 = etl.conflicts(table3, key='foo', exclude='source')
    >>> table4
    +-----+-----+--------+
    | foo | bar | source |
    +=====+=====+========+
    | 2   | 'b' | 1      |
    +-----+-----+--------+
    | 2   | 'c' | 2      |
    +-----+-----+--------+

If `presorted` is True, it is assumed that the data are already sorted by the given key, and the `buffersize`, `tempdir` and `cache` arguments are ignored. Otherwise, the data are sorted; see also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function.
2.203741
3.189452
0.690947
return DistinctView(table, key=key, count=count, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache)
def distinct(table, key=None, count=None, presorted=False, buffersize=None, tempdir=None, cache=True)
Return only distinct rows in the table. If the `count` argument is not None, it will be used as the name for an additional field, and the values of the field will be the number of duplicate rows. If the `key` keyword argument is passed, the comparison is done on the given key instead of the full row. See also :func:`petl.transform.dedup.duplicates`, :func:`petl.transform.dedup.unique`, :func:`petl.transform.reductions.groupselectfirst`, :func:`petl.transform.reductions.groupselectlast`.
2.355716
2.904253
0.811127
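A minimal sketch of distinct with the `count` argument (example data and rendered output are illustrative assumptions; with no `key`, rows are compared whole and the output is sorted)::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'],
    ...           ['A', 1],
    ...           ['B', 2],
    ...           ['B', 2],
    ...           ['C', 1]]
    >>> etl.distinct(table1, count='n')
    +-----+-----+---+
    | foo | bar | n |
    +=====+=====+===+
    | 'A' | 1   | 1 |
    +-----+-----+---+
    | 'B' | 2   | 2 |
    +-----+-----+---+
    | 'C' | 1   | 1 |
    +-----+-----+---+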
vals = set()
for v in itervalues(table, field):
    if v in vals:
        return False
    else:
        vals.add(v)
return True
def isunique(table, field)
Return True if there are no duplicate values for the given field(s), otherwise False. E.g.::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'],
    ...           ['a', 1],
    ...           ['b'],
    ...           ['b', 2],
    ...           ['c', 3, True]]
    >>> etl.isunique(table1, 'foo')
    False
    >>> etl.isunique(table1, 'bar')
    True

The `field` argument can be a single field name or index (starting from zero) or a tuple of field names and/or indexes.
3.732783
4.913568
0.759689
import sqlalchemy
col_not_none = [v for v in col if v is not None]
sql_column_kwargs = {}
sql_type_kwargs = {}
if len(col_not_none) == 0:
    sql_column_type = sqlalchemy.String
    if constraints:
        sql_type_kwargs['length'] = NULL_COLUMN_MAX_LENGTH
elif all(isinstance(v, bool) for v in col_not_none):
    sql_column_type = sqlalchemy.Boolean
elif all(isinstance(v, int) for v in col_not_none):
    if max(col_not_none) > SQL_INTEGER_MAX \
            or min(col_not_none) < SQL_INTEGER_MIN:
        sql_column_type = sqlalchemy.BigInteger
    else:
        sql_column_type = sqlalchemy.Integer
# note: `long` comes from petl.compat; on Python 3 it aliases int, so the
# next two branches are redundant there but kept for Python 2 compatibility
elif all(isinstance(v, long) for v in col_not_none):
    sql_column_type = sqlalchemy.BigInteger
elif all(isinstance(v, (int, long)) for v in col_not_none):
    sql_column_type = sqlalchemy.BigInteger
elif all(isinstance(v, (int, long, float)) for v in col_not_none):
    sql_column_type = sqlalchemy.Float
elif all(isinstance(v, datetime.datetime) for v in col_not_none):
    sql_column_type = sqlalchemy.DateTime
elif all(isinstance(v, datetime.date) for v in col_not_none):
    sql_column_type = sqlalchemy.Date
elif all(isinstance(v, datetime.time) for v in col_not_none):
    sql_column_type = sqlalchemy.Time
else:
    # fall back to String, sized to the longest value if constrained
    sql_column_type = sqlalchemy.String
    if constraints:
        sql_type_kwargs['length'] = max([len(text_type(v)) for v in col])
if constraints:
    sql_column_kwargs['nullable'] = len(col_not_none) < len(col)
return sqlalchemy.Column(colname, sql_column_type(**sql_type_kwargs),
                         **sql_column_kwargs)
def make_sqlalchemy_column(col, colname, constraints=True)
Infer an appropriate SQLAlchemy column type based on a sequence of values.

Keyword arguments:

col : sequence
    A sequence of values to use to infer type, length etc.
colname : string
    Name of column
constraints : bool
    If True use length and nullable constraints
1.708899
1.679956
1.017228
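A small sketch of the inference logic above (the import path is an assumption; in recent petl versions this helper lives in ``petl.io.db_create``):

    import sqlalchemy
    from petl.io.db_create import make_sqlalchemy_column  # path assumed

    # an int column containing a None: inferred as Integer, nullable
    c = make_sqlalchemy_column([1, 2, None], 'foo')
    assert isinstance(c.type, sqlalchemy.Integer)
    assert c.nullable is True

    # an all-string column: inferred as String sized to the longest value
    c = make_sqlalchemy_column(['x', 'yy', 'zzz'], 'bar')
    assert isinstance(c.type, sqlalchemy.String)
    assert c.type.length == 3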
import sqlalchemy
if not metadata:
    metadata = sqlalchemy.MetaData()
sql_table = sqlalchemy.Table(tablename, metadata, schema=schema)
cols = columns(table)
flds = list(cols.keys())
for f in flds:
    sql_column = make_sqlalchemy_column(cols[f], f, constraints=constraints)
    sql_table.append_column(sql_column)
return sql_table
def make_sqlalchemy_table(table, tablename, schema=None, constraints=True, metadata=None)
Create an SQLAlchemy table definition based on data in `table`.

Keyword arguments:

table : table container
    Table data to use to infer types etc.
tablename : text
    Name of the table
schema : text
    Name of the database schema to create the table in
constraints : bool
    If True use length and nullable constraints
metadata : sqlalchemy.MetaData
    Custom table metadata
2.612601
2.943677
0.88753
import sqlalchemy
sql_table = make_sqlalchemy_table(table, tablename, schema=schema,
                                  constraints=constraints, metadata=metadata)
if dialect:
    module = __import__('sqlalchemy.dialects.%s' % DIALECTS[dialect],
                        fromlist=['dialect'])
    sql_dialect = module.dialect()
else:
    sql_dialect = None
return text_type(sqlalchemy.schema.CreateTable(sql_table)
                 .compile(dialect=sql_dialect)).strip()
def make_create_table_statement(table, tablename, schema=None, constraints=True, metadata=None, dialect=None)
Generate a CREATE TABLE statement based on data in `table`.

Keyword arguments:

table : table container
    Table data to use to infer types etc.
tablename : text
    Name of the table
schema : text
    Name of the database schema to create the table in
constraints : bool
    If True use length and nullable constraints
metadata : sqlalchemy.MetaData
    Custom table metadata
dialect : text
    One of {'access', 'sybase', 'sqlite', 'informix', 'firebird', 'mysql', 'oracle', 'maxdb', 'postgresql', 'mssql'}
2.832237
3.136905
0.902876
if sample > 0:
    table = head(table, sample)
sql = make_create_table_statement(table, tablename, schema=schema,
                                  constraints=constraints,
                                  metadata=metadata, dialect=dialect)
_execute(sql, dbo, commit=commit)
def create_table(table, dbo, tablename, schema=None, commit=True, constraints=True, metadata=None, dialect=None, sample=1000)
Create a database table based on a sample of data in the given `table`.

Keyword arguments:

table : table container
    Table data to load
dbo : database object
    DB-API 2.0 connection, callable returning a DB-API 2.0 cursor, or SQLAlchemy connection, engine or session
tablename : text
    Name of the table
schema : text
    Name of the database schema to create the table in
commit : bool
    If True commit the changes
constraints : bool
    If True use length and nullable constraints
metadata : sqlalchemy.MetaData
    Custom table metadata
dialect : text
    One of {'access', 'sybase', 'sqlite', 'informix', 'firebird', 'mysql', 'oracle', 'maxdb', 'postgresql', 'mssql'}
sample : int
    Number of rows to sample when inferring types etc., set to 0 to use the whole table
3.266443
3.769973
0.866437
# sanitise table name
tablename = _quote(tablename)
if schema is not None:
    tablename = _quote(schema) + '.' + tablename
sql = u'DROP TABLE %s' % tablename
_execute(sql, dbo, commit)
def drop_table(dbo, tablename, schema=None, commit=True)
Drop a database table.

Keyword arguments:

dbo : database object
    DB-API 2.0 connection, callable returning a DB-API 2.0 cursor, or SQLAlchemy connection, engine or session
tablename : text
    Name of the table
schema : text
    Name of the database schema the table is in
commit : bool
    If True commit the changes
3.685596
4.64506
0.793444
return HDF5View(source, where=where, name=name, condition=condition, condvars=condvars, start=start, stop=stop, step=step)
def fromhdf5(source, where=None, name=None, condition=None, condvars=None, start=None, stop=None, step=None)
Provides access to an HDF5 table. E.g.::

    >>> import petl as etl
    >>> import tables
    >>> # set up a new hdf5 table to demonstrate with
    ... h5file = tables.open_file('example.h5', mode='w',
    ...                           title='Example file')
    >>> h5file.create_group('/', 'testgroup', 'Test Group')
    /testgroup (Group) 'Test Group'
      children := []
    >>> class FooBar(tables.IsDescription):
    ...     foo = tables.Int32Col(pos=0)
    ...     bar = tables.StringCol(6, pos=2)
    ...
    >>> h5table = h5file.create_table('/testgroup', 'testtable', FooBar,
    ...                               'Test Table')
    >>> # load some data into the table
    ... table1 = (('foo', 'bar'),
    ...           (1, b'asdfgh'),
    ...           (2, b'qwerty'),
    ...           (3, b'zxcvbn'))
    >>> for row in table1[1:]:
    ...     for i, f in enumerate(table1[0]):
    ...         h5table.row[f] = row[i]
    ...     h5table.row.append()
    ...
    >>> h5file.flush()
    >>> h5file.close()
    >>> #
    ... # now demonstrate use of fromhdf5
    ... table1 = etl.fromhdf5('example.h5', '/testgroup', 'testtable')
    >>> table1
    +-----+-----------+
    | foo | bar       |
    +=====+===========+
    | 1   | b'asdfgh' |
    +-----+-----------+
    | 2   | b'qwerty' |
    +-----+-----------+
    | 3   | b'zxcvbn' |
    +-----+-----------+
    >>> # alternatively just specify path to table node
    ... table1 = etl.fromhdf5('example.h5', '/testgroup/testtable')
    >>> # ...or use an existing tables.File object
    ... h5file = tables.open_file('example.h5')
    >>> table1 = etl.fromhdf5(h5file, '/testgroup/testtable')
    >>> # ...or use an existing tables.Table object
    ... h5tbl = h5file.get_node('/testgroup/testtable')
    >>> table1 = etl.fromhdf5(h5tbl)
    >>> # use a condition to filter data
    ... table2 = etl.fromhdf5(h5tbl, condition='foo < 3')
    >>> table2
    +-----+-----------+
    | foo | bar       |
    +=====+===========+
    | 1   | b'asdfgh' |
    +-----+-----------+
    | 2   | b'qwerty' |
    +-----+-----------+
    >>> h5file.close()
2.351212
3.178922
0.739626
assert sortby is not None, 'no column specified to sort by'
return HDF5SortedView(source, where=where, name=name,
                      sortby=sortby, checkCSI=checkCSI,
                      start=start, stop=stop, step=step)
def fromhdf5sorted(source, where=None, name=None, sortby=None, checkCSI=False, start=None, stop=None, step=None)
Provides access to an HDF5 table, sorted by an indexed column, e.g.::

    >>> import petl as etl
    >>> import tables
    >>> # set up a new hdf5 table to demonstrate with
    ... h5file = tables.open_file('example.h5', mode='w', title='Test file')
    >>> h5file.create_group('/', 'testgroup', 'Test Group')
    /testgroup (Group) 'Test Group'
      children := []
    >>> class FooBar(tables.IsDescription):
    ...     foo = tables.Int32Col(pos=0)
    ...     bar = tables.StringCol(6, pos=2)
    ...
    >>> h5table = h5file.create_table('/testgroup', 'testtable', FooBar, 'Test Table')
    >>> # load some data into the table
    ... table1 = (('foo', 'bar'),
    ...           (3, b'asdfgh'),
    ...           (2, b'qwerty'),
    ...           (1, b'zxcvbn'))
    >>> for row in table1[1:]:
    ...     for i, f in enumerate(table1[0]):
    ...         h5table.row[f] = row[i]
    ...     h5table.row.append()
    ...
    >>> h5table.cols.foo.create_csindex()  # CS index is required
    0
    >>> h5file.flush()
    >>> h5file.close()
    >>> #
    ... # access the data, sorted by the indexed column
    ... table2 = etl.fromhdf5sorted('example.h5', '/testgroup', 'testtable',
    ...                             sortby='foo')
    >>> table2
    +-----+-----------+
    | foo | bar       |
    +=====+===========+
    | 1   | b'zxcvbn' |
    +-----+-----------+
    | 2   | b'qwerty' |
    +-----+-----------+
    | 3   | b'asdfgh' |
    +-----+-----------+
2.644825
3.422112
0.772864
import tables
it = iter(table)

if create:
    with _get_hdf5_file(source, mode='a') as h5file:

        if drop:
            try:
                h5file.get_node(where, name)
            except tables.NoSuchNodeError:
                pass
            else:
                h5file.remove_node(where, name)

        # determine datatype
        if description is None:
            peek, it = iterpeek(it, sample)
            # use a numpy dtype
            description = infer_dtype(peek)

        # create the table
        h5file.create_table(where, name, description,
                            title=title, filters=filters,
                            expectedrows=expectedrows,
                            chunkshape=chunkshape, byteorder=byteorder,
                            createparents=createparents)

with _get_hdf5_table(source, where, name, mode='a') as h5table:

    # truncate the existing table
    h5table.truncate(0)

    # load the data
    _insert(it, h5table)
def tohdf5(table, source, where=None, name=None, create=False, drop=False, description=None, title='', filters=None, expectedrows=10000, chunkshape=None, byteorder=None, createparents=False, sample=1000)
Write to an HDF5 table. If `create` is `False`, assumes the table already exists, and attempts to truncate it before loading. If `create` is `True`, a new table will be created, and if `drop` is True, any existing table will be dropped first. If `description` is `None`, the description will be guessed. E.g.::

    >>> import petl as etl
    >>> table1 = (('foo', 'bar'),
    ...           (1, b'asdfgh'),
    ...           (2, b'qwerty'),
    ...           (3, b'zxcvbn'))
    >>> etl.tohdf5(table1, 'example.h5', '/testgroup', 'testtable',
    ...            drop=True, create=True, createparents=True)
    >>> etl.fromhdf5('example.h5', '/testgroup', 'testtable')
    +-----+-----------+
    | foo | bar       |
    +=====+===========+
    | 1   | b'asdfgh' |
    +-----+-----------+
    | 2   | b'qwerty' |
    +-----+-----------+
    | 3   | b'zxcvbn' |
    +-----+-----------+
2.942688
3.132639
0.939364
with _get_hdf5_table(source, where, name, mode='a') as h5table:

    # load the data
    _insert(table, h5table)
def appendhdf5(table, source, where=None, name=None)
As :func:`petl.io.hdf5.tohdf5` but don't truncate the target table before loading.
6.532303
6.345669
1.029411
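A brief sketch of appendhdf5 in use, assuming the 'example.h5' table created in the tohdf5 example above already exists (the appended row and rendered output are illustrative)::

    >>> import petl as etl
    >>> table2 = (('foo', 'bar'),
    ...           (4, b'uvwxyz'))
    >>> etl.appendhdf5(table2, 'example.h5', '/testgroup', 'testtable')
    >>> etl.fromhdf5('example.h5', '/testgroup', 'testtable')
    +-----+-----------+
    | foo | bar       |
    +=====+===========+
    | 1   | b'asdfgh' |
    +-----+-----------+
    | 2   | b'qwerty' |
    +-----+-----------+
    | 3   | b'zxcvbn' |
    +-----+-----------+
    | 4   | b'uvwxyz' |
    +-----+-----------+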
import pandas as pd
l = list(table)
data = l[1:]
if columns is None:
    columns = l[0]
return pd.DataFrame.from_records(data, index=index, exclude=exclude,
                                 columns=columns,
                                 coerce_float=coerce_float, nrows=nrows)
def todataframe(table, index=None, exclude=None, columns=None, coerce_float=False, nrows=None)
Load data from the given `table` into a `pandas <http://pandas.pydata.org/>`_ DataFrame. E.g.::

    >>> import petl as etl
    >>> table = [('foo', 'bar', 'baz'),
    ...          ('apples', 1, 2.5),
    ...          ('oranges', 3, 4.4),
    ...          ('pears', 7, .1)]
    >>> df = etl.todataframe(table)
    >>> df
           foo  bar  baz
    0   apples    1  2.5
    1  oranges    3  4.4
    2    pears    7  0.1
2.29835
3.341954
0.687726
vals = iter(values(table, field))
try:
    minv = maxv = next(vals)
except StopIteration:
    return None, None
else:
    for v in vals:
        if v < minv:
            minv = v
        if v > maxv:
            maxv = v
    return minv, maxv
def limits(table, field)
Find minimum and maximum values under the given field. E.g.::

    >>> import petl as etl
    >>> table = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]]
    >>> minv, maxv = etl.limits(table, 'bar')
    >>> minv
    1
    >>> maxv
    3

The `field` argument can be a field name or index (starting from zero).
2.309609
2.395629
0.964093
_min = None
_max = None
_sum = 0
_mean = 0
_var = 0
_count = 0
_errors = 0
for v in values(table, field):
    try:
        v = float(v)
    except (ValueError, TypeError):
        _errors += 1
    else:
        _count += 1
        if _min is None or v < _min:
            _min = v
        if _max is None or v > _max:
            _max = v
        _sum += v
        # update running mean and variance with an online algorithm
        _mean, _var = onlinestats(v, _count, mean=_mean, variance=_var)
_std = _var ** .5
return _stats(_count, _errors, _sum, _min, _max, _mean, _var, _std)
def stats(table, field)
Calculate basic descriptive statistics on a given field. E.g.::

    >>> import petl as etl
    >>> table = [['foo', 'bar', 'baz'],
    ...          ['A', 1, 2],
    ...          ['B', '2', '3.4'],
    ...          [u'B', u'3', u'7.8', True],
    ...          ['D', 'xyz', 9.0],
    ...          ['E', None]]
    >>> etl.stats(table, 'bar')
    stats(count=3, errors=2, sum=6.0, min=1.0, max=3.0, mean=2.0, pvariance=0.6666666666666666, pstdev=0.816496580927726)

The `field` argument can be a field name or index (starting from zero).
2.516304
2.486704
1.011904
total = 0
vs = 0
for v in values(table, field, missing=missing):
    total += 1
    if v == value:
        vs += 1
return vs, float(vs) / total
def valuecount(table, field, value, missing=None)
Count the number of occurrences of `value` under the given field. Returns the absolute count and relative frequency as a pair. E.g.::

    >>> import petl as etl
    >>> table = [['foo', 'bar'],
    ...          ['a', 1],
    ...          ['b', 2],
    ...          ['b', 7]]
    >>> etl.valuecount(table, 'foo', 'b')
    (2, 0.6666666666666666)

The `field` argument can be a single field name or index (starting from zero) or a tuple of field names and/or indexes.
3.301548
4.744624
0.69585
missing = kwargs.get('missing', None)
counter = Counter()
for v in values(table, field, missing=missing):
    try:
        counter[v] += 1
    except IndexError:
        pass  # short row
return counter
def valuecounter(table, *field, **kwargs)
Find distinct values for the given field and count the number of occurrences. Returns a :class:`dict` mapping values to counts. E.g.::

    >>> import petl as etl
    >>> table = [['foo', 'bar'],
    ...          ['a', True],
    ...          ['b'],
    ...          ['b', True],
    ...          ['c', False]]
    >>> etl.valuecounter(table, 'foo')
    Counter({'b': 2, 'a': 1, 'c': 1})

The `field` argument can be a single field name or index (starting from zero) or a tuple of field names and/or indexes.
4.519077
5.109025
0.884528
if isinstance(parsers, (list, tuple)):
    parsers = dict(parsers)
counter, errors = Counter(), Counter()
# need to initialise
for n in parsers.keys():
    counter[n] = 0
    errors[n] = 0
for v in values(table, field):
    if isinstance(v, string_types):
        for name, parser in parsers.items():
            try:
                parser(v)
            except:
                errors[name] += 1
            else:
                counter[name] += 1
return counter, errors
def parsecounter(table, field, parsers=(('int', int), ('float', float)))
Count the number of `str` or `unicode` values under the given field that can be parsed as ints, floats or via custom parser functions. Return a pair of `Counter` objects, the first mapping parser names to the number of strings successfully parsed, the second mapping parser names to the number of errors. E.g.::

    >>> import petl as etl
    >>> table = [['foo', 'bar', 'baz'],
    ...          ['A', 'aaa', 2],
    ...          ['B', u'2', '3.4'],
    ...          [u'B', u'3', u'7.8', True],
    ...          ['D', '3.7', 9.0],
    ...          ['E', 42]]
    >>> counter, errors = etl.parsecounter(table, 'bar')
    >>> counter
    Counter({'float': 3, 'int': 2})
    >>> errors
    Counter({'int': 2, 'float': 1})

The `field` argument can be a field name or index (starting from zero).
3.069731
2.957422
1.037975
return ParseCountsView(table, field, parsers=parsers)
def parsecounts(table, field, parsers=(('int', int), ('float', float)))
Count the number of `str` or `unicode` values under the given field that can be parsed as ints, floats or via custom parser functions. Return a table mapping parser names to the number of values successfully parsed and the number of errors. E.g.::

    >>> import petl as etl
    >>> table = [['foo', 'bar', 'baz'],
    ...          ['A', 'aaa', 2],
    ...          ['B', u'2', '3.4'],
    ...          [u'B', u'3', u'7.8', True],
    ...          ['D', '3.7', 9.0],
    ...          ['E', 42]]
    >>> etl.parsecounts(table, 'bar')
    +---------+-------+--------+
    | type    | count | errors |
    +=========+=======+========+
    | 'float' | 3     | 1      |
    +---------+-------+--------+
    | 'int'   | 2     | 2      |
    +---------+-------+--------+

The `field` argument can be a field name or index (starting from zero).
12.085215
19.324446
0.625385
counter = Counter()
for v in values(table, field):
    try:
        counter[v.__class__.__name__] += 1
    except IndexError:
        pass  # ignore short rows
return counter
def typecounter(table, field)
Count the number of values found for each Python type. E.g.::

    >>> import petl as etl
    >>> table = [['foo', 'bar', 'baz'],
    ...          ['A', 1, 2],
    ...          ['B', u'2', '3.4'],
    ...          [u'B', u'3', u'7.8', True],
    ...          ['D', u'xyz', 9.0],
    ...          ['E', 42]]
    >>> etl.typecounter(table, 'foo')
    Counter({'str': 5})
    >>> etl.typecounter(table, 'bar')
    Counter({'str': 3, 'int': 2})
    >>> etl.typecounter(table, 'baz')
    Counter({'str': 2, 'int': 1, 'float': 1, 'NoneType': 1})

The `field` argument can be a field name or index (starting from zero).
5.488662
6.792196
0.808084
trans = maketrans(
    'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
    'AAAAAAAAAAAAAAAAAAAAAAAAAAaaaaaaaaaaaaaaaaaaaaaaaaaa9999999999'
)
counter = Counter()
for v in values(table, field):
    p = str(v).translate(trans)
    counter[p] += 1
return counter
def stringpatterncounter(table, field)
Profile string patterns in the given field, returning a :class:`dict` mapping patterns to counts.
3.268853
3.113102
1.050031
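A minimal sketch of the pattern profiling above (uppercase letters collapse to 'A', lowercase to 'a' and digits to '9', per the translation table; the example data is an illustrative assumption)::

    >>> import petl as etl
    >>> table = [['name'],
    ...          ['Mr. Foo'],
    ...          ['Mrs. Bar'],
    ...          ['Mr. Spo']]
    >>> etl.stringpatterncounter(table, 'name')
    Counter({'Aa. Aaa': 2, 'Aaa. Aaa': 1})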
counter = stringpatterncounter(table, field)
output = [('pattern', 'count', 'frequency')]
counter = counter.most_common()
total = sum(c[1] for c in counter)
cnts = [(c[0], c[1], float(c[1]) / total) for c in counter]
output.extend(cnts)
return wrap(output)
def stringpatterns(table, field)
Profile string patterns in the given field, returning a table of patterns, counts and frequencies. E.g.::

    >>> import petl as etl
    >>> table = [['foo', 'bar'],
    ...          ['Mr. Foo', '123-1254'],
    ...          ['Mrs. Bar', '234-1123'],
    ...          ['Mr. Spo', '123-1254'],
    ...          [u'Mr. Baz', u'321 1434'],
    ...          [u'Mrs. Baz', u'321 1434'],
    ...          ['Mr. Quux', '123-1254-XX']]
    >>> etl.stringpatterns(table, 'foo')
    +------------+-------+---------------------+
    | pattern    | count | frequency           |
    +============+=======+=====================+
    | 'Aa. Aaa'  | 3     | 0.5                 |
    +------------+-------+---------------------+
    | 'Aaa. Aaa' | 2     | 0.3333333333333333  |
    +------------+-------+---------------------+
    | 'Aa. Aaaa' | 1     | 0.16666666666666666 |
    +------------+-------+---------------------+
    >>> etl.stringpatterns(table, 'bar')
    +---------------+-------+---------------------+
    | pattern       | count | frequency           |
    +===============+=======+=====================+
    | '999-9999'    | 3     | 0.5                 |
    +---------------+-------+---------------------+
    | '999 9999'    | 2     | 0.3333333333333333  |
    +---------------+-------+---------------------+
    | '999-9999-AA' | 1     | 0.16666666666666666 |
    +---------------+-------+---------------------+
3.875334
4.437272
0.87336
counter = Counter()
for row in data(table):
    counter[len(row)] += 1
output = [('length', 'count')]
output.extend(counter.most_common())
return wrap(output)
def rowlengths(table)
Report on row lengths found in the table. E.g.::

    >>> import petl as etl
    >>> table = [['foo', 'bar', 'baz'],
    ...          ['A', 1, 2],
    ...          ['B', '2', '3.4'],
    ...          [u'B', u'3', u'7.8', True],
    ...          ['D', 'xyz', 9.0],
    ...          ['E', None],
    ...          ['F', 9]]
    >>> etl.rowlengths(table)
    +--------+-------+
    | length | count |
    +========+=======+
    | 3      | 3     |
    +--------+-------+
    | 2      | 2     |
    +--------+-------+
    | 4      | 1     |
    +--------+-------+

Useful for finding potential problems in data files.
4.867489
5.44126
0.894552
source = write_source_from_arg(source)
return TeeHTMLView(table, source=source, encoding=encoding, errors=errors,
                   caption=caption, vrepr=vrepr,
                   lineterminator=lineterminator, index_header=index_header,
                   tr_style=tr_style, td_styles=td_styles, truncate=truncate)
def teehtml(table, source=None, encoding=None, errors='strict', caption=None, vrepr=text_type, lineterminator='\n', index_header=False, tr_style=None, td_styles=None, truncate=None)
Return a table that writes rows to a Unicode HTML file as they are iterated over.
2.16511
2.233051
0.969575
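Because teehtml returns a table wrapper, it composes into a pipeline: rows are written to the HTML file as a downstream sink iterates over them. A hedged usage sketch (file names are illustrative)::

    >>> import petl as etl
    >>> table = [['foo', 'bar'], ['a', 1], ['b', 2]]
    >>> etl.teehtml(table, 'example.html').tocsv('example.csv')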
return RandomTable(numflds, numrows, wait=wait, seed=seed)
def randomtable(numflds=5, numrows=100, wait=0, seed=None)
Construct a table with random numerical data. Use `numflds` and `numrows` to specify the number of fields and rows respectively. Set `wait` to a float greater than zero to simulate a delay on each row generation (number of seconds per row). E.g.::

    >>> import petl as etl
    >>> table = etl.randomtable(3, 100, seed=42)
    >>> table
    +----------------------+----------------------+---------------------+
    | f0                   | f1                   | f2                  |
    +======================+======================+=====================+
    | 0.6394267984578837   | 0.025010755222666936 | 0.27502931836911926 |
    +----------------------+----------------------+---------------------+
    | 0.22321073814882275  | 0.7364712141640124   | 0.6766994874229113  |
    +----------------------+----------------------+---------------------+
    | 0.8921795677048454   | 0.08693883262941615  | 0.4219218196852704  |
    +----------------------+----------------------+---------------------+
    | 0.029797219438070344 | 0.21863797480360336  | 0.5053552881033624  |
    +----------------------+----------------------+---------------------+
    | 0.026535969683863625 | 0.1988376506866485   | 0.6498844377795232  |
    +----------------------+----------------------+---------------------+
    ...

Note that the data are generated on the fly and are not stored in memory, so this function can be used to simulate very large tables.
3.925643
6.547901
0.599527
return DummyTable(numrows=numrows, fields=fields, wait=wait, seed=seed)
def dummytable(numrows=100, fields=(('foo', partial(random.randint, 0, 100)), ('bar', partial(random.choice, ('apples', 'pears', 'bananas', 'oranges'))), ('baz', random.random)), wait=0, seed=None)
Construct a table with dummy data. Use `numrows` to specify the number of rows. Set `wait` to a float greater than zero to simulate a delay on each row generation (number of seconds per row). E.g.::

    >>> import petl as etl
    >>> table1 = etl.dummytable(100, seed=42)
    >>> table1
    +-----+----------+----------------------+
    | foo | bar      | baz                  |
    +=====+==========+======================+
    | 81  | 'apples' | 0.025010755222666936 |
    +-----+----------+----------------------+
    | 35  | 'pears'  | 0.22321073814882275  |
    +-----+----------+----------------------+
    | 94  | 'apples' | 0.6766994874229113   |
    +-----+----------+----------------------+
    | 69  | 'apples' | 0.5904925124490397   |
    +-----+----------+----------------------+
    | 4   | 'apples' | 0.09369523986159245  |
    +-----+----------+----------------------+
    ...

    >>> # customise fields
    ... import random
    >>> from functools import partial
    >>> fields = [('foo', random.random),
    ...           ('bar', partial(random.randint, 0, 500)),
    ...           ('baz', partial(random.choice,
    ...                           ['chocolate', 'strawberry', 'vanilla']))]
    >>> table2 = etl.dummytable(100, fields=fields, seed=42)
    >>> table2
    +---------------------+-----+-------------+
    | foo                 | bar | baz         |
    +=====================+=====+=============+
    | 0.6394267984578837  | 12  | 'vanilla'   |
    +---------------------+-----+-------------+
    | 0.27502931836911926 | 114 | 'chocolate' |
    +---------------------+-----+-------------+
    | 0.7364712141640124  | 346 | 'vanilla'   |
    +---------------------+-----+-------------+
    | 0.8921795677048454  | 44  | 'vanilla'   |
    +---------------------+-----+-------------+
    | 0.4219218196852704  | 15  | 'chocolate' |
    +---------------------+-----+-------------+
    ...

Data generation functions can be specified via the `fields` keyword argument.

Note that the data are generated on the fly and are not stored in memory, so this function can be used to simulate very large tables.
2.593992
5.744788
0.451538
import intervaltree
tree = intervaltree.IntervalTree()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
assert start in flds, 'start field not recognised'
assert stop in flds, 'stop field not recognised'
getstart = itemgetter(flds.index(start))
getstop = itemgetter(flds.index(stop))
if value is None:
    getvalue = tuple
else:
    valueindices = asindices(hdr, value)
    assert len(valueindices) > 0, 'invalid value field specification'
    getvalue = itemgetter(*valueindices)
for row in it:
    tree.addi(getstart(row), getstop(row), getvalue(row))
return tree
def tupletree(table, start='start', stop='stop', value=None)
Construct an interval tree for the given table, where each node in the tree is a row of the table.
2.862635
2.858076
1.001595
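A quick sketch of querying the resulting tree (the import path, and the ``overlap`` method name from intervaltree >= 3.0, are assumptions)::

    >>> from petl.transform.intervals import tupletree  # path assumed
    >>> table = [['start', 'stop', 'value'],
    ...          [1, 4, 'foo'],
    ...          [3, 7, 'bar']]
    >>> tree = tupletree(table, value='value')
    >>> sorted(iv.data for iv in tree.overlap(2, 5))
    ['bar', 'foo']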
import intervaltree
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
assert start in flds, 'start field not recognised'
assert stop in flds, 'stop field not recognised'
getstart = itemgetter(flds.index(start))
getstop = itemgetter(flds.index(stop))
if value is None:
    getvalue = tuple
else:
    valueindices = asindices(hdr, value)
    assert len(valueindices) > 0, 'invalid value field specification'
    getvalue = itemgetter(*valueindices)
keyindices = asindices(hdr, key)
assert len(keyindices) > 0, 'invalid key'
getkey = itemgetter(*keyindices)
trees = dict()
for row in it:
    k = getkey(row)
    if k not in trees:
        trees[k] = intervaltree.IntervalTree()
    trees[k].addi(getstart(row), getstop(row), getvalue(row))
return trees
def facettupletrees(table, key, start='start', stop='stop', value=None)
Construct faceted interval trees for the given table, where each node in the tree is a row of the table.
2.508662
2.491036
1.007076
import intervaltree
getstart = attrgetter(start)
getstop = attrgetter(stop)
tree = intervaltree.IntervalTree()
for rec in records(table):
    tree.addi(getstart(rec), getstop(rec), rec)
return tree
def recordtree(table, start='start', stop='stop')
Construct an interval tree for the given table, where each node in the tree is a row of the table represented as a record object.
3.250651
3.079958
1.055421
import intervaltree
getstart = attrgetter(start)
getstop = attrgetter(stop)
getkey = attrgetter(key)
trees = dict()
for rec in records(table):
    k = getkey(rec)
    if k not in trees:
        trees[k] = intervaltree.IntervalTree()
    trees[k].addi(getstart(rec), getstop(rec), rec)
return trees
def facetrecordtrees(table, key, start='start', stop='stop')
Construct faceted interval trees for the given table, where each node in the tree is a record.
2.634124
2.514467
1.047587
tree = tupletree(table, start=start, stop=stop, value=value)
return IntervalTreeLookup(tree, include_stop=include_stop)
def intervallookup(table, start='start', stop='stop', value=None, include_stop=False)
Construct an interval lookup for the given table. E.g.::

    >>> import petl as etl
    >>> table = [['start', 'stop', 'value'],
    ...          [1, 4, 'foo'],
    ...          [3, 7, 'bar'],
    ...          [4, 9, 'baz']]
    >>> lkp = etl.intervallookup(table, 'start', 'stop')
    >>> lkp.search(0, 1)
    []
    >>> lkp.search(1, 2)
    [(1, 4, 'foo')]
    >>> lkp.search(2, 4)
    [(1, 4, 'foo'), (3, 7, 'bar')]
    >>> lkp.search(2, 5)
    [(1, 4, 'foo'), (3, 7, 'bar'), (4, 9, 'baz')]
    >>> lkp.search(9, 14)
    []
    >>> lkp.search(19, 140)
    []
    >>> lkp.search(0)
    []
    >>> lkp.search(1)
    [(1, 4, 'foo')]
    >>> lkp.search(2)
    [(1, 4, 'foo')]
    >>> lkp.search(4)
    [(3, 7, 'bar'), (4, 9, 'baz')]
    >>> lkp.search(5)
    [(3, 7, 'bar'), (4, 9, 'baz')]

Note start coordinates are included and stop coordinates are excluded from the interval. Use the `include_stop` keyword argument to include the upper bound of the interval when finding overlaps.

Some examples using the `include_stop` and `value` keyword arguments::

    >>> import petl as etl
    >>> table = [['start', 'stop', 'value'],
    ...          [1, 4, 'foo'],
    ...          [3, 7, 'bar'],
    ...          [4, 9, 'baz']]
    >>> lkp = etl.intervallookup(table, 'start', 'stop', include_stop=True,
    ...                          value='value')
    >>> lkp.search(0, 1)
    ['foo']
    >>> lkp.search(1, 2)
    ['foo']
    >>> lkp.search(2, 4)
    ['foo', 'bar', 'baz']
    >>> lkp.search(2, 5)
    ['foo', 'bar', 'baz']
    >>> lkp.search(9, 14)
    ['baz']
    >>> lkp.search(19, 140)
    []
    >>> lkp.search(0)
    []
    >>> lkp.search(1)
    ['foo']
    >>> lkp.search(2)
    ['foo']
    >>> lkp.search(4)
    ['foo', 'bar', 'baz']
    >>> lkp.search(5)
    ['bar', 'baz']
5.558577
8.617972
0.644998
tree = tupletree(table, start=start, stop=stop, value=value)
return IntervalTreeLookupOne(tree, strict=strict, include_stop=include_stop)
def intervallookupone(table, start='start', stop='stop', value=None, include_stop=False, strict=True)
Construct an interval lookup for the given table, returning at most one result for each query. E.g.::

    >>> import petl as etl
    >>> table = [['start', 'stop', 'value'],
    ...          [1, 4, 'foo'],
    ...          [3, 7, 'bar'],
    ...          [4, 9, 'baz']]
    >>> lkp = etl.intervallookupone(table, 'start', 'stop', strict=False)
    >>> lkp.search(0, 1)
    >>> lkp.search(1, 2)
    (1, 4, 'foo')
    >>> lkp.search(2, 4)
    (1, 4, 'foo')
    >>> lkp.search(2, 5)
    (1, 4, 'foo')
    >>> lkp.search(9, 14)
    >>> lkp.search(19, 140)
    >>> lkp.search(0)
    >>> lkp.search(1)
    (1, 4, 'foo')
    >>> lkp.search(2)
    (1, 4, 'foo')
    >>> lkp.search(4)
    (3, 7, 'bar')
    >>> lkp.search(5)
    (3, 7, 'bar')

If ``strict=True``, queries returning more than one result will raise a `DuplicateKeyError`. If ``strict=False`` and there is more than one result, the first result is returned.

Note start coordinates are included and stop coordinates are excluded from the interval. Use the `include_stop` keyword argument to include the upper bound of the interval when finding overlaps.
4.679167
6.28742
0.744211
tree = recordtree(table, start=start, stop=stop)
return IntervalTreeLookup(tree, include_stop=include_stop)
def intervalrecordlookup(table, start='start', stop='stop', include_stop=False)
As :func:`petl.transform.intervals.intervallookup` but return records instead of tuples.
5.825962
5.750293
1.013159
tree = recordtree(table, start=start, stop=stop)
return IntervalTreeLookupOne(tree, include_stop=include_stop, strict=strict)
def intervalrecordlookupone(table, start='start', stop='stop', include_stop=False, strict=True)
As :func:`petl.transform.intervals.intervallookupone` but return records instead of tuples.
4.835885
4.769049
1.014015
trees = facettupletrees(table, key, start=start, stop=stop, value=value)
out = dict()
for k in trees:
    out[k] = IntervalTreeLookupOne(trees[k], include_stop=include_stop,
                                   strict=strict)
return out
def facetintervallookupone(table, key, start='start', stop='stop', value=None, include_stop=False, strict=True)
Construct a faceted interval lookup for the given table, returning at most one result for each query. If ``strict=True``, queries returning more than one result will raise a `DuplicateKeyError`. If ``strict=False`` and there is more than one result, the first result is returned.
3.831985
4.675437
0.819599
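A minimal sketch (illustrative data; the returned mapping holds one lookup per key value, each exposing the same ``search`` method shown in the intervallookupone entry above)::

    >>> import petl as etl
    >>> table = [['chrom', 'start', 'stop', 'value'],
    ...          ['chr1', 1, 4, 'foo'],
    ...          ['chr2', 2, 5, 'bar']]
    >>> lkp = etl.facetintervallookupone(table, 'chrom', value='value',
    ...                                  strict=False)
    >>> lkp['chr1'].search(2, 3)
    'foo'
    >>> lkp['chr2'].search(0, 1)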
trees = facetrecordtrees(table, key, start=start, stop=stop)
out = dict()
for k in trees:
    out[k] = IntervalTreeLookup(trees[k], include_stop=include_stop)
return out
def facetintervalrecordlookup(table, key, start='start', stop='stop', include_stop=False)
As :func:`petl.transform.intervals.facetintervallookup` but return records.
4.505131
4.256986
1.058291
trees = facetrecordtrees(table, key, start=start, stop=stop)
out = dict()
for k in trees:
    out[k] = IntervalTreeLookupOne(trees[k], include_stop=include_stop,
                                   strict=strict)
return out
def facetintervalrecordlookupone(table, key, start, stop, include_stop=False, strict=True)
As :func:`petl.transform.intervals.facetintervallookupone` but return records.
4.128101
4.212748
0.979907
assert (lkey is None) == (rkey is None), \
    'facet key field must be provided for both or neither table'
return IntervalLeftJoinView(left, right, lstart=lstart, lstop=lstop,
                            rstart=rstart, rstop=rstop, lkey=lkey,
                            rkey=rkey, include_stop=include_stop,
                            missing=missing, lprefix=lprefix,
                            rprefix=rprefix)
def intervalleftjoin(left, right, lstart='start', lstop='stop', rstart='start', rstop='stop', lkey=None, rkey=None, include_stop=False, missing=None, lprefix=None, rprefix=None)
Like :func:`petl.transform.intervals.intervaljoin` but rows from the left table without a match in the right table are also included. E.g.::

    >>> import petl as etl
    >>> left = [['begin', 'end', 'quux'],
    ...         [1, 2, 'a'],
    ...         [2, 4, 'b'],
    ...         [2, 5, 'c'],
    ...         [9, 14, 'd'],
    ...         [1, 1, 'e'],
    ...         [10, 10, 'f']]
    >>> right = [['start', 'stop', 'value'],
    ...          [1, 4, 'foo'],
    ...          [3, 7, 'bar'],
    ...          [4, 9, 'baz']]
    >>> table1 = etl.intervalleftjoin(left, right,
    ...                               lstart='begin', lstop='end',
    ...                               rstart='start', rstop='stop')
    >>> table1.lookall()
    +-------+-----+------+-------+------+-------+
    | begin | end | quux | start | stop | value |
    +=======+=====+======+=======+======+=======+
    | 1     | 2   | 'a'  | 1     | 4    | 'foo' |
    +-------+-----+------+-------+------+-------+
    | 2     | 4   | 'b'  | 1     | 4    | 'foo' |
    +-------+-----+------+-------+------+-------+
    | 2     | 4   | 'b'  | 3     | 7    | 'bar' |
    +-------+-----+------+-------+------+-------+
    | 2     | 5   | 'c'  | 1     | 4    | 'foo' |
    +-------+-----+------+-------+------+-------+
    | 2     | 5   | 'c'  | 3     | 7    | 'bar' |
    +-------+-----+------+-------+------+-------+
    | 2     | 5   | 'c'  | 4     | 9    | 'baz' |
    +-------+-----+------+-------+------+-------+
    | 9     | 14  | 'd'  | None  | None | None  |
    +-------+-----+------+-------+------+-------+
    | 1     | 1   | 'e'  | None  | None | None  |
    +-------+-----+------+-------+------+-------+
    | 10    | 10  | 'f'  | None  | None | None  |
    +-------+-----+------+-------+------+-------+

Note start coordinates are included and stop coordinates are excluded from the interval. Use the `include_stop` keyword argument to include the upper bound of the interval when finding overlaps.
2.503453
2.713077
0.922736
assert (lkey is None) == (rkey is None), \
    'facet key field must be provided for both or neither table'
return IntervalAntiJoinView(left, right, lstart=lstart, lstop=lstop,
                            rstart=rstart, rstop=rstop, lkey=lkey,
                            rkey=rkey, include_stop=include_stop,
                            missing=missing)
def intervalantijoin(left, right, lstart='start', lstop='stop', rstart='start', rstop='stop', lkey=None, rkey=None, include_stop=False, missing=None)
Return rows from the `left` table with no overlapping rows from the `right` table. Note start coordinates are included and stop coordinates are excluded from the interval. Use the `include_stop` keyword argument to include the upper bound of the interval when finding overlaps.
2.897457
3.014638
0.961129
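A minimal sketch (illustrative data and output): only left rows overlapping nothing on the right survive::

    >>> import petl as etl
    >>> left = [['begin', 'end', 'quux'],
    ...         [1, 2, 'a'],
    ...         [2, 4, 'b'],
    ...         [9, 14, 'd']]
    >>> right = [['start', 'stop', 'value'],
    ...          [1, 4, 'foo'],
    ...          [3, 7, 'bar']]
    >>> etl.intervalantijoin(left, right, lstart='begin', lstop='end')
    +-------+-----+------+
    | begin | end | quux |
    +=======+=====+======+
    | 9     | 14  | 'd'  |
    +-------+-----+------+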
assert (lkey is None) == (rkey is None), \
    'facet key field must be provided for both or neither table'
if lkey is None:
    lkp = intervallookup(right, start=rstart, stop=rstop, value=value,
                         include_stop=include_stop)
    f = lambda row: lkp.search(row[lstart], row[lstop])
else:
    lkp = facetintervallookup(right, rkey, start=rstart, stop=rstop,
                              value=value, include_stop=include_stop)
    f = lambda row: lkp[row[lkey]].search(row[lstart], row[lstop])
return addfield(left, value, f)
def intervaljoinvalues(left, right, value, lstart='start', lstop='stop', rstart='start', rstop='stop', lkey=None, rkey=None, include_stop=False)
Convenience function to join the left table with values from a specific field in the right hand table. Note start coordinates are included and stop coordinates are excluded from the interval. Use the `include_stop` keyword argument to include the upper bound of the interval when finding overlaps.
3.382246
3.335873
1.013901
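A minimal sketch (illustrative data): each left row gains a field holding the list of matching right-hand values::

    >>> import petl as etl
    >>> left = [['begin', 'end'],
    ...         [1, 2],
    ...         [2, 4]]
    >>> right = [['start', 'stop', 'value'],
    ...          [1, 4, 'foo'],
    ...          [3, 7, 'bar']]
    >>> etl.intervaljoinvalues(left, right, 'value',
    ...                        lstart='begin', lstop='end')
    +-------+-----+----------------+
    | begin | end | value          |
    +=======+=====+================+
    | 1     | 2   | ['foo']        |
    +-------+-----+----------------+
    | 2     | 4   | ['foo', 'bar'] |
    +-------+-----+----------------+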
assert (lkey is None) == (rkey is None), \
    'facet key field must be provided for both or neither table'
return IntervalSubtractView(left, right, lstart=lstart, lstop=lstop,
                            rstart=rstart, rstop=rstop, lkey=lkey,
                            rkey=rkey, include_stop=include_stop)
def intervalsubtract(left, right, lstart='start', lstop='stop', rstart='start', rstop='stop', lkey=None, rkey=None, include_stop=False)
Subtract intervals in the right hand table from intervals in the left hand table.
3.327983
3.091532
1.076483
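A hedged sketch of the expected effect (illustrative data; per the _subtract helper further down, removing [3, 4) from [1, 6) should leave the fragments [1, 3) and [4, 6), though the exact output schema is best checked against the petl docs)::

    >>> import petl as etl
    >>> left = [['start', 'stop'],
    ...         [1, 6]]
    >>> right = [['start', 'stop'],
    ...          [3, 4]]
    >>> table = etl.intervalsubtract(left, right)
    >>> # expected fragments: (1, 3) and (4, 6)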
if key is None:
    table = sort(table, key=start)
    for iv in _collapse(values(table, (start, stop))):
        yield iv
else:
    table = sort(table, key=(key, start))
    for k, g in rowgroupby(table, key=key, value=(start, stop)):
        for iv in _collapse(g):
            yield (k,) + iv
def collapsedintervals(table, start='start', stop='stop', key=None)
Utility function to collapse intervals in a table. If no facet `key` is given, returns an iterator over `(start, stop)` tuples. If facet `key` is given, returns an iterator over `(key, start, stop)` tuples.
3.780619
3.877654
0.974976
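A minimal sketch (illustrative data): overlapping intervals merge into spans::

    >>> import petl as etl
    >>> table = [['start', 'stop'],
    ...          [1, 3],
    ...          [2, 5],
    ...          [7, 9]]
    >>> [tuple(iv) for iv in etl.collapsedintervals(table)]
    [(1, 5), (7, 9)]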
span = None
for start, stop in intervals:
    if span is None:
        span = _Interval(start, stop)
    elif start <= span.stop < stop:
        span = _Interval(span.start, stop)
    elif start > span.stop:
        yield span
        span = _Interval(start, stop)
if span is not None:
    yield span
def _collapse(intervals)
Collapse an iterable of intervals sorted by start coord.
2.233231
2.061756
1.08317
remainder_start = start
sub_stop = None
for sub_start, sub_stop in _collapse(intervals):
    if remainder_start < sub_start:
        yield _Interval(remainder_start, sub_start)
    remainder_start = sub_stop
if sub_stop is not None and sub_stop < stop:
    yield _Interval(sub_stop, stop)
def _subtract(start, stop, intervals)
Subtract intervals from a spanning interval.
2.882269
2.734144
1.054176
return FieldMapView(table, mappings=mappings, failonerror=failonerror, errorvalue=errorvalue)
def fieldmap(table, mappings=None, failonerror=False, errorvalue=None)
Transform a table, mapping fields arbitrarily between input and output. E.g.::

    >>> import petl as etl
    >>> from collections import OrderedDict
    >>> table1 = [['id', 'sex', 'age', 'height', 'weight'],
    ...           [1, 'male', 16, 1.45, 62.0],
    ...           [2, 'female', 19, 1.34, 55.4],
    ...           [3, 'female', 17, 1.78, 74.4],
    ...           [4, 'male', 21, 1.33, 45.2],
    ...           [5, '-', 25, 1.65, 51.9]]
    >>> mappings = OrderedDict()
    >>> # rename a field
    ... mappings['subject_id'] = 'id'
    >>> # translate a field
    ... mappings['gender'] = 'sex', {'male': 'M', 'female': 'F'}
    >>> # apply a calculation to a field
    ... mappings['age_months'] = 'age', lambda v: v * 12
    >>> # apply a calculation to a combination of fields
    ... mappings['bmi'] = lambda rec: rec['weight'] / rec['height']**2
    >>> # transform and inspect the output
    ... table2 = etl.fieldmap(table1, mappings)
    >>> table2
    +------------+--------+------------+--------------------+
    | subject_id | gender | age_months | bmi                |
    +============+========+============+====================+
    | 1          | 'M'    | 192        | 29.48870392390012  |
    +------------+--------+------------+--------------------+
    | 2          | 'F'    | 228        | 30.8531967030519   |
    +------------+--------+------------+--------------------+
    | 3          | 'F'    | 204        | 23.481883600555488 |
    +------------+--------+------------+--------------------+
    | 4          | 'M'    | 252        | 25.55260331279326  |
    +------------+--------+------------+--------------------+
    | 5          | '-'    | 300        | 19.0633608815427   |
    +------------+--------+------------+--------------------+

Note also that the mapping value can be an expression string, which will be converted to a lambda function via :func:`petl.util.base.expr`.
4.457997
6.649468
0.670429
return RowMapView(table, rowmapper, header, failonerror=failonerror)
def rowmap(table, rowmapper, header, failonerror=False)
Transform rows via an arbitrary function. E.g.::

    >>> import petl as etl
    >>> table1 = [['id', 'sex', 'age', 'height', 'weight'],
    ...           [1, 'male', 16, 1.45, 62.0],
    ...           [2, 'female', 19, 1.34, 55.4],
    ...           [3, 'female', 17, 1.78, 74.4],
    ...           [4, 'male', 21, 1.33, 45.2],
    ...           [5, '-', 25, 1.65, 51.9]]
    >>> def rowmapper(row):
    ...     transmf = {'male': 'M', 'female': 'F'}
    ...     return [row[0],
    ...             transmf[row['sex']] if row['sex'] in transmf else None,
    ...             row.age * 12,
    ...             row.height / row.weight ** 2]
    ...
    >>> table2 = etl.rowmap(table1, rowmapper,
    ...                     header=['subject_id', 'gender', 'age_months',
    ...                             'bmi'])
    >>> table2
    +------------+--------+------------+-----------------------+
    | subject_id | gender | age_months | bmi                   |
    +============+========+============+=======================+
    | 1          | 'M'    | 192        | 0.0003772112382934443 |
    +------------+--------+------------+-----------------------+
    | 2          | 'F'    | 228        | 0.0004366015456998006 |
    +------------+--------+------------+-----------------------+
    | 3          | 'F'    | 204        | 0.0003215689675106949 |
    +------------+--------+------------+-----------------------+
    | 4          | 'M'    | 252        | 0.0006509906805544679 |
    +------------+--------+------------+-----------------------+
    | 5          | None   | 300        | 0.0006125608384287258 |
    +------------+--------+------------+-----------------------+

The `rowmapper` function should accept a single row and return a single row (list or tuple).
6.157611
13.36199
0.46083
return RowMapManyView(table, rowgenerator, header, failonerror=failonerror)
def rowmapmany(table, rowgenerator, header, failonerror=False)
Map each input row to any number of output rows via an arbitrary function. E.g.::

    >>> import petl as etl
    >>> table1 = [['id', 'sex', 'age', 'height', 'weight'],
    ...           [1, 'male', 16, 1.45, 62.0],
    ...           [2, 'female', 19, 1.34, 55.4],
    ...           [3, '-', 17, 1.78, 74.4],
    ...           [4, 'male', 21, 1.33]]
    >>> def rowgenerator(row):
    ...     transmf = {'male': 'M', 'female': 'F'}
    ...     yield [row[0], 'gender',
    ...            transmf[row['sex']] if row['sex'] in transmf else None]
    ...     yield [row[0], 'age_months', row.age * 12]
    ...     yield [row[0], 'bmi', row.height / row.weight ** 2]
    ...
    >>> table2 = etl.rowmapmany(table1, rowgenerator,
    ...                         header=['subject_id', 'variable', 'value'])
    >>> table2.lookall()
    +------------+--------------+-----------------------+
    | subject_id | variable     | value                 |
    +============+==============+=======================+
    | 1          | 'gender'     | 'M'                   |
    +------------+--------------+-----------------------+
    | 1          | 'age_months' | 192                   |
    +------------+--------------+-----------------------+
    | 1          | 'bmi'        | 0.0003772112382934443 |
    +------------+--------------+-----------------------+
    | 2          | 'gender'     | 'F'                   |
    +------------+--------------+-----------------------+
    | 2          | 'age_months' | 228                   |
    +------------+--------------+-----------------------+
    | 2          | 'bmi'        | 0.0004366015456998006 |
    +------------+--------------+-----------------------+
    | 3          | 'gender'     | None                  |
    +------------+--------------+-----------------------+
    | 3          | 'age_months' | 204                   |
    +------------+--------------+-----------------------+
    | 3          | 'bmi'        | 0.0003215689675106949 |
    +------------+--------------+-----------------------+
    | 4          | 'gender'     | 'M'                   |
    +------------+--------------+-----------------------+
    | 4          | 'age_months' | 252                   |
    +------------+--------------+-----------------------+

The `rowgenerator` function should accept a single row and yield zero or more rows (lists or tuples).

See also the :func:`petl.transform.reshape.melt` function.
5.314515
8.245671
0.644522
return RowGroupMapView(table, key, mapper, header=header, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache)
def rowgroupmap(table, key, mapper, header=None, presorted=False, buffersize=None, tempdir=None, cache=True)
Group rows under the given key then apply `mapper` to yield zero or more output rows for each input group of rows.
2.590383
3.17411
0.816097
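A minimal sketch, assuming the mapper receives the group key plus an iterable of the group's rows and yields output rows (the mapper signature and example data are assumptions to verify against the petl docs)::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'],
    ...           ['a', 1],
    ...           ['a', 2],
    ...           ['b', 3]]
    >>> def mapper(key, rows):
    ...     yield [key, sum(row[1] for row in rows)]
    ...
    >>> etl.rowgroupmap(table1, 'foo', mapper, header=['foo', 'total'])
    +-----+-------+
    | foo | total |
    +=====+=======+
    | 'a' | 3     |
    +-----+-------+
    | 'b' | 3     |
    +-----+-------+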
return UnpackView(table, field, newfields=newfields, include_original=include_original, missing=missing)
def unpack(table, field, newfields=None, include_original=False, missing=None)
Unpack data values that are lists or tuples. E.g.::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'],
    ...           [1, ['a', 'b']],
    ...           [2, ['c', 'd']],
    ...           [3, ['e', 'f']]]
    >>> table2 = etl.unpack(table1, 'bar', ['baz', 'quux'])
    >>> table2
    +-----+-----+------+
    | foo | baz | quux |
    +=====+=====+======+
    | 1   | 'a' | 'b'  |
    +-----+-----+------+
    | 2   | 'c' | 'd'  |
    +-----+-----+------+
    | 3   | 'e' | 'f'  |
    +-----+-----+------+

This function will attempt to unpack exactly the number of values as given by the number of new fields specified. If there are more values than new fields, remaining values will not be unpacked. If there are fewer values than new fields, `missing` values will be added.

See also :func:`petl.transform.unpacks.unpackdict`.
3.513766
6.470906
0.54301
return UnpackDictView(table, field, keys=keys, includeoriginal=includeoriginal, samplesize=samplesize, missing=missing)
def unpackdict(table, field, keys=None, includeoriginal=False, samplesize=1000, missing=None)
Unpack dictionary values into separate fields. E.g.::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'],
    ...           [1, {'baz': 'a', 'quux': 'b'}],
    ...           [2, {'baz': 'c', 'quux': 'd'}],
    ...           [3, {'baz': 'e', 'quux': 'f'}]]
    >>> table2 = etl.unpackdict(table1, 'bar')
    >>> table2
    +-----+-----+------+
    | foo | baz | quux |
    +=====+=====+======+
    | 1   | 'a' | 'b'  |
    +-----+-----+------+
    | 2   | 'c' | 'd'  |
    +-----+-----+------+
    | 3   | 'e' | 'f'  |
    +-----+-----+------+

See also :func:`petl.transform.unpacks.unpack`.
2.9298
4.853331
0.603668
missing = kwargs.get('missing', None)
complement = kwargs.get('complement', False)
if len(args) == 0:
    raise ArgumentError('missing positional argument')
elif len(args) == 1:
    where = args[0]
    if isinstance(where, string_types):
        where = expr(where)
    else:
        assert callable(where), 'second argument must be string or callable'
    return RowSelectView(table, where, missing=missing,
                         complement=complement)
else:
    field = args[0]
    where = args[1]
    assert callable(where), 'third argument must be callable'
    return FieldSelectView(table, field, where, complement=complement,
                           missing=missing)
def select(table, *args, **kwargs)
Select rows meeting a condition. E.g.::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar', 'baz'],
    ...           ['a', 4, 9.3],
    ...           ['a', 2, 88.2],
    ...           ['b', 1, 23.3],
    ...           ['c', 8, 42.0],
    ...           ['d', 7, 100.9],
    ...           ['c', 2]]
    >>> # the second positional argument can be a function accepting
    ... # a row
    ... table2 = etl.select(table1,
    ...                     lambda rec: rec.foo == 'a' and rec.baz > 88.1)
    >>> table2
    +-----+-----+------+
    | foo | bar | baz  |
    +=====+=====+======+
    | 'a' | 2   | 88.2 |
    +-----+-----+------+
    >>> # the second positional argument can also be an expression
    ... # string, which will be converted to a function using petl.expr()
    ... table3 = etl.select(table1, "{foo} == 'a' and {baz} > 88.1")
    >>> table3
    +-----+-----+------+
    | foo | bar | baz  |
    +=====+=====+======+
    | 'a' | 2   | 88.2 |
    +-----+-----+------+
    >>> # the condition can also be applied to a single field
    ... table4 = etl.select(table1, 'foo', lambda v: v == 'a')
    >>> table4
    +-----+-----+------+
    | foo | bar | baz  |
    +=====+=====+======+
    | 'a' | 4   | 9.3  |
    +-----+-----+------+
    | 'a' | 2   | 88.2 |
    +-----+-----+------+

The complement of the selection can be returned (i.e., the query can be inverted) by providing `complement=True` as a keyword argument.
2.833642
2.590976
1.093658
where = lambda row: len(row) == n
return select(table, where, complement=complement)
def rowlenselect(table, n, complement=False)
Select rows of length `n`.
7.048186
5.083992
1.386349
return select(table, field, lambda v: op(v, value), complement=complement)
def selectop(table, field, value, op, complement=False)
Select rows where the function `op` applied to the given field and the given value returns `True`.
6.291781
7.622236
0.825451
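A minimal sketch using a stdlib operator (example data and rendered output are illustrative)::

    >>> import operator
    >>> import petl as etl
    >>> table1 = [['foo', 'bar'],
    ...           ['a', 1],
    ...           ['b', 4],
    ...           ['c', 2]]
    >>> etl.selectop(table1, 'bar', 2, operator.gt)
    +-----+-----+
    | foo | bar |
    +=====+=====+
    | 'b' | 4   |
    +-----+-----+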
return selectop(table, field, value, operator.eq, complement=complement)
def selecteq(table, field, value, complement=False)
Select rows where the given field equals the given value.
6.233254
7.270361
0.857351
return selectop(table, field, value, operator.ne, complement=complement)
def selectne(table, field, value, complement=False)
Select rows where the given field does not equal the given value.
7.012259
7.707438
0.909804
value = Comparable(value)
return selectop(table, field, value, operator.lt, complement=complement)
def selectlt(table, field, value, complement=False)
Select rows where the given field is less than the given value.
7.669435
7.390989
1.037674
value = Comparable(value)
return selectop(table, field, value, operator.le, complement=complement)
def selectle(table, field, value, complement=False)
Select rows where the given field is less than or equal to the given value.
9.278999
8.591363
1.080038
value = Comparable(value)
return selectop(table, field, value, operator.gt, complement=complement)
def selectgt(table, field, value, complement=False)
Select rows where the given field is greater than the given value.
8.314805
7.750395
1.072823
value = Comparable(value)
return selectop(table, field, value, operator.ge, complement=complement)
def selectge(table, field, value, complement=False)
Select rows where the given field is greater than or equal to the given value.
7.952013
8.691435
0.914925
return selectop(table, field, value, operator.contains, complement=complement)
def selectcontains(table, field, value, complement=False)
Select rows where the given field contains the given value.
7.827554
8.892768
0.880216
return select(table, field, lambda v: v in value, complement=complement)
def selectin(table, field, value, complement=False)
Select rows where the given field is a member of the given value.
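For example (the table literal is illustrative only)::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'],
    ...           ['a', 1],
    ...           ['b', 2],
    ...           ['c', 3]]
    >>> table2 = etl.selectin(table1, 'foo', {'a', 'c'})  # rows where foo is 'a' or 'c'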
8.153592
9.041647
0.901782
return select(table, field, lambda v: v not in value, complement=complement)
def selectnotin(table, field, value, complement=False)
Select rows where the given field is not a member of the given value.
6.674954
7.783082
0.857624
return selectop(table, field, value, operator.is_, complement=complement)
def selectis(table, field, value, complement=False)
Select rows where the given field `is` the given value.
7.490405
7.694666
0.973454
return selectop(table, field, value, operator.is_not, complement=complement)
def selectisnot(table, field, value, complement=False)
Select rows where the given field `is not` the given value.
6.578506
6.745807
0.975199
return selectop(table, field, value, isinstance, complement=complement)
def selectisinstance(table, field, value, complement=False)
Select rows where the given field is an instance of the given type.
9.745914
10.954898
0.88964
minv = Comparable(minv)
maxv = Comparable(maxv)
return select(table, field, lambda v: minv <= v < maxv,
              complement=complement)
def selectrangeopenleft(table, field, minv, maxv, complement=False)
Select rows where the given field is greater than or equal to `minv` and less than `maxv`.
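A worked example (the table literal is illustrative only); note the interval is closed at `minv` and open at `maxv`::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar'],
    ...           ['a', 1],
    ...           ['b', 3],
    ...           ['c', 5]]
    >>> table2 = etl.selectrangeopenleft(table1, 'bar', 1, 5)  # keeps bar == 1 and bar == 3; excludes bar == 5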
4.037148
4.449415
0.907344
return select(table, field, lambda v: bool(v), complement=complement)
def selecttrue(table, field, complement=False)
Select rows where the given field evaluates to `True` (i.e., is truthy).
6.643557
6.127656
1.084192
return select(table, field, lambda v: not bool(v), complement=complement)
def selectfalse(table, field, complement=False)
Select rows where the given field evaluates to `False` (i.e., is falsy).
7.760404
7.418283
1.046119
return select(table, field, lambda v: v is None, complement=complement)
def selectnone(table, field, complement=False)
Select rows where the given field is `None`.
6.317542
6.066131
1.041445
return select(table, field, lambda v: v is not None, complement=complement)
def selectnotnone(table, field, complement=False)
Select rows where the given field is not `None`.
6.734931
6.751173
0.997594
fct = dict()
for v in set(values(table, key)):
    fct[v] = selecteq(table, key, v)
return fct
def facet(table, key)
Return a dictionary mapping field values to tables. E.g.::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar', 'baz'],
    ...           ['a', 4, 9.3],
    ...           ['a', 2, 88.2],
    ...           ['b', 1, 23.3],
    ...           ['c', 8, 42.0],
    ...           ['d', 7, 100.9],
    ...           ['c', 2]]
    >>> foo = etl.facet(table1, 'foo')
    >>> sorted(foo.keys())
    ['a', 'b', 'c', 'd']
    >>> foo['a']
    +-----+-----+------+
    | foo | bar | baz  |
    +=====+=====+======+
    | 'a' | 4   | 9.3  |
    +-----+-----+------+
    | 'a' | 2   | 88.2 |
    +-----+-----+------+

    >>> foo['c']
    +-----+-----+------+
    | foo | bar | baz  |
    +=====+=====+======+
    | 'c' | 8   | 42.0 |
    +-----+-----+------+
    | 'c' | 2   |      |
    +-----+-----+------+

See also :func:`petl.util.materialise.facetcolumns`.
6.31308
8.748895
0.721586
# override complement kwarg
kwargs['complement'] = False
t1 = select(table, *args, **kwargs)
kwargs['complement'] = True
t2 = select(table, *args, **kwargs)
return t1, t2
def biselect(table, *args, **kwargs)
Return two tables, the first containing selected rows, the second containing remaining rows. E.g.::

    >>> import petl as etl
    >>> table1 = [['foo', 'bar', 'baz'],
    ...           ['a', 4, 9.3],
    ...           ['a', 2, 88.2],
    ...           ['b', 1, 23.3],
    ...           ['c', 8, 42.0],
    ...           ['d', 7, 100.9],
    ...           ['c', 2]]
    >>> table2, table3 = etl.biselect(table1, lambda rec: rec.foo == 'a')
    >>> table2
    +-----+-----+------+
    | foo | bar | baz  |
    +=====+=====+======+
    | 'a' | 4   | 9.3  |
    +-----+-----+------+
    | 'a' | 2   | 88.2 |
    +-----+-----+------+

    >>> table3
    +-----+-----+-------+
    | foo | bar | baz   |
    +=====+=====+=======+
    | 'b' | 1   | 23.3  |
    +-----+-----+-------+
    | 'c' | 8   | 42.0  |
    +-----+-----+-------+
    | 'd' | 7   | 100.9 |
    +-----+-----+-------+
    | 'c' | 2   |       |
    +-----+-----+-------+

.. versionadded:: 1.1.0
3.935748
5.914978
0.665387
try:
    s = io.StringIO()
    old = getattr(sys, stdchannel)
    setattr(sys, stdchannel, s)
    yield s
finally:
    setattr(sys, stdchannel, old)
def stdchannel_redirected(stdchannel)
Redirects stdout or stderr to a StringIO object. As of Python 3.4 the standard library ships a context manager for this (:func:`contextlib.redirect_stdout`), but this helper is kept for backwards compatibility.
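A usage sketch, assuming the function is decorated with :func:`contextlib.contextmanager` (the decorator is not shown in the extract above)::

    >>> with stdchannel_redirected('stdout') as s:
    ...     print('hello')
    >>> s.getvalue()
    'hello\n'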
2.929929
2.335031
1.254771
return (
    Pattern(param)
    if isinstance(param, str)
    else param if param is not None else Null()
)
def load(param)
If the supplied parameter is a string, assume it's a simple wildcard pattern and wrap it in a :class:`Pattern`; pass other values through unchanged, substituting :class:`Null` for ``None``.
7.995796
5.960632
1.341434
saved_results = {}

def wrapper(cls, module):
    # compute once per module, then serve from the cache
    if module in saved_results:
        return saved_results[module]
    saved_results[module] = func(cls, module)
    return saved_results[module]

return wrapper
def simple_cache(func)
Save results for the :meth:`path.using_module` classmethod. Once Python 3.2 can be assumed, use :func:`functools.lru_cache` instead.
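A sketch of the intended use; ``find_module_path`` and ``expensive_lookup`` are hypothetical names::

    >>> class Finder:
    ...     @classmethod
    ...     @simple_cache
    ...     def find_module_path(cls, module):
    ...         # hypothetical expensive work, performed once per module
    ...         return expensive_lookup(module)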
2.919243
2.417974
1.20731
@functools.wraps(copy_func)
def wrapper(src, dst, *args, **kwargs):
    is_newer_dst = (
        dst.exists()
        and dst.getmtime() >= src.getmtime()
    )
    if is_newer_dst:
        # skip the copy; dst is already up to date
        return dst
    return copy_func(src, dst, *args, **kwargs)

return wrapper
def only_newer(copy_func)
Wrap a copy function (like :func:`shutil.copy2`) to skip the copy and return `dst` when `dst` already exists and is at least as new as `src`.
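For example (a sketch; `src` and `dst` must be Path-like objects providing ``exists()`` and ``getmtime()``)::

    >>> import shutil
    >>> copy_if_newer = only_newer(shutil.copy2)
    >>> copy_if_newer(Path('a.txt'), Path('b.txt'))  # no-op if b.txt is newer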
2.146565
2.133539
1.006105
def compose(f, g):
    return lambda *args, **kwargs: g(f(*args, **kwargs))

return functools.reduce(compose, map(_permission_mask, mode.split(',')))
def _multi_permission_mask(mode)
Support multiple, comma-separated Unix chmod symbolic modes.

>>> _multi_permission_mask('a=r,u+w')(0) == 0o644
True
4.554023
4.969055
0.916477
# parse the symbolic mode
parsed = re.match('(?P<who>[ugoa]+)(?P<op>[-+=])(?P<what>[rwx]*)$', mode)
if not parsed:
    raise ValueError("Unrecognized symbolic mode", mode)

# generate a mask representing the specified permission
spec_map = dict(r=4, w=2, x=1)
specs = (spec_map[perm] for perm in parsed.group('what'))
spec = functools.reduce(operator.or_, specs, 0)

# now apply spec to each subject in who
shift_map = dict(u=6, g=3, o=0)
who = parsed.group('who').replace('a', 'ugo')
masks = (spec << shift_map[subj] for subj in who)
mask = functools.reduce(operator.or_, masks)

op = parsed.group('op')

# if op is -, invert the mask
if op == '-':
    mask ^= 0o777

# if op is =, retain extant values for unreferenced subjects
if op == '=':
    masks = (0o7 << shift_map[subj] for subj in who)
    retain = functools.reduce(operator.or_, masks) ^ 0o777

op_map = {
    '+': operator.or_,
    '-': operator.and_,
    '=': lambda mask, target: target & retain ^ mask,
}
return functools.partial(op_map[op], mask)
def _permission_mask(mode)
Convert a Unix chmod symbolic mode like ``'ugo+rwx'`` to a function suitable for applying to a mask to affect that change.

>>> mask = _permission_mask('ugo+rwx')
>>> mask(0o554) == 0o777
True

>>> _permission_mask('go-x')(0o777) == 0o766
True

>>> _permission_mask('o-x')(0o445) == 0o444
True

>>> _permission_mask('a+x')(0) == 0o111
True

>>> _permission_mask('a=rw')(0o057) == 0o666
True

>>> _permission_mask('u=x')(0o666) == 0o166
True

>>> _permission_mask('g=')(0o157) == 0o107
True
3.948083
3.920299
1.007087
base, ext = self.module.splitext(self.name)
return base
def stem(self)
The same as :meth:`name`, but with one file extension stripped off.

>>> Path('/home/guido/python.tar.gz').stem
'python.tar'
16.9221
16.700064
1.013296
if not suffix.startswith('.'):
    raise ValueError("Invalid suffix {suffix!r}".format(**locals()))
return self.stripext() + suffix
def with_suffix(self, suffix)
Return a new path with the file suffix changed (or added, if none).

>>> Path('/home/guido/python.tar.gz').with_suffix(".foo")
Path('/home/guido/python.tar.foo')

>>> Path('python').with_suffix('.zip')
Path('python.zip')

>>> Path('filename.ext').with_suffix('zip')
Traceback (most recent call last):
...
ValueError: Invalid suffix 'zip'
7.02001
8.879272
0.790606
drive, r = self.module.splitdrive(self)
return self._next_class(drive)
def drive(self)
The drive specifier, for example ``'C:'``. This is always empty on systems that don't use drive specifiers.
37.829712
32.081661
1.179169
parent, child = self.module.split(self)
return self._next_class(parent), child
def splitpath(self)
p.splitpath() -> Return ``(p.parent, p.name)``.

.. seealso:: :attr:`parent`, :attr:`name`, :func:`os.path.split`
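A quick illustration (assuming a POSIX os.path module)::

    >>> Path('/home/guido/python.tar.gz').splitpath()
    (Path('/home/guido'), 'python.tar.gz')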
16.558867
28.967291
0.57164
drive, rel = self.module.splitdrive(self)
return self._next_class(drive), rel
def splitdrive(self)
p.splitdrive() -> Return ``(p.drive, <the rest of p>)``.

Split the drive specifier from this path. If there is no drive specifier, :samp:`{p.drive}` is empty, so the return value is simply ``(Path(''), p)``. This is always the case on Unix.

.. seealso:: :func:`os.path.splitdrive`
18.075617
33.669319
0.536857
filename, ext = self.module.splitext(self)
return self._next_class(filename), ext
def splitext(self)
p.splitext() -> Return ``(p.stripext(), p.ext)``.

Split the filename extension from this path and return the two parts. Either part may be empty. The extension is everything from ``'.'`` to the end of the last path segment. This has the property that if ``(a, b) == p.splitext()``, then ``a + b == p``.

.. seealso:: :func:`os.path.splitext`
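For example, only the last extension is split off::

    >>> Path('/home/guido/python.tar.gz').splitext()
    (Path('/home/guido/python.tar'), '.gz')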
17.18041
26.351837
0.651963
unc, rest = self.module.splitunc(self)
return self._next_class(unc), rest
def splitunc(self)
p.splitunc() -> Return ``(p.uncshare, <the rest of p>)``.

.. seealso:: :func:`os.path.splitunc`
12.645357
11.880182
1.064408
unc, r = self.module.splitunc(self)
return self._next_class(unc)
def uncshare(self)
The UNC mount point for this path. This is empty for paths on local drives.
24.855066
25.644112
0.969231
if not isinstance(first, cls):
    first = cls(first)
return first._next_class(first.module.join(first, *others))
def joinpath(cls, first, *others)
Join first to zero or more :class:`Path` components, adding a separator character (:samp:`{first}.module.sep`) if needed. Returns a new instance of :samp:`{first}._next_class`.

.. seealso:: :func:`os.path.join`
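For example (assuming a POSIX os.path module)::

    >>> Path.joinpath('/home', 'guido', 'python')
    Path('/home/guido/python')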
6.218543
4.85425
1.281051
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
    prev = loc
    loc, child = prev.splitpath()
    if loc == prev:
        break
    parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def splitall(self)
r""" Return a list of the path components in this path. The first item in the list will be a Path. Its value will be either :data:`os.curdir`, :data:`os.pardir`, empty, or the root directory of this path (for example, ``'/'`` or ``'C:\\'``). The other items in the list will be strings. ``path.Path.joinpath(*result)`` will yield the original path.
4.70143
4.489499
1.047206
cwd = self._next_class(start)
return cwd.relpathto(self)
def relpath(self, start='.')
Return this path as a relative path, computed relative to `start`, which defaults to the current working directory.
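A sketch of the expected behaviour (this delegates to ``relpathto``, which is not shown in the extract; assuming a POSIX system)::

    >>> Path('/home/guido/python').relpath('/home')
    Path('guido/python')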
16.378057
15.029567
1.089723
match = matchers.load(match)
return list(filter(match, (
    self / child
    for child in os.listdir(self)
)))
def listdir(self, match=None)
D.listdir() -> List of items in this directory.

Use :meth:`files` or :meth:`dirs` instead if you want a listing of just files or just subdirectories.

The elements of the list are Path objects.

With the optional `match` argument (a pattern string or a callable, per the ``load`` helper above), only return items whose names match.

.. seealso:: :meth:`files`, :meth:`dirs`
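A sketch; the directory name and its contents are hypothetical::

    >>> Path('src').listdir(match='*.py')
    [Path('src/a.py'), Path('src/b.py')]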
9.638621
15.635053
0.616475
return [p for p in self.listdir(*args, **kwargs) if p.isdir()]
def dirs(self, *args, **kwargs)
D.dirs() -> List of this directory's subdirectories. The elements of the list are Path objects. This does not walk recursively into subdirectories (but see :meth:`walkdirs`). Accepts parameters to :meth:`listdir`.
5.198133
5.164109
1.006589
return [p for p in self.listdir(*args, **kwargs) if p.isfile()]
def files(self, *args, **kwargs)
D.files() -> List of the files in this directory. The elements of the list are Path objects. This does not walk into subdirectories (see :meth:`walkfiles`). Accepts parameters to :meth:`listdir`.
7.403533
5.514875
1.342466
class Handlers:
    def strict(msg):
        raise

    def warn(msg):
        warnings.warn(msg, TreeWalkWarning)

    def ignore(msg):
        pass

if not callable(errors) and errors not in vars(Handlers):
    raise ValueError("invalid errors parameter")
errors = vars(Handlers).get(errors, errors)

match = matchers.load(match)

try:
    childList = self.listdir()
except Exception:
    exc = sys.exc_info()[1]
    tmpl = "Unable to list directory '%(self)s': %(exc)s"
    msg = tmpl % locals()
    errors(msg)
    return

for child in childList:
    if match(child):
        yield child
    try:
        isdir = child.isdir()
    except Exception:
        exc = sys.exc_info()[1]
        tmpl = "Unable to access '%(child)s': %(exc)s"
        msg = tmpl % locals()
        errors(msg)
        isdir = False

    if isdir:
        for item in child.walk(errors=errors, match=match):
            yield item
def walk(self, match=None, errors='strict')
D.walk() -> iterator over files and subdirs, recursively.

The iterator yields Path objects naming each child item of this directory and its descendants. This requires that ``D.isdir()``.

This performs a depth-first traversal of the directory tree. Each directory is returned just before all its children.

The `errors=` keyword argument controls behavior when an error occurs. The default is ``'strict'``, which causes an exception. Other allowed values are ``'warn'`` (which reports the error via :func:`warnings.warn()`), and ``'ignore'``. `errors` may also be an arbitrary callable taking a msg parameter.
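For example, warning on unreadable directories instead of failing (the directory tree is hypothetical); note that `match` filters what is yielded but does not prune the traversal::

    >>> for item in Path('src').walk(match='*.py', errors='warn'):
    ...     print(item)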
3.184897
3.028873
1.051512
return ( item for item in self.walk(*args, **kwargs) if item.isdir() )
def walkdirs(self, *args, **kwargs)
D.walkdirs() -> iterator over subdirs, recursively.
5.196843
3.941501
1.318493
return ( item for item in self.walk(*args, **kwargs) if item.isfile() )
def walkfiles(self, *args, **kwargs)
D.walkfiles() -> iterator over files in D, recursively.
5.066426
4.054044
1.249722