index | package | name | docstring | code | signature |
---|---|---|---|---|---|
69,722 |
fastparquet.api
|
__len__
|
Return number of row groups.
|
def __len__(self):
"""Return number of row groups."""
if self.fmd.row_groups:
return len(self.fmd.row_groups)
else:
return 0
|
(self)
|
69,723 |
fastparquet.api
|
__str__
| null |
def __str__(self):
return "<Parquet File: %s>" % self.info
|
(self)
|
69,724 |
fastparquet.api
|
__setstate__
| null |
def __setstate__(self, state):
self.__dict__.update(state)
# Decode 'file_path'.
rgs = self.fmd[4] or []
# 4th condition should not be necessary, depends on 'deepcopy' version.
# https://github.com/dask/fastparquet/pull/731#issuecomment-1013507287
if (rgs and rgs[0][1] and rgs[0][1][0] and rgs[0][1][0].get(1)
and isinstance(rgs[0][1][0].get(1), bytes)):
# for rg in fmd.row_groups:
for rg in rgs:
# chunk = rg.columns[0]
chunk = rg[1][0]
# chunk.file_path = chunk.file_path.decode()
chunk[1] = chunk.get(1).decode()
self._set_attrs()
|
(self, state)
|
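A hedged round-trip sketch: `__setstate__` is what restores a pickled `ParquetFile` and re-decodes any bytes-valued `file_path`. Assuming `pf` is an open `ParquetFile`:

import pickle

pf2 = pickle.loads(pickle.dumps(pf))  # __setstate__ runs on load
assert len(pf2) == len(pf)            # row-group count preserved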
69,726 |
fastparquet.api
|
_column_filter
| null |
def _column_filter(self, df, filters):
out = np.zeros(len(df), dtype=bool)
for or_part in filters:
if isinstance(or_part[0], str):
name, op, val = or_part
if name in self.cats:
continue
if op == 'in':
out |= df[name].isin(val).values
elif op == "not in":
out |= ~df[name].isin(val).values
elif op in ops:
out |= ops[op](df[name], val).values
elif op == "~":
out |= ~df[name].values
else:
and_part = np.ones(len(df), dtype=bool)
for name, op, val in or_part:
if name in self.cats:
continue
if op == 'in':
and_part &= df[name].isin(val).values
elif op == "not in":
and_part &= ~df[name].isin(val).values
elif op in ops:
and_part &= ops[op](df[name].values, val)
elif op == "~":
and_part &= ~df[name].values
out |= and_part
return out
|
(self, df, filters)
|
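A minimal sketch of the filter structure this method evaluates: the outer list is OR-combined and each inner list is AND-combined. `pf`, `df` and the column names `x` and `y` are assumptions for illustration:

# rows where (x > 0 AND y in {"a", "b"}) OR x == -1
filters = [
    [("x", ">", 0), ("y", "in", ["a", "b"])],
    [("x", "==", -1)],
]
mask = pf._column_filter(df, filters)  # boolean numpy array of length len(df)
subset = df[mask]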
69,727 |
fastparquet.api
|
_columns_from_filters
| null |
def _columns_from_filters(self, filters):
return [
c for c in
set(sum([[f[0]]
if isinstance(f[0], str)
else [g[0] for g in f] for f in filters], []))
if c not in self.cats
]
|
(self, filters)
|
69,728 |
fastparquet.api
|
_dtypes
|
Implied types of the columns in the schema
|
def _dtypes(self, categories=None):
""" Implied types of the columns in the schema """
import pandas as pd
if self._base_dtype is None:
if self.has_pandas_metadata:
md = self.pandas_metadata['columns']
md = {c['name']: c for c in md}
tz = {k: v["metadata"]['timezone'] for k, v in md.items()
if v.get('metadata', {}) and v.get('metadata', {}).get('timezone', None)}
else:
tz = None
md = None
self.tz = tz
dtype = OrderedDict((name, (converted_types.typemap(f, md=md)
if f.num_children in [None, 0] else np.dtype("O")))
for name, f in self.schema.root["children"].items()
if getattr(f, 'isflat', False) is False)
for i, (col, dt) in enumerate(dtype.copy().items()):
# int and bool columns produce masked pandas types, no need to
# promote types here
if dt.kind == "M":
if self.pandas_metadata and PANDAS_VERSION.major >= 2:
# get original resolution when pandas supports non-ns
dt = md[col]["numpy_type"]
if tz is not None and tz.get(col, False):
z = dataframe.tz_to_dt_tz(tz[col])
dt_series = pd.Series([], dtype=dt)
if PANDAS_VERSION.major >= 2 and dt_series.dt.tz is not None:
dt = dt_series.dt.tz_convert(z).dtype
else:
dt = dt_series.dt.tz_localize(z).dtype
dtype[col] = dt
elif dt in converted_types.nullable:
if self.pandas_metadata:
tt = md.get(col, {}).get("numpy_type")
if tt and ("int" in tt or "bool" in tt):
continue
# uint/int/bool columns that may have nulls become nullable
# skip if pandas_metadata gives original types
num_nulls = 0
for rg in self.row_groups:
if rg[3] == 0:
continue
st = rg[1][i][3].get(12)
if st is None:
num_nulls = True
break
if st.get(3):
num_nulls = True
break
if num_nulls:
if self.pandas_nulls:
dtype[col] = converted_types.nullable[dt]
else:
dtype[col] = np.dtype("float64")
elif dt == 'S12':
dtype[col] = 'M8[ns]'
self._base_dtype = dtype
dtype = self._base_dtype.copy()
categories = self.check_categories(categories)
for field in categories:
dtype[field] = 'category'
for cat in self.cats:
dtype[cat] = "category"
self.dtypes = dtype
return dtype
|
(self, categories=None)
|
69,729 |
fastparquet.api
|
_get_index
| null |
def _get_index(self, index=None):
if index is None:
index = [i if isinstance(i, str) else i["name"]
for i in self.pandas_metadata.get('index_columns', [])
if isinstance(i, str) or i.get("kind") != "range"
]
if isinstance(index, str):
index = [index]
return index
|
(self, index=None)
|
69,730 |
fastparquet.api
|
_parse_header
| null |
def _parse_header(self, f, verify=True):
if self.fn and self.fn.endswith("_metadata"):
# no point attempting to read footer only for pure metadata
data = f.read()[4:-8]
self._head_size = len(data)
else:
try:
f.seek(0)
if verify:
assert f.read(4) == b'PAR1'
f.seek(-8, 2)
head_size = struct.unpack('<I', f.read(4))[0]
if verify:
assert f.read() == b'PAR1'
self._head_size = head_size
f.seek(-(head_size + 8), 2)
data = f.read(head_size)
except (AssertionError, struct.error):
raise ParquetException('File parse failed: %s' % self.fn)
try:
fmd = from_buffer(data, "FileMetaData")
except Exception:
raise ParquetException('Metadata parse failed: %s' % self.fn)
# for rg in fmd.row_groups:
for rg in fmd[4]:
# chunks = rg.columns
chunks = rg[1]
if chunks:
chunk = chunks[0]
# s = chunk.file_path
s = chunk.get(1)
if s:
# chunk.file_path = s.decode()
chunk[1] = s.decode()
self.fmd = fmd
self._set_attrs()
|
(self, f, verify=True)
|
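The footer layout `_parse_header` relies on: a parquet file ends with the serialized FileMetaData, a 4-byte little-endian metadata length, and the magic bytes PAR1. A stand-alone sketch of locating the footer, assuming a local file path `fn`:

import struct

with open(fn, "rb") as f:
    f.seek(-8, 2)                                  # last 8 bytes: <size><PAR1>
    head_size = struct.unpack('<I', f.read(4))[0]  # little-endian uint32
    assert f.read(4) == b'PAR1'                    # trailing magic
    f.seek(-(head_size + 8), 2)                    # back over footer + trailer
    footer_bytes = f.read(head_size)               # raw thrift FileMetaData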
69,731 |
fastparquet.api
|
_read_partitions
| null |
def _read_partitions(self):
# paths = [rg.columns[0].file_path] ... if rg.columns]
paths = [rg[1][0].get(1, "") for rg in self.row_groups if rg[1]]
self.file_scheme, self.cats = paths_to_cats(paths, self.partition_meta)
|
(self)
|
69,732 |
fastparquet.api
|
_set_attrs
| null |
def _set_attrs(self):
fmd = self.fmd
self.version = fmd.version
self._schema = fmd.schema
self.row_groups = fmd.row_groups or []
self.created_by = fmd.created_by
self.schema = schema.SchemaHelper(self._schema)
self.selfmade = (
b"fastparquet" in self.created_by if self.created_by is not None
else False
)
self._read_partitions()
self._dtypes()
|
(self)
|
69,733 |
fastparquet.api
|
_sort_part_names
|
Align parquet file ids to that of the first row group they contain.
This method only manages files whose name follows the pattern
"part.{id}.parquet". The `id` field is then aligned to the index of the
first row group the file contains. The index of a row group is its
position in the row group list.
Parameters
----------
write_fmd : bool, default True
Write updated common metadata to disk.
open_with : function
When called with a f(path, mode), returns an open file-like object.
Only needed if `write_fmd` is `True`.
|
def _sort_part_names(self, write_fmd:bool=True, open_with=default_open):
"""Align parquet files id to that of the first row group they contain.
This method only manages files which name follows pattern
"part.{id}.parquet". Field `id` is then aligned to the index of the
first row group it contains. The index of a row groups is its position
in row group list.
Parameters
----------
write_fmd : bool, default True
Write updated common metadata to disk.
open_with : function
When called with a f(path, mode), returns an open file-like object.
Only needed if `write_fmd` is `True`.
"""
from .writer import part_ids
pids = part_ids(self.fmd.row_groups)
if pids:
# Keep only items for which row group position does not match part
# name id.
pids = dict(filter(lambda item: item[0] != item[1][0],
pids.items()))
basepath = self.basepath
# Give temporary names in a 1st pass to prevent overwriting.
for pid in pids:
item = pids[pid]
rgid, fname = item[0], item[1]
src = f'{basepath}/{fname}'
parts = partitions(fname)
dst = join_path(basepath, parts, f'part.{rgid}.parquet.tmp')
self.fs.rename(src, dst)
# Give definitive names in a 2nd pass.
for pid in pids:
item = pids[pid]
rgid, fname = item[0], item[1]
parts = partitions(fname)
src = join_path(basepath, parts, f'part.{rgid}.parquet.tmp')
dst_part = join_path(parts, f'part.{rgid}.parquet')
dst = join_path(basepath, dst_part)
self.fs.rename(src, dst)
for col in self.fmd.row_groups[rgid].columns:
col.file_path = dst_part
if write_fmd:
self._write_common_metadata(open_with)
|
(self, write_fmd: bool = True, open_with=<built-in function open>)
|
69,734 |
fastparquet.api
|
_write_common_metadata
|
Write common metadata to disk.
Parameters
----------
open_with: function
When called with a f(path, mode), returns an open file-like object.
|
def _write_common_metadata(self, open_with=default_open):
"""
Write common metadata to disk.
Parameters
----------
open_with: function
When called with a f(path, mode), returns an open file-like object.
"""
from .writer import write_common_metadata
if self.file_scheme == 'simple':
raise ValueError("Not possible to write common metadata when file \
scheme is 'simple'.")
fmd = self.fmd
write_common_metadata(self.fn, fmd, open_with, no_row_groups=False)
# replace '_metadata' with '_common_metadata'
fn = f'{self.fn[:-9]}_common_metadata'
write_common_metadata(fn, fmd, open_with)
|
(self, open_with=<built-in function open>)
|
69,735 |
fastparquet.api
|
check_categories
| null |
def check_categories(self, cats):
categ = self.categories
if not self.has_pandas_metadata:
return cats or {}
if cats is None:
return categ or {}
if set(cats) - set(categ) and len(self.row_groups) > 1:
raise TypeError("Attempt to read as category a field that "
"was not stored as such")
if isinstance(cats, dict):
return cats
out = {k: v for k, v in categ.items() if k in cats}
out.update({c: pd.RangeIndex(0, 2**14) for c in cats if c not in categ})
return out
|
(self, cats)
|
69,736 |
fastparquet.api
|
count
|
Total number of rows.
`filters` and `row_filter` have the same meaning as in `to_pandas`. Unless
both are given, this method does not need to decode any data.
|
def count(self, filters=None, row_filter=False):
""" Total number of rows
filters and row_filters have the same meaning as in to_pandas. Unless both are given,
this method will not need to decode any data
"""
if row_filter:
cs = self._columns_from_filters(filters)
df = self.to_pandas(columns=cs, filters=filters, row_filter=False,
index=False)
return self._column_filter(df, filters=filters).sum()
rgs = filter_row_groups(self, filters)
return sum(rg.num_rows for rg in rgs)
|
(self, filters=None, row_filter=False)
|
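A hedged usage sketch, assuming an open `ParquetFile` `pf` with a numeric column `x` (hypothetical name):

pf.count()                                          # total rows, from metadata only
pf.count(filters=[("x", ">", 0)])                   # rows of row groups passing the filter
pf.count(filters=[("x", ">", 0)], row_filter=True)  # exact row-wise count; decodes x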
69,737 |
fastparquet.api
|
head
|
Get the first nrows of data
This will load the whole of the first valid row-group for the given
columns.
kwargs can include things like columns, filters, etc., with the same
semantics as to_pandas(). If filters are applied, the data may be
reduced such that fewer than 'nrows' rows are returned.
returns: dataframe
|
def head(self, nrows, **kwargs):
"""Get the first nrows of data
This will load the whole of the first valid row-group for the given
columns.
kwargs can include things like columns, filters, etc., with the same
semantics as to_pandas(). If filters are applied, the data may be
reduced such that fewer than 'nrows' rows are returned.
returns: dataframe
"""
# TODO: implement with truncated assign and early exit
# from reading
total_rows = 0
for i, rg in enumerate(self.row_groups):
total_rows += rg.num_rows
if total_rows >= nrows:
break
return self[:i+1].to_pandas(**kwargs).head(nrows)
|
(self, nrows, **kwargs)
|
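A hedged usage sketch, assuming an open `ParquetFile` `pf`:

first10 = pf.head(10)  # loads only the leading row group(s), then truncates to 10 rows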
69,738 |
fastparquet.api
|
iter_row_groups
|
Iterate a dataset by row-groups
If filters is given, omits row-groups that fail the filter
(saving execution time)
Returns
-------
Generator yielding one Pandas data-frame per row-group.
|
def iter_row_groups(self, filters=None, **kwargs):
"""
Iterate a dataset by row-groups
If filters is given, omits row-groups that fail the filter
(saving execution time)
Returns
-------
Generator yielding one Pandas data-frame per row-group.
"""
rgs = filter_row_groups(self, filters) if filters else self.row_groups
for rg in rgs:
i = self.row_groups.index(rg)
df = self[i].to_pandas(filters=filters, **kwargs)
if not df.empty:
yield df
|
(self, filters=None, **kwargs)
|
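A hedged sketch for streaming over a dataset one row group at a time, assuming `pf` and a column `x` (hypothetical name):

total = 0.0
for df in pf.iter_row_groups(columns=["x"], filters=[("x", ">", 0)]):
    # each df is one row group whose statistics passed the filter
    total += df["x"].sum()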
69,739 |
fastparquet.api
|
pre_allocate
| null |
def pre_allocate(self, size, columns, categories, index, dtypes=None):
if dtypes is not None:
columns = list(dtypes)
else:
dtypes = self._dtypes(categories)
categories = self.check_categories(categories)
cats = {k: v for k, v in self.cats.items() if k in columns}
df, arrs = _pre_allocate(size, columns, categories, index, cats,
dtypes, self.tz, columns_dtype=self._columns_dtype)
i_no_name = re.compile(r"__index_level_\d+__")
if self.has_pandas_metadata:
md = self.pandas_metadata
if categories:
for c in md['columns']:
if c['name'] in categories and c['name'] in df and c['metadata']:
df[c['name']].dtype._ordered = c['metadata']['ordered']
if md.get('index_columns', False) and not (index or index is False):
if len(md['index_columns']) == 1:
ic = md['index_columns'][0]
if isinstance(ic, dict) and ic.get('kind') == 'range':
from pandas import RangeIndex
df.index = RangeIndex(
start=ic['start'],
stop=ic['start'] + size * ic['step'] + 1,
step=ic['step']
)[:size]
names = [(c['name'] if isinstance(c, dict) else c)
for c in md['index_columns']]
names = [None if n is None or i_no_name.match(n) else n
for n in names]
df.index.names = names
if md.get('column_indexes', False):
names = [(c['name'] if isinstance(c, dict) else c)
for c in md['column_indexes']]
names = [None if n is None or i_no_name.match(n) else n
for n in names]
if len(names) > 1:
df.columns = pd.MultiIndex.from_tuples(
[ast.literal_eval(c) for c in df.columns if c not in df.index.names],
names=names
)
else:
df.columns.names = names
return df, arrs
|
(self, size, columns, categories, index, dtypes=None)
|
69,740 |
fastparquet.api
|
read_row_group_file
|
Open file for reading, and process it as a row-group
assign is None if this method is called directly (not from to_pandas),
in which case we return the resultant dataframe
row_filter can be:
- False (don't do row filtering)
- a list of filters (do filtering here for this one row-group;
only makes sense if assign=None)
- bool array with a size equal to the number of rows in this group
and the length of the assign arrays
|
def read_row_group_file(self, rg, columns, categories, index=None,
assign=None, partition_meta=None, row_filter=False,
infile=None):
""" Open file for reading, and process it as a row-group
assign is None if this method is called directly (not from to_pandas),
in which case we return the resultant dataframe
row_filter can be:
- False (don't do row filtering)
- a list of filters (do filtering here for this one row-group;
only makes sense if assign=None)
- bool array with a size equal to the number of rows in this group
and the length of the assign arrays
"""
categories = self.check_categories(categories)
fn = self.row_group_filename(rg)
ret = False
if assign is None:
if row_filter and isinstance(row_filter, list):
cs = self._columns_from_filters(row_filter)
df = self.read_row_group_file(
rg, cs, categories, index=False,
infile=infile, row_filter=False)
row_filter = self._column_filter(df, filters=row_filter)
size = row_filter.sum()
if size == rg.num_rows:
row_filter = False
else:
size = rg.num_rows
df, assign = self.pre_allocate(
size, columns, categories, index)
if "PANDAS_ATTRS" in self.key_value_metadata:
import json
df.attrs = json.loads(self.key_value_metadata["PANDAS_ATTRS"])
ret = True
f = infile or self.open(fn, mode='rb')
core.read_row_group(
f, rg, columns, categories, self.schema, self.cats,
selfmade=self.selfmade, index=index,
assign=assign, scheme=self.file_scheme, partition_meta=partition_meta,
row_filter=row_filter
)
if ret:
return df
|
(self, rg, columns, categories, index=None, assign=None, partition_meta=None, row_filter=False, infile=None)
|
69,741 |
fastparquet.api
|
remove_row_groups
|
Remove a list of row groups from disk. `ParquetFile` metadata is
updated accordingly. This method cannot be applied if the file scheme
is 'simple'.
Parameters
----------
rgs: row group or list of row groups
List of row groups to be removed from disk.
sort_pnames : bool, default False
Align name of part files with position of the 1st row group they
contain. Only used if `file_scheme` of parquet file is set to
`hive` or `drill`.
write_fmd : bool, default True
Write updated common metadata to disk.
open_with: function
When called with f(path, mode), returns an open file-like object.
remove_with: function
When called with f(path) removes the file or directory given
(and any contained files). Not required if this ParquetFile has
a .fs file system attribute
|
def remove_row_groups(self, rgs, sort_pnames:bool=False,
write_fmd:bool=True, open_with=default_open,
remove_with=None):
"""
Remove a list of row groups from disk. `ParquetFile` metadata is
updated accordingly. This method cannot be applied if the file scheme
is 'simple'.
Parameters
----------
rgs: row group or list of row groups
List of row groups to be removed from disk.
sort_pnames : bool, default False
Align name of part files with position of the 1st row group they
contain. Only used if `file_scheme` of parquet file is set to
`hive` or `drill`.
write_fmd : bool, default True
Write updated common metadata to disk.
open_with: function
When called with f(path, mode), returns an open file-like object.
remove_with: function
When called with f(path) removes the file or directory given
(and any contained files). Not required if this ParquetFile has
a .fs file system attribute
"""
if not isinstance(rgs, list):
if isinstance(rgs, ThriftObject) or isinstance(rgs, dict):
# Case 'rgs' is a single row group ('ThriftObject' or 'dict').
rgs = [rgs]
else:
# Use `list()` here, not `[]`, as the latter does not transform
# generator or tuple into list but encapsulates them in a list.
rgs = list(rgs)
if rgs:
if self.file_scheme == 'simple':
raise ValueError("Not possible to remove row groups when file "
"scheme is 'simple'.")
if remove_with is None:
if hasattr(self, 'fs'):
remove_with = self.fs.rm
else:
remove_with = default_remove
rgs_to_remove = row_groups_map(rgs)
if (b"fastparquet" not in self.created_by
or self.file_scheme == 'flat'):
# Check if some files contain row groups both to be removed and
# to be kept.
all_rgs = row_groups_map(self.fmd.row_groups)
for file in rgs_to_remove:
if len(rgs_to_remove[file]) < len(all_rgs[file]):
raise ValueError(
f"File {file} contains row groups both to be kept "
"and to be removed. Removing row groups partially "
"from a file is not possible.")
if rgs != self.fmd.row_groups:
rg_new = self.fmd.row_groups
else:
# Deep copy required if 'rg_new' and 'rgs' points both to
# 'self.fmd.row_groups'.
from copy import deepcopy
rg_new = deepcopy(self.fmd.row_groups)
for rg in rgs:
rg_new.remove(rg)
self.fmd.num_rows -= rg.num_rows
self.fmd.row_groups = rg_new
try:
basepath = self.basepath
remove_with([f'{basepath}/{file}' for file in rgs_to_remove])
except IOError:
pass
self._set_attrs()
if sort_pnames:
self._sort_part_names(False, open_with)
if write_fmd:
self._write_common_metadata(open_with)
|
(self, rgs, sort_pnames: bool = False, write_fmd: bool = True, open_with=<built-in function open>, remove_with=None)
|
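A hedged sketch, assuming a hive-scheme `ParquetFile` `pf`: drop the first row group, delete its part file, and rewrite the common metadata:

rg = pf.row_groups[0]
pf.remove_row_groups(rg)  # accepts a single row group or a list of them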
69,742 |
fastparquet.api
|
row_group_filename
| null |
def row_group_filename(self, rg):
if rg.columns and rg.columns[0].file_path:
base = self.basepath
if base:
return join_path(base, rg.columns[0].file_path)
else:
return rg.columns[0].file_path
else:
return self.fn
|
(self, rg)
|
69,743 |
fastparquet.api
|
to_pandas
|
Read data from parquet into a Pandas dataframe.
Parameters
----------
columns: list of names or `None`
Columns to load (see `ParquetFile.columns`). Any columns in the
data not in this list will be ignored. If `None`, read all columns.
categories: list, dict or `None`
If a column is encoded using dictionary encoding in every row-group
and its name is also in this list, it will generate a Pandas
Category-type column, potentially saving memory and time. If a
dict {col: int}, the value indicates the number of categories,
so that the optimal data-dtype can be allocated. If ``None``,
will be automatically set *if* the data was written from pandas.
filters: list of list of tuples or list of tuples
To filter out data.
Filter syntax: [[(column, op, val), ...],...]
where op is [==, =, >, >=, <, <=, !=, in, not in]
The innermost tuples are transposed into a set of filters applied
through an `AND` operation.
The outer list combines these sets of filters through an `OR`
operation.
A single list of tuples can also be used, meaning that no `OR`
operation between sets of filters is to be conducted.
index: string or list of strings or False or None
Column(s) to assign to the (multi-)index. If None, index is
inferred from the metadata (if this was originally pandas data); if
the metadata does not exist or index is False, index is simple
sequential integers.
row_filter: bool or boolean ndarray
Whether filters are applied to whole row-groups (False, default)
or row-wise (True, experimental). The latter requires two passes of
any row group that may contain valid rows, but can be much more
memory-efficient, especially if the filter columns are not required
in the output.
If a boolean array, it is applied as a custom row filter. In this
case, the 'filters' parameter is ignored, and the length of the array
has to be equal to the total number of rows.
Returns
-------
Pandas data-frame
|
def to_pandas(self, columns=None, categories=None, filters=[],
index=None, row_filter=False, dtypes=None):
"""
Read data from parquet into a Pandas dataframe.
Parameters
----------
columns: list of names or `None`
Columns to load (see `ParquetFile.columns`). Any columns in the
data not in this list will be ignored. If `None`, read all columns.
categories: list, dict or `None`
If a column is encoded using dictionary encoding in every row-group
and its name is also in this list, it will generate a Pandas
Category-type column, potentially saving memory and time. If a
dict {col: int}, the value indicates the number of categories,
so that the optimal data-dtype can be allocated. If ``None``,
will be automatically set *if* the data was written from pandas.
filters: list of list of tuples or list of tuples
To filter out data.
Filter syntax: [[(column, op, val), ...],...]
where op is [==, =, >, >=, <, <=, !=, in, not in]
The innermost tuples are transposed into a set of filters applied
through an `AND` operation.
The outer list combines these sets of filters through an `OR`
operation.
A single list of tuples can also be used, meaning that no `OR`
operation between sets of filters is to be conducted.
index: string or list of strings or False or None
Column(s) to assign to the (multi-)index. If None, index is
inferred from the metadata (if this was originally pandas data); if
the metadata does not exist or index is False, index is simple
sequential integers.
row_filter: bool or boolean ndarray
Whether filters are applied to whole row-groups (False, default)
or row-wise (True, experimental). The latter requires two passes of
any row group that may contain valid rows, but can be much more
memory-efficient, especially if the filter columns are not required
in the output.
If a boolean array, it is applied as a custom row filter. In this
case, the 'filters' parameter is ignored, and the length of the array
has to be equal to the total number of rows.
Returns
-------
Pandas data-frame
"""
rgs = filter_row_groups(self, filters) if filters else self.row_groups
index = self._get_index(index)
if columns is not None:
columns = columns[:]
else:
columns = self.columns + list(self.cats)
if index:
columns += [i for i in index if i not in columns]
check_column_names(self.columns + list(self.cats), columns, categories)
if row_filter is not False:
if filters and row_filter is True:
# Rows are selected as per filters.
# TODO: special case when filter columns are also in output
cs = self._columns_from_filters(filters)
df = self.to_pandas(columns=cs, filters=filters, row_filter=False,
index=False)
sel = self._column_filter(df, filters=filters)
else:
# Rows are selected as per custom 'sel'.
if sum(rg.num_rows for rg in rgs) != len(row_filter):
raise ValueError('Provided boolean array for custom row '
'selection does not match number of rows in DataFrame.')
sel = row_filter
size = sel.sum()
selected = []
start = 0
for rg in rgs[:]:
selected.append(sel[start:start+rg.num_rows])
start += rg.num_rows
else:
size = sum(rg.num_rows for rg in rgs)
selected = [None] * len(rgs) # just to fill zip, below
df, views = self.pre_allocate(size, columns, categories, index, dtypes=dtypes)
if "PANDAS_ATTRS" in self.key_value_metadata:
import json
df.attrs = json.loads(self.key_value_metadata["PANDAS_ATTRS"])
start = 0
if self.file_scheme == 'simple':
infile = self.open(self.fn, 'rb')
else:
infile = None
for rg, sel in zip(rgs, selected):
thislen = sel.sum() if sel is not None else rg.num_rows
if thislen == rg.num_rows:
# all good; noop if no row filtering
sel = None
elif thislen == 0:
# no valid rows
continue
parts = {name: (v if name.endswith('-catdef')
else v[start:start + thislen])
for (name, v) in views.items()}
self.read_row_group_file(rg, columns, categories, index,
assign=parts, partition_meta=self.partition_meta,
row_filter=sel, infile=infile)
start += thislen
return df
|
(self, columns=None, categories=None, filters=[], index=None, row_filter=False, dtypes=None)
|
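A hedged usage sketch of the filter syntax documented above; `pf` and the column names `x` and `cat` are assumptions:

# (x > 0 AND x < 10) OR cat in {"a", "b"}: outer list = OR, inner lists = AND
df = pf.to_pandas(
    columns=["x", "cat"],
    filters=[[("x", ">", 0), ("x", "<", 10)], [("cat", "in", ["a", "b"])]],
    row_filter=True,  # apply filters row-wise, not just per row group
)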
69,744 |
fastparquet.api
|
write_row_groups
|
Write data as new row groups to disk, with optional sorting.
Parameters
----------
data : pandas dataframe or iterable of pandas dataframe
Data to add to existing parquet dataset. Only columns are written
to disk. Row index is not kept.
If a dataframe, columns are checked against parquet file schema.
row_group_offsets: int or list of int
If int, row-groups will be approximately this many rows, rounded down
to make row groups about the same size;
If a list, the explicit index values to start new row groups;
If `None`, set to 50_000_000.
sort_key : function, default None
Sorting function used as `key` parameter for `row_groups.sort()`
function. This function is called once new row groups have been
added to list of existing ones.
If not provided, new row groups are only appended to existing ones
and the updated list of row groups is not sorted.
sort_pnames : bool, default False
Align name of part files with position of the 1st row group they
contain. Only used if `file_scheme` of parquet file is set to
`hive` or `drill`.
compression : str or dict, default None
Compression to apply to each column, e.g. ``GZIP`` or ``SNAPPY`` or
a ``dict`` like ``{"col1": "SNAPPY", "col2": None}`` to specify per
column compression types.
By default, do not compress.
Please, review full description of this parameter in `write`
docstring.
write_fmd : bool, default True
Write updated common metadata to disk.
open_with : function
When called with a f(path, mode), returns an open file-like object.
mkdirs : function
When called with a path/URL, creates any necessary directories to
make that location writable, e.g., ``os.makedirs``. This is not
necessary if using the simple file scheme.
stats : True|False|list of str|"auto"
Whether to calculate and write summary statistics.
If True, do it for every column;
If False, never do;
If a list of str, do it only for those specified columns.
"auto" (default) means True for any int/float or timestamp column,
False otherwise.
|
def write_row_groups(self, data, row_group_offsets=None, sort_key=None,
sort_pnames:bool=False, compression=None,
write_fmd:bool=True, open_with=default_open,
mkdirs=None, stats="auto"):
"""Write data as new row groups to disk, with optional sorting.
Parameters
----------
data : pandas dataframe or iterable of pandas dataframe
Data to add to existing parquet dataset. Only columns are written
to disk. Row index is not kept.
If a dataframe, columns are checked against parquet file schema.
row_group_offsets: int or list of int
If int, row-groups will be approximately this many rows, rounded down
to make row groups about the same size;
If a list, the explicit index values to start new row groups;
If `None`, set to 50_000_000.
sort_key : function, default None
Sorting function used as `key` parameter for `row_groups.sort()`
function. This function is called once new row groups have been
added to list of existing ones.
If not provided, new row groups are only appended to existing ones
and the updated list of row groups is not sorted.
sort_pnames : bool, default False
Align name of part files with position of the 1st row group they
contain. Only used if `file_scheme` of parquet file is set to
`hive` or `drill`.
compression : str or dict, default None
Compression to apply to each column, e.g. ``GZIP`` or ``SNAPPY`` or
a ``dict`` like ``{"col1": "SNAPPY", "col2": None}`` to specify per
column compression types.
By default, do not compress.
Please, review full description of this parameter in `write`
docstring.
write_fmd : bool, default True
Write updated common metadata to disk.
open_with : function
When called with a f(path, mode), returns an open file-like object.
mkdirs : function
When called with a path/URL, creates any necessary directories to
make that location writable, e.g., ``os.makedirs``. This is not
necessary if using the simple file scheme.
stats : True|False|list of str|"auto"
Whether to calculate and write summary statistics.
If True, do it for every column;
If False, never do;
If a list of str, do it only for those specified columns.
"auto" (default) means True for any int/float or timestamp column,
False otherwise.
"""
from .writer import write_simple, write_multi
partition_on = list(self.cats)
if isinstance(data, pd.DataFrame):
self_cols = sorted(self.columns + partition_on)
if self_cols != sorted(data.columns):
diff_cols = set(data.columns) ^ set(self_cols)
raise ValueError(
f'Column names of new data are {sorted(data.columns)}. '
f'But column names in existing file are {self_cols}. '
f'{diff_cols} are present only in the existing file or '
'only in the new data. This is not possible.')
if (self.file_scheme == 'simple'
or (self.file_scheme == 'empty' and self.fn[-9:] != '_metadata')):
# Case 'simple'.
write_simple(self.fn, data, self.fmd,
row_group_offsets=row_group_offsets,
compression=compression, open_with=open_with,
has_nulls=None, append=True, stats=stats)
else:
# Case 'hive' or 'drill'.
write_multi(self.basepath, data, self.fmd,
row_group_offsets=row_group_offsets,
compression=compression, file_scheme=self.file_scheme,
write_fmd=False, open_with=open_with, mkdirs=mkdirs,
partition_on=partition_on, append=True, stats=stats)
if sort_key:
# Not using 'sort()' because 'row_groups' is a ThriftObject,
# not a list.
self.fmd.row_groups = sorted(self.fmd.row_groups, key=sort_key)
if sort_pnames:
self._sort_part_names(False, open_with)
if write_fmd:
self._write_common_metadata(open_with)
self._set_attrs()
|
(self, data, row_group_offsets=None, sort_key=None, sort_pnames: bool = False, compression=None, write_fmd: bool = True, open_with=<built-in function open>, mkdirs=None, stats='auto')
|
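A hedged usage sketch: append a dataframe as new row groups to an existing hive-scheme dataset, assuming `new_df` matches the file's schema:

pf.write_row_groups(new_df, compression="SNAPPY", sort_pnames=True)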
69,757 |
fastparquet.writer
|
update_file_custom_metadata
|
Update metadata in a file without rewriting the data portion (if a data file).
This function updates only the user key-value metadata, not the whole
metadata of a parquet file.
The update strategy depends on whether a key in the new custom metadata is
also found in the already existing custom metadata of the thrift object, as
well as on its value.
- If not found in existing, it is added.
- If found in existing, it is updated.
- If its value is `None`, it is not added, and if found in existing,
it is removed from existing.
Parameters
----------
path : str
Local path to file.
custom_metadata : dict
Key-value metadata to update in the thrift object. The values must be
strings or binary; to pass a dictionary, serialize it as a JSON string,
then encode it to bytes.
is_metadata_file : bool, default None
Define if the target file is a pure metadata file or a parquet data
file. If `None`, it is set depending on the file name:
- if ending with '_metadata', the file is assumed to be a metadata file;
- otherwise, it is assumed to be a parquet data file.
Notes
-----
This method does not work for remote files.
|
def update_file_custom_metadata(path: str, custom_metadata: dict,
is_metadata_file: bool = None):
"""Update metadata in file without rewriting data portion if a data file.
This function updates only the user key-values metadata, not the whole
metadata of a parquet file.
Update strategy depends if key found in new custom metadata is also found
in already existing custom metadata within thrift object, as well as its
value.
- If not found in existing, it is added.
- If found in existing, it is updated.
- If its value is `None`, it is not added, and if found in existing,
it is removed from existing.
Parameters
----------
path : str
Local path to file.
custom_metadata : dict
Key-value metadata to update in the thrift object. The values must be
strings or binary; to pass a dictionary, serialize it as a JSON string,
then encode it to bytes.
is_metadata_file : bool, default None
Define if the target file is a pure metadata file or a parquet data
file. If `None`, it is set depending on the file name:
- if ending with '_metadata', the file is assumed to be a metadata file;
- otherwise, it is assumed to be a parquet data file.
Notes
-----
This method does not work for remote files.
"""
if is_metadata_file is None:
is_metadata_file = path.endswith('_metadata')
with open(path, "rb+") as f:
if is_metadata_file:
# For pure metadata file, metadata starts just four bytes in.
loc = 4
else:
loc0 = f.seek(-8, 2)
size = int.from_bytes(f.read(4), "little")
loc = loc0 - size
f.seek(loc)
data = f.read()
fmd = from_buffer(data, "FileMetaData")
update_custom_metadata(fmd, custom_metadata)
f.seek(loc)
foot_size = write_thrift(f, fmd)
f.write(struct.pack(b"<I", foot_size))
f.write(b"PAR1")
|
(path: str, custom_metadata: dict, is_metadata_file: Optional[bool] = None)
|
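A hedged usage sketch following the note above about JSON-encoding dict values; the path and keys are hypothetical:

import json

update_file_custom_metadata(
    "data.parquet",
    {
        "origin": "etl-job",                      # plain string value
        "params": json.dumps({"v": 1}).encode(),  # dict passed as JSON bytes
        "obsolete-key": None,                     # None removes the key if present
    },
)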
69,759 |
fastparquet.writer
|
write
|
Write pandas dataframe to filename with parquet format.
Parameters
----------
filename: str or pathlib.Path
Parquet collection to write to, either a single file (if file_scheme
is simple) or a directory containing the metadata and data-files.
data: pandas dataframe
The table to write.
row_group_offsets: int or list of int
If int, row-groups will be approximately this many rows, rounded down
to make row groups about the same size;
If a list, the explicit index values to start new row groups;
If `None`, set to 50_000_000.
If the data is partitioned, final row-group sizes can be further
reduced by the partitioning, which occurs as a subsequent step.
compression: str, dict
compression to apply to each column, e.g. ``GZIP`` or ``SNAPPY`` or a
``dict`` like ``{"col1": "SNAPPY", "col2": None}`` to specify per
column compression types.
In both cases, the compressor settings would be the underlying
compressor defaults. To pass arguments to the underlying compressor,
each ``dict`` entry should itself be a dictionary::
{
col1: {
"type": "LZ4",
"args": {
"mode": "high_compression",
"compression": 9
}
},
col2: {
"type": "SNAPPY",
"args": None
}
"_default": {
"type": "GZIP",
"args": None
}
}
where ``"type"`` specifies the compression type to use, and ``"args"``
specifies a ``dict`` that will be turned into keyword arguments for
the compressor.
If the dictionary contains a "_default" entry, this will be used for any
columns not explicitly specified in the dictionary.
file_scheme: 'simple'|'hive'|'drill'
If simple: all goes in a single file
If hive or drill: each row group is in a separate file, and a separate
file (called "_metadata") contains the metadata.
open_with: function
When called with a f(path, mode), returns an open file-like object
mkdirs: function
When called with a path/URL, creates any necessary dictionaries to
make that location writable, e.g., ``os.makedirs``. This is not
necessary if using the simple file scheme
has_nulls: bool, 'infer' or list of strings
Whether columns can have nulls. If a list of strings, those given
columns will be marked as "optional" in the metadata, and include
null definition blocks on disk. Some data types (floats and times)
can instead use the sentinel values NaN and NaT, which are not the same
as NULL in parquet, but functionally act the same in many cases,
particularly if converting back to pandas later. A value of 'infer'
will assume nulls for object columns and not otherwise.
Ignored if appending to an existing parquet data-set.
write_index: boolean
Whether or not to write the index to a separate column. By default we
write the index *if* it is not 0, 1, ..., n.
Ignored if appending to an existing parquet data-set.
partition_on: string or list of string
Column names passed to groupby in order to split data within each
row-group, producing a structured directory tree. Note: as with pandas,
null values will be dropped. Ignored if file_scheme is simple.
Checked when appending to an existing parquet dataset that requested
partition column names match those of existing parquet data-set.
fixed_text: {column: int length} or None
For bytes or str columns, values will be converted
to fixed-length strings of the given length for the given columns
before writing, potentially providing a large speed
boost. The length applies to the binary representation *after*
conversion for utf8, json or bson.
Ignored if appending to an existing parquet dataset.
append: bool (False) or 'overwrite'
If False, construct data-set from scratch; if True, add new row-group(s)
to existing data-set. In the latter case, the data-set must exist,
and the schema must match the input data.
If 'overwrite', existing partitions will be replaced in-place, where
the given data has any rows within a given partition. To use this,
the existing dataset must have been written with the following
parameters set as below, otherwise a ValueError is raised:
* ``file_scheme='hive'``
* ``partition_on`` set to at least one column name.
object_encoding: str or {col: type}
For object columns, this gives the data type, so that the values can
be encoded to bytes.
Possible values are bytes|utf8|json|bson|bool|int|int32|decimal,
where bytes is assumed if not specified (i.e., no conversion). The
special value 'infer' will cause the type to be guessed from the first
ten non-null values. The decimal.Decimal type is a valid choice, but will
result in float encoding with possible loss of accuracy.
Ignored if appending to an existing parquet data-set.
times: 'int64' (default), or 'int96':
In "int64" mode, datetimes are written as 8-byte integers, us
resolution; in "int96" mode, they are written as 12-byte blocks, with
the first 8 bytes as ns within the day, the next 4 bytes the Julian day.
'int96' mode is included only for compatibility.
Ignored if appending to an existing parquet data-set.
custom_metadata: dict
Key-value metadata to write
Ignored if appending to an existing parquet data-set.
stats: True|False|list(str)|"auto"
Whether to calculate and write summary statistics.
If True, do it for every column;
If False, never do;
And if a list of str, do it only for those specified columns.
"auto" (default) means True for any int/float or timestamp column
Examples
--------
>>> fastparquet.write('myfile.parquet', df) # doctest: +SKIP
|
def write(filename, data, row_group_offsets=None,
compression=None, file_scheme='simple', open_with=default_open,
mkdirs=None, has_nulls=True, write_index=None,
partition_on=[], fixed_text=None, append=False,
object_encoding='infer', times='int64',
custom_metadata=None, stats="auto"):
"""Write pandas dataframe to filename with parquet format.
Parameters
----------
filename: str or pathlib.Path
Parquet collection to write to, either a single file (if file_scheme
is simple) or a directory containing the metadata and data-files.
data: pandas dataframe
The table to write.
row_group_offsets: int or list of int
If int, row-groups will be approximately this many rows, rounded down
to make row groups about the same size;
If a list, the explicit index values to start new row groups;
If `None`, set to 50_000_000.
If the data is partitioned, final row-group sizes can be further
reduced by the partitioning, which occurs as a subsequent step.
compression: str, dict
compression to apply to each column, e.g. ``GZIP`` or ``SNAPPY`` or a
``dict`` like ``{"col1": "SNAPPY", "col2": None}`` to specify per
column compression types.
In both cases, the compressor settings would be the underlying
compressor defaults. To pass arguments to the underlying compressor,
each ``dict`` entry should itself be a dictionary::
{
col1: {
"type": "LZ4",
"args": {
"mode": "high_compression",
"compression": 9
}
},
col2: {
"type": "SNAPPY",
"args": None
}
"_default": {
"type": "GZIP",
"args": None
}
}
where ``"type"`` specifies the compression type to use, and ``"args"``
specifies a ``dict`` that will be turned into keyword arguments for
the compressor.
If the dictionary contains a "_default" entry, this will be used for any
columns not explicitly specified in the dictionary.
file_scheme: 'simple'|'hive'|'drill'
If simple: all goes in a single file
If hive or drill: each row group is in a separate file, and a separate
file (called "_metadata") contains the metadata.
open_with: function
When called with a f(path, mode), returns an open file-like object
mkdirs: function
When called with a path/URL, creates any necessary directories to
make that location writable, e.g., ``os.makedirs``. This is not
necessary if using the simple file scheme
has_nulls: bool, 'infer' or list of strings
Whether columns can have nulls. If a list of strings, those given
columns will be marked as "optional" in the metadata, and include
null definition blocks on disk. Some data types (floats and times)
can instead use the sentinel values NaN and NaT, which are not the same
as NULL in parquet, but functionally act the same in many cases,
particularly if converting back to pandas later. A value of 'infer'
will assume nulls for object columns and not otherwise.
Ignored if appending to an existing parquet data-set.
write_index: boolean
Whether or not to write the index to a separate column. By default we
write the index *if* it is not 0, 1, ..., n.
Ignored if appending to an existing parquet data-set.
partition_on: string or list of string
Column names passed to groupby in order to split data within each
row-group, producing a structured directory tree. Note: as with pandas,
null values will be dropped. Ignored if file_scheme is simple.
Checked when appending to an existing parquet dataset that requested
partition column names match those of existing parquet data-set.
fixed_text: {column: int length} or None
For bytes or str columns, values will be converted
to fixed-length strings of the given length for the given columns
before writing, potentially providing a large speed
boost. The length applies to the binary representation *after*
conversion for utf8, json or bson.
Ignored if appending to an existing parquet dataset.
append: bool (False) or 'overwrite'
If False, construct data-set from scratch; if True, add new row-group(s)
to existing data-set. In the latter case, the data-set must exist,
and the schema must match the input data.
If 'overwrite', existing partitions will be replaced in-place, where
the given data has any rows within a given partition. To use this,
the existing dataset must have been written with the following
parameters set as below, otherwise a ValueError is raised:
* ``file_scheme='hive'``
* ``partition_on`` set to at least one column name.
object_encoding: str or {col: type}
For object columns, this gives the data type, so that the values can
be encoded to bytes.
Possible values are bytes|utf8|json|bson|bool|int|int32|decimal,
where bytes is assumed if not specified (i.e., no conversion). The
special value 'infer' will cause the type to be guessed from the first
ten non-null values. The decimal.Decimal type is a valid choice, but will
result in float encoding with possible loss of accuracy.
Ignored if appending to an existing parquet data-set.
times: 'int64' (default), or 'int96':
In "int64" mode, datetimes are written as 8-byte integers, us
resolution; in "int96" mode, they are written as 12-byte blocks, with
the first 8 bytes as ns within the day, the next 4 bytes the Julian day.
'int96' mode is included only for compatibility.
Ignored if appending to an existing parquet data-set.
custom_metadata: dict
Key-value metadata to write
Ignored if appending to an existing parquet data-set.
stats: True|False|list(str)|"auto"
Whether to calculate and write summary statistics.
If True, do it for every column;
If False, never do;
And if a list of str, do it only for those specified columns.
"auto" (default) means True for any int/float or timestamp column
Examples
--------
>>> fastparquet.write('myfile.parquet', df) # doctest: +SKIP
"""
custom_metadata = custom_metadata or {}
if getattr(data, "attrs", None):
custom_metadata["PANDAS_ATTRS"] = json.dumps(data.attrs)
if file_scheme not in ('simple', 'hive', 'drill'):
raise ValueError( 'File scheme should be simple|hive|drill, not '
f'{file_scheme}.')
fs, filename, open_with, mkdirs = get_fs(filename, open_with, mkdirs)
if append == 'overwrite':
overwrite(dirpath=filename, data=data,
row_group_offsets=row_group_offsets, compression=compression,
open_with=open_with, mkdirs=mkdirs, remove_with=None,
stats=stats)
return
if isinstance(partition_on, str):
partition_on = [partition_on]
if append:
pf = ParquetFile(filename, open_with=open_with)
if pf._get_index():
# Format dataframe (manage row index).
data = reset_row_idx(data)
if file_scheme == 'simple':
# Case 'simple'
if pf.file_scheme not in ['simple', 'empty']:
raise ValueError( 'File scheme requested is simple, but '
f'existing file scheme is {pf.file_scheme}.')
else:
# Case 'hive', 'drill'
if pf.file_scheme not in ['hive', 'empty', 'flat']:
raise ValueError(f'Requested file scheme is {file_scheme}, but '
f'existing file scheme is {pf.file_scheme}.')
if tuple(partition_on) != tuple(pf.cats):
raise ValueError('When appending, partitioning columns must '
'match existing data')
pf.write_row_groups(data, row_group_offsets, sort_key=None,
sort_pnames=False, compression=compression,
write_fmd=True, open_with=open_with,
mkdirs=mkdirs, stats=stats)
else:
# Case 'append=False'.
# Define 'index_cols' to be recorded in metadata.
cols_dtype = data.columns.dtype
if (write_index or write_index is None
and not isinstance(data.index, pd.RangeIndex)):
# Keep name(s) of index to metadata.
cols = set(data)
data = reset_row_idx(data)
index_cols = [c for c in data if c not in cols]
elif write_index is None and isinstance(data.index, pd.RangeIndex):
# write_index=None, range to metadata
index_cols = data.index
else:
# write_index=False
index_cols = []
# Initialize common metadata.
if str(has_nulls) == 'infer':
has_nulls = None
check_column_names(data.columns, partition_on, fixed_text,
object_encoding, has_nulls)
ignore = partition_on if file_scheme != 'simple' else []
fmd = make_metadata(data, has_nulls=has_nulls, ignore_columns=ignore,
fixed_text=fixed_text,
object_encoding=object_encoding,
times=times, index_cols=index_cols,
partition_cols=partition_on, cols_dtype=cols_dtype)
if custom_metadata:
kvm = fmd.key_value_metadata or []
kvm.extend(
[
parquet_thrift.KeyValue(key=key, value=value)
for key, value in custom_metadata.items()
]
)
fmd.key_value_metadata = kvm
if file_scheme == 'simple':
# Case 'simple'
write_simple(filename, data, fmd,
row_group_offsets=row_group_offsets,
compression=compression, open_with=open_with,
has_nulls=None, append=False, stats=stats)
else:
# Case 'hive', 'drill'
write_multi(filename, data, fmd,
row_group_offsets=row_group_offsets,
compression=compression, file_scheme=file_scheme,
write_fmd=True, open_with=open_with,
mkdirs=mkdirs, partition_on=partition_on,
append=False, stats=stats)
|
(filename, data, row_group_offsets=None, compression=None, file_scheme='simple', open_with=<built-in function open>, mkdirs=None, has_nulls=True, write_index=None, partition_on=[], fixed_text=None, append=False, object_encoding='infer', times='int64', custom_metadata=None, stats='auto')
|
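A hedged usage sketch combining the documented options; `df` and the column names are assumptions:

from fastparquet import write

write("out.parquet", df)                       # single file, no compression
write("out_dir", df, file_scheme="hive",
      partition_on=["year"],                   # one directory level per value
      compression={"x": "SNAPPY", "y": None},  # per-column compression
      row_group_offsets=5_000_000)             # ~5M rows per row group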
69,762 |
pprintpp
|
pformat
|
Format a Python object into a pretty-printed representation.
|
def pformat(object, indent=4, width=80, depth=None):
"""Format a Python object into a pretty-printed representation."""
return PrettyPrinter(indent=indent, width=width, depth=depth).pformat(object)
|
(object, indent=4, width=80, depth=None)
|
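A hedged usage sketch, assuming `pformat` is importable from the `pprintpp` module:

from pprintpp import pformat

print(pformat({"a": [1, 2, 3], "b": {"nested": True}}, indent=2, width=40))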
69,763 |
pytest_icdiff
|
pytest_addoption
| null |
def pytest_addoption(parser):
parser.addoption(
"--icdiff-cols",
action="store",
default=None,
help="pytest-icdiff: specify the width of the screen, in case autodetection fails you",
)
parser.addoption(
"--icdiff-show-all-spaces",
default=False,
action="store_true",
help="pytest-icdiff: color all non-matching whitespace including that which is not needed for drawing the eye to changes. Slow, ugly, displays all changes",
)
parser.addoption(
"--icdiff-highlight",
default=False,
action="store_true",
help="pytest-icdiff: color by changing the background color instead of the foreground color. Very fast, ugly, displays all changes",
)
parser.addoption(
"--icdiff-line-numbers",
default=False,
action="store_true",
help="pytest-icdiff: generate output with line numbers. Not compatible with the 'exclude-lines' option.",
)
parser.addoption(
"--icdiff-tabsize",
default=2,
help="pytest-icdiff: tab stop spacing",
)
parser.addoption(
"--icdiff-truncate",
default=False,
action="store_true",
help="pytest-icdiff: truncate long lines instead of wrapping them",
)
parser.addoption(
"--icdiff-strip-trailing-cr",
default=False,
action="store_true",
help="pytest-icdiff: strip any trailing carriage return at the end of an input line",
)
|
(parser)
|
69,764 |
pytest_icdiff
|
pytest_assertrepr_compare
| null |
def pytest_assertrepr_compare(config, op, left, right):
if op != "==":
return
try:
if abs(left + right) < 19999:
return
except TypeError:
pass
except ValueError:
# ValueErrors are raised when numpy / pandas errors are checked
# Bail out of generating a diff and use pytest default output
return
COLS = int(config.getoption("--icdiff-cols") or AUTO_COLS)
half_cols = COLS / 2 - MARGINS
TABSIZE = int(config.getoption("--icdiff-tabsize") or 2)
pretty_left = pformat(left, indent=TABSIZE, width=half_cols).splitlines()
pretty_right = pformat(right, indent=TABSIZE, width=half_cols).splitlines()
diff_cols = COLS - MARGINS
if len(pretty_left) < 3 or len(pretty_right) < 3:
# avoid small diffs far apart by smooshing them up to the left
smallest_left = pformat(left, indent=TABSIZE, width=1).splitlines()
smallest_right = pformat(right, indent=TABSIZE, width=1).splitlines()
max_side = max(len(l) + 1 for l in smallest_left + smallest_right)
if (max_side * 2 + MARGINS) < COLS:
diff_cols = max_side * 2 + GUTTER
pretty_left = pformat(left, indent=TABSIZE, width=max_side).splitlines()
pretty_right = pformat(right, indent=TABSIZE, width=max_side).splitlines()
differ = icdiff.ConsoleDiff(
cols=diff_cols,
show_all_spaces=config.getoption("--icdiff-show-all-spaces"),
highlight=config.getoption("--icdiff-highlight"),
line_numbers=config.getoption("--icdiff-line-numbers"),
tabsize=TABSIZE,
truncate=config.getoption("--icdiff-truncate"),
strip_trailing_cr=config.getoption("--icdiff-strip-trailing-cr"),
)
if not config.get_terminal_writer().hasmarkup:
# colorization is disabled in Pytest - either due to the terminal not
# supporting it or the user disabling it. We should obey, but there is
# no option in icdiff to disable it, so we replace its colorization
# function with a no-op
differ.colorize = lambda string: string
color_off = ""
else:
color_off = icdiff.color_codes["none"]
icdiff_lines = list(differ.make_table(pretty_left, pretty_right))
if len(icdiff_lines) > 50:
icdiff_lines = list(differ.make_table(pretty_left, pretty_right, context=True))
return ["equals failed"] + [color_off + l for l in icdiff_lines]
|
(config, op, left, right)
|
69,767 |
delocate.delocating
|
delocate_path
|
Copy required libraries for files in `tree_path` into `lib_path`.
Parameters
----------
tree_path : str
Root path of tree to search for required libraries
lib_path : str
Directory into which we copy required libraries
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
Libraries which won't be copied will not be inspected for dependencies.
executable_path : None or str, optional
If not None, an alternative path to use for resolving
`@executable_path`.
ignore_missing : bool, default=False
Continue even if missing dependencies are detected.
sanitize_rpaths : bool, default=False, keyword-only
If True, absolute paths in rpaths of binaries are removed.
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_sdir` of the wheel packages, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a file in the path depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library.
Raises
------
DelocationError
When any dependencies cannot be located.
|
def delocate_path(
tree_path: Text,
lib_path: Text,
lib_filt_func: Optional[Union[str, Callable[[Text], bool]]] = None,
copy_filt_func: Optional[Callable[[Text], bool]] = filter_system_libs,
executable_path: Optional[Text] = None,
ignore_missing: bool = False,
*,
sanitize_rpaths: bool = False,
) -> Dict[Text, Dict[Text, Text]]:
"""Copy required libraries for files in `tree_path` into `lib_path`.
Parameters
----------
tree_path : str
Root path of tree to search for required libraries
lib_path : str
Directory into which we copy required libraries
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
Libraries which won't be copied will not be inspected for dependencies.
executable_path : None or str, optional
If not None, an alternative path to use for resolving
`@executable_path`.
ignore_missing : bool, default=False
Continue even if missing dependencies are detected.
sanitize_rpaths : bool, default=False, keyword-only
If True, absolute paths in rpaths of binaries are removed.
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_sdir` of the wheel packages, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a file in the path depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library.
Raises
------
DelocationError
When any dependencies cannot be located.
"""
if lib_filt_func == "dylibs-only":
lib_filt_func = _dylibs_only
elif isinstance(lib_filt_func, str):
raise TypeError('lib_filt_func string can only be "dylibs-only"')
if lib_filt_func is None:
lib_filt_func = _allow_all
if copy_filt_func is None:
copy_filt_func = _allow_all
if not exists(lib_path):
os.makedirs(lib_path)
# Do not inspect dependencies of libraries that will not be copied.
filt_func = functools.partial(
_delocate_filter_function,
lib_filt_func=lib_filt_func,
copy_filt_func=copy_filt_func,
)
lib_dict = tree_libs_from_directory(
tree_path,
lib_filt_func=filt_func,
copy_filt_func=filt_func,
executable_path=executable_path,
ignore_missing=ignore_missing,
)
return delocate_tree_libs(
lib_dict, lib_path, tree_path, sanitize_rpaths=sanitize_rpaths
)
|
(tree_path: str, lib_path: str, lib_filt_func: Union[str, Callable[[str], bool], NoneType] = None, copy_filt_func: Optional[Callable[[str], bool]] = <function filter_system_libs at 0x7f7d1272add0>, executable_path: Optional[str] = None, ignore_missing: bool = False, *, sanitize_rpaths: bool = False) -> Dict[str, Dict[str, str]]
|
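A minimal usage sketch of ``delocate_path``; the tree and bundle directories below are hypothetical examples, and ``"dylibs-only"`` restricts inspection to files with known dynamic library extensions:
from delocate.delocating import delocate_path

# Copy every non-system dependency of Mach-O files under ./unpacked
# into ./unpacked/mypkg/.dylibs (both paths are hypothetical examples).
copied_libs = delocate_path(
    "unpacked",
    "unpacked/mypkg/.dylibs",
    lib_filt_func="dylibs-only",
)
for lib, depending in sorted(copied_libs.items()):
    print(lib, "<-", sorted(depending))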
69,768 |
delocate.delocating
|
delocate_wheel
|
Update wheel by copying required libraries to `lib_sdir` in wheel.
Create `lib_sdir` in wheel tree only if we are copying one or more
libraries.
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
out_wheel : None or str
Filename of processed wheel to write. If None, overwrite `in_wheel`
lib_sdir : str, optional
Subdirectory name in wheel package directory (or directories) to store
needed libraries.
Ignored if the wheel has no package directories, and only contains
stand-alone modules.
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
require_archs : None or str or sequence, optional
If None, do no checks of architectures in libraries. If sequence,
sequence of architectures (output from ``lipo -info``) that every
    library in the wheels should have (e.g. ``['x86_64', 'i386']``). An
empty sequence results in checks that depended libraries have the same
archs as depending libraries. If string, either "intel" (corresponds
    to sequence ``['x86_64', 'i386']``) or name of required architecture
    (e.g. "i386" or "x86_64").
check_verbose : bool, optional
This flag is deprecated, and has no effect.
executable_path : None or str, optional, keyword-only
An alternative path to use for resolving `@executable_path`.
ignore_missing : bool, default=False, keyword-only
Continue even if missing dependencies are detected.
sanitize_rpaths : bool, default=False, keyword-only
If True, absolute paths in rpaths of binaries are removed.
require_target_macos_version : None or Version, optional, keyword-only
If provided, the minimum macOS version that the wheel should support.
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_sdir` of the wheel packages, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a path in the wheel depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library. The filenames in the keys are relative to the wheel root path.
|
def delocate_wheel(
in_wheel: str,
out_wheel: Optional[str] = None,
lib_sdir: str = ".dylibs",
lib_filt_func: Union[None, str, Callable[[str], bool]] = None,
copy_filt_func: Optional[Callable[[str], bool]] = filter_system_libs,
require_archs: Union[None, str, Iterable[str]] = None,
check_verbose: Optional[bool] = None,
*,
executable_path: Optional[str] = None,
ignore_missing: bool = False,
sanitize_rpaths: bool = False,
require_target_macos_version: Optional[Version] = None,
) -> Dict[str, Dict[str, str]]:
"""Update wheel by copying required libraries to `lib_sdir` in wheel.
Create `lib_sdir` in wheel tree only if we are copying one or more
libraries.
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
out_wheel : None or str
Filename of processed wheel to write. If None, overwrite `in_wheel`
lib_sdir : str, optional
Subdirectory name in wheel package directory (or directories) to store
needed libraries.
Ignored if the wheel has no package directories, and only contains
stand-alone modules.
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
require_archs : None or str or sequence, optional
If None, do no checks of architectures in libraries. If sequence,
sequence of architectures (output from ``lipo -info``) that every
        library in the wheels should have (e.g. ``['x86_64', 'i386']``). An
empty sequence results in checks that depended libraries have the same
archs as depending libraries. If string, either "intel" (corresponds
        to sequence ``['x86_64', 'i386']``) or name of required architecture
        (e.g. "i386" or "x86_64").
check_verbose : bool, optional
This flag is deprecated, and has no effect.
executable_path : None or str, optional, keyword-only
An alternative path to use for resolving `@executable_path`.
ignore_missing : bool, default=False, keyword-only
Continue even if missing dependencies are detected.
sanitize_rpaths : bool, default=False, keyword-only
If True, absolute paths in rpaths of binaries are removed.
require_target_macos_version : None or Version, optional, keyword-only
If provided, the minimum macOS version that the wheel should support.
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_sdir` of the wheel packages, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a path in the wheel depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library. The filenames in the keys are relative to the wheel root path.
"""
if check_verbose is not None:
warnings.warn(
"The check_verbose flag is deprecated and shouldn't be provided,"
" all subsequent parameters should be changed over to keywords.",
DeprecationWarning,
stacklevel=2,
)
in_wheel = abspath(in_wheel)
if out_wheel is None:
out_wheel = in_wheel
else:
out_wheel = abspath(out_wheel)
in_place = in_wheel == out_wheel
remove_old = in_place
with TemporaryDirectory() as tmpdir:
wheel_dir = realpath(pjoin(tmpdir, "wheel"))
zip2dir(in_wheel, wheel_dir)
# Assume the package name from the wheel filename.
package_name = basename(in_wheel).split("-")[0]
lib_sdir = _decide_dylib_bundle_directory(
wheel_dir, package_name, lib_sdir
)
lib_path = pjoin(wheel_dir, lib_sdir)
lib_path_exists_before_delocate = exists(lib_path)
copied_libs = delocate_path(
wheel_dir,
lib_path,
lib_filt_func,
copy_filt_func,
executable_path=executable_path,
ignore_missing=ignore_missing,
sanitize_rpaths=sanitize_rpaths,
)
if copied_libs and lib_path_exists_before_delocate:
raise DelocationError(
"f{lib_path} already exists in wheel but need to copy "
+ "; ".join(copied_libs)
)
if len(os.listdir(lib_path)) == 0:
shutil.rmtree(lib_path)
# Check architectures
if require_archs is not None:
bads = check_archs(copied_libs, require_archs)
if bads:
raise DelocationError(
"Some missing architectures in wheel"
f"\n{bads_report(bads, pjoin(tmpdir, 'wheel'))}"
)
libraries_in_lib_path = [
pjoin(lib_path, basename(lib)) for lib in copied_libs
]
_make_install_name_ids_unique(
libraries=libraries_in_lib_path,
install_id_prefix=DLC_PREFIX + relpath(lib_sdir, wheel_dir),
)
rewrite_record(wheel_dir)
out_wheel_ = Path(out_wheel)
out_wheel_fixed = _check_and_update_wheel_name(
out_wheel_, Path(wheel_dir), require_target_macos_version
)
if out_wheel_fixed != out_wheel_:
out_wheel_ = out_wheel_fixed
in_place = False
_update_wheelfile(Path(wheel_dir), out_wheel_.name)
if len(copied_libs) or not in_place:
if remove_old:
os.remove(in_wheel)
dir2zip(wheel_dir, out_wheel_)
return stripped_lib_dict(copied_libs, wheel_dir + os.path.sep)
|
(in_wheel: str, out_wheel: Optional[str] = None, lib_sdir: str = '.dylibs', lib_filt_func: Union[str, Callable[[str], bool], NoneType] = None, copy_filt_func: Optional[Callable[[str], bool]] = <function filter_system_libs at 0x7f7d1272add0>, require_archs: Union[NoneType, str, Iterable[str]] = None, check_verbose: Optional[bool] = None, *, executable_path: Optional[str] = None, ignore_missing: bool = False, sanitize_rpaths: bool = False, require_target_macos_version: Optional[packaging.version.Version] = None) -> Dict[str, Dict[str, str]]
|
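A sketch of typical ``delocate_wheel`` usage, assuming a built macOS wheel whose filename below is a hypothetical example:
from delocate.delocating import delocate_wheel

copied_libs = delocate_wheel(
    "dist/mypkg-1.0-cp311-cp311-macosx_11_0_arm64.whl",     # hypothetical wheel
    out_wheel="fixed/mypkg-1.0-cp311-cp311-macosx_11_0_arm64.whl",
    require_archs=["arm64"],  # fail if any bundled library lacks arm64
    sanitize_rpaths=True,     # strip absolute rpaths from binaries
)
print("bundled libraries:", sorted(copied_libs))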
69,771 |
delocate.delocating
|
patch_wheel
|
Apply ``-p1`` style patch in `patch_fname` to contents of `in_wheel`.
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
patch_fname : str
Filename of patch file. Will be applied with ``patch -p1 <
patch_fname``
out_wheel : None or str
Filename of patched wheel to write. If None, overwrite `in_wheel`
|
def patch_wheel(
in_wheel: Text, patch_fname: Text, out_wheel: Optional[Text] = None
) -> None:
"""Apply ``-p1`` style patch in `patch_fname` to contents of `in_wheel`.
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
patch_fname : str
Filename of patch file. Will be applied with ``patch -p1 <
patch_fname``
out_wheel : None or str
Filename of patched wheel to write. If None, overwrite `in_wheel`
"""
in_wheel = abspath(in_wheel)
patch_fname = abspath(patch_fname)
if out_wheel is None:
out_wheel = in_wheel
else:
out_wheel = abspath(out_wheel)
if not exists(patch_fname):
raise ValueError("patch file {0} does not exist".format(patch_fname))
with InWheel(in_wheel, out_wheel):
with open(patch_fname, "rb") as fobj:
patch_proc = Popen(
["patch", "-p1"], stdin=fobj, stdout=PIPE, stderr=PIPE
)
stdout, stderr = patch_proc.communicate()
if patch_proc.returncode != 0:
raise RuntimeError(
"Patch failed with stdout:\n" + stdout.decode("latin1")
)
|
(in_wheel: str, patch_fname: str, out_wheel: Optional[str] = None) -> NoneType
|
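A short sketch of ``patch_wheel``; the filenames are hypothetical and the ``patch`` executable must be available on PATH:
from delocate.delocating import patch_wheel

patch_wheel(
    "dist/mypkg-1.0-py3-none-any.whl",   # hypothetical wheel
    "fix-import.patch",                  # applied as `patch -p1 < fix-import.patch`
    out_wheel="dist/mypkg-1.0-py3-none-any.patched.whl",
)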
69,775 |
delocate.libsana
|
tree_libs
|
Return analysis of library dependencies within `start_path`.
Parameters
----------
start_path : str
root path of tree to search for libraries depending on other libraries.
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``).
``libpath`` is a canonical (``os.path.realpath``) filename of library,
or library name starting with {'@loader_path'}.
``dependings_dict`` is a dict with (key, value) pairs of
    (``depending_libpath``, ``install_name``), where ``depending_libpath``
is the canonical (``os.path.realpath``) filename of the library
depending on ``libpath``, and ``install_name`` is the "install_name" by
which ``depending_libpath`` refers to ``libpath``.
Notes
-----
See:
* https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html
* http://matthew-brett.github.io/pydagogue/mac_runtime_link.html
.. deprecated:: 0.9
This function does not support `@loader_path` and only returns the
direct dependencies of the libraries in `start_path`.
:func:`tree_libs_from_directory` should be used instead.
|
def tree_libs(
start_path: Text,
filt_func: Optional[Callable[[Text], bool]] = None,
) -> Dict[Text, Dict[Text, Text]]:
"""Return analysis of library dependencies within `start_path`.
Parameters
----------
start_path : str
root path of tree to search for libraries depending on other libraries.
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``).
``libpath`` is a canonical (``os.path.realpath``) filename of library,
or library name starting with {'@loader_path'}.
``dependings_dict`` is a dict with (key, value) pairs of
        (``depending_libpath``, ``install_name``), where ``depending_libpath``
is the canonical (``os.path.realpath``) filename of the library
depending on ``libpath``, and ``install_name`` is the "install_name" by
which ``depending_libpath`` refers to ``libpath``.
Notes
-----
See:
* https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html
* http://matthew-brett.github.io/pydagogue/mac_runtime_link.html
.. deprecated:: 0.9
This function does not support `@loader_path` and only returns the
direct dependencies of the libraries in `start_path`.
:func:`tree_libs_from_directory` should be used instead.
"""
warnings.warn(
"tree_libs doesn't support @loader_path and has been deprecated.",
DeprecationWarning,
stacklevel=2,
)
if filt_func is None:
filt_func = _allow_all
lib_dict: Dict[Text, Dict[Text, Text]] = {}
for dirpath, dirnames, basenames in os.walk(start_path):
for base in basenames:
depending_path = realpath(pjoin(dirpath, base))
for dependency_path, install_name in get_dependencies(
depending_path,
filt_func=filt_func,
):
if dependency_path is None:
# Mimic deprecated behavior.
# A lib_dict with unresolved paths is unsuitable for
                    # delocating; this is a missing dependency.
dependency_path = realpath(install_name)
if install_name.startswith("@loader_path/"):
# Support for `@loader_path` would break existing callers.
logger.debug(
"Excluding %s because it has '@loader_path'.",
install_name,
)
continue
lib_dict.setdefault(dependency_path, {})
lib_dict[dependency_path][depending_path] = install_name
return lib_dict
|
(start_path: str, filt_func: Optional[Callable[[str], bool]] = None) -> Dict[str, Dict[str, str]]
|
69,777 |
delocate.libsana
|
wheel_libs
|
Return analysis of library dependencies within a Python wheel.
Use this routine for a dump of the dependency tree.
Parameters
----------
wheel_fname : str
Filename of wheel
filt_func : None or callable, optional
If None, inspect all non-system files for library dependencies.
If callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise.
ignore_missing : bool, default=False, optional, keyword-only
Continue even if missing dependencies are detected.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``). ``libpath`` is library being depended on,
relative to wheel root path if within wheel tree. ``dependings_dict``
is (key, value) of (``depending_lib_path``, ``install_name``). Again,
``depending_lib_path`` is library relative to wheel root path, if
within wheel tree.
Raises
------
DelocationError
    When dependencies cannot be located and `ignore_missing` is False.
|
def wheel_libs(
wheel_fname: str,
filt_func: Optional[Callable[[Text], bool]] = None,
*,
ignore_missing: bool = False,
) -> Dict[Text, Dict[Text, Text]]:
"""Return analysis of library dependencies with a Python wheel.
Use this routine for a dump of the dependency tree.
Parameters
----------
wheel_fname : str
Filename of wheel
filt_func : None or callable, optional
If None, inspect all non-system files for library dependencies.
If callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise.
ignore_missing : bool, default=False, optional, keyword-only
Continue even if missing dependencies are detected.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``). ``libpath`` is library being depended on,
relative to wheel root path if within wheel tree. ``dependings_dict``
is (key, value) of (``depending_lib_path``, ``install_name``). Again,
``depending_lib_path`` is library relative to wheel root path, if
within wheel tree.
Raises
------
DelocationError
        When dependencies cannot be located and `ignore_missing` is False.
"""
if filt_func is None:
filt_func = _filter_system_libs
with TemporaryDirectory() as tmpdir:
zip2dir(wheel_fname, tmpdir)
lib_dict = tree_libs_from_directory(
tmpdir, lib_filt_func=filt_func, ignore_missing=ignore_missing
)
return stripped_lib_dict(lib_dict, realpath(tmpdir) + os.path.sep)
|
(wheel_fname: str, filt_func: Optional[Callable[[str], bool]] = None, *, ignore_missing: bool = False) -> Dict[str, Dict[str, str]]
|
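A minimal sketch that dumps the dependency tree of a (hypothetical) wheel with ``wheel_libs``:
from delocate.libsana import wheel_libs

lib_dict = wheel_libs("mypkg-1.0-cp311-cp311-macosx_11_0_arm64.whl")
for libpath, dependings_dict in sorted(lib_dict.items()):
    print(libpath)
    for depending_lib_path, install_name in dependings_dict.items():
        print("    needed by", depending_lib_path, "as", install_name)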
69,779 |
kedro
|
KedroDeprecationWarning
|
Custom class for warnings about deprecated Kedro features.
|
class KedroDeprecationWarning(DeprecationWarning):
"""Custom class for warnings about deprecated Kedro features."""
| null |
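A short sketch of how a warning class like this is typically emitted; the deprecated and replacement names are hypothetical:
import warnings
from kedro import KedroDeprecationWarning

warnings.warn(
    "`old_helper` is deprecated; use `new_helper` instead.",  # hypothetical names
    KedroDeprecationWarning,
    stacklevel=2,
)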
69,782 |
bingads.authorization
|
Authentication
|
The base class for all authentication classes.
*See also:*
* :class:`.ServiceClient`
* :class:`.BulkServiceManager`
* :class:`.AuthorizationData`
|
class Authentication(object):
""" The base class for all authentication classes.
*See also:*
* :class:`.ServiceClient`
* :class:`.BulkServiceManager`
* :class:`.AuthorizationData`
"""
def enrich_headers(self, headers):
""" Sets the required header elements for the corresponding Bing Ads service or bulk file upload operation.
The header elements that the method sets will differ depending on the type of authentication.
For example if you use one of the OAuth classes, the AuthenticationToken header will be set by this method,
whereas the UserName and Password headers will remain empty.
:param headers: Bing Ads service or bulk file upload operation headers.
:type headers: dict
:rtype: None
"""
raise NotImplementedError()
|
()
|
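A minimal sketch of a concrete subclass, assuming a pre-acquired token; the subclass and token value are hypothetical:
from bingads.authorization import Authentication

class StaticTokenAuthentication(Authentication):  # hypothetical example subclass
    def __init__(self, access_token):
        self._access_token = access_token

    def enrich_headers(self, headers):
        # Set the single header element this scheme requires.
        headers['AuthenticationToken'] = self._access_token

headers = {}
StaticTokenAuthentication('EXAMPLE_TOKEN').enrich_headers(headers)
print(headers)  # {'AuthenticationToken': 'EXAMPLE_TOKEN'}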
69,783 |
bingads.authorization
|
enrich_headers
|
Sets the required header elements for the corresponding Bing Ads service or bulk file upload operation.
The header elements that the method sets will differ depending on the type of authentication.
For example if you use one of the OAuth classes, the AuthenticationToken header will be set by this method,
whereas the UserName and Password headers will remain empty.
:param headers: Bing Ads service or bulk file upload operation headers.
:type headers: dict
:rtype: None
|
def enrich_headers(self, headers):
""" Sets the required header elements for the corresponding Bing Ads service or bulk file upload operation.
The header elements that the method sets will differ depending on the type of authentication.
For example if you use one of the OAuth classes, the AuthenticationToken header will be set by this method,
whereas the UserName and Password headers will remain empty.
:param headers: Bing Ads service or bulk file upload operation headers.
:type headers: dict
:rtype: None
"""
raise NotImplementedError()
|
(self, headers)
|
69,784 |
bingads.authorization
|
AuthorizationData
|
Represents a user who intends to access the corresponding customer and account.
An instance of this class is required to authenticate with Bing Ads if you are using either
:class:`.ServiceClient` or :class:`.BulkServiceManager`.
|
class AuthorizationData:
""" Represents a user who intends to access the corresponding customer and account.
An instance of this class is required to authenticate with Bing Ads if you are using either
:class:`.ServiceClient` or :class:`.BulkServiceManager`.
"""
def __init__(self,
account_id=None,
customer_id=None,
developer_token=None,
authentication=None):
""" Initialize an instance of this class.
:param account_id: The identifier of the account that owns the entities in the request.
Used as the CustomerAccountId header and the AccountId body elements
in calls to the Bing Ads web services.
:type account_id: int
:param customer_id: The identifier of the customer that owns the account.
Used as the CustomerId header element in calls to the Bing Ads web services.
:type customer_id: int
:param developer_token: The Bing Ads developer access token.
Used as the DeveloperToken header element in calls to the Bing Ads web services.
:type developer_token: str
:param authentication: An object representing the authentication method that should be used in calls
to the Bing Ads web services.
:type authentication: Authentication
"""
self._account_id = account_id
self._customer_id = customer_id
self._developer_token = developer_token
self._authentication = authentication
@property
def account_id(self):
""" The identifier of the account that owns the entities in the request.
Used as the CustomerAccountId header and the AccountId body elements in calls to the Bing Ads web services.
:rtype: int
"""
return self._account_id
@property
def customer_id(self):
""" The identifier of the customer that owns the account.
Used as the CustomerId header element in calls to the Bing Ads web services.
:rtype: int
"""
return self._customer_id
@property
def developer_token(self):
""" The Bing Ads developer access token.
Used as the DeveloperToken header element in calls to the Bing Ads web services.
:rtype: str
"""
return self._developer_token
@property
def authentication(self):
""" An object representing the authentication method that should be used in calls to the Bing Ads web services.
*See also:*
* :class:`.OAuthDesktopMobileAuthCodeGrant`
* :class:`.OAuthDesktopMobileImplicitGrant`
* :class:`.OAuthWebAuthCodeGrant`
* :class:`.PasswordAuthentication`
:rtype: Authentication
"""
return self._authentication
@account_id.setter
def account_id(self, account_id):
self._account_id = account_id
@customer_id.setter
def customer_id(self, customer_id):
self._customer_id = customer_id
@developer_token.setter
def developer_token(self, developer_token):
self._developer_token = developer_token
@authentication.setter
def authentication(self, authentication):
self._authentication = authentication
|
(account_id=None, customer_id=None, developer_token=None, authentication=None)
|
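A construction sketch; all identifiers and the developer token are hypothetical placeholders:
from bingads.authorization import AuthorizationData, OAuthDesktopMobileAuthCodeGrant

authorization_data = AuthorizationData(
    account_id=123456789,
    customer_id=987654321,
    developer_token='YOUR_DEVELOPER_TOKEN',
    authentication=OAuthDesktopMobileAuthCodeGrant(client_id='YOUR_CLIENT_ID'),
)
# Pass authorization_data to a ServiceClient or BulkServiceManager.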
69,785 |
bingads.authorization
|
__init__
|
Initialize an instance of this class.
:param account_id: The identifier of the account that owns the entities in the request.
Used as the CustomerAccountId header and the AccountId body elements
in calls to the Bing Ads web services.
:type account_id: int
:param customer_id: The identifier of the customer that owns the account.
Used as the CustomerId header element in calls to the Bing Ads web services.
:type customer_id: int
:param developer_token: The Bing Ads developer access token.
Used as the DeveloperToken header element in calls to the Bing Ads web services.
:type developer_token: str
:param authentication: An object representing the authentication method that should be used in calls
to the Bing Ads web services.
:type authentication: Authentication
|
def __init__(self,
account_id=None,
customer_id=None,
developer_token=None,
authentication=None):
""" Initialize an instance of this class.
:param account_id: The identifier of the account that owns the entities in the request.
Used as the CustomerAccountId header and the AccountId body elements
in calls to the Bing Ads web services.
:type account_id: int
:param customer_id: The identifier of the customer that owns the account.
Used as the CustomerId header element in calls to the Bing Ads web services.
:type customer_id: int
:param developer_token: The Bing Ads developer access token.
Used as the DeveloperToken header element in calls to the Bing Ads web services.
:type developer_token: str
:param authentication: An object representing the authentication method that should be used in calls
to the Bing Ads web services.
:type authentication: Authentication
"""
self._account_id = account_id
self._customer_id = customer_id
self._developer_token = developer_token
self._authentication = authentication
|
(self, account_id=None, customer_id=None, developer_token=None, authentication=None)
|
69,786 |
bingads.service_client
|
BingAdsBuilder
| null |
class BingAdsBuilder(Builder):
# See https://github.com/suds-community/suds/issues/67 and https://github.com/suds-community/suds/commit/366f7f1616595b9e4163a3f90fc6e84ac0ae23f5
def __init__(self, resolver):
"""
@param resolver: A schema object name resolver.
@type resolver: L{resolver.Resolver}
"""
self.resolver = resolver
def skip_value(self, type):
""" whether or not to skip setting the value """
return False
|
(resolver)
|
69,787 |
bingads.service_client
|
__init__
|
@param resolver: A schema object name resolver.
@type resolver: L{resolver.Resolver}
|
def __init__(self, resolver):
"""
@param resolver: A schema object name resolver.
@type resolver: L{resolver.Resolver}
"""
self.resolver = resolver
|
(self, resolver)
|
69,788 |
suds.builder
|
add_attributes
|
add required attributes
|
def add_attributes(self, data, type):
""" add required attributes """
for attr, ancestry in type.attributes():
name = '_%s' % attr.name
value = attr.get_default()
setattr(data, name, value)
|
(self, data, type)
|
69,789 |
suds.builder
|
build
|
build an object for the specified typename as defined in the schema
|
def build(self, name):
""" build a an object for the specified typename as defined in the schema """
if isinstance(name, str):
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
else:
type = name
cls = type.name
if type.mixed():
data = Factory.property(cls)
else:
data = Factory.object(cls)
resolved = type.resolve()
md = data.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
history = []
self.add_attributes(data, resolved)
for child, ancestry in type.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:])
return data
|
(self, name)
|
69,790 |
suds.builder
|
ordering
|
get the ordering
|
def ordering(self, type):
""" get the ordering """
result = []
for child, ancestry in type.resolve():
name = child.name
if child.name is None:
continue
if child.isattr():
name = '_%s' % child.name
result.append(name)
return result
|
(self, type)
|
69,791 |
suds.builder
|
process
|
process the specified type then process its children
|
def process(self, data, type, history):
""" process the specified type then process its children """
if type in history:
return
if type.enum():
return
history.append(type)
resolved = type.resolve()
value = None
if type.multi_occurrence():
value = []
else:
if len(resolved) > 0:
if resolved.mixed():
value = Factory.property(resolved.name)
md = value.__metadata__
md.sxtype = resolved
else:
value = Factory.object(resolved.name)
md = value.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
setattr(data, type.name, None if self.skip_value(type) else value)
if value is not None:
data = value
if not isinstance(data, list):
self.add_attributes(data, resolved)
for child, ancestry in resolved.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:])
|
(self, data, type, history)
|
69,792 |
suds.builder
|
skip_child
|
get whether or not to skip the specified child
|
def skip_child(self, child, ancestry):
""" get whether or not to skip the specified child """
if child.any(): return True
for x in ancestry:
if x.choice():
return True
return False
|
(self, child, ancestry)
|
69,793 |
bingads.service_client
|
skip_value
|
whether or not to skip setting the value
|
def skip_value(self, type):
""" whether or not to skip setting the value """
return False
|
(self, type)
|
69,794 |
suds.builder
|
Builder
|
Builder used to construct an object for types defined in the schema
|
class Builder:
""" Builder used to construct an object for types defined in the schema """
def __init__(self, resolver):
"""
@param resolver: A schema object name resolver.
@type resolver: L{resolver.Resolver}
"""
self.resolver = resolver
def build(self, name):
""" build a an object for the specified typename as defined in the schema """
if isinstance(name, str):
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
else:
type = name
cls = type.name
if type.mixed():
data = Factory.property(cls)
else:
data = Factory.object(cls)
resolved = type.resolve()
md = data.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
history = []
self.add_attributes(data, resolved)
for child, ancestry in type.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:])
return data
def process(self, data, type, history):
""" process the specified type then process its children """
if type in history:
return
if type.enum():
return
history.append(type)
resolved = type.resolve()
value = None
if type.multi_occurrence():
value = []
else:
if len(resolved) > 0:
if resolved.mixed():
value = Factory.property(resolved.name)
md = value.__metadata__
md.sxtype = resolved
else:
value = Factory.object(resolved.name)
md = value.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
setattr(data, type.name, None if self.skip_value(type) else value)
if value is not None:
data = value
if not isinstance(data, list):
self.add_attributes(data, resolved)
for child, ancestry in resolved.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:])
def add_attributes(self, data, type):
""" add required attributes """
for attr, ancestry in type.attributes():
name = '_%s' % attr.name
value = attr.get_default()
setattr(data, name, value)
def skip_child(self, child, ancestry):
""" get whether or not to skip the specified child """
if child.any(): return True
for x in ancestry:
if x.choice():
return True
return False
def skip_value(self, type):
""" whether or not to skip setting the value """
return type.optional() and not type.multi_occurrence()
def ordering(self, type):
""" get the ordering """
result = []
for child, ancestry in type.resolve():
name = child.name
if child.name is None:
continue
if child.isattr():
name = '_%s' % child.name
result.append(name)
return result
|
(resolver)
|
69,801 |
suds.builder
|
skip_value
|
whether or not to skip setting the value
|
def skip_value(self, type):
""" whether or not to skip setting the value """
return type.optional() and not type.multi_occurrence()
|
(self, type)
|
69,802 |
suds.client
|
Client
|
A lightweight web service client.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
|
class Client(UnicodeMixin):
"""
A lightweight web service client.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
"""
@classmethod
def items(cls, sobject):
"""
Extract I{items} from a suds object.
Much like the items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
return sudsobject.items(sobject)
@classmethod
def dict(cls, sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A dictionary of items contained in I{sobject}.
@rtype: dict
"""
return sudsobject.asdict(sobject)
@classmethod
def metadata(cls, sobject):
"""
Extract the metadata from a suds object.
@param sobject: A suds object
@type sobject: L{Object}
@return: The object's metadata
@rtype: L{sudsobject.Metadata}
"""
return sobject.__metadata__
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = suds.transport.https.HttpAuthenticated()
self.options = options
if "cache" not in kwargs:
kwargs["cache"] = suds.cache.ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
Useful for cases when a WSDL and referenced XSD schemas make heavy use
of namespaces and those namespaces are subject to change.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: prefix already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are unique to the
cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
def __unicode__(self):
s = ["\n"]
s.append("Suds ( https://fedorahosted.org/suds/ )")
s.append(" version: %s" % (suds.__version__,))
if suds.__build__:
s.append(" build: %s" % (suds.__build__,))
for sd in self.sd:
s.append("\n\n%s" % (str(sd),))
return "".join(s)
|
(url, **kwargs)
|
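A minimal construction sketch; the WSDL URL is a hypothetical example and requires a reachable endpoint:
from suds.client import Client

client = Client("http://example.com/service?wsdl")  # hypothetical WSDL URL
client.set_options(timeout=90)                      # any suds Options keyword
print(client)                                       # dumps the service definition(s)
clone = client.clone()                              # shares only the parsed WSDL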
69,803 |
suds.client
|
__init__
|
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
|
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = suds.transport.https.HttpAuthenticated()
self.options = options
if "cache" not in kwargs:
kwargs["cache"] = suds.cache.ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
|
(self, url, **kwargs)
|
69,804 |
suds
|
<lambda>
| null |
__str__ = lambda x: x.__unicode__()
|
(x)
|
69,805 |
suds.client
|
__unicode__
| null |
def __unicode__(self):
s = ["\n"]
s.append("Suds ( https://fedorahosted.org/suds/ )")
s.append(" version: %s" % (suds.__version__,))
if suds.__build__:
s.append(" build: %s" % (suds.__build__,))
for sd in self.sd:
s.append("\n\n%s" % (str(sd),))
return "".join(s)
|
(self)
|
69,806 |
suds.client
|
add_prefix
|
Add I{static} mapping of an XML namespace prefix to a namespace.
Useful for cases when a WSDL and referenced XSD schemas make heavy use
of namespaces and those namespaces are subject to change.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: prefix already mapped.
|
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
Useful for cases when a WSDL and referenced XSD schemas make heavy use
of namespaces and those namespaces are subject to change.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: prefix already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
|
(self, prefix, uri)
|
69,807 |
suds.client
|
clone
|
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are unique to the
cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
|
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are unique to the
cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
|
(self)
|
69,808 |
suds.client
|
last_received
|
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
|
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
|
(self)
|
69,809 |
suds.client
|
last_sent
|
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
|
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
|
(self)
|
69,810 |
suds.client
|
set_options
|
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
|
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
|
(self, **kwargs)
|
69,811 |
bingads.util
|
DictCache
| null |
class DictCache(dict, Cache):
# .get and .clear work as intended
purge = dict.__delitem__
def put(self, id_, obj):
self[id_] = obj
return obj
| null |
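A small behavioral sketch: ``DictCache`` is a plain dict that also satisfies the suds ``Cache`` interface, with ``purge`` aliased to ``dict.__delitem__``:
from bingads.util import DictCache

cache = DictCache()
cache.put('wsdl-key', {'parsed': True})   # stores the object and returns it
assert cache.get('wsdl-key') == {'parsed': True}
cache.purge('wsdl-key')                   # same as del cache['wsdl-key']
assert cache.get('wsdl-key') is None      # dict.get default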
69,812 |
bingads.util
|
put
| null |
def put(self, id_, obj):
self[id_] = obj
return obj
|
(self, id_, obj)
|
69,813 |
suds.client
|
Factory
|
A factory for instantiating types defined in the WSDL.
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
|
class Factory:
"""
A factory for instantiating types defined in the WSDL.
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
"""
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
def create(self, name):
"""
Create a WSDL type by name.
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = sudsobject.Factory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception as e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug("%s created: %s", name, timer)
return result
def separator(self, ps):
"""
Set the path separator.
@param ps: The new path separator.
@type ps: char
"""
self.resolver = PathResolver(self.wsdl, ps)
|
(wsdl)
|
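In practice this factory is reached through ``Client.factory``; a sketch with a hypothetical WSDL URL and type name:
from suds.client import Client

client = Client("http://example.com/service?wsdl")  # hypothetical WSDL URL
person = client.factory.create('Person')            # hypothetical WSDL type
person.name = 'Ada'
client.factory.separator('/')                       # switch path separator for nested type names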
69,814 |
suds.client
|
__init__
|
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
|
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
|
(self, wsdl)
|
69,815 |
suds.client
|
create
|
Create a WSDL type by name.
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
|
def create(self, name):
"""
Create a WSDL type by name.
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = sudsobject.Factory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception as e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug("%s created: %s", name, timer)
return result
|
(self, name)
|
69,816 |
suds.client
|
separator
|
Set the path separator.
@param ps: The new path separator.
@type ps: char
|
def separator(self, ps):
"""
Set the path separator.
@param ps: The new path separator.
@type ps: char
"""
self.resolver = PathResolver(self.wsdl, ps)
|
(self, ps)
|
69,817 |
bingads.exceptions
|
FileDownloadException
|
This exception is thrown if a file download error or timeout occurs
|
class FileDownloadException(SdkException):
""" This exception is thrown if timeout occurs """
def __init__(self, description):
""" Initializes a new instance of this class with the specified error messages.
:param description: The description of the file download error.
:type description: str
"""
super(FileDownloadException, self).__init__(str(description))
|
(description)
|
69,818 |
bingads.exceptions
|
__init__
|
Initializes a new instance of this class with the specified error message.
:param description: The description of the file download error.
:type description: str
|
def __init__(self, description):
""" Initializes a new instance of this class with the specified error messages.
:param description: The description of the file download error.
:type description: str
"""
super(FileDownloadException, self).__init__(str(description))
|
(self, description)
|
69,819 |
bingads.exceptions
|
__str__
| null |
def __str__(self):
return self.message
|
(self)
|
69,820 |
bingads.exceptions
|
FileUploadException
|
This exception is thrown if a file upload error or timeout occurs
|
class FileUploadException(SdkException):
""" This exception is thrown if timeout occurs """
def __init__(self, description):
""" Initializes a new instance of this class with the specified error messages.
:param description: The description of the file upload error.
:type description: str
"""
super(FileUploadException, self).__init__(str(description))
|
(description)
|
69,821 |
bingads.exceptions
|
__init__
|
Initializes a new instance of this class with the specified error message.
:param description: The description of the file upload error.
:type description: str
|
def __init__(self, description):
""" Initializes a new instance of this class with the specified error messages.
:param description: The description of the file upload error.
:type description: str
"""
super(FileUploadException, self).__init__(str(description))
|
(self, description)
|
69,823 |
bingads.headerplugin
|
HeaderPlugin
| null |
class HeaderPlugin(MessagePlugin):
def __init__(self):
self.document = None
def parsed(self, context):
self.document = context.reply
def get_response_header(self):
result = {}
envns = ('SOAP-ENV', 'http://schemas.xmlsoap.org/soap/envelope/')
soapenv = self.document.getChild('Envelope', envns)
soapheaders = soapenv.getChild('Header', envns)
SHeaderNodes = soapheaders.children
for Node in SHeaderNodes:
result[Node.name] = Node.text
return result
|
()
|
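A usage sketch wiring the plugin into a suds client; the WSDL URL and the invoked operation are hypothetical:
from suds.client import Client
from bingads.headerplugin import HeaderPlugin

header_plugin = HeaderPlugin()
client = Client("http://example.com/service?wsdl", plugins=[header_plugin])
client.service.GetSomething()               # hypothetical service operation
print(header_plugin.get_response_header())  # SOAP response header elements as a dict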
69,824 |
bingads.headerplugin
|
__init__
| null |
def __init__(self):
self.document = None
|
(self)
|
69,825 |
bingads.headerplugin
|
get_response_header
| null |
def get_response_header(self):
result = {}
envns = ('SOAP-ENV', 'http://schemas.xmlsoap.org/soap/envelope/')
soapenv = self.document.getChild('Envelope', envns)
soapheaders = soapenv.getChild('Header', envns)
SHeaderNodes = soapheaders.children
for Node in SHeaderNodes:
result[Node.name] = Node.text
return result
|
(self)
|
69,826 |
suds.plugin
|
marshalled
|
Suds is about to send the specified SOAP envelope.
Provides the plugin with the opportunity to inspect/modify the envelope
Document before it is sent.
@param context: The send context.
The I{envelope} is the envelope document.
@type context: L{MessageContext}
|
def marshalled(self, context):
"""
Suds is about to send the specified SOAP envelope.
Provides the plugin with the opportunity to inspect/modify the envelope
Document before it is sent.
@param context: The send context.
The I{envelope} is the envelope document.
@type context: L{MessageContext}
"""
pass
|
(self, context)
|
69,827 |
bingads.headerplugin
|
parsed
| null |
def parsed(self, context):
self.document = context.reply
|
(self, context)
|
69,828 |
suds.plugin
|
received
|
Suds has received the specified reply.
Provides the plugin with the opportunity to inspect/modify the received
XML text before it is SAX parsed.
@param context: The reply context.
The I{reply} is the raw text.
@type context: L{MessageContext}
|
def received(self, context):
"""
Suds has received the specified reply.
Provides the plugin with the opportunity to inspect/modify the received
XML text before it is SAX parsed.
@param context: The reply context.
The I{reply} is the raw text.
@type context: L{MessageContext}
"""
pass
|
(self, context)
|
69,829 |
suds.plugin
|
sending
|
Suds is about to send the specified SOAP envelope.
Provides the plugin with the opportunity to inspect/modify the message
text before it is sent.
@param context: The send context.
The I{envelope} is the envelope text.
@type context: L{MessageContext}
|
def sending(self, context):
"""
Suds is about to send the specified SOAP envelope.
Provides the plugin with the opportunity to inspect/modify the message
text before it is sent.
@param context: The send context.
The I{envelope} is the envelope text.
@type context: L{MessageContext}
"""
pass
|
(self, context)
|
69,830 |
suds.plugin
|
unmarshalled
|
Suds has unmarshalled the received reply.
Provides the plugin with the opportunity to inspect/modify the
unmarshalled reply object before it is returned.
@param context: The reply context.
The I{reply} is unmarshalled suds object.
@type context: L{MessageContext}
|
def unmarshalled(self, context):
"""
Suds has unmarshalled the received reply.
Provides the plugin with the opportunity to inspect/modify the
unmarshalled reply object before it is returned.
@param context: The reply context.
The I{reply} is unmarshalled suds object.
@type context: L{MessageContext}
"""
pass
|
(self, context)
|
69,831 |
bingads.authorization
|
OAuthAuthorization
|
The abstract base class for all OAuth authentication classes.
You can use this class to dynamically instantiate a derived OAuth authentication class at run time.
This class cannot be instantiated, and instead you should use one of :class:`.OAuthDesktopMobileAuthCodeGrant`,
:class:`.OAuthDesktopMobileImplicitGrant`, or :class:`.OAuthWebAuthCodeGrant`, which extend this class.
*See also:*
* :class:`.OAuthDesktopMobileAuthCodeGrant`
* :class:`.OAuthDesktopMobileImplicitGrant`
* :class:`.OAuthWebAuthCodeGrant`
|
class OAuthAuthorization(Authentication):
""" The abstract base class for all OAuth authentication classes.
You can use this class to dynamically instantiate a derived OAuth authentication class at run time.
    This class cannot be instantiated, and instead you should use one of :class:`.OAuthDesktopMobileAuthCodeGrant`,
    :class:`.OAuthDesktopMobileImplicitGrant`, or :class:`.OAuthWebAuthCodeGrant`, which extend this class.
*See also:*
* :class:`.OAuthDesktopMobileAuthCodeGrant`
* :class:`.OAuthDesktopMobileImplicitGrant`
* :class:`.OAuthWebAuthCodeGrant`
"""
def __init__(self, client_id, oauth_tokens=None, env=PRODUCTION, oauth_scope=MSADS_MANAGE, tenant='common'):
""" Initializes a new instance of the OAuthAuthorization class.
:param client_id: The client identifier corresponding to your registered application.
:type client_id: str
:param oauth_tokens: Contains information about OAuth access tokens received from the Microsoft Account authorization service
:type oauth_tokens: OAuthTokens
        """
if client_id is None:
raise ValueError('Client id cannot be None.')
self._client_id = client_id
self._oauth_tokens = oauth_tokens
self._state = None
        self.environment = env
        self._oauth_scope = oauth_scope
self._tenant = tenant
@property
def tenant(self):
""" tenant
:rtype: str
"""
return self._tenant
@property
def client_id(self):
""" The client identifier corresponding to your registered application.
For more information about using a client identifier for authentication, see the
Client Password Authentication section of the OAuth 2.0 spec at
https://tools.ietf.org/html/rfc6749#section-4.1
:rtype: str
"""
return self._client_id
@property
def oauth_tokens(self):
""" Contains information about OAuth access tokens received from the Microsoft Account authorization service.
:rtype: OAuthTokens
"""
return self._oauth_tokens
@property
def state(self):
""" An opaque value used by the client to maintain state between the request and callback
:rtype: str
"""
return self._state
@state.setter
def state(self, value):
""" An opaque value used by the client to maintain state between the request and callback
:rtype: str
"""
self._state = value
@property
def redirection_uri(self):
""" The URI to which the user of the app will be redirected after receiving user consent.
:rtype: str
"""
raise NotImplementedError()
def get_authorization_endpoint(self):
""" Gets the Microsoft Account authorization endpoint where the user should be navigated to give his or her consent.
:return: The Microsoft Account authorization endpoint.
:rtype: str
"""
raise NotImplementedError()
def enrich_headers(self, headers):
""" Sets the AuthenticationToken headers elements for Bing Ads service or bulk file upload operation. """
if self.oauth_tokens is None:
raise NotImplementedError("OAuth access token hasn't been requested.")
headers['AuthenticationToken'] = self.oauth_tokens.access_token
|
(client_id, oauth_tokens=None, env='production', oauth_scope='msads.manage', tenant='common')
|
69,832 |
bingads.authorization
|
__init__
|
Initializes a new instance of the OAuthAuthorization class.
:param client_id: The client identifier corresponding to your registered application.
:type client_id: str
:param oauth_tokens: Contains information about OAuth access tokens received from the Microsoft Account authorization service
:type oauth_tokens: OAuthTokens
|
def __init__(self, client_id, oauth_tokens=None, env=PRODUCTION, oauth_scope=MSADS_MANAGE, tenant='common'):
""" Initializes a new instance of the OAuthAuthorization class.
:param client_id: The client identifier corresponding to your registered application.
:type client_id: str
:param oauth_tokens: Contains information about OAuth access tokens received from the Microsoft Account authorization service
:type oauth_tokens: OAuthTokens
    """
if client_id is None:
raise ValueError('Client id cannot be None.')
self._client_id = client_id
self._oauth_tokens = oauth_tokens
self._state = None
    self.environment = env
    self._oauth_scope = oauth_scope
self._tenant = tenant
|
(self, client_id, oauth_tokens=None, env='production', oauth_scope='msads.manage', tenant='common')
|
69,833 |
bingads.authorization
|
enrich_headers
|
Sets the AuthenticationToken header element for Bing Ads service or bulk file upload operation.
|
def enrich_headers(self, headers):
""" Sets the AuthenticationToken headers elements for Bing Ads service or bulk file upload operation. """
if self.oauth_tokens is None:
raise NotImplementedError("OAuth access token hasn't been requested.")
headers['AuthenticationToken'] = self.oauth_tokens.access_token
|
(self, headers)
|
69,834 |
bingads.authorization
|
get_authorization_endpoint
|
Gets the Microsoft Account authorization endpoint where the user should be navigated to give his or her consent.
:return: The Microsoft Account authorization endpoint.
:rtype: str
|
def get_authorization_endpoint(self):
""" Gets the Microsoft Account authorization endpoint where the user should be navigated to give his or her consent.
:return: The Microsoft Account authorization endpoint.
:rtype: str
"""
raise NotImplementedError()
|
(self)
|
69,835 |
bingads.authorization
|
OAuthDesktopMobileAuthCodeGrant
|
Represents an OAuth authorization object implementing the authorization code grant flow for use in a desktop
or mobile application.
You can use an instance of this class as the AuthorizationData.Authentication property
of an :class:`.AuthorizationData` object to authenticate with Bing Ads services.
In this case the AuthenticationToken request header will be set to the corresponding OAuthTokens.AccessToken value.
This class implements the authorization code grant flow for Managing User Authentication with OAuth
documented at http://go.microsoft.com/fwlink/?LinkID=511609. This is a standard OAuth 2.0 flow and is defined in detail in the
Authorization Code Grant section of the OAuth 2.0 spec at https://tools.ietf.org/html/rfc6749#section-4.1.
For more information about registering a Bing Ads application, see http://go.microsoft.com/fwlink/?LinkID=511607.
|
class OAuthDesktopMobileAuthCodeGrant(OAuthWithAuthorizationCode):
""" Represents an OAuth authorization object implementing the authorization code grant flow for use in a desktop
or mobile application.
You can use an instance of this class as the AuthorizationData.Authentication property
of an :class:`.AuthorizationData` object to authenticate with Bing Ads services.
In this case the AuthenticationToken request header will be set to the corresponding OAuthTokens.AccessToken value.
This class implements the authorization code grant flow for Managing User Authentication with OAuth
documented at http://go.microsoft.com/fwlink/?LinkID=511609. This is a standard OAuth 2.0 flow and is defined in detail in the
Authorization Code Grant section of the OAuth 2.0 spec at https://tools.ietf.org/html/rfc6749#section-4.1.
For more information about registering a Bing Ads application, see http://go.microsoft.com/fwlink/?LinkID=511607.
"""
def __init__(self, client_id, oauth_tokens=None, env=PRODUCTION, oauth_scope=MSADS_MANAGE, tenant='common'):
""" Initializes a new instance of the this class with the specified client id.
:param client_id: The client identifier corresponding to your registered application.
:type client_id: str
:param oauth_tokens: Contains information about OAuth access tokens received from the Microsoft Account authorization service
:type oauth_tokens: OAuthTokens
"""
super(OAuthDesktopMobileAuthCodeGrant, self).__init__(
client_id,
None,
_UriOAuthService.REDIRECTION_URI[(env, oauth_scope)],
oauth_tokens=oauth_tokens,
env=env,
oauth_scope=oauth_scope,
tenant=tenant
)
|
(client_id, oauth_tokens=None, env='production', oauth_scope='msads.manage', tenant='common')
|
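A sketch of the interactive desktop authorization-code flow assembled from these classes; the client id and the pasted redirect URI are hypothetical:
from bingads.authorization import OAuthDesktopMobileAuthCodeGrant

oauth = OAuthDesktopMobileAuthCodeGrant(client_id='YOUR_CLIENT_ID')
print('Open in a browser and grant consent:', oauth.get_authorization_endpoint())
response_uri = input('Paste the full redirect URI here: ')
tokens = oauth.request_oauth_tokens_by_response_uri(response_uri=response_uri)
print('access token:', tokens.access_token)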
69,836 |
bingads.authorization
|
__init__
|
Initializes a new instance of this class with the specified client id.
:param client_id: The client identifier corresponding to your registered application.
:type client_id: str
:param oauth_tokens: Contains information about OAuth access tokens received from the Microsoft Account authorization service
:type oauth_tokens: OAuthTokens
|
def __init__(self, client_id, oauth_tokens=None, env=PRODUCTION, oauth_scope=MSADS_MANAGE, tenant='common'):
""" Initializes a new instance of the this class with the specified client id.
:param client_id: The client identifier corresponding to your registered application.
:type client_id: str
:param oauth_tokens: Contains information about OAuth access tokens received from the Microsoft Account authorization service
:type oauth_tokens: OAuthTokens
"""
super(OAuthDesktopMobileAuthCodeGrant, self).__init__(
client_id,
None,
_UriOAuthService.REDIRECTION_URI[(env, oauth_scope)],
oauth_tokens=oauth_tokens,
env=env,
oauth_scope=oauth_scope,
tenant=tenant
)
|
(self, client_id, oauth_tokens=None, env='production', oauth_scope='msads.manage', tenant='common')
|
69,838 |
bingads.authorization
|
get_authorization_endpoint
|
Gets the Microsoft Account authorization endpoint where the user should be navigated to give his or her consent.
:return: The Microsoft Account authorization endpoint.
:rtype: str
|
def get_authorization_endpoint(self):
""" Gets the Microsoft Account authorization endpoint where the user should be navigated to give his or her consent.
:return: The Microsoft Account authorization endpoint.
:rtype: str
"""
endpoint_url = _UriOAuthService.AUTHORIZE_URI[(self.environment, self._oauth_scope)]
if self.environment == PRODUCTION and (self._oauth_scope == MSADS_MANAGE or self._oauth_scope == ADS_MANAGE):
endpoint_url = endpoint_url.replace('common', self.tenant)
endpoint = str.format(
endpoint_url,
self._client_id,
'code',
quote_plus(self._redirection_uri)
)
return endpoint if self.state is None else endpoint + '&state=' + self.state
|
(self)
|
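The endpoint builder above appends '&state=...' whenever the grant object's state is set; assuming state is writable on these grant objects (the method body implies it), it can round-trip an anti-forgery nonce through the consent redirect. A sketch, with auth being a grant object from the sketch above:

import uuid
from urllib.parse import urlparse, parse_qs

auth.state = str(uuid.uuid4())                   # random nonce, echoed back on redirect
consent_url = auth.get_authorization_endpoint()  # ends with &state=<nonce>

def state_matches(response_uri, expected_state):
    # Compare the state returned on the redirect with the stored nonce
    # before exchanging the code (basic CSRF protection).
    returned = parse_qs(urlparse(response_uri).query).get('state', [None])[0]
    return returned == expected_state
|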
69,839 |
bingads.authorization
|
request_oauth_tokens_by_refresh_token
|
Retrieves OAuth access and refresh tokens from the Microsoft Account authorization service.
Using the specified refresh token.
For more information, see the Refreshing an Access Token section in the OAuth 2.0 spec
at https://tools.ietf.org/html/rfc6749#section-6.
:param refresh_token: The refresh token used to request new access and refresh tokens.
:type refresh_token: str
:return: OAuth tokens
:rtype: OAuthTokens
|
def request_oauth_tokens_by_refresh_token(self, refresh_token):
""" Retrieves OAuth access and refresh tokens from the Microsoft Account authorization service.
Using the specified refresh token.
For more information, see the Refreshing an Access Token section in the OAuth 2.0 spec
at https://tools.ietf.org/html/rfc6749#section-6.
:param refresh_token: The refresh token used to request new access and refresh tokens.
:type refresh_token: str
:return: OAuth tokens
:rtype: OAuthTokens
"""
self._oauth_tokens = _UriOAuthService.get_access_token(
client_id=self.client_id,
client_secret=self.client_secret,
grant_type='refresh_token',
refresh_token=refresh_token,
environment=self.environment,
scope=_UriOAuthService.SCOPE[(self.environment, self._oauth_scope)],
oauth_scope=self._oauth_scope,
tenant=self.tenant
)
if self.token_refreshed_callback is not None:
self.token_refreshed_callback(self.oauth_tokens)  # invoke the callback when the token is refreshed.
return self.oauth_tokens
|
(self, refresh_token)
|
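A sketch of a silent refresh using a previously persisted refresh token; load_refresh_token and save_refresh_token are hypothetical storage helpers, not SDK functions:

auth = OAuthDesktopMobileAuthCodeGrant(client_id='my_client_id')  # hypothetical id

tokens = auth.request_oauth_tokens_by_refresh_token(load_refresh_token())

# Authorization servers commonly rotate the refresh token; persist the new
# one so the next silent refresh keeps working.
save_refresh_token(tokens.refresh_token)
|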
69,840 |
bingads.authorization
|
request_oauth_tokens_by_response_uri
|
Retrieves OAuth access and refresh tokens from the Microsoft Account authorization service.
Using the specified authorization response redirection uri.
For more information, see the Authorization Response section in the OAuth 2.0 spec
at https://tools.ietf.org/html/rfc6749#section-4.1.
:param response_uri: The response redirection uri.
:type response_uri: str
:return: OAuth tokens
:rtype: OAuthTokens
|
def request_oauth_tokens_by_response_uri(self, response_uri, **kwargs):
""" Retrieves OAuth access and refresh tokens from the Microsoft Account authorization service.
Using the specified authorization response redirection uri.
For more information, see the Authorization Response section in the OAuth 2.0 spec
at https://tools.ietf.org/html/rfc6749#section-4.1.
:param response_uri: The response redirection uri.
:type response_uri: str
:return: OAuth tokens
:rtype: OAuthTokens
"""
parameters = parse_qs(urlparse(response_uri).query)
if 'code' not in parameters or len(parameters['code']) == 0:
raise ValueError(
"Uri passed doesn't contain code param. "
"Please make sure the uri has a code in it, for example http://myurl.com?code=123"
)
code = parameters['code'][0]
self._oauth_tokens = _UriOAuthService.get_access_token(
client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=self.redirection_uri,
grant_type='authorization_code',
environment=self.environment,
code=code,
oauth_scope=self._oauth_scope,
tenant=self.tenant,
**kwargs
)
if self.token_refreshed_callback is not None:
self.token_refreshed_callback(self.oauth_tokens)  # invoke the callback when the token is refreshed.
return self.oauth_tokens
|
(self, response_uri, **kwargs)
|
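Only the query string of the uri is inspected, so any uri carrying a code parameter works; a sketch with illustrative values, assuming auth from the sketch above:

# A redirect uri with a code is accepted...
tokens = auth.request_oauth_tokens_by_response_uri(
    'https://example.com/redirect?code=M.R3_BAY.abc123')  # illustrative code value

# ...while one without a code raises ValueError before any network call.
try:
    auth.request_oauth_tokens_by_response_uri('https://example.com/redirect')
except ValueError as exc:
    print(exc)
|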
69,841 |
bingads.authorization
|
OAuthDesktopMobileImplicitGrant
|
Represents an OAuth authorization object implementing the implicit grant flow for use in a desktop or mobile application.
You can use an instance of this class as the AuthorizationData.Authentication property
of an :class:`.AuthorizationData` object to authenticate with Bing Ads services.
In this case the AuthenticationToken request header will be set to the corresponding OAuthTokens.AccessToken value.
This class implements the implicit grant flow for Managing User Authentication with OAuth
documented at http://go.microsoft.com/fwlink/?LinkID=511608. This is a standard OAuth 2.0 flow and is defined in detail in the
Implicit Grant section of the OAuth 2.0 spec at https://tools.ietf.org/html/rfc6749#section-4.2.
For more information about registering a Bing Ads application, see http://go.microsoft.com/fwlink/?LinkID=511607.
|
class OAuthDesktopMobileImplicitGrant(OAuthAuthorization):
""" Represents an OAuth authorization object implementing the implicit grant flow for use in a desktop or mobile application.
You can use an instance of this class as the AuthorizationData.Authentication property
of an :class:`.AuthorizationData` object to authenticate with Bing Ads services.
In this case the AuthenticationToken request header will be set to the corresponding OAuthTokens.AccessToken value.
This class implements the implicit grant flow for Managing User Authentication with OAuth
documented at http://go.microsoft.com/fwlink/?LinkID=511608. This is a standard OAuth 2.0 flow and is defined in detail in the
Implicit Grant section of the OAuth 2.0 spec at https://tools.ietf.org/html/rfc6749#section-4.2.
For more information about registering a Bing Ads application, see http://go.microsoft.com/fwlink/?LinkID=511607.
"""
def __init__(self, client_id, oauth_tokens=None, env=PRODUCTION, oauth_scope=MSADS_MANAGE, tenant='common'):
""" Initializes a new instance of the this class with the specified client id.
:param client_id: The client identifier corresponding to your registered application.
:type client_id: str
:param oauth_tokens: Contains information about OAuth access tokens received from the Microsoft Account authorization service
:type oauth_tokens: OAuthTokens
"""
super(OAuthDesktopMobileImplicitGrant, self).__init__(client_id, oauth_tokens=oauth_tokens, env=env, oauth_scope=oauth_scope, tenant=tenant)
def get_authorization_endpoint(self):
""" Gets the Microsoft Account authorization endpoint where the user should be navigated to give his or her consent.
:return: The Microsoft Account authorization endpoint.
:rtype: str
"""
endpoint_url = _UriOAuthService.AUTHORIZE_URI[(self.environment, self._oauth_scope)]
if self.environment == PRODUCTION and (self._oauth_scope == MSADS_MANAGE or self._oauth_scope == ADS_MANAGE):
endpoint_url = endpoint_url.replace('common', self.tenant)
endpoint = str.format(
endpoint_url,
self.client_id,
'token',
_UriOAuthService.REDIRECTION_URI[(self.environment, self._oauth_scope)],
)
return endpoint if self.state is None else endpoint + '&state=' + self.state
def extract_access_token_from_uri(self, redirection_uri):
""" Extracts the access token from the specified redirect URI.
:param redirection_uri: The redirect URI that contains an access token.
:type redirection_uri: str
:return: The :class:`.OAuthTokens` object which contains both the access_token and access_token_expires_in_seconds properties.
:rtype: OAuthTokens
"""
parameters = parse_qs(urlparse(redirection_uri).fragment)
if 'access_token' not in parameters or len(parameters['access_token']) == 0:
raise ValueError(str.format("Input URI: {0} doesn't contain access_token parameter", redirection_uri))
access_token = parameters['access_token'][0]
if 'expires_in' not in parameters or len(parameters['expires_in']) == 0:
expires_in = None
else:
expires_in = parameters['expires_in'][0]
self._oauth_tokens = OAuthTokens(
access_token,
int(expires_in) if expires_in is not None else None
)
return self.oauth_tokens
@property
def redirection_uri(self):
return _UriOAuthService.REDIRECTION_URI[(self.environment, self._oauth_scope)]
|
(client_id, oauth_tokens=None, env='production', oauth_scope='msads.manage', tenant='common')
|
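A sketch of the implicit grant flow; unlike the code grant, the access token arrives in the uri fragment and there is no refresh token. The client id and redirect uri shown are illustrative:

from bingads.authorization import OAuthDesktopMobileImplicitGrant

auth = OAuthDesktopMobileImplicitGrant(client_id='my_client_id')  # hypothetical id
print(auth.get_authorization_endpoint())  # user grants consent in a browser

# The token comes back in the fragment (#access_token=...), not the query string.
redirect = 'https://example.com/redirect#access_token=EwB4A...&expires_in=3600'
tokens = auth.extract_access_token_from_uri(redirect)
print(tokens.access_token, tokens.access_token_expires_in_seconds)  # EwB4A... 3600
|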
69,842 |
bingads.authorization
|
__init__
|
Initializes a new instance of this class with the specified client id.
:param client_id: The client identifier corresponding to your registered application.
:type client_id: str
:param oauth_tokens: Contains information about OAuth access tokens received from the Microsoft Account authorization service
:type oauth_tokens: OAuthTokens
|
def __init__(self, client_id, oauth_tokens=None, env=PRODUCTION, oauth_scope=MSADS_MANAGE, tenant='common'):
""" Initializes a new instance of the this class with the specified client id.
:param client_id: The client identifier corresponding to your registered application.
:type client_id: str
:param oauth_tokens: Contains information about OAuth access tokens received from the Microsoft Account authorization service
:type oauth_tokens: OAuthTokens
"""
super(OAuthDesktopMobileImplicitGrant, self).__init__(client_id, oauth_tokens=oauth_tokens, env=env, oauth_scope=oauth_scope, tenant=tenant)
|
(self, client_id, oauth_tokens=None, env='production', oauth_scope='msads.manage', tenant='common')
|
69,844 |
bingads.authorization
|
extract_access_token_from_uri
|
Extracts the access token from the specified redirect URI.
:param redirection_uri: The redirect URI that contains an access token.
:type redirection_uri: str
:return: The :class:`.OAuthTokens` object which contains both the access_token and access_token_expires_in_seconds properties.
:rtype: OAuthTokens
|
def extract_access_token_from_uri(self, redirection_uri):
""" Extracts the access token from the specified redirect URI.
:param redirection_uri: The redirect URI that contains an access token.
:type redirection_uri: str
:return: The :class:`.OAuthTokens` object which contains both the access_token and access_token_expires_in_seconds properties.
:rtype: OAuthTokens
"""
parameters = parse_qs(urlparse(redirection_uri).fragment)
if 'access_token' not in parameters or len(parameters['access_token']) == 0:
raise ValueError(str.format("Input URI: {0} doesn't contain access_token parameter", redirection_uri))
access_token = parameters['access_token'][0]
if 'expires_in' not in parameters or len(parameters['expires_in']) == 0:
expires_in = None
else:
expires_in = parameters['expires_in'][0]
self._oauth_tokens = OAuthTokens(
access_token,
int(expires_in) if expires_in is not None else None
)
return self.oauth_tokens
|
(self, redirection_uri)
|
69,845 |
bingads.authorization
|
get_authorization_endpoint
|
Gets the Microsoft Account authorization endpoint where the user should be navigated to give his or her consent.
:return: The Microsoft Account authorization endpoint.
:rtype: str
|
def get_authorization_endpoint(self):
""" Gets the Microsoft Account authorization endpoint where the user should be navigated to give his or her consent.
:return: The Microsoft Account authorization endpoint.
:rtype: str
"""
endpoint_url = _UriOAuthService.AUTHORIZE_URI[(self.environment, self._oauth_scope)]
if self.environment == PRODUCTION and (self._oauth_scope == MSADS_MANAGE or self._oauth_scope == ADS_MANAGE):
endpoint_url = endpoint_url.replace('common', self.tenant)
endpoint = str.format(
endpoint_url,
self.client_id,
'token',
_UriOAuthService.REDIRECTION_URI[(self.environment, self._oauth_scope)],
)
return endpoint if self.state is None else endpoint + '&state=' + self.state
|
(self)
|
69,846 |
bingads.exceptions
|
OAuthTokenRequestException
|
This exception is thrown if an error was returned from the Microsoft Account authorization server.
|
class OAuthTokenRequestException(SdkException):
""" This exception is thrown if an error was returned from the Microsoft Account authorization server. """
def __init__(self, error_code, description):
""" Initializes a new instance of this class with the specified error code and OAuth error details.
:param error_code: The error code of the OAuth error.
:type error_code: str
:param description: The description of the OAuth error.
:type description: str
"""
super(OAuthTokenRequestException, self).__init__(
str.format("error_code: {0}, error_description: {1}", error_code, description))
self._error_code = error_code
self._error_description = description
@property
def error_code(self):
""" The error code of the OAuth error.
:rtype: str
"""
return self._error_code
@property
def error_description(self):
""" The description of the OAuth error.
:rtype: str
"""
return self._error_description
|
(error_code, description)
|
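Both token-request flows surface server-side OAuth errors through this exception type; a handling sketch, assuming auth and a stored refresh token from the sketches above:

from bingads.exceptions import OAuthTokenRequestException

try:
    tokens = auth.request_oauth_tokens_by_refresh_token(stored_refresh_token)
except OAuthTokenRequestException as exc:
    # e.g. an 'invalid_grant' error_code when the refresh token was revoked
    # or expired; a fresh interactive consent is required in that case.
    print(exc.error_code, exc.error_description)
    tokens = None
|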
69,847 |
bingads.exceptions
|
__init__
|
Initializes a new instance of this class with the specified error code and OAuth error details.
:param error_code: The error code of the OAuth error.
:type error_code: str
:param description: The description of the OAuth error.
:type description: str
|
def __init__(self, error_code, description):
""" Initializes a new instance of this class with the specified error code and OAuth error details.
:param error_code: The error code of the OAuth error.
:type error_code: str
:param description: The description of the OAuth error.
:type description: str
"""
super(OAuthTokenRequestException, self).__init__(
str.format("error_code: {0}, error_description: {1}", error_code, description))
self._error_code = error_code
self._error_description = description
|
(self, error_code, description)
|
69,849 |
bingads.authorization
|
OAuthTokens
|
Contains information about OAuth access tokens received from the Microsoft Account authorization service.
You can get OAuthTokens using the RequestAccessAndRefreshTokens method of
either the :class:`.OAuthDesktopMobileAuthCodeGrant` or :class:`.OAuthWebAuthCodeGrant` classes.
|
class OAuthTokens:
""" Contains information about OAuth access tokens received from the Microsoft Account authorization service.
You can get OAuthTokens using the RequestAccessAndRefreshTokens method of
either the :class:`.OAuthDesktopMobileAuthCodeGrant` or :class:`.OAuthWebAuthCodeGrant` classes.
"""
def __init__(self, access_token=None, access_token_expires_in_seconds=None, refresh_token=None, response_json=None):
""" Initialize an instance of this class.
:param access_token: (optional) OAuth access token that will be used for authorization in the Bing Ads services.
:type access_token: str or None
:param access_token_expires_in_seconds: (optional) The access token expiration time in seconds.
:type access_token_expires_in_seconds: int or None
:param refresh_token: (optional) OAuth refresh token that can be used to refresh an access token.
:type refresh_token: str or None
:param response_json: (optional) The whole JSON response returned with the access token request.
:type response_json: dict
"""
self._access_token = access_token
self._access_token_expires_in_seconds = access_token_expires_in_seconds
self._refresh_token = refresh_token
self._response_json = response_json
self._access_token_received_datetime = datetime.utcnow()
@property
def access_token_received_datetime(self):
""" The datetime when access token was received
:rtype: datetime
"""
return self._access_token_received_datetime
@property
def access_token(self):
""" OAuth access token that will be used for authorization in the Bing Ads services.
:rtype: str
"""
return self._access_token
@property
def access_token_expires_in_seconds(self):
""" Expiration time for the corresponding access token in seconds.
:rtype: int
"""
return self._access_token_expires_in_seconds
@property
def access_token_expired(self):
""" Whether the access token has been expired.
:rtype: bool
"""
return self.access_token_expires_in_seconds is not None and \
self.access_token_expires_in_seconds > 0 and \
datetime.utcnow() > self.access_token_received_datetime + timedelta(seconds = self.access_token_expires_in_seconds)
@property
def refresh_token(self):
""" OAuth refresh token that can be user to refresh an access token.
:rtype: str
"""
return self._refresh_token
@property
def response_json(self):
""" OAuth whole attribute that got along with access token.
:rtype: dictionary
"""
return self._response_json
|
(access_token=None, access_token_expires_in_seconds=None, refresh_token=None, response_json=None)
|
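access_token_expired compares the wall clock against the receipt time plus expires_in; a small sketch showing the equivalent computation, with an illustrative token value:

from datetime import datetime, timedelta
from bingads.authorization import OAuthTokens

tokens = OAuthTokens(access_token='EwB4A...', access_token_expires_in_seconds=3600)

print(tokens.access_token_expired)  # False: received just now, valid for an hour

# The property is equivalent to this explicit check:
expired = (tokens.access_token_expires_in_seconds is not None
           and tokens.access_token_expires_in_seconds > 0
           and datetime.utcnow() > tokens.access_token_received_datetime
           + timedelta(seconds=tokens.access_token_expires_in_seconds))
print(expired)  # False, same result
|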
69,850 |
bingads.authorization
|
__init__
|
Initialize an instance of this class.
:param access_token: (optional) OAuth access token that will be used for authorization in the Bing Ads services.
:type access_token: str or None
:param access_token_expires_in_seconds: (optional) The access token expiration time in seconds.
:type access_token_expires_in_seconds: int or None
:param refresh_token: (optional) OAuth refresh token that can be used to refresh an access token.
:type refresh_token: str or None
:param response_json: (optional) The whole JSON response returned with the access token request.
:type response_json: dict
|
def __init__(self, access_token=None, access_token_expires_in_seconds=None, refresh_token=None, response_json=None):
""" Initialize an instance of this class.
:param access_token: (optional) OAuth access token that will be used for authorization in the Bing Ads services.
:type access_token: str or None
:param access_token_expires_in_seconds: (optional) The access token expiration time in seconds.
:type access_token_expires_in_seconds: int or None
:param refresh_token: (optional) OAuth refresh token that can be used to refresh an access token.
:type refresh_token: str or None
:param response_json: (optional) The whole JSON response returned with the access token request.
:type response_json: dict
"""
self._access_token = access_token
self._access_token_expires_in_seconds = access_token_expires_in_seconds
self._refresh_token = refresh_token
self._response_json = response_json
self._access_token_received_datetime = datetime.utcnow()
|
(self, access_token=None, access_token_expires_in_seconds=None, refresh_token=None, response_json=None)
|
69,851 |
bingads.authorization
|
OAuthWebAuthCodeGrant
|
Represents an OAuth authorization object implementing the authorization code grant flow for use in a web application.
You can use an instance of this class as the AuthorizationData.Authentication property
of an :class:`.AuthorizationData` object to authenticate with Bing Ads services.
In this case the AuthenticationToken request header will be set to the corresponding OAuthTokens.AccessToken value.
This class implements the authorization code grant flow for Managing User Authentication with OAuth
documented at http://go.microsoft.com/fwlink/?LinkID=511609. This is a standard OAuth 2.0 flow and is defined in detail in the
Authorization Code Grant section of the OAuth 2.0 spec at https://tools.ietf.org/html/rfc6749#section-4.1.
For more information about registering a Bing Ads application, see http://go.microsoft.com/fwlink/?LinkID=511607.
|
class OAuthWebAuthCodeGrant(OAuthWithAuthorizationCode):
""" Represents an OAuth authorization object implementing the authorization code grant flow for use in a web application.
You can use an instance of this class as the AuthorizationData.Authentication property
of an :class:`.AuthorizationData` object to authenticate with Bing Ads services.
In this case the AuthenticationToken request header will be set to the corresponding OAuthTokens.AccessToken value.
This class implements the authorization code grant flow for Managing User Authentication with OAuth
documented at http://go.microsoft.com/fwlink/?LinkID=511609. This is a standard OAuth 2.0 flow and is defined in detail in the
Authorization Code Grant section of the OAuth 2.0 spec at https://tools.ietf.org/html/rfc6749#section-4.1.
For more information about registering a Bing Ads application, see http://go.microsoft.com/fwlink/?LinkID=511607.
"""
pass
|
(client_id, client_secret, redirection_uri, token_refreshed_callback=None, oauth_tokens=None, env='production', oauth_scope='msads.manage', tenant='common')
|
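The web variant adds nothing to the base class; it exists to signal a confidential client that holds a client secret and a server-side redirect uri. A sketch with hypothetical registration values:

from bingads.authorization import OAuthWebAuthCodeGrant

auth = OAuthWebAuthCodeGrant(
    client_id='my_client_id',                                # hypothetical values
    client_secret='my_client_secret',
    redirection_uri='https://myapp.example.com/oauth/callback',
)
print(auth.get_authorization_endpoint())
# Later, inside the web callback handler:
# tokens = auth.request_oauth_tokens_by_response_uri(full_callback_url)
|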
69,852 |
bingads.authorization
|
__init__
|
Initialize a new instance of this class.
:param client_id: The client identifier corresponding to your registered application.
:type client_id: str
:param client_secret: The client secret corresponding to your registered application, or None if your app is a
desktop or mobile app.
:type client_secret: str or None
:param redirection_uri: The URI to which the user of the app will be redirected after receiving user consent.
:type redirection_uri: str
:param token_refreshed_callback: (optional) Callback function invoked when oauth_tokens are refreshed.
:type token_refreshed_callback: (OAuthTokens)->None or None
:param oauth_tokens: Contains information about OAuth access tokens received from the Microsoft Account authorization service
:type oauth_tokens: OAuthTokens
:return:
|
def __init__(self, client_id, client_secret, redirection_uri, token_refreshed_callback=None, oauth_tokens=None, env=PRODUCTION, oauth_scope=MSADS_MANAGE, tenant="common"):
""" Initialize a new instance of this class.
:param client_id: The client identifier corresponding to your registered application.
:type client_id: str
:param client_secret: The client secret corresponding to your registered application, or None if your app is a
desktop or mobile app.
:type client_secret: str or None
:param redirection_uri: The URI to which the user of the app will be redirected after receiving user consent.
:type redirection_uri: str
:param token_refreshed_callback: (optional) Callback function invoked when oauth_tokens are refreshed.
:type token_refreshed_callback: (OAuthTokens)->None or None
:param oauth_tokens: Contains information about OAuth access tokens received from the Microsoft Account authorization service
:type oauth_tokens: OAuthTokens
:return:
"""
super(OAuthWithAuthorizationCode, self).__init__(client_id, oauth_tokens=oauth_tokens, env=env, oauth_scope=oauth_scope, tenant=tenant)
self._client_secret = client_secret
self._redirection_uri = redirection_uri
self._token_refreshed_callback = token_refreshed_callback
|
(self, client_id, client_secret, redirection_uri, token_refreshed_callback=None, oauth_tokens=None, env='production', oauth_scope='msads.manage', tenant='common')
|
69,857 |
bingads.authorization
|
OAuthWithAuthorizationCode
|
Represents a proxy to the Microsoft account authorization service.
Implement an extension of this class in compliance with the authorization code grant flow for Managing User
Authentication with OAuth documented at http://go.microsoft.com/fwlink/?LinkID=511609. This is a standard OAuth 2.0
flow and is defined in detail in the Authorization Code Grant section of the OAuth 2.0 spec at
https://tools.ietf.org/html/rfc6749#section-4.1.
For more information about registering a Bing Ads application, see http://go.microsoft.com/fwlink/?LinkID=511607.
|
class OAuthWithAuthorizationCode(OAuthAuthorization):
""" Represents a proxy to the Microsoft account authorization service.
Implement an extension of this class in compliance with the authorization code grant flow for Managing User
Authentication with OAuth documented at http://go.microsoft.com/fwlink/?LinkID=511609. This is a standard OAuth 2.0
flow and is defined in detail in the Authorization Code Grant section of the OAuth 2.0 spec at
https://tools.ietf.org/html/rfc6749#section-4.1.
For more information about registering a Bing Ads application, see http://go.microsoft.com/fwlink/?LinkID=511607.
"""
def __init__(self, client_id, client_secret, redirection_uri, token_refreshed_callback=None, oauth_tokens=None, env=PRODUCTION, oauth_scope=MSADS_MANAGE, tenant="common"):
""" Initialize a new instance of this class.
:param client_id: The client identifier corresponding to your registered application.
:type client_id: str
:param client_secret: The client secret corresponding to your registered application, or None if your app is a
desktop or mobile app.
:type client_secret: str or None
:param redirection_uri: The URI to which the user of the app will be redirected after receiving user consent.
:type redirection_uri: str
:param token_refreshed_callback: (optional) Callback function invoked when oauth_tokens are refreshed.
:type token_refreshed_callback: (OAuthTokens)->None or None
:param oauth_tokens: Contains information about OAuth access tokens received from the Microsoft Account authorization service
:type oauth_tokens: OAuthTokens
:return:
"""
super(OAuthWithAuthorizationCode, self).__init__(client_id, oauth_tokens=oauth_tokens, env=env, oauth_scope=oauth_scope, tenant=tenant)
self._client_secret = client_secret
self._redirection_uri = redirection_uri
self._token_refreshed_callback = token_refreshed_callback
def get_authorization_endpoint(self):
""" Gets the Microsoft Account authorization endpoint where the user should be navigated to give his or her consent.
:return: The Microsoft Account authorization endpoint.
:rtype: str
"""
endpoint_url = _UriOAuthService.AUTHORIZE_URI[(self.environment, self._oauth_scope)]
if self.environment == PRODUCTION and (self._oauth_scope == MSADS_MANAGE or self._oauth_scope == ADS_MANAGE):
endpoint_url = endpoint_url.replace('common', self.tenant)
endpoint = str.format(
endpoint_url,
self._client_id,
'code',
quote_plus(self._redirection_uri)
)
return endpoint if self.state is None else endpoint + '&state=' + self.state
def request_oauth_tokens_by_response_uri(self, response_uri, **kwargs):
""" Retrieves OAuth access and refresh tokens from the Microsoft Account authorization service.
Using the specified authorization response redirection uri.
For more information, see the Authorization Response section in the OAuth 2.0 spec
at https://tools.ietf.org/html/rfc6749#section-4.1.
:param response_uri: The response redirection uri.
:type response_uri: str
:return: OAuth tokens
:rtype: OAuthTokens
"""
parameters = parse_qs(urlparse(response_uri).query)
if 'code' not in parameters or len(parameters['code']) == 0:
raise ValueError(
"Uri passed doesn't contain code param. "
"Please make sure the uri has a code in it, for example http://myurl.com?code=123"
)
code = parameters['code'][0]
self._oauth_tokens = _UriOAuthService.get_access_token(
client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=self.redirection_uri,
grant_type='authorization_code',
environment=self.environment,
code=code,
oauth_scope=self._oauth_scope,
tenant=self.tenant,
**kwargs
)
if self.token_refreshed_callback is not None:
self.token_refreshed_callback(self.oauth_tokens)  # invoke the callback when the token is refreshed.
return self.oauth_tokens
def request_oauth_tokens_by_refresh_token(self, refresh_token):
""" Retrieves OAuth access and refresh tokens from the Microsoft Account authorization service.
Using the specified refresh token.
For more information, see the Refreshing an Access Token section in the OAuth 2.0 spec
at https://tools.ietf.org/html/rfc6749#section-6.
:param refresh_token: The refresh token used to request new access and refresh tokens.
:type refresh_token: str
:return: OAuth tokens
:rtype: OAuthTokens
"""
self._oauth_tokens = _UriOAuthService.get_access_token(
client_id=self.client_id,
client_secret=self.client_secret,
grant_type='refresh_token',
refresh_token=refresh_token,
environment=self.environment,
scope=_UriOAuthService.SCOPE[(self.environment, self._oauth_scope)],
oauth_scope=self._oauth_scope,
tenant=self.tenant
)
if self.token_refreshed_callback is not None:
self.token_refreshed_callback(self.oauth_tokens)  # invoke the callback when the token is refreshed.
return self.oauth_tokens
@property
def client_secret(self):
""" The client secret corresponding to your registered application, or None if your app is a desktop or mobile app.
:rtype: str
"""
return self._client_secret
@property
def redirection_uri(self):
""" The URI to which your client browser will be redirected after receiving user consent.
:rtype: str
"""
return self._redirection_uri
@property
def token_refreshed_callback(self):
""" The callback function registered, will be invoked after oauth tokens has been refreshed.
:rtype: OAuthTokens->None
"""
return self._token_refreshed_callback
@client_secret.setter
def client_secret(self, client_secret):
self._client_secret = client_secret
@redirection_uri.setter
def redirection_uri(self, redirection_uri):
self._redirection_uri = redirection_uri
@token_refreshed_callback.setter
def token_refreshed_callback(self, token_refreshed_callback):
self._token_refreshed_callback = token_refreshed_callback
|
(client_id, client_secret, redirection_uri, token_refreshed_callback=None, oauth_tokens=None, env='production', oauth_scope='msads.manage', tenant='common')
|
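token_refreshed_callback fires after every successful token request, which makes it a natural hook for persisting the rotating refresh token; a sketch writing to a hypothetical local file:

import json

def save_tokens(oauth_tokens):
    # Persist the refresh token so the next run can refresh silently
    # instead of prompting the user for consent again.
    with open('refresh_token.json', 'w') as f:
        json.dump({'refresh_token': oauth_tokens.refresh_token}, f)

auth = OAuthDesktopMobileAuthCodeGrant(client_id='my_client_id')  # hypothetical id
auth.token_refreshed_callback = save_tokens  # invoked by both request_* methods
|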
69,863 |
bingads.authorization
|
PasswordAuthentication
|
Represents a legacy Bing Ads authentication method using user name and password.
You can use an instance of this class as the authentication property of a :class:`.AuthorizationData` object to
authenticate with Bing Ads services.
Existing users with legacy Bing Ads credentials may continue to specify the UserName and Password header elements.
In future versions of the API, Bing Ads will transition exclusively to Microsoft Account authentication.
New customers are required to sign up for Bing Ads with a Microsoft Account, and to manage those accounts you must
use OAuth.
For example instead of using this :class:`.PasswordAuthentication` class, you would authenticate with an instance
of either :class:`.OAuthDesktopMobileAuthCodeGrant`, :class:`.OAuthDesktopMobileImplicitGrant`,
or :class:`.OAuthWebAuthCodeGrant`.
|
class PasswordAuthentication(Authentication):
""" Represents a legacy Bing Ads authentication method using user name and password.
You can use an instance of this class as the authentication property of a :class:`.AuthorizationData` object to
authenticate with Bing Ads services.
Existing users with legacy Bing Ads credentials may continue to specify the UserName and Password header elements.
In future versions of the API, Bing Ads will transition exclusively to Microsoft Account authentication.
New customers are required to sign up for Bing Ads with a Microsoft Account, and to manage those accounts you must
use OAuth.
For example instead of using this :class:`.PasswordAuthentication` class, you would authenticate with an instance
of either :class:`.OAuthDesktopMobileAuthCodeGrant`, :class:`.OAuthDesktopMobileImplicitGrant`,
or :class:`.OAuthWebAuthCodeGrant`.
"""
def __init__(self, user_name, password):
""" Initializes a new instance of this class using the specified user name and password.
:param user_name: The Bing Ads user's sign-in user name. You may not set this element to a Microsoft account.
:type user_name: str
:param password: The Bing Ads user's sign-in password.
:type password: str
"""
self._user_name = user_name
self._password = password
@property
def user_name(self):
""" The Bing Ads user's sign-in user name. You may not set this element to a Microsoft account.
:rtype: str
"""
return self._user_name
@property
def password(self):
""" The Bing Ads user's sign-in password.
:rtype: str
"""
return self._password
def enrich_headers(self, headers):
""" Sets the user name and password as headers elements for Bing Ads service or bulk file upload operation. """
headers['UserName'] = self.user_name
headers['Password'] = self.password
|
(user_name, password)
|
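A sketch of the legacy flow; calling enrich_headers on a plain dict shows exactly which header elements are set. The credentials shown are placeholders:

from bingads.authorization import PasswordAuthentication

auth = PasswordAuthentication(user_name='legacy_user', password='secret')  # placeholders

headers = {}
auth.enrich_headers(headers)
print(headers)  # {'UserName': 'legacy_user', 'Password': 'secret'}
|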
69,864 |
bingads.authorization
|
__init__
|
Initializes a new instance of this class using the specified user name and password.
:param user_name: The Bing Ads user's sign-in user name. You may not set this element to a Microsoft account.
:type user_name: str
:param password: The Bing Ads user's sign-in password.
:type password: str
|
def __init__(self, user_name, password):
""" Initializes a new instance of this class using the specified user name and password.
:param user_name: The Bing Ads user's sign-in user name. You may not set this element to a Microsoft account.
:type user_name: str
:param password: The Bing Ads user's sign-in password.
:type password: str
"""
self._user_name = user_name
self._password = password
|
(self, user_name, password)
|
69,865 |
bingads.authorization
|
enrich_headers
|
Sets the user name and password as header elements for a Bing Ads service or bulk file upload operation.
|
def enrich_headers(self, headers):
""" Sets the user name and password as headers elements for Bing Ads service or bulk file upload operation. """
headers['UserName'] = self.user_name
headers['Password'] = self.password
|
(self, headers)
|
69,866 |
suds.sudsobject
|
Property
| null |
class Property(Object):
def __init__(self, value):
Object.__init__(self)
self.value = value
def items(self):
for item in self:
if item[0] != "value":
yield item
def get(self):
return self.value
def set(self, value):
self.value = value
return self
|
(value)
|
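Property wraps a single value while remaining a regular suds Object; items() filters out the internal 'value' slot so only extra attributes are yielded. A quick sketch, assuming suds is installed:

from suds.sudsobject import Property

p = Property(42)
print(p.get())          # 42
p.set(43)               # set() returns self, so calls can be chained
p.note = 'metadata'     # extra attributes live alongside the wrapped value
print(list(p.items()))  # [('note', 'metadata')] -- 'value' is filtered out
|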
69,867 |
suds.sudsobject
|
__contains__
| null |
def __contains__(self, name):
return name in self.__keylist__
|
(self, name)
|