index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
---|---|---|---|---|---|
50,515 |
capstone
|
__getattr__
| null |
def __getattr__(self, name):
if not self._cs._detail:
raise CsError(CS_ERR_DETAIL)
attr = object.__getattribute__
if not attr(self, '_cs')._detail:
raise AttributeError(name)
_dict = attr(self, '__dict__')
if 'operands' not in _dict:
self.__gen_detail()
if name not in _dict:
if self._raw.id == 0:
raise CsError(CS_ERR_SKIPDATA)
raise AttributeError(name)
return _dict[name]
|
(self, name)
|
50,516 |
capstone
|
__init__
| null |
def __init__(self, cs, all_info):
    """Wrap one raw cs_insn record produced by the core.

    cs: the owning engine object; all_info: the _cs_insn ctypes record,
    which is bitwise-copied so this object outlives the core's cs_free().
    """
    self._raw = copy_ctypes(all_info)
    self._cs = cs
    # Only real instructions (id != 0) decoded in detail mode carry a
    # detail record; skipdata pseudo-instructions do not.
    if self._cs._detail and self._raw.id != 0:
        # Deep-copy the pointed-to detail struct as well: copy_ctypes
        # above only duplicated the pointer, not its target.
        self._raw.detail = ctypes.pointer(all_info.detail._type_())
        ctypes.memmove(ctypes.byref(self._raw.detail[0]), ctypes.byref(all_info.detail[0]), ctypes.sizeof(type(all_info.detail[0])))
|
(self, cs, all_info)
|
50,517 |
capstone
|
__repr__
| null |
def __repr__(self):
return '<CsInsn 0x%x [%s]: %s %s>' % (self.address, self.bytes.hex(), self.mnemonic, self.op_str)
|
(self)
|
50,518 |
capstone
|
errno
| null |
def errno(self):
    # Ask the core for the last error code recorded on this handle.
    return _cs.cs_errno(self._cs.csh)
|
(self)
|
50,519 |
capstone
|
group
| null |
def group(self, group_id):
    """Return True when this instruction belongs to group *group_id*."""
    raw_id, diet = self._raw.id, self._cs._diet
    if raw_id == 0:
        raise CsError(CS_ERR_SKIPDATA)  # skipdata: no group info available
    if diet:
        raise CsError(CS_ERR_DIET)  # diet builds strip group information
    return group_id in self.groups
|
(self, group_id)
|
50,520 |
capstone
|
group_name
| null |
def group_name(self, group_id, default=None):
    """Return the human-readable name of *group_id*, or *default*."""
    if self._cs._diet:
        # Group names are compiled out of diet engines.
        raise CsError(CS_ERR_DIET)
    raw = _cs.cs_group_name(self._cs.csh, group_id)
    return _ascii_name_or_default(raw, default)
|
(self, group_id, default=None)
|
50,521 |
capstone
|
insn_name
| null |
def insn_name(self, default=None):
    """Return this instruction's mnemonic name, or *default* for skipdata."""
    if self._cs._diet:
        # Instruction names are compiled out of diet engines.
        raise CsError(CS_ERR_DIET)
    if self._raw.id == 0:
        # Data pseudo-instruction: no real name to look up.
        return default
    raw = _cs.cs_insn_name(self._cs.csh, self.id)
    return _ascii_name_or_default(raw, default)
|
(self, default=None)
|
50,522 |
capstone
|
op_count
| null |
def op_count(self, op_type):
    """Count how many operands of this instruction have type *op_type*."""
    if self._raw.id == 0:
        raise CsError(CS_ERR_SKIPDATA)  # skipdata has no operands
    return sum(1 for op in self.operands if op.type == op_type)
|
(self, op_type)
|
50,523 |
capstone
|
op_find
| null |
def op_find(self, op_type, position):
    """Return the *position*-th (1-based) operand of type *op_type*.

    Returns None when fewer than *position* matching operands exist.
    """
    if self._raw.id == 0:
        raise CsError(CS_ERR_SKIPDATA)  # skipdata has no operands
    seen = 0
    for op in self.operands:
        if op.type != op_type:
            continue
        seen += 1
        if seen == position:
            return op
    return None
|
(self, op_type, position)
|
50,524 |
capstone
|
reg_name
| null |
def reg_name(self, reg_id, default=None):
    """Return the human-readable name of register *reg_id*, or *default*."""
    if self._cs._diet:
        # Register names are compiled out of diet engines.
        raise CsError(CS_ERR_DIET)
    raw = _cs.cs_reg_name(self._cs.csh, reg_id)
    return _ascii_name_or_default(raw, default)
|
(self, reg_id, default=None)
|
50,525 |
capstone
|
reg_read
| null |
def reg_read(self, reg_id):
    """Return True when *reg_id* appears in this instruction's regs_read."""
    if self._raw.id == 0:
        raise CsError(CS_ERR_SKIPDATA)  # skipdata has no register info
    if self._cs._diet:
        raise CsError(CS_ERR_DIET)  # diet builds strip register lists
    return any(r == reg_id for r in self.regs_read)
|
(self, reg_id)
|
50,526 |
capstone
|
reg_write
| null |
def reg_write(self, reg_id):
    """Return True when *reg_id* appears in this instruction's regs_write."""
    if self._raw.id == 0:
        raise CsError(CS_ERR_SKIPDATA)  # skipdata has no register info
    if self._cs._diet:
        raise CsError(CS_ERR_DIET)  # diet builds strip register lists
    return any(r == reg_id for r in self.regs_write)
|
(self, reg_id)
|
50,527 |
capstone
|
regs_access
| null |
def regs_access(self):
    """Return ([regs read], [regs written]) for this instruction.

    Delegates to the core's cs_regs_access(). Raises
    CsError(CS_ERR_SKIPDATA) for data pseudo-instructions, or the
    core's error status when the call fails.
    """
    if self._raw.id == 0:
        raise CsError(CS_ERR_SKIPDATA)
    # Fixed 64-entry scratch buffers; the core reports how many leading
    # entries it filled through the c_uint8 out-parameters.
    regs_read = (ctypes.c_uint16 * 64)()
    regs_read_count = ctypes.c_uint8()
    regs_write = (ctypes.c_uint16 * 64)()
    regs_write_count = ctypes.c_uint8()
    status = _cs.cs_regs_access(self._cs.csh, self._raw, ctypes.byref(regs_read), ctypes.byref(regs_read_count), ctypes.byref(regs_write), ctypes.byref(regs_write_count))
    if status != CS_ERR_OK:
        raise CsError(status)
    # Trim the fixed-size buffers down to their populated prefixes.
    if regs_read_count.value > 0:
        regs_read = regs_read[:regs_read_count.value]
    else:
        regs_read = []
    if regs_write_count.value > 0:
        regs_write = regs_write[:regs_write_count.value]
    else:
        regs_write = []
    return (regs_read, regs_write)
|
(self)
|
50,528 |
capstone
|
_ascii_name_or_default
| null |
def _ascii_name_or_default(name, default):
return default if name is None else name.decode('ascii')
|
(name, default)
|
50,529 |
capstone
|
_cs_arch
| null |
class _cs_arch(ctypes.Union):
    # Architecture-specific detail payload of cs_detail. For a given
    # instruction only the member matching the engine's arch is valid.
    # NOTE(review): member order/types must mirror the core's C union —
    # confirm against capstone.h when updating.
    _fields_ = (
        ('arm64', arm64.CsArm64),
        ('arm', arm.CsArm),
        ('m68k', m68k.CsM68K),
        ('mips', mips.CsMips),
        ('x86', x86.CsX86),
        ('ppc', ppc.CsPpc),
        ('sparc', sparc.CsSparc),
        ('sysz', systemz.CsSysz),
        ('xcore', xcore.CsXcore),
        ('tms320c64x', tms320c64x.CsTMS320C64x),
        ('m680x', m680x.CsM680x),
        ('evm', evm.CsEvm),
        ('mos65xx', mos65xx.CsMOS65xx),
        ('wasm', wasm.CsWasm),
        ('bpf', bpf.CsBPF),
        ('riscv', riscv.CsRISCV),
        ('sh', sh.CsSH),
        ('tricore', tricore.CsTriCore),
    )
| null |
50,530 |
capstone
|
_cs_detail
| null |
class _cs_detail(ctypes.Structure):
    # Python mirror of the core's cs_detail struct. The *_count fields
    # say how many leading entries of each fixed-size array are valid.
    # NOTE(review): layout must stay in sync with capstone.h.
    _fields_ = (
        ('regs_read', ctypes.c_uint16 * 20),
        ('regs_read_count', ctypes.c_ubyte),
        ('regs_write', ctypes.c_uint16 * 20),
        ('regs_write_count', ctypes.c_ubyte),
        ('groups', ctypes.c_ubyte * 8),
        ('groups_count', ctypes.c_ubyte),
        ('writeback', ctypes.c_bool),
        ('arch', _cs_arch),
    )
| null |
50,531 |
capstone
|
_cs_insn
| null |
class _cs_insn(ctypes.Structure):
    # Python mirror of the core's cs_insn struct: one decoded instruction.
    # 'detail' is NULL unless the engine runs in detail mode.
    # NOTE(review): layout must stay in sync with capstone.h.
    _fields_ = (
        ('id', ctypes.c_uint),
        ('address', ctypes.c_uint64),
        ('size', ctypes.c_uint16),
        ('bytes', ctypes.c_ubyte * 24),
        ('mnemonic', ctypes.c_char * 32),
        ('op_str', ctypes.c_char * 160),
        ('detail', ctypes.POINTER(_cs_detail)),
    )
| null |
50,532 |
capstone
|
_cs_opt_mnem
| null |
class _cs_opt_mnem(ctypes.Structure):
    # Option payload pairing an instruction id with a replacement
    # mnemonic — presumably for the core's mnemonic-override option;
    # confirm against capstone.h.
    _fields_ = (
        ('id', ctypes.c_uint),
        ('mnemonic', ctypes.c_char_p),
    )
| null |
50,533 |
capstone
|
_cs_opt_skipdata
| null |
class _cs_opt_skipdata(ctypes.Structure):
    # Option payload configuring skipdata mode: the mnemonic shown for
    # data bytes plus an optional user callback and its opaque argument.
    # NOTE(review): layout must stay in sync with capstone.h.
    _fields_ = (
        ('mnemonic', ctypes.c_char_p),
        ('callback', CS_SKIPDATA_CALLBACK),
        ('user_data', ctypes.c_void_p),
    )
| null |
50,534 |
capstone
|
_dummy_cs
| null |
class _dummy_cs(object):
def __init__(self, csh, arch):
self.csh = csh
self.arch = arch
self._detail = False
|
(csh, arch)
|
50,535 |
capstone
|
__init__
| null |
def __init__(self, csh, arch):
self.csh = csh
self.arch = arch
self._detail = False
|
(self, csh, arch)
|
50,536 |
capstone
|
_load_lib
| null |
def _load_lib(path):
    """Try to load the capstone shared library from directory *path*.

    Returns the loaded CDLL, or None when no candidate file exists there.
    """
    lib_file = join(path, _lib)
    if os.path.exists(lib_file):
        return ctypes.cdll.LoadLibrary(lib_file)
    else:
        # On Linux the library may be installed only under its versioned
        # name (e.g. libcapstone.so.5); retry with the major version suffix.
        if lib_file.endswith('.so'):
            if os.path.exists(lib_file + '.{}'.format(CS_VERSION_MAJOR)):
                return ctypes.cdll.LoadLibrary(lib_file + '.{}'.format(CS_VERSION_MAJOR))
        return None
|
(path)
|
50,537 |
capstone
|
_setup_prototype
| null |
def _setup_prototype(lib, fname, restype, *argtypes):
getattr(lib, fname).restype = restype
getattr(lib, fname).argtypes = argtypes
|
(lib, fname, restype, *argtypes)
|
50,544 |
capstone
|
copy_ctypes
|
Returns a new ctypes object which is a bitwise copy of an existing one
|
def copy_ctypes(src):
    """Return a new ctypes object that is a bitwise copy of *src*."""
    cls = type(src)
    dst = cls()
    ctypes.memmove(ctypes.byref(dst), ctypes.byref(src), ctypes.sizeof(cls))
    return dst
|
(src)
|
50,545 |
capstone
|
copy_ctypes_list
| null |
def copy_ctypes_list(src):
    """Bitwise-copy every ctypes object in *src* into a fresh list."""
    return list(map(copy_ctypes, src))
|
(src)
|
50,546 |
capstone
|
cs_disasm_lite
| null |
def cs_disasm_lite(arch, mode, code, offset, count=0):
    """Generator: disassemble *code* starting at *offset* and yield
    lightweight (address, size, mnemonic, op_str) tuples, without
    building full CsInsn objects. count=0 disassembles everything.

    Raises CsError on binding/core version mismatch, on diet builds
    (which strip mnemonic/op_str), or on any core error status.
    """
    # verify version compatibility with the core before doing anything
    (major, minor, _combined) = cs_version()
    if major != CS_API_MAJOR or minor != CS_API_MINOR:
        # our binding version is different from the core's API version
        raise CsError(CS_ERR_VERSION)
    if cs_support(CS_SUPPORT_DIET):
        # Diet engine cannot provide @mnemonic & @op_str
        raise CsError(CS_ERR_DIET)
    csh = ctypes.c_size_t()
    status = _cs.cs_open(arch, mode, ctypes.byref(csh))
    if status != CS_ERR_OK:
        raise CsError(status)
    all_insn = ctypes.POINTER(_cs_insn)()
    res = _cs.cs_disasm(csh, code, len(code), offset, count, ctypes.byref(all_insn))
    if res > 0:
        try:
            for i in range(res):
                insn = all_insn[i]
                yield (insn.address, insn.size, insn.mnemonic.decode('ascii'), insn.op_str.decode('ascii'))
        finally:
            # Always release the core-allocated array, even when the
            # consumer abandons the generator mid-iteration.
            _cs.cs_free(all_insn, res)
    else:
        status = _cs.cs_errno(csh)
        if status != CS_ERR_OK:
            raise CsError(status)
        # The unreachable `yield` below keeps this function a generator
        # even on the early-return path, so errors surface lazily on
        # first iteration rather than at call time.
        return
        yield
    # NOTE(review): cs_close only runs when res > 0 and the generator is
    # exhausted; the empty-result branch returns without closing the
    # handle — possible handle leak, confirm against upstream capstone.
    status = _cs.cs_close(ctypes.byref(csh))
    if status != CS_ERR_OK:
        raise CsError(status)
|
(arch, mode, code, offset, count=0)
|
50,547 |
capstone
|
cs_disasm_quick
| null |
def cs_disasm_quick(arch, mode, code, offset, count=0):
    """Generator: disassemble *code* starting at *offset* and yield
    CsInsn objects, opening a temporary engine handle internally.
    count=0 disassembles everything.

    Raises CsError on binding/core version mismatch or on any core
    error status.
    """
    # verify version compatibility with the core before doing anything
    (major, minor, _combined) = cs_version()
    if major != CS_API_MAJOR or minor != CS_API_MINOR:
        # our binding version is different from the core's API version
        raise CsError(CS_ERR_VERSION)
    csh = ctypes.c_size_t()
    status = _cs.cs_open(arch, mode, ctypes.byref(csh))
    if status != CS_ERR_OK:
        raise CsError(status)
    all_insn = ctypes.POINTER(_cs_insn)()
    res = _cs.cs_disasm(csh, code, len(code), offset, count, ctypes.byref(all_insn))
    if res > 0:
        try:
            for i in range(res):
                # _dummy_cs keeps detail mode off; CsInsn copies the raw
                # record so it survives the cs_free below.
                yield CsInsn(_dummy_cs(csh, arch), all_insn[i])
        finally:
            _cs.cs_free(all_insn, res)
    else:
        status = _cs.cs_errno(csh)
        if status != CS_ERR_OK:
            raise CsError(status)
        # Unreachable `yield` keeps the function a generator even on the
        # early-return path, so errors surface on first iteration.
        return
        yield
    # NOTE(review): cs_close only runs when res > 0 and the generator is
    # exhausted; the empty-result branch returns without closing the
    # handle — possible handle leak, confirm against upstream capstone.
    status = _cs.cs_close(ctypes.byref(csh))
    if status != CS_ERR_OK:
        raise CsError(status)
|
(arch, mode, code, offset, count=0)
|
50,548 |
capstone
|
cs_support
| null |
def cs_support(query):
    # Ask the loaded core whether a feature (an arch constant, or a build
    # option such as diet / x86-reduce) was compiled in.
    return _cs.cs_support(query)
|
(query)
|
50,549 |
capstone
|
cs_version
| null |
def cs_version():
    """Return (major, minor, combined) version of the loaded core library."""
    major = ctypes.c_int()
    minor = ctypes.c_int()
    # The core fills the two out-params and returns a combined value.
    combined = _cs.cs_version(ctypes.byref(major), ctypes.byref(minor))
    return (major.value, minor.value, combined)
|
()
|
50,551 |
capstone
|
debug
| null |
def debug():
    """Return a capability string for this binding: engine flavour
    (diet/standard), compiled-in architectures, and core vs binding
    API versions."""
    # is Cython there?
    try:
        from . import ccapstone
        return ccapstone.debug()
    # NOTE(review): bare except is deliberate best-effort — any failure
    # importing or running the Cython module falls back to pure Python.
    except:
        # no Cython, fallback to Python code below
        pass
    if cs_support(CS_SUPPORT_DIET):
        diet = "diet"
    else:
        diet = "standard"
    archs = {
        "arm": CS_ARCH_ARM, "arm64": CS_ARCH_ARM64, "m68k": CS_ARCH_M68K,
        "mips": CS_ARCH_MIPS, "ppc": CS_ARCH_PPC, "sparc": CS_ARCH_SPARC,
        "sysz": CS_ARCH_SYSZ, 'xcore': CS_ARCH_XCORE, "tms320c64x": CS_ARCH_TMS320C64X,
        "m680x": CS_ARCH_M680X, 'evm': CS_ARCH_EVM, 'mos65xx': CS_ARCH_MOS65XX,
        'bpf': CS_ARCH_BPF, 'riscv': CS_ARCH_RISCV, 'tricore': CS_ARCH_TRICORE,
        'wasm': CS_ARCH_WASM, 'sh': CS_ARCH_SH,
    }
    all_archs = ""
    keys = archs.keys()
    for k in sorted(keys):
        if cs_support(archs[k]):
            all_archs += "-%s" % k
    # x86 is handled separately so the reduced build can be flagged.
    if cs_support(CS_ARCH_X86):
        all_archs += "-x86"
        if cs_support(CS_SUPPORT_X86_REDUCE):
            all_archs += "_reduce"
    (major, minor, _combined) = cs_version()
    return "python-%s%s-c%u.%u-b%u.%u" % (diet, all_archs, major, minor, CS_API_MAJOR, CS_API_MINOR)
|
()
|
50,552 |
posixpath
|
dirname
|
Returns the directory component of a pathname
|
def dirname(p):
    """Return the directory component of a pathname."""
    p = os.fspath(p)
    sep = _get_sep(p)
    tail_start = p.rfind(sep) + 1
    head = p[:tail_start]
    # Trim trailing separators, but keep a head made up entirely of
    # separators (e.g. '/' or '//' roots) intact.
    all_seps = sep * len(head)
    if head and head != all_seps:
        head = head.rstrip(sep)
    return head
|
(p)
|
50,557 |
posixpath
|
join
|
Join two or more pathname components, inserting '/' as needed.
If any component is an absolute path, all previous path components
will be discarded. An empty last part will result in a path that
ends with a separator.
|
def join(a, *p):
    """Join two or more pathname components, inserting '/' as needed.
    If any component is an absolute path, all previous path components
    will be discarded. An empty last part will result in a path that
    ends with a separator."""
    a = os.fspath(a)
    sep = _get_sep(a)
    path = a
    try:
        if not p:
            # Force a str/bytes mix error even with no extra components.
            path[:0] + sep  #23780: Ensure compatible data type even if p is null.
        for b in map(os.fspath, p):
            if b.startswith(sep):
                # Absolute component: discard everything before it.
                path = b
            elif not path or path.endswith(sep):
                path += b
            else:
                path += sep + b
    except (TypeError, AttributeError, BytesWarning):
        # Mixing str and bytes components lands here; report it clearly
        # before re-raising the original error.
        genericpath._check_arg_types('join', a, *p)
        raise
    return path
|
(a, *p)
|
50,576 |
posixpath
|
split
|
Split a pathname. Returns tuple "(head, tail)" where "tail" is
everything after the final slash. Either part may be empty.
|
def split(p):
    """Split a pathname. Returns tuple "(head, tail)" where "tail" is
    everything after the final slash. Either part may be empty."""
    p = os.fspath(p)
    sep = _get_sep(p)
    cut = p.rfind(sep) + 1
    head, tail = p[:cut], p[cut:]
    # Keep an all-separator head (filesystem root) intact; otherwise
    # trim the trailing separators off the directory part.
    if head and head != sep * len(head):
        head = head.rstrip(sep)
    return head, tail
|
(p)
|
50,578 |
platform
|
system
|
Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
An empty string is returned if the value cannot be determined.
|
def system():
    """ Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
    An empty string is returned if the value cannot be determined.
    """
    # Delegate to uname(), which performs the platform probing.
    return uname().system
|
()
|
50,585 |
capstone
|
version_bind
| null |
def version_bind():
    """Return this binding's (major, minor, combined) API version."""
    combined = (CS_API_MAJOR << 8) + CS_API_MINOR
    return (CS_API_MAJOR, CS_API_MINOR, combined)
|
()
|
50,592 |
pytest_translations
|
pytest_addoption
| null |
def pytest_addoption(parser):
    """Register the --translations command line flag with pytest."""
    general = parser.getgroup("general")
    general.addoption(
        "--translations",
        action="store_true",
        help="perform some checks on .mo and .po files",
    )
|
(parser)
|
50,593 |
pytest_translations
|
pytest_collect_file
| null |
def pytest_collect_file(file_path, parent):
    """Collect .mo and .po files as translation test files, but only
    when the --translations option is enabled."""
    from .mo_files import MoFile
    from .po_files import PoFile
    if not parent.config.option.translations:
        return None
    suffix = file_path.suffix
    if suffix == ".mo":
        return MoFile.from_parent(path=file_path, parent=parent)
    if suffix == ".po":
        return PoFile.from_parent(path=file_path, parent=parent)
    return None
|
(file_path, parent)
|
50,594 |
pytest_translations
|
pytest_configure
| null |
def pytest_configure(config):
    """Declare the 'translations' marker so pytest does not warn on it."""
    config.addinivalue_line(
        "markers", "translations: translation tests"
    )
|
(config)
|
50,595 |
yellowbrick.target.class_balance
|
ClassBalance
|
One of the biggest challenges for classification models is an imbalance of
classes in the training data. The ClassBalance visualizer shows the
relationship of the support for each class in both the training and test
data by displaying how frequently each class occurs as a bar graph.
The ClassBalance visualizer can be displayed in two modes:
1. Balance mode: show the frequency of each class in the dataset.
2. Compare mode: show the relationship of support in train and test data.
These modes are determined by what is passed to the ``fit()`` method.
Parameters
----------
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
labels: list, optional
A list of class names for the x-axis if the target is already encoded.
Ensure that the labels are ordered lexicographically with respect to
the values in the target. A common use case is to pass
``LabelEncoder.classes_`` as this parameter. If not specified, the labels
in the data will be used.
colors: list of strings
Specify colors for the barchart (will override colormap if both are provided).
colormap : string or matplotlib cmap
Specify a colormap to color the classes.
kwargs: dict, optional
Keyword arguments passed to the super class. Here, used
to colorize the bars in the histogram.
Attributes
----------
classes_ : array-like
The actual unique classes discovered in the target.
support_ : array of shape (n_classes,) or (2, n_classes)
A table representing the support of each class in the target. It is a
vector when in balance mode, or a table with two rows in compare mode.
Examples
--------
To simply observe the balance of classes in the target:
>>> viz = ClassBalance().fit(y)
>>> viz.show()
To compare the relationship between training and test data:
>>> _, _, y_train, y_test = train_test_split(X, y, test_size=0.2)
>>> viz = ClassBalance()
>>> viz.fit(y_train, y_test)
>>> viz.show()
|
class ClassBalance(TargetVisualizer):
    """
    One of the biggest challenges for classification models is an imbalance of
    classes in the training data. The ClassBalance visualizer shows the
    relationship of the support for each class in both the training and test
    data by displaying how frequently each class occurs as a bar graph.
    The ClassBalance visualizer can be displayed in two modes:
    1. Balance mode: show the frequency of each class in the dataset.
    2. Compare mode: show the relationship of support in train and test data.
    These modes are determined by what is passed to the ``fit()`` method.
    Parameters
    ----------
    ax : matplotlib Axes, default: None
        The axis to plot the figure on. If None is passed in the current axes
        will be used (or generated if required).
    labels: list, optional
        A list of class names for the x-axis if the target is already encoded.
        Ensure that the labels are ordered lexicographically with respect to
        the values in the target. A common use case is to pass
        ``LabelEncoder.classes_`` as this parameter. If not specified, the labels
        in the data will be used.
    colors: list of strings
        Specify colors for the barchart (will override colormap if both are provided).
    colormap : string or matplotlib cmap
        Specify a colormap to color the classes.
    kwargs: dict, optional
        Keyword arguments passed to the super class. Here, used
        to colorize the bars in the histogram.
    Attributes
    ----------
    classes_ : array-like
        The actual unique classes discovered in the target.
    support_ : array of shape (n_classes,) or (2, n_classes)
        A table representing the support of each class in the target. It is a
        vector when in balance mode, or a table with two rows in compare mode.
    Examples
    --------
    To simply observe the balance of classes in the target:
    >>> viz = ClassBalance().fit(y)
    >>> viz.show()
    To compare the relationship between training and test data:
    >>> _, _, y_train, y_test = train_test_split(X, y, test_size=0.2)
    >>> viz = ClassBalance()
    >>> viz.fit(y_train, y_test)
    >>> viz.show()
    """
    def __init__(self, ax=None, labels=None, colors=None, colormap=None, **kwargs):
        # Display options are stored as-is; they are consumed by draw().
        self.labels = labels
        self.colors = colors
        self.colormap = colormap
        super(ClassBalance, self).__init__(ax, **kwargs)
    def fit(self, y_train, y_test=None):
        """
        Fit the visualizer to the the target variables, which must be 1D
        vectors containing discrete (classification) data. Fit has two modes:
        1. Balance mode: if only y_train is specified
        2. Compare mode: if both train and test are specified
        In balance mode, the bar chart is displayed with each class as its own
        color. In compare mode, a side-by-side bar chart is displayed colored
        by train or test respectively.
        Parameters
        ----------
        y_train : array-like
            Array or list of shape (n,) that contains discrete data.
        y_test : array-like, optional
            Array or list of shape (m,) that contains discrete data. If
            specified, the bar chart will be drawn in compare mode.
        """
        # check to make sure that y_train is not a 2D array, e.g. X
        # NOTE(review): this attribute access assumes y_train is already an
        # ndarray; a plain Python list (allowed by the docstring) would fail
        # here — confirm intended input types.
        if y_train.ndim == 2:
            raise YellowbrickValueError(
                (
                    "fit has changed to only require a 1D array, y "
                    "since version 0.9; please see the docs for more info"
                )
            )
        # Check the target types for the y variables
        self._validate_target(y_train)
        self._validate_target(y_test)
        # Get the unique values from the dataset
        targets = (y_train,) if y_test is None else (y_train, y_test)
        self.classes_ = unique_labels(*targets)
        # Validate the classes with the class names
        if self.labels is not None:
            if len(self.labels) != len(self.classes_):
                raise YellowbrickValueError(
                    (
                        "discovered {} classes in the data, does not match "
                        "the {} labels specified."
                    ).format(len(self.classes_), len(self.labels))
                )
        # Determine if we're in compare or balance mode
        self._mode = BALANCE if y_test is None else COMPARE
        # Compute the support values
        if self._mode == BALANCE:
            self.support_ = np.array([(y_train == idx).sum() for idx in self.classes_])
        else:
            # One row of per-class counts for train and one for test.
            self.support_ = np.array(
                [[(y == idx).sum() for idx in self.classes_] for y in targets]
            )
        # Draw the bar chart
        self.draw()
        # Fit returns self
        return self
    def draw(self):
        """
        Renders the class balance chart on the specified axes from support.
        """
        # Number of colors is either number of classes or 2
        colors = resolve_colors(
            len(self.support_), colormap=self.colormap, colors=self.colors
        )
        if self._mode == BALANCE:
            self.ax.bar(
                np.arange(len(self.support_)),
                self.support_,
                color=colors,
                align="center",
                width=0.5,
            )
        # Compare mode
        else:
            bar_width = 0.35
            labels = ["train", "test"]
            for idx, support in enumerate(self.support_):
                index = np.arange(len(self.classes_))
                if idx > 0:
                    # Offset the second (test) series so bars sit side by side.
                    index = index + bar_width
                self.ax.bar(
                    index, support, bar_width, color=colors[idx], label=labels[idx]
                )
        return self.ax
    def finalize(self, **kwargs):
        """
        Finalizes the figure for drawing by setting a title, the legend, and axis
        labels, removing the grid, and making sure the figure is correctly zoomed
        into the bar chart.
        Parameters
        ----------
        kwargs: generic keyword arguments.
        Notes
        -----
        Generally this method is called from show and not directly by the user.
        """
        # Set the title
        self.set_title("Class Balance for {:,} Instances".format(self.support_.sum()))
        # Set the x ticks with the class names or labels if specified
        labels = self.labels if self.labels is not None else self.classes_
        xticks = np.arange(len(labels))
        if self._mode == COMPARE:
            # Center ticks between the paired bars (0.35 is draw()'s bar width).
            xticks = xticks + (0.35 / 2)
        self.ax.set_xticks(xticks)
        self.ax.set_xticklabels(labels)
        # Compute the ceiling for the y limit
        cmax = self.support_.max()
        self.ax.set_ylim(0, cmax + cmax * 0.1)
        self.ax.set_ylabel("support")
        # Remove the vertical grid
        self.ax.grid(False, axis="x")
        # Add the legend if in compare mode:
        if self._mode == COMPARE:
            self.ax.legend(frameon=True)
    def _validate_target(self, y):
        """
        Raises a value error if the target is not a classification target.
        """
        # Ignore None values
        if y is None:
            return
        y_type = type_of_target(y)
        if y_type not in ("binary", "multiclass"):
            raise YellowbrickValueError(
                ("'{}' target type not supported, only binary and multiclass").format(
                    y_type
                )
            )
(ax=None, labels=None, colors=None, colormap=None, **kwargs)
|
50,597 |
yellowbrick.target.class_balance
|
__init__
| null |
def __init__(self, ax=None, labels=None, colors=None, colormap=None, **kwargs):
    """Store display options (labels, colors, colormap) and delegate the
    axes setup and remaining keyword arguments to the parent visualizer."""
    self.labels = labels
    self.colors = colors
    self.colormap = colormap
    super(ClassBalance, self).__init__(ax, **kwargs)
|
(self, ax=None, labels=None, colors=None, colormap=None, **kwargs)
|
50,611 |
yellowbrick.target.class_balance
|
_validate_target
|
Raises a value error if the target is not a classification target.
|
def _validate_target(self, y):
"""
Raises a value error if the target is not a classification target.
"""
# Ignore None values
if y is None:
return
y_type = type_of_target(y)
if y_type not in ("binary", "multiclass"):
raise YellowbrickValueError(
("'{}' target type not supported, only binary and multiclass").format(
y_type
)
)
|
(self, y)
|
50,612 |
yellowbrick.target.class_balance
|
draw
|
Renders the class balance chart on the specified axes from support.
|
def draw(self):
    """
    Renders the class balance chart on the specified axes from support.
    """
    # Number of colors is either number of classes or 2
    # (support_ is a 1D vector in balance mode, two rows in compare mode).
    colors = resolve_colors(
        len(self.support_), colormap=self.colormap, colors=self.colors
    )
    if self._mode == BALANCE:
        # One bar per class, each in its own color.
        self.ax.bar(
            np.arange(len(self.support_)),
            self.support_,
            color=colors,
            align="center",
            width=0.5,
        )
    # Compare mode
    else:
        bar_width = 0.35
        labels = ["train", "test"]
        for idx, support in enumerate(self.support_):
            index = np.arange(len(self.classes_))
            if idx > 0:
                # Shift the test series right so the bars sit side by side.
                index = index + bar_width
            self.ax.bar(
                index, support, bar_width, color=colors[idx], label=labels[idx]
            )
    return self.ax
|
(self)
|
50,613 |
yellowbrick.target.class_balance
|
finalize
|
Finalizes the figure for drawing by setting a title, the legend, and axis
labels, removing the grid, and making sure the figure is correctly zoomed
into the bar chart.
Parameters
----------
kwargs: generic keyword arguments.
Notes
-----
Generally this method is called from show and not directly by the user.
|
def finalize(self, **kwargs):
    """
    Finalizes the figure for drawing by setting a title, the legend, and axis
    labels, removing the grid, and making sure the figure is correctly zoomed
    into the bar chart.
    Parameters
    ----------
    kwargs: generic keyword arguments.
    Notes
    -----
    Generally this method is called from show and not directly by the user.
    """
    # Set the title
    self.set_title("Class Balance for {:,} Instances".format(self.support_.sum()))
    # Set the x ticks with the class names or labels if specified
    labels = self.labels if self.labels is not None else self.classes_
    xticks = np.arange(len(labels))
    if self._mode == COMPARE:
        # Center ticks between the paired bars (0.35 is draw()'s bar width).
        xticks = xticks + (0.35 / 2)
    self.ax.set_xticks(xticks)
    self.ax.set_xticklabels(labels)
    # Compute the ceiling for the y limit (10% headroom above the max bar)
    cmax = self.support_.max()
    self.ax.set_ylim(0, cmax + cmax * 0.1)
    self.ax.set_ylabel("support")
    # Remove the vertical grid
    self.ax.grid(False, axis="x")
    # Add the legend if in compare mode:
    if self._mode == COMPARE:
        self.ax.legend(frameon=True)
|
(self, **kwargs)
|
50,614 |
yellowbrick.target.class_balance
|
fit
|
Fit the visualizer to the the target variables, which must be 1D
vectors containing discrete (classification) data. Fit has two modes:
1. Balance mode: if only y_train is specified
2. Compare mode: if both train and test are specified
In balance mode, the bar chart is displayed with each class as its own
color. In compare mode, a side-by-side bar chart is displayed colored
by train or test respectively.
Parameters
----------
y_train : array-like
Array or list of shape (n,) that contains discrete data.
y_test : array-like, optional
Array or list of shape (m,) that contains discrete data. If
specified, the bar chart will be drawn in compare mode.
|
def fit(self, y_train, y_test=None):
    """
    Fit the visualizer to the the target variables, which must be 1D
    vectors containing discrete (classification) data. Fit has two modes:
    1. Balance mode: if only y_train is specified
    2. Compare mode: if both train and test are specified
    In balance mode, the bar chart is displayed with each class as its own
    color. In compare mode, a side-by-side bar chart is displayed colored
    by train or test respectively.
    Parameters
    ----------
    y_train : array-like
        Array or list of shape (n,) that contains discrete data.
    y_test : array-like, optional
        Array or list of shape (m,) that contains discrete data. If
        specified, the bar chart will be drawn in compare mode.
    """
    # Coerce inputs to arrays so lists work as documented ("Array or
    # list"): both the .ndim check and the vectorized == comparisons
    # below require ndarray semantics.
    y_train = np.asarray(y_train)
    if y_test is not None:
        y_test = np.asarray(y_test)
    # check to make sure that y_train is not a 2D array, e.g. X
    if y_train.ndim == 2:
        raise YellowbrickValueError(
            (
                "fit has changed to only require a 1D array, y "
                "since version 0.9; please see the docs for more info"
            )
        )
    # Check the target types for the y variables
    self._validate_target(y_train)
    self._validate_target(y_test)
    # Get the unique values from the dataset
    targets = (y_train,) if y_test is None else (y_train, y_test)
    self.classes_ = unique_labels(*targets)
    # Validate the classes with the class names
    if self.labels is not None:
        if len(self.labels) != len(self.classes_):
            raise YellowbrickValueError(
                (
                    "discovered {} classes in the data, does not match "
                    "the {} labels specified."
                ).format(len(self.classes_), len(self.labels))
            )
    # Determine if we're in compare or balance mode
    self._mode = BALANCE if y_test is None else COMPARE
    # Compute the support values: per-class counts, as a vector in
    # balance mode or as a (train, test) pair of rows in compare mode.
    if self._mode == BALANCE:
        self.support_ = np.array([(y_train == idx).sum() for idx in self.classes_])
    else:
        self.support_ = np.array(
            [[(y == idx).sum() for idx in self.classes_] for y in targets]
        )
    # Draw the bar chart
    self.draw()
    # Fit returns self
    return self
|
(self, y_train, y_test=None)
|
50,617 |
yellowbrick.base
|
poof
|
This method is deprecated, please use ``show()`` instead.
|
def poof(self, *args, **kwargs):
    """
    Deprecated alias for ``show()``; emits a DeprecationWarning.
    """
    message = "this method is deprecated, please use show() instead"
    warnings.warn(message, DeprecationWarning)
    return self.show(*args, **kwargs)
|
(self, *args, **kwargs)
|
50,618 |
sklearn.utils._metadata_requests
|
set_fit_request
|
Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
y_test : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``y_test`` parameter in ``fit``.
y_train : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``y_train`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
|
def __get__(self, instance, owner):
    """Descriptor access: manufacture and return the per-class
    ``set_{method}_request`` function for *owner*, complete with an
    explicit signature and generated docstring so it presents as a
    normal method to the end user."""
    # we would want to have a method which accepts only the expected args
    def func(**kw):
        """Updates the request for provided parameters
        This docstring is overwritten below.
        See REQUESTER_DOC for expected functionality
        """
        if not _routing_enabled():
            raise RuntimeError(
                "This method is only available when metadata routing is enabled."
                " You can enable it using"
                " sklearn.set_config(enable_metadata_routing=True)."
            )
        # Reject kwargs that are not known metadata keys for this method.
        if self.validate_keys and (set(kw) - set(self.keys)):
            raise TypeError(
                f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
                f" are: {set(self.keys)}"
            )
        requests = instance._get_metadata_request()
        method_metadata_request = getattr(requests, self.name)
        for prop, alias in kw.items():
            # UNCHANGED is a sentinel meaning "keep the existing request".
            if alias is not UNCHANGED:
                method_metadata_request.add_request(param=prop, alias=alias)
        instance._metadata_request = requests
        return instance
    # Now we set the relevant attributes of the function so that it seems
    # like a normal method to the end user, with known expected arguments.
    func.__name__ = f"set_{self.name}_request"
    params = [
        inspect.Parameter(
            name="self",
            kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
            annotation=owner,
        )
    ]
    # One keyword-only parameter per metadata key, defaulting to UNCHANGED.
    params.extend(
        [
            inspect.Parameter(
                k,
                inspect.Parameter.KEYWORD_ONLY,
                default=UNCHANGED,
                annotation=Optional[Union[bool, None, str]],
            )
            for k in self.keys
        ]
    )
    func.__signature__ = inspect.Signature(
        params,
        return_annotation=owner,
    )
    # Assemble the user-facing docstring from the shared templates.
    doc = REQUESTER_DOC.format(method=self.name)
    for metadata in self.keys:
        doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
    doc += REQUESTER_DOC_RETURN
    func.__doc__ = doc
    return func
|
(self: yellowbrick.target.class_balance.ClassBalance, *, y_test: Union[bool, NoneType, str] = '$UNCHANGED$', y_train: Union[bool, NoneType, str] = '$UNCHANGED$') -> yellowbrick.target.class_balance.ClassBalance
|
50,620 |
yellowbrick.base
|
set_title
|
Sets the title on the current axes.
Parameters
----------
title: string, default: None
Add title to figure or if None leave untitled.
|
def set_title(self, title=None):
    """
    Sets the title on the current axes.
    Parameters
    ----------
    title: string, default: None
        Add title to figure or if None leave untitled.
    """
    # A title configured on the visualizer always wins over the argument.
    chosen = self.title or title
    if chosen is not None:
        self.ax.set_title(chosen)
|
(self, title=None)
|
50,621 |
yellowbrick.base
|
show
|
Makes the magic happen and a visualizer appear! You can pass in a path to
save the figure to disk with various backends, or you can call it with no
arguments to show the figure either in a notebook or in a GUI window that
pops up on screen.
Parameters
----------
outpath: string, default: None
path or None. Save figure to disk or if None show in window
clear_figure: boolean, default: False
When True, this flag clears the figure after saving to file or
showing on screen. This is useful when making consecutive plots.
kwargs: dict
generic keyword arguments.
Notes
-----
Developers of visualizers don't usually override show, as it is
primarily called by the user to render the visualization.
|
def show(self, outpath=None, clear_figure=False, **kwargs):
    """
    Render the visualization: save it to disk when ``outpath`` is given,
    otherwise display it in a notebook or GUI window.

    Parameters
    ----------
    outpath: string, default: None
        path or None. Save figure to disk or if None show in window
    clear_figure: boolean, default: False
        When True, this flag clears the figure after saving to file or
        showing on screen. This is useful when making consecutive plots.
    kwargs: dict
        generic keyword arguments.

    Notes
    -----
    Developers of visualizers don't usually override show, as it is
    primarily called by the user to render the visualization.
    """
    # Warn when draw() was never called -- the plot may be empty/incomplete
    if self._ax is None:
        warnings.warn(
            (
                "{} does not have a reference to a matplotlib.Axes "
                "the figure may not render as expected!"
            ).format(self.__class__.__name__),
            YellowbrickWarning,
        )
    # Apply subclass-specific titles, labels, legends, etc.
    self.finalize()
    if outpath is None:
        plt.show()
    else:
        plt.savefig(outpath, **kwargs)
    if clear_figure:
        self.fig.clear()
    # Returning the axes ensures the figure displays in notebooks
    return self.ax
|
(self, outpath=None, clear_figure=False, **kwargs)
|
50,622 |
yellowbrick.classifier.base
|
ClassificationScoreVisualizer
|
Base class for classifier model selection.
The ClassificationScoreVisualizer wraps a classifier to produce a
visualization when the score method is called, usually to allow the user
to effectively compare the performance between models.
The base class provides helper functionality to ensure that classification
visualizers are able to correctly identify and encode classes with human
readable labels and to map colors to the classes if required.
Parameters
----------
estimator : estimator
A scikit-learn estimator that should be a classifier. If the model is
not a classifier, an exception is raised. If the internal model is not
fitted, it is fit when the visualizer is fitted, unless otherwise specified
by ``is_fitted``.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If not specified the current axes will be
used (or generated if required).
fig : matplotlib Figure, default: None
The figure to plot the Visualizer on. If None is passed in the current
plot will be used (or generated if required).
classes : list of str, default: None
The class labels to use for the legend ordered by the index of the sorted
classes discovered in the ``fit()`` method. Specifying classes in this
manner is used to change the class names to a more specific format or
to label encoded integer classes. Some visualizers may also use this
field to filter the visualization for specific classes. For more advanced
usage specify an encoder rather than class labels.
encoder : dict or LabelEncoder, default: None
A mapping of classes to human readable labels. Often there is a mismatch
between desired class labels and those contained in the target variable
passed to ``fit()`` or ``score()``. The encoder disambiguates this mismatch
ensuring that classes are labeled correctly in the visualization.
is_fitted : bool or str, default="auto"
Specify if the wrapped estimator is already fitted. If False, the estimator
will be fit when the visualizer is fit, otherwise, the estimator will not be
modified. If "auto" (default), a helper method will check if the estimator
is fitted before fitting it again.
force_model : bool, default: False
Do not check to ensure that the underlying estimator is a classifier. This
will prevent an exception when the visualizer is initialized but may result
in unexpected or unintended behavior.
kwargs : dict
Keyword arguments passed to the visualizer base classes.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
The class labels observed while fitting.
class_counts_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting.
score_ : float
An evaluation metric of the classifier on test data produced when
``score()`` is called. This metric is between 0 and 1 -- higher scores are
generally better. For classifiers, this score is usually accuracy, but
ensure you check the underlying model for more details about the metric.
|
class ClassificationScoreVisualizer(ScoreVisualizer):
    """Base class for classifier model selection.
    The ClassificationScoreVisualizer wraps a classifier to produce a
    visualization when the score method is called, usually to allow the user
    to effectively compare the performance between models.
    The base class provides helper functionality to ensure that classification
    visualizers are able to correctly identify and encode classes with human
    readable labels and to map colors to the classes if required.
    Parameters
    ----------
    estimator : estimator
        A scikit-learn estimator that should be a classifier. If the model is
        not a classifier, an exception is raised. If the internal model is not
        fitted, it is fit when the visualizer is fitted, unless otherwise specified
        by ``is_fitted``.
    ax : matplotlib Axes, default: None
        The axes to plot the figure on. If not specified the current axes will be
        used (or generated if required).
    fig : matplotlib Figure, default: None
        The figure to plot the Visualizer on. If None is passed in the current
        plot will be used (or generated if required).
    classes : list of str, default: None
        The class labels to use for the legend ordered by the index of the sorted
        classes discovered in the ``fit()`` method. Specifying classes in this
        manner is used to change the class names to a more specific format or
        to label encoded integer classes. Some visualizers may also use this
        field to filter the visualization for specific classes. For more advanced
        usage specify an encoder rather than class labels.
    encoder : dict or LabelEncoder, default: None
        A mapping of classes to human readable labels. Often there is a mismatch
        between desired class labels and those contained in the target variable
        passed to ``fit()`` or ``score()``. The encoder disambiguates this mismatch
        ensuring that classes are labeled correctly in the visualization.
    is_fitted : bool or str, default="auto"
        Specify if the wrapped estimator is already fitted. If False, the estimator
        will be fit when the visualizer is fit, otherwise, the estimator will not be
        modified. If "auto" (default), a helper method will check if the estimator
        is fitted before fitting it again.
    force_model : bool, default: False
        Do not check to ensure that the underlying estimator is a classifier. This
        will prevent an exception when the visualizer is initialized but may result
        in unexpected or unintended behavior.
    kwargs : dict
        Keyword arguments passed to the visualizer base classes.
    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,)
        The class labels observed while fitting.
    class_counts_ : ndarray of shape (n_classes,)
        Number of samples encountered for each class during fitting.
    score_ : float
        An evaluation metric of the classifier on test data produced when
        ``score()`` is called. This metric is between 0 and 1 -- higher scores are
        generally better. For classifiers, this score is usually accuracy, but
        ensure you check the underlying model for more details about the metric.
    """
    def __init__(
        self,
        estimator,
        ax=None,
        fig=None,
        classes=None,
        encoder=None,
        is_fitted="auto",
        force_model=False,
        **kwargs
    ):
        # A bit of type checking
        if not force_model and not isclassifier(estimator):
            raise YellowbrickTypeError(
                "This estimator is not a classifier; "
                "try a regression or clustering score visualizer instead!"
            )
        # Initialize the super method.
        super(ClassificationScoreVisualizer, self).__init__(
            estimator, ax=ax, fig=fig, is_fitted=is_fitted, **kwargs
        )
        # Hyperparameters are stored unmodified (scikit-learn convention);
        # label decoding is deferred to _decode_labels/_labels at fit/score time.
        self.classes = classes
        self.encoder = encoder
        self.force_model = force_model
    @property
    def class_colors_(self):
        """
        Returns ``_colors`` if it exists, otherwise computes a categorical color
        per class based on the matplotlib color cycle. If the visualizer is not
        fitted, raises a NotFitted exception.
        If subclasses require users to choose colors or have specialized color
        handling, they should set ``_colors`` on init or during fit.
        Notes
        -----
        Because this is a property, this docstring is for developers only.
        """
        if not hasattr(self, "_colors"):
            if not hasattr(self, "classes_"):
                raise NotFitted("cannot determine colors before fit")
            # TODO: replace with resolve_colors
            self._colors = color_palette(None, len(self.classes_))
        return self._colors
    def fit(self, X, y=None, **kwargs):
        """
        Fit the visualizer to the specified data.
        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix of n instances with m features
        y : ndarray or Series of length n
            An array or series of target or class values
        Returns
        -------
        self : instance
            Returns the instance of the classification score visualizer
        """
        # Super fits the wrapped estimator
        super(ClassificationScoreVisualizer, self).fit(X, y, **kwargs)
        # Extract the classes and the class counts from the target
        self.classes_, self.class_counts_ = np.unique(y, return_counts=True)
        # Ensure the classes are aligned with the estimator
        # If they are not aligned, ignore class counts and issue a warning
        # NOTE(review): the comment above mentions issuing a warning, but no
        # warning is emitted on this path -- confirm whether one was intended.
        if hasattr(self.estimator, "classes_"):
            if not np.array_equal(self.classes_, self.estimator.classes_):
                self.classes_ = self.estimator.classes_
                self.class_counts_ = None
        # Decode classes to human readable labels specified by the user
        self.classes_ = self._decode_labels(self.classes_)
        # Always return self from fit
        return self
    def score(self, X, y):
        """
        The score function is the hook for visual interaction. Pass in test
        data and the visualizer will create predictions on the data and
        evaluate them with respect to the test values. The evaluation will
        then be passed to draw() and the result of the estimator score will
        be returned.
        Parameters
        ----------
        X : array-like
            X (also X_test) are the dependent variables of test set to predict
        y : array-like
            y (also y_test) is the independent actual variables to score against
        Returns
        -------
        score : float
            Returns the score of the underlying model, usually accuracy for
            classification models. Refer to the specific model for more details.
        """
        # If the estimator has been passed in fitted but the visualizer was not fit
        # then we can retrieve the classes from the estimator, unfortunately we cannot
        # retrieve the class counts so we simply set them to None and warn the user.
        # NOTE: cannot test if hasattr(self, "classes_") because it will be proxied.
        if not hasattr(self, "class_counts_"):
            if not hasattr(self.estimator, "classes_"):
                raise NotFitted(
                    (
                        "could not determine required property classes_; "
                        "the visualizer must either be fit or instantiated with a "
                        "fitted classifier before calling score()"
                    )
                )
            self.class_counts_ = None
            self.classes_ = self._decode_labels(self.estimator.classes_)
            warnings.warn(
                "could not determine class_counts_ from previously fitted classifier",
                YellowbrickWarning,
            )
        # This method implements ScoreVisualizer (do not call super).
        self.score_ = self.estimator.score(X, y)
        return self.score_
    def _decode_labels(self, y):
        """
        An internal helper function that uses either the classes or encoder
        properties to correctly decode y as user-readable string labels.
        If both classes and encoder are set, a warning is issued and encoder is
        used instead of classes. If neither encoder nor classes is set then the
        original array is returned unmodified.
        """
        if self.classes is not None and self.encoder is not None:
            warnings.warn(
                "both classes and encoder specified, using encoder", YellowbrickWarning
            )
        # The encoder takes precedence over the classes list when both are set
        if self.encoder is not None:
            # Use the label encoder or other transformer
            if hasattr(self.encoder, "inverse_transform"):
                try:
                    return self.encoder.inverse_transform(y)
                except ValueError:
                    y_labels = np.unique(y)
                    raise ModelError(
                        "could not decode {} y values to {} labels".format(
                            y_labels, self._labels()
                        )
                    )
            # Otherwise, treat as a dictionary
            try:
                return np.asarray([self.encoder[yi] for yi in y])
            except KeyError as e:
                raise ModelError(
                    (
                        "cannot decode class {} to label, "
                        "key not specified by encoder"
                    ).format(e)
                )
        if self.classes is not None:
            # Determine indices to perform class mappings on
            yp = np.asarray(y)
            # Integer targets are used as direct indices into the classes list
            if yp.dtype.kind in {"i", "u"}:
                idx = yp
            else:
                # Use label encoder to get indices by sorted class names
                idx = LabelEncoder().fit_transform(yp)
            # Use index mapping for classes
            try:
                return np.asarray(self.classes)[idx]
            except IndexError:
                y_labels = np.unique(yp)
                raise ModelError(
                    "could not decode {} y values to {} labels".format(
                        y_labels, self._labels()
                    )
                )
        # could not decode y without encoder or classes, return it as it is, unmodified
        return y
    def _labels(self):
        """
        Returns the human specified labels in either the classes list or from the
        encoder. Returns None if no human labels have been specified, but issues a
        warning if a transformer has been passed that does not specify labels.
        """
        if self.classes is not None and self.encoder is not None:
            warnings.warn(
                "both classes and encoder specified, using encoder", YellowbrickWarning
            )
        if self.encoder is not None:
            # Use label encoder or other transformer
            if hasattr(self.encoder, "transform"):
                if hasattr(self.encoder, "classes_"):
                    return self.encoder.classes_
                # This is not a label encoder
                msg = "could not determine class labels from {}".format(
                    self.encoder.__class__.__name__
                )
                warnings.warn(msg, YellowbrickWarning)
                return None
            # Otherwise, treat as dictionary and ensure sorted by key
            keys = sorted(list(self.encoder.keys()))
            return np.asarray([self.encoder[key] for key in keys])
        if self.classes is not None:
            return np.asarray(self.classes)
        return None
|
(estimator, ax=None, fig=None, classes=None, encoder=None, is_fitted='auto', force_model=False, **kwargs)
|
50,623 |
yellowbrick.utils.wrapper
|
__getattr__
| null |
def __getattr__(self, attr):
    """
    Proxy attribute access to the wrapped estimator.

    Looks up ``attr`` on ``self._wrapped`` when normal lookup on the wrapper
    fails, so the wrapper transparently exposes the estimator's API.

    Raises
    ------
    AttributeError
        If ``_wrapped`` itself has not been set on the instance.
    YellowbrickTypeError
        If the wrapper wraps itself, which would otherwise recurse.
    YellowbrickAttributeError
        If neither the wrapper nor the wrapped object has ``attr``.
    """
    # Guard: if '_wrapped' is missing (e.g. during unpickling or before
    # __init__ completes), evaluating self._wrapped below would re-enter
    # __getattr__ and recurse forever. Fail fast with AttributeError, which
    # is also what the pickle/copy protocols expect for missing dunders.
    if attr == "_wrapped":
        raise AttributeError(
            "'{}' object has no attribute '_wrapped'".format(self.__class__.__name__)
        )
    if self is self._wrapped:
        raise YellowbrickTypeError("wrapper cannot wrap itself or recursion will occur")
    # proxy to the wrapped object
    try:
        return getattr(self._wrapped, attr)
    except AttributeError as e:
        raise YellowbrickAttributeError(f"neither visualizer '{self.__class__.__name__}' nor wrapped estimator '{type(self._wrapped).__name__}' have attribute '{attr}'") from e
|
(self, attr)
|
50,625 |
yellowbrick.classifier.base
|
__init__
| null |
def __init__(
    self,
    estimator,
    ax=None,
    fig=None,
    classes=None,
    encoder=None,
    is_fitted="auto",
    force_model=False,
    **kwargs
):
    """
    Validate that the estimator is a classifier, delegate the common setup
    to the parent visualizer, and record the label-decoding hyperparameters.
    """
    # Refuse non-classifiers up front unless the check is explicitly disabled
    if not (force_model or isclassifier(estimator)):
        raise YellowbrickTypeError(
            "This estimator is not a classifier; "
            "try a regression or clustering score visualizer instead!"
        )
    # Parent handles estimator wrapping, axes/figure setup and fit detection
    super(ClassificationScoreVisualizer, self).__init__(
        estimator, ax=ax, fig=fig, is_fitted=is_fitted, **kwargs
    )
    # Stored unmodified; label decoding happens lazily in the fit/score helpers
    self.force_model = force_model
    self.encoder = encoder
    self.classes = classes
|
(self, estimator, ax=None, fig=None, classes=None, encoder=None, is_fitted='auto', force_model=False, **kwargs)
|
50,631 |
yellowbrick.classifier.base
|
_decode_labels
|
An internal helper function that uses either the classes or encoder
properties to correctly decode y as user-readable string labels.
If both classes and encoder are set, a warning is issued and encoder is
used instead of classes. If neither encoder nor classes is set then the
original array is returned unmodified.
|
def _decode_labels(self, y):
    """
    An internal helper function that uses either the classes or encoder
    properties to correctly decode y as user-readable string labels.
    If both classes and encoder are set, a warning is issued and encoder is
    used instead of classes. If neither encoder nor classes is set then the
    original array is returned unmodified.
    """
    if self.classes is not None and self.encoder is not None:
        warnings.warn(
            "both classes and encoder specified, using encoder", YellowbrickWarning
        )
    # The encoder takes precedence over the classes list when both are set
    if self.encoder is not None:
        # Use the label encoder or other transformer
        if hasattr(self.encoder, "inverse_transform"):
            try:
                return self.encoder.inverse_transform(y)
            except ValueError:
                # The transformer does not recognize some of the y values
                y_labels = np.unique(y)
                raise ModelError(
                    "could not decode {} y values to {} labels".format(
                        y_labels, self._labels()
                    )
                )
        # Otherwise, treat as a dictionary
        try:
            return np.asarray([self.encoder[yi] for yi in y])
        except KeyError as e:
            raise ModelError(
                (
                    "cannot decode class {} to label, "
                    "key not specified by encoder"
                ).format(e)
            )
    if self.classes is not None:
        # Determine indices to perform class mappings on
        yp = np.asarray(y)
        # Integer targets are treated as direct indices into the classes list
        if yp.dtype.kind in {"i", "u"}:
            idx = yp
        else:
            # Use label encoder to get indices by sorted class names
            idx = LabelEncoder().fit_transform(yp)
        # Use index mapping for classes
        try:
            return np.asarray(self.classes)[idx]
        except IndexError:
            # More distinct y values than entries in the classes list
            y_labels = np.unique(yp)
            raise ModelError(
                "could not decode {} y values to {} labels".format(
                    y_labels, self._labels()
                )
            )
    # could not decode y without encoder or classes, return it as it is, unmodified
    return y
|
(self, y)
|
50,635 |
yellowbrick.classifier.base
|
_labels
|
Returns the human specified labels in either the classes list or from the
encoder. Returns None if no human labels have been specified, but issues a
warning if a transformer has been passed that does not specify labels.
|
def _labels(self):
    """
    Return the human-specified labels from the classes list or the encoder,
    preferring the encoder when both are set (with a warning). Returns None
    when no human labels were specified; also warns and returns None when a
    transformer without a ``classes_`` attribute was supplied.
    """
    if self.classes is not None and self.encoder is not None:
        warnings.warn(
            "both classes and encoder specified, using encoder", YellowbrickWarning
        )
    if self.encoder is not None:
        if hasattr(self.encoder, "transform"):
            # A scikit-learn style transformer: use its fitted classes_
            if hasattr(self.encoder, "classes_"):
                return self.encoder.classes_
            # This is not a label encoder
            warnings.warn(
                "could not determine class labels from {}".format(
                    self.encoder.__class__.__name__
                ),
                YellowbrickWarning,
            )
            return None
        # A plain mapping: emit values ordered by sorted key
        return np.asarray([self.encoder[key] for key in sorted(self.encoder.keys())])
    if self.classes is not None:
        return np.asarray(self.classes)
    return None
|
(self)
|
50,641 |
yellowbrick.base
|
draw
|
The fitting or transformation process usually calls draw (not the
user). This function is implemented for developers to hook into the
matplotlib interface and to create an internal representation of the
data the visualizer was trained on in the form of a figure or axes.
Parameters
----------
kwargs: dict
generic keyword arguments.
|
def draw(self, **kwargs):
    """
    Developer hook for rendering onto the matplotlib interface; called by
    the fitting or transformation process rather than by the user. Concrete
    visualizers must override this to build their figure/axes representation
    of the training data.
    Parameters
    ----------
    kwargs: dict
        generic keyword arguments.
    Raises
    ------
    NotImplementedError
        Always, since the base class provides no drawing behavior.
    """
    message = "Visualizers must implement a drawing interface."
    raise NotImplementedError(message)
|
(self, **kwargs)
|
50,642 |
yellowbrick.base
|
finalize
|
Finalize executes any subclass-specific axes finalization steps.
Parameters
----------
kwargs: dict
generic keyword arguments.
Notes
-----
The user calls show and show calls finalize. Developers should
implement visualizer-specific finalization methods like setting titles
or axes labels, etc.
|
def finalize(self, **kwargs):
    """
    Subclass hook for axes finalization (titles, axis labels, legends);
    invoked by ``show()`` just before rendering. The base implementation
    performs no work and simply returns the current axes.
    Parameters
    ----------
    kwargs: dict
        generic keyword arguments.
    """
    # No-op by default; subclasses decorate the axes here.
    return self.ax
|
(self, **kwargs)
|
50,643 |
yellowbrick.classifier.base
|
fit
|
Fit the visualizer to the specified data.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
Returns
-------
self : instance
Returns the instance of the classification score visualizer
|
def fit(self, X, y=None, **kwargs):
    """
    Fit the wrapped estimator and record class metadata from the target.
    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features
    y : ndarray or Series of length n
        An array or series of target or class values
    Returns
    -------
    self : instance
        Returns the instance of the classification score visualizer
    """
    # The parent takes care of fitting the underlying estimator
    super(ClassificationScoreVisualizer, self).fit(X, y, **kwargs)
    # Record the unique classes and their frequencies from the target
    self.classes_, self.class_counts_ = np.unique(y, return_counts=True)
    # When the estimator reports a different class set, trust the estimator
    # and drop the counts, since they no longer correspond.
    estimator_classes = (
        self.estimator.classes_
        if hasattr(self.estimator, "classes_")
        else self.classes_
    )
    if not np.array_equal(self.classes_, estimator_classes):
        self.classes_ = estimator_classes
        self.class_counts_ = None
    # Map the discovered classes to any user-specified labels
    self.classes_ = self._decode_labels(self.classes_)
    return self
|
(self, X, y=None, **kwargs)
|
50,645 |
yellowbrick.base
|
get_params
|
After v0.24 - scikit-learn is able to determine that ``self.estimator`` is
nested and fetches its params using ``estimator__param``. This functionality is
pretty cool but it's a pretty big overhaul to change our "wrapped" estimator API
to a "nested" estimator API, therefore we override ``get_params`` to flatten out
the estimator params.
|
def get_params(self, deep=True):
    """
    Flatten nested estimator parameters to preserve the "wrapped" API.
    scikit-learn >= 0.24 detects ``self.estimator`` as a nested estimator and
    reports its params under the ``estimator__`` prefix; this override strips
    that prefix so callers see the wrapped estimator's params directly.
    """
    prefix = "estimator__"
    params = super(ModelVisualizer, self).get_params(deep=deep)
    nested = [key for key in params if key.startswith(prefix)]
    for key in nested:
        params[key[len(prefix):]] = params.pop(key)
    return params
|
(self, deep=True)
|
50,647 |
yellowbrick.classifier.base
|
score
|
The score function is the hook for visual interaction. Pass in test
data and the visualizer will create predictions on the data and
evaluate them with respect to the test values. The evaluation will
then be passed to draw() and the result of the estimator score will
be returned.
Parameters
----------
X : array-like
X (also X_test) are the dependent variables of test set to predict
y : array-like
y (also y_test) is the independent actual variables to score against
Returns
-------
score : float
Returns the score of the underlying model, usually accuracy for
classification models. Refer to the specific model for more details.
|
def score(self, X, y):
    """
    Visual-interaction hook: evaluate the wrapped estimator on test data and
    return its score. When the visualizer itself was never fit but wraps a
    pre-fitted classifier, ``classes_`` is recovered from the estimator
    (class counts cannot be recovered, so they are set to None with a
    warning).
    Parameters
    ----------
    X : array-like
        X (also X_test) are the dependent variables of test set to predict
    y : array-like
        y (also y_test) is the independent actual variables to score against
    Returns
    -------
    score : float
        Returns the score of the underlying model, usually accuracy for
        classification models. Refer to the specific model for more details.
    """
    # NOTE: cannot test if hasattr(self, "classes_") because it will be proxied.
    if not hasattr(self, "class_counts_"):
        # fit() never ran -- fall back to the estimator's fitted classes
        if not hasattr(self.estimator, "classes_"):
            raise NotFitted(
                (
                    "could not determine required property classes_; "
                    "the visualizer must either be fit or instantiated with a "
                    "fitted classifier before calling score()"
                )
            )
        self.classes_ = self._decode_labels(self.estimator.classes_)
        self.class_counts_ = None
        warnings.warn(
            "could not determine class_counts_ from previously fitted classifier",
            YellowbrickWarning,
        )
    # Implements ScoreVisualizer directly; deliberately no super() call.
    self.score_ = self.estimator.score(X, y)
    return self.score_
|
(self, X, y)
|
50,648 |
yellowbrick.base
|
set_params
|
The latest version of scikit-learn is able to determine that ``self.estimator``
is nested and sets its params using ``estimator__param``. In order to maintain
the Yellowbrick "wrapped" API, this method finds any params belonging to the
underlying estimator and sets them directly.
|
def set_params(self, **params):
    """
    Route parameters belonging to the wrapped estimator directly to it.
    The latest scikit-learn treats ``self.estimator`` as nested and expects
    ``estimator__param`` names; to keep the Yellowbrick "wrapped" API, any
    param matching one of the estimator's own params is popped and set on
    the estimator, and the remainder go to the visualizer itself.
    """
    wrapped_keys = self.estimator.get_params(deep=False)
    routed = {}
    for key in list(params):
        if key in wrapped_keys:
            routed[key] = params.pop(key)
    self.estimator.set_params(**routed)
    return super(ModelVisualizer, self).set_params(**params)
|
(self, **params)
|
50,651 |
yellowbrick.classifier.rocauc
|
ROCAUC
|
Receiver Operating Characteristic (ROC) curves are a measure of a
classifier's predictive quality that compares and visualizes the tradeoff
between the models' sensitivity and specificity. The ROC curve displays
the true positive rate on the Y axis and the false positive rate on the
X axis on both a global average and per-class basis. The ideal point is
therefore the top-left corner of the plot: false positives are zero and
true positives are one.
This leads to another metric, area under the curve (AUC), a computation
of the relationship between false positives and true positives. The higher
the AUC, the better the model generally is. However, it is also important
to inspect the "steepness" of the curve, as this describes the
maximization of the true positive rate while minimizing the false positive
rate. Generalizing "steepness" usually leads to discussions about
convexity, which we do not get into here.
Parameters
----------
estimator : estimator
A scikit-learn estimator that should be a classifier. If the model is
not a classifier, an exception is raised. If the internal model is not
fitted, it is fit when the visualizer is fitted, unless otherwise specified
by ``is_fitted``.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If not specified the current axes will be
used (or generated if required).
micro : bool, default: True
Plot the micro-averages ROC curve, computed from the sum of all true
positives and false positives across all classes. Micro is not defined
for binary classification problems with estimators with only a
decision_function method.
macro : bool, default: True
Plot the macro-averages ROC curve, which simply takes the average of
curves across all classes. Macro is not defined for binary
classification problems with estimators with only a decision_function
method.
per_class : bool, default: True
Plot the ROC curves for each individual class. This should be set
to false if only the macro or micro average curves are required. For true
binary classifiers, setting per_class=False will plot the positive class
ROC curve, and per_class=True will use ``1-P(1)`` to compute the curve of
the negative class if only a decision_function method exists on the estimator.
binary : bool, default: False
This argument quickly resets the visualizer for true binary classification
by updating the micro, macro, and per_class arguments to False (do not use
in conjunction with those other arguments). Note that this is not a true
hyperparameter to the visualizer, it just collects other parameters into
a single, simpler argument.
classes : list of str, default: None
The class labels to use for the legend ordered by the index of the sorted
classes discovered in the ``fit()`` method. Specifying classes in this
manner is used to change the class names to a more specific format or
to label encoded integer classes. Some visualizers may also use this
field to filter the visualization for specific classes. For more advanced
usage specify an encoder rather than class labels.
encoder : dict or LabelEncoder, default: None
A mapping of classes to human readable labels. Often there is a mismatch
between desired class labels and those contained in the target variable
passed to ``fit()`` or ``score()``. The encoder disambiguates this mismatch
ensuring that classes are labeled correctly in the visualization.
is_fitted : bool or str, default="auto"
Specify if the wrapped estimator is already fitted. If False, the estimator
will be fit when the visualizer is fit, otherwise, the estimator will not be
modified. If "auto" (default), a helper method will check if the estimator
is fitted before fitting it again.
force_model : bool, default: False
Do not check to ensure that the underlying estimator is a classifier. This
will prevent an exception when the visualizer is initialized but may result
in unexpected or unintended behavior.
kwargs : dict
Keyword arguments passed to the visualizer base classes.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
The class labels observed while fitting.
class_counts_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting.
score_ : float
An evaluation metric of the classifier on test data produced when
``score()`` is called. This metric is between 0 and 1 -- higher scores are
generally better. For classifiers, this score is usually accuracy, but
if micro or macro is specified this returns an F1 score.
target_type_ : string
Specifies if the detected classification target was binary or multiclass.
Notes
-----
ROC curves are typically used in binary classification, and in fact the
Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
binary classifiers. As a result it is necessary to binarize the output or
to use one-vs-rest or one-vs-all strategies of classification. The
visualizer does its best to handle multiple situations, but exceptions can
arise from unexpected models or outputs.
Another important point is the relationship of class labels specified on
initialization to those drawn on the curves. The classes are not used to
constrain ordering or filter curves; the ROC computation happens on the
unique values specified in the target vector to the ``score`` method. To
ensure the best quality visualization, do not use a LabelEncoder for this
and do not pass in class labels.
.. seealso::
http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
.. todo:: Allow the class list to filter the curves on the visualization.
Examples
--------
>>> from yellowbrick.classifier import ROCAUC
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.model_selection import train_test_split
>>> data = load_data("occupancy")
>>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
>>> X_train, X_test, y_train, y_test = train_test_split(X, y)
>>> oz = ROCAUC(LogisticRegression())
>>> oz.fit(X_train, y_train)
>>> oz.score(X_test, y_test)
>>> oz.show()
|
class ROCAUC(ClassificationScoreVisualizer):
    """
    Receiver Operating Characteristic (ROC) curves are a measure of a
    classifier's predictive quality that compares and visualizes the tradeoff
    between the models' sensitivity and specificity. The ROC curve displays
    the true positive rate on the Y axis and the false positive rate on the
    X axis on both a global average and per-class basis. The ideal point is
    therefore the top-left corner of the plot: false positives are zero and
    true positives are one.

    This leads to another metric, area under the curve (AUC), a computation
    of the relationship between false positives and true positives. The higher
    the AUC, the better the model generally is. However, it is also important
    to inspect the "steepness" of the curve, as this describes the
    maximization of the true positive rate while minimizing the false positive
    rate. Generalizing "steepness" usually leads to discussions about
    convexity, which we do not get into here.

    Parameters
    ----------
    estimator : estimator
        A scikit-learn estimator that should be a classifier. If the model is
        not a classifier, an exception is raised. If the internal model is not
        fitted, it is fit when the visualizer is fitted, unless otherwise specified
        by ``is_fitted``.

    ax : matplotlib Axes, default: None
        The axes to plot the figure on. If not specified the current axes will be
        used (or generated if required).

    micro : bool, default: True
        Plot the micro-averages ROC curve, computed from the sum of all true
        positives and false positives across all classes. Micro is not defined
        for binary classification problems with estimators with only a
        decision_function method.

    macro : bool, default: True
        Plot the macro-averages ROC curve, which simply takes the average of
        curves across all classes. Macro is not defined for binary
        classification problems with estimators with only a decision_function
        method.

    per_class : bool, default: True
        Plot the ROC curves for each individual class. This should be set
        to false if only the macro or micro average curves are required. For true
        binary classifiers, setting per_class=False will plot the positive class
        ROC curve, and per_class=True will use ``1-P(1)`` to compute the curve of
        the negative class if only a decision_function method exists on the estimator.

    binary : bool, default: False
        This argument quickly resets the visualizer for true binary classification
        by updating the micro, macro, and per_class arguments to False (do not use
        in conjunction with those other arguments). Note that this is not a true
        hyperparameter to the visualizer, it just collects other parameters into
        a single, simpler argument.

    classes : list of str, default: None
        The class labels to use for the legend ordered by the index of the sorted
        classes discovered in the ``fit()`` method. Specifying classes in this
        manner is used to change the class names to a more specific format or
        to label encoded integer classes. Some visualizers may also use this
        field to filter the visualization for specific classes. For more advanced
        usage specify an encoder rather than class labels.

    encoder : dict or LabelEncoder, default: None
        A mapping of classes to human readable labels. Often there is a mismatch
        between desired class labels and those contained in the target variable
        passed to ``fit()`` or ``score()``. The encoder disambiguates this mismatch
        ensuring that classes are labeled correctly in the visualization.

    is_fitted : bool or str, default="auto"
        Specify if the wrapped estimator is already fitted. If False, the estimator
        will be fit when the visualizer is fit, otherwise, the estimator will not be
        modified. If "auto" (default), a helper method will check if the estimator
        is fitted before fitting it again.

    force_model : bool, default: False
        Do not check to ensure that the underlying estimator is a classifier. This
        will prevent an exception when the visualizer is initialized but may result
        in unexpected or unintended behavior.

    kwargs : dict
        Keyword arguments passed to the visualizer base classes.

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,)
        The class labels observed while fitting.

    class_count_ : ndarray of shape (n_classes,)
        Number of samples encountered for each class during fitting.

    score_ : float
        An evaluation metric of the classifier on test data produced when
        ``score()`` is called. This metric is between 0 and 1 -- higher scores are
        generally better. For classifiers, this score is usually accuracy, but
        if micro or macro is specified this returns an F1 score.

    target_type_ : string
        Specifies if the detected classification target was binary or multiclass.

    Notes
    -----
    ROC curves are typically used in binary classification, and in fact the
    Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
    binary classifiers. As a result it is necessary to binarize the output or
    to use one-vs-rest or one-vs-all strategies of classification. The
    visualizer does its best to handle multiple situations, but exceptions can
    arise from unexpected models or outputs.

    Another important point is the relationship of class labels specified on
    initialization to those drawn on the curves. The classes are not used to
    constrain ordering or filter curves; the ROC computation happens on the
    unique values specified in the target vector to the ``score`` method. To
    ensure the best quality visualization, do not use a LabelEncoder for this
    and do not pass in class labels.

    .. seealso::
        http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html

    .. todo:: Allow the class list to filter the curves on the visualization.

    Examples
    --------
    >>> from yellowbrick.classifier import ROCAUC
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.model_selection import train_test_split
    >>> data = load_data("occupancy")
    >>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
    >>> X_train, X_test, y_train, y_test = train_test_split(X, y)
    >>> oz = ROCAUC(LogisticRegression())
    >>> oz.fit(X_train, y_train)
    >>> oz.score(X_test, y_test)
    >>> oz.show()
    """

    def __init__(
        self,
        estimator,
        ax=None,
        micro=True,
        macro=True,
        per_class=True,
        binary=False,
        classes=None,
        encoder=None,
        is_fitted="auto",
        force_model=False,
        **kwargs
    ):
        super(ROCAUC, self).__init__(
            estimator,
            ax=ax,
            classes=classes,
            encoder=encoder,
            is_fitted=is_fitted,
            force_model=force_model,
            **kwargs
        )

        # Set the visual parameters for ROCAUC
        # NOTE: the binary flag breaks our API since it's really just a meta parameter
        # for micro, macro, and per_class. We knew this going in, but did it anyway.
        self.binary = binary

        if self.binary:
            # binary collapses all three curve flags to False
            self.micro = False
            self.macro = False
            self.per_class = False
        else:
            self.micro = micro
            self.macro = macro
            self.per_class = per_class

    def fit(self, X, y=None):
        """
        Fit the classification model.

        Detects whether the target is binary or multiclass (stored as
        ``target_type_``) before delegating to the base visualizer's fit.
        Raises YellowbrickValueError for any other target type.
        """
        # The target determines what kind of estimator is fit
        ttype = type_of_target(y)
        if ttype.startswith(MULTICLASS):
            self.target_type_ = MULTICLASS
        elif ttype.startswith(BINARY):
            self.target_type_ = BINARY
        else:
            raise YellowbrickValueError(
                (
                    "{} does not support target type '{}', "
                    "please provide a binary or multiclass single-output target"
                ).format(self.__class__.__name__, ttype)
            )

        # Fit the model and return self
        return super(ROCAUC, self).fit(X, y)

    def score(self, X, y=None):
        """
        Generates the predicted target values using the Scikit-Learn
        estimator.

        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix of n instances with m features

        y : ndarray or Series of length n
            An array or series of target or class values

        Returns
        -------
        score_ : float
            Global accuracy unless micro or macro scores are requested.
        """
        # Call super to check if fitted and to compute self.score_
        # NOTE: this sets score to the base score if neither macro nor micro
        super(ROCAUC, self).score(X, y)

        # Compute the predictions for the test data
        y_pred = self._get_y_scores(X)

        if self.target_type_ == BINARY:
            # For binary, per_class must be True to draw micro/macro curves
            if (self.micro or self.macro) and not self.per_class:
                # NOTE(review): ModelError receives two separate message
                # fragments (a tuple) rather than one concatenated string --
                # confirm the message renders as intended.
                raise ModelError(
                    "no curves will be drawn; ",
                    "set per_class=True or micro=False and macro=False.",
                )

            # For binary, if predictions are returned in shape (n,), micro and macro
            # curves are not defined
            if (self.micro or self.macro) and len(y_pred.shape) == 1:
                raise ModelError(
                    "no curves will be drawn; set binary=True.",
                )

        if self.target_type_ == MULTICLASS:
            # If it's multiclass classification, at least one of micro, macro, or
            # per_class must be True
            if not self.micro and not self.macro and not self.per_class:
                raise YellowbrickValueError(
                    "no curves will be drawn; specify micro, macro, or per_class"
                )

        # Classes may be label encoded so only use what's in y to compute.
        # The self.classes_ attribute will be used as names for labels.
        classes = np.unique(y)
        n_classes = len(classes)

        # Store the false positive rate, true positive rate and curve info.
        self.fpr = dict()
        self.tpr = dict()
        self.roc_auc = dict()

        # If the decision is binary draw only ROC curve for the positive class
        if self.target_type_ is BINARY and not self.per_class:
            # In this case predict_proba returns an array of shape (n, 2) which
            # specifies the probabilities of both the negative and positive classes.
            if len(y_pred.shape) == 2 and y_pred.shape[1] == 2:
                self.fpr[BINARY], self.tpr[BINARY], _ = roc_curve(y, y_pred[:, 1])
            else:
                # decision_function returns array of shape (n,), so plot it directly
                self.fpr[BINARY], self.tpr[BINARY], _ = roc_curve(y, y_pred)
            self.roc_auc[BINARY] = auc(self.fpr[BINARY], self.tpr[BINARY])

        # Per-class binary decisions may have to have the negative class curve computed
        elif self.target_type_ is BINARY and self.per_class:
            # draw a curve for class 1 (the positive class)
            if len(y_pred.shape) == 2 and y_pred.shape[1] == 2:
                # predict_proba returns array of shape (n, 2), so use
                # probability of class 1 to compute ROC
                self.fpr[1], self.tpr[1], _ = roc_curve(y, y_pred[:, 1])
            else:
                # decision_function returns array of shape (n,)
                self.fpr[1], self.tpr[1], _ = roc_curve(y, y_pred)
            self.roc_auc[1] = auc(self.fpr[1], self.tpr[1])

            # draw a curve for class 0 (the negative class)
            if len(y_pred.shape) == 2 and y_pred.shape[1] == 2:
                # predict_proba returns array of shape (n, 2), so use
                # probability of class 0 to compute ROC
                self.fpr[0], self.tpr[0], _ = roc_curve(1 - y, y_pred[:, 0])
            else:
                # decision_function returns array of shape (n,).
                # To draw a ROC curve for class 0 we swap the classes 0 and 1 in y
                # and reverse classifiers predictions y_pred.
                self.fpr[0], self.tpr[0], _ = roc_curve(1 - y, -y_pred)
            self.roc_auc[0] = auc(self.fpr[0], self.tpr[0])

        else:
            # Otherwise compute the ROC curve and ROC area for each class
            for i, c in enumerate(classes):
                self.fpr[i], self.tpr[i], _ = roc_curve(y, y_pred[:, i], pos_label=c)
                self.roc_auc[i] = auc(self.fpr[i], self.tpr[i])

        # Compute micro average
        if self.micro:
            self._score_micro_average(y, y_pred, classes, n_classes)

        # Compute macro average
        if self.macro:
            self._score_macro_average(n_classes)

        # Draw the Curves
        self.draw()

        # Set score to micro average if specified
        if self.micro:
            self.score_ = self.roc_auc[MICRO]

        # Set score to macro average if not micro
        # NOTE(review): when both micro and macro are True, the macro AUC
        # overwrites the micro AUC as the reported score_.
        if self.macro:
            self.score_ = self.roc_auc[MACRO]

        return self.score_

    def draw(self):
        """
        Renders ROC-AUC plot.
        Called internally by score, possibly more than once

        Returns
        -------
        ax : the axis with the plotted figure
        """
        colors = self.class_colors_[0 : len(self.classes_)]
        n_classes = len(colors)

        # If it's a binary decision, plot the single ROC curve
        if self.target_type_ == BINARY and not self.per_class:
            self.ax.plot(
                self.fpr[BINARY],
                self.tpr[BINARY],
                label="ROC for binary decision, AUC = {:0.2f}".format(
                    self.roc_auc[BINARY]
                ),
            )

        # If per-class plotting is requested, plot ROC curves for each class
        if self.per_class:
            for i, color in zip(range(n_classes), colors):
                self.ax.plot(
                    self.fpr[i],
                    self.tpr[i],
                    color=color,
                    label="ROC of class {}, AUC = {:0.2f}".format(
                        self.classes_[i], self.roc_auc[i]
                    ),
                )

        # If requested, plot the ROC curve for the micro average
        # NOTE(review): micro and macro both reuse the last class color, so
        # the two dashed averages are only distinguishable by their labels.
        if self.micro:
            self.ax.plot(
                self.fpr[MICRO],
                self.tpr[MICRO],
                linestyle="--",
                color=self.class_colors_[len(self.classes_) - 1],
                label="micro-average ROC curve, AUC = {:0.2f}".format(
                    self.roc_auc["micro"]
                ),
            )

        # If requested, plot the ROC curve for the macro average
        if self.macro:
            self.ax.plot(
                self.fpr[MACRO],
                self.tpr[MACRO],
                linestyle="--",
                color=self.class_colors_[len(self.classes_) - 1],
                label="macro-average ROC curve, AUC = {:0.2f}".format(
                    self.roc_auc["macro"]
                ),
            )

        # Plot the line of no discrimination to compare the curve to.
        self.ax.plot([0, 1], [0, 1], linestyle=":", c=LINE_COLOR)
        return self.ax

    def finalize(self, **kwargs):
        """
        Sets a title and axis labels of the figures and ensures the axis limits
        are scaled between the valid ROCAUC score values.

        Parameters
        ----------
        kwargs: generic keyword arguments.

        Notes
        -----
        Generally this method is called from show and not directly by the user.
        """
        # Set the title and add the legend
        self.set_title("ROC Curves for {}".format(self.name))
        self.ax.legend(loc="lower right", frameon=True)

        # Set the limits for the ROC/AUC (always between 0 and 1)
        self.ax.set_xlim([0.0, 1.0])
        self.ax.set_ylim([0.0, 1.0])

        # Set x and y axis labels
        self.ax.set_ylabel("True Positive Rate")
        self.ax.set_xlabel("False Positive Rate")

    def _get_y_scores(self, X):
        """
        The ``roc_curve`` metric requires target scores that can either be the
        probability estimates of the positive class, confidence values or non-
        thresholded measure of decisions (as returned by "decision_function").

        This method computes the scores by resolving the estimator methods
        that retrieve these values.

        .. todo:: implement confidence values metric.

        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix of n instances with m features -- generally the test data
            that is associated with y_true values.
        """
        # The resolution order of scoring functions
        attrs = ("predict_proba", "decision_function")

        # Return the first resolved function
        for attr in attrs:
            try:
                method = getattr(self.estimator, attr, None)
                if method:
                    return method(X)
            except AttributeError:
                # Some Scikit-Learn estimators have both probability and
                # decision functions but override __getattr__ and raise an
                # AttributeError on access.
                # Note that because of the ordering of our attrs above,
                # estimators with both will *only* ever use probability.
                continue

        # If we've gotten this far, raise an error
        raise ModelError(
            "ROCAUC requires estimators with predict_proba or "
            "decision_function methods."
        )

    def _score_micro_average(self, y, y_pred, classes, n_classes):
        """
        Compute the micro average scores for the ROCAUC curves.
        """
        # Convert y to binarized array for micro and macro scores
        y = label_binarize(y, classes=classes)
        # label_binarize yields a single column for two classes; expand to
        # the (n, 2) one-hot layout so ravel pairs up with y_pred below
        if n_classes == 2:
            y = np.hstack((1 - y, y))

        # Compute micro-average
        self.fpr[MICRO], self.tpr[MICRO], _ = roc_curve(y.ravel(), y_pred.ravel())
        self.roc_auc[MICRO] = auc(self.fpr[MICRO], self.tpr[MICRO])

    def _score_macro_average(self, n_classes):
        """
        Compute the macro average scores for the ROCAUC curves.
        """
        # Gather all FPRs
        all_fpr = np.unique(np.concatenate([self.fpr[i] for i in range(n_classes)]))
        avg_tpr = np.zeros_like(all_fpr)

        # Compute the averages per class
        for i in range(n_classes):
            avg_tpr += np.interp(all_fpr, self.fpr[i], self.tpr[i])

        # Finalize the average
        avg_tpr /= n_classes

        # Store the macro averages
        self.fpr[MACRO] = all_fpr
        self.tpr[MACRO] = avg_tpr
        self.roc_auc[MACRO] = auc(self.fpr[MACRO], self.tpr[MACRO])
|
(estimator, ax=None, micro=True, macro=True, per_class=True, binary=False, classes=None, encoder=None, is_fitted='auto', force_model=False, **kwargs)
|
50,654 |
yellowbrick.classifier.rocauc
|
__init__
| null |
def __init__(
    self,
    estimator,
    ax=None,
    micro=True,
    macro=True,
    per_class=True,
    binary=False,
    classes=None,
    encoder=None,
    is_fitted="auto",
    force_model=False,
    **kwargs
):
    """
    Initialize the ROCAUC visualizer by delegating estimator wrapping to
    the base classification visualizer and recording the curve flags.
    """
    super(ROCAUC, self).__init__(
        estimator,
        ax=ax,
        classes=classes,
        encoder=encoder,
        is_fitted=is_fitted,
        force_model=force_model,
        **kwargs
    )

    # binary is a meta-parameter that collapses micro, macro, and per_class
    # to False for a single positive-class ROC curve; it is not a true
    # hyperparameter of the visualizer.
    self.binary = binary
    self.micro = False if binary else micro
    self.macro = False if binary else macro
    self.per_class = False if binary else per_class
|
(self, estimator, ax=None, micro=True, macro=True, per_class=True, binary=False, classes=None, encoder=None, is_fitted='auto', force_model=False, **kwargs)
|
50,664 |
yellowbrick.classifier.rocauc
|
_get_y_scores
|
The ``roc_curve`` metric requires target scores that can either be the
probability estimates of the positive class, confidence values or non-
thresholded measure of decisions (as returned by "decision_function").
This method computes the scores by resolving the estimator methods
that retrieve these values.
.. todo:: implement confidence values metric.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features -- generally the test data
that is associated with y_true values.
|
def _get_y_scores(self, X):
"""
The ``roc_curve`` metric requires target scores that can either be the
probability estimates of the positive class, confidence values or non-
thresholded measure of decisions (as returned by "decision_function").
This method computes the scores by resolving the estimator methods
that retreive these values.
.. todo:: implement confidence values metric.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features -- generally the test data
that is associated with y_true values.
"""
# The resolution order of scoring functions
attrs = ("predict_proba", "decision_function")
# Return the first resolved function
for attr in attrs:
try:
method = getattr(self.estimator, attr, None)
if method:
return method(X)
except AttributeError:
# Some Scikit-Learn estimators have both probability and
# decision functions but override __getattr__ and raise an
# AttributeError on access.
# Note that because of the ordering of our attrs above,
# estimators with both will *only* ever use probability.
continue
# If we've gotten this far, raise an error
raise ModelError(
"ROCAUC requires estimators with predict_proba or "
"decision_function methods."
)
|
(self, X)
|
50,669 |
yellowbrick.classifier.rocauc
|
_score_macro_average
|
Compute the macro average scores for the ROCAUC curves.
|
def _score_macro_average(self, n_classes):
    """
    Compute the macro average scores for the ROCAUC curves.

    The macro-average TPR is the per-class mean of each class's true
    positive rate interpolated onto the union of all per-class FPR points.
    """
    # The union of every per-class FPR point forms the common x-grid
    grid = np.unique(np.concatenate([self.fpr[i] for i in range(n_classes)]))

    # Interpolate every class curve onto the grid and take the mean
    total = np.zeros_like(grid)
    for i in range(n_classes):
        total = total + np.interp(grid, self.fpr[i], self.tpr[i])
    mean_tpr = total / n_classes

    # Record the macro-average curve and its AUC
    self.fpr[MACRO] = grid
    self.tpr[MACRO] = mean_tpr
    self.roc_auc[MACRO] = auc(grid, mean_tpr)
|
(self, n_classes)
|
50,670 |
yellowbrick.classifier.rocauc
|
_score_micro_average
|
Compute the micro average scores for the ROCAUC curves.
|
def _score_micro_average(self, y, y_pred, classes, n_classes):
    """
    Compute the micro average scores for the ROCAUC curves.

    Flattens the binarized target and the score matrix so that every
    (sample, class) decision contributes to a single pooled curve.
    """
    y_bin = label_binarize(y, classes=classes)

    # label_binarize produces one column for binary targets; expand to the
    # (n, 2) one-hot layout so the ravel below pairs with y_pred.
    if n_classes == 2:
        y_bin = np.hstack((1 - y_bin, y_bin))

    fpr, tpr, _ = roc_curve(y_bin.ravel(), y_pred.ravel())
    self.fpr[MICRO] = fpr
    self.tpr[MICRO] = tpr
    self.roc_auc[MICRO] = auc(fpr, tpr)
|
(self, y, y_pred, classes, n_classes)
|
50,673 |
yellowbrick.classifier.rocauc
|
draw
|
Renders ROC-AUC plot.
Called internally by score, possibly more than once
Returns
-------
ax : the axis with the plotted figure
|
def draw(self):
    """
    Renders ROC-AUC plot.
    Called internally by score, possibly more than once

    Returns
    -------
    ax : the axis with the plotted figure
    """
    curve_colors = self.class_colors_[0 : len(self.classes_)]
    n_curves = len(curve_colors)
    avg_color = self.class_colors_[len(self.classes_) - 1]

    # A lone curve for the binary decision without per-class plotting
    if self.target_type_ == BINARY and not self.per_class:
        self.ax.plot(
            self.fpr[BINARY],
            self.tpr[BINARY],
            label="ROC for binary decision, AUC = {:0.2f}".format(
                self.roc_auc[BINARY]
            ),
        )

    # One colored curve per class when requested
    if self.per_class:
        for idx, color in zip(range(n_curves), curve_colors):
            label = "ROC of class {}, AUC = {:0.2f}".format(
                self.classes_[idx], self.roc_auc[idx]
            )
            self.ax.plot(self.fpr[idx], self.tpr[idx], color=color, label=label)

    # Dashed micro-average curve
    if self.micro:
        self.ax.plot(
            self.fpr[MICRO],
            self.tpr[MICRO],
            linestyle="--",
            color=avg_color,
            label="micro-average ROC curve, AUC = {:0.2f}".format(
                self.roc_auc["micro"]
            ),
        )

    # Dashed macro-average curve
    if self.macro:
        self.ax.plot(
            self.fpr[MACRO],
            self.tpr[MACRO],
            linestyle="--",
            color=avg_color,
            label="macro-average ROC curve, AUC = {:0.2f}".format(
                self.roc_auc["macro"]
            ),
        )

    # Diagonal chance line for visual reference
    self.ax.plot([0, 1], [0, 1], linestyle=":", c=LINE_COLOR)
    return self.ax
|
(self)
|
50,674 |
yellowbrick.classifier.rocauc
|
finalize
|
Sets a title and axis labels of the figures and ensures the axis limits
are scaled between the valid ROCAUC score values.
Parameters
----------
kwargs: generic keyword arguments.
Notes
-----
Generally this method is called from show and not directly by the user.
|
def finalize(self, **kwargs):
    """
    Sets a title and axis labels of the figures and ensures the axis limits
    are scaled between the valid ROCAUC score values.

    Parameters
    ----------
    kwargs: generic keyword arguments.

    Notes
    -----
    Generally this method is called from show and not directly by the user.
    """
    # Title and legend
    self.set_title("ROC Curves for {}".format(self.name))
    self.ax.legend(loc="lower right", frameon=True)

    # ROC space is the unit square
    for set_limits in (self.ax.set_xlim, self.ax.set_ylim):
        set_limits([0.0, 1.0])

    # Axis labels
    self.ax.set_ylabel("True Positive Rate")
    self.ax.set_xlabel("False Positive Rate")
|
(self, **kwargs)
|
50,675 |
yellowbrick.classifier.rocauc
|
fit
|
Fit the classification model.
|
def fit(self, X, y=None):
    """
    Fit the classification model.

    Records the detected target type (binary or multiclass) before
    delegating to the base visualizer; any other target type raises.
    """
    # The target determines what kind of estimator is fit
    detected = type_of_target(y)
    if detected.startswith(MULTICLASS):
        self.target_type_ = MULTICLASS
    elif detected.startswith(BINARY):
        self.target_type_ = BINARY
    else:
        message = (
            "{} does not support target type '{}', "
            "please provide a binary or multiclass single-output target"
        ).format(self.__class__.__name__, detected)
        raise YellowbrickValueError(message)

    # Fit the model and return self
    return super(ROCAUC, self).fit(X, y)
|
(self, X, y=None)
|
50,679 |
yellowbrick.classifier.rocauc
|
score
|
Generates the predicted target values using the Scikit-Learn
estimator.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
Returns
-------
score_ : float
Global accuracy unless micro or macro scores are requested.
|
def score(self, X, y=None):
    """
    Generates the predicted target values using the Scikit-Learn
    estimator.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features

    y : ndarray or Series of length n
        An array or series of target or class values

    Returns
    -------
    score_ : float
        Global accuracy unless micro or macro scores are requested.
    """
    # Call super to check if fitted and to compute self.score_
    # NOTE: this sets score to the base score if neither macro nor micro
    super(ROCAUC, self).score(X, y)

    # Compute the predictions for the test data
    y_pred = self._get_y_scores(X)

    if self.target_type_ == BINARY:
        # For binary, per_class must be True to draw micro/macro curves
        if (self.micro or self.macro) and not self.per_class:
            # NOTE(review): ModelError receives two separate message
            # fragments (a tuple) rather than one concatenated string --
            # confirm the message renders as intended.
            raise ModelError(
                "no curves will be drawn; ",
                "set per_class=True or micro=False and macro=False.",
            )

        # For binary, if predictions are returned in shape (n,), micro and macro
        # curves are not defined
        if (self.micro or self.macro) and len(y_pred.shape) == 1:
            raise ModelError(
                "no curves will be drawn; set binary=True.",
            )

    if self.target_type_ == MULTICLASS:
        # If it's multiclass classification, at least one of micro, macro, or
        # per_class must be True
        if not self.micro and not self.macro and not self.per_class:
            raise YellowbrickValueError(
                "no curves will be drawn; specify micro, macro, or per_class"
            )

    # Classes may be label encoded so only use what's in y to compute.
    # The self.classes_ attribute will be used as names for labels.
    classes = np.unique(y)
    n_classes = len(classes)

    # Store the false positive rate, true positive rate and curve info.
    self.fpr = dict()
    self.tpr = dict()
    self.roc_auc = dict()

    # If the decision is binary draw only ROC curve for the positive class
    if self.target_type_ is BINARY and not self.per_class:
        # In this case predict_proba returns an array of shape (n, 2) which
        # specifies the probabilities of both the negative and positive classes.
        if len(y_pred.shape) == 2 and y_pred.shape[1] == 2:
            self.fpr[BINARY], self.tpr[BINARY], _ = roc_curve(y, y_pred[:, 1])
        else:
            # decision_function returns array of shape (n,), so plot it directly
            self.fpr[BINARY], self.tpr[BINARY], _ = roc_curve(y, y_pred)
        self.roc_auc[BINARY] = auc(self.fpr[BINARY], self.tpr[BINARY])

    # Per-class binary decisions may have to have the negative class curve computed
    elif self.target_type_ is BINARY and self.per_class:
        # draw a curve for class 1 (the positive class)
        if len(y_pred.shape) == 2 and y_pred.shape[1] == 2:
            # predict_proba returns array of shape (n, 2), so use
            # probability of class 1 to compute ROC
            self.fpr[1], self.tpr[1], _ = roc_curve(y, y_pred[:, 1])
        else:
            # decision_function returns array of shape (n,)
            self.fpr[1], self.tpr[1], _ = roc_curve(y, y_pred)
        self.roc_auc[1] = auc(self.fpr[1], self.tpr[1])

        # draw a curve for class 0 (the negative class)
        if len(y_pred.shape) == 2 and y_pred.shape[1] == 2:
            # predict_proba returns array of shape (n, 2), so use
            # probability of class 0 to compute ROC
            self.fpr[0], self.tpr[0], _ = roc_curve(1 - y, y_pred[:, 0])
        else:
            # decision_function returns array of shape (n,).
            # To draw a ROC curve for class 0 we swap the classes 0 and 1 in y
            # and reverse classifiers predictions y_pred.
            self.fpr[0], self.tpr[0], _ = roc_curve(1 - y, -y_pred)
        self.roc_auc[0] = auc(self.fpr[0], self.tpr[0])

    else:
        # Otherwise compute the ROC curve and ROC area for each class
        for i, c in enumerate(classes):
            self.fpr[i], self.tpr[i], _ = roc_curve(y, y_pred[:, i], pos_label=c)
            self.roc_auc[i] = auc(self.fpr[i], self.tpr[i])

    # Compute micro average
    if self.micro:
        self._score_micro_average(y, y_pred, classes, n_classes)

    # Compute macro average
    if self.macro:
        self._score_macro_average(n_classes)

    # Draw the Curves
    self.draw()

    # Set score to micro average if specified
    if self.micro:
        self.score_ = self.roc_auc[MICRO]

    # Set score to macro average if not micro
    # NOTE(review): when both micro and macro are True, the macro AUC
    # overwrites the micro AUC as the reported score_.
    if self.macro:
        self.score_ = self.roc_auc[MACRO]

    return self.score_
|
(self, X, y=None)
|
50,683 |
yellowbrick.anscombe
|
anscombe
|
Creates 2x2 grid plot of the 4 anscombe datasets for illustration.
|
def anscombe():
    """
    Creates 2x2 grid plot of the 4 anscombe datasets for illustration.
    """
    _, ((axa, axb), (axc, axd)) = plt.subplots(2, 2, sharex="col", sharey="row")
    axes = (axa, axb, axc, axd)

    for arr, ax, color in zip(ANSCOMBE, axes, get_color_cycle()):
        x, y = arr[0], arr[1]

        # Fix the axis extents before drawing
        ax.set_xlim(0, 15)
        ax.set_ylim(0, 15)

        # Scatter the points and overlay the best-fit line
        ax.scatter(x, y, c=color)
        draw_best_fit(x, y, ax, c=color)

    return axes
|
()
|
50,687 |
yellowbrick.style.palettes
|
color_palette
|
Return a color palette object with color definition and handling.
Calling this function with ``palette=None`` will return the current
matplotlib color cycle.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
Parameters
----------
palette : None or str or sequence
Name of a palette or ``None`` to return the current palette. If a
sequence the input colors are used but possibly cycled.
Available palette names from :py:mod:`yellowbrick.colors.palettes` are:
.. hlist::
:columns: 3
* :py:const:`accent`
* :py:const:`dark`
* :py:const:`paired`
* :py:const:`pastel`
* :py:const:`bold`
* :py:const:`muted`
* :py:const:`colorblind`
* :py:const:`sns_colorblind`
* :py:const:`sns_deep`
* :py:const:`sns_muted`
* :py:const:`sns_pastel`
* :py:const:`sns_bright`
* :py:const:`sns_dark`
* :py:const:`flatui`
* :py:const:`neural_paint`
n_colors : None or int
Number of colors in the palette. If ``None``, the default will depend
on how ``palette`` is specified. Named palettes default to 6 colors
which allow the use of the names "bgrmyck", though others do have more
or less colors; therefore reducing the size of the list can only be
done by specifying this parameter. Asking for more colors than exist
in the palette will cause it to cycle.
Returns
-------
list(tuple)
Returns a ColorPalette object, which behaves like a list, but can be
used as a context manager and possesses functions to convert colors.
.. seealso::
:func:`.set_palette`
Set the default color cycle for all plots.
:func:`.set_color_codes`
Reassign color codes like ``"b"``, ``"g"``, etc. to
colors from one of the yellowbrick palettes.
:func:`..colors.resolve_colors`
Resolve a color map or listed sequence of colors.
|
def color_palette(palette=None, n_colors=None):
    """
    Return a color palette object with color definition and handling.

    Calling this function with ``palette=None`` will return the current
    matplotlib color cycle. This function can also be used in a ``with``
    statement to temporarily set the color cycle for a plot or set of plots.

    Parameters
    ----------
    palette : None or str or sequence
        Name of a palette or ``None`` to return the current palette. If a
        sequence the input colors are used but possibly cycled. Named
        palettes are looked up in :py:mod:`yellowbrick.colors.palettes`
        (e.g. ``accent``, ``dark``, ``paired``, ``pastel``, ``bold``,
        ``muted``, ``colorblind``, the ``sns_*`` family, ``flatui``,
        ``neural_paint``).

    n_colors : None or int
        Number of colors in the palette. If ``None``, the default will depend
        on how ``palette`` is specified; named palettes default to their own
        length. Asking for more colors than exist in the palette will cause
        it to cycle.

    Returns
    -------
    list(tuple)
        Returns a ColorPalette object, which behaves like a list, but can be
        used as a context manager and possesses functions to convert colors.

    .. seealso::
        :func:`.set_palette`
            Set the default color cycle for all plots.
        :func:`.set_color_codes`
            Reassign color codes like ``"b"``, ``"g"``, etc. to
            colors from one of the yellowbrick palettes.
        :func:`..colors.resolve_colors`
            Resolve a color map or listed sequence of colors.
    """
    # Resolve the palette: current cycle, explicit sequence, or named lookup
    if palette is None:
        palette = get_color_cycle()
    elif isinstance(palette, str):
        key = palette.lower()
        if key not in PALETTES:
            raise YellowbrickValueError(
                "'{}' is not a recognized palette!".format(palette)
            )
        palette = PALETTES[key]

    # Default the requested length to the palette's own length
    if n_colors is None:
        n_colors = len(palette)

    # Always return as many colors as we asked for, cycling if necessary
    source = cycle(palette)
    palette = [next(source) for _ in range(n_colors)]

    # Always return in RGB tuple format
    try:
        palette = map(mpl.colors.colorConverter.to_rgb, palette)
        palette = ColorPalette(palette)
    except ValueError:
        raise YellowbrickValueError(
            "Could not generate a palette for %s" % str(palette)
        )

    return palette
|
(palette=None, n_colors=None)
|
50,689 |
yellowbrick.datasaurus
|
datasaurus
|
Creates 2x2 grid plot of 4 from the Datasaurus Dozen datasets for illustration.
Citation:
Justin Matejka, George Fitzmaurice (2017)
Same Stats, Different Graphs: Generating Datasets with Varied Appearance and
Identical Statistics through Simulated Annealing
CHI 2017 Conference proceedings:
ACM SIGCHI Conference on Human Factors in Computing Systems
|
def datasaurus():
    """
    Creates 2x2 grid plot of 4 from the Datasaurus Dozen datasets for illustration.
    Citation:
    Justin Matejka, George Fitzmaurice (2017)
    Same Stats, Different Graphs: Generating Datasets with Varied Appearance and
    Identical Statistics through Simulated Annealing
    CHI 2017 Conference proceedings:
    ACM SIGCHI Conference on Human Factors in Computing Systems
    """
    # Build a 2x2 grid that shares x per column and y per row, then flatten
    # it into a single sequence of four axes for pairing with the datasets.
    _, grid = plt.subplots(2, 2, sharex="col", sharey="row")
    axes = (grid[0][0], grid[0][1], grid[1][0], grid[1][1])
    for arr, ax, color in zip(DATASAURUS, axes, get_color_cycle()):
        xs, ys = arr[0], arr[1]
        # Scatter the raw points, pin identical limits on every panel so the
        # four datasets are visually comparable, then overlay the fit line.
        ax.scatter(xs, ys, c=color)
        ax.set_xlim(0, 100)
        ax.set_ylim(0, 110)
        draw_best_fit(xs, ys, ax, c=color)
    return axes
|
()
|
50,692 |
yellowbrick.version
|
get_version
|
Prints the version.
|
def get_version(short=False):
    """
    Return the package version as a string assembled from ``__version_info__``.

    Parameters
    ----------
    short : bool, default: False
        If True, omit the pre-release suffix (e.g. "a1" / "b2") even for
        alpha or beta releases, returning only the numeric portion.

    Returns
    -------
    str
        The version string, e.g. ``"1.2.3"``, ``"1.0b2"`` or ``"2.1.post1"``.

    Raises
    ------
    ValueError
        If ``__version_info__["releaselevel"]`` is not one of "alpha",
        "beta" or "final".
    """
    # Validate explicitly rather than with `assert`, which is stripped when
    # Python runs with the -O flag and would silently skip this check.
    if __version_info__["releaselevel"] not in ("alpha", "beta", "final"):
        raise ValueError(
            "unknown release level '{}'".format(__version_info__["releaselevel"])
        )
    vers = ["{major}.{minor}".format(**__version_info__)]
    # The micro component is omitted when it is zero (e.g. "1.0", not "1.0.0").
    if __version_info__["micro"]:
        vers.append(".{micro}".format(**__version_info__))
    # Pre-release marker: first letter of the level plus the serial ("b2").
    if __version_info__["releaselevel"] != "final" and not short:
        vers.append(
            "{}{}".format(
                __version_info__["releaselevel"][0],
                __version_info__["serial"],
            )
        )
    if __version_info__["post"]:
        vers.append(".post{}".format(__version_info__["post"]))
    return "".join(vers)
|
(short=False)
|
50,694 |
yellowbrick.style.rcmod
|
reset_defaults
|
Restore all RC params to default settings.
|
def reset_defaults():
    """
    Restore all RC params to default settings.
    """
    # Overwrite the live rc configuration with matplotlib's shipped defaults.
    defaults = mpl.rcParamsDefault
    mpl.rcParams.update(defaults)
|
()
|
50,695 |
yellowbrick.style.rcmod
|
reset_orig
|
Restore all RC params to original settings (respects custom rc).
|
def reset_orig():
    """
    Restore all RC params to original settings (respects custom rc).
    """
    # _orig_rc_params presumably snapshots the user's rc at import time,
    # so custom matplotlibrc settings survive this reset.
    snapshot = _orig_rc_params
    mpl.rcParams.update(snapshot)
|
()
|
50,696 |
yellowbrick.style.rcmod
|
set_aesthetic
|
Set aesthetic parameters in one step.
Each set of parameters can be set directly or temporarily, see the
referenced functions below for more information.
Parameters
----------
palette : string or sequence
Color palette, see :func:`color_palette`
font : string
Font family, see matplotlib font manager.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
color_codes : bool
If ``True`` and ``palette`` is a yellowbrick palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
rc : dict or None
Dictionary of rc parameter mappings to override the above.
|
def set_aesthetic(
    palette="yellowbrick", font="sans-serif", font_scale=1, color_codes=True, rc=None
):
    """
    Configure palette, font, and rc aesthetics in a single call.

    Parameters
    ----------
    palette : string or sequence
        Color palette, see :func:`color_palette`
    font : string
        Font family, see matplotlib font manager.
    font_scale : float, optional
        Separate scaling factor to independently scale the size of the
        font elements.
    color_codes : bool
        If ``True`` and ``palette`` is a yellowbrick palette, remap the shorthand
        color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
    rc : dict or None
        Dictionary of rc parameter mappings to override the above.
    """
    # Apply font scaling first, then the style (font family) and palette.
    _set_context(font_scale)
    set_style(rc={"font.family": font})
    set_palette(palette, color_codes=color_codes)
    # Explicit rc overrides always win over the presets applied above.
    if rc is not None:
        mpl.rcParams.update(rc)
|
(palette='yellowbrick', font='sans-serif', font_scale=1, color_codes=True, rc=None)
|
50,697 |
yellowbrick.style.palettes
|
set_color_codes
|
Change how matplotlib color shorthands are interpreted.
Calling this will change how shorthand codes like "b" or "g"
are interpreted by matplotlib in subsequent plots.
Parameters
----------
palette : str
Named yellowbrick palette to use as the source of colors.
See Also
--------
set_palette : Color codes can also be set through the function that
sets the matplotlib color cycle.
|
def set_color_codes(palette="accent"):
    """
    Remap matplotlib's single-letter color shorthands.

    After this call, codes such as ``"b"`` or ``"g"`` resolve to colors
    drawn from the named yellowbrick palette in subsequent plots.

    Parameters
    ----------
    palette : str
        Named yellowbrick palette to use as the source of colors.

    See Also
    --------
    set_palette : Color codes can also be set through the function that
        sets the matplotlib color cycle.
    """
    if palette not in PALETTES:
        raise YellowbrickValueError("'{}' is not a recognized palette!".format(palette))
    # Exactly seven colors are needed, one per shorthand in "bgrmyck".
    colors = PALETTES[palette]
    size = len(colors)
    if size > 7:
        # Drop any colors beyond the seventh
        colors = colors[:7]
    elif size < 7:
        # Pad short palettes with the key (black) color
        colors = colors + [YB_KEY]
    # Register each shorthand with matplotlib's color converter.
    for shorthand, color in zip("bgrmyck", colors):
        rgb = mpl.colors.colorConverter.to_rgb(color)
        mpl.colors.colorConverter.colors[shorthand] = rgb
        mpl.colors.colorConverter.cache[shorthand] = rgb
|
(palette='accent')
|
50,698 |
yellowbrick.style.rcmod
|
set_palette
|
Set the matplotlib color cycle using a seaborn palette.
Parameters
----------
palette : yellowbrick color palette | seaborn color palette (with ``sns_`` prepended)
Palette definition. Should be something that :func:`color_palette`
can process.
n_colors : int
Number of colors in the cycle. The default number of colors will depend
on the format of ``palette``, see the :func:`color_palette`
documentation for more information.
color_codes : bool
If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
|
def set_palette(palette, n_colors=None, color_codes=False):
    """
    Install a color palette as the matplotlib color cycle.

    Parameters
    ----------
    palette : yellowbrick color palette | seaborn color palette (with ``sns_`` prepended)
        Palette definition. Should be something that :func:`color_palette`
        can process.
    n_colors : int
        Number of colors in the cycle. The default number of colors will depend
        on the format of ``palette``, see the :func:`color_palette`
        documentation for more information.
    color_codes : bool
        If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
        color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
    """
    colors = color_palette(palette, n_colors)
    if mpl_ge_150:
        # Modern matplotlib expects a cycler object for the prop cycle.
        from cycler import cycler
        mpl.rcParams["axes.prop_cycle"] = cycler("color", colors)
    else:
        # Older matplotlib used a plain list under a different rc key.
        mpl.rcParams["axes.color_cycle"] = list(colors)
    # Patches (bars, boxes, ...) default to the first palette color.
    mpl.rcParams["patch.facecolor"] = colors[0]
    if color_codes:
        set_color_codes(palette)
|
(palette, n_colors=None, color_codes=False)
|
50,699 |
yellowbrick.style.rcmod
|
set_style
|
Set the aesthetic style of the plots.
This affects things like the color of the axes, whether a grid is
enabled by default, and other aesthetic elements.
Parameters
----------
style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
A dictionary of parameters or the name of a preconfigured set.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
style dictionaries. This only updates parameters that are
considered part of the style definition.
|
def set_style(style=None, rc=None):
    """
    Set the aesthetic style of the plots.

    This affects things like the color of the axes, whether a grid is
    enabled by default, and other aesthetic elements.

    Parameters
    ----------
    style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
        A dictionary of parameters or the name of a preconfigured set.
    rc : dict, optional
        Parameter mappings to override the values in the preset seaborn
        style dictionaries. This only updates parameters that are
        considered part of the style definition.
    """
    # Resolve the named/explicit style into rc params and apply them.
    mpl.rcParams.update(_axes_style(style, rc))
|
(style=None, rc=None)
|
50,705 |
iptools
|
IpRange
|
Range of ip addresses.
Converts a CIDR notation address, ip address and subnet, tuple of ip
addresses or start and end addresses into a smart object which can perform
``in`` and ``not in`` tests and iterate all of the addresses in the range.
>>> r = IpRange('127.0.0.1', '127.255.255.255')
>>> '127.127.127.127' in r
True
>>> '10.0.0.1' in r
False
>>> 2130706433 in r
True
>>> # IPv4 mapped IPv6 addresses are valid in an IPv4 block
>>> '::ffff:127.127.127.127' in r
True
>>> # but only if they are actually in the block :)
>>> '::ffff:192.0.2.128' in r
False
>>> '::ffff:c000:0280' in r
False
>>> r = IpRange('127/24')
>>> print(r)
('127.0.0.0', '127.0.0.255')
>>> r = IpRange('127/30')
>>> for ip in r:
... print(ip)
127.0.0.0
127.0.0.1
127.0.0.2
127.0.0.3
>>> print(IpRange('127.0.0.255', '127.0.0.0'))
('127.0.0.0', '127.0.0.255')
>>> r = IpRange('127/255.255.255.0')
>>> print(r)
('127.0.0.0', '127.0.0.255')
>>> r = IpRange('::ffff:0000:0000', '::ffff:ffff:ffff')
>>> '::ffff:192.0.2.128' in r
True
>>> '::ffff:c000:0280' in r
True
>>> 281473902969472 in r
True
>>> '192.168.2.128' in r
False
>>> 2130706433 in r
False
>>> r = IpRange('::ffff:ffff:0000/120')
>>> for ip in r:
... print(ip) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
::ffff:ffff:0 ... ::ffff:ffff:6d ... ::ffff:ffff:ff
:param start: Ip address in dotted quad format, CIDR notation, subnet
format or ``(start, end)`` tuple of ip addresses in dotted quad format.
:type start: str or tuple
:param end: Ip address in dotted quad format or ``None``.
:type end: str
|
class IpRange (Sequence):
    """
    Range of ip addresses.
    Converts a CIDR notation address, ip address and subnet, tuple of ip
    addresses or start and end addresses into a smart object which can perform
    ``in`` and ``not in`` tests and iterate all of the addresses in the range.
    >>> r = IpRange('127.0.0.1', '127.255.255.255')
    >>> '127.127.127.127' in r
    True
    >>> '10.0.0.1' in r
    False
    >>> 2130706433 in r
    True
    >>> # IPv4 mapped IPv6 addresses are valid in an IPv4 block
    >>> '::ffff:127.127.127.127' in r
    True
    >>> # but only if they are actually in the block :)
    >>> '::ffff:192.0.2.128' in r
    False
    >>> '::ffff:c000:0280' in r
    False
    >>> r = IpRange('127/24')
    >>> print(r)
    ('127.0.0.0', '127.0.0.255')
    >>> r = IpRange('127/30')
    >>> for ip in r:
    ...     print(ip)
    127.0.0.0
    127.0.0.1
    127.0.0.2
    127.0.0.3
    >>> print(IpRange('127.0.0.255', '127.0.0.0'))
    ('127.0.0.0', '127.0.0.255')
    >>> r = IpRange('127/255.255.255.0')
    >>> print(r)
    ('127.0.0.0', '127.0.0.255')
    >>> r = IpRange('::ffff:0000:0000', '::ffff:ffff:ffff')
    >>> '::ffff:192.0.2.128' in r
    True
    >>> '::ffff:c000:0280' in r
    True
    >>> 281473902969472 in r
    True
    >>> '192.168.2.128' in r
    False
    >>> 2130706433 in r
    False
    >>> r = IpRange('::ffff:ffff:0000/120')
    >>> for ip in r:
    ...     print(ip)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ::ffff:ffff:0 ... ::ffff:ffff:6d ... ::ffff:ffff:ff
    :param start: Ip address in dotted quad format, CIDR notation, subnet
        format or ``(start, end)`` tuple of ip addresses in dotted quad format.
    :type start: str or tuple
    :param end: Ip address in dotted quad format or ``None``.
    :type end: str
    """
    def __init__(self, start, end=None):
        # Normalize the many accepted notations down to a (start, end)
        # pair of address strings before converting them to integers.
        if end is None:
            if isinstance(start, IpRange):
                # copy constructor
                start, end = start[0], start[-1]
            elif isinstance(start, tuple):
                # occurs when IpRangeList calls via map to pass start and end
                start, end = start
            elif ipv4.validate_cidr(start):
                # CIDR notation range
                start, end = ipv4.cidr2block(start)
            elif ipv6.validate_cidr(start):
                # CIDR notation range
                start, end = ipv6.cidr2block(start)
            elif ipv4.validate_subnet(start):
                # Netmask notation range
                start, end = ipv4.subnet2block(start)
            else:
                # degenerate range
                end = start
        start = _address2long(start)
        end = _address2long(end)
        # Store bounds in sorted order so reversed arguments still form
        # a valid range (see the class doctest with swapped endpoints).
        self.startIp = min(start, end)
        self.endIp = max(start, end)
        self._len = self.endIp - self.startIp + 1
        # Any end address beyond the 32-bit space marks this as IPv6.
        self._ipver = ipv4
        if self.endIp > ipv4.MAX_IP:
            self._ipver = ipv6
    # end __init__
    def __repr__(self):
        """
        >>> repr(IpRange('127.0.0.1'))
        "IpRange('127.0.0.1', '127.0.0.1')"
        >>> repr(IpRange('10/8'))
        "IpRange('10.0.0.0', '10.255.255.255')"
        >>> repr(IpRange('127.0.0.255', '127.0.0.0'))
        "IpRange('127.0.0.0', '127.0.0.255')"
        """
        return "IpRange(%r, %r)" % (
            self._ipver.long2ip(self.startIp),
            self._ipver.long2ip(self.endIp))
    # end __repr__
    def __str__(self):
        """
        >>> str(IpRange('127.0.0.1'))
        "('127.0.0.1', '127.0.0.1')"
        >>> str(IpRange('10/8'))
        "('10.0.0.0', '10.255.255.255')"
        >>> str(IpRange('127.0.0.255', '127.0.0.0'))
        "('127.0.0.0', '127.0.0.255')"
        """
        # Render as the repr of a (start, end) tuple of dotted addresses.
        return (
            self._ipver.long2ip(self.startIp),
            self._ipver.long2ip(self.endIp)).__repr__()
    # end __str__
    def __eq__(self, other):
        """
        >>> IpRange('127.0.0.1') == IpRange('127.0.0.1')
        True
        >>> IpRange('127.0.0.1') == IpRange('127.0.0.2')
        False
        >>> IpRange('10/8') == IpRange('10', '10.255.255.255')
        True
        """
        # Equal iff both endpoints match; non-IpRange objects never compare
        # equal.
        return isinstance(other, IpRange) and \
            self.startIp == other.startIp and \
            self.endIp == other.endIp
    # end __eq__
    def __len__(self):
        """
        Return the length of the range.
        >>> len(IpRange('127.0.0.1'))
        1
        >>> len(IpRange('127/31'))
        2
        >>> len(IpRange('127/22'))
        1024
        >>> IpRange('fe80::/10').__len__() == 2**118
        True
        """
        # Precomputed in __init__ as endIp - startIp + 1.
        return self._len
    # end __len__
    def __hash__(self):
        """
        >>> a = IpRange('127.0.0.0/8')
        >>> b = IpRange('127.0.0.0', '127.255.255.255')
        >>> a.__hash__() == b.__hash__()
        True
        >>> c = IpRange('10/8')
        >>> a.__hash__() == c.__hash__()
        False
        >>> b.__hash__() == c.__hash__()
        False
        """
        # Hash the (start, end) pair so ranges built from different
        # notations covering the same addresses hash alike (consistent
        # with __eq__).
        return hash((self.startIp, self.endIp))
    # end __hash__
    def _cast(self, item):
        # Normalize an address-like value to an integer address.
        if isinstance(item, basestring):
            item = _address2long(item)
        if type(item) not in (type(1), type(ipv4.MAX_IP), type(ipv6.MAX_IP)):
            raise TypeError(
                "expected ip address, 32-bit integer or 128-bit integer")
        if ipv4 == self._ipver and item > ipv4.MAX_IP:
            # casting an ipv6 in an ipv4 range
            # downcast to ipv4 iff address is in the IPv4 mapped block
            if item in _IPV6_MAPPED_IPV4:
                item = item & ipv4.MAX_IP
            # end if
        return item
    # end _cast
    def index(self, item):
        """
        Return the 0-based position of `item` in this IpRange.
        >>> r = IpRange('127.0.0.1', '127.255.255.255')
        >>> r.index('127.0.0.1')
        0
        >>> r.index('127.255.255.255')
        16777214
        >>> r.index('10.0.0.1')
        Traceback (most recent call last):
        ...
        ValueError: 10.0.0.1 is not in range
        :param item: Dotted-quad ip address.
        :type item: str
        :returns: Index of ip address in range
        """
        item = self._cast(item)
        # The index is simply the offset from the first address.
        offset = item - self.startIp
        if offset >= 0 and offset < self._len:
            return offset
        raise ValueError('%s is not in range' % self._ipver.long2ip(item))
    # end index
    def count(self, item):
        # A range holds each address at most once, so count is 0 or 1.
        return int(item in self)
    # end count
    def __contains__(self, item):
        """
        Implements membership test operators ``in`` and ``not in`` for the
        address range.
        >>> r = IpRange('127.0.0.1', '127.255.255.255')
        >>> '127.127.127.127' in r
        True
        >>> '10.0.0.1' in r
        False
        >>> 2130706433 in r
        True
        >>> 'invalid' in r
        Traceback (most recent call last):
        ...
        TypeError: expected ip address, 32-bit integer or 128-bit integer
        :param item: Dotted-quad ip address.
        :type item: str
        :returns: ``True`` if address is in range, ``False`` otherwise.
        """
        item = self._cast(item)
        return self.startIp <= item <= self.endIp
    # end __contains__
    def __getitem__(self, index):
        """
        >>> r = IpRange('127.0.0.1', '127.255.255.255')
        >>> r[0]
        '127.0.0.1'
        >>> r[16777214]
        '127.255.255.255'
        >>> r[-1]
        '127.255.255.255'
        >>> r[len(r)]
        Traceback (most recent call last):
        ...
        IndexError: index out of range
        >>> r[:]
        IpRange('127.0.0.1', '127.255.255.255')
        >>> r[1:]
        IpRange('127.0.0.2', '127.255.255.255')
        >>> r[-2:]
        IpRange('127.255.255.254', '127.255.255.255')
        >>> r[0:2]
        IpRange('127.0.0.1', '127.0.0.2')
        >>> r[0:-1]
        IpRange('127.0.0.1', '127.255.255.254')
        >>> r[:-2]
        IpRange('127.0.0.1', '127.255.255.253')
        >>> r[::2]
        Traceback (most recent call last):
        ...
        ValueError: slice step not supported
        """
        if isinstance(index, slice):
            if index.step not in (None, 1):
                # TODO: return an IpRangeList
                raise ValueError('slice step not supported')
            # Negative start counts back from the end, clamped to 0.
            start = index.start or 0
            if start < 0:
                start = max(0, start + self._len)
            if start >= self._len:
                raise IndexError('start index out of range')
            # A missing stop means "to the end"; negative stops count back
            # from the end but never cross in front of start.
            stop = index.stop or self._len
            if stop < 0:
                stop = max(start, stop + self._len)
            if stop > self._len:
                raise IndexError('stop index out of range')
            # Slicing yields a new IpRange over the sub-span.
            return IpRange(
                self._ipver.long2ip(self.startIp + start),
                self._ipver.long2ip(self.startIp + stop - 1))
        else:
            # Scalar index: negative values count back from the end.
            if index < 0:
                index = self._len + index
            if index < 0 or index >= self._len:
                raise IndexError('index out of range')
            return self._ipver.long2ip(self.startIp + index)
    # end __getitem__
    def __iter__(self):
        """
        Return an iterator over ip addresses in the range.
        >>> iter = IpRange('127/31').__iter__()
        >>> next(iter)
        '127.0.0.0'
        >>> next(iter)
        '127.0.0.1'
        >>> next(iter)
        Traceback (most recent call last):
        ...
        StopIteration
        """
        # Walk the range one integer at a time; lazy, so huge IPv6 spans
        # are cheap to start iterating.
        i = self.startIp
        while i <= self.endIp:
            yield self._ipver.long2ip(i)
            i += 1
    # end __iter__
|
(start, end=None)
|
50,706 |
iptools
|
__contains__
|
Implements membership test operators ``in`` and ``not in`` for the
address range.
>>> r = IpRange('127.0.0.1', '127.255.255.255')
>>> '127.127.127.127' in r
True
>>> '10.0.0.1' in r
False
>>> 2130706433 in r
True
>>> 'invalid' in r
Traceback (most recent call last):
...
TypeError: expected ip address, 32-bit integer or 128-bit integer
:param item: Dotted-quad ip address.
:type item: str
:returns: ``True`` if address is in range, ``False`` otherwise.
|
def __contains__(self, item):
    """
    Implements membership test operators ``in`` and ``not in`` for the
    address range.
    >>> r = IpRange('127.0.0.1', '127.255.255.255')
    >>> '127.127.127.127' in r
    True
    >>> '10.0.0.1' in r
    False
    >>> 2130706433 in r
    True
    >>> 'invalid' in r
    Traceback (most recent call last):
    ...
    TypeError: expected ip address, 32-bit integer or 128-bit integer
    :param item: Dotted-quad ip address.
    :type item: str
    :returns: ``True`` if address is in range, ``False`` otherwise.
    """
    # Normalize strings (and downcast mapped IPv6) before bounds-checking.
    value = self._cast(item)
    return not (value < self.startIp or value > self.endIp)
|
(self, item)
|
50,707 |
iptools
|
__eq__
|
>>> IpRange('127.0.0.1') == IpRange('127.0.0.1')
True
>>> IpRange('127.0.0.1') == IpRange('127.0.0.2')
False
>>> IpRange('10/8') == IpRange('10', '10.255.255.255')
True
|
def __eq__(self, other):
    """
    >>> IpRange('127.0.0.1') == IpRange('127.0.0.1')
    True
    >>> IpRange('127.0.0.1') == IpRange('127.0.0.2')
    False
    >>> IpRange('10/8') == IpRange('10', '10.255.255.255')
    True
    """
    # Two ranges are equal when both endpoints match; anything that is
    # not an IpRange never compares equal.
    if not isinstance(other, IpRange):
        return False
    return (self.startIp, self.endIp) == (other.startIp, other.endIp)
|
(self, other)
|
50,708 |
iptools
|
__getitem__
|
>>> r = IpRange('127.0.0.1', '127.255.255.255')
>>> r[0]
'127.0.0.1'
>>> r[16777214]
'127.255.255.255'
>>> r[-1]
'127.255.255.255'
>>> r[len(r)]
Traceback (most recent call last):
...
IndexError: index out of range
>>> r[:]
IpRange('127.0.0.1', '127.255.255.255')
>>> r[1:]
IpRange('127.0.0.2', '127.255.255.255')
>>> r[-2:]
IpRange('127.255.255.254', '127.255.255.255')
>>> r[0:2]
IpRange('127.0.0.1', '127.0.0.2')
>>> r[0:-1]
IpRange('127.0.0.1', '127.255.255.254')
>>> r[:-2]
IpRange('127.0.0.1', '127.255.255.253')
>>> r[::2]
Traceback (most recent call last):
...
ValueError: slice step not supported
|
def __getitem__(self, index):
    """
    >>> r = IpRange('127.0.0.1', '127.255.255.255')
    >>> r[0]
    '127.0.0.1'
    >>> r[16777214]
    '127.255.255.255'
    >>> r[-1]
    '127.255.255.255'
    >>> r[len(r)]
    Traceback (most recent call last):
    ...
    IndexError: index out of range
    >>> r[:]
    IpRange('127.0.0.1', '127.255.255.255')
    >>> r[1:]
    IpRange('127.0.0.2', '127.255.255.255')
    >>> r[-2:]
    IpRange('127.255.255.254', '127.255.255.255')
    >>> r[0:2]
    IpRange('127.0.0.1', '127.0.0.2')
    >>> r[0:-1]
    IpRange('127.0.0.1', '127.255.255.254')
    >>> r[:-2]
    IpRange('127.0.0.1', '127.255.255.253')
    >>> r[::2]
    Traceback (most recent call last):
    ...
    ValueError: slice step not supported
    """
    if isinstance(index, slice):
        if index.step not in (None, 1):
            # TODO: return an IpRangeList
            raise ValueError('slice step not supported')
        # Negative start counts back from the end, clamped to 0.
        start = index.start or 0
        if start < 0:
            start = max(0, start + self._len)
        if start >= self._len:
            raise IndexError('start index out of range')
        # A missing stop means "to the end"; negative stops count back
        # from the end but never cross in front of start.
        # NOTE(review): a stop of 0 is falsy and is also treated as "to
        # the end" here, so r[0:0] returns the full range — confirm that
        # this is intended.
        stop = index.stop or self._len
        if stop < 0:
            stop = max(start, stop + self._len)
        if stop > self._len:
            raise IndexError('stop index out of range')
        # Slicing yields a new IpRange over the sub-span.
        return IpRange(
            self._ipver.long2ip(self.startIp + start),
            self._ipver.long2ip(self.startIp + stop - 1))
    else:
        # Scalar index: negative values count back from the end.
        if index < 0:
            index = self._len + index
        if index < 0 or index >= self._len:
            raise IndexError('index out of range')
        return self._ipver.long2ip(self.startIp + index)
|
(self, index)
|
50,709 |
iptools
|
__hash__
|
>>> a = IpRange('127.0.0.0/8')
>>> b = IpRange('127.0.0.0', '127.255.255.255')
>>> a.__hash__() == b.__hash__()
True
>>> c = IpRange('10/8')
>>> a.__hash__() == c.__hash__()
False
>>> b.__hash__() == c.__hash__()
False
|
def __hash__(self):
    """
    >>> a = IpRange('127.0.0.0/8')
    >>> b = IpRange('127.0.0.0', '127.255.255.255')
    >>> a.__hash__() == b.__hash__()
    True
    >>> c = IpRange('10/8')
    >>> a.__hash__() == c.__hash__()
    False
    >>> b.__hash__() == c.__hash__()
    False
    """
    # Hash on the endpoint pair so ranges built from different notations
    # covering the same addresses hash alike (consistent with __eq__).
    endpoints = (self.startIp, self.endIp)
    return hash(endpoints)
|
(self)
|
50,710 |
iptools
|
__init__
| null |
def __init__(self, start, end=None):
    # Normalize the many accepted notations down to a (start, end) pair
    # of address strings before converting them to integers.
    if end is None:
        if isinstance(start, IpRange):
            # copy constructor
            start, end = start[0], start[-1]
        elif isinstance(start, tuple):
            # occurs when IpRangeList calls via map to pass start and end
            start, end = start
        elif ipv4.validate_cidr(start):
            # CIDR notation range
            start, end = ipv4.cidr2block(start)
        elif ipv6.validate_cidr(start):
            # CIDR notation range
            start, end = ipv6.cidr2block(start)
        elif ipv4.validate_subnet(start):
            # Netmask notation range
            start, end = ipv4.subnet2block(start)
        else:
            # degenerate range
            end = start
    start = _address2long(start)
    end = _address2long(end)
    # Store bounds in sorted order so reversed arguments still form a
    # valid range.
    self.startIp = min(start, end)
    self.endIp = max(start, end)
    self._len = self.endIp - self.startIp + 1
    # Any end address beyond the 32-bit space marks this as IPv6.
    self._ipver = ipv4
    if self.endIp > ipv4.MAX_IP:
        self._ipver = ipv6
|
(self, start, end=None)
|
50,711 |
iptools
|
__iter__
|
Return an iterator over ip addresses in the range.
>>> iter = IpRange('127/31').__iter__()
>>> next(iter)
'127.0.0.0'
>>> next(iter)
'127.0.0.1'
>>> next(iter)
Traceback (most recent call last):
...
StopIteration
|
def __iter__(self):
    """
    Return an iterator over ip addresses in the range.
    >>> iter = IpRange('127/31').__iter__()
    >>> next(iter)
    '127.0.0.0'
    >>> next(iter)
    '127.0.0.1'
    >>> next(iter)
    Traceback (most recent call last):
    ...
    StopIteration
    """
    # Walk the range one integer address at a time; the generator is
    # lazy, so even huge IPv6 spans are cheap to start iterating.
    current = self.startIp
    while current <= self.endIp:
        yield self._ipver.long2ip(current)
        current += 1
|
(self)
|
50,712 |
iptools
|
__len__
|
Return the length of the range.
>>> len(IpRange('127.0.0.1'))
1
>>> len(IpRange('127/31'))
2
>>> len(IpRange('127/22'))
1024
>>> IpRange('fe80::/10').__len__() == 2**118
True
|
def __len__(self):
    """
    Return the length of the range.
    >>> len(IpRange('127.0.0.1'))
    1
    >>> len(IpRange('127/31'))
    2
    >>> len(IpRange('127/22'))
    1024
    >>> IpRange('fe80::/10').__len__() == 2**118
    True
    """
    # Precomputed as endIp - startIp + 1; note the built-in len() caps
    # at sys.maxsize, hence the direct __len__() call in the doctest.
    return self._len
|
(self)
|
50,713 |
iptools
|
__repr__
|
>>> repr(IpRange('127.0.0.1'))
"IpRange('127.0.0.1', '127.0.0.1')"
>>> repr(IpRange('10/8'))
"IpRange('10.0.0.0', '10.255.255.255')"
>>> repr(IpRange('127.0.0.255', '127.0.0.0'))
"IpRange('127.0.0.0', '127.0.0.255')"
|
def __repr__(self):
    """
    >>> repr(IpRange('127.0.0.1'))
    "IpRange('127.0.0.1', '127.0.0.1')"
    >>> repr(IpRange('10/8'))
    "IpRange('10.0.0.0', '10.255.255.255')"
    >>> repr(IpRange('127.0.0.255', '127.0.0.0'))
    "IpRange('127.0.0.0', '127.0.0.255')"
    """
    # Constructor-style repr built from the dotted endpoint addresses.
    return "IpRange({!r}, {!r})".format(
        self._ipver.long2ip(self.startIp),
        self._ipver.long2ip(self.endIp),
    )
|
(self)
|
50,714 |
iptools
|
__str__
|
>>> str(IpRange('127.0.0.1'))
"('127.0.0.1', '127.0.0.1')"
>>> str(IpRange('10/8'))
"('10.0.0.0', '10.255.255.255')"
>>> str(IpRange('127.0.0.255', '127.0.0.0'))
"('127.0.0.0', '127.0.0.255')"
|
def __str__(self):
    """
    >>> str(IpRange('127.0.0.1'))
    "('127.0.0.1', '127.0.0.1')"
    >>> str(IpRange('10/8'))
    "('10.0.0.0', '10.255.255.255')"
    >>> str(IpRange('127.0.0.255', '127.0.0.0'))
    "('127.0.0.0', '127.0.0.255')"
    """
    # Render as the repr of a (start, end) tuple of dotted addresses.
    pair = (
        self._ipver.long2ip(self.startIp),
        self._ipver.long2ip(self.endIp),
    )
    return repr(pair)
|
(self)
|
50,715 |
iptools
|
_cast
| null |
def _cast(self, item):
    # Normalize an address-like value to an integer address.
    if isinstance(item, basestring):
        item = _address2long(item)
    # Strict type() membership (rather than isinstance) rejects bool and,
    # on Python 2, admits both int and long — presumably intentional for
    # the 2/3 compatibility this module maintains; verify before porting.
    if type(item) not in (type(1), type(ipv4.MAX_IP), type(ipv6.MAX_IP)):
        raise TypeError(
            "expected ip address, 32-bit integer or 128-bit integer")
    if ipv4 == self._ipver and item > ipv4.MAX_IP:
        # casting an ipv6 in an ipv4 range
        # downcast to ipv4 iff address is in the IPv4 mapped block
        if item in _IPV6_MAPPED_IPV4:
            item = item & ipv4.MAX_IP
        # end if
    return item
|
(self, item)
|
50,716 |
iptools
|
count
| null |
def count(self, item):
    """Return 1 if *item* falls inside this range, 0 otherwise.

    A range contains each address at most once, so the count is binary.
    """
    return 1 if item in self else 0
|
(self, item)
|
50,717 |
iptools
|
index
|
Return the 0-based position of `item` in this IpRange.
>>> r = IpRange('127.0.0.1', '127.255.255.255')
>>> r.index('127.0.0.1')
0
>>> r.index('127.255.255.255')
16777214
>>> r.index('10.0.0.1')
Traceback (most recent call last):
...
ValueError: 10.0.0.1 is not in range
:param item: Dotted-quad ip address.
:type item: str
:returns: Index of ip address in range
|
def index(self, item):
    """
    Return the 0-based position of `item` in this IpRange.
    >>> r = IpRange('127.0.0.1', '127.255.255.255')
    >>> r.index('127.0.0.1')
    0
    >>> r.index('127.255.255.255')
    16777214
    >>> r.index('10.0.0.1')
    Traceback (most recent call last):
    ...
    ValueError: 10.0.0.1 is not in range
    :param item: Dotted-quad ip address.
    :type item: str
    :returns: Index of ip address in range
    """
    # The position of an address is simply its offset from the first one.
    value = self._cast(item)
    position = value - self.startIp
    if 0 <= position < self._len:
        return position
    raise ValueError('%s is not in range' % self._ipver.long2ip(value))
|
(self, item)
|
50,718 |
iptools
|
IpRangeList
|
List of IpRange objects.
Converts a list of ip address and/or CIDR addresses into a list of IpRange
objects. This list can perform ``in`` and ``not in`` tests and iterate all
of the addresses in the range.
:param \*args: List of ip addresses or CIDR notation and/or
``(start, end)`` tuples of ip addresses.
:type \*args: list of str and/or tuple
|
class IpRangeList (object):
    r"""
    List of IpRange objects.
    Converts a list of ip address and/or CIDR addresses into a list of IpRange
    objects. This list can perform ``in`` and ``not in`` tests and iterate all
    of the addresses in the range.
    :param \*args: List of ip addresses or CIDR notation and/or
        ``(start, end)`` tuples of ip addresses.
    :type \*args: list of str and/or tuple
    """
    def __init__(self, *args):
        # Coerce every argument through the IpRange constructor, which
        # accepts addresses, CIDR/netmask notation, tuples and IpRanges.
        self.ips = tuple(map(IpRange, args))
    # end __init__
    def __repr__(self):
        """
        >>> repr(IpRangeList('127.0.0.1', '10/8', '192.168/16'))
        ... #doctest: +NORMALIZE_WHITESPACE
        "IpRangeList(IpRange('127.0.0.1', '127.0.0.1'),
        IpRange('10.0.0.0', '10.255.255.255'),
        IpRange('192.168.0.0', '192.168.255.255'))"
        >>> repr(
        ...     IpRangeList(IpRange('127.0.0.1', '127.0.0.1'),
        ...     IpRange('10.0.0.0', '10.255.255.255'),
        ...     IpRange('192.168.0.0', '192.168.255.255')))
        ... #doctest: +NORMALIZE_WHITESPACE
        "IpRangeList(IpRange('127.0.0.1', '127.0.0.1'),
        IpRange('10.0.0.0', '10.255.255.255'),
        IpRange('192.168.0.0', '192.168.255.255'))"
        """
        return "IpRangeList%r" % (self.ips,)
    # end __repr__
    def __str__(self):
        """
        >>> str(IpRangeList('127.0.0.1', '10/8', '192.168/16'))
        ... #doctest: +NORMALIZE_WHITESPACE
        "(('127.0.0.1', '127.0.0.1'),
        ('10.0.0.0', '10.255.255.255'),
        ('192.168.0.0', '192.168.255.255'))"
        """
        return "(%s)" % ", ".join(str(i) for i in self.ips)
    # end __str__
    def __contains__(self, item):
        """
        Implements membership test operators ``in`` and ``not in`` for the
        address ranges contained in the list.
        >>> r = IpRangeList('127.0.0.1', '10/8', '192.168/16')
        >>> '127.0.0.1' in r
        True
        >>> '10.0.0.1' in r
        True
        >>> 2130706433 in r
        True
        >>> 'invalid' in r
        Traceback (most recent call last):
        ...
        TypeError: expected ip address, 32-bit integer or 128-bit integer
        :param item: Dotted-quad ip address.
        :type item: str
        :returns: ``True`` if address is in list, ``False`` otherwise.
        """
        # Normalize strings to integers once, then delegate to each range.
        if isinstance(item, basestring):
            item = _address2long(item)
        if type(item) not in (type(1), type(ipv4.MAX_IP), type(ipv6.MAX_IP)):
            raise TypeError(
                "expected ip address, 32-bit integer or 128-bit integer")
        for r in self.ips:
            if item in r:
                return True
        return False
    # end __contains__
    def __iter__(self):
        """
        Return an iterator over all ip addresses in the list.
        >>> iter = IpRangeList('127.0.0.1').__iter__()
        >>> next(iter)
        '127.0.0.1'
        >>> next(iter)
        Traceback (most recent call last):
        ...
        StopIteration
        >>> iter = IpRangeList('127.0.0.1', '10/31').__iter__()
        >>> next(iter)
        '127.0.0.1'
        >>> next(iter)
        '10.0.0.0'
        >>> next(iter)
        '10.0.0.1'
        >>> next(iter)
        Traceback (most recent call last):
        ...
        StopIteration
        """
        # Chain the contained ranges in order.
        for r in self.ips:
            for ip in r:
                yield ip
    # end __iter__
    def __len__(self):
        """
        Return the length of all ranges in the list.
        >>> len(IpRangeList('127.0.0.1'))
        1
        >>> len(IpRangeList('127.0.0.1', '10/31'))
        3
        >>> len(IpRangeList('1/24'))
        256
        >>> len(IpRangeList('192.168.0.0/22'))
        1024
        >>> IpRangeList('fe80::/10').__len__() == 2**118
        True
        """
        # __len__() is called directly (not len()) to sidestep the
        # sys.maxsize cap on built-in len for huge IPv6 lists.
        return sum(r.__len__() for r in self.ips)
    # end __len__
    def __hash__(self):
        """
        Return correct hash for IpRangeList object
        >>> a = IpRange('127.0.0.0/8')
        >>> b = IpRange('127.0.0.0', '127.255.255.255')
        >>> IpRangeList(a, b).__hash__() == IpRangeList(a, b).__hash__()
        True
        >>> IpRangeList(a, b).__hash__() == IpRangeList(b, a).__hash__()
        True
        >>> c = IpRange('10.0.0.0/8')
        >>> IpRangeList(a, c).__hash__() == IpRangeList(c, a).__hash__()
        False
        """
        return hash(self.ips)
    # end __hash__
    def __eq__(self, other):
        """
        >>> a = IpRange('127.0.0.0/8')
        >>> b = IpRange('127.0.0.0', '127.255.255.255')
        >>> IpRangeList(a, b) == IpRangeList(a, b)
        True
        >>> IpRangeList(a, b) == IpRangeList(b, a)
        True
        >>> c = IpRange('10.0.0.0/8')
        >>> IpRangeList(a, c) == IpRangeList(c, a)
        False
        """
        # Compare the underlying IpRange tuples directly. The previous
        # implementation compared hash(self) == hash(other), which can
        # report two distinct lists as equal on a hash collision and can
        # compare "equal" to arbitrary non-IpRangeList objects whose hash
        # happens to match. Tuple comparison uses IpRange.__eq__, so the
        # doctest behavior above is preserved.
        return isinstance(other, IpRangeList) and self.ips == other.ips
    # end __eq__
|
(*args)
|
50,719 |
iptools
|
__contains__
|
Implements membership test operators ``in`` and ``not in`` for the
address ranges contained in the list.
>>> r = IpRangeList('127.0.0.1', '10/8', '192.168/16')
>>> '127.0.0.1' in r
True
>>> '10.0.0.1' in r
True
>>> 2130706433 in r
True
>>> 'invalid' in r
Traceback (most recent call last):
...
TypeError: expected ip address, 32-bit integer or 128-bit integer
:param item: Dotted-quad ip address.
:type item: str
:returns: ``True`` if address is in list, ``False`` otherwise.
|
def __contains__(self, item):
    """
    Implements membership test operators ``in`` and ``not in`` for the
    address ranges contained in the list.
    >>> r = IpRangeList('127.0.0.1', '10/8', '192.168/16')
    >>> '127.0.0.1' in r
    True
    >>> '10.0.0.1' in r
    True
    >>> 2130706433 in r
    True
    >>> 'invalid' in r
    Traceback (most recent call last):
    ...
    TypeError: expected ip address, 32-bit integer or 128-bit integer
    :param item: Dotted-quad ip address.
    :type item: str
    :returns: ``True`` if address is in list, ``False`` otherwise.
    """
    # Normalize strings to integer addresses once, up front.
    if isinstance(item, basestring):
        item = _address2long(item)
    if type(item) not in (type(1), type(ipv4.MAX_IP), type(ipv6.MAX_IP)):
        raise TypeError(
            "expected ip address, 32-bit integer or 128-bit integer")
    # Membership in any contained range means membership in the list.
    return any(item in block for block in self.ips)
|
(self, item)
|
50,720 |
iptools
|
__eq__
|
>>> a = IpRange('127.0.0.0/8')
>>> b = IpRange('127.0.0.0', '127.255.255.255')
>>> IpRangeList(a, b) == IpRangeList(a, b)
True
>>> IpRangeList(a, b) == IpRangeList(b, a)
True
>>> c = IpRange('10.0.0.0/8')
>>> IpRangeList(a, c) == IpRangeList(c, a)
False
|
def __eq__(self, other):
    """
    >>> a = IpRange('127.0.0.0/8')
    >>> b = IpRange('127.0.0.0', '127.255.255.255')
    >>> IpRangeList(a, b) == IpRangeList(a, b)
    True
    >>> IpRangeList(a, b) == IpRangeList(b, a)
    True
    >>> c = IpRange('10.0.0.0/8')
    >>> IpRangeList(a, c) == IpRangeList(c, a)
    False
    """
    # Compare the underlying IpRange tuples directly. The previous
    # implementation compared hash(self) == hash(other), which can report
    # two distinct lists as equal on a hash collision and can compare
    # "equal" to arbitrary non-IpRangeList objects whose hash happens to
    # match. Tuple comparison uses IpRange.__eq__, preserving the doctest
    # behavior above.
    return isinstance(other, IpRangeList) and self.ips == other.ips
|
(self, other)
|
50,721 |
iptools
|
__hash__
|
Return correct hash for IpRangeList object
>>> a = IpRange('127.0.0.0/8')
>>> b = IpRange('127.0.0.0', '127.255.255.255')
>>> IpRangeList(a, b).__hash__() == IpRangeList(a, b).__hash__()
True
>>> IpRangeList(a, b).__hash__() == IpRangeList(b, a).__hash__()
True
>>> c = IpRange('10.0.0.0/8')
>>> IpRangeList(a, c).__hash__() == IpRangeList(c, a).__hash__()
False
|
def __hash__(self):
    """
    Return correct hash for IpRangeList object
    >>> a = IpRange('127.0.0.0/8')
    >>> b = IpRange('127.0.0.0', '127.255.255.255')
    >>> IpRangeList(a, b).__hash__() == IpRangeList(a, b).__hash__()
    True
    >>> IpRangeList(a, b).__hash__() == IpRangeList(b, a).__hash__()
    True
    >>> c = IpRange('10.0.0.0/8')
    >>> IpRangeList(a, c).__hash__() == IpRangeList(c, a).__hash__()
    False
    """
    # delegate to the tuple of ranges so hash-equal lists compare equal
    return self.ips.__hash__()
|
(self)
|
50,722 |
iptools
|
__init__
| null |
def __init__(self, *args):
    # Normalize every argument into an IpRange and freeze the
    # collection as a tuple (keeps the list hashable).
    self.ips = tuple(IpRange(arg) for arg in args)
|
(self, *args)
|
50,723 |
iptools
|
__iter__
|
Return an iterator over all ip addresses in the list.
>>> iter = IpRangeList('127.0.0.1').__iter__()
>>> next(iter)
'127.0.0.1'
>>> next(iter)
Traceback (most recent call last):
...
StopIteration
>>> iter = IpRangeList('127.0.0.1', '10/31').__iter__()
>>> next(iter)
'127.0.0.1'
>>> next(iter)
'10.0.0.0'
>>> next(iter)
'10.0.0.1'
>>> next(iter)
Traceback (most recent call last):
...
StopIteration
|
def __iter__(self):
    """
    Return an iterator over all ip addresses in the list.
    >>> iter = IpRangeList('127.0.0.1').__iter__()
    >>> next(iter)
    '127.0.0.1'
    >>> next(iter)
    Traceback (most recent call last):
    ...
    StopIteration
    >>> iter = IpRangeList('127.0.0.1', '10/31').__iter__()
    >>> next(iter)
    '127.0.0.1'
    >>> next(iter)
    '10.0.0.0'
    >>> next(iter)
    '10.0.0.1'
    >>> next(iter)
    Traceback (most recent call last):
    ...
    StopIteration
    """
    # Lazily flatten every contained range into one address stream.
    return (addr for rng in self.ips for addr in rng)
|
(self)
|
50,724 |
iptools
|
__len__
|
Return the length of all ranges in the list.
>>> len(IpRangeList('127.0.0.1'))
1
>>> len(IpRangeList('127.0.0.1', '10/31'))
3
>>> len(IpRangeList('1/24'))
256
>>> len(IpRangeList('192.168.0.0/22'))
1024
>>> IpRangeList('fe80::/10').__len__() == 2**118
True
|
def __len__(self):
    """
    Return the length of all ranges in the list.
    >>> len(IpRangeList('127.0.0.1'))
    1
    >>> len(IpRangeList('127.0.0.1', '10/31'))
    3
    >>> len(IpRangeList('1/24'))
    256
    >>> len(IpRangeList('192.168.0.0/22'))
    1024
    >>> IpRangeList('fe80::/10').__len__() == 2**118
    True
    """
    # Call each range's __len__ directly: the builtin len() rejects
    # results beyond sys.maxsize, and IPv6 ranges can be far larger
    # (see the 2**118 doctest above).
    total = 0
    for rng in self.ips:
        total += rng.__len__()
    return total
|
(self)
|
50,725 |
iptools
|
__repr__
|
>>> repr(IpRangeList('127.0.0.1', '10/8', '192.168/16'))
... #doctest: +NORMALIZE_WHITESPACE
"IpRangeList(IpRange('127.0.0.1', '127.0.0.1'),
IpRange('10.0.0.0', '10.255.255.255'),
IpRange('192.168.0.0', '192.168.255.255'))"
>>> repr(
... IpRangeList(IpRange('127.0.0.1', '127.0.0.1'),
... IpRange('10.0.0.0', '10.255.255.255'),
... IpRange('192.168.0.0', '192.168.255.255')))
... #doctest: +NORMALIZE_WHITESPACE
"IpRangeList(IpRange('127.0.0.1', '127.0.0.1'),
IpRange('10.0.0.0', '10.255.255.255'),
IpRange('192.168.0.0', '192.168.255.255'))"
|
def __repr__(self):
    """
    >>> repr(IpRangeList('127.0.0.1', '10/8', '192.168/16'))
    ... #doctest: +NORMALIZE_WHITESPACE
    "IpRangeList(IpRange('127.0.0.1', '127.0.0.1'),
    IpRange('10.0.0.0', '10.255.255.255'),
    IpRange('192.168.0.0', '192.168.255.255'))"
    >>> repr(
    ...     IpRangeList(IpRange('127.0.0.1', '127.0.0.1'),
    ...         IpRange('10.0.0.0', '10.255.255.255'),
    ...         IpRange('192.168.0.0', '192.168.255.255')))
    ... #doctest: +NORMALIZE_WHITESPACE
    "IpRangeList(IpRange('127.0.0.1', '127.0.0.1'),
    IpRange('10.0.0.0', '10.255.255.255'),
    IpRange('192.168.0.0', '192.168.255.255'))"
    """
    # repr() of the ips tuple already supplies the parenthesized body
    return "IpRangeList" + repr(self.ips)
|
(self)
|
50,726 |
iptools
|
__str__
|
>>> str(IpRangeList('127.0.0.1', '10/8', '192.168/16'))
... #doctest: +NORMALIZE_WHITESPACE
"(('127.0.0.1', '127.0.0.1'),
('10.0.0.0', '10.255.255.255'),
('192.168.0.0', '192.168.255.255'))"
|
def __str__(self):
    """
    >>> str(IpRangeList('127.0.0.1', '10/8', '192.168/16'))
    ... #doctest: +NORMALIZE_WHITESPACE
    "(('127.0.0.1', '127.0.0.1'),
    ('10.0.0.0', '10.255.255.255'),
    ('192.168.0.0', '192.168.255.255'))"
    """
    # Unlike __repr__, this joins str() of each range by hand, so a
    # single-element list has no trailing comma.
    pieces = [str(rng) for rng in self.ips]
    return "(%s)" % ", ".join(pieces)
|
(self)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.