index | package | name | docstring | code | signature
---|---|---|---|---|---
52,557 |
asyncache
|
cached
|
Decorator to wrap a function or a coroutine with a memoizing callable
that saves results in a cache.
When ``lock`` is provided for a standard function, it is expected to
implement ``__enter__`` and ``__exit__``, which will be used to lock
the cache when it is updated. If it wraps a coroutine, ``lock``
must implement ``__aenter__`` and ``__aexit__``.
|
def cached(
cache: Optional[MutableMapping[_KT, Any]],
# ignoring the mypy error to be consistent with the type used
# in https://github.com/python/typeshed/tree/master/stubs/cachetools
key: Callable[..., _KT] = keys.hashkey, # type:ignore
lock: Optional["AbstractContextManager[Any]"] = None,
) -> IdentityFunction:
"""
Decorator to wrap a function or a coroutine with a memoizing callable
that saves results in a cache.
When ``lock`` is provided for a standard function, it is expected to
implement ``__enter__`` and ``__exit__``, which will be used to lock
the cache when it is updated. If it wraps a coroutine, ``lock``
must implement ``__aenter__`` and ``__aexit__``.
"""
lock = lock or NullContext()
def decorator(func):
if asyncio.iscoroutinefunction(func):
async def wrapper(*args, **kwargs):
k = key(*args, **kwargs)
try:
async with lock:
return cache[k]
except KeyError:
pass # key not found
val = await func(*args, **kwargs)
try:
async with lock:
cache[k] = val
except ValueError:
pass # val too large
return val
else:
def wrapper(*args, **kwargs):
k = key(*args, **kwargs)
try:
with lock:
return cache[k]
except KeyError:
pass # key not found
val = func(*args, **kwargs)
try:
with lock:
cache[k] = val
except ValueError:
pass # val too large
return val
return functools.wraps(func)(wrapper)
return decorator
|
(cache: Optional[MutableMapping[~_KT, Any]], key: Callable[..., ~_KT] = <function hashkey at 0x7f76318b20e0>, lock: Optional[contextlib.AbstractContextManager[Any]] = None) -> asyncache.IdentityFunction
|
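For context, a minimal usage sketch of the decorator above (assuming ``cachetools`` supplies the cache; ``fetch_user`` and its body are illustrative):

import asyncio
from cachetools import TTLCache
from asyncache import cached

# Each result is cached for 60 seconds; asyncio.Lock provides __aenter__/__aexit__.
@cached(TTLCache(maxsize=1024, ttl=60), lock=asyncio.Lock())
async def fetch_user(user_id: int) -> dict:
    await asyncio.sleep(0.1)  # stand-in for slow I/O
    return {"id": user_id}

async def main() -> None:
    await fetch_user(1)  # computed, then stored in the cache
    await fetch_user(1)  # returned from the cache

asyncio.run(main())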
52,558 |
asyncache
|
cachedmethod
|
Decorator to wrap a class or instance method with a memoizing
callable that saves results in a cache. This works similarly to
`cached`, but the arguments `cache` and `lock` are callables that
return the cache object and the lock respectively.
|
def cachedmethod(
cache: Callable[[Any], Optional[MutableMapping[_KT, Any]]],
# ignoring the mypy error to be consistent with the type used
# in https://github.com/python/typeshed/tree/master/stubs/cachetools
key: Callable[..., _KT] = keys.hashkey, # type:ignore
lock: Optional[Callable[[Any], "AbstractContextManager[Any]"]] = None,
) -> IdentityFunction:
"""Decorator to wrap a class or instance method with a memoizing
callable that saves results in a cache. This works similarly to
`cached`, but the arguments `cache` and `lock` are callables that
return the cache object and the lock respectively.
"""
lock = lock or (lambda _: NullContext())
def decorator(method):
if asyncio.iscoroutinefunction(method):
async def wrapper(self, *args, **kwargs):
method_cache = cache(self)
if method_cache is None:
return await method(self, *args, **kwargs)
k = key(self, *args, **kwargs)
try:
async with lock(self):
return method_cache[k]
except KeyError:
pass # key not found
val = await method(self, *args, **kwargs)
try:
async with lock(self):
method_cache[k] = val
except ValueError:
pass # val too large
return val
else:
def wrapper(self, *args, **kwargs):
method_cache = cache(self)
if method_cache is None:
return method(self, *args, **kwargs)
k = key(self, *args, **kwargs)  # include self, matching the coroutine branch above
try:
with lock(self):
return method_cache[k]
except KeyError:
pass # key not found
val = method(self, *args, **kwargs)
try:
with lock(self):
method_cache[k] = val
except ValueError:
pass # val too large
return val
return functools.wraps(method)(wrapper)
return decorator
|
(cache: Callable[[Any], Optional[MutableMapping[~_KT, Any]]], key: Callable[..., ~_KT] = <function hashkey at 0x7f76318b20e0>, lock: Optional[Callable[[Any], contextlib.AbstractContextManager[Any]]] = None) -> asyncache.IdentityFunction
|
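A sketch of per-instance caching with the decorator above (``operator.attrgetter`` pulls the cache off the instance; the ``Repo`` class is illustrative):

import operator
from cachetools import LRUCache
from asyncache import cachedmethod

class Repo:
    def __init__(self) -> None:
        self.cache = LRUCache(maxsize=128)

    # `cache` is a callable of the instance, matching the signature above
    @cachedmethod(operator.attrgetter("cache"))
    async def get(self, key: str) -> str:
        return key.upper()  # stand-in for an expensive lookup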
52,561 |
boxdiff.models.core
|
BoundingBox
|
Identified 2D bounding box: label + position, width, and height.
|
class BoundingBox:
"""
Identified 2D bounding box: label + position, width, and height.
"""
id: ID
label: str
x: float
y: float
width: float
height: float
def __sub__(self, other: 'BoundingBox') -> BoundingBoxDelta:
"""
Compute the delta between two bounding boxes.
"""
return BoundingBoxDelta(
self.id,
other.label,
self.label,
self.x - other.x,
self.y - other.y,
self.width - other.width,
self.height - other.height,
)
def __add__(self, delta: BoundingBoxDelta) -> 'BoundingBox':
return BoundingBox(
delta.id,
delta.label_new,
self.x + delta.x_delta,
self.y + delta.y_delta,
self.width + delta.width_delta,
self.height + delta.height_delta,
)
def __iadd__(self, delta: BoundingBoxDelta) -> 'BoundingBox':
self.label = delta.label_new
self.x += delta.x_delta
self.y += delta.y_delta
self.width += delta.width_delta
self.height += delta.height_delta
return self
@property
def points(self) -> Tuple[float, float, float, float]:
"""
Return the corner coordinates (x1, y1, x2, y2) of the bounding box.
"""
return self.x, self.y, self.x + self.width, self.y + self.height
@property
def area(self) -> float:
"""
Return the area of the bounding box.
"""
return self.width * self.height
def __and__(self, other: 'BoundingBox') -> Optional['BoundingBox']:
"""
Compute the intersection of two bounding boxes, or None if they don't overlap.
"""
x1, y1, x2, y2 = self.points
x3, y3, x4, y4 = other.points
x = max(x1, x3)
y = max(y1, y3)
w = min(x2, x4) - x
h = min(y2, y4) - y
if w <= 0 or h <= 0:
return None
return BoundingBox(self.id, self.label, x, y, w, h)
def iou(self, other: 'BoundingBox') -> float:
"""
Compute the intersection over union of two bounding boxes.
"""
intersection = self & other
if intersection is None:
return 0
return intersection.area / (self.area + other.area - intersection.area)
|
(id: ~ID, label: str, x: float, y: float, width: float, height: float) -> None
|
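A worked example of the geometry above: two 2x2 boxes offset by 1 on each axis share a 1x1 intersection, so IoU = 1 / (4 + 4 - 1) = 1/7 ≈ 0.143 (IDs and labels are illustrative):

from boxdiff.models.core import BoundingBox

a = BoundingBox(id=1, label="cat", x=0.0, y=0.0, width=2.0, height=2.0)
b = BoundingBox(id=2, label="cat", x=1.0, y=1.0, width=2.0, height=2.0)

inter = a & b        # BoundingBox(id=1, label='cat', x=1.0, y=1.0, width=1.0, height=1.0)
print(inter.area)    # 1.0
print(a.iou(b))      # 0.14285714285714285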
52,562 |
boxdiff.models.core
|
__add__
| null |
def __add__(self, delta: BoundingBoxDelta) -> 'BoundingBox':
return BoundingBox(
delta.id,
delta.label_new,
self.x + delta.x_delta,
self.y + delta.y_delta,
self.width + delta.width_delta,
self.height + delta.height_delta,
)
|
(self, delta: boxdiff.models.deltas.BoundingBoxDelta) -> boxdiff.models.core.BoundingBox
|
52,563 |
boxdiff.models.core
|
__and__
|
Compute the intersection of two bounding boxes, or None if they don't overlap.
|
def __and__(self, other: 'BoundingBox') -> Optional['BoundingBox']:
"""
Compute the intersection of two bounding boxes, or None if they don't overlap.
"""
x1, y1, x2, y2 = self.points
x3, y3, x4, y4 = other.points
x = max(x1, x3)
y = max(y1, y3)
w = min(x2, x4) - x
h = min(y2, y4) - y
if w <= 0 or h <= 0:
return None
return BoundingBox(self.id, self.label, x, y, w, h)
|
(self, other: boxdiff.models.core.BoundingBox) -> Optional[boxdiff.models.core.BoundingBox]
|
52,564 |
boxdiff.models.core
|
__eq__
| null |
from typing import List, Optional, Tuple, TypeVar
from uuid import UUID
from pydantic.dataclasses import dataclass
from dataclasses_json import dataclass_json
from boxdiff.models.deltas import BoundingBoxDelta, ImageDelta, ImageSetDelta
ID = TypeVar('ID', int, UUID, str) # Parses in order: int, UUID, then str
@dataclass_json
@dataclass
class BoundingBox:
"""
Identified 2D bounding box: label + position, width, and height.
"""
id: ID
label: str
x: float
y: float
width: float
height: float
def __sub__(self, other: 'BoundingBox') -> BoundingBoxDelta:
"""
Compute the delta between two bounding boxes.
"""
return BoundingBoxDelta(
self.id,
other.label,
self.label,
self.x - other.x,
self.y - other.y,
self.width - other.width,
self.height - other.height,
)
def __add__(self, delta: BoundingBoxDelta) -> 'BoundingBox':
return BoundingBox(
delta.id,
delta.label_new,
self.x + delta.x_delta,
self.y + delta.y_delta,
self.width + delta.width_delta,
self.height + delta.height_delta,
)
def __iadd__(self, delta: BoundingBoxDelta) -> 'BoundingBox':
self.label = delta.label_new
self.x += delta.x_delta
self.y += delta.y_delta
self.width += delta.width_delta
self.height += delta.height_delta
return self
@property
def points(self) -> Tuple[float, float, float, float]:
"""
Return the corner coordinates (x1, y1, x2, y2) of the bounding box.
"""
return self.x, self.y, self.x + self.width, self.y + self.height
@property
def area(self) -> float:
"""
Return the area of the bounding box.
"""
return self.width * self.height
def __and__(self, other: 'BoundingBox') -> Optional['BoundingBox']:
"""
Compute the intersection of two bounding boxes, or None if they don't overlap.
"""
x1, y1, x2, y2 = self.points
x3, y3, x4, y4 = other.points
x = max(x1, x3)
y = max(y1, y3)
w = min(x2, x4) - x
h = min(y2, y4) - y
if w <= 0 or h <= 0:
return None
return BoundingBox(self.id, self.label, x, y, w, h)
def iou(self, other: 'BoundingBox') -> float:
"""
Compute the intersection over union of two bounding boxes.
"""
intersection = self & other
if intersection is None:
return 0
return intersection.area / (self.area + other.area - intersection.area)
|
(self, other)
|
52,565 |
boxdiff.models.core
|
__iadd__
| null |
def __iadd__(self, delta: BoundingBoxDelta) -> 'BoundingBox':
self.label = delta.label_new
self.x += delta.x_delta
self.y += delta.y_delta
self.width += delta.width_delta
self.height += delta.height_delta
return self
|
(self, delta: boxdiff.models.deltas.BoundingBoxDelta) -> boxdiff.models.core.BoundingBox
|
52,566 |
pydantic._internal._dataclasses
|
__init__
| null |
def complete_dataclass(
cls: type[Any],
config_wrapper: _config.ConfigWrapper,
*,
raise_errors: bool = True,
types_namespace: dict[str, Any] | None,
) -> bool:
"""Finish building a pydantic dataclass.
This logic is called on a class which has already been wrapped in `dataclasses.dataclass()`.
This is somewhat analogous to `pydantic._internal._model_construction.complete_model_class`.
Args:
cls: The class.
config_wrapper: The config wrapper instance.
raise_errors: Whether to raise errors, defaults to `True`.
types_namespace: The types namespace.
Returns:
`True` if building a pydantic dataclass is successfully completed, `False` otherwise.
Raises:
PydanticUndefinedAnnotation: If `raise_errors` is `True` and there are undefined annotations.
"""
if hasattr(cls, '__post_init_post_parse__'):
warnings.warn(
'Support for `__post_init_post_parse__` has been dropped, the method will not be called', DeprecationWarning
)
if types_namespace is None:
types_namespace = _typing_extra.get_cls_types_namespace(cls)
set_dataclass_fields(cls, types_namespace, config_wrapper=config_wrapper)
typevars_map = get_standard_typevars_map(cls)
gen_schema = GenerateSchema(
config_wrapper,
types_namespace,
typevars_map,
)
# This needs to be called before we change the __init__
sig = generate_pydantic_signature(
init=cls.__init__,
fields=cls.__pydantic_fields__, # type: ignore
config_wrapper=config_wrapper,
is_dataclass=True,
)
# dataclass.__init__ must be defined here so its `__qualname__` can be changed since functions can't be copied.
def __init__(__dataclass_self__: PydanticDataclass, *args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
s = __dataclass_self__
s.__pydantic_validator__.validate_python(ArgsKwargs(args, kwargs), self_instance=s)
__init__.__qualname__ = f'{cls.__qualname__}.__init__'
cls.__init__ = __init__ # type: ignore
cls.__pydantic_config__ = config_wrapper.config_dict # type: ignore
cls.__signature__ = sig # type: ignore
get_core_schema = getattr(cls, '__get_pydantic_core_schema__', None)
try:
if get_core_schema:
schema = get_core_schema(
cls,
CallbackGetCoreSchemaHandler(
partial(gen_schema.generate_schema, from_dunder_get_core_schema=False),
gen_schema,
ref_mode='unpack',
),
)
else:
schema = gen_schema.generate_schema(cls, from_dunder_get_core_schema=False)
except PydanticUndefinedAnnotation as e:
if raise_errors:
raise
set_dataclass_mocks(cls, cls.__name__, f'`{e.name}`')
return False
core_config = config_wrapper.core_config(cls)
try:
schema = gen_schema.clean_schema(schema)
except gen_schema.CollectedInvalid:
set_dataclass_mocks(cls, cls.__name__, 'all referenced types')
return False
# We are about to set all the remaining required properties expected for this cast;
# __pydantic_decorators__ and __pydantic_fields__ should already be set
cls = typing.cast('type[PydanticDataclass]', cls)
# debug(schema)
cls.__pydantic_core_schema__ = schema
cls.__pydantic_validator__ = validator = create_schema_validator(
schema, cls, cls.__module__, cls.__qualname__, 'dataclass', core_config, config_wrapper.plugin_settings
)
cls.__pydantic_serializer__ = SchemaSerializer(schema, core_config)
if config_wrapper.validate_assignment:
@wraps(cls.__setattr__)
def validated_setattr(instance: Any, field: str, value: str, /) -> None:
validator.validate_assignment(instance, field, value)
cls.__setattr__ = validated_setattr.__get__(None, cls) # type: ignore
return True
|
(__dataclass_self__: 'PydanticDataclass', *args: 'Any', **kwargs: 'Any') -> 'None'
|
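The ``__init__`` installed above routes every construction through ``__pydantic_validator__``; a minimal illustration using only the public API:

from pydantic.dataclasses import dataclass

@dataclass
class Point:
    x: int
    y: int

print(Point(x="3", y=4))  # Point(x=3, y=4) -- "3" coerced by the validator
Point(x="abc", y=4)       # raises pydantic.ValidationError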
52,568 |
boxdiff.models.core
|
__sub__
|
Compute the delta between two bounding boxes.
|
def __sub__(self, other: 'BoundingBox') -> BoundingBoxDelta:
"""
Compute the delta between two bounding boxes.
"""
return BoundingBoxDelta(
self.id,
other.label,
self.label,
self.x - other.x,
self.y - other.y,
self.width - other.width,
self.height - other.height,
)
|
(self, other: boxdiff.models.core.BoundingBox) -> boxdiff.models.deltas.BoundingBoxDelta
|
52,569 |
boxdiff.models.core
|
iou
|
Compute the intersection over union of two bounding boxes.
|
def iou(self, other: 'BoundingBox') -> float:
"""
Compute the intersection over union of two bounding boxes.
"""
intersection = self & other
if intersection is None:
return 0
return intersection.area / (self.area + other.area - intersection.area)
|
(self, other: boxdiff.models.core.BoundingBox) -> float
|
52,570 |
dataclasses_json.api
|
to_dict
| null |
def to_dict(self, encode_json=False) -> Dict[str, Json]:
return _asdict(self, encode_json=encode_json)
|
(self, encode_json=False) -> Dict[str, Union[dict, list, str, int, float, bool, NoneType]]
|
52,571 |
dataclasses_json.api
|
to_json
| null |
def to_json(self,
*,
skipkeys: bool = False,
ensure_ascii: bool = True,
check_circular: bool = True,
allow_nan: bool = True,
indent: Optional[Union[int, str]] = None,
separators: Optional[Tuple[str, str]] = None,
default: Optional[Callable] = None,
sort_keys: bool = False,
**kw) -> str:
return json.dumps(self.to_dict(encode_json=False),
cls=_ExtendedEncoder,
skipkeys=skipkeys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
indent=indent,
separators=separators,
default=default,
sort_keys=sort_keys,
**kw)
|
(self, *, skipkeys: bool = False, ensure_ascii: bool = True, check_circular: bool = True, allow_nan: bool = True, indent: Union[int, str, NoneType] = None, separators: Optional[Tuple[str, str]] = None, default: Optional[Callable] = None, sort_keys: bool = False, **kw) -> str
|
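Both methods come from the ``@dataclass_json`` mixin; a small sketch (the ``User`` class is illustrative):

from dataclasses import dataclass
from dataclasses_json import dataclass_json

@dataclass_json
@dataclass
class User:
    name: str
    age: int

print(User("Ada", 36).to_dict())          # {'name': 'Ada', 'age': 36}
print(User("Ada", 36).to_json(indent=2))  # keyword args are forwarded to json.dumps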
52,572 |
boxdiff.models.deltas
|
BoundingBoxDelta
|
BoundingBoxDelta(id: 'ID', label_old: str, label_new: str, x_delta: float, y_delta: float, width_delta: float, height_delta: float)
|
class BoundingBoxDelta:
id: 'ID'
label_old: str
label_new: str
x_delta: float
y_delta: float
width_delta: float
height_delta: float
@property
def flags(self) -> BoundingBoxDifference:
f = BoundingBoxDifference(0)
if self.label_old != self.label_new:
f |= BoundingBoxDifference.RELABELED
if self.x_delta != 0 or self.y_delta != 0:
f |= BoundingBoxDifference.MOVED
if self.width_delta != 0 or self.height_delta != 0:
f |= BoundingBoxDifference.RESIZED
return f
|
(id: 'ID', label_old: str, label_new: str, x_delta: float, y_delta: float, width_delta: float, height_delta: float) -> None
|
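A sketch of how a delta's ``flags`` summarize what changed (values are illustrative; flag ordering in the repr may vary):

from boxdiff.models.core import BoundingBox

old = BoundingBox(id=1, label="cat", x=0.0, y=0.0, width=2.0, height=2.0)
new = BoundingBox(id=1, label="dog", x=1.0, y=0.0, width=2.0, height=3.0)

delta = new - old   # label_old='cat', label_new='dog', x_delta=1.0, height_delta=1.0
print(delta.flags)  # RELABELED | MOVED | RESIZED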
52,573 |
boxdiff.models.deltas
|
__eq__
| null |
from typing import List, TYPE_CHECKING
from dataclasses import dataclass
if TYPE_CHECKING:
from boxdiff.models.core import ID, BoundingBox
from boxdiff.models.flags import (
BoundingBoxDifference,
ImageDifference,
ImageSetDifference,
)
@dataclass
class BoundingBoxDelta:
id: 'ID'
label_old: str
label_new: str
x_delta: float
y_delta: float
width_delta: float
height_delta: float
@property
def flags(self) -> BoundingBoxDifference:
f = BoundingBoxDifference(0)
if self.label_old != self.label_new:
f |= BoundingBoxDifference.RELABELED
if self.x_delta != 0 or self.y_delta != 0:
f |= BoundingBoxDifference.MOVED
if self.width_delta != 0 or self.height_delta != 0:
f |= BoundingBoxDifference.RESIZED
return f
|
(self, other)
|
52,576 |
boxdiff.models.flags
|
BoundingBoxDifference
|
Flag enum for the different types of box-level differences.
|
class BoundingBoxDifference(Flag):
"""
Flag enum for the different types of box-level differences.
"""
MOVED = auto()
RESIZED = auto()
RELABELED = auto()
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
52,577 |
boxdiff.models.core
|
Image
|
Identified list of bounding boxes.
|
class Image:
"""
Identified list of bounding boxes.
"""
id: ID
bounding_boxes: List[BoundingBox]
def __sub__(self, other: 'Image') -> 'ImageDelta':
"""
Compute the delta between two images.
"""
# Get the unique set of IDs for the boxes in each image
box_ids_self = {box.id for box in self.bounding_boxes}
box_ids_other = {box.id for box in other.bounding_boxes}
# Find the boxes that are in one image but not the other
boxes_added = [
box for box in other.bounding_boxes if box.id not in box_ids_self
]
boxes_removed = [
box for box in self.bounding_boxes if box.id not in box_ids_other
]
# Find the boxes that are in both images
box_ids_common = box_ids_self & box_ids_other
boxes_common_self = sorted(
[box for box in self.bounding_boxes if box.id in box_ids_common],
key=lambda box: box.id,
)
boxes_common_other = sorted(
[box for box in other.bounding_boxes if box.id in box_ids_common],
key=lambda box: box.id,
)
assert len(boxes_common_self) == len(
boxes_common_other
), 'Common box count mismatch'
# Compute the deltas between the common boxes
box_deltas = []
for box_self, box_other in zip(boxes_common_self, boxes_common_other):
box_deltas.append(box_self - box_other)
return ImageDelta(self.id, boxes_added, boxes_removed, box_deltas)
|
(id: ~ID, bounding_boxes: List[boxdiff.models.core.BoundingBox]) -> None
|
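A sketch of the diff above; note the operand order: per the code, ``boxes_added`` collects boxes present only in the right-hand operand, so the older image goes on the left (IDs are illustrative):

from boxdiff.models.core import BoundingBox, Image

before = Image(id=1, bounding_boxes=[BoundingBox(1, "cat", 0.0, 0.0, 2.0, 2.0)])
after = Image(id=1, bounding_boxes=[
    BoundingBox(1, "cat", 1.0, 0.0, 2.0, 2.0),  # same ID, moved right
    BoundingBox(2, "dog", 5.0, 5.0, 1.0, 1.0),  # new box
])

delta = before - after
print([b.id for b in delta.boxes_added])  # [2]
print(delta.box_deltas[0].flags)          # MOVED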
52,581 |
boxdiff.models.core
|
__sub__
|
Compute the delta between two images.
|
def __sub__(self, other: 'Image') -> 'ImageDelta':
"""
Compute the delta between two images.
"""
# Get the unique set of IDs for the boxes in each image
box_ids_self = {box.id for box in self.bounding_boxes}
box_ids_other = {box.id for box in other.bounding_boxes}
# Find the boxes that are in one image but not the other
boxes_added = [
box for box in other.bounding_boxes if box.id not in box_ids_self
]
boxes_removed = [
box for box in self.bounding_boxes if box.id not in box_ids_other
]
# Find the boxes that are in both images
box_ids_common = box_ids_self & box_ids_other
boxes_common_self = sorted(
[box for box in self.bounding_boxes if box.id in box_ids_common],
key=lambda box: box.id,
)
boxes_common_other = sorted(
[box for box in other.bounding_boxes if box.id in box_ids_common],
key=lambda box: box.id,
)
assert len(boxes_common_self) == len(
boxes_common_other
), 'Common box count mismatch'
# Compute the deltas between the common boxes
box_deltas = []
for box_self, box_other in zip(boxes_common_self, boxes_common_other):
box_deltas.append(box_self - box_other)
return ImageDelta(self.id, boxes_added, boxes_removed, box_deltas)
|
(self, other: boxdiff.models.core.Image) -> boxdiff.models.deltas.ImageDelta
|
52,584 |
boxdiff.models.deltas
|
ImageDelta
|
ImageDelta(id: 'ID', boxes_added: List[ForwardRef('BoundingBox')], boxes_removed: List[ForwardRef('BoundingBox')], box_deltas: List[boxdiff.models.deltas.BoundingBoxDelta])
|
class ImageDelta:
id: 'ID'
boxes_added: List['BoundingBox']
boxes_removed: List['BoundingBox']
box_deltas: List[BoundingBoxDelta]
@property
def flags(self) -> ImageDifference:
f = ImageDifference(0)
if self.boxes_added:
f |= ImageDifference.BOXES_ADDED
if self.boxes_removed:
f |= ImageDifference.BOXES_REMOVED
if any(delta.flags for delta in self.box_deltas):
f |= ImageDifference.BOXES_MODIFIED
return f
|
(id: 'ID', boxes_added: List[ForwardRef('BoundingBox')], boxes_removed: List[ForwardRef('BoundingBox')], box_deltas: List[boxdiff.models.deltas.BoundingBoxDelta]) -> None
|
52,588 |
boxdiff.models.flags
|
ImageDifference
|
Flag enum for the different types of image-level differences.
|
class ImageDifference(Flag):
"""
Flag enum for the different types of image-level differences.
"""
BOXES_ADDED = auto()
BOXES_REMOVED = auto()
BOXES_MODIFIED = auto()
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
52,589 |
boxdiff.models.core
|
ImageSet
|
Identified list of images.
|
class ImageSet:
"""
Identified list of images.
"""
id: ID
images: List[Image]
def __sub__(self, other: 'ImageSet') -> ImageSetDelta:
"""
Compute the delta between two image sets.
"""
# Get the unique set of IDs for the images in each set
image_ids_self = {image.id for image in self.images}
image_ids_other = {image.id for image in other.images}
# Find the images that are in one set but not the other
images_added = [
image for image in other.images if image.id not in image_ids_self
]
images_removed = [
image for image in self.images if image.id not in image_ids_other
]
# Find the images that are in both sets
image_ids_common = image_ids_self & image_ids_other
images_common_self = sorted(
[image for image in self.images if image.id in image_ids_common],
key=lambda im: im.id,
)
images_common_other = sorted(
[image for image in other.images if image.id in image_ids_common],
key=lambda im: im.id,
)
assert len(images_common_self) == len(
images_common_other
), 'Common image count mismatch'
# Compute the deltas between the common images
image_deltas = []
for image_self, image_other in zip(images_common_self, images_common_other):
image_deltas.append(image_self - image_other)
return ImageSetDelta(self.id, images_added, images_removed, image_deltas)
|
(id: ~ID, images: List[boxdiff.models.core.Image]) -> None
|
52,593 |
boxdiff.models.core
|
__sub__
|
Compute the delta between two image sets.
|
def __sub__(self, other: 'ImageSet') -> ImageSetDelta:
"""
Compute the delta between two image sets.
"""
# Get the unique set of IDs for the images in each set
image_ids_self = {image.id for image in self.images}
image_ids_other = {image.id for image in other.images}
# Find the images that are in one set but not the other
images_added = [
image for image in other.images if image.id not in image_ids_self
]
images_removed = [
image for image in self.images if image.id not in image_ids_other
]
# Find the images that are in both sets
image_ids_common = image_ids_self & image_ids_other
images_common_self = sorted(
[image for image in self.images if image.id in image_ids_common],
key=lambda im: im.id,
)
images_common_other = sorted(
[image for image in other.images if image.id in image_ids_common],
key=lambda im: im.id,
)
assert len(images_common_self) == len(
images_common_other
), 'Common image count mismatch'
# Compute the deltas between the common images
image_deltas = []
for image_self, image_other in zip(images_common_self, images_common_other):
image_deltas.append(image_self - image_other)
return ImageSetDelta(self.id, images_added, images_removed, image_deltas)
|
(self, other: boxdiff.models.core.ImageSet) -> boxdiff.models.deltas.ImageSetDelta
|
52,596 |
boxdiff.models.deltas
|
ImageSetDelta
|
ImageSetDelta(id: 'ID', images_added: List[boxdiff.models.deltas.ImageDelta], images_removed: List[boxdiff.models.deltas.ImageDelta], image_deltas: List[boxdiff.models.deltas.ImageDelta])
|
class ImageSetDelta:
id: 'ID'
images_added: List[ImageDelta]
images_removed: List[ImageDelta]
image_deltas: List[ImageDelta]
@property
def flags(self) -> ImageSetDifference:
f = ImageSetDifference(0)
if self.images_added:
f |= ImageSetDifference.IMAGES_ADDED
if self.images_removed:
f |= ImageSetDifference.IMAGES_REMOVED
if any(delta.flags for delta in self.image_deltas):
f |= ImageSetDifference.IMAGES_MODIFIED
return f
|
(id: 'ID', images_added: List[boxdiff.models.deltas.ImageDelta], images_removed: List[boxdiff.models.deltas.ImageDelta], image_deltas: List[boxdiff.models.deltas.ImageDelta]) -> None
|
52,600 |
boxdiff.models.flags
|
ImageSetDifference
|
Flag enum for the different types of image-set-level differences.
|
class ImageSetDifference(Flag):
"""
Flag enum for the different types of image-set-level differences.
"""
IMAGES_ADDED = auto()
IMAGES_REMOVED = auto()
IMAGES_MODIFIED = auto()
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
52,602 |
causalimpact.analysis
|
CausalImpact
|
CausalImpact() performs causal inference through counterfactual
predictions using a Bayesian structural time-series model.
Parameters
----------
data : pandas.DataFrame
The response variable must be in the first column, and any covariates
in subsequent columns.
pre_period : list
A list specifying the first and the last time point of the
pre-intervention period in the response column. This period can be
thought of as a training period, used to determine the relationship
between the response variable and the covariates.
post_period : list
A list specifying the first and the last time point of the post-intervention
period we wish to study. This is the period after the intervention has
begun whose effect we are interested in. The relationship between
response variable and covariates, as determined during the pre-period,
will be used to predict how the response variable should have evolved
during the post-period had no intervention taken place.
model_args : dict
Optional arguments that can be used to adjust the default construction
of the state-space model used for inference.
For full control over the model, you can construct your own model using
the statsmodels package and feed the model into CausalImpact().
ucm_model : statsmodels.tsa.statespace.structural.UnobservedComponents
Instead of passing in data and having CausalImpact construct a
model, it is possible to construct a model yourself using the
statsmodels package. In this case, omit data, pre_period, and
post_period, and pass in only ucm_model, post_period_response, and
alpha (optional). The model must have been fitted on data where the
response variable was set to np.nan during the post-treatment period;
the actual observed data during this period is passed in post_period_response.
post_period_response : list | pd.Series | np.ndarray
Actual observed data during the post-intervention period. This is required
if and only if a fitted ucm_model is passed instead of data.
alpha : float
Desired tail-area probability for posterior intervals. Defaults to 0.05,
which will produce central 95% intervals.
Returns
-------
CausalImpact Object
|
class CausalImpact:
"""CausalImpact() performs causal inference through counterfactual
predictions using a Bayesian structural time-series model.
Parameters
----------
data : pandas.DataFrame
The response variable must be in the first column, and any covariates
in subsequent columns.
pre_period : list
A list specifying the first and the last time point of the
pre-intervention period in the response column. This period can be
thought of as a training period, used to determine the relationship
between the response variable and the covariates.
post_period : list
A list specifying the first and the last time point of the post-intervention
period we wish to study. This is the period after the intervention has
begun whose effect we are interested in. The relationship between
response variable and covariates, as determined during the pre-period,
will be used to predict how the response variable should have evolved
during the post-period had no intervention taken place.
model_args : dict
Optional arguments that can be used to adjust the default construction
of the state-space model used for inference.
For full control over the model, you can construct your own model using
the statsmodels package and feed the model into CausalImpact().
ucm_model : statsmodels.tsa.statespace.structural.UnobservedComponents
Instead of passing in data and having CausalImpact construct a
model, it is possible to construct a model yourself using the
statsmodels package. In this case, omit data, pre_period, and
post_period, and pass in only ucm_model, post_period_response, and
alpha (optional). The model must have been fitted on data where the
response variable was set to np.nan during the post-treatment period;
the actual observed data during this period is passed in post_period_response.
post_period_response : list | pd.Series | np.ndarray
Actual observed data during the post-intervention period. This is required
if and only if a fitted ucm_model is passed instead of data.
alpha : float
Desired tail-area probability for posterior intervals. Defaults to 0.05,
which will produce central 95% intervals.
Returns
-------
CausalImpact Object
"""
def __init__(
self,
data=None,
pre_period=None,
post_period=None,
model_args=None,
ucm_model=None,
post_period_response=None,
alpha=0.05,
estimation="MLE",
):
self.series = None
self.model = {}
if isinstance(data, pd.DataFrame):
self.data = data.copy()
else:
self.data = data
self.params = {
"data": data,
"pre_period": pre_period,
"post_period": post_period,
"model_args": model_args,
"ucm_model": ucm_model,
"post_period_response": post_period_response,
"alpha": alpha,
"estimation": estimation,
}
self.inferences = None
self.results = None
def run(self):
kwargs = self._format_input(
self.params["data"],
self.params["pre_period"],
self.params["post_period"],
self.params["model_args"],
self.params["ucm_model"],
self.params["post_period_response"],
self.params["alpha"],
)
# Depending on input, dispatch to the appropriate _run_with_* method
if self.data is not None:
self._run_with_data(
kwargs["data"],
kwargs["pre_period"],
kwargs["post_period"],
kwargs["model_args"],
kwargs["alpha"],
self.params["estimation"],
)
else:
self._run_with_ucm(
kwargs["ucm_model"],
kwargs["post_period_response"],
kwargs["alpha"],
kwargs["model_args"],
self.params["estimation"],
)
@staticmethod
def _format_input_data(data):
"""Check and format the data argument provided to CausalImpact().
Args:
data: Pandas DataFrame
Returns:
correctly formatted Pandas DataFrame
"""
# If <data> is a Pandas DataFrame and the first column is 'date',
# try to convert
if (
isinstance(data, pd.DataFrame)
and isinstance(data.columns[0], str)
and data.columns[0].lower() in ["date", "time"]
):
data = data.set_index(data.columns[0])
# Try to convert to Pandas DataFrame
try:
data = pd.DataFrame(data)
except ValueError:
raise ValueError("could not convert input data to Pandas " + "DataFrame")
# Must have at least 3 time points
if len(data.index) < 3:
raise ValueError("data must have at least 3 time points")
# Must not have NA in covariates (if any)
if len(data.columns) >= 2 and pd.isnull(data.iloc[:, 1:]).any(axis=None):
raise ValueError("covariates must not contain null values")
return data
@staticmethod
def _check_periods_are_valid(pre_period, post_period):
if not isinstance(pre_period, list) or not isinstance(post_period, list):
raise ValueError("pre_period and post_period must both be lists")
if len(pre_period) != 2 or len(post_period) != 2:
raise ValueError("pre_period and post_period must both be of " + "length 2")
if pd.isnull(pre_period).any(axis=None) or pd.isnull(post_period).any(
axis=None
):
raise ValueError(
"pre_period and post period must not contain " + "null values"
)
@staticmethod
def _align_periods_dtypes(pre_period, post_period, data):
"""align the dtypes of the pre_period and post_period to the data index.
Args:
pre_period: two-element list
post_period: two-element list
data: already-checked Pandas DataFrame, for reference only
"""
pre_dtype = np.array(pre_period).dtype
post_dtype = np.array(post_period).dtype
# if index is datetime then convert pre and post to datetimes
if isinstance(data.index, pd.core.indexes.datetimes.DatetimeIndex):
pre_period = [pd.to_datetime(date) for date in pre_period]
post_period = [pd.to_datetime(date) for date in post_period]
# if index is not datetime then error if datetime pre and post is passed
elif pd.core.dtypes.common.is_datetime_or_timedelta_dtype(
pd.Series(pre_period)
) or pd.core.dtypes.common.is_datetime_or_timedelta_dtype(
pd.Series(post_period)
):
raise ValueError(
"pre_period ("
+ pre_dtype.name
+ ") and post_period ("
+ post_dtype.name
+ ") should have the same class as the "
+ "time points in the data ("
+ data.index.dtype.name
+ ")"
)
# if index is int
elif pd.api.types.is_int64_dtype(data.index):
pre_period = [int(elem) for elem in pre_period]
post_period = [int(elem) for elem in post_period]
# if index is float
elif pd.api.types.is_float_dtype(data.index):
pre_period = [float(elem) for elem in pre_period]
post_period = [float(elem) for elem in post_period]
# if index is string
elif pd.api.types.is_string_dtype(data.index):
if pd.api.types.is_numeric_dtype(
np.array(pre_period)
) or pd.api.types.is_numeric_dtype(np.array(post_period)):
raise ValueError(
"pre_period ("
+ pre_dtype.name
+ ") and post_period ("
+ post_dtype.name
+ ") should have the same class as the "
+ "time points in the data ("
+ data.index.dtype.name
+ ")"
)
else:
pre_period = [str(idx) for idx in pre_period]
post_period = [str(idx) for idx in post_period]
else:
raise ValueError(
"pre_period ("
+ pre_dtype.name
+ ") and post_period ("
+ post_dtype.name
+ ") should have the same class as the "
+ "time points in the data ("
+ data.index.dtype.name
+ ")"
)
return [pre_period, post_period]
def _format_input_prepost(self, pre_period, post_period, data):
"""Check and format the pre_period and post_period input arguments.
Args:
pre_period: two-element list
post_period: two-element list
data: already-checked Pandas DataFrame, for reference only
"""
self._check_periods_are_valid(pre_period, post_period)
pre_period, post_period = self._align_periods_dtypes(
pre_period, post_period, data
)
if pre_period[1] > post_period[0]:
raise ValueError(
"post period must start at least 1 observation"
+ " after the end of the pre_period"
)
if isinstance(data.index, pd.RangeIndex):
loc3 = post_period[0]
loc4 = post_period[1]
else:
loc3 = data.index.get_loc(post_period[0])
loc4 = data.index.get_loc(post_period[1])
if loc4 < loc3:
raise ValueError(
"post_period[1] must not be earlier than " + "post_period[0]"
)
if pre_period[0] < data.index.min():
pre_period[0] = data.index.min()
if post_period[1] > data.index.max():
post_period[1] = data.index.max()
return {"pre_period": pre_period, "post_period": post_period}
@staticmethod
def _check_valid_args_combo(args):
data_model_args = [True, True, True, False, False]
ucm_model_args = [False, False, False, True, True]
if np.any(pd.isnull(args) != data_model_args) and np.any(
pd.isnull(args) != ucm_model_args
):
raise SyntaxError(
"Must either provide ``data``, ``pre_period``,"
+ " ``post_period``, and ``model_args``,"
+ " or ``ucm_model`` and ``post_period_response``"
)
@staticmethod
def _check_valid_alpha(alpha):
if alpha is None:
raise ValueError("alpha must not be None")
if not np.isreal(alpha):
raise ValueError("alpha must be a real number")
if np.isnan(alpha):
raise ValueError("alpha must not be NA")
if alpha <= 0 or alpha >= 1:
raise ValueError("alpha must be between 0 and 1")
def _format_input(
self,
data,
pre_period,
post_period,
model_args,
ucm_model,
post_period_response,
alpha,
):
"""Check and format all input arguments supplied to CausalImpact().
See the documentation of CausalImpact() for details
Args:
data: Pandas DataFrame or data frame
pre_period: beginning and end of pre-period
post_period: beginning and end of post-period
model_args: dict of additional arguments for the model
ucm_model: UnobservedComponents model (instead of data)
post_period_response: observed response in the post-period
alpha: tail-area for posterior intervals
estimation: method of estimation for model fitting
Returns:
dict of checked (and possibly reformatted) input arguments
"""
from statsmodels.tsa.statespace.structural import UnobservedComponents
# Check that a consistent set of variables has been provided
args = [data, pre_period, post_period, ucm_model, post_period_response]
self._check_valid_args_combo(args)
# Check <data> and convert to Pandas DataFrame, with rows
# representing time points
if data is not None:
data = self._format_input_data(data)
# Check <pre_period> and <post_period>
if data is not None:
checked = self._format_input_prepost(pre_period, post_period, data)
pre_period = checked["pre_period"]
post_period = checked["post_period"]
self.params["pre_period"] = pre_period
self.params["post_period"] = post_period
# Parse <model_args>, fill gaps using <_defaults>
_defaults = {
"ndraws": 1000,
"nburn": 100,
"niter": 1000,
"standardize_data": True,
"prior_level_sd": 0.01,
"nseasons": 1,
"season_duration": 1,
"dynamic_regression": False,
}
if model_args is None:
model_args = _defaults
else:
missing = [key for key in _defaults if key not in model_args]
for arg in missing:
model_args[arg] = _defaults[arg]
# Check <standardize_data>
if not isinstance(model_args["standardize_data"], bool):
raise ValueError("model_args.standardize_data must be a" + " boolean value")
# Check <ucm_model>
if ucm_model is not None and not isinstance(ucm_model, UnobservedComponents):
raise ValueError(
"ucm_model must be an object of class "
"statsmodels.tsa.statespace.structural.UnobservedComponents "
"instead received " + str(type(ucm_model))[8:-2]
)
# Check <post_period_response>
if ucm_model is not None:
if not is_list_like(post_period_response):
raise ValueError("post_period_response must be list-like")
if np.array(post_period_response).dtype.num == 17:  # 17 == np.object_, e.g. Timestamps
raise ValueError(
"post_period_response should not be" + " datetime values"
)
if not np.all(np.isreal(post_period_response)):
raise ValueError(
"post_period_response must contain all" + " real values"
)
# Check <alpha>
self._check_valid_alpha(alpha)
# Return updated arguments
kwargs = {
"data": data,
"pre_period": pre_period,
"post_period": post_period,
"model_args": model_args,
"ucm_model": ucm_model,
"post_period_response": post_period_response,
"alpha": alpha,
}
return kwargs
def _run_with_data(
self, data, pre_period, post_period, model_args, alpha, estimation
):
# Zoom in on data in modeling range
if data.shape[1] == 1: # no exogenous values provided
raise ValueError("data contains no exogenous variables")
data_modeling = data.copy()
df_pre = data_modeling.loc[pre_period[0] : pre_period[1], :]
df_post = data_modeling.loc[post_period[0] : post_period[1], :]
# Standardize all variables
orig_std_params = (0, 1)
if model_args["standardize_data"]:
sd_results = standardize_all_variables(
data_modeling, pre_period, post_period
)
df_pre = sd_results["data_pre"]
df_post = sd_results["data_post"]
orig_std_params = sd_results["orig_std_params"]
# Construct model and perform inference
model = construct_model(df_pre, model_args)
self.model = model
model_results = model_fit(model, estimation, model_args)
inferences = compile_inferences(
model_results,
data,
df_pre,
df_post,
None,
alpha,
orig_std_params,
estimation,
)
# "append" to 'CausalImpact' object
self.inferences = inferences["series"]
self.results = model_results
def _run_with_ucm(
self, ucm_model, post_period_response, alpha, model_args, estimation
):
"""Runs an impact analysis on top of a ucm model.
Args:
ucm_model: Model as returned by UnobservedComponents(),
in which the data during the post-period was set to NA
post_period_response: observed data during the post-intervention
period
alpha: tail-probabilities of posterior intervals"""
df_pre = ucm_model.data.orig_endog[: -len(post_period_response)]
df_pre = pd.DataFrame(df_pre)
post_period_response = pd.DataFrame(post_period_response)
data = pd.DataFrame(
np.concatenate([df_pre.values, post_period_response.values])
)
orig_std_params = (0, 1)
model_results = model_fit(ucm_model, estimation, model_args)
# Compile posterior inferences
inferences = compile_inferences(
model_results,
data,
df_pre,
None,
post_period_response,
alpha,
orig_std_params,
estimation,
)
obs_inter = model_results.model_nobs - len(post_period_response)
self.params["pre_period"] = [0, obs_inter - 1]
self.params["post_period"] = [obs_inter, -1]
self.data = pd.concat([df_pre, post_period_response])
self.inferences = inferences["series"]
self.results = model_results
@staticmethod
def _print_report(
mean_pred_fmt,
mean_resp_fmt,
mean_lower_fmt,
mean_upper_fmt,
abs_effect_fmt,
abs_effect_upper_fmt,
abs_effect_lower_fmt,
rel_effect_fmt,
rel_effect_upper_fmt,
rel_effect_lower_fmt,
cum_resp_fmt,
cum_pred_fmt,
cum_lower_fmt,
cum_upper_fmt,
confidence,
cum_rel_effect_lower,
cum_rel_effect_upper,
cum_rel_effect,
width,
p_value,
alpha,
):
sig = not (cum_rel_effect_lower < 0 < cum_rel_effect_upper)
pos = cum_rel_effect > 0
# Summarize averages
stmt = textwrap.dedent(
"""During the post-intervention period, the response
variable had an average value of
approx. {mean_resp}.
""".format(
mean_resp=mean_resp_fmt
)
)
if sig:
stmt += " By contrast, in "
else:
stmt += " In "
stmt += textwrap.dedent(
"""
the absence of an intervention, we would have
expected an average response of {mean_pred}. The
{confidence} interval of this counterfactual
prediction is [{mean_lower}, {mean_upper}].
Subtracting this prediction from the observed
response yields an estimate of the causal effect
the intervention had on the response variable.
This effect is {abs_effect} with a
{confidence} interval of [{abs_lower},
{abs_upper}]. For a discussion of the
significance of this effect,
see below.
""".format(
mean_pred=mean_pred_fmt,
confidence=confidence,
mean_lower=mean_lower_fmt,
mean_upper=mean_upper_fmt,
abs_effect=abs_effect_fmt,
abs_upper=abs_effect_upper_fmt,
abs_lower=abs_effect_lower_fmt,
)
)
# Summarize sums
stmt2 = textwrap.dedent(
"""
Summing up the individual data points during the
post-intervention period (which can only sometimes be
meaningfully interpreted), the response variable had an
overall value of {cum_resp}.
""".format(
cum_resp=cum_resp_fmt
)
)
if sig:
stmt2 += " By contrast, had "
else:
stmt2 += " Had "
stmt2 += textwrap.dedent(
"""
the intervention not taken place, we would have expected
a sum of {cum_pred}. The {confidence} interval of this
prediction is [{cum_pred_lower}, {cum_pred_upper}]
""".format(
cum_pred=cum_pred_fmt,
confidence=confidence,
cum_pred_lower=cum_lower_fmt,
cum_pred_upper=cum_upper_fmt,
)
)
# Summarize relative numbers (in which case row [1] = row [2])
stmt3 = textwrap.dedent(
"""
The above results are given in terms
of absolute numbers. In relative terms, the
response variable showed
"""
)
if pos:
stmt3 += " an increase of "
else:
stmt3 += " a decrease of "
stmt3 += textwrap.dedent(
"""
{rel_effect}. The {confidence} interval of this
percentage is [{rel_effect_lower},
{rel_effect_upper}]
""".format(
confidence=confidence,
rel_effect=rel_effect_fmt,
rel_effect_lower=rel_effect_lower_fmt,
rel_effect_upper=rel_effect_upper_fmt,
)
)
# Comment on significance
if sig and pos:
stmt4 = textwrap.dedent(
"""
This means that the positive effect observed
during the intervention period is statistically
significant and unlikely to be due to random
fluctuations. It should be noted, however, that
the question of whether this increase also bears
substantive significance can only be answered by
comparing the absolute effect {abs_effect} to
the original goal of the underlying
intervention.
""".format(
abs_effect=abs_effect_fmt
)
)
elif sig and not pos:
stmt4 = textwrap.dedent(
"""
This means that the negative effect observed
during the intervention period is statistically
significant. If the experimenter had expected a
positive effect, it is recommended to double-check
whether anomalies in the control variables may have
caused an overly optimistic expectation of what
should have happened in the response variable in the
absence of the intervention.
"""
)
elif not sig and pos:
stmt4 = textwrap.dedent(
"""
This means that, although the intervention
appears to have caused a positive effect, this
effect is not statistically significant when
considering the post-intervention period as a whole.
Individual days or shorter stretches within the
intervention period may of course still have had a
significant effect, as indicated whenever the lower
limit of the impact time series (lower plot) was
above zero.
"""
)
elif not sig and not pos:
stmt4 = textwrap.dedent(
"""
This means that, although it may look as though
the intervention has exerted a negative effect on
the response variable when considering the
intervention period as a whole, this effect is not
statistically significant, and so cannot be
meaningfully interpreted.
"""
)
if not sig:
stmt4 += textwrap.dedent(
"""
The apparent effect could be the result of random
fluctuations that are unrelated to the intervention.
This is often the case when the intervention period
is very long and includes much of the time when the
effect has already worn off. It can also be the case
when the intervention period is too short to
distinguish the signal from the noise. Finally,
failing to find a significant effect can happen when
there are not enough control variables or when these
variables do not correlate well with the response
variable during the learning period."""
)
if p_value < alpha:
stmt5 = textwrap.dedent(
"""The probability of obtaining this effect by
chance is very small (Bayesian one-sided tail-area
probability {p}). This means the
causal effect can be considered statistically
significant.""".format(
p=np.round(p_value, 3)
)
)
else:
stmt5 = """The probability of obtaining this effect by
chance is p = ", round(p, 3), "). This means the effect may
be spurious and would generally not be considered
statistically significant.""".format()
print(textwrap.fill(stmt, width=width))
print("\n")
print(textwrap.fill(stmt2, width=width))
print("\n")
print(textwrap.fill(stmt3, width=width))
print("\n")
print(textwrap.fill(stmt4, width=width))
print("\n")
print(textwrap.fill(stmt5, width=width))
def summary(self, output="summary", width=120, path=None):
"""reports a summary of the results
Parameters
----------
output: str
can be summary or report. summary outputs a table.
report outputs a natural language description of the
findings
width : int
line width of the output. Only relevant if output == report
path : str
path to output summary to csv. Only relevant if output == summary
"""
alpha = self.params["alpha"]
confidence = "{}%".format(int((1 - alpha) * 100))
post_period = self.params["post_period"]
post_inf = self.inferences.loc[post_period[0] : post_period[1], :]
post_point_resp = post_inf.loc[:, "response"]
post_point_pred = post_inf.loc[:, "point_pred"]
post_point_upper = post_inf.loc[:, "point_pred_upper"]
post_point_lower = post_inf.loc[:, "point_pred_lower"]
mean_resp = post_point_resp.mean()
mean_resp_fmt = int(mean_resp)
cum_resp = post_point_resp.sum()
cum_resp_fmt = int(cum_resp)
mean_pred = post_point_pred.mean()
mean_pred_fmt = int(post_point_pred.mean())
cum_pred = post_point_pred.sum()
cum_pred_fmt = int(cum_pred)
mean_lower = post_point_lower.mean()
mean_lower_fmt = int(mean_lower)
mean_upper = post_point_upper.mean()
mean_upper_fmt = int(mean_upper)
mean_ci_fmt = [mean_lower_fmt, mean_upper_fmt]
cum_lower = post_point_lower.sum()
cum_lower_fmt = int(cum_lower)
cum_upper = post_point_upper.sum()
cum_upper_fmt = int(cum_upper)
cum_ci_fmt = [cum_lower_fmt, cum_upper_fmt]
abs_effect = (post_point_resp - post_point_pred).mean()
abs_effect_fmt = int(abs_effect)
cum_abs_effect = (post_point_resp - post_point_pred).sum()
cum_abs_effect_fmt = int(cum_abs_effect)
abs_effect_lower = (post_point_resp - post_point_lower).mean()
abs_effect_lower_fmt = int(abs_effect_lower)
abs_effect_upper = (post_point_resp - post_point_upper).mean()
abs_effect_upper_fmt = int(abs_effect_upper)
abs_effect_ci_fmt = [abs_effect_lower_fmt, abs_effect_upper_fmt]
cum_abs_lower = (post_point_resp - post_point_lower).sum()
cum_abs_lower_fmt = int(cum_abs_lower)
cum_abs_upper = (post_point_resp - post_point_upper).sum()
cum_abs_upper_fmt = int(cum_abs_upper)
cum_abs_effect_ci_fmt = [cum_abs_lower_fmt, cum_abs_upper_fmt]
rel_effect = abs_effect / mean_pred * 100
rel_effect_fmt = "{:.1f}%".format(rel_effect)
cum_rel_effect = cum_abs_effect / cum_pred * 100
cum_rel_effect_fmt = "{:.1f}%".format(cum_rel_effect)
rel_effect_lower = abs_effect_lower / mean_pred * 100
rel_effect_lower_fmt = "{:.1f}%".format(rel_effect_lower)
rel_effect_upper = abs_effect_upper / mean_pred * 100
rel_effect_upper_fmt = "{:.1f}%".format(rel_effect_upper)
rel_effect_ci_fmt = [rel_effect_lower_fmt, rel_effect_upper_fmt]
cum_rel_effect_lower = cum_abs_lower / cum_pred * 100
cum_rel_effect_lower_fmt = "{:.1f}%".format(cum_rel_effect_lower)
cum_rel_effect_upper = cum_abs_upper / cum_pred * 100
cum_rel_effect_upper_fmt = "{:.1f}%".format(cum_rel_effect_upper)
cum_rel_effect_ci_fmt = [cum_rel_effect_lower_fmt, cum_rel_effect_upper_fmt]
# assuming an approximately normal distribution, back out the standard
# deviation from the upper bound of the interval (1.96 assumes alpha = 0.05)
std_pred = (
mean_upper - mean_pred
) / 1.96 # from mean_upper = mean_pred + 1.96 * std
# calculate z score
z_score = (0 - mean_pred) / std_pred
# pvalue from zscore
p_value = st.norm.cdf(z_score)
prob_causal = 1 - p_value
p_value_perc = p_value * 100
prob_causal_perc = prob_causal * 100
if output == "summary":
# Posterior inference {CausalImpact}
summary = [
[mean_resp_fmt, cum_resp_fmt],
[mean_pred_fmt, cum_pred_fmt],
[mean_ci_fmt, cum_ci_fmt],
[" ", " "],
[abs_effect_fmt, cum_abs_effect_fmt],
[abs_effect_ci_fmt, cum_abs_effect_ci_fmt],
[" ", " "],
[rel_effect_fmt, cum_rel_effect_fmt],
[rel_effect_ci_fmt, cum_rel_effect_ci_fmt],
[" ", " "],
["{:.1f}%".format(p_value_perc), " "],
["{:.1f}%".format(prob_causal_perc), " "],
]
summary = pd.DataFrame(
summary,
columns=["Average", "Cumulative"],
index=[
"Actual",
"Predicted",
"95% CI",
" ",
"Absolute Effect",
"95% CI",
" ",
"Relative Effect",
"95% CI",
" ",
"P-value",
"Prob. of Causal Effect",
],
)
df_print(summary, path)
elif output == "report":
self._print_report(
mean_pred_fmt,
mean_resp_fmt,
mean_lower_fmt,
mean_upper_fmt,
abs_effect_fmt,
abs_effect_upper_fmt,
abs_effect_lower_fmt,
rel_effect_fmt,
rel_effect_upper_fmt,
rel_effect_lower_fmt,
cum_resp_fmt,
cum_pred_fmt,
cum_lower_fmt,
cum_upper_fmt,
confidence,
cum_rel_effect_lower,
cum_rel_effect_upper,
cum_rel_effect,
width,
p_value,
alpha,
)
else:
raise ValueError(
"Output argument must be either 'summary' " + "or 'report'"
)
def plot(
self,
panels=None,
figsize=(15, 12),
fname=None,
):
if panels is None:
panels = ["original", "pointwise", "cumulative"]
plt = get_matplotlib()
fig = plt.figure(figsize=figsize)
data_inter = self.params["pre_period"][1]
if isinstance(data_inter, pd.DatetimeIndex):
data_inter = pd.Timestamp(data_inter)
inferences = self.inferences.iloc[1:, :]
# Observation and regression components
if "original" in panels:
ax1 = plt.subplot(3, 1, 1)
plt.plot(inferences.point_pred, "r--", linewidth=2, label="model")
plt.plot(inferences.response, "k", linewidth=2, label="endog")
plt.axvline(data_inter, c="k", linestyle="--")
plt.fill_between(
inferences.index,
inferences.point_pred_lower,
inferences.point_pred_upper,
facecolor="gray",
interpolate=True,
alpha=0.25,
)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.legend(loc="upper left")
plt.title("Observation vs prediction")
if "pointwise" in panels:
# Pointwise difference
if "ax1" in locals():
ax2 = plt.subplot(312, sharex=ax1)
else:
ax2 = plt.subplot(312)
lift = inferences.point_effect
plt.plot(lift, "r--", linewidth=2)
plt.plot(self.data.index, np.zeros(self.data.shape[0]), "g-", linewidth=2)
plt.axvline(data_inter, c="k", linestyle="--")
lift_lower = inferences.point_effect_lower
lift_upper = inferences.point_effect_upper
plt.fill_between(
inferences.index,
lift_lower,
lift_upper,
facecolor="gray",
interpolate=True,
alpha=0.25,
)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.title("Difference")
# Cumulative impact
if "cumulative" in panels:
if "ax1" in locals():
plt.subplot(313, sharex=ax1)
elif "ax2" in locals():
plt.subplot(313, sharex=ax2)
else:
plt.subplot(313)
plt.plot(
inferences.index,
inferences.cum_effect,
"r--",
linewidth=2,
)
plt.plot(self.data.index, np.zeros(self.data.shape[0]), "g-", linewidth=2)
plt.axvline(data_inter, c="k", linestyle="--")
plt.fill_between(
inferences.index,
inferences.cum_effect_lower,
inferences.cum_effect_upper,
facecolor="gray",
interpolate=True,
alpha=0.25,
)
plt.axis([inferences.index[0], inferences.index[-1], None, None])
plt.title("Cumulative Impact")
plt.xlabel("$T$")
if fname is None:
plt.show()
else:
fig.savefig(fname, bbox_inches="tight")
plt.close(fig)
|
(data=None, pre_period=None, post_period=None, model_args=None, ucm_model=None, post_period_response=None, alpha=0.05, estimation='MLE')
|
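A usage sketch of the class above on synthetic data (the series and the true effect size are illustrative; ``run()`` must be called before ``summary()`` or ``plot()``):

import numpy as np
import pandas as pd
from causalimpact.analysis import CausalImpact

np.random.seed(1)
x = 100 + np.random.randn(100).cumsum()  # covariate
y = 1.2 * x + np.random.randn(100)       # response tracks the covariate
y[70:] += 5                              # intervention effect after t = 70
data = pd.DataFrame({"y": y, "x": x})    # response first, covariates after

impact = CausalImpact(data, pre_period=[0, 69], post_period=[70, 99])
impact.run()                               # fits the model and compiles inferences
impact.summary()                           # tabular summary
impact.summary(output="report", width=80)  # natural-language report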
52,603 |
causalimpact.analysis
|
__init__
| null |
def __init__(
self,
data=None,
pre_period=None,
post_period=None,
model_args=None,
ucm_model=None,
post_period_response=None,
alpha=0.05,
estimation="MLE",
):
self.series = None
self.model = {}
if isinstance(data, pd.DataFrame):
self.data = data.copy()
else:
self.data = data
self.params = {
"data": data,
"pre_period": pre_period,
"post_period": post_period,
"model_args": model_args,
"ucm_model": ucm_model,
"post_period_response": post_period_response,
"alpha": alpha,
"estimation": estimation,
}
self.inferences = None
self.results = None
|
(self, data=None, pre_period=None, post_period=None, model_args=None, ucm_model=None, post_period_response=None, alpha=0.05, estimation='MLE')
|
52,604 |
causalimpact.analysis
|
_align_periods_dtypes
|
align the dtypes of the pre_period and post_period to the data index.
Args:
pre_period: two-element list
post_period: two-element list
data: already-checked Pandas DataFrame, for reference only
|
@staticmethod
def _align_periods_dtypes(pre_period, post_period, data):
"""align the dtypes of the pre_period and post_period to the data index.
Args:
pre_period: two-element list
post_period: two-element list
data: already-checked Pandas DataFrame, for reference only
"""
pre_dtype = np.array(pre_period).dtype
post_dtype = np.array(post_period).dtype
# if index is datetime then convert pre and post to datetimes
if isinstance(data.index, pd.core.indexes.datetimes.DatetimeIndex):
pre_period = [pd.to_datetime(date) for date in pre_period]
post_period = [pd.to_datetime(date) for date in post_period]
# if index is not datetime then error if datetime pre and post is passed
elif pd.core.dtypes.common.is_datetime_or_timedelta_dtype(
pd.Series(pre_period)
) or pd.core.dtypes.common.is_datetime_or_timedelta_dtype(
pd.Series(post_period)
):
raise ValueError(
"pre_period ("
+ pre_dtype.name
+ ") and post_period ("
+ post_dtype.name
+ ") should have the same class as the "
+ "time points in the data ("
+ data.index.dtype.name
+ ")"
)
# if index is int
elif pd.api.types.is_int64_dtype(data.index):
pre_period = [int(elem) for elem in pre_period]
post_period = [int(elem) for elem in post_period]
# if index is float
elif pd.api.types.is_float_dtype(data.index):
pre_period = [float(elem) for elem in pre_period]
post_period = [float(elem) for elem in post_period]
# if index is string
elif pd.api.types.is_string_dtype(data.index):
if pd.api.types.is_numeric_dtype(
np.array(pre_period)
) or pd.api.types.is_numeric_dtype(np.array(post_period)):
raise ValueError(
"pre_period ("
+ pre_dtype.name
+ ") and post_period ("
+ post_dtype.name
+ ") should have the same class as the "
+ "time points in the data ("
+ data.index.dtype.name
+ ")"
)
else:
pre_period = [str(idx) for idx in pre_period]
post_period = [str(idx) for idx in post_period]
else:
raise ValueError(
"pre_period ("
+ pre_dtype.name
+ ") and post_period ("
+ post_dtype.name
+ ") should have the same class as the "
+ "time points in the data ("
+ data.index.dtype.name
+ ")"
)
return [pre_period, post_period]
|
(pre_period, post_period, data)
|
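A sketch of the alignment rules above: with a DatetimeIndex, string endpoints are parsed via ``pd.to_datetime``; with an integer index they are cast with ``int`` (dates are illustrative):

import pandas as pd
from causalimpact.analysis import CausalImpact

data = pd.DataFrame({"y": range(4)}, index=pd.date_range("2024-01-01", periods=4))
pre, post = CausalImpact._align_periods_dtypes(
    ["2024-01-01", "2024-01-02"], ["2024-01-03", "2024-01-04"], data
)
print(pre)  # [Timestamp('2024-01-01 00:00:00'), Timestamp('2024-01-02 00:00:00')]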
52,605 |
causalimpact.analysis
|
_check_periods_are_valid
| null |
@staticmethod
def _check_periods_are_valid(pre_period, post_period):
if not isinstance(pre_period, list) or not isinstance(post_period, list):
raise ValueError("pre_period and post_period must both be lists")
if len(pre_period) != 2 or len(post_period) != 2:
raise ValueError("pre_period and post_period must both be of " + "length 2")
if pd.isnull(pre_period).any(axis=None) or pd.isnull(post_period).any(
axis=None
):
raise ValueError(
"pre_period and post period must not contain " + "null values"
)
|
(pre_period, post_period)
|
52,606 |
causalimpact.analysis
|
_check_valid_alpha
| null |
@staticmethod
def _check_valid_alpha(alpha):
if alpha is None:
raise ValueError("alpha must not be None")
if not np.isreal(alpha):
raise ValueError("alpha must be a real number")
if np.isnan(alpha):
raise ValueError("alpha must not be NA")
if alpha <= 0 or alpha >= 1:
raise ValueError("alpha must be between 0 and 1")
|
(alpha)
|
52,607 |
causalimpact.analysis
|
_check_valid_args_combo
| null |
@staticmethod
def _check_valid_args_combo(args):
    # Valid null-patterns for
    # [data, pre_period, post_period, ucm_model, post_period_response]:
    # the data-based mode provides the first three arguments, the
    # ucm-based mode the last two.
    data_model_args = [False, False, False, True, True]
    ucm_model_args = [True, True, True, False, False]
    if np.any(pd.isnull(args) != data_model_args) and np.any(
        pd.isnull(args) != ucm_model_args
    ):
        raise SyntaxError(
            "Must either provide ``data``, ``pre_period``, "
            "``post_period`` and ``model_args``, or ``ucm_model`` "
            "and ``post_period_response``"
        )
|
(args)
|
52,608 |
causalimpact.analysis
|
_format_input
|
Check and format all input arguments supplied to CausalImpact().
See the documentation of CausalImpact() for details
Args:
data: Pandas DataFrame or data frame
pre_period: beginning and end of pre-period
post_period: beginning and end of post-period
model_args: dict of additional arguments for the model
ucm_model: UnobservedComponents model (instead of data)
post_period_response: observed response in the post-period
alpha: tail-area for posterior intervals
Returns:
list of checked (and possibly reformatted) input arguments
|
def _format_input(
self,
data,
pre_period,
post_period,
model_args,
ucm_model,
post_period_response,
alpha,
):
"""Check and format all input arguments supplied to CausalImpact().
See the documentation of CausalImpact() for details
Args:
data: Pandas DataFrame or data frame
pre_period: beginning and end of pre-period
post_period: beginning and end of post-period
model_args: dict of additional arguments for the model
ucm_model: UnobservedComponents model (instead of data)
post_period_response: observed response in the post-period
alpha: tail-area for posterior intervals
Returns:
list of checked (and possibly reformatted) input arguments
"""
from statsmodels.tsa.statespace.structural import UnobservedComponents
# Check that a consistent set of variables has been provided
args = [data, pre_period, post_period, ucm_model, post_period_response]
self._check_valid_args_combo(args)
# Check <data> and convert to Pandas DataFrame, with rows
# representing time points
if data is not None:
data = self._format_input_data(data)
# Check <pre_period> and <post_period>
if data is not None:
checked = self._format_input_prepost(pre_period, post_period, data)
pre_period = checked["pre_period"]
post_period = checked["post_period"]
self.params["pre_period"] = pre_period
self.params["post_period"] = post_period
# Parse <model_args>, fill gaps using <_defaults>
_defaults = {
"ndraws": 1000,
"nburn": 100,
"niter": 1000,
"standardize_data": True,
"prior_level_sd": 0.01,
"nseasons": 1,
"season_duration": 1,
"dynamic_regression": False,
}
if model_args is None:
model_args = _defaults
else:
missing = [key for key in _defaults if key not in model_args]
for arg in missing:
model_args[arg] = _defaults[arg]
# Check <standardize_data>
if not isinstance(model_args["standardize_data"], bool):
raise ValueError("model_args.standardize_data must be a" + " boolean value")
# Check <ucm_model>
if ucm_model is not None and not isinstance(ucm_model, UnobservedComponents):
raise ValueError(
"ucm_model must be an object of class "
"statsmodels.tsa.statespace.structural.UnobservedComponents "
"instead received " + str(type(ucm_model))[8:-2]
)
# Check <post_period_response>
if ucm_model is not None:
if not is_list_like(post_period_response):
raise ValueError("post_period_response must be list-like")
        # dtype.num == 17 is NumPy's object dtype, which lists of
        # datetime-like values are converted to
        if np.array(post_period_response).dtype.num == 17:
            raise ValueError(
                "post_period_response should not be datetime values"
            )
        if not np.all(np.isreal(post_period_response)):
            raise ValueError(
                "post_period_response must contain all real values"
            )
# Check <alpha>
self._check_valid_alpha(alpha)
# Return updated arguments
kwargs = {
"data": data,
"pre_period": pre_period,
"post_period": post_period,
"model_args": model_args,
"ucm_model": ucm_model,
"post_period_response": post_period_response,
"alpha": alpha,
}
return kwargs
|
(self, data, pre_period, post_period, model_args, ucm_model, post_period_response, alpha)
|
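The ``model_args`` handling above only fills in missing keys, so user-supplied values always win; a stand-alone sketch of that merge (the dict literal mirrors ``_defaults`` above):

```python
_defaults = {
    "ndraws": 1000,
    "nburn": 100,
    "niter": 1000,
    "standardize_data": True,
    "prior_level_sd": 0.01,
    "nseasons": 1,
    "season_duration": 1,
    "dynamic_regression": False,
}
model_args = {"niter": 2000, "nseasons": 7}
# copy defaults only for keys the caller did not provide
for key in (k for k in _defaults if k not in model_args):
    model_args[key] = _defaults[key]
assert model_args["niter"] == 2000   # user value kept
assert model_args["ndraws"] == 1000  # default filled in
```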
52,609 |
causalimpact.analysis
|
_format_input_data
|
Check and format the data argument provided to CausalImpact().
Args:
data: Pandas DataFrame
Returns:
correctly formatted Pandas DataFrame
|
@staticmethod
def _format_input_data(data):
"""Check and format the data argument provided to CausalImpact().
Args:
data: Pandas DataFrame
Returns:
correctly formatted Pandas DataFrame
"""
# If <data> is a Pandas DataFrame and the first column is 'date',
# try to convert
if (
isinstance(data, pd.DataFrame)
and isinstance(data.columns[0], str)
and data.columns[0].lower() in ["date", "time"]
):
data = data.set_index(data.columns[0])
# Try to convert to Pandas DataFrame
try:
data = pd.DataFrame(data)
except ValueError:
raise ValueError("could not convert input data to Pandas " + "DataFrame")
# Must have at least 3 time points
if len(data.index) < 3:
raise ValueError("data must have at least 3 time points")
# Must not have NA in covariates (if any)
if len(data.columns) >= 2 and pd.isnull(data.iloc[:, 1:]).any(axis=None):
raise ValueError("covariates must not contain null values")
return data
|
(data)
|
52,610 |
causalimpact.analysis
|
_format_input_prepost
|
Check and format the pre_period and post_period input arguments.
Args:
pre_period: two-element list
post_period: two-element list
data: already-checked Pandas DataFrame, for reference only
|
def _format_input_prepost(self, pre_period, post_period, data):
"""Check and format the pre_period and post_period input arguments.
Args:
pre_period: two-element list
post_period: two-element list
data: already-checked Pandas DataFrame, for reference only
"""
self._check_periods_are_valid(pre_period, post_period)
pre_period, post_period = self._align_periods_dtypes(
pre_period, post_period, data
)
if pre_period[1] > post_period[0]:
        raise ValueError(
            "post_period must start at least 1 observation"
            " after the end of the pre_period"
        )
if isinstance(data.index, pd.RangeIndex):
loc3 = post_period[0]
loc4 = post_period[1]
else:
loc3 = data.index.get_loc(post_period[0])
loc4 = data.index.get_loc(post_period[1])
if loc4 < loc3:
        raise ValueError(
            "post_period[1] must not be earlier than post_period[0]"
        )
if pre_period[0] < data.index.min():
pre_period[0] = data.index.min()
if post_period[1] > data.index.max():
post_period[1] = data.index.max()
return {"pre_period": pre_period, "post_period": post_period}
|
(self, pre_period, post_period, data)
|
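Note that out-of-range endpoints are clamped to the data's extent rather than rejected; a sketch with an integer ``RangeIndex``, assuming the ``CausalImpact`` constructor shown earlier:

```python
import pandas as pd
from causalimpact import CausalImpact

data = pd.DataFrame({"y": range(10), "x": range(10)})  # RangeIndex 0..9
ci = CausalImpact(data, pre_period=[-5, 4], post_period=[5, 99])
checked = ci._format_input_prepost([-5, 4], [5, 99], data)
# pre_period[0] is clamped to the index minimum, post_period[1] to the maximum
assert checked["pre_period"] == [0, 4]
assert checked["post_period"] == [5, 9]
```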
52,611 |
causalimpact.analysis
|
_print_report
| null |
@staticmethod
def _print_report(
mean_pred_fmt,
mean_resp_fmt,
mean_lower_fmt,
mean_upper_fmt,
abs_effect_fmt,
abs_effect_upper_fmt,
abs_effect_lower_fmt,
rel_effect_fmt,
rel_effect_upper_fmt,
rel_effect_lower_fmt,
cum_resp_fmt,
cum_pred_fmt,
cum_lower_fmt,
cum_upper_fmt,
confidence,
cum_rel_effect_lower,
cum_rel_effect_upper,
cum_rel_effect,
width,
p_value,
alpha,
):
sig = not (cum_rel_effect_lower < 0 < cum_rel_effect_upper)
pos = cum_rel_effect > 0
# Summarize averages
stmt = textwrap.dedent(
"""During the post-intervention period, the response
variable had an average value of
approx. {mean_resp}.
""".format(
mean_resp=mean_resp_fmt
)
)
if sig:
stmt += " By contrast, in "
else:
stmt += " In "
stmt += textwrap.dedent(
"""
the absence of an intervention, we would have
expected an average response of {mean_pred}. The
{confidence} interval of this counterfactual
prediction is [{mean_lower}, {mean_upper}].
Subtracting this prediction from the observed
response yields an estimate of the causal effect
the intervention had on the response variable.
This effect is {abs_effect} with a
{confidence} interval of [{abs_lower},
{abs_upper}]. For a discussion of the
significance of this effect,
see below.
""".format(
mean_pred=mean_pred_fmt,
confidence=confidence,
mean_lower=mean_lower_fmt,
mean_upper=mean_upper_fmt,
abs_effect=abs_effect_fmt,
abs_upper=abs_effect_upper_fmt,
abs_lower=abs_effect_lower_fmt,
)
)
# Summarize sums
stmt2 = textwrap.dedent(
"""
Summing up the individual data points during the
post-intervention period (which can only sometimes be
meaningfully interpreted), the response variable had an
overall value of {cum_resp}.
""".format(
cum_resp=cum_resp_fmt
)
)
if sig:
stmt2 += " By contrast, had "
else:
stmt2 += " Had "
stmt2 += textwrap.dedent(
"""
the intervention not taken place, we would have expected
a sum of {cum_pred}. The {confidence} interval of this
prediction is [{cum_pred_lower}, {cum_pred_upper}]
""".format(
cum_pred=cum_pred_fmt,
confidence=confidence,
cum_pred_lower=cum_lower_fmt,
cum_pred_upper=cum_upper_fmt,
)
)
# Summarize relative numbers (in which case row [1] = row [2])
stmt3 = textwrap.dedent(
"""
The above results are given in terms
of absolute numbers. In relative terms, the
response variable showed
"""
)
if pos:
stmt3 += " an increase of "
else:
stmt3 += " a decrease of "
stmt3 += textwrap.dedent(
"""
{rel_effect}. The {confidence} interval of this
percentage is [{rel_effect_lower},
{rel_effect_upper}]
""".format(
confidence=confidence,
rel_effect=rel_effect_fmt,
rel_effect_lower=rel_effect_lower_fmt,
rel_effect_upper=rel_effect_upper_fmt,
)
)
# Comment on significance
if sig and pos:
stmt4 = textwrap.dedent(
"""
This means that the positive effect observed
during the intervention period is statistically
significant and unlikely to be due to random
fluctuations. It should be noted, however, that
the question of whether this increase also bears
substantive significance can only be answered by
comparing the absolute effect {abs_effect} to
the original goal of the underlying
intervention.
""".format(
abs_effect=abs_effect_fmt
)
)
elif sig and not pos:
stmt4 = textwrap.dedent(
"""
This means that the negative effect observed
during the intervention period is statistically
significant. If the experimenter had expected a
positive effect, it is recommended to double-check
whether anomalies in the control variables may have
caused an overly optimistic expectation of what
should have happened in the response variable in the
absence of the intervention.
"""
)
elif not sig and pos:
stmt4 = textwrap.dedent(
"""
This means that, although the intervention
appears to have caused a positive effect, this
effect is not statistically significant when
considering the post-intervention period as a whole.
Individual days or shorter stretches within the
intervention period may of course still have had a
significant effect, as indicated whenever the lower
limit of the impact time series (lower plot) was
above zero.
"""
)
elif not sig and not pos:
stmt4 = textwrap.dedent(
"""
This means that, although it may look as though
the intervention has exerted a negative effect on
the response variable when considering the
intervention period as a whole, this effect is not
statistically significant, and so cannot be
meaningfully interpreted.
"""
)
if not sig:
stmt4 += textwrap.dedent(
"""
The apparent effect could be the result of random
fluctuations that are unrelated to the intervention.
This is often the case when the intervention period
is very long and includes much of the time when the
effect has already worn off. It can also be the case
when the intervention period is too short to
distinguish the signal from the noise. Finally,
failing to find a significant effect can happen when
there are not enough control variables or when these
variables do not correlate well with the response
variable during the learning period."""
)
if p_value < alpha:
stmt5 = textwrap.dedent(
"""The probability of obtaining this effect by
chance is very small (Bayesian one-sided tail-area
probability {p}). This means the
causal effect can be considered statistically
significant.""".format(
p=np.round(p_value, 3)
)
)
    else:
        stmt5 = textwrap.dedent(
            """The probability of obtaining this effect by
            chance is p = {p}. This means the effect may
            be spurious and would generally not be considered
            statistically significant.""".format(
                p=np.round(p_value, 3)
            )
        )
print(textwrap.fill(stmt, width=width))
print("\n")
print(textwrap.fill(stmt2, width=width))
print("\n")
print(textwrap.fill(stmt3, width=width))
print("\n")
print(textwrap.fill(stmt4, width=width))
print("\n")
print(textwrap.fill(stmt5, width=width))
|
(mean_pred_fmt, mean_resp_fmt, mean_lower_fmt, mean_upper_fmt, abs_effect_fmt, abs_effect_upper_fmt, abs_effect_lower_fmt, rel_effect_fmt, rel_effect_upper_fmt, rel_effect_lower_fmt, cum_resp_fmt, cum_pred_fmt, cum_lower_fmt, cum_upper_fmt, confidence, cum_rel_effect_lower, cum_rel_effect_upper, cum_rel_effect, width, p_value, alpha)
|
52,612 |
causalimpact.analysis
|
_run_with_data
| null |
def _run_with_data(
self, data, pre_period, post_period, model_args, alpha, estimation
):
# Zoom in on data in modeling range
if data.shape[1] == 1: # no exogenous values provided
raise ValueError("data contains no exogenous variables")
data_modeling = data.copy()
df_pre = data_modeling.loc[pre_period[0] : pre_period[1], :]
df_post = data_modeling.loc[post_period[0] : post_period[1], :]
# Standardize all variables
orig_std_params = (0, 1)
if model_args["standardize_data"]:
sd_results = standardize_all_variables(
data_modeling, pre_period, post_period
)
df_pre = sd_results["data_pre"]
df_post = sd_results["data_post"]
orig_std_params = sd_results["orig_std_params"]
# Construct model and perform inference
model = construct_model(df_pre, model_args)
self.model = model
model_results = model_fit(model, estimation, model_args)
inferences = compile_inferences(
model_results,
data,
df_pre,
df_post,
None,
alpha,
orig_std_params,
estimation,
)
# "append" to 'CausalImpact' object
self.inferences = inferences["series"]
self.results = model_results
|
(self, data, pre_period, post_period, model_args, alpha, estimation)
|
52,613 |
causalimpact.analysis
|
_run_with_ucm
|
Runs an impact analysis on top of a ucm model.
Args:
ucm_model: Model as returned by UnobservedComponents(),
in which the data during the post-period was set to NA
post_period_response: observed data during the post-intervention
period
alpha: tail-probabilities of posterior intervals
|
def _run_with_ucm(
self, ucm_model, post_period_response, alpha, model_args, estimation
):
"""Runs an impact analysis on top of a ucm model.
Args:
ucm_model: Model as returned by UnobservedComponents(),
in which the data during the post-period was set to NA
post_period_response: observed data during the post-intervention
period
alpha: tail-probabilities of posterior intervals"""
df_pre = ucm_model.data.orig_endog[: -len(post_period_response)]
df_pre = pd.DataFrame(df_pre)
post_period_response = pd.DataFrame(post_period_response)
data = pd.DataFrame(
np.concatenate([df_pre.values, post_period_response.values])
)
orig_std_params = (0, 1)
model_results = model_fit(ucm_model, estimation, model_args)
# Compile posterior inferences
inferences = compile_inferences(
model_results,
data,
df_pre,
None,
post_period_response,
alpha,
orig_std_params,
estimation,
)
obs_inter = model_results.model_nobs - len(post_period_response)
self.params["pre_period"] = [0, obs_inter - 1]
self.params["post_period"] = [obs_inter, -1]
self.data = pd.concat([df_pre, post_period_response])
self.inferences = inferences["series"]
self.results = model_results
|
(self, ucm_model, post_period_response, alpha, model_args, estimation)
|
52,614 |
causalimpact.analysis
|
plot
| null |
def plot(
self,
panels=None,
figsize=(15, 12),
fname=None,
):
if panels is None:
panels = ["original", "pointwise", "cumulative"]
plt = get_matplotlib()
fig = plt.figure(figsize=figsize)
    data_inter = self.params["pre_period"][1]
    # make sure the intervention marker is a Timestamp when the data has
    # a datetime index
    if isinstance(self.data.index, pd.DatetimeIndex):
        data_inter = pd.Timestamp(data_inter)
inferences = self.inferences.iloc[1:, :]
# Observation and regression components
if "original" in panels:
ax1 = plt.subplot(3, 1, 1)
plt.plot(inferences.point_pred, "r--", linewidth=2, label="model")
plt.plot(inferences.response, "k", linewidth=2, label="endog")
plt.axvline(data_inter, c="k", linestyle="--")
plt.fill_between(
inferences.index,
inferences.point_pred_lower,
inferences.point_pred_upper,
facecolor="gray",
interpolate=True,
alpha=0.25,
)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.legend(loc="upper left")
plt.title("Observation vs prediction")
if "pointwise" in panels:
# Pointwise difference
if "ax1" in locals():
ax2 = plt.subplot(312, sharex=ax1)
else:
ax2 = plt.subplot(312)
lift = inferences.point_effect
plt.plot(lift, "r--", linewidth=2)
plt.plot(self.data.index, np.zeros(self.data.shape[0]), "g-", linewidth=2)
plt.axvline(data_inter, c="k", linestyle="--")
lift_lower = inferences.point_effect_lower
lift_upper = inferences.point_effect_upper
plt.fill_between(
inferences.index,
lift_lower,
lift_upper,
facecolor="gray",
interpolate=True,
alpha=0.25,
)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.title("Difference")
# Cumulative impact
if "cumulative" in panels:
if "ax1" in locals():
plt.subplot(313, sharex=ax1)
elif "ax2" in locals():
plt.subplot(313, sharex=ax2)
else:
plt.subplot(313)
plt.plot(
inferences.index,
inferences.cum_effect,
"r--",
linewidth=2,
)
plt.plot(self.data.index, np.zeros(self.data.shape[0]), "g-", linewidth=2)
plt.axvline(data_inter, c="k", linestyle="--")
plt.fill_between(
inferences.index,
inferences.cum_effect_lower,
inferences.cum_effect_upper,
facecolor="gray",
interpolate=True,
alpha=0.25,
)
plt.axis([inferences.index[0], inferences.index[-1], None, None])
plt.title("Cumulative Impact")
plt.xlabel("$T$")
if fname is None:
plt.show()
else:
fig.savefig(fname, bbox_inches="tight")
plt.close(fig)
|
(self, panels=None, figsize=(15, 12), fname=None)
|
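A brief usage sketch for ``plot`` after an analysis has been run (see ``run`` below); ``panels`` selects any subset of the three panels:

```python
ci.plot()                                   # all three panels, shown on screen
ci.plot(panels=["original", "cumulative"])  # skip the pointwise-difference panel
ci.plot(fname="impact.png")                 # write the figure to disk instead
```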
52,615 |
causalimpact.analysis
|
run
| null |
def run(self):
kwargs = self._format_input(
self.params["data"],
self.params["pre_period"],
self.params["post_period"],
self.params["model_args"],
self.params["ucm_model"],
self.params["post_period_response"],
self.params["alpha"],
)
    # Depending on input, dispatch to the appropriate _run_with_* method
if self.data is not None:
self._run_with_data(
kwargs["data"],
kwargs["pre_period"],
kwargs["post_period"],
kwargs["model_args"],
kwargs["alpha"],
self.params["estimation"],
)
else:
self._run_with_ucm(
kwargs["ucm_model"],
kwargs["post_period_response"],
kwargs["alpha"],
kwargs["model_args"],
self.params["estimation"],
)
|
(self)
|
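A minimal end-to-end sketch of the data-based path, assuming the package exposes ``CausalImpact`` at the top level; the synthetic series and the size of the simulated effect are illustrative:

```python
import numpy as np
import pandas as pd
from causalimpact import CausalImpact

np.random.seed(1)
x = np.random.randn(100).cumsum()      # one exogenous covariate
y = 1.2 * x + np.random.randn(100)     # response driven by the covariate
y[70:] += 5                            # simulated intervention effect
data = pd.DataFrame({"y": y, "x": x})  # response first, covariates after

ci = CausalImpact(data, pre_period=[0, 69], post_period=[70, 99])
ci.run()                     # dispatches to _run_with_data
ci.summary()                 # tabular summary
ci.summary(output="report")  # natural-language report
```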
52,616 |
causalimpact.analysis
|
summary
|
reports a summary of the results
Parameters
----------
output: str
can be summary or report. summary outputs a table.
report outputs a natural language description of the
findings
width : int
line width of the output. Only relevant if output == report
path : str
path to output summary to csv. Only relevant if output == summary
|
def summary(self, output="summary", width=120, path=None):
"""reports a summary of the results
Parameters
----------
output: str
can be summary or report. summary outputs a table.
report outputs a natural language description of the
findings
width : int
line width of the output. Only relevant if output == report
path : str
path to output summary to csv. Only relevant if output == summary
"""
alpha = self.params["alpha"]
confidence = "{}%".format(int((1 - alpha) * 100))
post_period = self.params["post_period"]
post_inf = self.inferences.loc[post_period[0] : post_period[1], :]
post_point_resp = post_inf.loc[:, "response"]
post_point_pred = post_inf.loc[:, "point_pred"]
post_point_upper = post_inf.loc[:, "point_pred_upper"]
post_point_lower = post_inf.loc[:, "point_pred_lower"]
mean_resp = post_point_resp.mean()
mean_resp_fmt = int(mean_resp)
cum_resp = post_point_resp.sum()
cum_resp_fmt = int(cum_resp)
mean_pred = post_point_pred.mean()
mean_pred_fmt = int(post_point_pred.mean())
cum_pred = post_point_pred.sum()
cum_pred_fmt = int(cum_pred)
mean_lower = post_point_lower.mean()
mean_lower_fmt = int(mean_lower)
mean_upper = post_point_upper.mean()
mean_upper_fmt = int(mean_upper)
mean_ci_fmt = [mean_lower_fmt, mean_upper_fmt]
cum_lower = post_point_lower.sum()
cum_lower_fmt = int(cum_lower)
cum_upper = post_point_upper.sum()
cum_upper_fmt = int(cum_upper)
cum_ci_fmt = [cum_lower_fmt, cum_upper_fmt]
abs_effect = (post_point_resp - post_point_pred).mean()
abs_effect_fmt = int(abs_effect)
cum_abs_effect = (post_point_resp - post_point_pred).sum()
cum_abs_effect_fmt = int(cum_abs_effect)
abs_effect_lower = (post_point_resp - post_point_lower).mean()
abs_effect_lower_fmt = int(abs_effect_lower)
abs_effect_upper = (post_point_resp - post_point_upper).mean()
abs_effect_upper_fmt = int(abs_effect_upper)
abs_effect_ci_fmt = [abs_effect_lower_fmt, abs_effect_upper_fmt]
cum_abs_lower = (post_point_resp - post_point_lower).sum()
cum_abs_lower_fmt = int(cum_abs_lower)
cum_abs_upper = (post_point_resp - post_point_upper).sum()
cum_abs_upper_fmt = int(cum_abs_upper)
cum_abs_effect_ci_fmt = [cum_abs_lower_fmt, cum_abs_upper_fmt]
rel_effect = abs_effect / mean_pred * 100
rel_effect_fmt = "{:.1f}%".format(rel_effect)
cum_rel_effect = cum_abs_effect / cum_pred * 100
cum_rel_effect_fmt = "{:.1f}%".format(cum_rel_effect)
rel_effect_lower = abs_effect_lower / mean_pred * 100
rel_effect_lower_fmt = "{:.1f}%".format(rel_effect_lower)
rel_effect_upper = abs_effect_upper / mean_pred * 100
rel_effect_upper_fmt = "{:.1f}%".format(rel_effect_upper)
rel_effect_ci_fmt = [rel_effect_lower_fmt, rel_effect_upper_fmt]
cum_rel_effect_lower = cum_abs_lower / cum_pred * 100
cum_rel_effect_lower_fmt = "{:.1f}%".format(cum_rel_effect_lower)
cum_rel_effect_upper = cum_abs_upper / cum_pred * 100
cum_rel_effect_upper_fmt = "{:.1f}%".format(cum_rel_effect_upper)
cum_rel_effect_ci_fmt = [cum_rel_effect_lower_fmt, cum_rel_effect_upper_fmt]
    # assuming an approximately normal distribution, back out the standard
    # deviation from the prediction interval; the 1.96 multiplier
    # corresponds to the default alpha of 0.05 (a 95% interval)
    std_pred = (
        mean_upper - mean_pred
    ) / 1.96  # from mean_upper = mean_pred + 1.96 * std
# calculate z score
z_score = (0 - mean_pred) / std_pred
# pvalue from zscore
p_value = st.norm.cdf(z_score)
prob_causal = 1 - p_value
p_value_perc = p_value * 100
prob_causal_perc = prob_causal * 100
if output == "summary":
# Posterior inference {CausalImpact}
summary = [
[mean_resp_fmt, cum_resp_fmt],
[mean_pred_fmt, cum_pred_fmt],
[mean_ci_fmt, cum_ci_fmt],
[" ", " "],
[abs_effect_fmt, cum_abs_effect_fmt],
[abs_effect_ci_fmt, cum_abs_effect_ci_fmt],
[" ", " "],
[rel_effect_fmt, cum_rel_effect_fmt],
[rel_effect_ci_fmt, cum_rel_effect_ci_fmt],
[" ", " "],
["{:.1f}%".format(p_value_perc), " "],
["{:.1f}%".format(prob_causal_perc), " "],
]
summary = pd.DataFrame(
summary,
columns=["Average", "Cumulative"],
index=[
"Actual",
"Predicted",
"95% CI",
" ",
"Absolute Effect",
"95% CI",
" ",
"Relative Effect",
"95% CI",
" ",
"P-value",
"Prob. of Causal Effect",
],
)
df_print(summary, path)
elif output == "report":
self._print_report(
mean_pred_fmt,
mean_resp_fmt,
mean_lower_fmt,
mean_upper_fmt,
abs_effect_fmt,
abs_effect_upper_fmt,
abs_effect_lower_fmt,
rel_effect_fmt,
rel_effect_upper_fmt,
rel_effect_lower_fmt,
cum_resp_fmt,
cum_pred_fmt,
cum_lower_fmt,
cum_upper_fmt,
confidence,
cum_rel_effect_lower,
cum_rel_effect_upper,
cum_rel_effect,
width,
p_value,
alpha,
)
else:
        raise ValueError(
            "Output argument must be either 'summary' or 'report'"
        )
|
(self, output='summary', width=120, path=None)
|
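The tail-area probability above is backed out of the prediction interval under the normality assumption; the same arithmetic with illustrative numbers:

```python
import scipy.stats as st

mean_pred, mean_upper = 10.0, 19.8
std_pred = (mean_upper - mean_pred) / 1.96  # 5.0, from upper = mean + 1.96*std
z_score = (0 - mean_pred) / std_pred        # -2.0
p_value = st.norm.cdf(z_score)              # ~0.0228
prob_causal = 1 - p_value                   # ~0.9772
```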
52,622 |
streamx.stream
|
AsyncStream
| null |
class AsyncStream(Generic[T]):
def __init__(self) -> None:
self._consuming_tasks: list[asyncio.Task] = []
self._closed: bool = False
self._event = SharedEvent[T]()
self._listeners = set[AsyncStreamListener[T]]()
@property
def listeners(self) -> set[AsyncStreamListener[T]]:
return self._listeners
@property
def closed(self) -> bool:
return self._closed
async def push(self, item: T) -> None:
if self._closed:
raise StreamClosedError("Can't push item into a closed stream.")
current_task = asyncio.current_task()
if current_task in self._consuming_tasks:
raise StreamShortCircuitError(
"Can't push an item while the task is listening to this stream."
)
await self._event.share(asyncio.sleep(0, item))
async def close(self) -> None:
if self._closed:
return
try:
await self.push(StopAsyncIteration) # type: ignore
except StreamShortCircuitError:
raise StreamShortCircuitError(
"Can't close a stream from a task that is listening to it."
) from None
self._closed = True
for listener in self._listeners:
listener.close()
@contextmanager
def listen(self) -> Iterator[AsyncStreamListener[T]]:
if self._closed:
raise StreamClosedError("Can't listen to a closed stream.")
current_task = asyncio.current_task()
if current_task in self._consuming_tasks:
raise StreamShortCircuitError("Task is already listening to this stream.")
listener = None
try:
with self._event.listen() as event_listener:
listener = AsyncStreamListener(event_listener)
self._listeners.add(listener)
if listener.current_task:
self._consuming_tasks.append(listener.current_task)
yield listener
finally:
if listener:
listener.close()
self._listeners.remove(listener)
if listener.current_task:
self._consuming_tasks.remove(listener.current_task)
|
() -> None
|
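A producer/consumer sketch for ``AsyncStream``. ``push`` waits until every registered listener is ready to receive, and pushing (or closing) from a task that is itself listening raises ``StreamShortCircuitError``:

```python
import asyncio
from streamx.stream import AsyncStream

async def producer(stream: AsyncStream[int]) -> None:
    for i in range(3):
        await stream.push(i)
    await stream.close()

async def consumer(stream: AsyncStream[int]) -> None:
    with stream.listen() as listener:
        async for item in listener:
            print("got", item)

async def main() -> None:
    stream: AsyncStream[int] = AsyncStream()
    # start the consumer first so its listener is registered before pushes
    await asyncio.gather(consumer(stream), producer(stream))

asyncio.run(main())
```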
52,623 |
streamx.stream
|
__init__
| null |
def __init__(self) -> None:
self._consuming_tasks: list[asyncio.Task] = []
self._closed: bool = False
self._event = SharedEvent[T]()
self._listeners = set[AsyncStreamListener[T]]()
|
(self) -> NoneType
|
52,627 |
streamx.stream
|
AsyncStreamIterator
| null |
class AsyncStreamIterator(AsyncIterator[T], Generic[T], AsyncIterable[T]):
def __init__(self, event_listener: SharedEventListener[T]) -> None:
self._event_listener = event_listener
async def __anext__(self) -> T:
item = await self._event_listener.wait()
if item is StopAsyncIteration:
raise StopAsyncIteration
return item
|
(event_listener: streamx.event.SharedEventListener[~T]) -> None
|
52,629 |
streamx.stream
|
__anext__
| null |
async def __anext__(self) -> T:
    item = await self._event_listener.wait()
    if item is StopAsyncIteration:
        raise StopAsyncIteration
    return item
|
(self) -> ~T
|
52,631 |
streamx.stream
|
AsyncStreamListener
| null |
class AsyncStreamListener(Generic[T]):
def __init__(self, event_listener: SharedEventListener[T]) -> None:
self._event_listener = event_listener
self._current_task = asyncio.current_task()
self._closed = False
@property
def current_task(self) -> asyncio.Task | None:
return self._current_task
@property
def closed(self) -> bool:
return self._closed
def close(self) -> None:
self._closed = True
def __aiter__(self) -> AsyncStreamIterator[T]:
if self._closed:
raise StreamClosedError("Can't iterate over a closed stream.")
return AsyncStreamIterator(self._event_listener)
|
(event_listener: streamx.event.SharedEventListener[~T]) -> None
|
52,632 |
streamx.stream
|
__aiter__
| null |
def __aiter__(self) -> AsyncStreamIterator[T]:
if self._closed:
raise StreamClosedError("Can't iterate over a closed stream.")
return AsyncStreamIterator(self._event_listener)
|
(self) -> streamx.stream.AsyncStreamIterator[~T]
|
52,633 |
streamx.stream
|
__init__
| null |
def __init__(self, event_listener: SharedEventListener[T]) -> None:
self._event_listener = event_listener
self._current_task = asyncio.current_task()
self._closed = False
|
(self, event_listener: streamx.event.SharedEventListener[~T]) -> NoneType
|
52,634 |
streamx.stream
|
close
| null |
def close(self) -> None:
self._closed = True
|
(self) -> NoneType
|
52,635 |
streamx.event
|
SharedEvent
| null |
class SharedEvent(Generic[T]):
def __init__(self, loop: asyncio.AbstractEventLoop | None = None):
self.loop = loop or asyncio.get_event_loop()
self._listeners: set[SharedEventListener[T]] = set()
@contextmanager
def listen(self) -> Iterator[SharedEventListener[T]]:
listener = None
try:
listener = SharedEventListener(self.loop)
self._listeners.add(listener)
yield listener
finally:
if listener:
self._listeners.remove(listener)
async def share(self, coro: ...) -> T:
value: T = await coro
await asyncio.gather(*[listener.ready.wait() for listener in self._listeners])
for listener in self._listeners:
listener.push(value)
return value
|
(loop: asyncio.events.AbstractEventLoop | None = None)
|
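A sketch of the ``ready`` handshake: ``share`` awaits every listener's ``ready`` flag before pushing, so a value is only delivered to listeners that are already blocked in ``wait``:

```python
import asyncio
from streamx.event import SharedEvent

async def main() -> None:
    event: SharedEvent[str] = SharedEvent()

    async def waiter() -> None:
        with event.listen() as listener:
            print(await listener.wait())  # prints "hello"

    task = asyncio.create_task(waiter())
    await asyncio.sleep(0)                # let the waiter register and block
    await event.share(asyncio.sleep(0, "hello"))
    await task

asyncio.run(main())
```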
52,636 |
streamx.event
|
__init__
| null |
def __init__(self, loop: asyncio.AbstractEventLoop | None = None):
self.loop = loop or asyncio.get_event_loop()
self._listeners: set[SharedEventListener[T]] = set()
|
(self, loop: Optional[asyncio.events.AbstractEventLoop] = None)
|
52,638 |
streamx.event
|
share
| null |
async def share(self, coro: ...) -> T:
    value: T = await coro
    await asyncio.gather(*[listener.ready.wait() for listener in self._listeners])
    for listener in self._listeners:
        listener.push(value)
    return value
|
(self, coro: Ellipsis) -> ~T
|
52,639 |
streamx.event
|
SharedEventListener
| null |
class SharedEventListener(Generic[T]):
def __init__(self, loop: asyncio.AbstractEventLoop | None = None) -> None:
self._loop = loop or asyncio.get_event_loop()
self._waiter: asyncio.Future[T] = self._loop.create_future()
self._ready = asyncio.Event()
@property
def ready(self) -> asyncio.Event:
return self._ready
def push(self, value: T) -> None:
self._waiter.set_result(value)
async def wait(self) -> T:
try:
self._ready.set()
return await self._waiter
finally:
self._ready.clear()
self._waiter = self._loop.create_future()
|
(loop: asyncio.events.AbstractEventLoop | None = None) -> None
|
52,640 |
streamx.event
|
__init__
| null |
def __init__(self, loop: asyncio.AbstractEventLoop | None = None) -> None:
self._loop = loop or asyncio.get_event_loop()
self._waiter: asyncio.Future[T] = self._loop.create_future()
self._ready = asyncio.Event()
|
(self, loop: Optional[asyncio.events.AbstractEventLoop] = None) -> NoneType
|
52,641 |
streamx.event
|
push
| null |
def push(self, value: T) -> None:
self._waiter.set_result(value)
|
(self, value: ~T) -> NoneType
|
52,643 |
streamx.errors
|
StreamClosedError
|
Raised when operations are performed on a closed stream
|
class StreamClosedError(StreamError):
"""
Raised when operations are performed on a closed stream
"""
| null |
52,644 |
streamx.errors
|
StreamError
|
Base class for all streamx exceptions
|
class StreamError(Exception):
"""Base class for all streamx exceptions"""
| null |
52,645 |
streamx.errors
|
StreamShortCircuitError
|
Raised when a stream is being consumed by the same task
and a new item is pushed
|
class StreamShortCircuitError(StreamError):
"""
Raised when a stream is being consumed by the same task
and a new item is pushed
"""
| null |
52,649 |
bmitzkus_pulumi_onepassword.get_item
|
AwaitableGetItemResult
| null |
class AwaitableGetItemResult(GetItemResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetItemResult(
category=self.category,
database=self.database,
hostname=self.hostname,
id=self.id,
note_value=self.note_value,
password=self.password,
port=self.port,
sections=self.sections,
tags=self.tags,
title=self.title,
type=self.type,
url=self.url,
username=self.username,
uuid=self.uuid,
vault=self.vault)
|
(category=None, database=None, hostname=None, id=None, note_value=None, password=None, port=None, sections=None, tags=None, title=None, type=None, url=None, username=None, uuid=None, vault=None)
|
52,650 |
bmitzkus_pulumi_onepassword.get_item
|
__await__
| null |
def __await__(self):
if False:
yield self
return GetItemResult(
category=self.category,
database=self.database,
hostname=self.hostname,
id=self.id,
note_value=self.note_value,
password=self.password,
port=self.port,
sections=self.sections,
tags=self.tags,
title=self.title,
type=self.type,
url=self.url,
username=self.username,
uuid=self.uuid,
vault=self.vault)
|
(self)
|
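The ``if False: yield self`` line is what turns ``__await__`` into a generator function, letting an already-computed result satisfy the awaitable protocol; a stripped-down version of the same pattern (names are illustrative):

```python
import asyncio

class AwaitableValue:
    def __init__(self, value):
        self.value = value

    def __await__(self):
        if False:
            yield self  # never executed; only makes this a generator
        return self.value  # returned via StopIteration to the awaiting coroutine

async def main():
    print(await AwaitableValue(42))  # 42

asyncio.run(main())
```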
52,652 |
bmitzkus_pulumi_onepassword.get_item
|
__init__
| null |
def __init__(__self__, category=None, database=None, hostname=None, id=None, note_value=None, password=None, port=None, sections=None, tags=None, title=None, type=None, url=None, username=None, uuid=None, vault=None):
if category and not isinstance(category, str):
raise TypeError("Expected argument 'category' to be a str")
pulumi.set(__self__, "category", category)
if database and not isinstance(database, str):
raise TypeError("Expected argument 'database' to be a str")
pulumi.set(__self__, "database", database)
if hostname and not isinstance(hostname, str):
raise TypeError("Expected argument 'hostname' to be a str")
pulumi.set(__self__, "hostname", hostname)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if note_value and not isinstance(note_value, str):
raise TypeError("Expected argument 'note_value' to be a str")
pulumi.set(__self__, "note_value", note_value)
if password and not isinstance(password, str):
raise TypeError("Expected argument 'password' to be a str")
pulumi.set(__self__, "password", password)
if port and not isinstance(port, str):
raise TypeError("Expected argument 'port' to be a str")
pulumi.set(__self__, "port", port)
if sections and not isinstance(sections, list):
raise TypeError("Expected argument 'sections' to be a list")
pulumi.set(__self__, "sections", sections)
if tags and not isinstance(tags, list):
raise TypeError("Expected argument 'tags' to be a list")
pulumi.set(__self__, "tags", tags)
if title and not isinstance(title, str):
raise TypeError("Expected argument 'title' to be a str")
pulumi.set(__self__, "title", title)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if url and not isinstance(url, str):
raise TypeError("Expected argument 'url' to be a str")
pulumi.set(__self__, "url", url)
if username and not isinstance(username, str):
raise TypeError("Expected argument 'username' to be a str")
pulumi.set(__self__, "username", username)
if uuid and not isinstance(uuid, str):
raise TypeError("Expected argument 'uuid' to be a str")
pulumi.set(__self__, "uuid", uuid)
if vault and not isinstance(vault, str):
raise TypeError("Expected argument 'vault' to be a str")
pulumi.set(__self__, "vault", vault)
|
(__self__, category=None, database=None, hostname=None, id=None, note_value=None, password=None, port=None, sections=None, tags=None, title=None, type=None, url=None, username=None, uuid=None, vault=None)
|
52,653 |
bmitzkus_pulumi_onepassword.get_vault
|
AwaitableGetVaultResult
| null |
class AwaitableGetVaultResult(GetVaultResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVaultResult(
description=self.description,
id=self.id,
name=self.name,
uuid=self.uuid)
|
(description=None, id=None, name=None, uuid=None)
|
52,654 |
bmitzkus_pulumi_onepassword.get_vault
|
__await__
| null |
def __await__(self):
if False:
yield self
return GetVaultResult(
description=self.description,
id=self.id,
name=self.name,
uuid=self.uuid)
|
(self)
|
52,656 |
bmitzkus_pulumi_onepassword.get_vault
|
__init__
| null |
def __init__(__self__, description=None, id=None, name=None, uuid=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if uuid and not isinstance(uuid, str):
raise TypeError("Expected argument 'uuid' to be a str")
pulumi.set(__self__, "uuid", uuid)
|
(__self__, description=None, id=None, name=None, uuid=None)
|
52,657 |
bmitzkus_pulumi_onepassword.get_item
|
GetItemResult
|
A collection of values returned by getItem.
|
class GetItemResult:
"""
A collection of values returned by getItem.
"""
def __init__(__self__, category=None, database=None, hostname=None, id=None, note_value=None, password=None, port=None, sections=None, tags=None, title=None, type=None, url=None, username=None, uuid=None, vault=None):
if category and not isinstance(category, str):
raise TypeError("Expected argument 'category' to be a str")
pulumi.set(__self__, "category", category)
if database and not isinstance(database, str):
raise TypeError("Expected argument 'database' to be a str")
pulumi.set(__self__, "database", database)
if hostname and not isinstance(hostname, str):
raise TypeError("Expected argument 'hostname' to be a str")
pulumi.set(__self__, "hostname", hostname)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if note_value and not isinstance(note_value, str):
raise TypeError("Expected argument 'note_value' to be a str")
pulumi.set(__self__, "note_value", note_value)
if password and not isinstance(password, str):
raise TypeError("Expected argument 'password' to be a str")
pulumi.set(__self__, "password", password)
if port and not isinstance(port, str):
raise TypeError("Expected argument 'port' to be a str")
pulumi.set(__self__, "port", port)
if sections and not isinstance(sections, list):
raise TypeError("Expected argument 'sections' to be a list")
pulumi.set(__self__, "sections", sections)
if tags and not isinstance(tags, list):
raise TypeError("Expected argument 'tags' to be a list")
pulumi.set(__self__, "tags", tags)
if title and not isinstance(title, str):
raise TypeError("Expected argument 'title' to be a str")
pulumi.set(__self__, "title", title)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if url and not isinstance(url, str):
raise TypeError("Expected argument 'url' to be a str")
pulumi.set(__self__, "url", url)
if username and not isinstance(username, str):
raise TypeError("Expected argument 'username' to be a str")
pulumi.set(__self__, "username", username)
if uuid and not isinstance(uuid, str):
raise TypeError("Expected argument 'uuid' to be a str")
pulumi.set(__self__, "uuid", uuid)
if vault and not isinstance(vault, str):
raise TypeError("Expected argument 'vault' to be a str")
pulumi.set(__self__, "vault", vault)
@property
@pulumi.getter
def category(self) -> str:
"""
The category of the item. One of ["login" "password" "database"]
"""
return pulumi.get(self, "category")
@property
@pulumi.getter
def database(self) -> str:
"""
(Only applies to the database category) The name of the database.
"""
return pulumi.get(self, "database")
@property
@pulumi.getter
def hostname(self) -> str:
"""
(Only applies to the database category) The address where the database can be found
"""
return pulumi.get(self, "hostname")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="noteValue")
def note_value(self) -> str:
"""
Secure Note value.
"""
return pulumi.get(self, "note_value")
@property
@pulumi.getter
def password(self) -> str:
"""
Password for this item.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter
def port(self) -> str:
"""
(Only applies to the database category) The port the database is listening on.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter
def sections(self) -> Sequence['outputs.GetItemSectionResult']:
"""
A list of custom sections in an item
"""
return pulumi.get(self, "sections")
@property
@pulumi.getter
def tags(self) -> Sequence[str]:
"""
An array of strings of the tags assigned to the item.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def title(self) -> str:
"""
        The title of the item to retrieve. This field will be populated with the title of the item if the item is looked up by its UUID.
"""
return pulumi.get(self, "title")
@property
@pulumi.getter
def type(self) -> str:
"""
(Only applies to the database category) The type of database. One of ["db2" "filemaker" "msaccess" "mssql" "mysql" "oracle" "postgresql" "sqlite" "other"]
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def url(self) -> str:
"""
The primary URL for the item.
"""
return pulumi.get(self, "url")
@property
@pulumi.getter
def username(self) -> str:
"""
Username for this item.
"""
return pulumi.get(self, "username")
@property
@pulumi.getter
def uuid(self) -> str:
"""
        The UUID of the item to retrieve. This field will be populated with the UUID of the item if the item is looked up by its title.
"""
return pulumi.get(self, "uuid")
@property
@pulumi.getter
def vault(self) -> str:
"""
The UUID of the vault the item is in.
"""
return pulumi.get(self, "vault")
|
(category=None, database=None, hostname=None, id=None, note_value=None, password=None, port=None, sections=None, tags=None, title=None, type=None, url=None, username=None, uuid=None, vault=None)
|
52,660 |
bmitzkus_pulumi_onepassword.get_vault
|
GetVaultResult
|
A collection of values returned by getVault.
|
class GetVaultResult:
"""
A collection of values returned by getVault.
"""
def __init__(__self__, description=None, id=None, name=None, uuid=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if uuid and not isinstance(uuid, str):
raise TypeError("Expected argument 'uuid' to be a str")
pulumi.set(__self__, "uuid", uuid)
@property
@pulumi.getter
def description(self) -> str:
"""
The description of the vault.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
        The name of the vault to retrieve. This field will be populated with the name of the vault if the vault is looked up by its UUID.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def uuid(self) -> str:
"""
        The UUID of the vault to retrieve. This field will be populated with the UUID of the vault if the vault is looked up by its name.
"""
return pulumi.get(self, "uuid")
|
(description=None, id=None, name=None, uuid=None)
|
52,663 |
bmitzkus_pulumi_onepassword.item
|
Item
| null |
class Item(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
password_recipe: Optional[pulumi.Input[pulumi.InputType['ItemPasswordRecipeArgs']]] = None,
port: Optional[pulumi.Input[str]] = None,
sections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItemSectionArgs']]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
title: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None,
vault: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A 1Password item.
## Example Usage
```python
import pulumi
import bmitzkus_pulumi_onepassword as onepassword
demo_password = onepassword.Item("demoPassword",
vault=var["demo_vault"],
title="Demo Password Recipe",
category="password",
password_recipe=onepassword.ItemPasswordRecipeArgs(
length=40,
symbols=False,
))
demo_login = onepassword.Item("demoLogin",
vault=var["demo_vault"],
title="Demo Terraform Login",
category="login",
username="[email protected]")
demo_db = onepassword.Item("demoDb",
vault=var["demo_vault"],
category="database",
type="mysql",
title="Demo TF Database",
username="root",
database="Example MySQL Instance",
hostname="localhost",
port="3306")
```
## Import
import an existing 1Password item
```sh
$ pulumi import onepassword:index/item:Item myitem vaults/<vault uuid>/items/<item uuid>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] category: The category of the item. One of ["login" "password" "database"]
:param pulumi.Input[str] database: (Only applies to the database category) The name of the database.
:param pulumi.Input[str] hostname: (Only applies to the database category) The address where the database can be found
:param pulumi.Input[str] password: Password for this item.
        :param pulumi.Input[pulumi.InputType['ItemPasswordRecipeArgs']] password_recipe: The recipe used to generate the password for this item.
:param pulumi.Input[str] port: (Only applies to the database category) The port the database is listening on.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItemSectionArgs']]]] sections: A list of custom sections in an item
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: An array of strings of the tags assigned to the item.
:param pulumi.Input[str] title: The title of the item.
        :param pulumi.Input[str] type: (Only applies to the database category) The type of database. One of ["db2" "filemaker" "msaccess" "mssql" "mysql" "oracle" "postgresql" "sqlite" "other"]
:param pulumi.Input[str] url: The primary URL for the item.
:param pulumi.Input[str] username: Username for this item.
:param pulumi.Input[str] vault: The UUID of the vault the item is in.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ItemArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A 1Password item.
## Example Usage
```python
import pulumi
import bmitzkus_pulumi_onepassword as onepassword
demo_password = onepassword.Item("demoPassword",
vault=var["demo_vault"],
title="Demo Password Recipe",
category="password",
password_recipe=onepassword.ItemPasswordRecipeArgs(
length=40,
symbols=False,
))
demo_login = onepassword.Item("demoLogin",
vault=var["demo_vault"],
title="Demo Terraform Login",
category="login",
username="[email protected]")
demo_db = onepassword.Item("demoDb",
vault=var["demo_vault"],
category="database",
type="mysql",
title="Demo TF Database",
username="root",
database="Example MySQL Instance",
hostname="localhost",
port="3306")
```
## Import
import an existing 1Password item
```sh
$ pulumi import onepassword:index/item:Item myitem vaults/<vault uuid>/items/<item uuid>
```
:param str resource_name: The name of the resource.
:param ItemArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ItemArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
password_recipe: Optional[pulumi.Input[pulumi.InputType['ItemPasswordRecipeArgs']]] = None,
port: Optional[pulumi.Input[str]] = None,
sections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItemSectionArgs']]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
title: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None,
vault: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ItemArgs.__new__(ItemArgs)
__props__.__dict__["category"] = category
__props__.__dict__["database"] = database
__props__.__dict__["hostname"] = hostname
__props__.__dict__["password"] = None if password is None else pulumi.Output.secret(password)
__props__.__dict__["password_recipe"] = password_recipe
__props__.__dict__["port"] = port
__props__.__dict__["sections"] = sections
__props__.__dict__["tags"] = tags
__props__.__dict__["title"] = title
__props__.__dict__["type"] = type
__props__.__dict__["url"] = url
__props__.__dict__["username"] = username
if vault is None and not opts.urn:
raise TypeError("Missing required property 'vault'")
__props__.__dict__["vault"] = vault
__props__.__dict__["uuid"] = None
secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["password"])
opts = pulumi.ResourceOptions.merge(opts, secret_opts)
super(Item, __self__).__init__(
'onepassword:index/item:Item',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
password_recipe: Optional[pulumi.Input[pulumi.InputType['ItemPasswordRecipeArgs']]] = None,
port: Optional[pulumi.Input[str]] = None,
sections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItemSectionArgs']]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
title: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None,
uuid: Optional[pulumi.Input[str]] = None,
vault: Optional[pulumi.Input[str]] = None) -> 'Item':
"""
Get an existing Item resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] category: The category of the item. One of ["login" "password" "database"]
:param pulumi.Input[str] database: (Only applies to the database category) The name of the database.
:param pulumi.Input[str] hostname: (Only applies to the database category) The address where the database can be found
:param pulumi.Input[str] password: Password for this item.
        :param pulumi.Input[pulumi.InputType['ItemPasswordRecipeArgs']] password_recipe: The recipe used to generate the password for this item.
:param pulumi.Input[str] port: (Only applies to the database category) The port the database is listening on.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItemSectionArgs']]]] sections: A list of custom sections in an item
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: An array of strings of the tags assigned to the item.
:param pulumi.Input[str] title: The title of the item.
        :param pulumi.Input[str] type: (Only applies to the database category) The type of database. One of ["db2" "filemaker" "msaccess" "mssql" "mysql" "oracle" "postgresql" "sqlite" "other"]
:param pulumi.Input[str] url: The primary URL for the item.
:param pulumi.Input[str] username: Username for this item.
:param pulumi.Input[str] uuid: The UUID of the item. Item identifiers are unique within a specific vault.
:param pulumi.Input[str] vault: The UUID of the vault the item is in.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ItemState.__new__(_ItemState)
__props__.__dict__["category"] = category
__props__.__dict__["database"] = database
__props__.__dict__["hostname"] = hostname
__props__.__dict__["password"] = password
__props__.__dict__["password_recipe"] = password_recipe
__props__.__dict__["port"] = port
__props__.__dict__["sections"] = sections
__props__.__dict__["tags"] = tags
__props__.__dict__["title"] = title
__props__.__dict__["type"] = type
__props__.__dict__["url"] = url
__props__.__dict__["username"] = username
__props__.__dict__["uuid"] = uuid
__props__.__dict__["vault"] = vault
return Item(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def category(self) -> pulumi.Output[Optional[str]]:
"""
The category of the item. One of ["login" "password" "database"]
"""
return pulumi.get(self, "category")
@property
@pulumi.getter
def database(self) -> pulumi.Output[Optional[str]]:
"""
(Only applies to the database category) The name of the database.
"""
return pulumi.get(self, "database")
@property
@pulumi.getter
def hostname(self) -> pulumi.Output[Optional[str]]:
"""
(Only applies to the database category) The address where the database can be found
"""
return pulumi.get(self, "hostname")
@property
@pulumi.getter
def password(self) -> pulumi.Output[str]:
"""
Password for this item.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter(name="passwordRecipe")
def password_recipe(self) -> pulumi.Output[Optional['outputs.ItemPasswordRecipe']]:
"""
        The recipe used to generate the password for this item.
"""
return pulumi.get(self, "password_recipe")
@property
@pulumi.getter
def port(self) -> pulumi.Output[Optional[str]]:
"""
(Only applies to the database category) The port the database is listening on.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter
def sections(self) -> pulumi.Output[Optional[Sequence['outputs.ItemSection']]]:
"""
A list of custom sections in an item
"""
return pulumi.get(self, "sections")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
An array of strings of the tags assigned to the item.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def title(self) -> pulumi.Output[Optional[str]]:
"""
The title of the item.
"""
return pulumi.get(self, "title")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
"""
        (Only applies to the database category) The type of database. One of ["db2" "filemaker" "msaccess" "mssql" "mysql" "oracle" "postgresql" "sqlite" "other"]
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def url(self) -> pulumi.Output[Optional[str]]:
"""
The primary URL for the item.
"""
return pulumi.get(self, "url")
@property
@pulumi.getter
def username(self) -> pulumi.Output[Optional[str]]:
"""
Username for this item.
"""
return pulumi.get(self, "username")
@property
@pulumi.getter
def uuid(self) -> pulumi.Output[str]:
"""
The UUID of the item. Item identifiers are unique within a specific vault.
"""
return pulumi.get(self, "uuid")
@property
@pulumi.getter
def vault(self) -> pulumi.Output[str]:
"""
The UUID of the vault the item is in.
"""
return pulumi.get(self, "vault")
|
(resource_name: str, *args, **kwargs)
|
52,664 |
bmitzkus_pulumi_onepassword.item
|
__init__
| null |
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ItemArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
|
(__self__, resource_name: str, *args, **kwargs)
|
52,666 |
bmitzkus_pulumi_onepassword.item
|
_internal_init
| null |
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
password_recipe: Optional[pulumi.Input[pulumi.InputType['ItemPasswordRecipeArgs']]] = None,
port: Optional[pulumi.Input[str]] = None,
sections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItemSectionArgs']]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
title: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None,
vault: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ItemArgs.__new__(ItemArgs)
__props__.__dict__["category"] = category
__props__.__dict__["database"] = database
__props__.__dict__["hostname"] = hostname
__props__.__dict__["password"] = None if password is None else pulumi.Output.secret(password)
__props__.__dict__["password_recipe"] = password_recipe
__props__.__dict__["port"] = port
__props__.__dict__["sections"] = sections
__props__.__dict__["tags"] = tags
__props__.__dict__["title"] = title
__props__.__dict__["type"] = type
__props__.__dict__["url"] = url
__props__.__dict__["username"] = username
if vault is None and not opts.urn:
raise TypeError("Missing required property 'vault'")
__props__.__dict__["vault"] = vault
__props__.__dict__["uuid"] = None
secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["password"])
opts = pulumi.ResourceOptions.merge(opts, secret_opts)
super(Item, __self__).__init__(
'onepassword:index/item:Item',
resource_name,
__props__,
opts)
|
(__self__, resource_name: str, opts: Optional[pulumi.resource.ResourceOptions] = None, category: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, database: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, hostname: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, password: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, password_recipe: Union[ForwardRef('ItemPasswordRecipeArgs'), Mapping[str, Any], Awaitable[Union[ForwardRef('ItemPasswordRecipeArgs'), Mapping[str, Any]]], ForwardRef('Output[T]'), NoneType] = None, port: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, sections: Union[Sequence[Union[ForwardRef('ItemSectionArgs'), Mapping[str, Any], Awaitable[Union[ForwardRef('ItemSectionArgs'), Mapping[str, Any]]], ForwardRef('Output[T]')]], Awaitable[Sequence[Union[ForwardRef('ItemSectionArgs'), Mapping[str, Any], Awaitable[Union[ForwardRef('ItemSectionArgs'), Mapping[str, Any]]], ForwardRef('Output[T]')]]], ForwardRef('Output[T]'), NoneType] = None, tags: Union[Sequence[Union[str, Awaitable[str], ForwardRef('Output[T]')]], Awaitable[Sequence[Union[str, Awaitable[str], ForwardRef('Output[T]')]]], ForwardRef('Output[T]'), NoneType] = None, title: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, type: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, url: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, username: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, vault: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, __props__=None)
|
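A minimal usage sketch for the resource above: `vault` is the only required input, and `_internal_init` wraps `password` in `pulumi.Output.secret()` so it is encrypted in state. All identifiers and credentials below are placeholders, and the sketch assumes the package re-exports `Item` at the top level, as generated Pulumi SDKs typically do.
```python
import pulumi
import bmitzkus_pulumi_onepassword as onepassword

# Placeholder vault UUID; a real program would read it from config
# or resolve it with a get_vault() lookup.
login = onepassword.Item("demo-login",
    vault="hypothetical-vault-uuid",
    category="login",
    title="Demo Login",
    username="demo@example.com",
    password="placeholder-password")  # wrapped as a secret by _internal_init

pulumi.export("item_uuid", login.uuid)
```
|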
52,667 |
bmitzkus_pulumi_onepassword.item
|
get
|
Get an existing Item resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] category: The category of the item. One of ["login" "password" "database"]
:param pulumi.Input[str] database: (Only applies to the database category) The name of the database.
:param pulumi.Input[str] hostname: (Only applies to the database category) The address where the database can be found
:param pulumi.Input[str] password: Password for this item.
:param pulumi.Input[pulumi.InputType['ItemPasswordRecipeArgs']] password_recipe: The recipe used to generate a new password for this item.
:param pulumi.Input[str] port: (Only applies to the database category) The port the database is listening on.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItemSectionArgs']]]] sections: A list of custom sections in an item
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: An array of strings of the tags assigned to the item.
:param pulumi.Input[str] title: The title of the item.
:param pulumi.Input[str] type: The type of value stored in the field. One of ["STRING" "EMAIL" "CONCEALED" "URL" "OTP" "DATE" "MONTH_YEAR" "MENU"]
:param pulumi.Input[str] url: The primary URL for the item.
:param pulumi.Input[str] username: Username for this item.
:param pulumi.Input[str] uuid: The UUID of the item. Item identifiers are unique within a specific vault.
:param pulumi.Input[str] vault: The UUID of the vault the item is in.
|
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
password_recipe: Optional[pulumi.Input[pulumi.InputType['ItemPasswordRecipeArgs']]] = None,
port: Optional[pulumi.Input[str]] = None,
sections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItemSectionArgs']]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
title: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None,
uuid: Optional[pulumi.Input[str]] = None,
vault: Optional[pulumi.Input[str]] = None) -> 'Item':
"""
Get an existing Item resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] category: The category of the item. One of ["login" "password" "database"]
:param pulumi.Input[str] database: (Only applies to the database category) The name of the database.
:param pulumi.Input[str] hostname: (Only applies to the database category) The address where the database can be found
:param pulumi.Input[str] password: Password for this item.
:param pulumi.Input[pulumi.InputType['ItemPasswordRecipeArgs']] password_recipe: The recipe used to generate a new password for this item.
:param pulumi.Input[str] port: (Only applies to the database category) The port the database is listening on.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItemSectionArgs']]]] sections: A list of custom sections in an item
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: An array of strings of the tags assigned to the item.
:param pulumi.Input[str] title: The title of the item.
:param pulumi.Input[str] type: The type of value stored in the field. One of ["STRING" "EMAIL" "CONCEALED" "URL" "OTP" "DATE" "MONTH_YEAR" "MENU"]
:param pulumi.Input[str] url: The primary URL for the item.
:param pulumi.Input[str] username: Username for this item.
:param pulumi.Input[str] uuid: The UUID of the item. Item identifiers are unique within a specific vault.
:param pulumi.Input[str] vault: The UUID of the vault the item is in.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ItemState.__new__(_ItemState)
__props__.__dict__["category"] = category
__props__.__dict__["database"] = database
__props__.__dict__["hostname"] = hostname
__props__.__dict__["password"] = password
__props__.__dict__["password_recipe"] = password_recipe
__props__.__dict__["port"] = port
__props__.__dict__["sections"] = sections
__props__.__dict__["tags"] = tags
__props__.__dict__["title"] = title
__props__.__dict__["type"] = type
__props__.__dict__["url"] = url
__props__.__dict__["username"] = username
__props__.__dict__["uuid"] = uuid
__props__.__dict__["vault"] = vault
return Item(resource_name, opts=opts, __props__=__props__)
|
(resource_name: str, id: Union[str, Awaitable[str], ForwardRef('Output[T]')], opts: Optional[pulumi.resource.ResourceOptions] = None, category: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, database: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, hostname: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, password: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, password_recipe: Union[ForwardRef('ItemPasswordRecipeArgs'), Mapping[str, Any], Awaitable[Union[ForwardRef('ItemPasswordRecipeArgs'), Mapping[str, Any]]], ForwardRef('Output[T]'), NoneType] = None, port: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, sections: Union[Sequence[Union[ForwardRef('ItemSectionArgs'), Mapping[str, Any], Awaitable[Union[ForwardRef('ItemSectionArgs'), Mapping[str, Any]]], ForwardRef('Output[T]')]], Awaitable[Sequence[Union[ForwardRef('ItemSectionArgs'), Mapping[str, Any], Awaitable[Union[ForwardRef('ItemSectionArgs'), Mapping[str, Any]]], ForwardRef('Output[T]')]]], ForwardRef('Output[T]'), NoneType] = None, tags: Union[Sequence[Union[str, Awaitable[str], ForwardRef('Output[T]')]], Awaitable[Sequence[Union[str, Awaitable[str], ForwardRef('Output[T]')]]], ForwardRef('Output[T]'), NoneType] = None, title: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, type: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, url: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, username: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, uuid: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, vault: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None) -> 'Item'
|
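A sketch of adopting an existing item's state with `Item.get`. Only state is read; nothing is created. The `id` format shown follows the upstream Terraform provider's `vaults/<vault-uuid>/items/<item-uuid>` import syntax and is an assumption here.
```python
import pulumi
import bmitzkus_pulumi_onepassword as onepassword

# Assumed id format: vaults/<vault-uuid>/items/<item-uuid>
existing = onepassword.Item.get(
    "imported-login",
    id="vaults/hypothetical-vault-uuid/items/hypothetical-item-uuid")

pulumi.export("imported_title", existing.title)
```
|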
52,671 |
bmitzkus_pulumi_onepassword.item
|
ItemArgs
| null |
class ItemArgs:
def __init__(__self__, *,
vault: pulumi.Input[str],
category: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
password_recipe: Optional[pulumi.Input['ItemPasswordRecipeArgs']] = None,
port: Optional[pulumi.Input[str]] = None,
sections: Optional[pulumi.Input[Sequence[pulumi.Input['ItemSectionArgs']]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
title: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing an Item resource.
:param pulumi.Input[str] vault: The UUID of the vault the item is in.
:param pulumi.Input[str] category: The category of the item. One of ["login" "password" "database"]
:param pulumi.Input[str] database: (Only applies to the database category) The name of the database.
:param pulumi.Input[str] hostname: (Only applies to the database category) The address where the database can be found
:param pulumi.Input[str] password: Password for this item.
:param pulumi.Input['ItemPasswordRecipeArgs'] password_recipe: The recipe used to generate a new password for this item.
:param pulumi.Input[str] port: (Only applies to the database category) The port the database is listening on.
:param pulumi.Input[Sequence[pulumi.Input['ItemSectionArgs']]] sections: A list of custom sections in an item
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: An array of strings of the tags assigned to the item.
:param pulumi.Input[str] title: The title of the item.
:param pulumi.Input[str] type: The type of value stored in the field. One of ["STRING" "EMAIL" "CONCEALED" "URL" "OTP" "DATE" "MONTH_YEAR" "MENU"]
:param pulumi.Input[str] url: The primary URL for the item.
:param pulumi.Input[str] username: Username for this item.
"""
pulumi.set(__self__, "vault", vault)
if category is not None:
pulumi.set(__self__, "category", category)
if database is not None:
pulumi.set(__self__, "database", database)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if password is not None:
pulumi.set(__self__, "password", password)
if password_recipe is not None:
pulumi.set(__self__, "password_recipe", password_recipe)
if port is not None:
pulumi.set(__self__, "port", port)
if sections is not None:
pulumi.set(__self__, "sections", sections)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if title is not None:
pulumi.set(__self__, "title", title)
if type is not None:
pulumi.set(__self__, "type", type)
if url is not None:
pulumi.set(__self__, "url", url)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def vault(self) -> pulumi.Input[str]:
"""
The UUID of the vault the item is in.
"""
return pulumi.get(self, "vault")
@vault.setter
def vault(self, value: pulumi.Input[str]):
pulumi.set(self, "vault", value)
@property
@pulumi.getter
def category(self) -> Optional[pulumi.Input[str]]:
"""
The category of the item. One of ["login" "password" "database"]
"""
return pulumi.get(self, "category")
@category.setter
def category(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "category", value)
@property
@pulumi.getter
def database(self) -> Optional[pulumi.Input[str]]:
"""
(Only applies to the database category) The name of the database.
"""
return pulumi.get(self, "database")
@database.setter
def database(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "database", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[str]]:
"""
(Only applies to the database category) The address where the database can be found
"""
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hostname", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password for this item.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="passwordRecipe")
def password_recipe(self) -> Optional[pulumi.Input['ItemPasswordRecipeArgs']]:
"""
The recipe used to generate a new password for this item.
"""
return pulumi.get(self, "password_recipe")
@password_recipe.setter
def password_recipe(self, value: Optional[pulumi.Input['ItemPasswordRecipeArgs']]):
pulumi.set(self, "password_recipe", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[str]]:
"""
(Only applies to the database category) The port the database is listening on.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def sections(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ItemSectionArgs']]]]:
"""
A list of custom sections in an item
"""
return pulumi.get(self, "sections")
@sections.setter
def sections(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ItemSectionArgs']]]]):
pulumi.set(self, "sections", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of strings of the tags assigned to the item.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def title(self) -> Optional[pulumi.Input[str]]:
"""
The title of the item.
"""
return pulumi.get(self, "title")
@title.setter
def title(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "title", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of value stored in the field. One of ["STRING" "EMAIL" "CONCEALED" "URL" "OTP" "DATE" "MONTH_YEAR" "MENU"]
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
The primary URL for the item.
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
Username for this item.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
|
(*, vault: Union[str, Awaitable[str], ForwardRef('Output[T]')], category: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, database: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, hostname: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, password: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, password_recipe: Union[ForwardRef('ItemPasswordRecipeArgs'), Awaitable[ForwardRef('ItemPasswordRecipeArgs')], ForwardRef('Output[T]'), NoneType] = None, port: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, sections: Union[Sequence[Union[ForwardRef('ItemSectionArgs'), Awaitable[ForwardRef('ItemSectionArgs')], ForwardRef('Output[T]')]], Awaitable[Sequence[Union[ForwardRef('ItemSectionArgs'), Awaitable[ForwardRef('ItemSectionArgs')], ForwardRef('Output[T]')]]], ForwardRef('Output[T]'), NoneType] = None, tags: Union[Sequence[Union[str, Awaitable[str], ForwardRef('Output[T]')]], Awaitable[Sequence[Union[str, Awaitable[str], ForwardRef('Output[T]')]]], ForwardRef('Output[T]'), NoneType] = None, title: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, type: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, url: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, username: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None)
|
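As the `Item.__init__` dispatch earlier shows, the resource also accepts an `ItemArgs` object positionally instead of keyword arguments. A brief sketch with placeholder values:
```python
import bmitzkus_pulumi_onepassword as onepassword

args = onepassword.ItemArgs(
    vault="hypothetical-vault-uuid",  # the only required field
    category="password",
    title="Backup Passphrase")
item = onepassword.Item("backup-passphrase", args)
```
|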
52,673 |
bmitzkus_pulumi_onepassword.item
|
__init__
|
The set of arguments for constructing an Item resource.
:param pulumi.Input[str] vault: The UUID of the vault the item is in.
:param pulumi.Input[str] category: The category of the item. One of ["login" "password" "database"]
:param pulumi.Input[str] database: (Only applies to the database category) The name of the database.
:param pulumi.Input[str] hostname: (Only applies to the database category) The address where the database can be found
:param pulumi.Input[str] password: Password for this item.
:param pulumi.Input['ItemPasswordRecipeArgs'] password_recipe: The recipe used to generate a new password for this item.
:param pulumi.Input[str] port: (Only applies to the database category) The port the database is listening on.
:param pulumi.Input[Sequence[pulumi.Input['ItemSectionArgs']]] sections: A list of custom sections in an item
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: An array of strings of the tags assigned to the item.
:param pulumi.Input[str] title: The title of the item.
:param pulumi.Input[str] type: The type of value stored in the field. One of ["STRING" "EMAIL" "CONCEALED" "URL" "OTP" "DATE" "MONTH_YEAR" "MENU"]
:param pulumi.Input[str] url: The primary URL for the item.
:param pulumi.Input[str] username: Username for this item.
|
def __init__(__self__, *,
vault: pulumi.Input[str],
category: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
password_recipe: Optional[pulumi.Input['ItemPasswordRecipeArgs']] = None,
port: Optional[pulumi.Input[str]] = None,
sections: Optional[pulumi.Input[Sequence[pulumi.Input['ItemSectionArgs']]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
title: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing an Item resource.
:param pulumi.Input[str] vault: The UUID of the vault the item is in.
:param pulumi.Input[str] category: The category of the item. One of ["login" "password" "database"]
:param pulumi.Input[str] database: (Only applies to the database category) The name of the database.
:param pulumi.Input[str] hostname: (Only applies to the database category) The address where the database can be found
:param pulumi.Input[str] password: Password for this item.
:param pulumi.Input['ItemPasswordRecipeArgs'] password_recipe: The recipe used to generate a new password for this item.
:param pulumi.Input[str] port: (Only applies to the database category) The port the database is listening on.
:param pulumi.Input[Sequence[pulumi.Input['ItemSectionArgs']]] sections: A list of custom sections in an item
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: An array of strings of the tags assigned to the item.
:param pulumi.Input[str] title: The title of the item.
:param pulumi.Input[str] type: The type of value stored in the field. One of ["STRING" "EMAIL" "CONCEALED" "URL" "OTP" "DATE" "MONTH_YEAR" "MENU"]
:param pulumi.Input[str] url: The primary URL for the item.
:param pulumi.Input[str] username: Username for this item.
"""
pulumi.set(__self__, "vault", vault)
if category is not None:
pulumi.set(__self__, "category", category)
if database is not None:
pulumi.set(__self__, "database", database)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if password is not None:
pulumi.set(__self__, "password", password)
if password_recipe is not None:
pulumi.set(__self__, "password_recipe", password_recipe)
if port is not None:
pulumi.set(__self__, "port", port)
if sections is not None:
pulumi.set(__self__, "sections", sections)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if title is not None:
pulumi.set(__self__, "title", title)
if type is not None:
pulumi.set(__self__, "type", type)
if url is not None:
pulumi.set(__self__, "url", url)
if username is not None:
pulumi.set(__self__, "username", username)
|
(__self__, *, vault: Union[str, Awaitable[str], ForwardRef('Output[T]')], category: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, database: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, hostname: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, password: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, password_recipe: Union[ForwardRef('ItemPasswordRecipeArgs'), Awaitable[ForwardRef('ItemPasswordRecipeArgs')], ForwardRef('Output[T]'), NoneType] = None, port: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, sections: Union[Sequence[Union[ForwardRef('ItemSectionArgs'), Awaitable[ForwardRef('ItemSectionArgs')], ForwardRef('Output[T]')]], Awaitable[Sequence[Union[ForwardRef('ItemSectionArgs'), Awaitable[ForwardRef('ItemSectionArgs')], ForwardRef('Output[T]')]]], ForwardRef('Output[T]'), NoneType] = None, tags: Union[Sequence[Union[str, Awaitable[str], ForwardRef('Output[T]')]], Awaitable[Sequence[Union[str, Awaitable[str], ForwardRef('Output[T]')]]], ForwardRef('Output[T]'), NoneType] = None, title: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, type: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, url: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, username: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None)
|
52,674 |
bmitzkus_pulumi_onepassword._inputs
|
ItemPasswordRecipeArgs
| null |
class ItemPasswordRecipeArgs:
def __init__(__self__, *,
digits: Optional[pulumi.Input[bool]] = None,
length: Optional[pulumi.Input[int]] = None,
letters: Optional[pulumi.Input[bool]] = None,
symbols: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[bool] digits: Use digits [0-9] when generating the password.
:param pulumi.Input[int] length: The length of the password to be generated.
:param pulumi.Input[bool] letters: Use letters [a-zA-Z] when generating the password.
:param pulumi.Input[bool] symbols: Use symbols [!@.-_*] when generating the password.
"""
if digits is not None:
pulumi.set(__self__, "digits", digits)
if length is not None:
pulumi.set(__self__, "length", length)
if letters is not None:
pulumi.set(__self__, "letters", letters)
if symbols is not None:
pulumi.set(__self__, "symbols", symbols)
@property
@pulumi.getter
def digits(self) -> Optional[pulumi.Input[bool]]:
"""
Use digits [0-9] when generating the password.
"""
return pulumi.get(self, "digits")
@digits.setter
def digits(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "digits", value)
@property
@pulumi.getter
def length(self) -> Optional[pulumi.Input[int]]:
"""
The length of the password to be generated.
"""
return pulumi.get(self, "length")
@length.setter
def length(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "length", value)
@property
@pulumi.getter
def letters(self) -> Optional[pulumi.Input[bool]]:
"""
Use letters [a-zA-Z] when generating the password.
"""
return pulumi.get(self, "letters")
@letters.setter
def letters(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "letters", value)
@property
@pulumi.getter
def symbols(self) -> Optional[pulumi.Input[bool]]:
"""
Use symbols [!@.-_*] when generating the password.
"""
return pulumi.get(self, "symbols")
@symbols.setter
def symbols(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "symbols", value)
|
(*, digits: Union[bool, Awaitable[bool], ForwardRef('Output[T]'), NoneType] = None, length: Union[int, Awaitable[int], ForwardRef('Output[T]'), NoneType] = None, letters: Union[bool, Awaitable[bool], ForwardRef('Output[T]'), NoneType] = None, symbols: Union[bool, Awaitable[bool], ForwardRef('Output[T]'), NoneType] = None)
|
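A sketch of letting the provider generate a password rather than supplying one, by attaching a recipe to the item; all values below are illustrative.
```python
import bmitzkus_pulumi_onepassword as onepassword

generated = onepassword.Item("generated-login",
    vault="hypothetical-vault-uuid",  # placeholder
    category="login",
    password_recipe=onepassword.ItemPasswordRecipeArgs(
        length=32,
        letters=True,
        digits=True,
        symbols=True))
```
|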
52,676 |
bmitzkus_pulumi_onepassword._inputs
|
__init__
|
:param pulumi.Input[bool] digits: Use digits [0-9] when generating the password.
:param pulumi.Input[int] length: The length of the password to be generated.
:param pulumi.Input[bool] letters: Use letters [a-zA-Z] when generating the password.
:param pulumi.Input[bool] symbols: Use symbols [!@.-_*] when generating the password.
|
def __init__(__self__, *,
digits: Optional[pulumi.Input[bool]] = None,
length: Optional[pulumi.Input[int]] = None,
letters: Optional[pulumi.Input[bool]] = None,
symbols: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[bool] digits: Use digits [0-9] when generating the password.
:param pulumi.Input[int] length: The length of the password to be generated.
:param pulumi.Input[bool] letters: Use letters [a-zA-Z] when generating the password.
:param pulumi.Input[bool] symbols: Use symbols [!@.-_*] when generating the password.
"""
if digits is not None:
pulumi.set(__self__, "digits", digits)
if length is not None:
pulumi.set(__self__, "length", length)
if letters is not None:
pulumi.set(__self__, "letters", letters)
if symbols is not None:
pulumi.set(__self__, "symbols", symbols)
|
(__self__, *, digits: Union[bool, Awaitable[bool], ForwardRef('Output[T]'), NoneType] = None, length: Union[int, Awaitable[int], ForwardRef('Output[T]'), NoneType] = None, letters: Union[bool, Awaitable[bool], ForwardRef('Output[T]'), NoneType] = None, symbols: Union[bool, Awaitable[bool], ForwardRef('Output[T]'), NoneType] = None)
|
52,677 |
bmitzkus_pulumi_onepassword._inputs
|
ItemSectionArgs
| null |
class ItemSectionArgs:
def __init__(__self__, *,
label: pulumi.Input[str],
fields: Optional[pulumi.Input[Sequence[pulumi.Input['ItemSectionFieldArgs']]]] = None,
id: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] label: The label for the section.
:param pulumi.Input[Sequence[pulumi.Input['ItemSectionFieldArgs']]] fields: A list of custom fields in the section.
:param pulumi.Input[str] id: A unique identifier for the section.
"""
pulumi.set(__self__, "label", label)
if fields is not None:
pulumi.set(__self__, "fields", fields)
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def label(self) -> pulumi.Input[str]:
"""
The label for the section.
"""
return pulumi.get(self, "label")
@label.setter
def label(self, value: pulumi.Input[str]):
pulumi.set(self, "label", value)
@property
@pulumi.getter
def fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ItemSectionFieldArgs']]]]:
"""
A list of custom fields in the section.
"""
return pulumi.get(self, "fields")
@fields.setter
def fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ItemSectionFieldArgs']]]]):
pulumi.set(self, "fields", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
A unique identifier for the section.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
|
(*, label: Union[str, Awaitable[str], ForwardRef('Output[T]')], fields: Union[Sequence[Union[ForwardRef('ItemSectionFieldArgs'), Awaitable[ForwardRef('ItemSectionFieldArgs')], ForwardRef('Output[T]')]], Awaitable[Sequence[Union[ForwardRef('ItemSectionFieldArgs'), Awaitable[ForwardRef('ItemSectionFieldArgs')], ForwardRef('Output[T]')]]], ForwardRef('Output[T]'), NoneType] = None, id: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None)
|
52,679 |
bmitzkus_pulumi_onepassword._inputs
|
__init__
|
:param pulumi.Input[str] label: The label for the section.
:param pulumi.Input[Sequence[pulumi.Input['ItemSectionFieldArgs']]] fields: A list of custom fields in the section.
:param pulumi.Input[str] id: A unique identifier for the section.
|
def __init__(__self__, *,
label: pulumi.Input[str],
fields: Optional[pulumi.Input[Sequence[pulumi.Input['ItemSectionFieldArgs']]]] = None,
id: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] label: The label for the section.
:param pulumi.Input[Sequence[pulumi.Input['ItemSectionFieldArgs']]] fields: A list of custom fields in the section.
:param pulumi.Input[str] id: A unique identifier for the section.
"""
pulumi.set(__self__, "label", label)
if fields is not None:
pulumi.set(__self__, "fields", fields)
if id is not None:
pulumi.set(__self__, "id", id)
|
(__self__, *, label: Union[str, Awaitable[str], ForwardRef('Output[T]')], fields: Union[Sequence[Union[ForwardRef('ItemSectionFieldArgs'), Awaitable[ForwardRef('ItemSectionFieldArgs')], ForwardRef('Output[T]')]], Awaitable[Sequence[Union[ForwardRef('ItemSectionFieldArgs'), Awaitable[ForwardRef('ItemSectionFieldArgs')], ForwardRef('Output[T]')]]], ForwardRef('Output[T]'), NoneType] = None, id: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None)
|
52,680 |
bmitzkus_pulumi_onepassword._inputs
|
ItemSectionFieldArgs
| null |
class ItemSectionFieldArgs:
def __init__(__self__, *,
label: pulumi.Input[str],
id: Optional[pulumi.Input[str]] = None,
password_recipe: Optional[pulumi.Input['ItemSectionFieldPasswordRecipeArgs']] = None,
purpose: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] label: The label for the field.
:param pulumi.Input[str] id: A unique identifier for the field.
:param pulumi.Input['ItemSectionFieldPasswordRecipeArgs'] password_recipe: The recipe used to generate a new password for this field.
:param pulumi.Input[str] purpose: Purpose indicates this is a special field: a username, password, or notes field. One of ["USERNAME" "PASSWORD" "NOTES"]
:param pulumi.Input[str] type: The type of value stored in the field. One of ["STRING" "EMAIL" "CONCEALED" "URL" "OTP" "DATE" "MONTH_YEAR" "MENU"]
:param pulumi.Input[str] value: The value of the field.
"""
pulumi.set(__self__, "label", label)
if id is not None:
pulumi.set(__self__, "id", id)
if password_recipe is not None:
pulumi.set(__self__, "password_recipe", password_recipe)
if purpose is not None:
pulumi.set(__self__, "purpose", purpose)
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def label(self) -> pulumi.Input[str]:
"""
The label for the field.
"""
return pulumi.get(self, "label")
@label.setter
def label(self, value: pulumi.Input[str]):
pulumi.set(self, "label", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
A unique identifier for the field.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="passwordRecipe")
def password_recipe(self) -> Optional[pulumi.Input['ItemSectionFieldPasswordRecipeArgs']]:
"""
The recipe used to generate a new password for this field.
"""
return pulumi.get(self, "password_recipe")
@password_recipe.setter
def password_recipe(self, value: Optional[pulumi.Input['ItemSectionFieldPasswordRecipeArgs']]):
pulumi.set(self, "password_recipe", value)
@property
@pulumi.getter
def purpose(self) -> Optional[pulumi.Input[str]]:
"""
Purpose indicates this is a special field: a username, password, or notes field. One of ["USERNAME" "PASSWORD" "NOTES"]
"""
return pulumi.get(self, "purpose")
@purpose.setter
def purpose(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "purpose", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of value stored in the field. One of ["STRING" "EMAIL" "CONCEALED" "URL" "OTP" "DATE" "MONTH_YEAR" "MENU"]
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The value of the field.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
|
(*, label: Union[str, Awaitable[str], ForwardRef('Output[T]')], id: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, password_recipe: Union[ForwardRef('ItemSectionFieldPasswordRecipeArgs'), Awaitable[ForwardRef('ItemSectionFieldPasswordRecipeArgs')], ForwardRef('Output[T]'), NoneType] = None, purpose: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, type: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, value: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None)
|
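How the section and field input types compose, as a sketch: each `ItemSectionArgs` requires a `label` and may carry `ItemSectionFieldArgs` entries, while the optional `id` values are generated by the provider when omitted. Labels and values below are placeholders.
```python
import bmitzkus_pulumi_onepassword as onepassword

creds = onepassword.Item("svc-creds",
    vault="hypothetical-vault-uuid",
    category="login",
    sections=[onepassword.ItemSectionArgs(
        label="API Credentials",
        fields=[onepassword.ItemSectionFieldArgs(
            label="api-key",
            type="CONCEALED",  # one of the field types listed above
            value="placeholder-key")])])
```
|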
52,682 |
bmitzkus_pulumi_onepassword._inputs
|
__init__
|
:param pulumi.Input[str] label: The label for the field.
:param pulumi.Input[str] id: A unique identifier for the field.
:param pulumi.Input['ItemSectionFieldPasswordRecipeArgs'] password_recipe: The recipe used to generate a new password for this field.
:param pulumi.Input[str] purpose: Purpose indicates this is a special field: a username, password, or notes field. One of ["USERNAME" "PASSWORD" "NOTES"]
:param pulumi.Input[str] type: The type of value stored in the field. One of ["STRING" "EMAIL" "CONCEALED" "URL" "OTP" "DATE" "MONTH_YEAR" "MENU"]
:param pulumi.Input[str] value: The value of the field.
|
def __init__(__self__, *,
label: pulumi.Input[str],
id: Optional[pulumi.Input[str]] = None,
password_recipe: Optional[pulumi.Input['ItemSectionFieldPasswordRecipeArgs']] = None,
purpose: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] label: The label for the field.
:param pulumi.Input[str] id: A unique identifier for the field.
:param pulumi.Input['ItemSectionFieldPasswordRecipeArgs'] password_recipe: The recipe used to generate a new password for this field.
:param pulumi.Input[str] purpose: Purpose indicates this is a special field: a username, password, or notes field. One of ["USERNAME" "PASSWORD" "NOTES"]
:param pulumi.Input[str] type: The type of value stored in the field. One of ["STRING" "EMAIL" "CONCEALED" "URL" "OTP" "DATE" "MONTH_YEAR" "MENU"]
:param pulumi.Input[str] value: The value of the field.
"""
pulumi.set(__self__, "label", label)
if id is not None:
pulumi.set(__self__, "id", id)
if password_recipe is not None:
pulumi.set(__self__, "password_recipe", password_recipe)
if purpose is not None:
pulumi.set(__self__, "purpose", purpose)
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
|
(__self__, *, label: Union[str, Awaitable[str], ForwardRef('Output[T]')], id: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, password_recipe: Union[ForwardRef('ItemSectionFieldPasswordRecipeArgs'), Awaitable[ForwardRef('ItemSectionFieldPasswordRecipeArgs')], ForwardRef('Output[T]'), NoneType] = None, purpose: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, type: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, value: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None)
|
52,683 |
bmitzkus_pulumi_onepassword._inputs
|
ItemSectionFieldPasswordRecipeArgs
| null |
class ItemSectionFieldPasswordRecipeArgs:
def __init__(__self__, *,
digits: Optional[pulumi.Input[bool]] = None,
length: Optional[pulumi.Input[int]] = None,
letters: Optional[pulumi.Input[bool]] = None,
symbols: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[bool] digits: Use digits [0-9] when generating the password.
:param pulumi.Input[int] length: The length of the password to be generated.
:param pulumi.Input[bool] letters: Use letters [a-zA-Z] when generating the password.
:param pulumi.Input[bool] symbols: Use symbols [!@.-_*] when generating the password.
"""
if digits is not None:
pulumi.set(__self__, "digits", digits)
if length is not None:
pulumi.set(__self__, "length", length)
if letters is not None:
pulumi.set(__self__, "letters", letters)
if symbols is not None:
pulumi.set(__self__, "symbols", symbols)
@property
@pulumi.getter
def digits(self) -> Optional[pulumi.Input[bool]]:
"""
Use digits [0-9] when generating the password.
"""
return pulumi.get(self, "digits")
@digits.setter
def digits(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "digits", value)
@property
@pulumi.getter
def length(self) -> Optional[pulumi.Input[int]]:
"""
The length of the password to be generated.
"""
return pulumi.get(self, "length")
@length.setter
def length(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "length", value)
@property
@pulumi.getter
def letters(self) -> Optional[pulumi.Input[bool]]:
"""
Use letters [a-zA-Z] when generating the password.
"""
return pulumi.get(self, "letters")
@letters.setter
def letters(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "letters", value)
@property
@pulumi.getter
def symbols(self) -> Optional[pulumi.Input[bool]]:
"""
Use symbols [!@.-_*] when generating the password.
"""
return pulumi.get(self, "symbols")
@symbols.setter
def symbols(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "symbols", value)
|
(*, digits: Union[bool, Awaitable[bool], ForwardRef('Output[T]'), NoneType] = None, length: Union[int, Awaitable[int], ForwardRef('Output[T]'), NoneType] = None, letters: Union[bool, Awaitable[bool], ForwardRef('Output[T]'), NoneType] = None, symbols: Union[bool, Awaitable[bool], ForwardRef('Output[T]'), NoneType] = None)
|
52,686 |
bmitzkus_pulumi_onepassword.provider
|
Provider
| null |
class Provider(pulumi.ProviderResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account: Optional[pulumi.Input[str]] = None,
op_cli_path: Optional[pulumi.Input[str]] = None,
service_account_token: Optional[pulumi.Input[str]] = None,
token: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The provider type for the onepassword package. By default, resources use package-wide configuration
settings, however an explicit `Provider` instance may be created and passed during resource
construction to achieve fine-grained programmatic control over provider settings. See the
[documentation](https://www.pulumi.com/docs/reference/programming-model/#providers) for more information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account: A valid account's sign-in address or ID to use biometric unlock. Can also be sourced from `OP_ACCOUNT` environment
variable. Provider will use the 1Password CLI if set.
:param pulumi.Input[str] op_cli_path: The path to the 1Password CLI binary. Can also be sourced from `OP_CLI_PATH` environment variable. Defaults to `op`.
:param pulumi.Input[str] service_account_token: A valid 1Password service account token. Can also be sourced from `OP_SERVICE_ACCOUNT_TOKEN` environment variable.
Provider will use the 1Password CLI if set.
:param pulumi.Input[str] token: A valid token for your 1Password Connect server. Can also be sourced from `OP_CONNECT_TOKEN` environment variable.
Provider will use 1Password Connect server if set.
:param pulumi.Input[str] url: The HTTP(S) URL where your 1Password Connect server can be found. Can also be sourced from `OP_CONNECT_HOST` environment
variable. Provider will use 1Password Connect server if set.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ProviderArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The provider type for the onepassword package. By default, resources use package-wide configuration
settings, however an explicit `Provider` instance may be created and passed during resource
construction to achieve fine-grained programmatic control over provider settings. See the
[documentation](https://www.pulumi.com/docs/reference/programming-model/#providers) for more information.
:param str resource_name: The name of the resource.
:param ProviderArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account: Optional[pulumi.Input[str]] = None,
op_cli_path: Optional[pulumi.Input[str]] = None,
service_account_token: Optional[pulumi.Input[str]] = None,
token: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProviderArgs.__new__(ProviderArgs)
__props__.__dict__["account"] = account
__props__.__dict__["op_cli_path"] = op_cli_path
__props__.__dict__["service_account_token"] = service_account_token
__props__.__dict__["token"] = token
__props__.__dict__["url"] = url
super(Provider, __self__).__init__(
'onepassword',
resource_name,
__props__,
opts)
@property
@pulumi.getter
def account(self) -> pulumi.Output[Optional[str]]:
"""
A valid account's sign-in address or ID to use biometric unlock. Can also be sourced from `OP_ACCOUNT` environment
variable. Provider will use the 1Password CLI if set.
"""
return pulumi.get(self, "account")
@property
@pulumi.getter(name="opCliPath")
def op_cli_path(self) -> pulumi.Output[Optional[str]]:
"""
The path to the 1Password CLI binary. Can also be sourced from `OP_CLI_PATH` environment variable. Defaults to `op`.
"""
return pulumi.get(self, "op_cli_path")
@property
@pulumi.getter(name="serviceAccountToken")
def service_account_token(self) -> pulumi.Output[Optional[str]]:
"""
A valid 1Password service account token. Can also be sourced from `OP_SERVICE_ACCOUNT_TOKEN` environment variable.
Provider will use the 1Password CLI if set.
"""
return pulumi.get(self, "service_account_token")
@property
@pulumi.getter
def token(self) -> pulumi.Output[Optional[str]]:
"""
A valid token for your 1Password Connect server. Can also be sourced from `OP_CONNECT_TOKEN` environment variable.
Provider will use 1Password Connect server if set.
"""
return pulumi.get(self, "token")
@property
@pulumi.getter
def url(self) -> pulumi.Output[Optional[str]]:
"""
The HTTP(S) URL where your 1Password Connect server can be found. Can also be sourced from `OP_CONNECT_HOST` environment
variable. Provider will use 1Password Connect server if set.
"""
return pulumi.get(self, "url")
|
(resource_name: str, *args, **kwargs)
|
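A sketch of pinning a resource to an explicit `Provider` instance rather than package-wide configuration, here pointed at a Connect server; the URL and config key are placeholders.
```python
import pulumi
import bmitzkus_pulumi_onepassword as onepassword

connect = onepassword.Provider("connect",
    url="https://connect.example.internal",                # placeholder host
    token=pulumi.Config().require_secret("connectToken"))  # placeholder key

item = onepassword.Item("via-connect",
    vault="hypothetical-vault-uuid",
    opts=pulumi.ResourceOptions(provider=connect))
```
|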
52,689 |
bmitzkus_pulumi_onepassword.provider
|
_internal_init
| null |
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account: Optional[pulumi.Input[str]] = None,
op_cli_path: Optional[pulumi.Input[str]] = None,
service_account_token: Optional[pulumi.Input[str]] = None,
token: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProviderArgs.__new__(ProviderArgs)
__props__.__dict__["account"] = account
__props__.__dict__["op_cli_path"] = op_cli_path
__props__.__dict__["service_account_token"] = service_account_token
__props__.__dict__["token"] = token
__props__.__dict__["url"] = url
super(Provider, __self__).__init__(
'onepassword',
resource_name,
__props__,
opts)
|
(__self__, resource_name: str, opts: Optional[pulumi.resource.ResourceOptions] = None, account: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, op_cli_path: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, service_account_token: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, token: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, url: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, __props__=None)
|
52,693 |
bmitzkus_pulumi_onepassword.provider
|
ProviderArgs
| null |
class ProviderArgs:
def __init__(__self__, *,
account: Optional[pulumi.Input[str]] = None,
op_cli_path: Optional[pulumi.Input[str]] = None,
service_account_token: Optional[pulumi.Input[str]] = None,
token: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Provider resource.
:param pulumi.Input[str] account: A valid account's sign-in address or ID to use biometric unlock. Can also be sourced from `OP_ACCOUNT` environment
variable. Provider will use the 1Password CLI if set.
:param pulumi.Input[str] op_cli_path: The path to the 1Password CLI binary. Can also be sourced from `OP_CLI_PATH` environment variable. Defaults to `op`.
:param pulumi.Input[str] service_account_token: A valid 1Password service account token. Can also be sourced from `OP_SERVICE_ACCOUNT_TOKEN` environment variable.
Provider will use the 1Password CLI if set.
:param pulumi.Input[str] token: A valid token for your 1Password Connect server. Can also be sourced from `OP_CONNECT_TOKEN` environment variable.
Provider will use 1Password Connect server if set.
:param pulumi.Input[str] url: The HTTP(S) URL where your 1Password Connect server can be found. Can also be sourced from `OP_CONNECT_HOST` environment
variable. Provider will use 1Password Connect server if set.
"""
if account is not None:
pulumi.set(__self__, "account", account)
if op_cli_path is not None:
pulumi.set(__self__, "op_cli_path", op_cli_path)
if service_account_token is not None:
pulumi.set(__self__, "service_account_token", service_account_token)
if token is not None:
pulumi.set(__self__, "token", token)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter
def account(self) -> Optional[pulumi.Input[str]]:
"""
A valid account's sign-in address or ID to use biometric unlock. Can also be sourced from `OP_ACCOUNT` environment
variable. Provider will use the 1Password CLI if set.
"""
return pulumi.get(self, "account")
@account.setter
def account(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account", value)
@property
@pulumi.getter(name="opCliPath")
def op_cli_path(self) -> Optional[pulumi.Input[str]]:
"""
The path to the 1Password CLI binary. Can also be sourced from `OP_CLI_PATH` environment variable. Defaults to `op`.
"""
return pulumi.get(self, "op_cli_path")
@op_cli_path.setter
def op_cli_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "op_cli_path", value)
@property
@pulumi.getter(name="serviceAccountToken")
def service_account_token(self) -> Optional[pulumi.Input[str]]:
"""
A valid 1Password service account token. Can also be sourced from `OP_SERVICE_ACCOUNT_TOKEN` environment variable.
Provider will use the 1Password CLI if set.
"""
return pulumi.get(self, "service_account_token")
@service_account_token.setter
def service_account_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account_token", value)
@property
@pulumi.getter
def token(self) -> Optional[pulumi.Input[str]]:
"""
A valid token for your 1Password Connect server. Can also be sourced from `OP_CONNECT_TOKEN` environment variable.
Provider will use 1Password Connect server if set.
"""
return pulumi.get(self, "token")
@token.setter
def token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "token", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
The HTTP(S) URL where your 1Password Connect server can be found. Can also be sourced from `OP_CONNECT_HOST` environment
variable. Provider will use 1Password Connect server if set.
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
|
(*, account: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, op_cli_path: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, service_account_token: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, token: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, url: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None)
|
52,695 |
bmitzkus_pulumi_onepassword.provider
|
__init__
|
The set of arguments for constructing a Provider resource.
:param pulumi.Input[str] account: A valid account's sign-in address or ID to use biometric unlock. Can also be sourced from `OP_ACCOUNT` environment
variable. Provider will use the 1Password CLI if set.
:param pulumi.Input[str] op_cli_path: The path to the 1Password CLI binary. Can also be sourced from `OP_CLI_PATH` environment variable. Defaults to `op`.
:param pulumi.Input[str] service_account_token: A valid 1Password service account token. Can also be sourced from `OP_SERVICE_ACCOUNT_TOKEN` environment variable.
Provider will use the 1Password CLI if set.
:param pulumi.Input[str] token: A valid token for your 1Password Connect server. Can also be sourced from `OP_CONNECT_TOKEN` environment variable.
Provider will use 1Password Connect server if set.
:param pulumi.Input[str] url: The HTTP(S) URL where your 1Password Connect server can be found. Can also be sourced from `OP_CONNECT_HOST` environment
variable. Provider will use 1Password Connect server if set.
|
def __init__(__self__, *,
account: Optional[pulumi.Input[str]] = None,
op_cli_path: Optional[pulumi.Input[str]] = None,
service_account_token: Optional[pulumi.Input[str]] = None,
token: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Provider resource.
:param pulumi.Input[str] account: A valid account's sign-in address or ID to use biometric unlock. Can also be sourced from `OP_ACCOUNT` environment
variable. Provider will use the 1Password CLI if set.
:param pulumi.Input[str] op_cli_path: The path to the 1Password CLI binary. Can also be sourced from `OP_CLI_PATH` environment variable. Defaults to `op`.
:param pulumi.Input[str] service_account_token: A valid 1Password service account token. Can also be sourced from `OP_SERVICE_ACCOUNT_TOKEN` environment variable.
Provider will use the 1Password CLI if set.
:param pulumi.Input[str] token: A valid token for your 1Password Connect server. Can also be sourced from `OP_CONNECT_TOKEN` environment variable.
Provider will use 1Password Connect server if set.
:param pulumi.Input[str] url: The HTTP(S) URL where your 1Password Connect server can be found. Can also be sourced from `OP_CONNECT_HOST` environment
variable. Provider will use 1Password Connect server if set.
"""
if account is not None:
pulumi.set(__self__, "account", account)
if op_cli_path is not None:
pulumi.set(__self__, "op_cli_path", op_cli_path)
if service_account_token is not None:
pulumi.set(__self__, "service_account_token", service_account_token)
if token is not None:
pulumi.set(__self__, "token", token)
if url is not None:
pulumi.set(__self__, "url", url)
|
(__self__, *, account: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, op_cli_path: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, service_account_token: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, token: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None, url: Union[str, Awaitable[str], ForwardRef('Output[T]'), NoneType] = None)
|
52,699 |
bmitzkus_pulumi_onepassword.get_item
|
get_item
|
Use this data source to get details of an item by its vault uuid and either the title or the uuid of the item.
## Example Usage
```python
import pulumi
import pulumi_onepassword as onepassword
config = pulumi.Config()
example = onepassword.get_item(vault=config.require("demo_vault"),
    uuid=config.require("demo_item_uuid"))  # illustrative config keys
```
:param str note_value: Secure Note value.
:param str title: The title of the item to retrieve. This field will be populated with the title of the item if the item is looked up by its UUID.
:param str uuid: The UUID of the item to retrieve. This field will be populated with the UUID of the item if the item is looked up by its title.
:param str vault: The UUID of the vault the item is in.
|
def get_item(note_value: Optional[str] = None,
title: Optional[str] = None,
uuid: Optional[str] = None,
vault: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetItemResult:
"""
Use this data source to get details of an item by its vault uuid and either the title or the uuid of the item.
## Example Usage
```python
import pulumi
import pulumi_onepassword as onepassword
config = pulumi.Config()
example = onepassword.get_item(vault=config.require("demo_vault"),
    uuid=config.require("demo_item_uuid"))  # illustrative config keys
```
:param str note_value: Secure Note value.
:param str title: The title of the item to retrieve. This field will be populated with the title of the item if the item is looked up by its UUID.
:param str uuid: The UUID of the item to retrieve. This field will be populated with the UUID of the item if the item is looked up by its title.
:param str vault: The UUID of the vault the item is in.
"""
__args__ = dict()
__args__['noteValue'] = note_value
__args__['title'] = title
__args__['uuid'] = uuid
__args__['vault'] = vault
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('onepassword:index/getItem:getItem', __args__, opts=opts, typ=GetItemResult).value
return AwaitableGetItemResult(
category=pulumi.get(__ret__, 'category'),
database=pulumi.get(__ret__, 'database'),
hostname=pulumi.get(__ret__, 'hostname'),
id=pulumi.get(__ret__, 'id'),
note_value=pulumi.get(__ret__, 'note_value'),
password=pulumi.get(__ret__, 'password'),
port=pulumi.get(__ret__, 'port'),
sections=pulumi.get(__ret__, 'sections'),
tags=pulumi.get(__ret__, 'tags'),
title=pulumi.get(__ret__, 'title'),
type=pulumi.get(__ret__, 'type'),
url=pulumi.get(__ret__, 'url'),
username=pulumi.get(__ret__, 'username'),
uuid=pulumi.get(__ret__, 'uuid'),
vault=pulumi.get(__ret__, 'vault'))
|
(note_value: Optional[str] = None, title: Optional[str] = None, uuid: Optional[str] = None, vault: Optional[str] = None, opts: Optional[pulumi.invoke.InvokeOptions] = None) -> bmitzkus_pulumi_onepassword.get_item.AwaitableGetItemResult
|
52,701 |
bmitzkus_pulumi_onepassword.get_vault
|
get_vault
|
Use this data source to get details of a vault by either its name or uuid.
:param str name: The name of the vault to retrieve. This field will be populated with the name of the vault if the vault is looked up by its UUID.
:param str uuid: The UUID of the vault to retrieve. This field will be populated with the UUID of the vault if the vault is looked up by its name.
|
def get_vault(name: Optional[str] = None,
uuid: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVaultResult:
"""
Use this data source to get details of a vault by either its name or uuid.
:param str name: The name of the vault to retrieve. This field will be populated with the name of the vault if the vault is looked up by its UUID.
:param str uuid: The UUID of the vault to retrieve. This field will be populated with the UUID of the vault if the vault is looked up by its name.
"""
__args__ = dict()
__args__['name'] = name
__args__['uuid'] = uuid
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('onepassword:index/getVault:getVault', __args__, opts=opts, typ=GetVaultResult).value
return AwaitableGetVaultResult(
description=pulumi.get(__ret__, 'description'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
uuid=pulumi.get(__ret__, 'uuid'))
|
(name: Optional[str] = None, uuid: Optional[str] = None, opts: Optional[pulumi.invoke.InvokeOptions] = None) -> bmitzkus_pulumi_onepassword.get_vault.AwaitableGetVaultResult
|
52,707 |
flaml.automl.automl
|
AutoML
|
The AutoML class.
Example:
```python
automl = AutoML()
automl_settings = {
"time_budget": 60,
"metric": 'accuracy',
"task": 'classification',
"log_file_name": 'mylog.log',
}
automl.fit(X_train = X_train, y_train = y_train, **automl_settings)
```
|
class AutoML(BaseEstimator):
"""The AutoML class.
Example:
```python
automl = AutoML()
automl_settings = {
"time_budget": 60,
"metric": 'accuracy',
"task": 'classification',
"log_file_name": 'mylog.log',
}
automl.fit(X_train = X_train, y_train = y_train, **automl_settings)
```
"""
__version__ = flaml_version
def __init__(self, **settings):
"""Constructor.
Many settings in fit() can be passed to the constructor too.
If an argument in fit() is provided, it will override the setting passed to the constructor.
If an argument in fit() is not provided but provided in the constructor, the value passed to the constructor will be used.
Args:
metric: A string of the metric name or a function,
e.g., 'accuracy', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo', 'roc_auc_weighted',
'roc_auc_ovo_weighted', 'roc_auc_ovr_weighted', 'f1', 'micro_f1', 'macro_f1',
'log_loss', 'mae', 'mse', 'r2', 'mape'. Default is 'auto'.
If passing a customized metric function, the function needs to
have the following input arguments:
```python
def custom_metric(
X_test, y_test, estimator, labels,
X_train, y_train, weight_test=None, weight_train=None,
config=None, groups_test=None, groups_train=None,
):
return metric_to_minimize, metrics_to_log
```
which returns a float number as the minimization objective,
and a dictionary as the metrics to log. E.g.,
```python
def custom_metric(
X_val, y_val, estimator, labels,
X_train, y_train, weight_val=None, weight_train=None,
*args,
):
from sklearn.metrics import log_loss
import time
start = time.time()
y_pred = estimator.predict_proba(X_val)
pred_time = (time.time() - start) / len(X_val)
val_loss = log_loss(y_val, y_pred, labels=labels, sample_weight=weight_val)
y_pred = estimator.predict_proba(X_train)
train_loss = log_loss(y_train, y_pred, labels=labels, sample_weight=weight_train)
alpha = 0.5
return val_loss * (1 + alpha) - alpha * train_loss, {
"val_loss": val_loss,
"train_loss": train_loss,
"pred_time": pred_time,
}
```
task: A string of the task type, e.g.,
'classification', 'regression', 'ts_forecast', 'rank',
'seq-classification', 'seq-regression', 'summarization',
or an instance of the Task class.
n_jobs: An integer of the number of threads for training | default=-1.
Use all available resources when n_jobs == -1.
log_file_name: A string of the log file name | default="". To disable logging,
set it to be an empty string "".
estimator_list: A list of strings for estimator names, or 'auto'.
e.g., ```['lgbm', 'xgboost', 'xgb_limitdepth', 'catboost', 'rf', 'extra_tree']```.
time_budget: A float number of the time budget in seconds.
Use -1 if no time limit.
max_iter: An integer of the maximal number of iterations.
sample: A boolean of whether to sample the training data during
search.
ensemble: boolean or dict | default=False. Whether to perform
ensemble after search. Can be a dict with keys 'passthrough'
and 'final_estimator' to specify the passthrough and
final_estimator in the stacker. The dict can also contain
'n_jobs' as the key to specify the number of jobs for the stacker.
eval_method: A string of resampling strategy, one of
['auto', 'cv', 'holdout'].
split_ratio: A float of the validation data percentage for holdout.
n_splits: An integer of the number of folds for cross-validation.
log_type: A string of the log type, one of
['better', 'all'].
'better' only logs configs with better loss than previous iterations;
'all' logs all the tried configs.
model_history: A boolean of whether to keep the best
model per estimator. Make sure memory is large enough if setting to True.
log_training_metric: A boolean of whether to log the training
metric for each model.
mem_thres: A float of the memory size constraint in bytes.
pred_time_limit: A float of the prediction latency constraint in seconds.
It refers to the average prediction time per row in validation data.
train_time_limit: A float of the training time constraint in seconds.
verbose: int, default=3 | Controls the verbosity, higher means more
messages.
retrain_full: bool or str, default=True | whether to retrain the
selected model on the full training data when using holdout.
True - retrain only after search finishes; False - no retraining;
'budget' - do best effort to retrain without violating the time
budget.
split_type: str or splitter object, default="auto" | the data split type.
* A valid splitter object is an instance of a derived class of scikit-learn
[KFold](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html#sklearn.model_selection.KFold)
that has ``split`` and ``get_n_splits`` methods with the same signatures.
Set eval_method to "cv" to use the splitter object.
* Valid str options depend on different tasks.
For classification tasks, valid choices are
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
"auto" -> uniform.
For time series forecast tasks, must be "auto" or 'time'.
For ranking task, must be "auto" or 'group'.
hpo_method: str, default="auto" | The hyperparameter
optimization method. By default, CFO is used for sequential
search and BlendSearch is used for parallel search.
No need to set when using flaml's default search space or using
a simple customized search space. When set to 'bs', BlendSearch
is used. BlendSearch can be tried when the search space is
complex, for example, containing multiple disjoint, discontinuous
subspaces. When set to 'random', random search is used.
starting_points: A dictionary or a str to specify the starting hyperparameter
config for the estimators | default="static".
If str:
- if "data", use data-dependent defaults;
- if "data:path" use data-dependent defaults which are stored at path;
- if "static", use data-independent defaults.
If dict, keys are the name of the estimators, and values are the starting
hyperparameter configurations for the corresponding estimators.
The value can be a single hyperparameter configuration dict or a list
of hyperparameter configuration dicts.
In the following code example, we get starting_points from the
`automl` object and use them in the `new_automl` object.
e.g.,
```python
from flaml import AutoML
automl = AutoML()
X_train, y_train = load_iris(return_X_y=True)
automl.fit(X_train, y_train)
starting_points = automl.best_config_per_estimator
new_automl = AutoML()
new_automl.fit(X_train, y_train, starting_points=starting_points)
```
seed: int or None, default=None | The random seed for hpo.
n_concurrent_trials: [In preview] int, default=1 | The number of
concurrent trials. When n_concurrent_trials > 1, flaml performs
[parallel tuning](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning)
and installation of ray or spark is required: `pip install flaml[ray]`
or `pip install flaml[spark]`. Please check
[here](https://spark.apache.org/docs/latest/api/python/getting_started/install.html)
for more details about installing Spark.
keep_search_state: boolean, default=False | Whether to keep data needed
for model search after fit(). By default the state is deleted for
space saving.
preserve_checkpoint: boolean, default=True | Whether to preserve the saved checkpoint
on disk when deleting automl. By default the checkpoint is preserved.
early_stop: boolean, default=False | Whether to stop early if the
search is considered to converge.
force_cancel: boolean, default=False | Whether to forcibly cancel Spark jobs if the
search time exceeds the time budget.
append_log: boolean, default=False | Whether to directly append the log
records to the input log file if it exists.
auto_augment: boolean, default=True | Whether to automatically
augment rare classes.
min_sample_size: int, default=MIN_SAMPLE_TRAIN | the minimal sample
size when sample=True.
use_ray: boolean or dict.
If boolean: default=False | Whether to use ray to run the training
in separate processes. This can be used to prevent OOM for large
datasets, but will incur more overhead in time.
If dict: the dict contains the keywords arguments to be passed to
[ray.tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html).
use_spark: boolean, default=False | Whether to use spark to run the training
in parallel spark jobs. This can be used to accelerate training on large models
and large datasets, but will incur more overhead in time and thus slow down
training in some cases. GPU training is not supported yet when use_spark is True.
For Spark clusters, by default, we will launch one trial per executor. However,
sometimes we want to launch more trials than the number of executors (e.g., local mode).
In this case, we can set the environment variable `FLAML_MAX_CONCURRENT` to override
the detected `num_executors`. The final number of concurrent trials will be the minimum
of `n_concurrent_trials` and `num_executors`.
free_mem_ratio: float between 0 and 1, default=0. The free memory ratio to keep during training.
metric_constraints: list, default=[] | The list of metric constraints.
Each element in this list is a 3-tuple, which shall be expressed
in the following format: the first element of the 3-tuple is the name of the
metric, the second element is the inequality sign chosen from ">=" and "<=",
and the third element is the constraint value. E.g., `('val_loss', '<=', 0.1)`.
Note that all the metric names in metric_constraints need to be reported via
the metrics_to_log dictionary returned by a customized metric function.
The customized metric function shall be provided via the `metric` key word
argument of the fit() function or the automl constructor.
Find an example in the 4th constraint type in this [doc](/docs/Use-Cases/Task-Oriented-AutoML#constraint).
If `pred_time_limit` is provided as one of keyword arguments to fit() function or
the automl constructor, flaml will automatically (and under the hood)
add it as an additional element in the metric_constraints. Essentially, 'pred_time_limit'
specifies a constraint on the average prediction latency in seconds.
custom_hp: dict, default=None | The custom search space specified by user.
It is a nested dict with keys being the estimator names, and values being dicts
per estimator search space. In the per estimator search space dict,
the keys are the hyperparameter names, and values are dicts of info ("domain",
"init_value", and "low_cost_init_value") about the search space associated with
the hyperparameter (i.e., per hyperparameter search space dict). When custom_hp
is provided, the built-in search space which is also a nested dict of per estimator
search space dict, will be updated with custom_hp. Note that during this nested dict update,
the per hyperparameter search space dicts will be replaced (instead of updated) by the ones
provided in custom_hp. Note that the value for "domain" can either be a constant
or a sample.Domain object.
e.g.,
```python
custom_hp = {
"transformer_ms": {
"model_path": {
"domain": "albert-base-v2",
},
"learning_rate": {
"domain": tune.choice([1e-4, 1e-5]),
}
}
}
```
skip_transform: boolean, default=False | Whether to skip pre-processing data prior to modeling.
fit_kwargs_by_estimator: dict, default=None | The user specified keywords arguments, grouped by estimator name.
e.g.,
```python
fit_kwargs_by_estimator = {
"transformer": {
"output_dir": "test/data/output/",
"fp16": False,
}
}
```
mlflow_logging: boolean, default=True | Whether to log the training results to mlflow.
This requires mlflow to be installed and to have an active mlflow run.
FLAML will create nested runs.
"""
if ERROR:
raise ERROR
self._track_iter = 0
self._state = AutoMLState()
self._state.learner_classes = {}
self._settings = settings
# no budget by default
settings["time_budget"] = settings.get("time_budget", -1)
settings["task"] = settings.get("task", "classification")
settings["n_jobs"] = settings.get("n_jobs", -1)
settings["eval_method"] = settings.get("eval_method", "auto")
settings["split_ratio"] = settings.get("split_ratio", SPLIT_RATIO)
settings["n_splits"] = settings.get("n_splits", N_SPLITS)
settings["auto_augment"] = settings.get("auto_augment", True)
settings["metric"] = settings.get("metric", "auto")
settings["estimator_list"] = settings.get("estimator_list", "auto")
settings["log_file_name"] = settings.get("log_file_name", "")
settings["max_iter"] = settings.get("max_iter") # no budget by default
settings["sample"] = settings.get("sample", True)
settings["ensemble"] = settings.get("ensemble", False)
settings["log_type"] = settings.get("log_type", "better")
settings["model_history"] = settings.get("model_history", False)
settings["log_training_metric"] = settings.get("log_training_metric", False)
settings["mem_thres"] = settings.get("mem_thres", MEM_THRES)
settings["pred_time_limit"] = settings.get("pred_time_limit", np.inf)
settings["train_time_limit"] = settings.get("train_time_limit", None)
settings["verbose"] = settings.get("verbose", 3)
settings["retrain_full"] = settings.get("retrain_full", True)
settings["split_type"] = settings.get("split_type", "auto")
settings["hpo_method"] = settings.get("hpo_method", "auto")
settings["learner_selector"] = settings.get("learner_selector", "sample")
settings["starting_points"] = settings.get("starting_points", "static")
settings["n_concurrent_trials"] = settings.get("n_concurrent_trials", 1)
settings["keep_search_state"] = settings.get("keep_search_state", False)
settings["preserve_checkpoint"] = settings.get("preserve_checkpoint", True)
settings["early_stop"] = settings.get("early_stop", False)
settings["force_cancel"] = settings.get("force_cancel", False)
settings["append_log"] = settings.get("append_log", False)
settings["min_sample_size"] = settings.get("min_sample_size", MIN_SAMPLE_TRAIN)
settings["use_ray"] = settings.get("use_ray", False)
settings["use_spark"] = settings.get("use_spark", False)
if settings["use_ray"] is not False and settings["use_spark"] is not False:
raise ValueError("use_ray and use_spark cannot be both True.")
settings["free_mem_ratio"] = settings.get("free_mem_ratio", 0)
settings["metric_constraints"] = settings.get("metric_constraints", [])
settings["cv_score_agg_func"] = settings.get("cv_score_agg_func", None)
settings["fit_kwargs_by_estimator"] = settings.get("fit_kwargs_by_estimator", {})
settings["custom_hp"] = settings.get("custom_hp", {})
settings["skip_transform"] = settings.get("skip_transform", False)
settings["mlflow_logging"] = settings.get("mlflow_logging", True)
self._estimator_type = "classifier" if settings["task"] in CLASSIFICATION else "regressor"
def get_params(self, deep: bool = False) -> dict:
return self._settings.copy()
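As the constructor docstring notes, settings passed here act as defaults that fit() arguments can override. A small sketch of that interplay (the setting values are arbitrary):
```python
from flaml import AutoML

# Constructor settings become defaults; get_params() reflects them after
# the built-in defaults are filled in.
automl = AutoML(task="classification", metric="accuracy", time_budget=60)
assert automl.get_params()["metric"] == "accuracy"

# A later fit() call can override any of them, e.g. the time budget:
# automl.fit(X_train, y_train, time_budget=120)
```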
@property
def config_history(self) -> dict:
"""A dictionary of iter->(estimator, config, time),
storing the best estimator, config, and the time when the best
model is updated each time.
"""
return self._config_history
@property
def model(self):
"""An object with `predict()` and `predict_proba()` method (for
classification), storing the best trained model.
"""
return self.__dict__.get("_trained_estimator")
def best_model_for_estimator(self, estimator_name: str):
"""Return the best model found for a particular estimator.
Args:
estimator_name: a str of the estimator's name.
Returns:
An object storing the best model for estimator_name.
If `model_history` was set to False during fit(), then the returned model
is untrained unless estimator_name is the best estimator.
If `model_history` was set to True, then the returned model is trained.
"""
state = self._search_states.get(estimator_name)
return state and getattr(state, "trained_estimator", None)
@property
def best_estimator(self):
"""A string indicating the best estimator found."""
return self._best_estimator
@property
def best_iteration(self):
"""An integer of the iteration number where the best
config is found."""
return self._best_iteration
@property
def best_config(self):
"""A dictionary of the best configuration."""
state = self._search_states.get(self._best_estimator)
config = state and getattr(state, "best_config", None)
return config and AutoMLState.sanitize(config)
@property
def best_config_per_estimator(self):
"""A dictionary of all estimators' best configuration."""
return {
e: e_search_state.best_config and AutoMLState.sanitize(e_search_state.best_config)
for e, e_search_state in self._search_states.items()
}
@property
def best_loss_per_estimator(self):
"""A dictionary of all estimators' best loss."""
return {e: e_search_state.best_loss for e, e_search_state in self._search_states.items()}
@property
def best_loss(self):
"""A float of the best loss found."""
return self._state.best_loss
@property
def best_result(self):
"""Result dictionary for model trained with the best config."""
state = self._search_states.get(self._best_estimator)
return state and getattr(state, "best_result", None)
@property
def metrics_for_best_config(self):
"""Returns a float of the best loss, and a dictionary of the auxiliary metrics to log
associated with the best config. These two objects correspond to the returned
objects by the customized metric function for the config with the best loss."""
state = self._search_states.get(self._best_estimator)
return self._state.best_loss, state and getattr(state, "best_result", {}).get("metric_for_logging")
@property
def best_config_train_time(self):
"""A float of the seconds taken by training the best config."""
return getattr(self._search_states[self._best_estimator], "best_config_train_time", None)
def save_best_config(self, filename):
best = {
"class": self.best_estimator,
"hyperparameters": self.best_config,
}
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as f:
json.dump(best, f)
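save_best_config() persists the best estimator name and its sanitized configuration as JSON. A hedged usage sketch; the path is arbitrary, and the method creates the parent directory itself:
```python
import json

# After automl.fit(...) has found a model:
automl.save_best_config("results/best_config.json")

with open("results/best_config.json") as f:
    best = json.load(f)
print(best["class"])            # the best estimator name, e.g. 'lgbm'
print(best["hyperparameters"])  # the sanitized best_config dict
```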
@property
def feature_transformer(self):
"""Returns feature transformer which is used to preprocess data before applying training or inference."""
return getattr(self, "_transformer", None)
@property
def label_transformer(self):
"""Returns label transformer which is used to preprocess labels before scoring, and inverse transform labels after inference."""
return getattr(self, "_label_transformer", None)
@property
def classes_(self):
"""A numpy array of shape (n_classes,) for class labels."""
attr = getattr(self, "_label_transformer", None)
if attr:
return attr.classes_
attr = getattr(self, "_trained_estimator", None)
if attr:
return attr.classes_
return None
@property
def n_features_in_(self):
return self._trained_estimator.n_features_in_
@property
def feature_names_in_(self):
attr = getattr(self, "_trained_estimator", None)
attr = attr and getattr(attr, "feature_names_in_", None)
if attr is not None:
return attr
return getattr(self, "_feature_names_in_", None)
@property
def feature_importances_(self):
attr = getattr(self, "_trained_estimator", None)
attr = attr and getattr(attr, "feature_importances_", None)
return attr
@property
def time_to_find_best_model(self) -> float:
"""Time taken to find best model in seconds."""
return self.__dict__.get("_time_taken_best_iter")
def score(
self,
X: Union[DataFrame, psDataFrame],
y: Union[Series, psSeries],
**kwargs,
):
estimator = getattr(self, "_trained_estimator", None)
if estimator is None:
logger.warning("No estimator is trained. Please run fit with enough budget.")
return None
X = self._state.task.preprocess(X, self._transformer)
if self._label_transformer:
y = self._label_transformer.transform(y)
return estimator.score(X, y, **kwargs)
def predict(
self,
X: Union[np.array, DataFrame, List[str], List[List[str]], psDataFrame],
**pred_kwargs,
):
"""Predict label from features.
Args:
X: A numpy array or pandas dataframe or pyspark.pandas dataframe
of featurized instances, shape n * m,
or for time series forecast tasks:
a pandas dataframe with the first column containing
timestamp values (datetime type) or an integer n for
the predict steps (only valid when the estimator is
arima or sarimax). Other columns in the dataframe
are assumed to be exogenous variables (categorical
or numeric).
**pred_kwargs: Other key word arguments to pass to predict() function of
the searched learners, such as per_device_eval_batch_size.
```python
multivariate_X_test = DataFrame({
'timeStamp': pd.date_range(start='1/1/2022', end='1/07/2022'),
'categorical_col': ['yes', 'yes', 'no', 'no', 'yes', 'no', 'yes'],
'continuous_col': [105, 107, 120, 118, 110, 112, 115]
})
model.predict(multivariate_X_test)
```
Returns:
An array-like of shape n * 1: each element is a predicted
label for an instance.
"""
estimator = getattr(self, "_trained_estimator", None)
if estimator is None:
logger.warning("No estimator is trained. Please run fit with enough budget.")
return None
X = self._state.task.preprocess(X, self._transformer)
y_pred = estimator.predict(X, **pred_kwargs)
if isinstance(y_pred, np.ndarray) and y_pred.ndim > 1:
y_pred = y_pred.flatten()
if self._label_transformer:
return self._label_transformer.inverse_transform(Series(y_pred.astype(int)))
else:
return y_pred
def predict_proba(self, X, **pred_kwargs):
"""Predict the probability of each class from features, only works for
classification problems.
Args:
X: A numpy array of featurized instances, shape n * m.
**pred_kwargs: Other key word arguments to pass to predict_proba() function of
the searched learners, such as per_device_eval_batch_size.
Returns:
A numpy array of shape n * c, where c is the number of classes. Each element at
(i, j) is the probability for instance i to be in class j.
"""
estimator = getattr(self, "_trained_estimator", None)
if estimator is None:
logger.warning("No estimator is trained. Please run fit with enough budget.")
return None
X = self._state.task.preprocess(X, self._transformer)
proba = estimator.predict_proba(X, **pred_kwargs)
return proba
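A minimal sketch of the inference flow after a classification fit(); X_test is an assumed hold-out set with the same featurization as the training data:
```python
# Hard labels, inverse-transformed back to the original label space.
y_pred = automl.predict(X_test)

# Class probabilities of shape (n, n_classes); classification tasks only.
proba = automl.predict_proba(X_test)
```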
def add_learner(self, learner_name, learner_class):
"""Add a customized learner.
Args:
learner_name: A string of the learner's name.
learner_class: A subclass of flaml.automl.model.BaseEstimator.
"""
self._state.learner_classes[learner_name] = learner_class
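A hedged sketch of the registration pattern the docstring describes: subclass an estimator base class, point estimator_class at a scikit-learn-style model, and declare a search space. GradientBoostingClassifier, the learner name "gbm_sketch", and the single tuned hyperparameter are illustrative choices, not part of this module:
```python
from sklearn.ensemble import GradientBoostingClassifier
from flaml.automl.model import SKLearnEstimator
from flaml import tune

class GBMEstimator(SKLearnEstimator):
    def __init__(self, task="binary", **config):
        super().__init__(task, **config)
        # Any scikit-learn-compatible estimator class works here.
        self.estimator_class = GradientBoostingClassifier

    @classmethod
    def search_space(cls, data_size, task):
        # One hyperparameter is enough for a sketch; real learners tune more.
        return {
            "n_estimators": {
                "domain": tune.lograndint(lower=4, upper=512),
                "init_value": 4,
                "low_cost_init_value": 4,
            },
        }

automl.add_learner("gbm_sketch", GBMEstimator)
# automl.fit(X_train, y_train, estimator_list=["gbm_sketch"], time_budget=30)
```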
def get_estimator_from_log(self, log_file_name: str, record_id: int, task: Union[str, Task]):
"""Get the estimator from log file.
Args:
log_file_name: A string of the log file name.
record_id: An integer of the record ID in the file,
0 corresponds to the first trial.
task: A string of the task type,
'binary', 'multiclass', 'regression', 'ts_forecast', 'rank',
or an instance of the Task class.
Returns:
An estimator object for the given configuration.
"""
with training_log_reader(log_file_name) as reader:
record = reader.get_record(record_id)
estimator = record.learner
config = AutoMLState.sanitize(record.config)
if isinstance(task, str):
task = task_factory(task)
estimator, _ = train_estimator(
X_train=None,
y_train=None,
config_dic=config,
task=task,
estimator_name=estimator,
estimator_class=self._state.learner_classes.get(estimator),
eval_metric="train_time",
)
return estimator
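A hedged usage sketch: rebuild the estimator recorded as the first trial of an earlier search log (record_id=0), reusing the "mylog.log" file name from the class-level example:
```python
# Trains the logged configuration and returns the estimator object.
est = automl.get_estimator_from_log(
    "mylog.log", record_id=0, task="classification"
)
```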
def retrain_from_log(
self,
log_file_name,
X_train=None,
y_train=None,
dataframe=None,
label=None,
time_budget=np.inf,
task: Optional[Union[str, Task]] = None,
eval_method=None,
split_ratio=None,
n_splits=None,
split_type=None,
groups=None,
n_jobs=-1,
# gpu_per_trial=0,
train_best=True,
train_full=False,
record_id=-1,
auto_augment=None,
custom_hp=None,
skip_transform=None,
preserve_checkpoint=True,
fit_kwargs_by_estimator=None,
**fit_kwargs,
):
"""Retrain from log file.
This function is intended to retrain the logged configurations.
NOTE: In some rare cases, the last config is early-stopped to meet time_budget and happens to be the best config.
But the logged config's ITER_HP (e.g., n_estimators) is not reduced.
Args:
log_file_name: A string of the log file name.
X_train: A numpy array or dataframe of training data in shape n*m.
For time series forecast tasks, the first column of X_train must be the timestamp column (datetime type). Other columns in the dataframe are assumed to be exogenous variables (categorical or numeric).
y_train: A numpy array or series of labels in shape n*1.
dataframe: A dataframe of training data including label column.
For time series forecast tasks, dataframe must be specified and should
have at least two columns: timestamp and label, where the first
column is the timestamp column (datetime type). Other columns
in the dataframe are assumed to be exogenous variables
(categorical or numeric).
label: A str of the label column name, e.g., 'label';
Note: If X_train and y_train are provided,
dataframe and label are ignored;
If not, dataframe and label must be provided.
time_budget: A float number of the time budget in seconds.
task: A string of the task type, e.g.,
'classification', 'regression', 'ts_forecast', 'rank',
'seq-classification', 'seq-regression', 'summarization',
or an instance of Task class.
eval_method: A string of resampling strategy, one of
['auto', 'cv', 'holdout'].
split_ratio: A float of the validation data percentage for holdout.
n_splits: An integer of the number of folds for cross-validation.
split_type: str or splitter object, default="auto" | the data split type.
* A valid splitter object is an instance of a derived class of scikit-learn
[KFold](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html#sklearn.model_selection.KFold)
that has ``split`` and ``get_n_splits`` methods with the same signatures.
Set eval_method to "cv" to use the splitter object.
* Valid str options depend on different tasks.
For classification tasks, valid choices are
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
"auto" -> uniform.
For time series forecast tasks, must be "auto" or 'time'.
For ranking task, must be "auto" or 'group'.
groups: None or array-like | Group labels (with matching length to
y_train) or group counts (with sum equal to length of y_train)
for training data.
n_jobs: An integer of the number of threads for training | default=-1.
Use all available resources when n_jobs == -1.
train_best: A boolean of whether to train the best config in the
time budget; if false, train the last config in the budget.
train_full: A boolean of whether to train on the full data. If true,
eval_method and sample_size in the log file will be ignored.
record_id: the ID of the training log record from which the model will
be retrained. By default `record_id = -1` which means this will be
ignored. `record_id = 0` corresponds to the first trial, and
when `record_id >= 0`, `time_budget` will be ignored.
auto_augment: boolean, default=True | Whether to automatically
augment rare classes.
custom_hp: dict, default=None | The custom search space specified by the user.
Each key is the estimator name, each value is a dict of the custom search space for that estimator. Notice the
domain of the custom search space can either be a value or a sample.Domain object.
```python
custom_hp = {
"transformer_ms": {
"model_path": {
"domain": "albert-base-v2",
},
"learning_rate": {
"domain": tune.choice([1e-4, 1e-5]),
}
}
}
```
fit_kwargs_by_estimator: dict, default=None | The user specified keywords arguments, grouped by estimator name.
e.g.,
```python
fit_kwargs_by_estimator = {
"transformer": {
"output_dir": "test/data/output/",
"fp16": False,
}
}
```
**fit_kwargs: Other key word arguments to pass to fit() function of
the searched learners, such as sample_weight. Below are a few examples of
estimator-specific parameters:
period: int | forecast horizon for all time series forecast tasks.
gpu_per_trial: float, default = 0 | A float of the number of gpus per trial,
only used by TransformersEstimator, XGBoostSklearnEstimator, and
TemporalFusionTransformerEstimator.
group_ids: list of strings of column names identifying a time series, only
used by TemporalFusionTransformerEstimator, required for
'ts_forecast_panel' task. `group_ids` is a parameter for TimeSeriesDataSet object
from PyTorchForecasting.
For other parameters to describe your dataset, refer to
[TimeSeriesDataSet PyTorchForecasting](https://pytorch-forecasting.readthedocs.io/en/stable/api/pytorch_forecasting.data.timeseries.TimeSeriesDataSet.html).
To specify your variables, use `static_categoricals`, `static_reals`,
`time_varying_known_categoricals`, `time_varying_known_reals`,
`time_varying_unknown_categoricals`, `time_varying_unknown_reals`,
`variable_groups`. To provide more information on your data, use
`max_encoder_length`, `min_encoder_length`, `lags`.
log_dir: str, default = "lightning_logs" | Folder into which to log results
for tensorboard, only used by TemporalFusionTransformerEstimator.
max_epochs: int, default = 20 | Maximum number of epochs to run training,
only used by TemporalFusionTransformerEstimator.
batch_size: int, default = 64 | Batch size for training model, only
used by TemporalFusionTransformerEstimator.
"""
task = task or self._settings.get("task")
if isinstance(task, str):
task = task_factory(task)
eval_method = eval_method or self._settings.get("eval_method")
split_ratio = split_ratio or self._settings.get("split_ratio")
n_splits = n_splits or self._settings.get("n_splits")
split_type = split_type or self._settings.get("split_type")
auto_augment = self._settings.get("auto_augment") if auto_augment is None else auto_augment
self._state.task = task
self._estimator_type = "classifier" if task.is_classification() else "regressor"
self._state.fit_kwargs = fit_kwargs
self._state.custom_hp = custom_hp or self._settings.get("custom_hp")
self._skip_transform = self._settings.get("skip_transform") if skip_transform is None else skip_transform
self._state.fit_kwargs_by_estimator = fit_kwargs_by_estimator or self._settings.get("fit_kwargs_by_estimator")
self.preserve_checkpoint = (
self._settings.get("preserve_checkpoint") if preserve_checkpoint is None else preserve_checkpoint
)
task.validate_data(self, self._state, X_train, y_train, dataframe, label, groups=groups)
logger.info("log file name {}".format(log_file_name))
best_config = None
best_val_loss = float("+inf")
best_estimator = None
sample_size = None
time_used = 0.0
training_duration = 0
best = None
with training_log_reader(log_file_name) as reader:
if record_id >= 0:
best = reader.get_record(record_id)
else:
for record in reader.records():
time_used = record.wall_clock_time
if time_used > time_budget:
break
training_duration = time_used
val_loss = record.validation_loss
if val_loss <= best_val_loss or not train_best:
if val_loss == best_val_loss and train_best:
size = record.sample_size
if size > sample_size:
best = record
best_val_loss = val_loss
sample_size = size
else:
best = record
size = record.sample_size
best_val_loss = val_loss
sample_size = size
if not training_duration:
logger.warning(f"No estimator found within time_budget={time_budget}")
from .model import BaseEstimator as Estimator
self._trained_estimator = Estimator()
return training_duration
if not best:
return
best_estimator = best.learner
best_config = best.config
sample_size = len(self._y_train_all) if train_full else best.sample_size
this_estimator_kwargs = self._state.fit_kwargs_by_estimator.get(best_estimator)
if this_estimator_kwargs:
this_estimator_kwargs = (
this_estimator_kwargs.copy()
) # make another shallow copy of the value (a dict obj), so user's fit_kwargs_by_estimator won't be updated
this_estimator_kwargs.update(self._state.fit_kwargs)
self._state.fit_kwargs_by_estimator[best_estimator] = this_estimator_kwargs
else:
self._state.fit_kwargs_by_estimator[best_estimator] = self._state.fit_kwargs
logger.info(
"estimator = {}, config = {}, #training instances = {}".format(best_estimator, best_config, sample_size)
)
# Partially copied from fit() function
# Initialize some attributes required for retrain_from_log
self._split_type = task.decide_split_type(
split_type,
self._y_train_all,
self._state.fit_kwargs,
self._state.groups,
)
eval_method = self._decide_eval_method(eval_method, time_budget)
self.modelcount = 0
self._auto_augment = auto_augment
self._prepare_data(eval_method, split_ratio, n_splits)
self._state.time_budget = -1
self._state.free_mem_ratio = 0
self._state.n_jobs = n_jobs
import os
self._state.resources_per_trial = (
{
"cpu": max(1, os.cpu_count() >> 1),
"gpu": fit_kwargs.get("gpu_per_trial", 0),
}
if self._state.n_jobs < 0
else {"cpu": self._state.n_jobs, "gpu": fit_kwargs.get("gpu_per_trial", 0)}
)
self._trained_estimator = self._state._train_with_config(
best_estimator,
best_config,
sample_size=sample_size,
)[0]
logger.info("retrain from log succeeded")
return training_duration
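A hedged sketch of the retraining entry point: replay an earlier log and retrain the best configuration found within the first 60 seconds of that search. The log file name mirrors the class-level example; X_train and y_train are assumed training data:
```python
from flaml import AutoML

new_automl = AutoML()
duration = new_automl.retrain_from_log(
    log_file_name="mylog.log",
    X_train=X_train,
    y_train=y_train,
    task="classification",
    time_budget=60,  # only records logged within this budget are considered
)
```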
def _decide_eval_method(self, eval_method, time_budget):
if not isinstance(self._split_type, str):
assert eval_method in [
"auto",
"cv",
], "eval_method must be 'auto' or 'cv' for custom data splitter."
assert self._state.X_val is None, "custom splitter and custom validation data can't be used together."
return "cv"
if self._state.X_val is not None and (
not isinstance(self._state.X_val, TimeSeriesDataset) or len(self._state.X_val.test_data) > 0
):
assert eval_method in [
"auto",
"holdout",
], "eval_method must be 'auto' or 'holdout' for custom validation data."
return "holdout"
if eval_method != "auto":
assert eval_method in [
"holdout",
"cv",
], "eval_method must be 'holdout', 'cv' or 'auto'."
return eval_method
nrow, dim = self._nrow, self._ndim
if (
time_budget < 0
or nrow * dim / 0.9 < SMALL_LARGE_THRES * (time_budget / 3600)
and nrow < CV_HOLDOUT_THRESHOLD
):
# time allows or sampling can be used and cv is necessary
return "cv"
else:
return "holdout"
@property
def search_space(self) -> dict:
"""Search space.
Must be called after fit(...)
(use max_iter=0 and retrain_full=False to prevent actual fitting).
Returns:
A dict of the search space.
"""
estimator_list = self.estimator_list
if len(estimator_list) == 1:
estimator = estimator_list[0]
space = self._search_states[estimator].search_space.copy()
space["learner"] = estimator
return space
choices = []
for estimator in estimator_list:
space = self._search_states[estimator].search_space.copy()
space["learner"] = estimator
choices.append(space)
return {"ml": tune.choice(choices)}
@property
def low_cost_partial_config(self) -> dict:
"""Low cost partial config.
Returns:
A dict.
(a) if there is only one estimator in estimator_list, each key is a
hyperparameter name.
(b) otherwise, it is a nested dict with 'ml' as the key, and
a list of the low_cost_partial_configs as the value, corresponding
to each learner's low_cost_partial_config; the estimator index as
an integer corresponding to the cheapest learner is appended to the
list at the end.
"""
if len(self.estimator_list) == 1:
estimator = self.estimator_list[0]
c = self._search_states[estimator].low_cost_partial_config
return c
else:
configs = []
for estimator in self.estimator_list:
c = self._search_states[estimator].low_cost_partial_config
configs.append(c)
configs.append(
np.argmin(
[
self._state.learner_classes.get(estimator).cost_relative2lgbm()
for estimator in self.estimator_list
]
)
)
config = {"ml": configs}
return config
@property
def cat_hp_cost(self) -> dict:
"""Categorical hyperparameter cost
Returns:
A dict.
(a) if there is only one estimator in estimator_list, each key is a
hyperparameter name.
(b) otherwise, it is a nested dict with 'ml' as the key, and
a list of the cat_hp_cost's as the value, corresponding
to each learner's cat_hp_cost; the cost relative to lgbm for each
learner (as a list itself) is appended to the list at the end.
"""
if len(self.estimator_list) == 1:
estimator = self.estimator_list[0]
c = self._search_states[estimator].cat_hp_cost
return c
else:
configs = []
for estimator in self.estimator_list:
c = self._search_states[estimator].cat_hp_cost
configs.append(c)
configs.append(
[self._state.learner_classes.get(estimator).cost_relative2lgbm() for estimator in self.estimator_list]
)
config = {"ml": configs}
return config
@property
def points_to_evaluate(self) -> dict:
"""Initial points to evaluate.
Returns:
A list of dicts. Each dict is the initial point for each learner.
"""
points = []
for estimator in self.estimator_list:
configs = self._search_states[estimator].init_config
for config in configs:
config["learner"] = estimator
if len(self.estimator_list) > 1:
points.append({"ml": config})
else:
points.append(config)
return points
@property
def resource_attr(self) -> Optional[str]:
"""Attribute of the resource dimension.
Returns:
A string for the sample size attribute
(the resource attribute in AutoML) or None.
"""
return "FLAML_sample_size" if self._sample else None
@property
def min_resource(self) -> Optional[float]:
"""Attribute for pruning.
Returns:
A float for the minimal sample size or None.
"""
return self._min_sample_size if self._sample else None
@property
def max_resource(self) -> Optional[float]:
"""Attribute for pruning.
Returns:
A float for the maximal sample size or None.
"""
return self._state.data_size[0] if self._sample else None
def pickle(self, output_file_name):
import pickle
estimator_to_training_function = {}
for estimator in self.estimator_list:
search_state = self._search_states[estimator]
if hasattr(search_state, "training_function"):
estimator_to_training_function[estimator] = search_state.training_function
del search_state.training_function
with open(output_file_name, "wb") as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
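pickle() removes the per-estimator training functions (which may not be picklable) before serializing the object. A hedged round-trip sketch with an arbitrary file name:
```python
import pickle

automl.pickle("automl.pkl")  # training functions are stripped first

with open("automl.pkl", "rb") as f:
    restored = pickle.load(f)
print(restored.best_estimator)
```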
@property
def trainable(self) -> Callable[[dict], Optional[float]]:
"""Training function.
Returns:
A function that evaluates each config and returns the loss.
"""
self._state.time_from_start = 0
states = self._search_states
mem_res = self._mem_thres
def train(config: dict, state, is_report=True):
# handle spark broadcast variables
state = get_broadcast_data(state)
is_report = get_broadcast_data(is_report)
sample_size = config.get("FLAML_sample_size")
config = config.get("ml", config).copy()
if sample_size:
config["FLAML_sample_size"] = sample_size
estimator = config["learner"]
# check memory constraints before training
if states[estimator].learner_class.size(config) <= mem_res:
del config["learner"]
config.pop("_choice_", None)
result = AutoMLState._compute_with_config_base(
config, state=state, estimator=estimator, is_report=is_report
)
else:
# If the search algorithm is not from flaml, it does not handle the config constraint; we should also call tune.report before returning
result = {
"pred_time": 0,
"wall_clock_time": None,
"metric_for_logging": np.inf,
"val_loss": np.inf,
"trained_estimator": None,
}
if is_report is True:
tune.report(**result)
return result
if self._use_ray is not False:
from ray.tune import with_parameters
return with_parameters(
train,
state=self._state,
)
elif self._use_spark:
from flaml.tune.spark.utils import with_parameters
return with_parameters(train, state=self._state, is_report=False)
else:
return partial(
train,
state=self._state,
)
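The properties above (search_space, low_cost_partial_config, points_to_evaluate, the resource attributes) together with trainable expose enough for flaml.tune to drive the search loop directly. A hedged sketch, assuming a prior automl.fit(..., max_iter=0, keep_search_state=True) so that these properties are populated:
```python
from flaml import tune

# Reuse AutoML's own search space and training function with the tune API.
analysis = tune.run(
    automl.trainable,
    config=automl.search_space,
    low_cost_partial_config=automl.low_cost_partial_config,
    points_to_evaluate=automl.points_to_evaluate,
    metric="val_loss",  # the trainable reports 'val_loss'
    mode="min",
    num_samples=8,
)
```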
@property
def metric_constraints(self) -> list:
"""Metric constraints.
Returns:
A list of the metric constraints.
"""
return self._metric_constraints
def _prepare_data(self, eval_method, split_ratio, n_splits):
self._state.task.prepare_data(
self._state,
self._X_train_all,
self._y_train_all,
self._auto_augment,
eval_method,
self._split_type,
split_ratio,
n_splits,
self._df,
self._sample_weight_full,
)
self.data_size_full = self._state.data_size_full
def fit(
self,
X_train=None,
y_train=None,
dataframe=None,
label=None,
metric=None,
task: Optional[Union[str, Task]] = None,
n_jobs=None,
# gpu_per_trial=0,
log_file_name=None,
estimator_list=None,
time_budget=None,
max_iter=None,
sample=None,
ensemble=None,
eval_method=None,
log_type=None,
model_history=None,
split_ratio=None,
n_splits=None,
log_training_metric=None,
mem_thres=None,
pred_time_limit=None,
train_time_limit=None,
X_val=None,
y_val=None,
sample_weight_val=None,
groups_val=None,
groups=None,
verbose=None,
retrain_full=None,
split_type=None,
learner_selector=None,
hpo_method=None,
starting_points=None,
seed=None,
n_concurrent_trials=None,
keep_search_state=None,
preserve_checkpoint=True,
early_stop=None,
force_cancel=None,
append_log=None,
auto_augment=None,
min_sample_size=None,
use_ray=None,
use_spark=None,
free_mem_ratio=0,
metric_constraints=None,
custom_hp=None,
time_col=None,
cv_score_agg_func=None,
skip_transform=None,
mlflow_logging=None,
fit_kwargs_by_estimator=None,
**fit_kwargs,
):
"""Find a model for a given task.
Args:
X_train: A numpy array or a pandas dataframe of training data in
shape (n, m). For time series forecast tasks, the first column of X_train
must be the timestamp column (datetime type). Other columns in
the dataframe are assumed to be exogenous variables (categorical or numeric).
When using ray, X_train can be a ray.ObjectRef.
y_train: A numpy array or a pandas series of labels in shape (n, ).
dataframe: A dataframe of training data including label column.
For time series forecast tasks, dataframe must be specified and must have
at least two columns, timestamp and label, where the first
column is the timestamp column (datetime type). Other columns in
the dataframe are assumed to be exogenous variables (categorical or numeric).
When using ray, dataframe can be a ray.ObjectRef.
label: A str of the label column name, e.g., 'label';
Note: If X_train and y_train are provided,
dataframe and label are ignored;
If not, dataframe and label must be provided.
metric: A string of the metric name or a function,
e.g., 'accuracy', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo', 'roc_auc_weighted',
'roc_auc_ovo_weighted', 'roc_auc_ovr_weighted', 'f1', 'micro_f1', 'macro_f1',
'log_loss', 'mae', 'mse', 'r2', 'mape'. Default is 'auto'.
If passing a customized metric function, the function needs to
have the following input arguments:
```python
def custom_metric(
X_test, y_test, estimator, labels,
X_train, y_train, weight_test=None, weight_train=None,
config=None, groups_test=None, groups_train=None,
):
return metric_to_minimize, metrics_to_log
```
which returns a float number as the minimization objective,
and a dictionary as the metrics to log. E.g.,
```python
def custom_metric(
X_val, y_val, estimator, labels,
X_train, y_train, weight_val=None, weight_train=None,
*args,
):
from sklearn.metrics import log_loss
import time
start = time.time()
y_pred = estimator.predict_proba(X_val)
pred_time = (time.time() - start) / len(X_val)
val_loss = log_loss(y_val, y_pred, labels=labels, sample_weight=weight_val)
y_pred = estimator.predict_proba(X_train)
train_loss = log_loss(y_train, y_pred, labels=labels, sample_weight=weight_train)
alpha = 0.5
return val_loss * (1 + alpha) - alpha * train_loss, {
"val_loss": val_loss,
"train_loss": train_loss,
"pred_time": pred_time,
}
```
task: A string of the task type, e.g.,
'classification', 'regression', 'ts_forecast_regression',
'ts_forecast_classification', 'rank', 'seq-classification',
'seq-regression', 'summarization', or an instance of Task class
n_jobs: An integer of the number of threads for training | default=-1.
Use all available resources when n_jobs == -1.
log_file_name: A string of the log file name | default="". To disable logging,
set it to be an empty string "".
estimator_list: A list of strings for estimator names, or 'auto'.
e.g., ```['lgbm', 'xgboost', 'xgb_limitdepth', 'catboost', 'rf', 'extra_tree']```.
time_budget: A float number of the time budget in seconds.
Use -1 if no time limit.
max_iter: An integer of the maximal number of iterations.
NOTE: when both time_budget and max_iter are unspecified,
only one model will be trained per estimator.
sample: A boolean of whether to sample the training data during
search.
ensemble: boolean or dict | default=False. Whether to perform
ensemble after search. Can be a dict with keys 'passthrough'
and 'final_estimator' to specify the passthrough and
final_estimator in the stacker. The dict can also contain
'n_jobs' as the key to specify the number of jobs for the stacker.
eval_method: A string of resampling strategy, one of
['auto', 'cv', 'holdout'].
split_ratio: A float of the validation data percentage for holdout.
n_splits: An integer of the number of folds for cross-validation.
log_type: A string of the log type, one of
['better', 'all'].
'better' only logs configs with better loss than previous iterations;
'all' logs all the tried configs.
model_history: A boolean of whether to keep the trained best
model per estimator. Make sure memory is large enough if setting to True.
Default value is False: best_model_for_estimator would return an
untrained model for non-best learners.
log_training_metric: A boolean of whether to log the training
metric for each model.
mem_thres: A float of the memory size constraint in bytes.
pred_time_limit: A float of the prediction latency constraint in seconds.
It refers to the average prediction time per row in validation data.
train_time_limit: None or a float of the training time constraint in seconds.
X_val: None or a numpy array or a pandas dataframe of validation data.
y_val: None or a numpy array or a pandas series of validation labels.
sample_weight_val: None or a numpy array of the sample weight of
validation data of the same shape as y_val.
groups_val: None or array-like | group labels (with matching length
to y_val) or group counts (with sum equal to length of y_val)
for validation data. Need to be consistent with groups.
groups: None or array-like | Group labels (with matching length to
y_train) or group counts (with sum equal to length of y_train)
for training data.
verbose: int, default=3 | Controls the verbosity, higher means more
messages.
retrain_full: bool or str, default=True | whether to retrain the
selected model on the full training data when using holdout.
True - retrain only after search finishes; False - no retraining;
'budget' - do best effort to retrain without violating the time
budget.
split_type: str or splitter object, default="auto" | the data split type.
* A valid splitter object is an instance of a derived class of scikit-learn
[KFold](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html#sklearn.model_selection.KFold)
that has ``split`` and ``get_n_splits`` methods with the same signatures.
Set eval_method to "cv" to use the splitter object.
* Valid str options depend on different tasks.
For classification tasks, valid choices are
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
"auto" -> uniform.
For time series forecast tasks, must be "auto" or 'time'.
For ranking task, must be "auto" or 'group'.
hpo_method: str, default="auto" | The hyperparameter
optimization method. By default, CFO is used for sequential
search and BlendSearch is used for parallel search.
No need to set when using flaml's default search space or using
a simple customized search space. When set to 'bs', BlendSearch
is used. BlendSearch can be tried when the search space is
complex, for example, containing multiple disjoint, discontinuous
subspaces. When set to 'random', random search is used.
starting_points: A dictionary or a str to specify the starting hyperparameter
config for the estimators | default="static".
If str:
- if "data", use data-dependent defaults;
- if "data:path" use data-dependent defaults which are stored at path;
- if "static", use data-independent defaults.
If dict, keys are the name of the estimators, and values are the starting
hyperparameter configurations for the corresponding estimators.
The value can be a single hyperparameter configuration dict or a list
of hyperparameter configuration dicts.
In the following code example, we get starting_points from the
`automl` object and use them in the `new_automl` object.
e.g.,
```python
from flaml import AutoML
automl = AutoML()
X_train, y_train = load_iris(return_X_y=True)
automl.fit(X_train, y_train)
starting_points = automl.best_config_per_estimator
new_automl = AutoML()
new_automl.fit(X_train, y_train, starting_points=starting_points)
```
seed: int or None, default=None | The random seed for hpo.
n_concurrent_trials: [In preview] int, default=1 | The number of
concurrent trials. When n_concurrent_trials > 1, flaml performs
[parallel tuning](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning)
and installation of ray or spark is required: `pip install flaml[ray]`
or `pip install flaml[spark]`. Please check
[here](https://spark.apache.org/docs/latest/api/python/getting_started/install.html)
for more details about installing Spark.
keep_search_state: boolean, default=False | Whether to keep data needed
for model search after fit(). By default the state is deleted for
space saving.
preserve_checkpoint: boolean, default=True | Whether to preserve the saved checkpoint
on disk when deleting automl. By default the checkpoint is preserved.
early_stop: boolean, default=False | Whether to stop early if the
search is considered to converge.
force_cancel: boolean, default=False | Whether to forcibly cancel the PySpark job if it runs over the time budget.
append_log: boolean, default=False | Whether to directly append the log
records to the input log file if it exists.
auto_augment: boolean, default=True | Whether to automatically
augment rare classes.
min_sample_size: int, default=MIN_SAMPLE_TRAIN | the minimal sample
size when sample=True.
use_ray: boolean or dict.
If boolean: default=False | Whether to use ray to run the training
in separate processes. This can be used to prevent OOM for large
datasets, but will incur more overhead in time.
If dict: the dict contains the keywords arguments to be passed to
[ray.tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html).
use_spark: boolean, default=False | Whether to use spark to run the training
in parallel spark jobs. This can be used to accelerate training on large models
and large datasets, but will incur more overhead in time and thus slow down
training in some cases.
free_mem_ratio: float between 0 and 1, default=0. The free memory ratio to keep during training.
metric_constraints: list, default=[] | The list of metric constraints.
Each element in this list is a 3-tuple, which shall be expressed
in the following format: the first element of the 3-tuple is the name of the
metric, the second element is the inequality sign chosen from ">=" and "<=",
and the third element is the constraint value. E.g., `('precision', '>=', 0.9)`.
Note that all the metric names in metric_constraints need to be reported via
the metrics_to_log dictionary returned by a customized metric function.
The customized metric function shall be provided via the `metric` key word argument
of the fit() function or the automl constructor.
Find examples in this [test](https://github.com/microsoft/FLAML/tree/main/test/automl/test_constraints.py).
If `pred_time_limit` is provided as one of keyword arguments to fit() function or
the automl constructor, flaml will automatically (and under the hood)
add it as an additional element in the metric_constraints. Essentially, 'pred_time_limit'
specifies a constraint on the average prediction latency in seconds.
custom_hp: dict, default=None | The custom search space specified by the user.
Each key is the estimator name, each value is a dict of the custom search space for that estimator. Notice the
domain of the custom search space can either be a value or a sample.Domain object.
```python
custom_hp = {
"transformer_ms": {
"model_path": {
"domain": "albert-base-v2",
},
"learning_rate": {
"domain": tune.choice([1e-4, 1e-5]),
}
}
}
```
time_col: for a time series task, name of the column containing the timestamps. If not
provided, defaults to the first column of X_train/X_val.
cv_score_agg_func: customized cross-validation score aggregation function. Defaults to averaging metrics across folds. If specified, this function needs to
have the following input arguments:
* val_loss_folds: list of floats, the loss scores of each fold;
* log_metrics_folds: list of dicts/floats, the metrics of each fold to log.
This function should return the final aggregate result over all folds: a float for the minimization objective, and a dictionary of the metrics to log (or None).
E.g.,
```python
def cv_score_agg_func(val_loss_folds, log_metrics_folds):
metric_to_minimize = sum(val_loss_folds)/len(val_loss_folds)
metrics_to_log = None
for single_fold in log_metrics_folds:
if metrics_to_log is None:
metrics_to_log = single_fold
elif isinstance(metrics_to_log, dict):
metrics_to_log = {k: metrics_to_log[k] + v for k, v in single_fold.items()}
else:
metrics_to_log += single_fold
if metrics_to_log:
n = len(val_loss_folds)
metrics_to_log = (
{k: v / n for k, v in metrics_to_log.items()}
if isinstance(metrics_to_log, dict)
else metrics_to_log / n
)
return metric_to_minimize, metrics_to_log
```
skip_transform: boolean, default=False | Whether to skip pre-processing data prior to modeling.
mlflow_logging: boolean, default=None | Whether to log the training results to mlflow.
Default value is None, which means the logging decision is made based on
AutoML.__init__'s mlflow_logging argument.
This requires mlflow to be installed and to have an active mlflow run.
FLAML will create nested runs.
fit_kwargs_by_estimator: dict, default=None | The user specified keywords arguments, grouped by estimator name.
For TransformersEstimator, available fit_kwargs can be found from
[TrainingArgumentsForAuto](nlp/huggingface/training_args).
e.g.,
```python
fit_kwargs_by_estimator = {
"transformer": {
"output_dir": "test/data/output/",
"fp16": False,
},
"tft": {
"max_encoder_length": 1,
"min_encoder_length": 1,
"static_categoricals": [],
"static_reals": [],
"time_varying_known_categoricals": [],
"time_varying_known_reals": [],
"time_varying_unknown_categoricals": [],
"time_varying_unknown_reals": [],
"variable_groups": {},
"lags": {},
}
}
```
**fit_kwargs: Other key word arguments to pass to fit() function of
the searched learners, such as sample_weight. Below are a few examples of
estimator-specific parameters:
period: int | forecast horizon for all time series forecast tasks.
gpu_per_trial: float, default = 0 | A float of the number of gpus per trial,
only used by TransformersEstimator, XGBoostSklearnEstimator, and
TemporalFusionTransformerEstimator.
group_ids: list of strings of column names identifying a time series, only
used by TemporalFusionTransformerEstimator, required for
'ts_forecast_panel' task. `group_ids` is a parameter for TimeSeriesDataSet object
from PyTorchForecasting.
For other parameters to describe your dataset, refer to
[TimeSeriesDataSet PyTorchForecasting](https://pytorch-forecasting.readthedocs.io/en/stable/api/pytorch_forecasting.data.timeseries.TimeSeriesDataSet.html).
To specify your variables, use `static_categoricals`, `static_reals`,
`time_varying_known_categoricals`, `time_varying_known_reals`,
`time_varying_unknown_categoricals`, `time_varying_unknown_reals`,
`variable_groups`. To provide more information on your data, use
`max_encoder_length`, `min_encoder_length`, `lags`.
log_dir: str, default = "lightning_logs" | Folder into which to log results
for tensorboard, only used by TemporalFusionTransformerEstimator.
max_epochs: int, default = 20 | Maximum number of epochs to run training,
only used by TemporalFusionTransformerEstimator.
batch_size: int, default = 64 | Batch size for training model, only
used by TemporalFusionTransformerEstimator.
"""
self._state._start_time_flag = self._start_time_flag = time.time()
task = task or self._settings.get("task")
if isinstance(task, str):
task = task_factory(task, X_train, y_train)
self._state.task = task
self._state.task.time_col = time_col
self._estimator_type = "classifier" if task.is_classification() else "regressor"
time_budget = time_budget or self._settings.get("time_budget")
n_jobs = n_jobs or self._settings.get("n_jobs")
gpu_per_trial = fit_kwargs.get("gpu_per_trial", 0)
eval_method = eval_method or self._settings.get("eval_method")
split_ratio = split_ratio or self._settings.get("split_ratio")
n_splits = n_splits or self._settings.get("n_splits")
auto_augment = self._settings.get("auto_augment") if auto_augment is None else auto_augment
metric = metric or self._settings.get("metric")
estimator_list = estimator_list or self._settings.get("estimator_list")
log_file_name = self._settings.get("log_file_name") if log_file_name is None else log_file_name
max_iter = self._settings.get("max_iter") if max_iter is None else max_iter
sample_is_none = sample is None
if sample_is_none:
sample = self._settings.get("sample")
ensemble = self._settings.get("ensemble") if ensemble is None else ensemble
log_type = log_type or self._settings.get("log_type")
model_history = self._settings.get("model_history") if model_history is None else model_history
log_training_metric = (
self._settings.get("log_training_metric") if log_training_metric is None else log_training_metric
)
mem_thres = mem_thres or self._settings.get("mem_thres")
pred_time_limit = pred_time_limit or self._settings.get("pred_time_limit")
train_time_limit = train_time_limit or self._settings.get("train_time_limit")
self._metric_constraints = metric_constraints or self._settings.get("metric_constraints")
if np.isfinite(pred_time_limit):
self._metric_constraints.append(("pred_time", "<=", pred_time_limit))
verbose = self._settings.get("verbose") if verbose is None else verbose
retrain_full = self._settings.get("retrain_full") if retrain_full is None else retrain_full
split_type = split_type or self._settings.get("split_type")
hpo_method = hpo_method or self._settings.get("hpo_method")
learner_selector = learner_selector or self._settings.get("learner_selector")
no_starting_points = starting_points is None
if no_starting_points:
starting_points = self._settings.get("starting_points")
n_concurrent_trials = n_concurrent_trials or self._settings.get("n_concurrent_trials")
keep_search_state = self._settings.get("keep_search_state") if keep_search_state is None else keep_search_state
self.preserve_checkpoint = (
self._settings.get("preserve_checkpoint") if preserve_checkpoint is None else preserve_checkpoint
)
early_stop = self._settings.get("early_stop") if early_stop is None else early_stop
force_cancel = self._settings.get("force_cancel") if force_cancel is None else force_cancel
# no search budget is provided?
no_budget = time_budget < 0 and max_iter is None and not early_stop
append_log = self._settings.get("append_log") if append_log is None else append_log
min_sample_size = min_sample_size or self._settings.get("min_sample_size")
use_ray = self._settings.get("use_ray") if use_ray is None else use_ray
use_spark = self._settings.get("use_spark") if use_spark is None else use_spark
if use_spark and use_ray is not False:
raise ValueError("use_spark and use_ray cannot be both True.")
elif use_spark:
spark_available, spark_error_msg = check_spark()
if not spark_available:
raise spark_error_msg
old_level = logger.getEffectiveLevel()
self.verbose = verbose
logger.setLevel(50 - verbose * 10)
if not logger.handlers:
# Add the console handler.
_ch = logging.StreamHandler(stream=sys.stdout)
_ch.setFormatter(logger_formatter)
logger.addHandler(_ch)
if not use_ray and not use_spark and n_concurrent_trials > 1:
if ray_available:
logger.warning(
"n_concurrent_trials > 1 is only supported when using Ray or Spark. "
"Ray installed, setting use_ray to True. If you want to use Spark, set use_spark to True."
)
use_ray = True
else:
spark_available, _ = check_spark()
if spark_available:
logger.warning(
"n_concurrent_trials > 1 is only supported when using Ray or Spark. "
"Spark installed, setting use_spark to True. If you want to use Ray, set use_ray to True."
)
use_spark = True
else:
logger.warning(
"n_concurrent_trials > 1 is only supported when using Ray or Spark. "
"Neither Ray nor Spark installed, setting n_concurrent_trials to 1."
)
n_concurrent_trials = 1
self._state.n_jobs = n_jobs
self._n_concurrent_trials = n_concurrent_trials
self._early_stop = early_stop
self._use_spark = use_spark
self._force_cancel = force_cancel
self._use_ray = use_ray
# use the following condition if we have an estimation of average_trial_time and average_trial_overhead
# self._use_ray = use_ray or n_concurrent_trials > ( average_trial_time + average_trial_overhead) / (average_trial_time)
if self._use_ray is not False:
import ray
n_cpus = ray.is_initialized() and ray.available_resources()["CPU"] or os.cpu_count()
self._state.resources_per_trial = (
# when using gpu, default cpu is 1 per trial; otherwise, roughly (n_cpus - 2) / 2 cpus are split among the concurrent trials
(
{
"cpu": max(int((n_cpus - 2) / 2 / n_concurrent_trials), 1),
"gpu": gpu_per_trial,
}
if gpu_per_trial == 0
else {"cpu": 1, "gpu": gpu_per_trial}
)
if n_jobs < 0
else {"cpu": n_jobs, "gpu": gpu_per_trial}
)
if isinstance(X_train, ray.ObjectRef):
X_train = ray.get(X_train)
elif isinstance(dataframe, ray.ObjectRef):
dataframe = ray.get(dataframe)
else:
# TODO: Integrate with Spark
self._state.resources_per_trial = {"cpu": n_jobs} if n_jobs > 0 else {"cpu": 1}
self._state.free_mem_ratio = self._settings.get("free_mem_ratio") if free_mem_ratio is None else free_mem_ratio
self._state.task = task
self._state.log_training_metric = log_training_metric
self._state.fit_kwargs = fit_kwargs
custom_hp = custom_hp or self._settings.get("custom_hp")
self._skip_transform = self._settings.get("skip_transform") if skip_transform is None else skip_transform
self._mlflow_logging = self._settings.get("mlflow_logging") if mlflow_logging is None else mlflow_logging
fit_kwargs_by_estimator = fit_kwargs_by_estimator or self._settings.get("fit_kwargs_by_estimator")
self._state.fit_kwargs_by_estimator = fit_kwargs_by_estimator.copy() # shallow copy of fit_kwargs_by_estimator
self._state.weight_val = sample_weight_val
task.validate_data(
self,
self._state,
X_train,
y_train,
dataframe,
label,
X_val,
y_val,
groups_val,
groups,
)
self._search_states = {} # key: estimator name; value: SearchState
self._random = np.random.RandomState(RANDOM_SEED)
self._seed = seed if seed is not None else 20
self._learner_selector = learner_selector
logger.info(f"task = {task}")
self._split_type = self._state.task.decide_split_type(
split_type,
self._y_train_all,
self._state.fit_kwargs,
self._state.groups,
)
if X_val is not None:
logger.info(f"Data split method: {self._split_type}")
eval_method = self._decide_eval_method(eval_method, time_budget)
self._state.eval_method = eval_method
logger.info("Evaluation method: {}".format(eval_method))
self._state.cv_score_agg_func = cv_score_agg_func or self._settings.get("cv_score_agg_func")
self._retrain_in_budget = retrain_full == "budget" and (eval_method == "holdout" and self._state.X_val is None)
self._auto_augment = auto_augment
_sample_size_from_starting_points = {}
if isinstance(starting_points, dict):
for _estimator, _point_per_estimator in starting_points.items():
sample_size = (
_point_per_estimator
and isinstance(_point_per_estimator, dict)
and _point_per_estimator.get("FLAML_sample_size")
)
if sample_size:
_sample_size_from_starting_points[_estimator] = sample_size
elif _point_per_estimator and isinstance(_point_per_estimator, list):
_sample_size_set = set(
[
config["FLAML_sample_size"]
for config in _point_per_estimator
if "FLAML_sample_size" in config
]
)
if _sample_size_set:
_sample_size_from_starting_points[_estimator] = min(_sample_size_set)
if len(_sample_size_set) > 1:
logger.warning(
"Using the min FLAML_sample_size of all the provided starting points for estimator {}. (Provided FLAML_sample_size are: {})".format(
_estimator, _sample_size_set
)
)
if not sample and isinstance(starting_points, dict):
assert (
not _sample_size_from_starting_points
), "When subsampling is disabled, do not include FLAML_sample_size in the starting point."
self._min_sample_size = _sample_size_from_starting_points or min_sample_size
self._min_sample_size_input = min_sample_size
self._prepare_data(eval_method, split_ratio, n_splits)
# TODO pull this to task as decide_sample_size
if isinstance(self._min_sample_size, dict):
self._sample = {
(
k,
sample
and not task.is_rank()
and eval_method != "cv"
and (self._min_sample_size[k] * SAMPLE_MULTIPLY_FACTOR < self._state.data_size[0]),
)
for k in self._min_sample_size.keys()
}
else:
self._sample = (
sample
and not task.is_rank()
and eval_method != "cv"
and (self._min_sample_size * SAMPLE_MULTIPLY_FACTOR < self._state.data_size[0])
)
metric = task.default_metric(metric)
self._state.metric = metric
# TODO pull this to task
def is_to_reverse_metric(metric, task):
if metric.startswith("ndcg"):
return True, f"1-{metric}"
if metric in [
"r2",
"accuracy",
"roc_auc",
"roc_auc_ovr",
"roc_auc_ovo",
"roc_auc_weighted",
"roc_auc_ovr_weighted",
"roc_auc_ovo_weighted",
"f1",
"ap",
"micro_f1",
"macro_f1",
]:
return True, f"1-{metric}"
if task.is_nlp():
from flaml.automl.ml import huggingface_metric_to_mode
if metric in huggingface_metric_to_mode and huggingface_metric_to_mode[metric] == "max":
return True, f"-{metric}"
return False, None
if isinstance(metric, str):
is_reverse, reverse_metric = is_to_reverse_metric(metric, task)
if is_reverse:
error_metric = reverse_metric
else:
error_metric = metric
else:
error_metric = "customized metric"
logger.info(f"Minimizing error metric: {error_metric}")
self._state.error_metric = error_metric
is_spark_dataframe = isinstance(X_train, psDataFrame) or isinstance(dataframe, psDataFrame)
estimator_list = task.default_estimator_list(estimator_list, is_spark_dataframe)
if is_spark_dataframe and self._use_spark:
# For spark dataframe, use_spark must be False because spark models are trained in parallel themselves
self._use_spark = False
logger.warning(
"Spark dataframes support only spark.ml type models, which will be trained "
"with spark themselves, no need to start spark trials in flaml. "
"`use_spark` is set to False."
)
# When no search budget is specified
if no_budget:
max_iter = len(estimator_list)
self._learner_selector = "roundrobin"
if sample_is_none:
self._sample = False
if no_starting_points:
starting_points = "data"
logger.warning(
"No search budget is provided via time_budget or max_iter."
" Training only one model per estimator."
" Zero-shot AutoML is used for certain tasks and estimators."
" To tune hyperparameters for each estimator,"
" please provide budget either via time_budget or max_iter."
)
elif max_iter is None:
# set to a large number
max_iter = 1000000
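# Retrain the final model after search when: holdout with retrain_full and no
# user-provided validation set (or when ray is used), or cross-validation with
# a positive iteration budget, or a single-iteration run.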
self._state.retrain_final = (
retrain_full is True
and eval_method == "holdout"
and (X_val is None or self._use_ray is not False)
or eval_method == "cv"
and (max_iter > 0 or retrain_full is True)
or max_iter == 1
)
# add custom learner
for estimator_name in estimator_list:
if estimator_name not in self._state.learner_classes:
self.add_learner(
estimator_name,
self._state.task.estimator_class_from_str(estimator_name),
)
# set up learner search space
if isinstance(starting_points, str) and starting_points.startswith("data"):
from flaml.default import suggest_config
location = starting_points[5:]
starting_points = {}
for estimator_name in estimator_list:
try:
configs = suggest_config(
self._state.task,
self._X_train_all,
self._y_train_all,
estimator_name,
location,
k=1,
)
starting_points[estimator_name] = [x["hyperparameters"] for x in configs]
except FileNotFoundError:
pass
try:
learner = suggest_learner(
self._state.task,
self._X_train_all,
self._y_train_all,
estimator_list=estimator_list,
location=location,
)
if learner != estimator_list[0]:
estimator_list.remove(learner)
estimator_list.insert(0, learner)
except FileNotFoundError:
pass
self._state.time_budget = time_budget
starting_points = {} if starting_points == "static" else starting_points
for estimator_name in estimator_list:
estimator_class = self._state.learner_classes[estimator_name]
estimator_class.init()
this_estimator_kwargs = self._state.fit_kwargs_by_estimator.get(estimator_name)
if this_estimator_kwargs:
# make another shallow copy of the value (a dict obj), so user's fit_kwargs_by_estimator won't be updated
this_estimator_kwargs = this_estimator_kwargs.copy()
this_estimator_kwargs.update(
self._state.fit_kwargs
) # update the shallow copy of fit_kwargs to fit_kwargs_by_estimator
self._state.fit_kwargs_by_estimator[
estimator_name
] = this_estimator_kwargs # set self._state.fit_kwargs_by_estimator[estimator_name] to the update, so only self._state.fit_kwargs_by_estimator will be updated
else:
self._state.fit_kwargs_by_estimator[estimator_name] = self._state.fit_kwargs
self._search_states[estimator_name] = SearchState(
learner_class=estimator_class,
# data_size=self._state.data_size,
data=self._state.X_train,
task=self._state.task,
starting_point=starting_points.get(estimator_name),
period=self._state.fit_kwargs.get(
"period"
), # NOTE: this is after kwargs is updated to fit_kwargs_by_estimator
custom_hp=custom_hp and custom_hp.get(estimator_name),
max_iter=max_iter / len(estimator_list) if self._learner_selector == "roundrobin" else max_iter,
budget=self._state.time_budget,
)
logger.info("List of ML learners in AutoML Run: {}".format(estimator_list))
self.estimator_list = estimator_list
self._active_estimators = estimator_list.copy()
self._ensemble = ensemble
self._max_iter = max_iter
self._mem_thres = mem_thres
self._pred_time_limit = pred_time_limit
self._state.train_time_limit = train_time_limit
self._log_type = log_type
self.split_ratio = split_ratio
self._state.model_history = model_history
self._hpo_method = (
hpo_method
if hpo_method != "auto"
else (
"bs"
if n_concurrent_trials > 1
or (self._use_ray is not False or self._use_spark)
and len(estimator_list) > 1
else "cfo"
)
)
if log_file_name:
with training_log_writer(log_file_name, append_log) as save_helper:
self._training_log = save_helper
self._search()
else:
self._training_log = None
self._search()
if self._best_estimator:
logger.info("fit succeeded")
logger.info(f"Time taken to find the best model: {self._time_taken_best_iter}")
if (
self._hpo_method in ("cfo", "bs")
and self._state.time_budget > 0
and (self._time_taken_best_iter >= self._state.time_budget * 0.7)
and not all(
state.search_alg and state.search_alg.searcher.is_ls_ever_converged
for state in self._search_states.values()
)
):
logger.warning(
"Time taken to find the best model is {0:.0f}% of the "
"provided time budget and not all estimators' hyperparameter "
"search converged. Consider increasing the time budget.".format(
self._time_taken_best_iter / self._state.time_budget * 100
)
)
if not keep_search_state:
# release space
del self._X_train_all, self._y_train_all, self._state.kf
del self._state.X_train, self._state.X_train_all, self._state.X_val
del self._state.y_train, self._state.y_train_all, self._state.y_val
del (
self._sample_weight_full,
self._state.fit_kwargs_by_estimator,
self._state.fit_kwargs,
) # NOTE: this is after kwargs is updated to fit_kwargs_by_estimator
del self._state.groups, self._state.groups_all, self._state.groups_val
logger.setLevel(old_level)
def _search_parallel(self):
if self._use_ray is not False:
try:
from ray import __version__ as ray_version
assert ray_version >= "1.10.0"
if ray_version.startswith("1."):
from ray.tune.suggest import ConcurrencyLimiter
else:
from ray.tune.search import ConcurrencyLimiter
import ray
except (ImportError, AssertionError):
raise ImportError("use_ray=True requires installation of ray. " "Please run pip install flaml[ray]")
else:
from flaml.tune.searcher.suggestion import ConcurrencyLimiter
if self._hpo_method in ("cfo", "grid"):
from flaml import CFO as SearchAlgo
elif "bs" == self._hpo_method:
from flaml import BlendSearch as SearchAlgo
elif "random" == self._hpo_method:
from flaml import RandomSearch as SearchAlgo
elif "optuna" == self._hpo_method:
if self._use_ray is not False:
try:
from ray import __version__ as ray_version
assert ray_version >= "1.10.0"
if ray_version.startswith("1."):
from ray.tune.suggest.optuna import OptunaSearch as SearchAlgo
else:
from ray.tune.search.optuna import OptunaSearch as SearchAlgo
except (ImportError, AssertionError):
from flaml.tune.searcher.suggestion import (
OptunaSearch as SearchAlgo,
)
else:
from flaml.tune.searcher.suggestion import OptunaSearch as SearchAlgo
else:
raise NotImplementedError(
f"hpo_method={self._hpo_method} is not recognized. " "'auto', 'cfo' and 'bs' are supported."
)
space = self.search_space
self._state.time_from_start = time.time() - self._start_time_flag
time_budget_s = self._state.time_budget - self._state.time_from_start if self._state.time_budget >= 0 else None
if self._hpo_method != "optuna":
min_resource = self.min_resource
if isinstance(min_resource, dict):
_min_resource_set = set(min_resource.values())
min_resource_all_estimator = min(_min_resource_set)
if len(_min_resource_set) > 1:
logger.warning(
"Using the min FLAML_sample_size of all the provided starting points as the starting sample size in the case of parallel search."
)
else:
min_resource_all_estimator = min_resource
search_alg = SearchAlgo(
metric="val_loss",
space=space,
low_cost_partial_config=self.low_cost_partial_config,
points_to_evaluate=self.points_to_evaluate,
cat_hp_cost=self.cat_hp_cost,
resource_attr=self.resource_attr,
min_resource=min_resource_all_estimator,
max_resource=self.max_resource,
config_constraints=[(partial(size, self._state.learner_classes), "<=", self._mem_thres)],
metric_constraints=self.metric_constraints,
seed=self._seed,
time_budget_s=time_budget_s,
num_samples=self._max_iter,
allow_empty_config=True,
)
else:
# if self._hpo_method is optuna, sometimes the search space and the initial config dimension do not match
# need to remove the extra keys from the search space to be consistent with the initial config
converted_space = SearchAlgo.convert_search_space(space)
removed_keys = set(space.keys()).difference(converted_space.keys())
new_points_to_evaluate = []
for idx in range(len(self.points_to_evaluate)):
r = self.points_to_evaluate[idx].copy()
for each_key in removed_keys:
r.pop(each_key)
new_points_to_evaluate.append(r)
search_alg = SearchAlgo(
metric="val_loss",
mode="min",
points_to_evaluate=[p for p in new_points_to_evaluate if len(p) == len(converted_space)],
)
search_alg = ConcurrencyLimiter(search_alg, self._n_concurrent_trials)
resources_per_trial = self._state.resources_per_trial
if self._use_spark:
# use spark as parallel backend
analysis = tune.run(
self.trainable,
search_alg=search_alg,
config=space,
metric="val_loss",
mode="min",
time_budget_s=time_budget_s,
num_samples=self._max_iter,
verbose=max(self.verbose - 2, 0),
use_ray=False,
use_spark=True,
force_cancel=self._force_cancel,
# raise_on_failed_trial=False,
# keep_checkpoints_num=1,
# checkpoint_score_attr="min-val_loss",
)
else:
# use ray as parallel backend
analysis = ray.tune.run(
self.trainable,
search_alg=search_alg,
config=space,
metric="val_loss",
mode="min",
resources_per_trial=resources_per_trial,
time_budget_s=time_budget_s,
num_samples=self._max_iter,
verbose=max(self.verbose - 2, 0),
raise_on_failed_trial=False,
keep_checkpoints_num=1,
checkpoint_score_attr="min-val_loss",
**self._use_ray if isinstance(self._use_ray, dict) else {},
)
# logger.info([trial.last_result for trial in analysis.trials])
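# Replay the completed trials in wall-clock order to rebuild the search history
# and track the best configuration found.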
trials = sorted(
(
trial
for trial in analysis.trials
if trial.last_result and trial.last_result.get("wall_clock_time") is not None
),
key=lambda x: x.last_result["wall_clock_time"],
)
for self._track_iter, trial in enumerate(trials):
result = trial.last_result
better = False
if result:
config = result["config"]
estimator = config.get("ml", config)["learner"]
search_state = self._search_states[estimator]
search_state.update(result, 0)
wall_time = result.get("wall_clock_time")
if wall_time is not None:
self._state.time_from_start = wall_time
self._iter_per_learner[estimator] += 1
if search_state.sample_size == self._state.data_size[0]:
if not self._fullsize_reached:
self._fullsize_reached = True
if search_state.best_loss < self._state.best_loss:
self._state.best_loss = search_state.best_loss
self._best_estimator = estimator
self._config_history[self._track_iter] = (
self._best_estimator,
config,
self._time_taken_best_iter,
)
self._trained_estimator = search_state.trained_estimator
self._best_iteration = self._track_iter
self._time_taken_best_iter = self._state.time_from_start
better = True
self._search_states[estimator].best_config = config
if better or self._log_type == "all":
self._log_trial(search_state, estimator)
def _log_trial(self, search_state, estimator):
if self._training_log:
self._training_log.append(
self._iter_per_learner[estimator],
search_state.metric_for_logging,
search_state.trial_time,
self._state.time_from_start,
search_state.val_loss,
search_state.config,
estimator,
search_state.sample_size,
)
if self._mlflow_logging and mlflow is not None and mlflow.active_run():
with mlflow.start_run(nested=True):
mlflow.log_metric("iter_counter", self._track_iter)
if (search_state.metric_for_logging is not None) and (
"intermediate_results" in search_state.metric_for_logging
):
for each_entry in search_state.metric_for_logging["intermediate_results"]:
with mlflow.start_run(nested=True):
mlflow.log_metrics(each_entry)
mlflow.log_metric("iter_counter", self._iter_per_learner[estimator])
del search_state.metric_for_logging["intermediate_results"]
if search_state.metric_for_logging:
mlflow.log_metrics(search_state.metric_for_logging)
mlflow.log_metric("trial_time", search_state.trial_time)
mlflow.log_metric("wall_clock_time", self._state.time_from_start)
mlflow.log_metric("validation_loss", search_state.val_loss)
mlflow.log_params(search_state.config)
mlflow.log_param("learner", estimator)
mlflow.log_param("sample_size", search_state.sample_size)
mlflow.log_metric("best_validation_loss", search_state.best_loss)
mlflow.log_param("best_config", search_state.best_config)
mlflow.log_param("best_learner", self._best_estimator)
mlflow.log_metric(
self._state.metric if isinstance(self._state.metric, str) else self._state.error_metric,
1 - search_state.val_loss
if self._state.error_metric.startswith("1-")
else -search_state.val_loss
if self._state.error_metric.startswith("-")
else search_state.val_loss,
)
def _search_sequential(self):
try:
from ray import __version__ as ray_version
assert ray_version >= "1.10.0"
if ray_version.startswith("1."):
from ray.tune.suggest import ConcurrencyLimiter
else:
from ray.tune.search import ConcurrencyLimiter
except (ImportError, AssertionError):
from flaml.tune.searcher.suggestion import ConcurrencyLimiter
if self._hpo_method in ("cfo", "grid"):
from flaml import CFO as SearchAlgo
elif "optuna" == self._hpo_method:
try:
from ray import __version__ as ray_version
assert ray_version >= "1.10.0"
if ray_version.startswith("1."):
from ray.tune.suggest.optuna import OptunaSearch as SearchAlgo
else:
from ray.tune.search.optuna import OptunaSearch as SearchAlgo
except (ImportError, AssertionError):
from flaml.tune.searcher.suggestion import OptunaSearch as SearchAlgo
elif "bs" == self._hpo_method:
from flaml import BlendSearch as SearchAlgo
elif "random" == self._hpo_method:
from flaml.tune.searcher import RandomSearch as SearchAlgo
elif "cfocat" == self._hpo_method:
from flaml.tune.searcher.cfo_cat import CFOCat as SearchAlgo
else:
raise NotImplementedError(
f"hpo_method={self._hpo_method} is not recognized. " "'cfo' and 'bs' are supported."
)
est_retrain_time = next_trial_time = 0
best_config_sig = None
better = True # whether we find a better model in one trial
for self._track_iter in range(self._max_iter):
if self._estimator_index is None:
estimator = self._active_estimators[0]
else:
estimator = self._select_estimator(self._active_estimators)
if not estimator:
break
logger.info(f"iteration {self._track_iter}, current learner {estimator}")
search_state = self._search_states[estimator]
self._state.time_from_start = time.time() - self._start_time_flag
time_left = self._state.time_budget - self._state.time_from_start
budget_left = (
time_left
if not self._retrain_in_budget
or better
or (not self.best_estimator)
or self._search_states[self.best_estimator].sample_size < self._state.data_size[0]
else time_left - est_retrain_time
)
if not search_state.search_alg:
search_state.training_function = partial(
AutoMLState._compute_with_config_base,
state=self._state,
estimator=estimator,
)
search_space = search_state.search_space
if self._sample:
resource_attr = "FLAML_sample_size"
min_resource = (
self._min_sample_size[estimator]
if isinstance(self._min_sample_size, dict) and estimator in self._min_sample_size
else self._min_sample_size_input
)
max_resource = self._state.data_size[0]
else:
resource_attr = min_resource = max_resource = None
learner_class = self._state.learner_classes.get(estimator)
if "grid" == self._hpo_method: # for synthetic exp only
points_to_evaluate = []
space = search_space
keys = list(space.keys())
domain0, domain1 = space[keys[0]], space[keys[1]]
for x1 in range(domain0.lower, domain0.upper + 1):
for x2 in range(domain1.lower, domain1.upper + 1):
points_to_evaluate.append(
{
keys[0]: x1,
keys[1]: x2,
}
)
self._max_iter_per_learner = len(points_to_evaluate)
low_cost_partial_config = None
else:
points_to_evaluate = search_state.init_config.copy()
low_cost_partial_config = search_state.low_cost_partial_config
time_budget_s = (
min(budget_left, self._state.train_time_limit or np.inf) if self._state.time_budget >= 0 else None
)
if self._hpo_method in ("bs", "cfo", "grid", "cfocat", "random"):
algo = SearchAlgo(
metric="val_loss",
mode="min",
space=search_space,
points_to_evaluate=points_to_evaluate,
low_cost_partial_config=low_cost_partial_config,
cat_hp_cost=search_state.cat_hp_cost,
resource_attr=resource_attr,
min_resource=min_resource,
max_resource=max_resource,
config_constraints=[(learner_class.size, "<=", self._mem_thres)],
metric_constraints=self.metric_constraints,
seed=self._seed,
allow_empty_config=True,
time_budget_s=time_budget_s,
num_samples=self._max_iter,
)
else:
# if self._hpo_method is optuna, sometimes the search space and the initial config dimension do not match
# need to remove the extra keys from the search space to be consistent with the initial config
converted_space = SearchAlgo.convert_search_space(search_space)
removed_keys = set(search_space.keys()).difference(converted_space.keys())
new_points_to_evaluate = []
for idx in range(len(points_to_evaluate)):
r = points_to_evaluate[idx].copy()
for each_key in removed_keys:
r.pop(each_key)
new_points_to_evaluate.append(r)
points_to_evaluate = new_points_to_evaluate
algo = SearchAlgo(
metric="val_loss",
mode="min",
space=search_space,
points_to_evaluate=[p for p in points_to_evaluate if len(p) == len(search_space)],
)
search_state.search_alg = ConcurrencyLimiter(algo, max_concurrent=1)
# search_state.search_alg = algo
else:
search_space = None
if self._hpo_method in ("bs", "cfo", "cfocat"):
search_state.search_alg.searcher.set_search_properties(
metric=None,
mode=None,
metric_target=self._state.best_loss,
)
start_run_time = time.time()
analysis = tune.run(
search_state.training_function,
search_alg=search_state.search_alg,
time_budget_s=time_budget_s,
verbose=max(self.verbose - 3, 0),
use_ray=False,
use_spark=False,
)
time_used = time.time() - start_run_time
better = False
if analysis.trials:
result = analysis.trials[-1].last_result
search_state.update(result, time_used=time_used)
if self._estimator_index is None:
# update init eci estimate
eci_base = search_state.init_eci
self._eci.append(search_state.estimated_cost4improvement)
for e in self.estimator_list[1:]:
self._eci.append(self._search_states[e].init_eci / eci_base * self._eci[0])
self._estimator_index = 0
min_budget = max(10 * self._eci[0], sum(self._eci))
max_budget = 10000 * self._eci[0]
if search_state.sample_size:
ratio = search_state.data_size[0] / search_state.sample_size
min_budget *= ratio
max_budget *= ratio
logger.info(
f"Estimated sufficient time budget={max_budget:.0f}s."
f" Estimated necessary time budget={min_budget:.0f}s."
)
wall_time = result.get("wall_clock_time")
if wall_time is not None:
self._state.time_from_start = wall_time
# logger.info(f"{self._search_states[estimator].sample_size}, {data_size}")
if search_state.sample_size == self._state.data_size[0]:
self._iter_per_learner_fullsize[estimator] += 1
self._fullsize_reached = True
self._iter_per_learner[estimator] += 1
if search_state.best_loss < self._state.best_loss:
best_config_sig = estimator + search_state.get_hist_config_sig(
self.data_size_full, search_state.best_config
)
self._state.best_loss = search_state.best_loss
self._best_estimator = estimator
est_retrain_time = (
search_state.est_retrain_time(self.data_size_full)
if (best_config_sig not in self._retrained_config)
else 0
)
self._config_history[self._track_iter] = (
estimator,
search_state.best_config,
self._state.time_from_start,
)
if self._trained_estimator:
self._trained_estimator.cleanup()
del self._trained_estimator
self._trained_estimator = None
if not self._state.retrain_final:
self._trained_estimator = search_state.trained_estimator
self._best_iteration = self._track_iter
self._time_taken_best_iter = self._state.time_from_start
better = True
next_trial_time = search_state.time2eval_best
if (
search_state.trained_estimator
and not self._state.model_history
and search_state.trained_estimator != self._trained_estimator
):
search_state.trained_estimator.cleanup()
if better or self._log_type == "all":
self._log_trial(search_state, estimator)
logger.info(
" at {:.1f}s,\testimator {}'s best error={:.4f},\tbest estimator {}'s best error={:.4f}".format(
self._state.time_from_start,
estimator,
search_state.best_loss,
self._best_estimator,
self._state.best_loss,
)
)
if (
self._hpo_method in ("cfo", "bs")
and all(
state.search_alg and state.search_alg.searcher.is_ls_ever_converged
for state in self._search_states.values()
)
and (self._state.time_from_start > self._warn_threshold * self._time_taken_best_iter)
):
logger.warning(
"All estimator hyperparameters local search has "
"converged at least once, and the total search time "
f"exceeds {self._warn_threshold} times the time taken "
"to find the best model."
)
if self._early_stop:
logger.warning("Stopping search as early_stop is set to True.")
break
self._warn_threshold *= 10
else:
logger.info(f"stop trying learner {estimator}")
if self._estimator_index is not None:
self._active_estimators.remove(estimator)
self._estimator_index -= 1
search_state.search_alg.searcher._is_ls_ever_converged = True
if (
self._retrain_in_budget
and best_config_sig
and est_retrain_time
and not better
and self._search_states[self._best_estimator].sample_size == self._state.data_size[0]
and (
est_retrain_time
<= self._state.time_budget - self._state.time_from_start
<= est_retrain_time + next_trial_time
)
):
state = self._search_states[self._best_estimator]
self._trained_estimator, retrain_time = self._state._train_with_config(
self._best_estimator,
state.best_config,
self.data_size_full,
)
logger.info("retrain {} for {:.1f}s".format(self._best_estimator, retrain_time))
self._retrained_config[best_config_sig] = state.best_config_train_time = retrain_time
est_retrain_time = 0
self._state.time_from_start = time.time() - self._start_time_flag
if self._state.time_from_start >= self._state.time_budget >= 0 or not self._active_estimators:
break
if self._ensemble and self._best_estimator:
time_left = self._state.time_budget - self._state.time_from_start
time_ensemble = self._search_states[self._best_estimator].time2eval_best
if time_left < time_ensemble < 2 * time_left:
break
def _search(self):
# initialize the search_states
self._eci = []
self._state.best_loss = float("+inf")
self._state.time_from_start = 0
self._estimator_index = None
self._best_iteration = 0
self._time_taken_best_iter = 0
self._config_history = {}
self._max_iter_per_learner = 10000
self._iter_per_learner = dict([(e, 0) for e in self.estimator_list])
self._iter_per_learner_fullsize = dict([(e, 0) for e in self.estimator_list])
self._fullsize_reached = False
self._trained_estimator = None
self._best_estimator = None
self._retrained_config = {}
self._warn_threshold = 10
self._selected = None
self.modelcount = 0
if self._max_iter < 2 and self.estimator_list and self._state.retrain_final:
# when max_iter is 1, no need to search
self.modelcount = self._max_iter
self._max_iter = 0
self._best_estimator = estimator = self.estimator_list[0]
self._selected = state = self._search_states[estimator]
state.best_config_sample_size = self._state.data_size[0]
state.best_config = state.init_config[0] if state.init_config else {}
elif self._use_ray is False and self._use_spark is False:
self._search_sequential()
else:
self._search_parallel()
# Add a checkpoint for the current best config to the log.
if self._training_log:
self._training_log.checkpoint()
self._state.time_from_start = time.time() - self._start_time_flag
if self._best_estimator:
self._selected = self._search_states[self._best_estimator]
self.modelcount = sum(search_state.total_iter for search_state in self._search_states.values())
if self._trained_estimator:
logger.info(f"selected model: {self._trained_estimator.model}")
estimators = []
if self._ensemble and self._state.task in (
"binary",
"multiclass",
"regression",
):
search_states = list(x for x in self._search_states.items() if x[1].best_config)
search_states.sort(key=lambda x: x[1].best_loss)
estimators = [
(
x[0],
x[1].learner_class(
task=self._state.task,
n_jobs=self._state.n_jobs,
**AutoMLState.sanitize(x[1].best_config),
),
)
for x in search_states[:2]
]
estimators += [
(
x[0],
x[1].learner_class(
task=self._state.task,
n_jobs=self._state.n_jobs,
**AutoMLState.sanitize(x[1].best_config),
),
)
for x in search_states[2:]
if x[1].best_loss < 4 * self._selected.best_loss
]
logger.info([(estimator[0], estimator[1].params) for estimator in estimators])
if len(estimators) > 1:
if self._state.task.is_classification():
from sklearn.ensemble import StackingClassifier as Stacker
else:
from sklearn.ensemble import StackingRegressor as Stacker
if self._use_ray is not False:
import ray
n_cpus = ray.is_initialized() and ray.available_resources()["CPU"] or os.cpu_count()
elif self._use_spark:
from flaml.tune.spark.utils import get_n_cpus
n_cpus = get_n_cpus()
else:
n_cpus = os.cpu_count()
ensemble_n_jobs = (
-self._state.n_jobs # maximize total parallelization degree
if abs(self._state.n_jobs) == 1 # 1 and -1 correspond to min/max parallelization
else max(1, int(n_cpus / 2 / self._state.n_jobs))
# the total degree of parallelization = parallelization degree per estimator * parallelization degree of ensemble
)
if isinstance(self._ensemble, dict):
final_estimator = self._ensemble.get("final_estimator", self._trained_estimator)
passthrough = self._ensemble.get("passthrough", True)
ensemble_n_jobs = self._ensemble.get("n_jobs", ensemble_n_jobs)
else:
final_estimator = self._trained_estimator
passthrough = True
stacker = Stacker(
estimators,
final_estimator,
n_jobs=ensemble_n_jobs,
passthrough=passthrough,
)
sample_weight_dict = (
(self._sample_weight_full is not None) and {"sample_weight": self._sample_weight_full} or {}
)
for e in estimators:
e[1].__class__.init()
import joblib
try:
logger.info("Building ensemble with tuned estimators")
stacker.fit(
self._X_train_all,
self._y_train_all,
**sample_weight_dict, # NOTE: _search is after kwargs is updated to fit_kwargs_by_estimator
)
logger.info(f"ensemble: {stacker}")
self._trained_estimator = stacker
self._trained_estimator.model = stacker
except ValueError as e:
if passthrough:
logger.warning(
"Using passthrough=False for ensemble because the data contain categorical features."
)
stacker = Stacker(
estimators,
final_estimator,
n_jobs=self._state.n_jobs,
passthrough=False,
)
stacker.fit(
self._X_train_all,
self._y_train_all,
**sample_weight_dict, # NOTE: _search is after kwargs is updated to fit_kwargs_by_estimator
)
logger.info(f"ensemble: {stacker}")
self._trained_estimator = stacker
self._trained_estimator.model = stacker
else:
raise e
except joblib.externals.loky.process_executor.TerminatedWorkerError:
logger.error(
"No enough memory to build the ensemble."
" Please try increasing available RAM, decreasing n_jobs for ensemble, or disabling ensemble."
)
elif self._state.retrain_final:
# reset time budget for retraining
if self._max_iter > 1:
self._state.time_budget = -1
if (
self._state.task.is_ts_forecast()
or self._trained_estimator is None
or self._trained_estimator.model is None
or (
self._state.time_budget < 0
or self._state.time_budget - self._state.time_from_start
> self._selected.est_retrain_time(self.data_size_full)
)
and self._selected.best_config_sample_size == self._state.data_size[0]
):
state = self._search_states[self._best_estimator]
(
self._trained_estimator,
retrain_time,
) = self._state._train_with_config(
self._best_estimator,
state.best_config,
self.data_size_full,
)
logger.info("retrain {} for {:.1f}s".format(self._best_estimator, retrain_time))
state.best_config_train_time = retrain_time
if self._trained_estimator:
logger.info(f"retrained model: {self._trained_estimator.model}")
else:
logger.info("not retraining because the time budget is too small.")
def __del__(self):
if (
hasattr(self, "_trained_estimator")
and self._trained_estimator
and hasattr(self._trained_estimator, "cleanup")
):
if self.preserve_checkpoint is False:
self._trained_estimator.cleanup()
del self._trained_estimator
def _select_estimator(self, estimator_list):
if self._learner_selector == "roundrobin":
self._estimator_index += 1
if self._estimator_index == len(estimator_list):
self._estimator_index = 0
return estimator_list[self._estimator_index]
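# Cost-based selection: pick the learner with the smallest estimated cost for
# improvement (ECI). Untried learners fall back to their initial ECI estimate;
# once every learner has results, a randomized choice weighted by 1/cost is used.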
min_estimated_cost, selected = np.inf, None
inv = []
untried_exists = False
for i, estimator in enumerate(estimator_list):
if estimator in self._search_states and (
self._search_states[estimator].sample_size
): # sample_size=None meaning no result
search_state = self._search_states[estimator]
if (
self._state.time_budget >= 0
and self._search_states[estimator].time2eval_best
> self._state.time_budget - self._state.time_from_start
or self._iter_per_learner_fullsize[estimator] >= self._max_iter_per_learner
):
inv.append(0)
continue
estimated_cost = search_state.estimated_cost4improvement
if search_state.sample_size < self._state.data_size[0] and self._state.time_budget >= 0:
estimated_cost = min(
estimated_cost,
search_state.time2eval_best
* min(
SAMPLE_MULTIPLY_FACTOR,
self._state.data_size[0] / search_state.sample_size,
),
)
gap = search_state.best_loss - self._state.best_loss
if gap > 0 and not self._ensemble:
delta_loss = (search_state.best_loss_old - search_state.best_loss) or search_state.best_loss
delta_time = (search_state.total_time_used - search_state.time_best_found_old) or 1e-10
speed = delta_loss / delta_time
if speed:
estimated_cost = max(2 * gap / speed, estimated_cost)
estimated_cost = estimated_cost or 1e-9
inv.append(1 / estimated_cost)
else:
estimated_cost = self._eci[i]
inv.append(0)
untried_exists = True
if estimated_cost < min_estimated_cost:
min_estimated_cost = estimated_cost
selected = estimator
if untried_exists or not selected:
state = self._search_states.get(selected)
if not (state and state.sample_size):
return selected
s = sum(inv)
p = self._random.rand()
q = 0
for i in range(len(inv)):
if inv[i]:
q += inv[i] / s
if p < q:
return estimator_list[i]
|
(**settings)
|
52,708 |
flaml.automl.automl
|
__del__
| null |
def __del__(self):
if (
hasattr(self, "_trained_estimator")
and self._trained_estimator
and hasattr(self._trained_estimator, "cleanup")
):
if self.preserve_checkpoint is False:
self._trained_estimator.cleanup()
del self._trained_estimator
|
(self)
|
52,709 |
flaml.automl.automl
|
__init__
|
Constructor.
Many settings in fit() can be passed to the constructor too.
If an argument in fit() is provided, it will override the setting passed to the constructor.
If an argument in fit() is not provided but provided in the constructor, the value passed to the constructor will be used.
Args:
metric: A string of the metric name or a function,
e.g., 'accuracy', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo', 'roc_auc_weighted',
'roc_auc_ovo_weighted', 'roc_auc_ovr_weighted', 'f1', 'micro_f1', 'macro_f1',
'log_loss', 'mae', 'mse', 'r2', 'mape'. Default is 'auto'.
If passing a customized metric function, the function needs to
have the following input arguments:
```python
def custom_metric(
X_test, y_test, estimator, labels,
X_train, y_train, weight_test=None, weight_train=None,
config=None, groups_test=None, groups_train=None,
):
return metric_to_minimize, metrics_to_log
```
which returns a float number as the minimization objective,
and a dictionary as the metrics to log. E.g.,
```python
def custom_metric(
X_val, y_val, estimator, labels,
X_train, y_train, weight_val=None, weight_train=None,
*args,
):
from sklearn.metrics import log_loss
import time
start = time.time()
y_pred = estimator.predict_proba(X_val)
pred_time = (time.time() - start) / len(X_val)
val_loss = log_loss(y_val, y_pred, labels=labels, sample_weight=weight_val)
y_pred = estimator.predict_proba(X_train)
train_loss = log_loss(y_train, y_pred, labels=labels, sample_weight=weight_train)
alpha = 0.5
return val_loss * (1 + alpha) - alpha * train_loss, {
"val_loss": val_loss,
"train_loss": train_loss,
"pred_time": pred_time,
}
```
task: A string of the task type, e.g.,
'classification', 'regression', 'ts_forecast', 'rank',
'seq-classification', 'seq-regression', 'summarization',
or an instance of the Task class.
n_jobs: An integer of the number of threads for training | default=-1.
Use all available resources when n_jobs == -1.
log_file_name: A string of the log file name | default="". To disable logging,
set it to an empty string "".
estimator_list: A list of strings for estimator names, or 'auto'.
e.g., ```['lgbm', 'xgboost', 'xgb_limitdepth', 'catboost', 'rf', 'extra_tree']```.
time_budget: A float number of the time budget in seconds.
Use -1 if no time limit.
max_iter: An integer of the maximal number of iterations.
sample: A boolean of whether to sample the training data during
search.
ensemble: boolean or dict | default=False. Whether to perform
ensemble after search. Can be a dict with keys 'passthrough'
and 'final_estimator' to specify the passthrough and
final_estimator in the stacker. The dict can also contain
'n_jobs' as the key to specify the number of jobs for the stacker.
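e.g., a minimal sketch of a dict-valued ensemble (the final estimator
choice is illustrative; `X_train` and `y_train` are placeholders):
```python
from sklearn.linear_model import LogisticRegression
from flaml import AutoML

automl = AutoML()
automl.fit(
    X_train, y_train,
    task="classification",
    time_budget=60,
    ensemble={
        "final_estimator": LogisticRegression(),
        "passthrough": False,  # stack only the base learners' predictions
        "n_jobs": 2,  # parallelism for fitting the stacker
    },
)
```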
eval_method: A string of resampling strategy, one of
['auto', 'cv', 'holdout'].
split_ratio: A float of the validation data percentage for holdout.
n_splits: An integer of the number of folds for cross-validation.
log_type: A string of the log type, one of
['better', 'all'].
'better' only logs configs with better loss than previous iterations;
'all' logs all the tried configs.
model_history: A boolean of whether to keep the best
model per estimator. Make sure memory is large enough if setting to True.
log_training_metric: A boolean of whether to log the training
metric for each model.
mem_thres: A float of the memory size constraint in bytes.
pred_time_limit: A float of the prediction latency constraint in seconds.
It refers to the average prediction time per row in validation data.
train_time_limit: A float of the training time constraint in seconds.
verbose: int, default=3 | Controls the verbosity, higher means more
messages.
retrain_full: bool or str, default=True | whether to retrain the
selected model on the full training data when using holdout.
True - retrain only after search finishes; False - no retraining;
'budget' - do best effort to retrain without violating the time
budget.
split_type: str or splitter object, default="auto" | the data split type.
* A valid splitter object is an instance of a derived class of scikit-learn
[KFold](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html#sklearn.model_selection.KFold)
and has ``split`` and ``get_n_splits`` methods with the same signatures.
Set eval_method to "cv" to use the splitter object.
* Valid str options depend on different tasks.
For classification tasks, valid choices are
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
"auto" -> uniform.
For time series forecast tasks, must be "auto" or 'time'.
For ranking task, must be "auto" or 'group'.
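e.g., a sketch that passes a scikit-learn splitter object
(`X_train` and `y_train` are placeholders):
```python
from sklearn.model_selection import KFold
from flaml import AutoML

automl = AutoML()
automl.fit(
    X_train, y_train,
    task="regression",
    time_budget=60,
    eval_method="cv",  # required when passing a splitter object
    split_type=KFold(n_splits=5, shuffle=True, random_state=0),
)
```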
hpo_method: str, default="auto" | The hyperparameter
optimization method. By default, CFO is used for sequential
search and BlendSearch is used for parallel search.
No need to set when using flaml's default search space or using
a simple customized search space. When set to 'bs', BlendSearch
is used. BlendSearch can be tried when the search space is
complex, for example, containing multiple disjoint, discontinuous
subspaces. When set to 'random', random search is used.
starting_points: A dictionary or a str to specify the starting hyperparameter
config for the estimators | default="static".
If str:
- if "data", use data-dependent defaults;
- if "data:path" use data-dependent defaults which are stored at path;
- if "static", use data-independent defaults.
If dict, keys are the names of the estimators, and values are the starting
hyperparameter configurations for the corresponding estimators.
The value can be a single hyperparameter configuration dict or a list
of hyperparameter configuration dicts.
In the following code example, we get starting_points from the
`automl` object and use them in the `new_automl` object.
e.g.,
```python
from flaml import AutoML
automl = AutoML()
X_train, y_train = load_iris(return_X_y=True)
automl.fit(X_train, y_train)
starting_points = automl.best_config_per_estimator
new_automl = AutoML()
new_automl.fit(X_train, y_train, starting_points=starting_points)
```
seed: int or None, default=None | The random seed for hpo.
n_concurrent_trials: [In preview] int, default=1 | The number of
concurrent trials. When n_concurrent_trials > 1, flaml performs
[parallel tuning](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning)
and installation of ray or spark is required: `pip install flaml[ray]`
or `pip install flaml[spark]`. Please check
[here](https://spark.apache.org/docs/latest/api/python/getting_started/install.html)
for more details about installing Spark.
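e.g., a sketch of parallel tuning with ray (assumes ray is installed;
`X_train` and `y_train` are placeholders):
```python
from flaml import AutoML

automl = AutoML()
automl.fit(
    X_train, y_train,
    task="classification",
    time_budget=60,
    n_concurrent_trials=4,
    use_ray=True,
)
```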
keep_search_state: boolean, default=False | Whether to keep data needed
for model search after fit(). By default the state is deleted for
space saving.
preserve_checkpoint: boolean, default=True | Whether to preserve the saved checkpoint
on disk when deleting automl. By default the checkpoint is preserved.
early_stop: boolean, default=False | Whether to stop early if the
search is considered to converge.
force_cancel: boolean, default=False | Whether to forcibly cancel Spark jobs if the
search time exceeds the time budget.
append_log: boolean, default=False | Whether to directly append the log
records to the input log file if it exists.
auto_augment: boolean, default=True | Whether to automatically
augment rare classes.
min_sample_size: int, default=MIN_SAMPLE_TRAIN | the minimal sample
size when sample=True.
use_ray: boolean or dict.
If boolean: default=False | Whether to use ray to run the training
in separate processes. This can be used to prevent OOM for large
datasets, but will incur more overhead in time.
If dict: the dict contains the keyword arguments to be passed to
[ray.tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html).
use_spark: boolean, default=False | Whether to use spark to run the training
in parallel spark jobs. This can be used to accelerate training on large models
and large datasets, but will incur more overhead in time and thus slow down
training in some cases. GPU training is not supported yet when use_spark is True.
For Spark clusters, by default, we will launch one trial per executor. However,
sometimes we want to launch more trials than the number of executors (e.g., local mode).
In this case, we can set the environment variable `FLAML_MAX_CONCURRENT` to override
the detected `num_executors`. The final number of concurrent trials will be the minimum
of `n_concurrent_trials` and `num_executors`.
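e.g., a sketch that overrides the detected number of executors
(the value "8" is illustrative; `X_train` and `y_train` are placeholders):
```python
import os

from flaml import AutoML

# Allow up to 8 concurrent Spark trials regardless of detected executors.
os.environ["FLAML_MAX_CONCURRENT"] = "8"
automl = AutoML()
automl.fit(X_train, y_train, time_budget=120, n_concurrent_trials=8, use_spark=True)
```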
free_mem_ratio: float between 0 and 1, default=0. The free memory ratio to keep during training.
metric_constraints: list, default=[] | The list of metric constraints.
Each element in this list is a 3-tuple, which shall be expressed
in the following format: the first element of the 3-tuple is the name of the
metric, the second element is the inequality sign chosen from ">=" and "<=",
and the third element is the constraint value. E.g., `('val_loss', '<=', 0.1)`.
Note that all the metric names in metric_constraints need to be reported via
the metrics_to_log dictionary returned by a customized metric function.
The customized metric function shall be provided via the `metric` keyword
argument of the fit() function or the automl constructor.
Find an example in the 4th constraint type in this [doc](/docs/Use-Cases/Task-Oriented-AutoML#constraint).
If `pred_time_limit` is provided as one of keyword arguments to fit() function or
the automl constructor, flaml will automatically (and under the hood)
add it as an additional element in the metric_constraints. Essentially 'pred_time_limit'
specifies a constraint on the prediction latency in seconds.
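e.g., a minimal sketch, assuming a customized metric function such as the
`custom_metric` example above that reports "train_loss" in metrics_to_log:
```python
from flaml import AutoML

automl = AutoML(
    metric=custom_metric,  # assumed to report "train_loss" in metrics_to_log
    metric_constraints=[("train_loss", "<=", 0.1)],
)
```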
custom_hp: dict, default=None | The custom search space specified by the user.
It is a nested dict with keys being the estimator names, and values being the
per-estimator search space dicts. In a per-estimator search space dict,
the keys are the hyperparameter names, and values are dicts of info ("domain",
"init_value", and "low_cost_init_value") about the search space associated with
the hyperparameter (i.e., per-hyperparameter search space dicts). When custom_hp
is provided, the built-in search space, which is also a nested dict of per-estimator
search space dicts, will be updated with custom_hp. Note that during this nested dict update,
the per-hyperparameter search space dicts will be replaced (instead of updated) by the ones
provided in custom_hp. Note that the value for "domain" can either be a constant
or a sample.Domain object.
e.g.,
```python
custom_hp = {
"transformer_ms": {
"model_path": {
"domain": "albert-base-v2",
},
"learning_rate": {
"domain": tune.choice([1e-4, 1e-5]),
}
}
}
```
skip_transform: boolean, default=False | Whether to pre-process data prior to modeling.
fit_kwargs_by_estimator: dict, default=None | The user-specified keyword arguments, grouped by estimator name.
e.g.,
```python
fit_kwargs_by_estimator = {
"transformer": {
"output_dir": "test/data/output/",
"fp16": False,
}
}
```
mlflow_logging: boolean, default=True | Whether to log the training results to mlflow.
This requires mlflow to be installed and to have an active mlflow run.
FLAML will create nested runs.
|
def __init__(self, **settings):
"""Constructor.
Many settings in fit() can be passed to the constructor too.
If an argument in fit() is provided, it will override the setting passed to the constructor.
If an argument in fit() is not provided but provided in the constructor, the value passed to the constructor will be used.
Args:
metric: A string of the metric name or a function,
e.g., 'accuracy', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo', 'roc_auc_weighted',
'roc_auc_ovo_weighted', 'roc_auc_ovr_weighted', 'f1', 'micro_f1', 'macro_f1',
'log_loss', 'mae', 'mse', 'r2', 'mape'. Default is 'auto'.
If passing a customized metric function, the function needs to
have the following input arguments:
```python
def custom_metric(
X_test, y_test, estimator, labels,
X_train, y_train, weight_test=None, weight_train=None,
config=None, groups_test=None, groups_train=None,
):
return metric_to_minimize, metrics_to_log
```
which returns a float number as the minimization objective,
and a dictionary as the metrics to log. E.g.,
```python
def custom_metric(
X_val, y_val, estimator, labels,
X_train, y_train, weight_val=None, weight_train=None,
*args,
):
from sklearn.metrics import log_loss
import time
start = time.time()
y_pred = estimator.predict_proba(X_val)
pred_time = (time.time() - start) / len(X_val)
val_loss = log_loss(y_val, y_pred, labels=labels, sample_weight=weight_val)
y_pred = estimator.predict_proba(X_train)
train_loss = log_loss(y_train, y_pred, labels=labels, sample_weight=weight_train)
alpha = 0.5
return val_loss * (1 + alpha) - alpha * train_loss, {
"val_loss": val_loss,
"train_loss": train_loss,
"pred_time": pred_time,
}
```
task: A string of the task type, e.g.,
'classification', 'regression', 'ts_forecast', 'rank',
'seq-classification', 'seq-regression', 'summarization',
or an instance of the Task class.
n_jobs: An integer of the number of threads for training | default=-1.
Use all available resources when n_jobs == -1.
log_file_name: A string of the log file name | default="". To disable logging,
set it to an empty string "".
estimator_list: A list of strings for estimator names, or 'auto'.
e.g., ```['lgbm', 'xgboost', 'xgb_limitdepth', 'catboost', 'rf', 'extra_tree']```.
time_budget: A float number of the time budget in seconds.
Use -1 if no time limit.
max_iter: An integer of the maximal number of iterations.
sample: A boolean of whether to sample the training data during
search.
ensemble: boolean or dict | default=False. Whether to perform
ensemble after search. Can be a dict with keys 'passthrough'
and 'final_estimator' to specify the passthrough and
final_estimator in the stacker. The dict can also contain
'n_jobs' as the key to specify the number of jobs for the stacker.
eval_method: A string of resampling strategy, one of
['auto', 'cv', 'holdout'].
split_ratio: A float of the validation data percentage for holdout.
n_splits: An integer of the number of folds for cross-validation.
log_type: A string of the log type, one of
['better', 'all'].
'better' only logs configs with better loss than previous iterations;
'all' logs all the tried configs.
model_history: A boolean of whether to keep the best
model per estimator. Make sure memory is large enough if setting to True.
log_training_metric: A boolean of whether to log the training
metric for each model.
mem_thres: A float of the memory size constraint in bytes.
pred_time_limit: A float of the prediction latency constraint in seconds.
It refers to the average prediction time per row in validation data.
train_time_limit: A float of the training time constraint in seconds.
verbose: int, default=3 | Controls the verbosity, higher means more
messages.
retrain_full: bool or str, default=True | whether to retrain the
selected model on the full training data when using holdout.
True - retrain only after search finishes; False - no retraining;
'budget' - do best effort to retrain without violating the time
budget.
split_type: str or splitter object, default="auto" | the data split type.
* A valid splitter object is an instance of a derived class of scikit-learn
[KFold](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html#sklearn.model_selection.KFold)
and has ``split`` and ``get_n_splits`` methods with the same signatures.
Set eval_method to "cv" to use the splitter object.
* Valid str options depend on different tasks.
For classification tasks, valid choices are
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
"auto" -> uniform.
For time series forecast tasks, must be "auto" or 'time'.
For ranking task, must be "auto" or 'group'.
hpo_method: str, default="auto" | The hyperparameter
optimization method. By default, CFO is used for sequential
search and BlendSearch is used for parallel search.
No need to set when using flaml's default search space or using
a simple customized search space. When set to 'bs', BlendSearch
is used. BlendSearch can be tried when the search space is
complex, for example, containing multiple disjoint, discontinuous
subspaces. When set to 'random', random search is used.
starting_points: A dictionary or a str to specify the starting hyperparameter
config for the estimators | default="static".
If str:
- if "data", use data-dependent defaults;
- if "data:path" use data-dependent defaults which are stored at path;
- if "static", use data-independent defaults.
If dict, keys are the names of the estimators, and values are the starting
hyperparameter configurations for the corresponding estimators.
The value can be a single hyperparameter configuration dict or a list
of hyperparameter configuration dicts.
In the following code example, we get starting_points from the
`automl` object and use them in the `new_automl` object.
e.g.,
```python
from flaml import AutoML
automl = AutoML()
X_train, y_train = load_iris(return_X_y=True)
automl.fit(X_train, y_train)
starting_points = automl.best_config_per_estimator
new_automl = AutoML()
new_automl.fit(X_train, y_train, starting_points=starting_points)
```
seed: int or None, default=None | The random seed for hpo.
n_concurrent_trials: [In preview] int, default=1 | The number of
concurrent trials. When n_concurrent_trials > 1, flaml performs
[parallel tuning](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning)
and installation of ray or spark is required: `pip install flaml[ray]`
or `pip install flaml[spark]`. Please check
[here](https://spark.apache.org/docs/latest/api/python/getting_started/install.html)
for more details about installing Spark.
keep_search_state: boolean, default=False | Whether to keep data needed
for model search after fit(). By default the state is deleted for
space saving.
preserve_checkpoint: boolean, default=True | Whether to preserve the saved checkpoint
on disk when deleting automl. By default the checkpoint is preserved.
early_stop: boolean, default=False | Whether to stop early if the
search is considered to converge.
force_cancel: boolean, default=False | Whether to forcibly cancel Spark jobs if the
search time exceeds the time budget.
append_log: boolean, default=False | Whether to directly append the log
records to the input log file if it exists.
auto_augment: boolean, default=True | Whether to automatically
augment rare classes.
min_sample_size: int, default=MIN_SAMPLE_TRAIN | the minimal sample
size when sample=True.
use_ray: boolean or dict.
If boolean: default=False | Whether to use ray to run the training
in separate processes. This can be used to prevent OOM for large
datasets, but will incur more overhead in time.
        If dict: the dict contains the keyword arguments to be passed to
[ray.tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html).
use_spark: boolean, default=False | Whether to use spark to run the training
in parallel spark jobs. This can be used to accelerate training on large models
and large datasets, but will incur more overhead in time and thus slow down
training in some cases. GPU training is not supported yet when use_spark is True.
For Spark clusters, by default, we will launch one trial per executor. However,
sometimes we want to launch more trials than the number of executors (e.g., local mode).
In this case, we can set the environment variable `FLAML_MAX_CONCURRENT` to override
the detected `num_executors`. The final number of concurrent trials will be the minimum
of `n_concurrent_trials` and `num_executors`.
free_mem_ratio: float between 0 and 1, default=0. The free memory ratio to keep during training.
metric_constraints: list, default=[] | The list of metric constraints.
Each element in this list is a 3-tuple, which shall be expressed
in the following format: the first element of the 3-tuple is the name of the
metric, the second element is the inequality sign chosen from ">=" and "<=",
and the third element is the constraint value. E.g., `('val_loss', '<=', 0.1)`.
Note that all the metric names in metric_constraints need to be reported via
the metrics_to_log dictionary returned by a customized metric function.
        The customized metric function shall be provided via the `metric` keyword
        argument of the fit() function or the automl constructor.
        Find an example in the 4th constraint type in this [doc](/docs/Use-Cases/Task-Oriented-AutoML#constraint).
        If `pred_time_limit` is provided as one of the keyword arguments to the fit() function or
        the automl constructor, flaml will automatically (and under the hood)
        add it as an additional element in the metric_constraints. Essentially, `pred_time_limit`
        specifies a constraint on the prediction latency in seconds.
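        A hedged usage sketch (it assumes a customized metric function `custom_metric`,
        defined as in the metric example above, whose metrics_to_log dict reports
        'train_loss' and 'pred_time'):
        ```python
        automl = AutoML()
        automl.fit(
            X_train, y_train,
            metric=custom_metric,  # assumed to log {'train_loss': ..., 'pred_time': ...}
            metric_constraints=[("train_loss", "<=", 0.5), ("pred_time", "<=", 1e-3)],
            time_budget=60,
        )
        ```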
    custom_hp: dict, default=None | The custom search space specified by the user.
It is a nested dict with keys being the estimator names, and values being dicts
per estimator search space. In the per estimator search space dict,
the keys are the hyperparameter names, and values are dicts of info ("domain",
"init_value", and "low_cost_init_value") about the search space associated with
the hyperparameter (i.e., per hyperparameter search space dict). When custom_hp
is provided, the built-in search space which is also a nested dict of per estimator
search space dict, will be updated with custom_hp. Note that during this nested dict update,
the per hyperparameter search space dicts will be replaced (instead of updated) by the ones
provided in custom_hp. Note that the value for "domain" can either be a constant
or a sample.Domain object.
e.g.,
```python
custom_hp = {
"transformer_ms": {
"model_path": {
"domain": "albert-base-v2",
},
"learning_rate": {
"domain": tune.choice([1e-4, 1e-5]),
}
}
}
```
    skip_transform: boolean, default=False | Whether to skip pre-processing data prior to modeling.
    fit_kwargs_by_estimator: dict, default=None | The user-specified keyword arguments, grouped by estimator name.
e.g.,
```python
fit_kwargs_by_estimator = {
"transformer": {
"output_dir": "test/data/output/",
"fp16": False,
}
}
```
mlflow_logging: boolean, default=True | Whether to log the training results to mlflow.
This requires mlflow to be installed and to have an active mlflow run.
FLAML will create nested runs.
"""
if ERROR:
raise ERROR
self._track_iter = 0
self._state = AutoMLState()
self._state.learner_classes = {}
self._settings = settings
# no budget by default
settings["time_budget"] = settings.get("time_budget", -1)
settings["task"] = settings.get("task", "classification")
settings["n_jobs"] = settings.get("n_jobs", -1)
settings["eval_method"] = settings.get("eval_method", "auto")
settings["split_ratio"] = settings.get("split_ratio", SPLIT_RATIO)
settings["n_splits"] = settings.get("n_splits", N_SPLITS)
settings["auto_augment"] = settings.get("auto_augment", True)
settings["metric"] = settings.get("metric", "auto")
settings["estimator_list"] = settings.get("estimator_list", "auto")
settings["log_file_name"] = settings.get("log_file_name", "")
settings["max_iter"] = settings.get("max_iter") # no budget by default
settings["sample"] = settings.get("sample", True)
settings["ensemble"] = settings.get("ensemble", False)
settings["log_type"] = settings.get("log_type", "better")
settings["model_history"] = settings.get("model_history", False)
settings["log_training_metric"] = settings.get("log_training_metric", False)
settings["mem_thres"] = settings.get("mem_thres", MEM_THRES)
settings["pred_time_limit"] = settings.get("pred_time_limit", np.inf)
settings["train_time_limit"] = settings.get("train_time_limit", None)
settings["verbose"] = settings.get("verbose", 3)
settings["retrain_full"] = settings.get("retrain_full", True)
settings["split_type"] = settings.get("split_type", "auto")
settings["hpo_method"] = settings.get("hpo_method", "auto")
settings["learner_selector"] = settings.get("learner_selector", "sample")
settings["starting_points"] = settings.get("starting_points", "static")
settings["n_concurrent_trials"] = settings.get("n_concurrent_trials", 1)
settings["keep_search_state"] = settings.get("keep_search_state", False)
settings["preserve_checkpoint"] = settings.get("preserve_checkpoint", True)
settings["early_stop"] = settings.get("early_stop", False)
settings["force_cancel"] = settings.get("force_cancel", False)
settings["append_log"] = settings.get("append_log", False)
settings["min_sample_size"] = settings.get("min_sample_size", MIN_SAMPLE_TRAIN)
settings["use_ray"] = settings.get("use_ray", False)
settings["use_spark"] = settings.get("use_spark", False)
if settings["use_ray"] is not False and settings["use_spark"] is not False:
raise ValueError("use_ray and use_spark cannot be both True.")
settings["free_mem_ratio"] = settings.get("free_mem_ratio", 0)
settings["metric_constraints"] = settings.get("metric_constraints", [])
settings["cv_score_agg_func"] = settings.get("cv_score_agg_func", None)
settings["fit_kwargs_by_estimator"] = settings.get("fit_kwargs_by_estimator", {})
settings["custom_hp"] = settings.get("custom_hp", {})
settings["skip_transform"] = settings.get("skip_transform", False)
settings["mlflow_logging"] = settings.get("mlflow_logging", True)
self._estimator_type = "classifier" if settings["task"] in CLASSIFICATION else "regressor"
|
(self, **settings)
|
52,710 |
flaml.automl.automl
|
_decide_eval_method
| null |
def _decide_eval_method(self, eval_method, time_budget):
if not isinstance(self._split_type, str):
assert eval_method in [
"auto",
"cv",
], "eval_method must be 'auto' or 'cv' for custom data splitter."
assert self._state.X_val is None, "custom splitter and custom validation data can't be used together."
return "cv"
if self._state.X_val is not None and (
not isinstance(self._state.X_val, TimeSeriesDataset) or len(self._state.X_val.test_data) > 0
):
assert eval_method in [
"auto",
"holdout",
], "eval_method must be 'auto' or 'holdout' for custom validation data."
return "holdout"
if eval_method != "auto":
assert eval_method in [
"holdout",
"cv",
], "eval_method must be 'holdout', 'cv' or 'auto'."
return eval_method
nrow, dim = self._nrow, self._ndim
if (
time_budget < 0
or nrow * dim / 0.9 < SMALL_LARGE_THRES * (time_budget / 3600)
and nrow < CV_HOLDOUT_THRESHOLD
):
# time allows or sampling can be used and cv is necessary
return "cv"
else:
return "holdout"
|
(self, eval_method, time_budget)
|
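The final branch of `_decide_eval_method` encodes a size-vs-budget heuristic. A minimal
standalone sketch of that rule (the two threshold values below are illustrative
assumptions; the real constants live in flaml's internals):
```python
SMALL_LARGE_THRES = 10_000_000  # illustrative stand-in for flaml's constant
CV_HOLDOUT_THRESHOLD = 100_000  # illustrative stand-in for flaml's constant


def decide_eval_method(nrow: int, dim: int, time_budget: float) -> str:
    # "cv" when there is no time limit, or when the data (inflated by the 90%
    # train split) is small relative to the hourly budget and has few enough rows
    if time_budget < 0 or (
        nrow * dim / 0.9 < SMALL_LARGE_THRES * (time_budget / 3600)
        and nrow < CV_HOLDOUT_THRESHOLD
    ):
        return "cv"
    return "holdout"


assert decide_eval_method(nrow=1_000, dim=10, time_budget=3600) == "cv"
```
Note the operator precedence in the original: the `or` binds last, so a negative
time budget alone is enough to select cross-validation.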
52,711 |
flaml.automl.automl
|
_log_trial
| null |
def _log_trial(self, search_state, estimator):
if self._training_log:
self._training_log.append(
self._iter_per_learner[estimator],
search_state.metric_for_logging,
search_state.trial_time,
self._state.time_from_start,
search_state.val_loss,
search_state.config,
estimator,
search_state.sample_size,
)
if self._mlflow_logging and mlflow is not None and mlflow.active_run():
with mlflow.start_run(nested=True):
mlflow.log_metric("iter_counter", self._track_iter)
if (search_state.metric_for_logging is not None) and (
"intermediate_results" in search_state.metric_for_logging
):
for each_entry in search_state.metric_for_logging["intermediate_results"]:
with mlflow.start_run(nested=True):
mlflow.log_metrics(each_entry)
mlflow.log_metric("iter_counter", self._iter_per_learner[estimator])
del search_state.metric_for_logging["intermediate_results"]
if search_state.metric_for_logging:
mlflow.log_metrics(search_state.metric_for_logging)
mlflow.log_metric("trial_time", search_state.trial_time)
mlflow.log_metric("wall_clock_time", self._state.time_from_start)
mlflow.log_metric("validation_loss", search_state.val_loss)
mlflow.log_params(search_state.config)
mlflow.log_param("learner", estimator)
mlflow.log_param("sample_size", search_state.sample_size)
mlflow.log_metric("best_validation_loss", search_state.best_loss)
mlflow.log_param("best_config", search_state.best_config)
mlflow.log_param("best_learner", self._best_estimator)
mlflow.log_metric(
self._state.metric if isinstance(self._state.metric, str) else self._state.error_metric,
1 - search_state.val_loss
if self._state.error_metric.startswith("1-")
else -search_state.val_loss
if self._state.error_metric.startswith("-")
else search_state.val_loss,
)
|
(self, search_state, estimator)
|
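`_log_trial` only writes to mlflow when a run is already active. A hedged sketch of
enabling that path from user code (assumes mlflow is installed; the dataset choice
is illustrative):
```python
import mlflow
from sklearn.datasets import load_iris
from flaml import AutoML

X_train, y_train = load_iris(return_X_y=True)

# FLAML creates nested runs under this active run and logs per-trial metrics,
# params, and the best validation loss, as _log_trial above shows.
with mlflow.start_run(run_name="flaml-search"):
    automl = AutoML()
    automl.fit(X_train, y_train, task="classification", time_budget=60)
```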
52,712 |
flaml.automl.automl
|
_prepare_data
| null |
def _prepare_data(self, eval_method, split_ratio, n_splits):
self._state.task.prepare_data(
self._state,
self._X_train_all,
self._y_train_all,
self._auto_augment,
eval_method,
self._split_type,
split_ratio,
n_splits,
self._df,
self._sample_weight_full,
)
self.data_size_full = self._state.data_size_full
|
(self, eval_method, split_ratio, n_splits)
|
52,713 |
flaml.automl.automl
|
_search
| null |
def _search(self):
# initialize the search_states
self._eci = []
self._state.best_loss = float("+inf")
self._state.time_from_start = 0
self._estimator_index = None
self._best_iteration = 0
self._time_taken_best_iter = 0
self._config_history = {}
self._max_iter_per_learner = 10000
self._iter_per_learner = dict([(e, 0) for e in self.estimator_list])
self._iter_per_learner_fullsize = dict([(e, 0) for e in self.estimator_list])
self._fullsize_reached = False
self._trained_estimator = None
self._best_estimator = None
self._retrained_config = {}
self._warn_threshold = 10
self._selected = None
self.modelcount = 0
if self._max_iter < 2 and self.estimator_list and self._state.retrain_final:
# when max_iter is 1, no need to search
self.modelcount = self._max_iter
self._max_iter = 0
self._best_estimator = estimator = self.estimator_list[0]
self._selected = state = self._search_states[estimator]
state.best_config_sample_size = self._state.data_size[0]
state.best_config = state.init_config[0] if state.init_config else {}
elif self._use_ray is False and self._use_spark is False:
self._search_sequential()
else:
self._search_parallel()
# Add a checkpoint for the current best config to the log.
if self._training_log:
self._training_log.checkpoint()
self._state.time_from_start = time.time() - self._start_time_flag
if self._best_estimator:
self._selected = self._search_states[self._best_estimator]
self.modelcount = sum(search_state.total_iter for search_state in self._search_states.values())
if self._trained_estimator:
logger.info(f"selected model: {self._trained_estimator.model}")
estimators = []
if self._ensemble and self._state.task in (
"binary",
"multiclass",
"regression",
):
search_states = list(x for x in self._search_states.items() if x[1].best_config)
search_states.sort(key=lambda x: x[1].best_loss)
estimators = [
(
x[0],
x[1].learner_class(
task=self._state.task,
n_jobs=self._state.n_jobs,
**AutoMLState.sanitize(x[1].best_config),
),
)
for x in search_states[:2]
]
estimators += [
(
x[0],
x[1].learner_class(
task=self._state.task,
n_jobs=self._state.n_jobs,
**AutoMLState.sanitize(x[1].best_config),
),
)
for x in search_states[2:]
if x[1].best_loss < 4 * self._selected.best_loss
]
logger.info([(estimator[0], estimator[1].params) for estimator in estimators])
if len(estimators) > 1:
if self._state.task.is_classification():
from sklearn.ensemble import StackingClassifier as Stacker
else:
from sklearn.ensemble import StackingRegressor as Stacker
if self._use_ray is not False:
import ray
n_cpus = ray.is_initialized() and ray.available_resources()["CPU"] or os.cpu_count()
elif self._use_spark:
from flaml.tune.spark.utils import get_n_cpus
n_cpus = get_n_cpus()
else:
n_cpus = os.cpu_count()
ensemble_n_jobs = (
-self._state.n_jobs # maximize total parallelization degree
if abs(self._state.n_jobs) == 1 # 1 and -1 correspond to min/max parallelization
else max(1, int(n_cpus / 2 / self._state.n_jobs))
# the total degree of parallelization = parallelization degree per estimator * parallelization degree of ensemble
)
if isinstance(self._ensemble, dict):
final_estimator = self._ensemble.get("final_estimator", self._trained_estimator)
passthrough = self._ensemble.get("passthrough", True)
ensemble_n_jobs = self._ensemble.get("n_jobs", ensemble_n_jobs)
else:
final_estimator = self._trained_estimator
passthrough = True
stacker = Stacker(
estimators,
final_estimator,
n_jobs=ensemble_n_jobs,
passthrough=passthrough,
)
sample_weight_dict = (
(self._sample_weight_full is not None) and {"sample_weight": self._sample_weight_full} or {}
)
for e in estimators:
e[1].__class__.init()
import joblib
try:
logger.info("Building ensemble with tuned estimators")
stacker.fit(
self._X_train_all,
self._y_train_all,
**sample_weight_dict, # NOTE: _search is after kwargs is updated to fit_kwargs_by_estimator
)
logger.info(f"ensemble: {stacker}")
self._trained_estimator = stacker
self._trained_estimator.model = stacker
except ValueError as e:
if passthrough:
logger.warning(
"Using passthrough=False for ensemble because the data contain categorical features."
)
stacker = Stacker(
estimators,
final_estimator,
n_jobs=self._state.n_jobs,
passthrough=False,
)
stacker.fit(
self._X_train_all,
self._y_train_all,
**sample_weight_dict, # NOTE: _search is after kwargs is updated to fit_kwargs_by_estimator
)
logger.info(f"ensemble: {stacker}")
self._trained_estimator = stacker
self._trained_estimator.model = stacker
else:
raise e
except joblib.externals.loky.process_executor.TerminatedWorkerError:
logger.error(
"No enough memory to build the ensemble."
" Please try increasing available RAM, decreasing n_jobs for ensemble, or disabling ensemble."
)
elif self._state.retrain_final:
# reset time budget for retraining
if self._max_iter > 1:
self._state.time_budget = -1
if (
self._state.task.is_ts_forecast()
or self._trained_estimator is None
or self._trained_estimator.model is None
or (
self._state.time_budget < 0
or self._state.time_budget - self._state.time_from_start
> self._selected.est_retrain_time(self.data_size_full)
)
and self._selected.best_config_sample_size == self._state.data_size[0]
):
state = self._search_states[self._best_estimator]
(
self._trained_estimator,
retrain_time,
) = self._state._train_with_config(
self._best_estimator,
state.best_config,
self.data_size_full,
)
logger.info("retrain {} for {:.1f}s".format(self._best_estimator, retrain_time))
state.best_config_train_time = retrain_time
if self._trained_estimator:
logger.info(f"retrained model: {self._trained_estimator.model}")
else:
logger.info("not retraining because the time budget is too small.")
|
(self)
|
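The stacking branch of `_search` is activated through the `ensemble` argument of
`fit()`. A hedged sketch using the dict form documented there ('final_estimator',
'passthrough', 'n_jobs'):
```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from flaml import AutoML

X_train, y_train = load_iris(return_X_y=True)
automl = AutoML()
automl.fit(
    X_train, y_train,
    task="classification",
    time_budget=120,
    ensemble={
        "final_estimator": LogisticRegression(),  # meta-learner of the stacker
        "passthrough": False,  # meta-learner sees only base-model predictions
        "n_jobs": 2,  # parallelism of the stacker fit
    },
)
```
If fitting with `passthrough=True` fails on categorical features, `_search` retries
with `passthrough=False`, as the except-branch above shows.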
52,714 |
flaml.automl.automl
|
_search_parallel
| null |
def _search_parallel(self):
if self._use_ray is not False:
try:
from ray import __version__ as ray_version
assert ray_version >= "1.10.0"
if ray_version.startswith("1."):
from ray.tune.suggest import ConcurrencyLimiter
else:
from ray.tune.search import ConcurrencyLimiter
import ray
except (ImportError, AssertionError):
raise ImportError("use_ray=True requires installation of ray. " "Please run pip install flaml[ray]")
else:
from flaml.tune.searcher.suggestion import ConcurrencyLimiter
if self._hpo_method in ("cfo", "grid"):
from flaml import CFO as SearchAlgo
elif "bs" == self._hpo_method:
from flaml import BlendSearch as SearchAlgo
elif "random" == self._hpo_method:
from flaml import RandomSearch as SearchAlgo
elif "optuna" == self._hpo_method:
if self._use_ray is not False:
try:
from ray import __version__ as ray_version
assert ray_version >= "1.10.0"
if ray_version.startswith("1."):
from ray.tune.suggest.optuna import OptunaSearch as SearchAlgo
else:
from ray.tune.search.optuna import OptunaSearch as SearchAlgo
except (ImportError, AssertionError):
from flaml.tune.searcher.suggestion import (
OptunaSearch as SearchAlgo,
)
else:
from flaml.tune.searcher.suggestion import OptunaSearch as SearchAlgo
else:
raise NotImplementedError(
f"hpo_method={self._hpo_method} is not recognized. " "'auto', 'cfo' and 'bs' are supported."
)
space = self.search_space
self._state.time_from_start = time.time() - self._start_time_flag
time_budget_s = self._state.time_budget - self._state.time_from_start if self._state.time_budget >= 0 else None
if self._hpo_method != "optuna":
min_resource = self.min_resource
if isinstance(min_resource, dict):
_min_resource_set = set(min_resource.values())
min_resource_all_estimator = min(_min_resource_set)
if len(_min_resource_set) > 1:
logger.warning(
"Using the min FLAML_sample_size of all the provided starting points as the starting sample size in the case of parallel search."
)
else:
min_resource_all_estimator = min_resource
search_alg = SearchAlgo(
metric="val_loss",
space=space,
low_cost_partial_config=self.low_cost_partial_config,
points_to_evaluate=self.points_to_evaluate,
cat_hp_cost=self.cat_hp_cost,
resource_attr=self.resource_attr,
min_resource=min_resource_all_estimator,
max_resource=self.max_resource,
config_constraints=[(partial(size, self._state.learner_classes), "<=", self._mem_thres)],
metric_constraints=self.metric_constraints,
seed=self._seed,
time_budget_s=time_budget_s,
num_samples=self._max_iter,
allow_empty_config=True,
)
else:
# if self._hpo_method is optuna, sometimes the search space and the initial config dimension do not match
# need to remove the extra keys from the search space to be consistent with the initial config
converted_space = SearchAlgo.convert_search_space(space)
removed_keys = set(space.keys()).difference(converted_space.keys())
new_points_to_evaluate = []
for idx in range(len(self.points_to_evaluate)):
r = self.points_to_evaluate[idx].copy()
for each_key in removed_keys:
r.pop(each_key)
new_points_to_evaluate.append(r)
search_alg = SearchAlgo(
metric="val_loss",
mode="min",
points_to_evaluate=[p for p in new_points_to_evaluate if len(p) == len(converted_space)],
)
search_alg = ConcurrencyLimiter(search_alg, self._n_concurrent_trials)
resources_per_trial = self._state.resources_per_trial
if self._use_spark:
# use spark as parallel backend
analysis = tune.run(
self.trainable,
search_alg=search_alg,
config=space,
metric="val_loss",
mode="min",
time_budget_s=time_budget_s,
num_samples=self._max_iter,
verbose=max(self.verbose - 2, 0),
use_ray=False,
use_spark=True,
force_cancel=self._force_cancel,
# raise_on_failed_trial=False,
# keep_checkpoints_num=1,
# checkpoint_score_attr="min-val_loss",
)
else:
# use ray as parallel backend
analysis = ray.tune.run(
self.trainable,
search_alg=search_alg,
config=space,
metric="val_loss",
mode="min",
resources_per_trial=resources_per_trial,
time_budget_s=time_budget_s,
num_samples=self._max_iter,
verbose=max(self.verbose - 2, 0),
raise_on_failed_trial=False,
keep_checkpoints_num=1,
checkpoint_score_attr="min-val_loss",
**self._use_ray if isinstance(self._use_ray, dict) else {},
)
# logger.info([trial.last_result for trial in analysis.trials])
trials = sorted(
(
trial
for trial in analysis.trials
if trial.last_result and trial.last_result.get("wall_clock_time") is not None
),
key=lambda x: x.last_result["wall_clock_time"],
)
for self._track_iter, trial in enumerate(trials):
result = trial.last_result
better = False
if result:
config = result["config"]
estimator = config.get("ml", config)["learner"]
search_state = self._search_states[estimator]
search_state.update(result, 0)
wall_time = result.get("wall_clock_time")
if wall_time is not None:
self._state.time_from_start = wall_time
self._iter_per_learner[estimator] += 1
if search_state.sample_size == self._state.data_size[0]:
if not self._fullsize_reached:
self._fullsize_reached = True
if search_state.best_loss < self._state.best_loss:
self._state.best_loss = search_state.best_loss
self._best_estimator = estimator
self._config_history[self._track_iter] = (
self._best_estimator,
config,
self._time_taken_best_iter,
)
self._trained_estimator = search_state.trained_estimator
self._best_iteration = self._track_iter
self._time_taken_best_iter = self._state.time_from_start
better = True
self._search_states[estimator].best_config = config
if better or self._log_type == "all":
self._log_trial(search_state, estimator)
|
(self)
|
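A hedged sketch of driving `_search_parallel` from the public API (requires
`pip install flaml[spark]`; swap `use_spark=True` for `use_ray=True` with
`flaml[ray]` installed, but never both):
```python
from sklearn.datasets import load_iris
from flaml import AutoML

X_train, y_train = load_iris(return_X_y=True)
automl = AutoML()
automl.fit(
    X_train, y_train,
    task="classification",
    time_budget=300,
    use_spark=True,  # spark backend; mutually exclusive with use_ray
    n_concurrent_trials=4,  # on a cluster, capped by the number of executors
)
```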
52,715 |
flaml.automl.automl
|
_search_sequential
| null |
def _search_sequential(self):
try:
from ray import __version__ as ray_version
assert ray_version >= "1.10.0"
if ray_version.startswith("1."):
from ray.tune.suggest import ConcurrencyLimiter
else:
from ray.tune.search import ConcurrencyLimiter
except (ImportError, AssertionError):
from flaml.tune.searcher.suggestion import ConcurrencyLimiter
if self._hpo_method in ("cfo", "grid"):
from flaml import CFO as SearchAlgo
elif "optuna" == self._hpo_method:
try:
from ray import __version__ as ray_version
assert ray_version >= "1.10.0"
if ray_version.startswith("1."):
from ray.tune.suggest.optuna import OptunaSearch as SearchAlgo
else:
from ray.tune.search.optuna import OptunaSearch as SearchAlgo
except (ImportError, AssertionError):
from flaml.tune.searcher.suggestion import OptunaSearch as SearchAlgo
elif "bs" == self._hpo_method:
from flaml import BlendSearch as SearchAlgo
elif "random" == self._hpo_method:
from flaml.tune.searcher import RandomSearch as SearchAlgo
elif "cfocat" == self._hpo_method:
from flaml.tune.searcher.cfo_cat import CFOCat as SearchAlgo
else:
raise NotImplementedError(
f"hpo_method={self._hpo_method} is not recognized. " "'cfo' and 'bs' are supported."
)
est_retrain_time = next_trial_time = 0
best_config_sig = None
better = True # whether we find a better model in one trial
for self._track_iter in range(self._max_iter):
if self._estimator_index is None:
estimator = self._active_estimators[0]
else:
estimator = self._select_estimator(self._active_estimators)
if not estimator:
break
logger.info(f"iteration {self._track_iter}, current learner {estimator}")
search_state = self._search_states[estimator]
self._state.time_from_start = time.time() - self._start_time_flag
time_left = self._state.time_budget - self._state.time_from_start
budget_left = (
time_left
if not self._retrain_in_budget
or better
or (not self.best_estimator)
or self._search_states[self.best_estimator].sample_size < self._state.data_size[0]
else time_left - est_retrain_time
)
if not search_state.search_alg:
search_state.training_function = partial(
AutoMLState._compute_with_config_base,
state=self._state,
estimator=estimator,
)
search_space = search_state.search_space
if self._sample:
resource_attr = "FLAML_sample_size"
min_resource = (
self._min_sample_size[estimator]
if isinstance(self._min_sample_size, dict) and estimator in self._min_sample_size
else self._min_sample_size_input
)
max_resource = self._state.data_size[0]
else:
resource_attr = min_resource = max_resource = None
learner_class = self._state.learner_classes.get(estimator)
if "grid" == self._hpo_method: # for synthetic exp only
points_to_evaluate = []
space = search_space
keys = list(space.keys())
domain0, domain1 = space[keys[0]], space[keys[1]]
for x1 in range(domain0.lower, domain0.upper + 1):
for x2 in range(domain1.lower, domain1.upper + 1):
points_to_evaluate.append(
{
keys[0]: x1,
keys[1]: x2,
}
)
self._max_iter_per_learner = len(points_to_evaluate)
low_cost_partial_config = None
else:
points_to_evaluate = search_state.init_config.copy()
low_cost_partial_config = search_state.low_cost_partial_config
time_budget_s = (
min(budget_left, self._state.train_time_limit or np.inf) if self._state.time_budget >= 0 else None
)
if self._hpo_method in ("bs", "cfo", "grid", "cfocat", "random"):
algo = SearchAlgo(
metric="val_loss",
mode="min",
space=search_space,
points_to_evaluate=points_to_evaluate,
low_cost_partial_config=low_cost_partial_config,
cat_hp_cost=search_state.cat_hp_cost,
resource_attr=resource_attr,
min_resource=min_resource,
max_resource=max_resource,
config_constraints=[(learner_class.size, "<=", self._mem_thres)],
metric_constraints=self.metric_constraints,
seed=self._seed,
allow_empty_config=True,
time_budget_s=time_budget_s,
num_samples=self._max_iter,
)
else:
# if self._hpo_method is optuna, sometimes the search space and the initial config dimension do not match
# need to remove the extra keys from the search space to be consistent with the initial config
converted_space = SearchAlgo.convert_search_space(search_space)
removed_keys = set(search_space.keys()).difference(converted_space.keys())
new_points_to_evaluate = []
for idx in range(len(points_to_evaluate)):
r = points_to_evaluate[idx].copy()
for each_key in removed_keys:
r.pop(each_key)
new_points_to_evaluate.append(r)
points_to_evaluate = new_points_to_evaluate
algo = SearchAlgo(
metric="val_loss",
mode="min",
space=search_space,
points_to_evaluate=[p for p in points_to_evaluate if len(p) == len(search_space)],
)
search_state.search_alg = ConcurrencyLimiter(algo, max_concurrent=1)
# search_state.search_alg = algo
else:
search_space = None
if self._hpo_method in ("bs", "cfo", "cfocat"):
search_state.search_alg.searcher.set_search_properties(
metric=None,
mode=None,
metric_target=self._state.best_loss,
)
start_run_time = time.time()
analysis = tune.run(
search_state.training_function,
search_alg=search_state.search_alg,
time_budget_s=time_budget_s,
verbose=max(self.verbose - 3, 0),
use_ray=False,
use_spark=False,
)
time_used = time.time() - start_run_time
better = False
if analysis.trials:
result = analysis.trials[-1].last_result
search_state.update(result, time_used=time_used)
if self._estimator_index is None:
# update init eci estimate
eci_base = search_state.init_eci
self._eci.append(search_state.estimated_cost4improvement)
for e in self.estimator_list[1:]:
self._eci.append(self._search_states[e].init_eci / eci_base * self._eci[0])
self._estimator_index = 0
min_budget = max(10 * self._eci[0], sum(self._eci))
max_budget = 10000 * self._eci[0]
if search_state.sample_size:
ratio = search_state.data_size[0] / search_state.sample_size
min_budget *= ratio
max_budget *= ratio
logger.info(
f"Estimated sufficient time budget={max_budget:.0f}s."
f" Estimated necessary time budget={min_budget:.0f}s."
)
wall_time = result.get("wall_clock_time")
if wall_time is not None:
self._state.time_from_start = wall_time
# logger.info(f"{self._search_states[estimator].sample_size}, {data_size}")
if search_state.sample_size == self._state.data_size[0]:
self._iter_per_learner_fullsize[estimator] += 1
self._fullsize_reached = True
self._iter_per_learner[estimator] += 1
if search_state.best_loss < self._state.best_loss:
best_config_sig = estimator + search_state.get_hist_config_sig(
self.data_size_full, search_state.best_config
)
self._state.best_loss = search_state.best_loss
self._best_estimator = estimator
est_retrain_time = (
search_state.est_retrain_time(self.data_size_full)
if (best_config_sig not in self._retrained_config)
else 0
)
self._config_history[self._track_iter] = (
estimator,
search_state.best_config,
self._state.time_from_start,
)
if self._trained_estimator:
self._trained_estimator.cleanup()
del self._trained_estimator
self._trained_estimator = None
if not self._state.retrain_final:
self._trained_estimator = search_state.trained_estimator
self._best_iteration = self._track_iter
self._time_taken_best_iter = self._state.time_from_start
better = True
next_trial_time = search_state.time2eval_best
if (
search_state.trained_estimator
and not self._state.model_history
and search_state.trained_estimator != self._trained_estimator
):
search_state.trained_estimator.cleanup()
if better or self._log_type == "all":
self._log_trial(search_state, estimator)
logger.info(
" at {:.1f}s,\testimator {}'s best error={:.4f},\tbest estimator {}'s best error={:.4f}".format(
self._state.time_from_start,
estimator,
search_state.best_loss,
self._best_estimator,
self._state.best_loss,
)
)
if (
self._hpo_method in ("cfo", "bs")
and all(
state.search_alg and state.search_alg.searcher.is_ls_ever_converged
for state in self._search_states.values()
)
and (self._state.time_from_start > self._warn_threshold * self._time_taken_best_iter)
):
logger.warning(
"All estimator hyperparameters local search has "
"converged at least once, and the total search time "
f"exceeds {self._warn_threshold} times the time taken "
"to find the best model."
)
if self._early_stop:
logger.warning("Stopping search as early_stop is set to True.")
break
self._warn_threshold *= 10
else:
logger.info(f"stop trying learner {estimator}")
if self._estimator_index is not None:
self._active_estimators.remove(estimator)
self._estimator_index -= 1
search_state.search_alg.searcher._is_ls_ever_converged = True
if (
self._retrain_in_budget
and best_config_sig
and est_retrain_time
and not better
and self._search_states[self._best_estimator].sample_size == self._state.data_size[0]
and (
est_retrain_time
<= self._state.time_budget - self._state.time_from_start
<= est_retrain_time + next_trial_time
)
):
state = self._search_states[self._best_estimator]
self._trained_estimator, retrain_time = self._state._train_with_config(
self._best_estimator,
state.best_config,
self.data_size_full,
)
logger.info("retrain {} for {:.1f}s".format(self._best_estimator, retrain_time))
self._retrained_config[best_config_sig] = state.best_config_train_time = retrain_time
est_retrain_time = 0
self._state.time_from_start = time.time() - self._start_time_flag
if self._state.time_from_start >= self._state.time_budget >= 0 or not self._active_estimators:
break
if self._ensemble and self._best_estimator:
time_left = self._state.time_budget - self._state.time_from_start
time_ensemble = self._search_states[self._best_estimator].time2eval_best
if time_left < time_ensemble < 2 * time_left:
break
|
(self)
|
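The searcher import chain at the top of `_search_sequential` is selected by
`hpo_method`. A hedged sketch forcing BlendSearch with early stopping in a
sequential run:
```python
from sklearn.datasets import load_iris
from flaml import AutoML

X_train, y_train = load_iris(return_X_y=True)
automl = AutoML()
automl.fit(
    X_train, y_train,
    task="classification",
    time_budget=60,
    hpo_method="bs",  # BlendSearch; "auto" resolves to CFO for sequential search
    early_stop=True,  # stop once all local searches have converged and the budget is far exceeded
)
```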
52,716 |
flaml.automl.automl
|
_select_estimator
| null |
def _select_estimator(self, estimator_list):
if self._learner_selector == "roundrobin":
self._estimator_index += 1
if self._estimator_index == len(estimator_list):
self._estimator_index = 0
return estimator_list[self._estimator_index]
min_estimated_cost, selected = np.inf, None
inv = []
untried_exists = False
for i, estimator in enumerate(estimator_list):
if estimator in self._search_states and (
self._search_states[estimator].sample_size
): # sample_size=None meaning no result
search_state = self._search_states[estimator]
if (
self._state.time_budget >= 0
and self._search_states[estimator].time2eval_best
> self._state.time_budget - self._state.time_from_start
or self._iter_per_learner_fullsize[estimator] >= self._max_iter_per_learner
):
inv.append(0)
continue
estimated_cost = search_state.estimated_cost4improvement
if search_state.sample_size < self._state.data_size[0] and self._state.time_budget >= 0:
estimated_cost = min(
estimated_cost,
search_state.time2eval_best
* min(
SAMPLE_MULTIPLY_FACTOR,
self._state.data_size[0] / search_state.sample_size,
),
)
gap = search_state.best_loss - self._state.best_loss
if gap > 0 and not self._ensemble:
delta_loss = (search_state.best_loss_old - search_state.best_loss) or search_state.best_loss
delta_time = (search_state.total_time_used - search_state.time_best_found_old) or 1e-10
speed = delta_loss / delta_time
if speed:
estimated_cost = max(2 * gap / speed, estimated_cost)
estimated_cost = estimated_cost or 1e-9
inv.append(1 / estimated_cost)
else:
estimated_cost = self._eci[i]
inv.append(0)
untried_exists = True
if estimated_cost < min_estimated_cost:
min_estimated_cost = estimated_cost
selected = estimator
if untried_exists or not selected:
state = self._search_states.get(selected)
if not (state and state.sample_size):
return selected
s = sum(inv)
p = self._random.rand()
q = 0
for i in range(len(inv)):
if inv[i]:
q += inv[i] / s
if p < q:
return estimator_list[i]
|
(self, estimator_list)
|
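The cost-based policy in `_select_estimator` is the default 'sample' selector; the
'roundrobin' branch at the top cycles learners uniformly instead. A hedged sketch:
```python
from sklearn.datasets import load_iris
from flaml import AutoML

X_train, y_train = load_iris(return_X_y=True)
automl = AutoML()
automl.fit(
    X_train, y_train,
    task="classification",
    time_budget=60,
    learner_selector="roundrobin",  # uniform cycling instead of cost-based choice
)
```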
52,717 |
flaml.automl.automl
|
add_learner
|
Add a customized learner.
Args:
learner_name: A string of the learner's name.
learner_class: A subclass of flaml.automl.model.BaseEstimator.
|
def add_learner(self, learner_name, learner_class):
"""Add a customized learner.
Args:
learner_name: A string of the learner's name.
learner_class: A subclass of flaml.automl.model.BaseEstimator.
"""
self._state.learner_classes[learner_name] = learner_class
|
(self, learner_name, learner_class)
|
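A hedged sketch of registering a custom learner; wrapping sklearn's Ridge and the
one-parameter search space are illustrative choices, following the
flaml.automl.model.SKLearnEstimator subclassing pattern:
```python
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from flaml import AutoML, tune
from flaml.automl.model import SKLearnEstimator


class MyRidge(SKLearnEstimator):
    # illustrative learner wrapping sklearn's Ridge regressor
    def __init__(self, task="regression", **config):
        super().__init__(task, **config)
        self.estimator_class = Ridge

    @classmethod
    def search_space(cls, data_size, task):
        # one tunable hyperparameter with a low-cost initial value
        return {"alpha": {"domain": tune.loguniform(1e-3, 10.0), "init_value": 1.0}}


X_train, y_train = make_regression(n_samples=200, n_features=5, random_state=0)
automl = AutoML()
automl.add_learner("my_ridge", MyRidge)
automl.fit(X_train, y_train, task="regression", estimator_list=["my_ridge"], time_budget=10)
```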
52,718 |
flaml.automl.automl
|
best_model_for_estimator
|
Return the best model found for a particular estimator.
Args:
estimator_name: a str of the estimator's name.
Returns:
An object storing the best model for estimator_name.
If `model_history` was set to False during fit(), then the returned model
is untrained unless estimator_name is the best estimator.
If `model_history` was set to True, then the returned model is trained.
|
def best_model_for_estimator(self, estimator_name: str):
"""Return the best model found for a particular estimator.
Args:
estimator_name: a str of the estimator's name.
Returns:
An object storing the best model for estimator_name.
If `model_history` was set to False during fit(), then the returned model
is untrained unless estimator_name is the best estimator.
If `model_history` was set to True, then the returned model is trained.
"""
state = self._search_states.get(estimator_name)
return state and getattr(state, "trained_estimator", None)
|
(self, estimator_name: str)
|
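A hedged usage sketch (it assumes 'lgbm' is among the searched estimators; setting
`model_history=True` keeps each estimator's best model trained):
```python
from sklearn.datasets import load_iris
from flaml import AutoML

X_train, y_train = load_iris(return_X_y=True)
automl = AutoML()
automl.fit(
    X_train, y_train,
    task="classification",
    time_budget=60,
    estimator_list=["lgbm", "xgboost"],
    model_history=True,  # keep each estimator's best model trained
)
lgbm_best = automl.best_model_for_estimator("lgbm")
print(lgbm_best)  # trained model, or None if the estimator was never tried
```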
52,719 |
flaml.automl.automl
|
fit
|
Find a model for a given task.
Args:
X_train: A numpy array or a pandas dataframe of training data in
        shape (n, m). For time series forecast tasks, the first column of X_train
must be the timestamp column (datetime type). Other columns in
the dataframe are assumed to be exogenous variables (categorical or numeric).
When using ray, X_train can be a ray.ObjectRef.
y_train: A numpy array or a pandas series of labels in shape (n, ).
dataframe: A dataframe of training data including label column.
For time series forecast tasks, dataframe must be specified and must have
at least two columns, timestamp and label, where the first
column is the timestamp column (datetime type). Other columns in
the dataframe are assumed to be exogenous variables (categorical or numeric).
When using ray, dataframe can be a ray.ObjectRef.
    label: A str of the label column name, e.g., 'label';
Note: If X_train and y_train are provided,
dataframe and label are ignored;
If not, dataframe and label must be provided.
metric: A string of the metric name or a function,
e.g., 'accuracy', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo', 'roc_auc_weighted',
'roc_auc_ovo_weighted', 'roc_auc_ovr_weighted', 'f1', 'micro_f1', 'macro_f1',
'log_loss', 'mae', 'mse', 'r2', 'mape'. Default is 'auto'.
If passing a customized metric function, the function needs to
have the following input arguments:
```python
def custom_metric(
X_test, y_test, estimator, labels,
X_train, y_train, weight_test=None, weight_train=None,
config=None, groups_test=None, groups_train=None,
):
return metric_to_minimize, metrics_to_log
```
which returns a float number as the minimization objective,
and a dictionary as the metrics to log. E.g.,
```python
def custom_metric(
X_val, y_val, estimator, labels,
X_train, y_train, weight_val=None, weight_train=None,
*args,
):
from sklearn.metrics import log_loss
import time
start = time.time()
y_pred = estimator.predict_proba(X_val)
pred_time = (time.time() - start) / len(X_val)
val_loss = log_loss(y_val, y_pred, labels=labels, sample_weight=weight_val)
y_pred = estimator.predict_proba(X_train)
train_loss = log_loss(y_train, y_pred, labels=labels, sample_weight=weight_train)
alpha = 0.5
return val_loss * (1 + alpha) - alpha * train_loss, {
"val_loss": val_loss,
"train_loss": train_loss,
"pred_time": pred_time,
}
```
task: A string of the task type, e.g.,
'classification', 'regression', 'ts_forecast_regression',
'ts_forecast_classification', 'rank', 'seq-classification',
        'seq-regression', 'summarization', or an instance of the Task class.
n_jobs: An integer of the number of threads for training | default=-1.
Use all available resources when n_jobs == -1.
log_file_name: A string of the log file name | default="". To disable logging,
set it to be an empty string "".
estimator_list: A list of strings for estimator names, or 'auto'.
e.g., ```['lgbm', 'xgboost', 'xgb_limitdepth', 'catboost', 'rf', 'extra_tree']```.
time_budget: A float number of the time budget in seconds.
Use -1 if no time limit.
max_iter: An integer of the maximal number of iterations.
NOTE: when both time_budget and max_iter are unspecified,
only one model will be trained per estimator.
sample: A boolean of whether to sample the training data during
search.
ensemble: boolean or dict | default=False. Whether to perform
ensemble after search. Can be a dict with keys 'passthrough'
and 'final_estimator' to specify the passthrough and
final_estimator in the stacker. The dict can also contain
'n_jobs' as the key to specify the number of jobs for the stacker.
eval_method: A string of resampling strategy, one of
['auto', 'cv', 'holdout'].
    split_ratio: A float of the validation data percentage for holdout.
    n_splits: An integer of the number of folds for cross-validation.
log_type: A string of the log type, one of
['better', 'all'].
        'better' only logs configs with better loss than previous iters
'all' logs all the tried configs.
model_history: A boolean of whether to keep the trained best
model per estimator. Make sure memory is large enough if setting to True.
        Default value is False: best_model_for_estimator would return an
        untrained model for a non-best learner.
log_training_metric: A boolean of whether to log the training
metric for each model.
mem_thres: A float of the memory size constraint in bytes.
pred_time_limit: A float of the prediction latency constraint in seconds.
It refers to the average prediction time per row in validation data.
train_time_limit: None or a float of the training time constraint in seconds.
X_val: None or a numpy array or a pandas dataframe of validation data.
y_val: None or a numpy array or a pandas series of validation labels.
sample_weight_val: None or a numpy array of the sample weight of
validation data of the same shape as y_val.
groups_val: None or array-like | group labels (with matching length
to y_val) or group counts (with sum equal to length of y_val)
for validation data. Need to be consistent with groups.
groups: None or array-like | Group labels (with matching length to
        y_train) or group counts (with sum equal to length of y_train)
for training data.
verbose: int, default=3 | Controls the verbosity, higher means more
messages.
retrain_full: bool or str, default=True | whether to retrain the
selected model on the full training data when using holdout.
True - retrain only after search finishes; False - no retraining;
'budget' - do best effort to retrain without violating the time
budget.
split_type: str or splitter object, default="auto" | the data split type.
* A valid splitter object is an instance of a derived class of scikit-learn
[KFold](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html#sklearn.model_selection.KFold)
        and has ``split`` and ``get_n_splits`` methods with the same signatures.
Set eval_method to "cv" to use the splitter object.
* Valid str options depend on different tasks.
For classification tasks, valid choices are
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
"auto" -> uniform.
For time series forecast tasks, must be "auto" or 'time'.
For ranking task, must be "auto" or 'group'.
hpo_method: str, default="auto" | The hyperparameter
optimization method. By default, CFO is used for sequential
search and BlendSearch is used for parallel search.
No need to set when using flaml's default search space or using
a simple customized search space. When set to 'bs', BlendSearch
is used. BlendSearch can be tried when the search space is
complex, for example, containing multiple disjoint, discontinuous
subspaces. When set to 'random', random search is used.
starting_points: A dictionary or a str to specify the starting hyperparameter
config for the estimators | default="data".
If str:
- if "data", use data-dependent defaults;
- if "data:path" use data-dependent defaults which are stored at path;
- if "static", use data-independent defaults.
If dict, keys are the name of the estimators, and values are the starting
        hyperparameter configurations for the corresponding estimators.
        The value can be a single hyperparameter configuration dict or a list
        of hyperparameter configuration dicts.
In the following code example, we get starting_points from the
`automl` object and use them in the `new_automl` object.
e.g.,
```python
from flaml import AutoML
automl = AutoML()
X_train, y_train = load_iris(return_X_y=True)
automl.fit(X_train, y_train)
starting_points = automl.best_config_per_estimator
new_automl = AutoML()
new_automl.fit(X_train, y_train, starting_points=starting_points)
```
seed: int or None, default=None | The random seed for hpo.
n_concurrent_trials: [In preview] int, default=1 | The number of
        concurrent trials. When n_concurrent_trials > 1, flaml performs
[parallel tuning](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning)
and installation of ray or spark is required: `pip install flaml[ray]`
or `pip install flaml[spark]`. Please check
[here](https://spark.apache.org/docs/latest/api/python/getting_started/install.html)
for more details about installing Spark.
keep_search_state: boolean, default=False | Whether to keep data needed
for model search after fit(). By default the state is deleted for
space saving.
preserve_checkpoint: boolean, default=True | Whether to preserve the saved checkpoint
on disk when deleting automl. By default the checkpoint is preserved.
early_stop: boolean, default=False | Whether to stop early if the
search is considered to converge.
    force_cancel: boolean, default=False | Whether to forcibly cancel the PySpark job if it exceeds the time budget.
append_log: boolean, default=False | Whether to directly append the log
records to the input log file if it exists.
auto_augment: boolean, default=True | Whether to automatically
augment rare classes.
min_sample_size: int, default=MIN_SAMPLE_TRAIN | the minimal sample
size when sample=True.
use_ray: boolean or dict.
If boolean: default=False | Whether to use ray to run the training
in separate processes. This can be used to prevent OOM for large
datasets, but will incur more overhead in time.
        If dict: the dict contains the keyword arguments to be passed to
[ray.tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html).
use_spark: boolean, default=False | Whether to use spark to run the training
in parallel spark jobs. This can be used to accelerate training on large models
and large datasets, but will incur more overhead in time and thus slow down
training in some cases.
free_mem_ratio: float between 0 and 1, default=0. The free memory ratio to keep during training.
metric_constraints: list, default=[] | The list of metric constraints.
Each element in this list is a 3-tuple, which shall be expressed
in the following format: the first element of the 3-tuple is the name of the
metric, the second element is the inequality sign chosen from ">=" and "<=",
and the third element is the constraint value. E.g., `('precision', '>=', 0.9)`.
Note that all the metric names in metric_constraints need to be reported via
the metrics_to_log dictionary returned by a customized metric function.
        The customized metric function shall be provided via the `metric` keyword argument
        of the fit() function or the automl constructor.
        Find examples in this [test](https://github.com/microsoft/FLAML/tree/main/test/automl/test_constraints.py).
        If `pred_time_limit` is provided as one of the keyword arguments to the fit() function or
        the automl constructor, flaml will automatically (and under the hood)
        add it as an additional element in the metric_constraints. Essentially, `pred_time_limit`
        specifies a constraint on the prediction latency in seconds.
    custom_hp: dict, default=None | The custom search space specified by the user.
        Each key is an estimator name, each value is a dict of the custom search space for that estimator. Note that the
        domain of the custom search space can either be a constant or a sample.Domain object.
```python
custom_hp = {
"transformer_ms": {
"model_path": {
"domain": "albert-base-v2",
},
"learning_rate": {
"domain": tune.choice([1e-4, 1e-5]),
}
}
}
```
time_col: for a time series task, name of the column containing the timestamps. If not
        provided, defaults to the first column of X_train/X_val.
    cv_score_agg_func: customized cross-validation score aggregation function. Defaults to averaging metrics across folds. If specified, this function needs to
have the following input arguments:
* val_loss_folds: list of floats, the loss scores of each fold;
* log_metrics_folds: list of dicts/floats, the metrics of each fold to log.
        This function should return the final aggregate result over all folds: a float number as the minimization objective, and a dictionary as the metrics to log (or None).
E.g.,
```python
def cv_score_agg_func(val_loss_folds, log_metrics_folds):
metric_to_minimize = sum(val_loss_folds)/len(val_loss_folds)
metrics_to_log = None
for single_fold in log_metrics_folds:
if metrics_to_log is None:
metrics_to_log = single_fold
elif isinstance(metrics_to_log, dict):
metrics_to_log = {k: metrics_to_log[k] + v for k, v in single_fold.items()}
else:
metrics_to_log += single_fold
if metrics_to_log:
n = len(val_loss_folds)
metrics_to_log = (
{k: v / n for k, v in metrics_to_log.items()}
if isinstance(metrics_to_log, dict)
else metrics_to_log / n
)
return metric_to_minimize, metrics_to_log
```
    skip_transform: boolean, default=False | Whether to skip pre-processing data prior to modeling.
mlflow_logging: boolean, default=None | Whether to log the training results to mlflow.
Default value is None, which means the logging decision is made based on
AutoML.__init__'s mlflow_logging argument.
This requires mlflow to be installed and to have an active mlflow run.
FLAML will create nested runs.
    fit_kwargs_by_estimator: dict, default=None | The user-specified keyword arguments, grouped by estimator name.
For TransformersEstimator, available fit_kwargs can be found from
[TrainingArgumentsForAuto](nlp/huggingface/training_args).
e.g.,
```python
fit_kwargs_by_estimator = {
"transformer": {
"output_dir": "test/data/output/",
"fp16": False,
},
"tft": {
"max_encoder_length": 1,
"min_encoder_length": 1,
"static_categoricals": [],
"static_reals": [],
"time_varying_known_categoricals": [],
"time_varying_known_reals": [],
"time_varying_unknown_categoricals": [],
"time_varying_unknown_reals": [],
"variable_groups": {},
"lags": {},
}
}
```
    **fit_kwargs: Other keyword arguments to pass to the fit() function of
the searched learners, such as sample_weight. Below are a few examples of
estimator-specific parameters:
period: int | forecast horizon for all time series forecast tasks.
gpu_per_trial: float, default = 0 | A float of the number of gpus per trial,
only used by TransformersEstimator, XGBoostSklearnEstimator, and
TemporalFusionTransformerEstimator.
group_ids: list of strings of column names identifying a time series, only
used by TemporalFusionTransformerEstimator, required for
'ts_forecast_panel' task. `group_ids` is a parameter for TimeSeriesDataSet object
from PyTorchForecasting.
For other parameters to describe your dataset, refer to
[TimeSeriesDataSet PyTorchForecasting](https://pytorch-forecasting.readthedocs.io/en/stable/api/pytorch_forecasting.data.timeseries.TimeSeriesDataSet.html).
To specify your variables, use `static_categoricals`, `static_reals`,
`time_varying_known_categoricals`, `time_varying_known_reals`,
`time_varying_unknown_categoricals`, `time_varying_unknown_reals`,
`variable_groups`. To provide more information on your data, use
`max_encoder_length`, `min_encoder_length`, `lags`.
log_dir: str, default = "lightning_logs" | Folder into which to log results
for tensorboard, only used by TemporalFusionTransformerEstimator.
max_epochs: int, default = 20 | Maximum number of epochs to run training,
only used by TemporalFusionTransformerEstimator.
batch_size: int, default = 64 | Batch size for training model, only
used by TemporalFusionTransformerEstimator.
|
def fit(
self,
X_train=None,
y_train=None,
dataframe=None,
label=None,
metric=None,
task: Optional[Union[str, Task]] = None,
n_jobs=None,
# gpu_per_trial=0,
log_file_name=None,
estimator_list=None,
time_budget=None,
max_iter=None,
sample=None,
ensemble=None,
eval_method=None,
log_type=None,
model_history=None,
split_ratio=None,
n_splits=None,
log_training_metric=None,
mem_thres=None,
pred_time_limit=None,
train_time_limit=None,
X_val=None,
y_val=None,
sample_weight_val=None,
groups_val=None,
groups=None,
verbose=None,
retrain_full=None,
split_type=None,
learner_selector=None,
hpo_method=None,
starting_points=None,
seed=None,
n_concurrent_trials=None,
keep_search_state=None,
preserve_checkpoint=True,
early_stop=None,
force_cancel=None,
append_log=None,
auto_augment=None,
min_sample_size=None,
use_ray=None,
use_spark=None,
free_mem_ratio=0,
metric_constraints=None,
custom_hp=None,
time_col=None,
cv_score_agg_func=None,
skip_transform=None,
mlflow_logging=None,
fit_kwargs_by_estimator=None,
**fit_kwargs,
):
"""Find a model for a given task.
Args:
X_train: A numpy array or a pandas dataframe of training data in
                shape (n, m). For time series forecast tasks, the first column of X_train
must be the timestamp column (datetime type). Other columns in
the dataframe are assumed to be exogenous variables (categorical or numeric).
When using ray, X_train can be a ray.ObjectRef.
y_train: A numpy array or a pandas series of labels in shape (n, ).
dataframe: A dataframe of training data including label column.
For time series forecast tasks, dataframe must be specified and must have
at least two columns, timestamp and label, where the first
column is the timestamp column (datetime type). Other columns in
the dataframe are assumed to be exogenous variables (categorical or numeric).
When using ray, dataframe can be a ray.ObjectRef.
            label: A str of the label column name, e.g., 'label';
Note: If X_train and y_train are provided,
dataframe and label are ignored;
If not, dataframe and label must be provided.
metric: A string of the metric name or a function,
e.g., 'accuracy', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo', 'roc_auc_weighted',
'roc_auc_ovo_weighted', 'roc_auc_ovr_weighted', 'f1', 'micro_f1', 'macro_f1',
'log_loss', 'mae', 'mse', 'r2', 'mape'. Default is 'auto'.
If passing a customized metric function, the function needs to
have the following input arguments:
```python
def custom_metric(
X_test, y_test, estimator, labels,
X_train, y_train, weight_test=None, weight_train=None,
config=None, groups_test=None, groups_train=None,
):
return metric_to_minimize, metrics_to_log
```
which returns a float number as the minimization objective,
and a dictionary as the metrics to log. E.g.,
```python
def custom_metric(
X_val, y_val, estimator, labels,
X_train, y_train, weight_val=None, weight_train=None,
*args,
):
from sklearn.metrics import log_loss
import time
start = time.time()
y_pred = estimator.predict_proba(X_val)
pred_time = (time.time() - start) / len(X_val)
val_loss = log_loss(y_val, y_pred, labels=labels, sample_weight=weight_val)
y_pred = estimator.predict_proba(X_train)
train_loss = log_loss(y_train, y_pred, labels=labels, sample_weight=weight_train)
alpha = 0.5
return val_loss * (1 + alpha) - alpha * train_loss, {
"val_loss": val_loss,
"train_loss": train_loss,
"pred_time": pred_time,
}
```
task: A string of the task type, e.g.,
'classification', 'regression', 'ts_forecast_regression',
'ts_forecast_classification', 'rank', 'seq-classification',
                'seq-regression', 'summarization', or an instance of the Task class.
n_jobs: An integer of the number of threads for training | default=-1.
Use all available resources when n_jobs == -1.
log_file_name: A string of the log file name | default="". To disable logging,
set it to be an empty string "".
estimator_list: A list of strings for estimator names, or 'auto'.
e.g., ```['lgbm', 'xgboost', 'xgb_limitdepth', 'catboost', 'rf', 'extra_tree']```.
time_budget: A float number of the time budget in seconds.
Use -1 if no time limit.
max_iter: An integer of the maximal number of iterations.
NOTE: when both time_budget and max_iter are unspecified,
only one model will be trained per estimator.
sample: A boolean of whether to sample the training data during
search.
ensemble: boolean or dict | default=False. Whether to perform
ensemble after search. Can be a dict with keys 'passthrough'
and 'final_estimator' to specify the passthrough and
final_estimator in the stacker. The dict can also contain
'n_jobs' as the key to specify the number of jobs for the stacker.
eval_method: A string of resampling strategy, one of
['auto', 'cv', 'holdout'].
            split_ratio: A float of the validation data percentage for holdout.
            n_splits: An integer of the number of folds for cross-validation.
log_type: A string of the log type, one of
['better', 'all'].
                'better' only logs configs with better loss than previous iters
'all' logs all the tried configs.
model_history: A boolean of whether to keep the trained best
model per estimator. Make sure memory is large enough if setting to True.
                Default value is False: best_model_for_estimator would return an
                untrained model for a non-best learner.
log_training_metric: A boolean of whether to log the training
metric for each model.
mem_thres: A float of the memory size constraint in bytes.
pred_time_limit: A float of the prediction latency constraint in seconds.
It refers to the average prediction time per row in validation data.
train_time_limit: None or a float of the training time constraint in seconds.
X_val: None or a numpy array or a pandas dataframe of validation data.
y_val: None or a numpy array or a pandas series of validation labels.
sample_weight_val: None or a numpy array of the sample weight of
validation data of the same shape as y_val.
groups_val: None or array-like | group labels (with matching length
to y_val) or group counts (with sum equal to length of y_val)
for validation data. Need to be consistent with groups.
groups: None or array-like | Group labels (with matching length to
                y_train) or group counts (with sum equal to length of y_train)
for training data.
verbose: int, default=3 | Controls the verbosity, higher means more
messages.
retrain_full: bool or str, default=True | whether to retrain the
selected model on the full training data when using holdout.
True - retrain only after search finishes; False - no retraining;
'budget' - do best effort to retrain without violating the time
budget.
split_type: str or splitter object, default="auto" | the data split type.
* A valid splitter object is an instance of a derived class of scikit-learn
[KFold](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html#sklearn.model_selection.KFold)
                and has ``split`` and ``get_n_splits`` methods with the same signatures.
Set eval_method to "cv" to use the splitter object.
* Valid str options depend on different tasks.
For classification tasks, valid choices are
["auto", 'stratified', 'uniform', 'time', 'group']. "auto" -> stratified.
For regression tasks, valid choices are ["auto", 'uniform', 'time'].
"auto" -> uniform.
For time series forecast tasks, must be "auto" or 'time'.
For ranking task, must be "auto" or 'group'.
hpo_method: str, default="auto" | The hyperparameter
optimization method. By default, CFO is used for sequential
search and BlendSearch is used for parallel search.
No need to set when using flaml's default search space or using
a simple customized search space. When set to 'bs', BlendSearch
is used. BlendSearch can be tried when the search space is
complex, for example, containing multiple disjoint, discontinuous
subspaces. When set to 'random', random search is used.
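e.g., to force BlendSearch for a complex custom search space (a sketch):
```python
automl.fit(X_train, y_train, hpo_method="bs")
```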
starting_points: A dictionary or a str to specify the starting hyperparameter
config for the estimators | default="data".
If str:
- if "data", use data-dependent defaults;
- if "data:path" use data-dependent defaults which are stored at path;
- if "static", use data-independent defaults.
If dict, keys are the names of the estimators, and values are the starting
hyperparameter configurations for the corresponding estimators.
The value can be a single hyperparameter configuration dict or a list
of hyperparameter configuration dicts.
In the following code example, we get starting_points from the
`automl` object and use them in the `new_automl` object.
e.g.,
```python
from flaml import AutoML
from sklearn.datasets import load_iris

automl = AutoML()
X_train, y_train = load_iris(return_X_y=True)
automl.fit(X_train, y_train)
starting_points = automl.best_config_per_estimator
new_automl = AutoML()
new_automl.fit(X_train, y_train, starting_points=starting_points)
```
seed: int or None, default=None | The random seed for hpo.
n_concurrent_trials: [In preview] int, default=1 | The number of
concurrent trials. When n_concurrent_trials > 1, flaml performs
[parallel tuning](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning)
and installation of ray or spark is required: `pip install flaml[ray]`
or `pip install flaml[spark]`. Please check
[here](https://spark.apache.org/docs/latest/api/python/getting_started/install.html)
for more details about installing Spark.
keep_search_state: boolean, default=False | Whether to keep data needed
for model search after fit(). By default the state is deleted for
space saving.
preserve_checkpoint: boolean, default=True | Whether to preserve the saved checkpoint
on disk when deleting automl. By default the checkpoint is preserved.
early_stop: boolean, default=False | Whether to stop early if the
search is considered to converge.
force_cancel: boolean, default=False | Whether to forcibly cancel the PySpark job if it runs over the time budget.
append_log: boolean, default=False | Whether to directly append the log
records to the input log file if it exists.
auto_augment: boolean, default=True | Whether to automatically
augment rare classes.
min_sample_size: int, default=MIN_SAMPLE_TRAIN | the minimal sample
size when sample=True.
use_ray: boolean or dict.
If boolean: default=False | Whether to use ray to run the training
in separate processes. This can be used to prevent OOM for large
datasets, but will incur more overhead in time.
If dict: the dict contains the keywords arguments to be passed to
[ray.tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html).
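e.g., a sketch of the dict form; `local_dir` is one example of a
ray.tune.run keyword argument (availability depends on the installed
Ray version):
```python
automl.fit(
    X_train,
    y_train,
    n_concurrent_trials=2,
    use_ray={"local_dir": "ray_results/"},
)
```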
use_spark: boolean, default=False | Whether to use spark to run the training
in parallel spark jobs. This can be used to accelerate training on large models
and large datasets, but will incur more overhead in time and thus slow down
training in some cases.
free_mem_ratio: float between 0 and 1, default=0. The free memory ratio to keep during training.
metric_constraints: list, default=[] | The list of metric constraints.
Each element in this list is a 3-tuple, which shall be expressed
in the following format: the first element of the 3-tuple is the name of the
metric, the second element is the inequality sign chosen from ">=" and "<=",
and the third element is the constraint value. E.g., `('precision', '>=', 0.9)`.
Note that all the metric names in metric_constraints need to be reported via
the metrics_to_log dictionary returned by a customized metric function.
The customized metric function shall be provided via the `metric` key word argument
of the fit() function or the automl constructor.
Find examples in this [test](https://github.com/microsoft/FLAML/tree/main/test/automl/test_constraints.py).
If `pred_time_limit` is provided as one of the keyword arguments to the fit() function or
the automl constructor, flaml will automatically (and under the hood)
add it as an additional element in metric_constraints. Essentially `pred_time_limit`
specifies a constraint on the prediction latency in seconds.
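e.g., a sketch assuming a customized metric function `custom_metric`
that reports 'precision' in its metrics_to_log dictionary:
```python
automl.fit(
    X_train,
    y_train,
    metric=custom_metric,  # assumed to report 'precision' via metrics_to_log
    metric_constraints=[("precision", ">=", 0.9)],
)
```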
custom_hp: dict, default=None | The custom search space specified by the user.
Each key is the estimator name, each value is a dict of the custom search space for that estimator. Notice the
domain of the custom search space can either be a value or a sample.Domain object.
```python
from flaml import tune

custom_hp = {
"transformer_ms": {
"model_path": {
"domain": "albert-base-v2",
},
"learning_rate": {
"domain": tune.choice([1e-4, 1e-5]),
}
}
}
```
time_col: for a time series task, name of the column containing the timestamps. If not
provided, defaults to the first column of X_train/X_val
cv_score_agg_func: customized cross-validation score aggregation function. Defaults to averaging metrics across folds. If specified, this function needs to
have the following input arguments:
* val_loss_folds: list of floats, the loss scores of each fold;
* log_metrics_folds: list of dicts/floats, the metrics of each fold to log.
This function should return the final aggregate result of all folds: a float number of the minimization objective, and a dictionary as the metrics to log, or None.
E.g.,
```python
def cv_score_agg_func(val_loss_folds, log_metrics_folds):
metric_to_minimize = sum(val_loss_folds)/len(val_loss_folds)
metrics_to_log = None
for single_fold in log_metrics_folds:
if metrics_to_log is None:
metrics_to_log = single_fold
elif isinstance(metrics_to_log, dict):
metrics_to_log = {k: metrics_to_log[k] + v for k, v in single_fold.items()}
else:
metrics_to_log += single_fold
if metrics_to_log:
n = len(val_loss_folds)
metrics_to_log = (
{k: v / n for k, v in metrics_to_log.items()}
if isinstance(metrics_to_log, dict)
else metrics_to_log / n
)
return metric_to_minimize, metrics_to_log
```
skip_transform: boolean, default=False | Whether to skip pre-processing the data prior to modeling.
mlflow_logging: boolean, default=None | Whether to log the training results to mlflow.
Default value is None, which means the logging decision is made based on
AutoML.__init__'s mlflow_logging argument.
This requires mlflow to be installed and to have an active mlflow run.
FLAML will create nested runs.
fit_kwargs_by_estimator: dict, default=None | The user-specified keyword arguments, grouped by estimator name.
For TransformersEstimator, available fit_kwargs can be found from
[TrainingArgumentsForAuto](nlp/huggingface/training_args).
e.g.,
```python
fit_kwargs_by_estimator = {
"transformer": {
"output_dir": "test/data/output/",
"fp16": False,
},
"tft": {
"max_encoder_length": 1,
"min_encoder_length": 1,
"static_categoricals": [],
"static_reals": [],
"time_varying_known_categoricals": [],
"time_varying_known_reals": [],
"time_varying_unknown_categoricals": [],
"time_varying_unknown_reals": [],
"variable_groups": {},
"lags": {},
}
}
```
**fit_kwargs: Other keyword arguments to pass to the fit() function of
the searched learners, such as sample_weight. Below are a few examples of
estimator-specific parameters:
period: int | forecast horizon for all time series forecast tasks.
gpu_per_trial: float, default = 0 | A float of the number of gpus per trial,
only used by TransformersEstimator, XGBoostSklearnEstimator, and
TemporalFusionTransformerEstimator.
group_ids: list of strings of column names identifying a time series, only
used by TemporalFusionTransformerEstimator, required for
'ts_forecast_panel' task. `group_ids` is a parameter for TimeSeriesDataSet object
from PyTorchForecasting.
For other parameters to describe your dataset, refer to
[TimeSeriesDataSet PyTorchForecasting](https://pytorch-forecasting.readthedocs.io/en/stable/api/pytorch_forecasting.data.timeseries.TimeSeriesDataSet.html).
To specify your variables, use `static_categoricals`, `static_reals`,
`time_varying_known_categoricals`, `time_varying_known_reals`,
`time_varying_unknown_categoricals`, `time_varying_unknown_reals`,
`variable_groups`. To provide more information on your data, use
`max_encoder_length`, `min_encoder_length`, `lags`.
log_dir: str, default = "lightning_logs" | Folder into which to log results
for tensorboard, only used by TemporalFusionTransformerEstimator.
max_epochs: int, default = 20 | Maximum number of epochs to run training,
only used by TemporalFusionTransformerEstimator.
batch_size: int, default = 64 | Batch size for training model, only
used by TemporalFusionTransformerEstimator.
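e.g., a sketch of passing an estimator-specific argument through
**fit_kwargs (the data and horizon are illustrative):
```python
# period is the forecast horizon for time series forecast tasks.
automl.fit(X_train, y_train, task="ts_forecast", period=12)
```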
"""
self._state._start_time_flag = self._start_time_flag = time.time()
task = task or self._settings.get("task")
if isinstance(task, str):
task = task_factory(task, X_train, y_train)
self._state.task = task
self._state.task.time_col = time_col
self._estimator_type = "classifier" if task.is_classification() else "regressor"
time_budget = time_budget or self._settings.get("time_budget")
n_jobs = n_jobs or self._settings.get("n_jobs")
gpu_per_trial = fit_kwargs.get("gpu_per_trial", 0)
eval_method = eval_method or self._settings.get("eval_method")
split_ratio = split_ratio or self._settings.get("split_ratio")
n_splits = n_splits or self._settings.get("n_splits")
auto_augment = self._settings.get("auto_augment") if auto_augment is None else auto_augment
metric = metric or self._settings.get("metric")
estimator_list = estimator_list or self._settings.get("estimator_list")
log_file_name = self._settings.get("log_file_name") if log_file_name is None else log_file_name
max_iter = self._settings.get("max_iter") if max_iter is None else max_iter
sample_is_none = sample is None
if sample_is_none:
sample = self._settings.get("sample")
ensemble = self._settings.get("ensemble") if ensemble is None else ensemble
log_type = log_type or self._settings.get("log_type")
model_history = self._settings.get("model_history") if model_history is None else model_history
log_training_metric = (
self._settings.get("log_training_metric") if log_training_metric is None else log_training_metric
)
mem_thres = mem_thres or self._settings.get("mem_thres")
pred_time_limit = pred_time_limit or self._settings.get("pred_time_limit")
train_time_limit = train_time_limit or self._settings.get("train_time_limit")
self._metric_constraints = metric_constraints or self._settings.get("metric_constraints")
if np.isfinite(pred_time_limit):
self._metric_constraints.append(("pred_time", "<=", pred_time_limit))
verbose = self._settings.get("verbose") if verbose is None else verbose
retrain_full = self._settings.get("retrain_full") if retrain_full is None else retrain_full
split_type = split_type or self._settings.get("split_type")
hpo_method = hpo_method or self._settings.get("hpo_method")
learner_selector = learner_selector or self._settings.get("learner_selector")
no_starting_points = starting_points is None
if no_starting_points:
starting_points = self._settings.get("starting_points")
n_concurrent_trials = n_concurrent_trials or self._settings.get("n_concurrent_trials")
keep_search_state = self._settings.get("keep_search_state") if keep_search_state is None else keep_search_state
self.preserve_checkpoint = (
self._settings.get("preserve_checkpoint") if preserve_checkpoint is None else preserve_checkpoint
)
early_stop = self._settings.get("early_stop") if early_stop is None else early_stop
force_cancel = self._settings.get("force_cancel") if force_cancel is None else force_cancel
# no search budget is provided?
no_budget = time_budget < 0 and max_iter is None and not early_stop
append_log = self._settings.get("append_log") if append_log is None else append_log
min_sample_size = min_sample_size or self._settings.get("min_sample_size")
use_ray = self._settings.get("use_ray") if use_ray is None else use_ray
use_spark = self._settings.get("use_spark") if use_spark is None else use_spark
if use_spark and use_ray is not False:
raise ValueError("use_spark and use_ray cannot be both True.")
elif use_spark:
spark_available, spark_error_msg = check_spark()
if not spark_available:
raise spark_error_msg
old_level = logger.getEffectiveLevel()
self.verbose = verbose
logger.setLevel(50 - verbose * 10)
if not logger.handlers:
# Add the console handler.
_ch = logging.StreamHandler(stream=sys.stdout)
_ch.setFormatter(logger_formatter)
logger.addHandler(_ch)
if not use_ray and not use_spark and n_concurrent_trials > 1:
if ray_available:
logger.warning(
"n_concurrent_trials > 1 is only supported when using Ray or Spark. "
"Ray installed, setting use_ray to True. If you want to use Spark, set use_spark to True."
)
use_ray = True
else:
spark_available, _ = check_spark()
if spark_available:
logger.warning(
"n_concurrent_trials > 1 is only supported when using Ray or Spark. "
"Spark installed, setting use_spark to True. If you want to use Ray, set use_ray to True."
)
use_spark = True
else:
logger.warning(
"n_concurrent_trials > 1 is only supported when using Ray or Spark. "
"Neither Ray nor Spark installed, setting n_concurrent_trials to 1."
)
n_concurrent_trials = 1
self._state.n_jobs = n_jobs
self._n_concurrent_trials = n_concurrent_trials
self._early_stop = early_stop
self._use_spark = use_spark
self._force_cancel = force_cancel
self._use_ray = use_ray
# use the following condition if we have an estimation of average_trial_time and average_trial_overhead
# self._use_ray = use_ray or n_concurrent_trials > ( average_trial_time + average_trial_overhead) / (average_trial_time)
if self._use_ray is not False:
import ray
n_cpus = ray.is_initialized() and ray.available_resources()["CPU"] or os.cpu_count()
self._state.resources_per_trial = (
# when using gpu, default cpu is 1 per job; otherwise, default cpu is n_cpus / n_concurrent_trials
(
{
"cpu": max(int((n_cpus - 2) / 2 / n_concurrent_trials), 1),
"gpu": gpu_per_trial,
}
if gpu_per_trial == 0
else {"cpu": 1, "gpu": gpu_per_trial}
)
if n_jobs < 0
else {"cpu": n_jobs, "gpu": gpu_per_trial}
)
if isinstance(X_train, ray.ObjectRef):
X_train = ray.get(X_train)
elif isinstance(dataframe, ray.ObjectRef):
dataframe = ray.get(dataframe)
else:
# TODO: Integrate with Spark
self._state.resources_per_trial = {"cpu": n_jobs} if n_jobs > 0 else {"cpu": 1}
self._state.free_mem_ratio = self._settings.get("free_mem_ratio") if free_mem_ratio is None else free_mem_ratio
self._state.task = task
self._state.log_training_metric = log_training_metric
self._state.fit_kwargs = fit_kwargs
custom_hp = custom_hp or self._settings.get("custom_hp")
self._skip_transform = self._settings.get("skip_transform") if skip_transform is None else skip_transform
self._mlflow_logging = self._settings.get("mlflow_logging") if mlflow_logging is None else mlflow_logging
fit_kwargs_by_estimator = fit_kwargs_by_estimator or self._settings.get("fit_kwargs_by_estimator")
self._state.fit_kwargs_by_estimator = fit_kwargs_by_estimator.copy() # shallow copy of fit_kwargs_by_estimator
self._state.weight_val = sample_weight_val
task.validate_data(
self,
self._state,
X_train,
y_train,
dataframe,
label,
X_val,
y_val,
groups_val,
groups,
)
self._search_states = {} # key: estimator name; value: SearchState
self._random = np.random.RandomState(RANDOM_SEED)
self._seed = seed if seed is not None else 20
self._learner_selector = learner_selector
logger.info(f"task = {task}")
self._split_type = self._state.task.decide_split_type(
split_type,
self._y_train_all,
self._state.fit_kwargs,
self._state.groups,
)
if X_val is not None:
logger.info(f"Data split method: {self._split_type}")
eval_method = self._decide_eval_method(eval_method, time_budget)
self._state.eval_method = eval_method
logger.info("Evaluation method: {}".format(eval_method))
self._state.cv_score_agg_func = cv_score_agg_func or self._settings.get("cv_score_agg_func")
self._retrain_in_budget = retrain_full == "budget" and (eval_method == "holdout" and self._state.X_val is None)
self._auto_augment = auto_augment
_sample_size_from_starting_points = {}
if isinstance(starting_points, dict):
for _estimator, _point_per_estimator in starting_points.items():
sample_size = (
_point_per_estimator
and isinstance(_point_per_estimator, dict)
and _point_per_estimator.get("FLAML_sample_size")
)
if sample_size:
_sample_size_from_starting_points[_estimator] = sample_size
elif _point_per_estimator and isinstance(_point_per_estimator, list):
_sample_size_set = set(
[
config["FLAML_sample_size"]
for config in _point_per_estimator
if "FLAML_sample_size" in config
]
)
if _sample_size_set:
_sample_size_from_starting_points[_estimator] = min(_sample_size_set)
if len(_sample_size_set) > 1:
logger.warning(
"Using the min FLAML_sample_size of all the provided starting points for estimator {}. (Provided FLAML_sample_size are: {})".format(
_estimator, _sample_size_set
)
)
if not sample and isinstance(starting_points, dict):
assert (
not _sample_size_from_starting_points
), "When subsampling is disabled, do not include FLAML_sample_size in the starting point."
self._min_sample_size = _sample_size_from_starting_points or min_sample_size
self._min_sample_size_input = min_sample_size
self._prepare_data(eval_method, split_ratio, n_splits)
# TODO pull this to task as decide_sample_size
if isinstance(self._min_sample_size, dict):
self._sample = {
(
k,
sample
and not task.is_rank()
and eval_method != "cv"
and (self._min_sample_size[k] * SAMPLE_MULTIPLY_FACTOR < self._state.data_size[0]),
)
for k in self._min_sample_size.keys()
}
else:
self._sample = (
sample
and not task.is_rank()
and eval_method != "cv"
and (self._min_sample_size * SAMPLE_MULTIPLY_FACTOR < self._state.data_size[0])
)
metric = task.default_metric(metric)
self._state.metric = metric
# TODO pull this to task
def is_to_reverse_metric(metric, task):
if metric.startswith("ndcg"):
return True, f"1-{metric}"
if metric in [
"r2",
"accuracy",
"roc_auc",
"roc_auc_ovr",
"roc_auc_ovo",
"roc_auc_weighted",
"roc_auc_ovr_weighted",
"roc_auc_ovo_weighted",
"f1",
"ap",
"micro_f1",
"macro_f1",
]:
return True, f"1-{metric}"
if task.is_nlp():
from flaml.automl.ml import huggingface_metric_to_mode
if metric in huggingface_metric_to_mode and huggingface_metric_to_mode[metric] == "max":
return True, f"-{metric}"
return False, None
if isinstance(metric, str):
is_reverse, reverse_metric = is_to_reverse_metric(metric, task)
if is_reverse:
error_metric = reverse_metric
else:
error_metric = metric
else:
error_metric = "customized metric"
logger.info(f"Minimizing error metric: {error_metric}")
self._state.error_metric = error_metric
is_spark_dataframe = isinstance(X_train, psDataFrame) or isinstance(dataframe, psDataFrame)
estimator_list = task.default_estimator_list(estimator_list, is_spark_dataframe)
if is_spark_dataframe and self._use_spark:
# For spark dataframe, use_spark must be False because spark models are trained in parallel themselves
self._use_spark = False
logger.warning(
"Spark dataframes support only spark.ml type models, which will be trained "
"with spark themselves, no need to start spark trials in flaml. "
"`use_spark` is set to False."
)
# When no search budget is specified
if no_budget:
max_iter = len(estimator_list)
self._learner_selector = "roundrobin"
if sample_is_none:
self._sample = False
if no_starting_points:
starting_points = "data"
logger.warning(
"No search budget is provided via time_budget or max_iter."
" Training only one model per estimator."
" Zero-shot AutoML is used for certain tasks and estimators."
" To tune hyperparameters for each estimator,"
" please provide budget either via time_budget or max_iter."
)
elif max_iter is None:
# set to a large number
max_iter = 1000000
self._state.retrain_final = (
retrain_full is True
and eval_method == "holdout"
and (X_val is None or self._use_ray is not False)
or eval_method == "cv"
and (max_iter > 0 or retrain_full is True)
or max_iter == 1
)
# add custom learner
for estimator_name in estimator_list:
if estimator_name not in self._state.learner_classes:
self.add_learner(
estimator_name,
self._state.task.estimator_class_from_str(estimator_name),
)
# set up learner search space
if isinstance(starting_points, str) and starting_points.startswith("data"):
from flaml.default import suggest_config
location = starting_points[5:]
starting_points = {}
for estimator_name in estimator_list:
try:
configs = suggest_config(
self._state.task,
self._X_train_all,
self._y_train_all,
estimator_name,
location,
k=1,
)
starting_points[estimator_name] = [x["hyperparameters"] for x in configs]
except FileNotFoundError:
pass
try:
learner = suggest_learner(
self._state.task,
self._X_train_all,
self._y_train_all,
estimator_list=estimator_list,
location=location,
)
if learner != estimator_list[0]:
estimator_list.remove(learner)
estimator_list.insert(0, learner)
except FileNotFoundError:
pass
self._state.time_budget = time_budget
starting_points = {} if starting_points == "static" else starting_points
for estimator_name in estimator_list:
estimator_class = self._state.learner_classes[estimator_name]
estimator_class.init()
this_estimator_kwargs = self._state.fit_kwargs_by_estimator.get(estimator_name)
if this_estimator_kwargs:
# make another shallow copy of the value (a dict obj), so user's fit_kwargs_by_estimator won't be updated
this_estimator_kwargs = this_estimator_kwargs.copy()
this_estimator_kwargs.update(
self._state.fit_kwargs
) # update the shallow copy of fit_kwargs to fit_kwargs_by_estimator
self._state.fit_kwargs_by_estimator[
estimator_name
] = this_estimator_kwargs # set self._state.fit_kwargs_by_estimator[estimator_name] to the update, so only self._state.fit_kwargs_by_estimator will be updated
else:
self._state.fit_kwargs_by_estimator[estimator_name] = self._state.fit_kwargs
self._search_states[estimator_name] = SearchState(
learner_class=estimator_class,
# data_size=self._state.data_size,
data=self._state.X_train,
task=self._state.task,
starting_point=starting_points.get(estimator_name),
period=self._state.fit_kwargs.get(
"period"
), # NOTE: this is after kwargs is updated to fit_kwargs_by_estimator
custom_hp=custom_hp and custom_hp.get(estimator_name),
max_iter=max_iter / len(estimator_list) if self._learner_selector == "roundrobin" else max_iter,
budget=self._state.time_budget,
)
logger.info("List of ML learners in AutoML Run: {}".format(estimator_list))
self.estimator_list = estimator_list
self._active_estimators = estimator_list.copy()
self._ensemble = ensemble
self._max_iter = max_iter
self._mem_thres = mem_thres
self._pred_time_limit = pred_time_limit
self._state.train_time_limit = train_time_limit
self._log_type = log_type
self.split_ratio = split_ratio
self._state.model_history = model_history
self._hpo_method = (
hpo_method
if hpo_method != "auto"
else (
"bs"
if n_concurrent_trials > 1
or (self._use_ray is not False or self._use_spark)
and len(estimator_list) > 1
else "cfo"
)
)
if log_file_name:
with training_log_writer(log_file_name, append_log) as save_helper:
self._training_log = save_helper
self._search()
else:
self._training_log = None
self._search()
if self._best_estimator:
logger.info("fit succeeded")
logger.info(f"Time taken to find the best model: {self._time_taken_best_iter}")
if (
self._hpo_method in ("cfo", "bs")
and self._state.time_budget > 0
and (self._time_taken_best_iter >= self._state.time_budget * 0.7)
and not all(
state.search_alg and state.search_alg.searcher.is_ls_ever_converged
for state in self._search_states.values()
)
):
logger.warning(
"Time taken to find the best model is {0:.0f}% of the "
"provided time budget and not all estimators' hyperparameter "
"search converged. Consider increasing the time budget.".format(
self._time_taken_best_iter / self._state.time_budget * 100
)
)
if not keep_search_state:
# release space
del self._X_train_all, self._y_train_all, self._state.kf
del self._state.X_train, self._state.X_train_all, self._state.X_val
del self._state.y_train, self._state.y_train_all, self._state.y_val
del (
self._sample_weight_full,
self._state.fit_kwargs_by_estimator,
self._state.fit_kwargs,
) # NOTE: this is after kwargs is updated to fit_kwargs_by_estimator
del self._state.groups, self._state.groups_all, self._state.groups_val
logger.setLevel(old_level)
|
(self, X_train=None, y_train=None, dataframe=None, label=None, metric=None, task: Union[str, flaml.automl.task.task.Task, NoneType] = None, n_jobs=None, log_file_name=None, estimator_list=None, time_budget=None, max_iter=None, sample=None, ensemble=None, eval_method=None, log_type=None, model_history=None, split_ratio=None, n_splits=None, log_training_metric=None, mem_thres=None, pred_time_limit=None, train_time_limit=None, X_val=None, y_val=None, sample_weight_val=None, groups_val=None, groups=None, verbose=None, retrain_full=None, split_type=None, learner_selector=None, hpo_method=None, starting_points=None, seed=None, n_concurrent_trials=None, keep_search_state=None, preserve_checkpoint=True, early_stop=None, force_cancel=None, append_log=None, auto_augment=None, min_sample_size=None, use_ray=None, use_spark=None, free_mem_ratio=0, metric_constraints=None, custom_hp=None, time_col=None, cv_score_agg_func=None, skip_transform=None, mlflow_logging=None, fit_kwargs_by_estimator=None, **fit_kwargs)
|
52,720 |
flaml.automl.automl
|
get_estimator_from_log
|
Get the estimator from log file.
Args:
log_file_name: A string of the log file name.
record_id: An integer of the record ID in the file,
0 corresponds to the first trial.
task: A string of the task type,
'binary', 'multiclass', 'regression', 'ts_forecast', 'rank',
or an instance of the Task class.
Returns:
An estimator object for the given configuration.
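e.g., a usage sketch (the file name and record id are illustrative):
```python
estimator = automl.get_estimator_from_log(
    "flaml.log", record_id=0, task="multiclass"
)
```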
|
def get_estimator_from_log(self, log_file_name: str, record_id: int, task: Union[str, Task]):
"""Get the estimator from log file.
Args:
log_file_name: A string of the log file name.
record_id: An integer of the record ID in the file,
0 corresponds to the first trial.
task: A string of the task type,
'binary', 'multiclass', 'regression', 'ts_forecast', 'rank',
or an instance of the Task class.
Returns:
An estimator object for the given configuration.
"""
with training_log_reader(log_file_name) as reader:
record = reader.get_record(record_id)
estimator = record.learner
config = AutoMLState.sanitize(record.config)
if isinstance(task, str):
task = task_factory(task)
estimator, _ = train_estimator(
X_train=None,
y_train=None,
config_dic=config,
task=task,
estimator_name=estimator,
estimator_class=self._state.learner_classes.get(estimator),
eval_metric="train_time",
)
return estimator
|
(self, log_file_name: str, record_id: int, task: Union[str, flaml.automl.task.task.Task])
|
52,721 |
flaml.automl.automl
|
get_params
| null |
def get_params(self, deep: bool = False) -> dict:
return self._settings.copy()
|
(self, deep: bool = False) -> dict
|
52,722 |
flaml.automl.automl
|
pickle
| null |
def pickle(self, output_file_name):
    import pickle

    # Training functions can hold unpicklable references (e.g., closures
    # over data), so detach them from each search state before serializing.
    estimator_to_training_function = {}
    for estimator in self.estimator_list:
        search_state = self._search_states[estimator]
        if hasattr(search_state, "training_function"):
            estimator_to_training_function[estimator] = search_state.training_function
            del search_state.training_function
    with open(output_file_name, "wb") as f:
        pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
|
(self, output_file_name)
|
52,723 |
flaml.automl.automl
|
predict
|
Predict label from features.
Args:
X: A numpy array or pandas dataframe or pyspark.pandas dataframe
of featurized instances, shape n * m,
or for time series forecast tasks:
a pandas dataframe with the first column containing
timestamp values (datetime type) or an integer n for
the predict steps (only valid when the estimator is
arima or sarimax). Other columns in the dataframe
are assumed to be exogenous variables (categorical
or numeric).
**pred_kwargs: Other keyword arguments to pass to the predict() function of
the searched learners, such as per_device_eval_batch_size.
```python
import pandas as pd

multivariate_X_test = pd.DataFrame({
'timeStamp': pd.date_range(start='1/1/2022', end='1/07/2022'),
'categorical_col': ['yes', 'yes', 'no', 'no', 'yes', 'no', 'yes'],
'continuous_col': [105, 107, 120, 118, 110, 112, 115]
})
model.predict(multivariate_X_test)
```
Returns:
An array-like of shape n * 1: each element is a predicted
label for an instance.
|
def predict(
self,
X: Union[np.array, DataFrame, List[str], List[List[str]], psDataFrame],
**pred_kwargs,
):
"""Predict label from features.
Args:
X: A numpy array or pandas dataframe or pyspark.pandas dataframe
of featurized instances, shape n * m,
or for time series forecast tasks:
a pandas dataframe with the first column containing
timestamp values (datetime type) or an integer n for
the predict steps (only valid when the estimator is
arima or sarimax). Other columns in the dataframe
are assumed to be exogenous variables (categorical
or numeric).
**pred_kwargs: Other keyword arguments to pass to the predict() function of
the searched learners, such as per_device_eval_batch_size.
```python
import pandas as pd

multivariate_X_test = pd.DataFrame({
'timeStamp': pd.date_range(start='1/1/2022', end='1/07/2022'),
'categorical_col': ['yes', 'yes', 'no', 'no', 'yes', 'no', 'yes'],
'continuous_col': [105, 107, 120, 118, 110, 112, 115]
})
model.predict(multivariate_X_test)
```
Returns:
An array-like of shape n * 1: each element is a predicted
label for an instance.
"""
estimator = getattr(self, "_trained_estimator", None)
if estimator is None:
logger.warning("No estimator is trained. Please run fit with enough budget.")
return None
X = self._state.task.preprocess(X, self._transformer)
y_pred = estimator.predict(X, **pred_kwargs)
    if isinstance(y_pred, np.ndarray) and y_pred.ndim > 1:
        y_pred = y_pred.flatten()
if self._label_transformer:
return self._label_transformer.inverse_transform(Series(y_pred.astype(int)))
else:
return y_pred
|
(self, X: Union[<built-in function array>, pandas.core.frame.DataFrame, List[str], List[List[str]], flaml.automl.spark.psDataFrame], **pred_kwargs)
|
52,724 |
flaml.automl.automl
|
predict_proba
|
Predict the probability of each class from features, only works for
classification problems.
Args:
X: A numpy array of featurized instances, shape n * m.
**pred_kwargs: Other keyword arguments to pass to the predict_proba() function of
the searched learners, such as per_device_eval_batch_size.
Returns:
A numpy array of shape n * c. c is the # classes. Each element at
(i, j) is the probability for instance i to be in class j.
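e.g., a usage sketch after a classification fit (`X_test` is illustrative):
```python
proba = automl.predict_proba(X_test)
# proba[i, j]: probability that instance i belongs to class j.
```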
|
def predict_proba(self, X, **pred_kwargs):
"""Predict the probability of each class from features, only works for
classification problems.
Args:
X: A numpy array of featurized instances, shape n * m.
**pred_kwargs: Other keyword arguments to pass to the predict_proba() function of
the searched learners, such as per_device_eval_batch_size.
Returns:
A numpy array of shape n * c. c is the # classes. Each element at
(i, j) is the probability for instance i to be in class j.
"""
estimator = getattr(self, "_trained_estimator", None)
if estimator is None:
logger.warning("No estimator is trained. Please run fit with enough budget.")
return None
X = self._state.task.preprocess(X, self._transformer)
    proba = estimator.predict_proba(X, **pred_kwargs)
return proba
|
(self, X, **pred_kwargs)
|