index | package | name | docstring | code | signature |
---|---|---|---|---|---|
52,418 |
zabbix_utils.types
|
__init__
| null |
def __init__(self, host: str, key: str, value: str,
clock: Union[int, None] = None, ns: Union[int, None] = None):
self.host = str(host)
self.key = str(key)
self.value = str(value)
self.clock = None
self.ns = None
if clock is not None:
try:
self.clock = int(clock)
except ValueError:
raise ValueError(
'The clock value must be expressed in the Unix Timestamp format') from None
if ns is not None:
try:
self.ns = int(ns)
except ValueError:
raise ValueError(
'The ns value must be expressed in the integer value of nanoseconds') from None
|
(self, host: str, key: str, value: str, clock: Optional[int] = None, ns: Optional[int] = None)
|
52,419 |
zabbix_utils.types
|
__repr__
| null |
def __repr__(self) -> str:
return self.__str__()
|
(self) -> str
|
52,420 |
zabbix_utils.types
|
__str__
| null |
def __str__(self) -> str:
return json.dumps(self.to_json(), ensure_ascii=False)
|
(self) -> str
|
52,421 |
zabbix_utils.types
|
to_json
|
Represents the ItemValue object as a dictionary for JSON.
Returns:
dict: Object attributes in dictionary.
|
def to_json(self) -> dict:
"""Represents ItemValue object in dictionary for json.
Returns:
dict: Object attributes in dictionary.
"""
return {k: v for k, v in self.__dict__.items() if v is not None}
|
(self) -> dict
|
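Taken together, the ItemValue rows above describe a small value object: the constructor coerces host, key and value to strings and validates clock/ns as integers, and to_json() drops unset fields. A minimal usage sketch, assuming ItemValue can be imported from the zabbix_utils.types module named in the package column:

```python
# Usage sketch; the import path follows the package column above (zabbix_utils.types).
from zabbix_utils.types import ItemValue

item = ItemValue("web01", "system.cpu.load", "1.25", clock=1700000000)
print(item.to_json())  # {'host': 'web01', 'key': 'system.cpu.load', 'value': '1.25', 'clock': 1700000000}
print(str(item))       # the same dictionary serialized with json.dumps

# Non-integer clock or ns values raise ValueError with the messages shown above:
try:
    ItemValue("web01", "system.cpu.load", "1.25", clock="not-a-timestamp")
except ValueError as err:
    print(err)         # The clock value must be expressed in the Unix Timestamp format
```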
52,422 |
zabbix_utils.exceptions
|
ModuleBaseException
| null |
class ModuleBaseException(Exception):
pass
| null |
52,423 |
zabbix_utils.exceptions
|
ProcessingError
| null |
class ProcessingError(ModuleBaseException):
def __init__(self, *args):
super().__init__(" ".join(map(str, args)))
return
|
(*args)
|
52,424 |
zabbix_utils.exceptions
|
__init__
| null |
def __init__(self, *args):
super().__init__(" ".join(map(str, args)))
return
|
(self, *args)
|
52,425 |
zabbix_utils.sender
|
Sender
|
Zabbix sender synchronous implementation.
Args:
server (str, optional): Zabbix server address. Defaults to `'127.0.0.1'`.
port (int, optional): Zabbix server port. Defaults to `10051`.
use_config (bool, optional): Specifying configuration use. Defaults to `False`.
timeout (int, optional): Connection timeout value. Defaults to `10`.
use_ipv6 (bool, optional): Specifying IPv6 use instead of IPv4. Defaults to `False`.
source_ip (str, optional): IP from which to establish connection. Defaults to `None`.
chunk_size (int, optional): Number of packets in one chunk. Defaults to `250`.
clusters (tuple|list, optional): List of Zabbix clusters. Defaults to `None`.
socket_wrapper (Callable, optional): Func(`conn`,`tls`) to wrap socket. Defaults to `None`.
compression (bool, optional): Specifying compression use. Defaults to `False`.
config_path (str, optional): Path to Zabbix agent configuration file. Defaults to `/etc/zabbix/zabbix_agentd.conf`.
|
class Sender():
"""Zabbix sender synchronous implementation.
Args:
server (str, optional): Zabbix server address. Defaults to `'127.0.0.1'`.
port (int, optional): Zabbix server port. Defaults to `10051`.
use_config (bool, optional): Specifying configuration use. Defaults to `False`.
timeout (int, optional): Connection timeout value. Defaults to `10`.
use_ipv6 (bool, optional): Specifying IPv6 use instead of IPv4. Defaults to `False`.
source_ip (str, optional): IP from which to establish connection. Defaults to `None`.
chunk_size (int, optional): Number of packets in one chunk. Defaults to `250`.
clusters (tuple|list, optional): List of Zabbix clusters. Defaults to `None`.
socket_wrapper (Callable, optional): Func(`conn`,`tls`) to wrap socket. Defaults to `None`.
compression (bool, optional): Specifying compression use. Defaults to `False`.
config_path (str, optional): Path to Zabbix agent configuration file. Defaults to \
`/etc/zabbix/zabbix_agentd.conf`.
"""
def __init__(self, server: Optional[str] = None, port: int = 10051,
use_config: bool = False, timeout: int = 10,
use_ipv6: bool = False, source_ip: Optional[str] = None,
chunk_size: int = 250, clusters: Union[tuple, list] = None,
socket_wrapper: Optional[Callable] = None, compression: bool = False,
config_path: Optional[str] = '/etc/zabbix/zabbix_agentd.conf'):
self.timeout = timeout
self.use_ipv6 = use_ipv6
self.tls = {}
self.source_ip = None
self.chunk_size = chunk_size
self.compression = compression
if socket_wrapper is not None:
if not isinstance(socket_wrapper, Callable):
raise TypeError('Value "socket_wrapper" should be a function.') from None
self.socket_wrapper = socket_wrapper
if source_ip is not None:
self.source_ip = source_ip
if use_config:
self.clusters = []
self.__load_config(config_path)
return
if clusters is not None:
if not (isinstance(clusters, tuple) or isinstance(clusters, list)):
raise TypeError('Value "clusters" should be a tuple or a list.') from None
clusters = clusters.copy()
if server is not None:
clusters.append([f"{server}:{port}"])
self.clusters = [Cluster(c) for c in clusters]
else:
self.clusters = [Cluster([f"{server or '127.0.0.1'}:{port}"])]
def __read_config(self, config: configparser.SectionProxy) -> None:
server_row = config.get('ServerActive') or config.get('Server') or '127.0.0.1:10051'
for cluster in server_row.split(','):
self.clusters.append(Cluster(cluster.strip().split(';')))
if 'SourceIP' in config:
self.source_ip = config.get('SourceIP')
for key in config:
if key.startswith('tls'):
self.tls[key] = config.get(key)
def __load_config(self, filepath: str) -> None:
config = configparser.ConfigParser(strict=False)
with open(filepath, 'r', encoding='utf-8') as cfg:
config.read_string('[root]\n' + cfg.read())
self.__read_config(config['root'])
def __get_response(self, conn: socket) -> Optional[str]:
try:
result = json.loads(
ZabbixProtocol.parse_sync_packet(conn, log, ProcessingError)
)
except json.decoder.JSONDecodeError as err:
log.debug('Unexpected response was received from Zabbix.')
raise err
log.debug('Received data: %s', result)
return result
def __create_request(self, items: list) -> dict:
return {
"request": "sender data",
"data": [i.to_json() for i in items]
}
def __chunk_send(self, items: list) -> dict:
responses = {}
packet = ZabbixProtocol.create_packet(self.__create_request(items), log, self.compression)
for cluster in self.clusters:
active_node = None
for i, node in enumerate(cluster.nodes):
log.debug('Trying to send data to %s', node)
try:
if self.use_ipv6:
connection = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error:
raise ProcessingError(f"Error creating socket for {node}") from None
connection.settimeout(self.timeout)
if self.source_ip:
connection.bind((self.source_ip, 0,))
try:
connection.connect((node.address, node.port))
except (TimeoutError, socket.timeout):
log.debug(
'The connection to %s timed out after %d seconds',
node,
self.timeout
)
except (ConnectionRefusedError, socket.gaierror) as err:
log.debug(
'An error occurred while trying to connect to %s: %s',
node,
getattr(err, 'msg', str(err))
)
else:
if i > 0:
cluster.nodes[0], cluster.nodes[i] = cluster.nodes[i], cluster.nodes[0]
active_node = node
break
if active_node is None:
log.error(
'Couldn\'t connect to all of cluster nodes: %s',
str(list(cluster.nodes))
)
connection.close()
raise ProcessingError(
f"Couldn't connect to all of cluster nodes: {list(cluster.nodes)}"
)
if self.socket_wrapper is not None:
connection = self.socket_wrapper(connection, self.tls)
try:
connection.sendall(packet)
except (TimeoutError, socket.timeout) as err:
log.error(
'The connection to %s timed out after %d seconds while trying to send',
active_node,
self.timeout
)
connection.close()
raise err
except (OSError, socket.error) as err:
log.warning(
'An error occurred while trying to send to %s: %s',
active_node,
getattr(err, 'msg', str(err))
)
connection.close()
raise err
try:
response = self.__get_response(connection)
except ConnectionResetError as err:
log.debug('Get value error: %s', err)
raise err
log.debug('Response from %s: %s', active_node, response)
if response and response.get('response') != 'success':
raise socket.error(response)
responses[active_node] = response
try:
connection.close()
except socket.error:
pass
return responses
def send(self, items: list) -> TrapperResponse:
"""Sends packets and receives an answer from Zabbix.
Args:
items (list): List of ItemValue objects.
Returns:
TrapperResponse: Response from Zabbix server/proxy.
"""
# Split the list of items into chunks of size self.chunk_size.
chunks = [items[i:i + self.chunk_size] for i in range(0, len(items), self.chunk_size)]
# Merge responses into a single TrapperResponse object.
try:
result = TrapperResponse()
except ProcessingError as err:
log.debug(err)
raise ProcessingError(err) from err
# TrapperResponse details for each node and chunk.
result.details = {}
for i, chunk in enumerate(chunks):
if not all(isinstance(item, ItemValue) for item in chunk):
log.debug('Received unexpected item list. It must be a list of \
ItemValue objects: %s', json.dumps(chunk))
raise ProcessingError(f"Received unexpected item list. \
It must be a list of ItemValue objects: {json.dumps(chunk)}")
resp_by_node = self.__chunk_send(chunk)
node_step = 1
for node, resp in resp_by_node.items():
try:
result.add(resp, (i + 1) * node_step)
except ProcessingError as err:
log.debug(err)
raise ProcessingError(err) from None
node_step += 1
if node not in result.details:
result.details[node] = []
result.details[node].append(TrapperResponse(i+1).add(resp))
return result
def send_value(self, host: str, key: str,
value: str, clock: Optional[int] = None,
ns: Optional[int] = None) -> TrapperResponse:
"""Sends one value and receives an answer from Zabbix.
Args:
host (str): Specify host name the item belongs to (as registered in Zabbix frontend).
key (str): Specify item key to send value to.
value (str): Specify item value.
clock (int, optional): Specify time in Unix timestamp format. Defaults to `None`.
ns (int, optional): Specify time expressed in nanoseconds. Defaults to `None`.
Returns:
TrapperResponse: Response from Zabbix server/proxy.
"""
return self.send([ItemValue(host, key, value, clock, ns)])
|
(server: Optional[str] = None, port: int = 10051, use_config: bool = False, timeout: int = 10, use_ipv6: bool = False, source_ip: Optional[str] = None, chunk_size: int = 250, clusters: Union[tuple, list] = None, socket_wrapper: Optional[Callable] = None, compression: bool = False, config_path: Optional[str] = '/etc/zabbix/zabbix_agentd.conf')
|
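The private __load_config/__read_config helpers above prepend a synthetic [root] section so configparser can parse a Zabbix agent file, then split ServerActive on commas (separate clusters) and semicolons (nodes within one cluster). A construction sketch covering both addressing modes; host names are placeholders and the import path follows the package column:

```python
# Sketch; host names are placeholders.
from zabbix_utils.sender import Sender

# Explicit addressing: a single server, or high-availability clusters (list of node lists).
sender = Sender(server="zabbix.example.com", port=10051)
ha_sender = Sender(clusters=[
    ["zbx-node1.example.com:10051", "zbx-node2.example.com:10051"],  # one cluster, two nodes
    ["zbx-single.example.com:10051"],                                # a second cluster
])

# Agent-config addressing: ServerActive is read from the agent configuration, e.g.
#   ServerActive=zbx-node1:10051;zbx-node2:10051,zbx-single:10051
# (';' separates nodes of one cluster, ',' separates clusters).
cfg_sender = Sender(use_config=True, config_path="/etc/zabbix/zabbix_agentd.conf")
```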
52,426 |
zabbix_utils.sender
|
__chunk_send
| null |
def __chunk_send(self, items: list) -> dict:
responses = {}
packet = ZabbixProtocol.create_packet(self.__create_request(items), log, self.compression)
for cluster in self.clusters:
active_node = None
for i, node in enumerate(cluster.nodes):
log.debug('Trying to send data to %s', node)
try:
if self.use_ipv6:
connection = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error:
raise ProcessingError(f"Error creating socket for {node}") from None
connection.settimeout(self.timeout)
if self.source_ip:
connection.bind((self.source_ip, 0,))
try:
connection.connect((node.address, node.port))
except (TimeoutError, socket.timeout):
log.debug(
'The connection to %s timed out after %d seconds',
node,
self.timeout
)
except (ConnectionRefusedError, socket.gaierror) as err:
log.debug(
'An error occurred while trying to connect to %s: %s',
node,
getattr(err, 'msg', str(err))
)
else:
if i > 0:
cluster.nodes[0], cluster.nodes[i] = cluster.nodes[i], cluster.nodes[0]
active_node = node
break
if active_node is None:
log.error(
'Couldn\'t connect to all of cluster nodes: %s',
str(list(cluster.nodes))
)
connection.close()
raise ProcessingError(
f"Couldn't connect to all of cluster nodes: {list(cluster.nodes)}"
)
if self.socket_wrapper is not None:
connection = self.socket_wrapper(connection, self.tls)
try:
connection.sendall(packet)
except (TimeoutError, socket.timeout) as err:
log.error(
'The connection to %s timed out after %d seconds while trying to send',
active_node,
self.timeout
)
connection.close()
raise err
except (OSError, socket.error) as err:
log.warning(
'An error occurred while trying to send to %s: %s',
active_node,
getattr(err, 'msg', str(err))
)
connection.close()
raise err
try:
response = self.__get_response(connection)
except ConnectionResetError as err:
log.debug('Get value error: %s', err)
raise err
log.debug('Response from %s: %s', active_node, response)
if response and response.get('response') != 'success':
raise socket.error(response)
responses[active_node] = response
try:
connection.close()
except socket.error:
pass
return responses
|
(self, items: list) -> dict
|
52,428 |
zabbix_utils.sender
|
__get_response
| null |
def __get_response(self, conn: socket) -> Optional[str]:
try:
result = json.loads(
ZabbixProtocol.parse_sync_packet(conn, log, ProcessingError)
)
except json.decoder.JSONDecodeError as err:
log.debug('Unexpected response was received from Zabbix.')
raise err
log.debug('Received data: %s', result)
return result
|
(self, conn: socket) -> Optional[str]
|
52,431 |
zabbix_utils.sender
|
__init__
| null |
def __init__(self, server: Optional[str] = None, port: int = 10051,
use_config: bool = False, timeout: int = 10,
use_ipv6: bool = False, source_ip: Optional[str] = None,
chunk_size: int = 250, clusters: Union[tuple, list] = None,
socket_wrapper: Optional[Callable] = None, compression: bool = False,
config_path: Optional[str] = '/etc/zabbix/zabbix_agentd.conf'):
self.timeout = timeout
self.use_ipv6 = use_ipv6
self.tls = {}
self.source_ip = None
self.chunk_size = chunk_size
self.compression = compression
if socket_wrapper is not None:
if not isinstance(socket_wrapper, Callable):
raise TypeError('Value "socket_wrapper" should be a function.') from None
self.socket_wrapper = socket_wrapper
if source_ip is not None:
self.source_ip = source_ip
if use_config:
self.clusters = []
self.__load_config(config_path)
return
if clusters is not None:
if not (isinstance(clusters, tuple) or isinstance(clusters, list)):
raise TypeError('Value "clusters" should be a tuple or a list.') from None
clusters = clusters.copy()
if server is not None:
clusters.append([f"{server}:{port}"])
self.clusters = [Cluster(c) for c in clusters]
else:
self.clusters = [Cluster([f"{server or '127.0.0.1'}:{port}"])]
|
(self, server: Optional[str] = None, port: int = 10051, use_config: bool = False, timeout: int = 10, use_ipv6: bool = False, source_ip: Optional[str] = None, chunk_size: int = 250, clusters: Union[tuple, list, NoneType] = None, socket_wrapper: Optional[Callable] = None, compression: bool = False, config_path: Optional[str] = '/etc/zabbix/zabbix_agentd.conf')
|
52,432 |
zabbix_utils.sender
|
send
|
Sends packets and receives an answer from Zabbix.
Args:
items (list): List of ItemValue objects.
Returns:
TrapperResponse: Response from Zabbix server/proxy.
|
def send(self, items: list) -> TrapperResponse:
"""Sends packets and receives an answer from Zabbix.
Args:
items (list): List of ItemValue objects.
Returns:
TrapperResponse: Response from Zabbix server/proxy.
"""
# Split the list of items into chunks of size self.chunk_size.
chunks = [items[i:i + self.chunk_size] for i in range(0, len(items), self.chunk_size)]
# Merge responses into a single TrapperResponse object.
try:
result = TrapperResponse()
except ProcessingError as err:
log.debug(err)
raise ProcessingError(err) from err
# TrapperResponse details for each node and chunk.
result.details = {}
for i, chunk in enumerate(chunks):
if not all(isinstance(item, ItemValue) for item in chunk):
log.debug('Received unexpected item list. It must be a list of \
ItemValue objects: %s', json.dumps(chunk))
raise ProcessingError(f"Received unexpected item list. \
It must be a list of ItemValue objects: {json.dumps(chunk)}")
resp_by_node = self.__chunk_send(chunk)
node_step = 1
for node, resp in resp_by_node.items():
try:
result.add(resp, (i + 1) * node_step)
except ProcessingError as err:
log.debug(err)
raise ProcessingError(err) from None
node_step += 1
if node not in result.details:
result.details[node] = []
result.details[node].append(TrapperResponse(i+1).add(resp))
return result
|
(self, items: list) -> zabbix_utils.types.TrapperResponse
|
52,433 |
zabbix_utils.sender
|
send_value
|
Sends one value and receives an answer from Zabbix.
Args:
host (str): Specify host name the item belongs to (as registered in Zabbix frontend).
key (str): Specify item key to send value to.
value (str): Specify item value.
clock (int, optional): Specify time in Unix timestamp format. Defaults to `None`.
ns (int, optional): Specify time expressed in nanoseconds. Defaults to `None`.
Returns:
TrapperResponse: Response from Zabbix server/proxy.
|
def send_value(self, host: str, key: str,
value: str, clock: Optional[int] = None,
ns: Optional[int] = None) -> TrapperResponse:
"""Sends one value and receives an answer from Zabbix.
Args:
host (str): Specify host name the item belongs to (as registered in Zabbix frontend).
key (str): Specify item key to send value to.
value (str): Specify item value.
clock (int, optional): Specify time in Unix timestamp format. Defaults to `None`.
ns (int, optional): Specify time expressed in nanoseconds. Defaults to `None`.
Returns:
TrapperResponse: Response from Zabbix server/proxy.
"""
return self.send([ItemValue(host, key, value, clock, ns)])
|
(self, host: str, key: str, value: str, clock: Optional[int] = None, ns: Optional[int] = None) -> zabbix_utils.types.TrapperResponse
|
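Putting the send() and send_value() rows together, a hedged sending example; the host and item keys are placeholders that would have to exist as trapper items in Zabbix:

```python
# Sketch; "web01" and the item keys are placeholders for configured trapper items.
from zabbix_utils.sender import Sender
from zabbix_utils.types import ItemValue

sender = Sender(server="127.0.0.1", port=10051)

# Single value: send_value() wraps its arguments in one ItemValue and calls send().
response = sender.send_value("web01", "app.request_rate", "42.7")
print(response)  # TrapperResponse merged from the server's processed/failed/total reply

# Batch: send() splits the list into chunks of `chunk_size` (250 by default)
# and merges the per-chunk, per-node replies into one TrapperResponse.
items = [
    ItemValue("web01", "app.request_rate", "42.7"),
    ItemValue("web01", "app.error_rate", "0.3", clock=1700000000),
]
response = sender.send(items)
```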
52,434 |
zabbix_utils.api
|
ZabbixAPI
|
Provide interface for working with Zabbix API.
Args:
url (str, optional): Zabbix API URL. Defaults to `http://localhost/zabbix/api_jsonrpc.php`.
token (str, optional): Zabbix API token. Defaults to `None`.
user (str, optional): Zabbix API username. Defaults to `None`.
password (str, optional): Zabbix API user's password. Defaults to `None`.
http_user (str, optional): Basic Authentication username. Defaults to `None`.
http_password (str, optional): Basic Authentication password. Defaults to `None`.
skip_version_check (bool, optional): Skip version compatibility check. Defaults to `False`.
validate_certs (bool, optional): Specifying certificate validation. Defaults to `True`.
timeout (int, optional): Connection timeout to Zabbix API. Defaults to `30`.
|
class ZabbixAPI():
"""Provide interface for working with Zabbix API.
Args:
url (str, optional): Zabbix API URL. Defaults to `http://localhost/zabbix/api_jsonrpc.php`.
token (str, optional): Zabbix API token. Defaults to `None`.
user (str, optional): Zabbix API username. Defaults to `None`.
password (str, optional): Zabbix API user's password. Defaults to `None`.
http_user (str, optional): Basic Authentication username. Defaults to `None`.
http_password (str, optional): Basic Authentication password. Defaults to `None`.
skip_version_check (bool, optional): Skip version compatibility check. Defaults to `False`.
validate_certs (bool, optional): Specifying certificate validation. Defaults to `True`.
timeout (int, optional): Connection timeout to Zabbix API. Defaults to `30`.
"""
__version = None
__use_token = False
__session_id = None
__basic_cred = None
def __init__(self, url: Optional[str] = None, token: Optional[str] = None,
user: Optional[str] = None, password: Optional[str] = None,
http_user: Optional[str] = None, http_password: Optional[str] = None,
skip_version_check: bool = False, validate_certs: bool = True, timeout: int = 30):
url = url or env.get('ZABBIX_URL') or 'http://localhost/zabbix/api_jsonrpc.php'
user = user or env.get('ZABBIX_USER') or None
password = password or env.get('ZABBIX_PASSWORD') or None
token = token or env.get('ZABBIX_TOKEN') or None
self.url = ModuleUtils.check_url(url)
self.validate_certs = validate_certs
self.timeout = timeout
if http_user and http_password:
self.__basic_auth(http_user, http_password)
self.__check_version(skip_version_check)
if token or user or password:
self.login(token, user, password)
def __getattr__(self, name: str) -> Callable:
"""Dynamic creation of an API object.
Args:
name (str): Zabbix API method name.
Returns:
APIObject: Zabbix API object instance.
"""
return APIObject(name, self)
def __enter__(self) -> Callable:
return self
def __exit__(self, *args) -> None:
self.logout()
def __basic_auth(self, user: str, password: str) -> None:
"""Enable Basic Authentication using.
Args:
user (str): Basic Authentication username.
password (str): Basic Authentication password.
"""
log.debug(
"Enable Basic Authentication with username:%s password:%s",
user,
ModuleUtils.HIDING_MASK
)
self.__basic_cred = base64.b64encode(
f"{user}:{password}".encode()
).decode()
def api_version(self) -> APIVersion:
"""Return object of Zabbix API version.
Returns:
APIVersion: Object of Zabbix API version
"""
if self.__version is None:
self.__version = APIVersion(self.apiinfo.version())
return self.__version
@property
def version(self) -> APIVersion:
"""Return object of Zabbix API version.
Returns:
APIVersion: Object of Zabbix API version.
"""
return self.api_version()
def login(self, token: Optional[str] = None, user: Optional[str] = None,
password: Optional[str] = None) -> None:
"""Login to Zabbix API.
Args:
token (str, optional): Zabbix API token. Defaults to `None`.
user (str, optional): Zabbix API username. Defaults to `None`.
password (str, optional): Zabbix API user's password. Defaults to `None`.
"""
if token:
if self.version < 5.4:
raise APINotSupported(
message="Token usage",
version=self.version
)
if user or password:
raise ProcessingError(
"Token cannot be used with username and password")
self.__use_token = True
self.__session_id = token
return
if not user:
raise ProcessingError("Username is missing")
if not password:
raise ProcessingError("User password is missing")
if self.version < 5.4:
user_cred = {
"user": user,
"password": password
}
else:
user_cred = {
"username": user,
"password": password
}
log.debug(
"Login to Zabbix API using username:%s password:%s", user, ModuleUtils.HIDING_MASK
)
self.__use_token = False
self.__session_id = self.user.login(**user_cred)
log.debug("Connected to Zabbix API version %s: %s", self.version, self.url)
def logout(self) -> None:
"""Logout from Zabbix API."""
if self.__session_id:
if self.__use_token:
self.__session_id = None
self.__use_token = False
return
log.debug("Logout from Zabbix API")
self.user.logout()
self.__session_id = None
else:
log.debug("You're not logged in Zabbix API")
def check_auth(self) -> bool:
"""Check authentication status in Zabbix API.
Returns:
bool: User authentication status (`True`, `False`)
"""
if not self.__session_id:
log.debug("You're not logged in Zabbix API")
return False
if self.__use_token:
log.debug("Check auth session using token in Zabbix API")
refresh_resp = self.user.checkAuthentication(token=self.__session_id)
else:
log.debug("Check auth session using sessionid in Zabbix API")
refresh_resp = self.user.checkAuthentication(sessionid=self.__session_id)
return bool(refresh_resp.get('userid'))
def send_api_request(self, method: str, params: Optional[dict] = None,
need_auth=True) -> dict:
"""Function for sending request to Zabbix API.
Args:
method (str): Zabbix API method name.
params (dict, optional): Params for request body. Defaults to `None`.
need_auth (bool, optional): Flag specifying whether authorization is required. Defaults to `True`.
Raises:
ProcessingError: Wrapping built-in exceptions during request processing.
APIRequestError: Wrapping errors from Zabbix API.
Returns:
dict: Dictionary with Zabbix API response.
"""
request_json = {
'jsonrpc': '2.0',
'method': method,
'params': params or {},
'id': str(uuid4()),
}
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json-rpc',
'User-Agent': f"{__name__}/{__version__}"
}
if need_auth:
if not self.__session_id:
raise ProcessingError("You're not logged in Zabbix API")
if self.version < 6.4 or self.__basic_cred is not None:
request_json['auth'] = self.__session_id
else:
headers["Authorization"] = f"Bearer {self.__session_id}"
if self.__basic_cred is not None:
headers["Authorization"] = f"Basic {self.__basic_cred}"
log.debug(
"Sending request to %s with body: %s",
self.url,
request_json
)
req = ul.Request(
self.url,
data=json.dumps(request_json).encode("utf-8"),
headers=headers,
method='POST'
)
req.timeout = self.timeout
# Disable SSL certificate validation if needed.
if not self.validate_certs:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
else:
ctx = None
try:
resp = ul.urlopen(req, context=ctx)
resp_json = json.loads(resp.read().decode('utf-8'))
except URLError as err:
raise ProcessingError(f"Unable to connect to {self.url}:", err) from None
except ValueError as err:
raise ProcessingError("Unable to parse json:", err) from None
if method not in ModuleUtils.FILES_METHODS:
log.debug(
"Received response body: %s",
resp_json
)
else:
debug_json = resp_json.copy()
if debug_json.get('result'):
debug_json['result'] = shorten(debug_json['result'], 200, placeholder='...')
log.debug(
"Received response body (clipped): %s",
json.dumps(debug_json, indent=4, separators=(',', ': '))
)
if 'error' in resp_json:
err = resp_json['error'].copy()
err['body'] = request_json.copy()
raise APIRequestError(err)
return resp_json
def __check_version(self, skip_check: bool) -> None:
skip_check_help = "If you're sure zabbix_utils will work properly with your current \
Zabbix version you can skip this check by \
specifying skip_version_check=True when creating a ZabbixAPI object."
if self.version < __min_supported__:
if skip_check:
log.debug(
"Version of Zabbix API [%s] is less than the library supports. %s",
self.version,
"Further library use at your own risk!"
)
else:
raise APINotSupported(
f"Version of Zabbix API [{self.version}] is not supported by the library. " +
f"The oldest supported version is {__min_supported__}.0. " + skip_check_help
)
if self.version > __max_supported__:
if skip_check:
log.debug(
"Version of Zabbix API [%s] is more than the library was tested on. %s",
self.version,
"Recommended to update the library. Further library use at your own risk!"
)
else:
raise APINotSupported(
f"Version of Zabbix API [{self.version}] was not tested with the library. " +
f"The latest tested version is {__max_supported__}.0. " + skip_check_help
)
|
(url: Optional[str] = None, token: Optional[str] = None, user: Optional[str] = None, password: Optional[str] = None, http_user: Optional[str] = None, http_password: Optional[str] = None, skip_version_check: bool = False, validate_certs: bool = True, timeout: int = 30)
|
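Based on the class above, a usage sketch covering both authentication modes and the context-manager protocol (__exit__ calls logout()); the URL, token and credentials are placeholders:

```python
# Sketch; URL, token and credentials are placeholders.
from zabbix_utils.api import ZabbixAPI

# Token-based authentication (requires Zabbix >= 5.4).
api = ZabbixAPI(url="https://zabbix.example.com/api_jsonrpc.php", token="<api token>")
print(api.version)  # APIVersion object, fetched once via apiinfo.version()

# Username/password authentication; login() sends "user" or "username" in the
# credential dictionary depending on whether the API version is below 5.4.
with ZabbixAPI(url="https://zabbix.example.com/api_jsonrpc.php",
               user="Admin", password="zabbix") as api:
    print(api.check_auth())  # True while the session id is valid
# logout() runs automatically when the `with` block exits.
```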
52,435 |
zabbix_utils.api
|
__basic_auth
|
Enable Basic Authentication.
Args:
user (str): Basic Authentication username.
password (str): Basic Authentication password.
|
def __basic_auth(self, user: str, password: str) -> None:
"""Enable Basic Authentication using.
Args:
user (str): Basic Authentication username.
password (str): Basic Authentication password.
"""
log.debug(
"Enable Basic Authentication with username:%s password:%s",
user,
ModuleUtils.HIDING_MASK
)
self.__basic_cred = base64.b64encode(
f"{user}:{password}".encode()
).decode()
|
(self, user: str, password: str) -> NoneType
|
52,436 |
zabbix_utils.api
|
__check_version
| null |
def __check_version(self, skip_check: bool) -> None:
skip_check_help = "If you're sure zabbix_utils will work properly with your current \
Zabbix version you can skip this check by \
specifying skip_version_check=True when creating a ZabbixAPI object."
if self.version < __min_supported__:
if skip_check:
log.debug(
"Version of Zabbix API [%s] is less than the library supports. %s",
self.version,
"Further library use at your own risk!"
)
else:
raise APINotSupported(
f"Version of Zabbix API [{self.version}] is not supported by the library. " +
f"The oldest supported version is {__min_supported__}.0. " + skip_check_help
)
if self.version > __max_supported__:
if skip_check:
log.debug(
"Version of Zabbix API [%s] is more than the library was tested on. %s",
self.version,
"Recommended to update the library. Further library use at your own risk!"
)
else:
raise APINotSupported(
f"Version of Zabbix API [{self.version}] was not tested with the library. " +
f"The latest tested version is {__max_supported__}.0. " + skip_check_help
)
|
(self, skip_check: bool) -> NoneType
|
52,437 |
zabbix_utils.api
|
__enter__
| null |
def __enter__(self) -> Callable:
return self
|
(self) -> Callable
|
52,438 |
zabbix_utils.api
|
__exit__
| null |
def __exit__(self, *args) -> None:
self.logout()
|
(self, *args) -> NoneType
|
52,439 |
zabbix_utils.api
|
__getattr__
|
Dynamic creation of an API object.
Args:
name (str): Zabbix API method name.
Returns:
APIObject: Zabbix API object instance.
|
def __getattr__(self, name: str) -> Callable:
"""Dynamic creation of an API object.
Args:
name (str): Zabbix API method name.
Returns:
APIObject: Zabbix API object instance.
"""
return APIObject(name, self)
|
(self, name: str) -> Callable
|
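The __getattr__ row is what lets arbitrary API object names resolve dynamically: any attribute not defined on ZabbixAPI becomes an APIObject. APIObject's own code is not part of this excerpt, so the call forwarding below is an assumption; the sketch only shows the intended shape:

```python
# Sketch; assumes APIObject (not shown above) turns `api.<object>.<method>(...)`
# into send_api_request("<object>.<method>", params).
from zabbix_utils.api import ZabbixAPI

api = ZabbixAPI(url="https://zabbix.example.com/api_jsonrpc.php", token="<api token>")

# `api.host` is created by __getattr__; calling .get(...) on it would issue the
# JSON-RPC method "host.get".
hosts = api.host.get(output=["hostid", "name"], filter={"status": 0})
```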
52,440 |
zabbix_utils.api
|
__init__
| null |
def __init__(self, url: Optional[str] = None, token: Optional[str] = None,
user: Optional[str] = None, password: Optional[str] = None,
http_user: Optional[str] = None, http_password: Optional[str] = None,
skip_version_check: bool = False, validate_certs: bool = True, timeout: int = 30):
url = url or env.get('ZABBIX_URL') or 'http://localhost/zabbix/api_jsonrpc.php'
user = user or env.get('ZABBIX_USER') or None
password = password or env.get('ZABBIX_PASSWORD') or None
token = token or env.get('ZABBIX_TOKEN') or None
self.url = ModuleUtils.check_url(url)
self.validate_certs = validate_certs
self.timeout = timeout
if http_user and http_password:
self.__basic_auth(http_user, http_password)
self.__check_version(skip_version_check)
if token or user or password:
self.login(token, user, password)
|
(self, url: Optional[str] = None, token: Optional[str] = None, user: Optional[str] = None, password: Optional[str] = None, http_user: Optional[str] = None, http_password: Optional[str] = None, skip_version_check: bool = False, validate_certs: bool = True, timeout: int = 30)
|
52,441 |
zabbix_utils.api
|
api_version
|
Return object of Zabbix API version.
Returns:
APIVersion: Object of Zabbix API version
|
def api_version(self) -> APIVersion:
"""Return object of Zabbix API version.
Returns:
APIVersion: Object of Zabbix API version
"""
if self.__version is None:
self.__version = APIVersion(self.apiinfo.version())
return self.__version
|
(self) -> zabbix_utils.types.APIVersion
|
52,442 |
zabbix_utils.api
|
check_auth
|
Check authentication status in Zabbix API.
Returns:
bool: User authentication status (`True`, `False`)
|
def check_auth(self) -> bool:
"""Check authentication status in Zabbix API.
Returns:
bool: User authentication status (`True`, `False`)
"""
if not self.__session_id:
log.debug("You're not logged in Zabbix API")
return False
if self.__use_token:
log.debug("Check auth session using token in Zabbix API")
refresh_resp = self.user.checkAuthentication(token=self.__session_id)
else:
log.debug("Check auth session using sessionid in Zabbix API")
refresh_resp = self.user.checkAuthentication(sessionid=self.__session_id)
return bool(refresh_resp.get('userid'))
|
(self) -> bool
|
52,443 |
zabbix_utils.api
|
login
|
Login to Zabbix API.
Args:
token (str, optional): Zabbix API token. Defaults to `None`.
user (str, optional): Zabbix API username. Defaults to `None`.
password (str, optional): Zabbix API user's password. Defaults to `None`.
|
def login(self, token: Optional[str] = None, user: Optional[str] = None,
password: Optional[str] = None) -> None:
"""Login to Zabbix API.
Args:
token (str, optional): Zabbix API token. Defaults to `None`.
user (str, optional): Zabbix API username. Defaults to `None`.
password (str, optional): Zabbix API user's password. Defaults to `None`.
"""
if token:
if self.version < 5.4:
raise APINotSupported(
message="Token usage",
version=self.version
)
if user or password:
raise ProcessingError(
"Token cannot be used with username and password")
self.__use_token = True
self.__session_id = token
return
if not user:
raise ProcessingError("Username is missing")
if not password:
raise ProcessingError("User password is missing")
if self.version < 5.4:
user_cred = {
"user": user,
"password": password
}
else:
user_cred = {
"username": user,
"password": password
}
log.debug(
"Login to Zabbix API using username:%s password:%s", user, ModuleUtils.HIDING_MASK
)
self.__use_token = False
self.__session_id = self.user.login(**user_cred)
log.debug("Connected to Zabbix API version %s: %s", self.version, self.url)
|
(self, token: Optional[str] = None, user: Optional[str] = None, password: Optional[str] = None) -> NoneType
|
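The login row encodes two constraints worth spelling out: a token cannot be combined with user/password, and a missing password is rejected before any request is made. A small sketch of those failure modes, using the ProcessingError class shown earlier in this excerpt (URL and credentials are placeholders):

```python
# Sketch of login() failure modes; URL and credentials are placeholders.
from zabbix_utils.api import ZabbixAPI
from zabbix_utils.exceptions import ProcessingError

api = ZabbixAPI(url="https://zabbix.example.com/api_jsonrpc.php")

try:
    api.login(user="Admin")                       # password omitted
except ProcessingError as err:
    print(err)                                    # User password is missing

try:
    api.login(token="<api token>", user="Admin")  # token combined with a username
except ProcessingError as err:
    print(err)                                    # Token cannot be used with username and password
# Note: on Zabbix < 5.4 the token branch raises APINotSupported before this check.
```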
52,444 |
zabbix_utils.api
|
logout
|
Logout from Zabbix API.
|
def logout(self) -> None:
"""Logout from Zabbix API."""
if self.__session_id:
if self.__use_token:
self.__session_id = None
self.__use_token = False
return
log.debug("Logout from Zabbix API")
self.user.logout()
self.__session_id = None
else:
log.debug("You're not logged in Zabbix API")
|
(self) -> NoneType
|
52,445 |
zabbix_utils.api
|
send_api_request
|
Function for sending a request to the Zabbix API.
Args:
method (str): Zabbix API method name.
params (dict, optional): Params for request body. Defaults to `None`.
need_auth (bool, optional): Flag specifying whether authorization is required. Defaults to `True`.
Raises:
ProcessingError: Wrapping built-in exceptions during request processing.
APIRequestError: Wrapping errors from Zabbix API.
Returns:
dict: Dictionary with Zabbix API response.
|
def send_api_request(self, method: str, params: Optional[dict] = None,
need_auth=True) -> dict:
"""Function for sending request to Zabbix API.
Args:
method (str): Zabbix API method name.
params (dict, optional): Params for request body. Defaults to `None`.
need_auth (bool, optional): Flag specifying whether authorization is required. Defaults to `True`.
Raises:
ProcessingError: Wrapping built-in exceptions during request processing.
APIRequestError: Wrapping errors from Zabbix API.
Returns:
dict: Dictionary with Zabbix API response.
"""
request_json = {
'jsonrpc': '2.0',
'method': method,
'params': params or {},
'id': str(uuid4()),
}
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json-rpc',
'User-Agent': f"{__name__}/{__version__}"
}
if need_auth:
if not self.__session_id:
raise ProcessingError("You're not logged in Zabbix API")
if self.version < 6.4 or self.__basic_cred is not None:
request_json['auth'] = self.__session_id
else:
headers["Authorization"] = f"Bearer {self.__session_id}"
if self.__basic_cred is not None:
headers["Authorization"] = f"Basic {self.__basic_cred}"
log.debug(
"Sending request to %s with body: %s",
self.url,
request_json
)
req = ul.Request(
self.url,
data=json.dumps(request_json).encode("utf-8"),
headers=headers,
method='POST'
)
req.timeout = self.timeout
# Disable SSL certificate validation if needed.
if not self.validate_certs:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
else:
ctx = None
try:
resp = ul.urlopen(req, context=ctx)
resp_json = json.loads(resp.read().decode('utf-8'))
except URLError as err:
raise ProcessingError(f"Unable to connect to {self.url}:", err) from None
except ValueError as err:
raise ProcessingError("Unable to parse json:", err) from None
if method not in ModuleUtils.FILES_METHODS:
log.debug(
"Received response body: %s",
resp_json
)
else:
debug_json = resp_json.copy()
if debug_json.get('result'):
debug_json['result'] = shorten(debug_json['result'], 200, placeholder='...')
log.debug(
"Received response body (clipped): %s",
json.dumps(debug_json, indent=4, separators=(',', ': '))
)
if 'error' in resp_json:
err = resp_json['error'].copy()
err['body'] = request_json.copy()
raise APIRequestError(err)
return resp_json
|
(self, method: str, params: Optional[dict] = None, need_auth=True) -> dict
|
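send_api_request() above wraps every call in a JSON-RPC 2.0 envelope and places the session id either in the `auth` field (API versions below 6.4, or when Basic auth is active) or in a Bearer Authorization header. Roughly, the body for a host.get call would look like this; the id is a random uuid4 in practice:

```python
# Approximate request body produced by send_api_request("host.get", {"output": ["hostid"]})
# on an API older than 6.4 (newer versions move the session id to a Bearer header).
request_json = {
    "jsonrpc": "2.0",
    "method": "host.get",
    "params": {"output": ["hostid"]},
    "id": "a7f2c9e0-0000-0000-0000-000000000000",  # str(uuid4()) in practice
    "auth": "<session id or API token>",
}
```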
52,456 |
vega.vega
|
Vega
|
Display a Vega visualization in the Jupyter Notebook.
|
class Vega(VegaBase):
"""Display a Vega visualization in the Jupyter Notebook."""
render_type = 'vega'
|
(spec, data=None, opt=None)
|
52,457 |
vega.base
|
__init__
|
Initialize the visualization object.
|
def __init__(self, spec, data=None, opt=None):
"""Initialize the visualization object."""
self.opt = opt or {}
self.spec = self._prepare_spec(spec, data)
|
(self, spec, data=None, opt=None)
|
52,458 |
vega.base
|
_generate_js
| null |
def _generate_js(self, id, **kwds):
template = utils.get_content(self.JS_TEMPLATE)
payload = template.format(
id=id,
spec=json.dumps(self.spec, **kwds),
opt=json.dumps(self.opt, **kwds),
type=self.render_type
)
return payload
|
(self, id, **kwds)
|
52,459 |
vega.base
|
_prepare_spec
| null |
def _prepare_spec(self, spec, data):
return spec
|
(self, spec, data)
|
52,460 |
vega.base
|
_repr_mimebundle_
|
Display the visualization in the Jupyter notebook.
|
def _repr_mimebundle_(self, include=None, exclude=None):
"""Display the visualization in the Jupyter notebook."""
id = uuid.uuid4()
return (
{'application/javascript': self._generate_js(id)},
{'jupyter-vega': '#{0}'.format(id)},
)
|
(self, include=None, exclude=None)
|
52,461 |
vega.base
|
display
|
Render the visualization.
|
def display(self):
"""Render the visualization."""
display(self)
|
(self)
|
52,462 |
vega.vegalite
|
VegaLite
|
Display a Vega-Lite visualization in the Jupyter Notebook.
|
class VegaLite(VegaBase):
"""Display a Vega-Lite visualization in the Jupyter Notebook."""
render_type = 'vega-lite'
def _prepare_spec(self, spec, data):
return prepare_spec(spec, data)
|
(spec, data=None, opt=None)
|
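The Vega/VegaLite rows describe a thin display wrapper: _prepare_spec() optionally merges data into the spec, _generate_js() fills a JS template, and _repr_mimebundle_() hands Jupyter an application/javascript payload keyed to a fresh UUID. A notebook-cell sketch; the spec is a placeholder chart, and passing a pandas DataFrame as `data` is an assumption about prepare_spec(), whose code is not shown here:

```python
# Notebook-cell sketch; DataFrame support in prepare_spec() is assumed, not shown above.
import pandas as pd
from vega.vegalite import VegaLite

spec = {
    "mark": "bar",
    "encoding": {
        "x": {"field": "category", "type": "nominal"},
        "y": {"field": "value", "type": "quantitative"},
    },
}
data = pd.DataFrame({"category": ["a", "b", "c"], "value": [3, 7, 2]})

VegaLite(spec, data)            # rendered through _repr_mimebundle_ as the cell result
VegaLite(spec, data).display()  # or displayed explicitly
```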
52,465 |
vega.vegalite
|
_prepare_spec
| null |
def _prepare_spec(self, spec, data):
return prepare_spec(spec, data)
|
(self, spec, data)
|
52,468 |
vega
|
_jupyter_nbextension_paths
|
Return metadata for the jupyter-vega nbextension.
|
def _jupyter_nbextension_paths():
"""Return metadata for the jupyter-vega nbextension."""
return [dict(
section="notebook",
# the path is relative to the `vega` directory
src="static",
# directory in the `nbextension/` namespace
dest="jupyter-vega",
# _also_ in the `nbextension/` namespace
require="jupyter-vega/extension")]
|
()
|
52,470 |
vega
|
find_static_assets
| null |
def find_static_assets():
warn("""To use the vega nbextension, you'll need to update
the Jupyter notebook to version 4.2 or later.""")
return []
|
()
|
52,474 |
smdebug_rulesconfig.actions.actions
|
ActionList
| null |
class ActionList(object):
def __init__(self, *actions: Action):
"""
Higher-level object to maintain a list of actions to be invoked when a rule is fired. Offers a higher-level
`serialize` function to handle serialization of actions as a string list of dictionaries.
:param actions: List of actions.
"""
if not all(isinstance(action, Action) for action in actions):
raise TypeError("actions must be list of Action objects!")
self.actions = actions
def update_training_job_prefix_if_not_specified(self, training_job_name: str):
"""
For any StopTraining actions in the action list, update the training job prefix to be the training job name if
the user has not already specified a custom training job prefix. This is meant to be called via the sagemaker
SDK when `estimator.fit` is called by the user. Validation is purposely excluded here so that any failures in
validation of the training job name are intentionally caught in the sagemaker SDK and not here.
:param training_job_name: Name of the training job, passed in when `estimator.fit` is called.
"""
for action in self.actions:
if isinstance(action, StopTraining):
action.update_training_job_prefix_if_not_specified(training_job_name)
def serialize(self):
return "[" + ", ".join([action.serialize() for action in self.actions]) + "]"
|
(*actions: smdebug_rulesconfig.actions.actions.Action)
|
52,475 |
smdebug_rulesconfig.actions.actions
|
__init__
|
Higher-level object to maintain a list of actions to be invoked when a rule is fired. Offers a higher-level
`serialize` function to handle serialization of actions as a string list of dictionaries.
:param actions: List of actions.
|
def __init__(self, *actions: Action):
"""
Higher-level object to maintain a list of actions to be invoked when a rule is fired. Offers a higher-level
`serialize` function to handle serialization of actions as a string list of dictionaries.
:param actions: List of actions.
"""
if not all(isinstance(action, Action) for action in actions):
raise TypeError("actions must be list of Action objects!")
self.actions = actions
|
(self, *actions: smdebug_rulesconfig.actions.actions.Action)
|
52,476 |
smdebug_rulesconfig.actions.actions
|
serialize
| null |
def serialize(self):
return "[" + ", ".join([action.serialize() for action in self.actions]) + "]"
|
(self)
|
52,477 |
smdebug_rulesconfig.actions.actions
|
update_training_job_prefix_if_not_specified
|
For any StopTraining actions in the action list, update the training job prefix to be the training job name if
the user has not already specified a custom training job prefix. This is meant to be called via the sagemaker
SDK when `estimator.fit` is called by the user. Validation is purposely excluded here so that any failures in
validation of the training job name are intentionally caught in the sagemaker SDK and not here.
:param training_job_name: Name of the training job, passed in when `estimator.fit` is called.
|
def update_training_job_prefix_if_not_specified(self, training_job_name: str):
"""
For any StopTraining actions in the action list, update the training job prefix to be the training job name if
the user has not already specified a custom training job prefix. This is meant to be called via the sagemaker
SDK when `estimator.fit` is called by the user. Validation is purposely excluded here so that any failures in
validation of the training job name are intentionally caught in the sagemaker SDK and not here.
:param training_job_name: Name of the training job, passed in when `estimator.fit` is called.
"""
for action in self.actions:
if isinstance(action, StopTraining):
action.update_training_job_prefix_if_not_specified(training_job_name)
|
(self, training_job_name: str)
|
52,478 |
smdebug_rulesconfig.profiler_rules.rules
|
BatchSize
| null |
class BatchSize(ProfilerRuleBase):
def __init__(
self,
cpu_threshold_p95=70,
gpu_threshold_p95=70,
gpu_memory_threshold_p95=70,
patience=1000,
window=500,
scan_interval_us=60 * 1000 * 1000,
):
"""
This rule helps to detect if the GPU is underutilized because of the batch size being too small.
To detect this, the rule analyzes the average GPU memory footprint, CPU and GPU utilization.
If utilization on CPU, GPU and memory footprint is on average low, it may indicate that the user
can either run on a smaller instance type or that the batch size could be increased. This analysis does not
work for frameworks that heavily over-allocate memory. Increasing the batch size could potentially lead to
a processing/dataloading bottleneck, because more data needs to be pre-processed in each iteration.
:param cpu_threshold_p95: defines the threshold for the 95th quantile of CPU utilization. Default is 70%.
:param gpu_threshold_p95: defines the threshold for the 95th quantile of GPU utilization. Default is 70%.
:param gpu_memory_threshold_p95: defines the threshold for the 95th quantile of GPU memory utilization. Default is 70%.
:param patience: defines how many datapoints to capture before the rule runs its first evaluation. Default is 1000.
:param window: window size for computing quantiles.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_percentile("cpu_threshold_p95", cpu_threshold_p95)
validate_percentile("gpu_threshold_p95", gpu_threshold_p95)
validate_percentile("gpu_memory_threshold_p95", gpu_memory_threshold_p95)
validate_positive_integer("patience", patience)
validate_positive_integer("window", window)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(
cpu_threshold_p95=cpu_threshold_p95,
gpu_threshold_p95=gpu_threshold_p95,
gpu_memory_threshold_p95=gpu_memory_threshold_p95,
patience=patience,
window=window,
scan_interval_us=scan_interval_us,
)
|
(cpu_threshold_p95=70, gpu_threshold_p95=70, gpu_memory_threshold_p95=70, patience=1000, window=500, scan_interval_us=60000000)
|
52,479 |
smdebug_rulesconfig.profiler_rules.rules
|
__init__
|
This rule helps to detect if the GPU is underutilized because of the batch size being too small.
To detect this, the rule analyzes the average GPU memory footprint, CPU and GPU utilization.
If utilization on CPU, GPU and memory footprint is on average low, it may indicate that the user
can either run on a smaller instance type or that the batch size could be increased. This analysis does not
work for frameworks that heavily over-allocate memory. Increasing the batch size could potentially lead to
a processing/dataloading bottleneck, because more data needs to be pre-processed in each iteration.
:param cpu_threshold_p95: defines the threshold for the 95th quantile of CPU utilization. Default is 70%.
:param gpu_threshold_p95: defines the threshold for the 95th quantile of GPU utilization. Default is 70%.
:param gpu_memory_threshold_p95: defines the threshold for the 95th quantile of GPU memory utilization. Default is 70%.
:param patience: defines how many datapoints to capture before the rule runs its first evaluation. Default is 1000.
:param window: window size for computing quantiles.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
|
def __init__(
self,
cpu_threshold_p95=70,
gpu_threshold_p95=70,
gpu_memory_threshold_p95=70,
patience=1000,
window=500,
scan_interval_us=60 * 1000 * 1000,
):
"""
This rule helps to detect if the GPU is underutilized because of the batch size being too small.
To detect this, the rule analyzes the average GPU memory footprint, CPU and GPU utilization.
If utilization on CPU, GPU and memory footprint is on average low, it may indicate that the user
can either run on a smaller instance type or that the batch size could be increased. This analysis does not
work for frameworks that heavily over-allocate memory. Increasing the batch size could potentially lead to
a processing/dataloading bottleneck, because more data needs to be pre-processed in each iteration.
:param cpu_threshold_p95: defines the threshold for the 95th quantile of CPU utilization. Default is 70%.
:param gpu_threshold_p95: defines the threshold for the 95th quantile of GPU utilization. Default is 70%.
:param gpu_memory_threshold_p95: defines the threshold for the 95th quantile of GPU memory utilization. Default is 70%.
:param patience: defines how many datapoints to capture before the rule runs its first evaluation. Default is 1000.
:param window: window size for computing quantiles.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_percentile("cpu_threshold_p95", cpu_threshold_p95)
validate_percentile("gpu_threshold_p95", gpu_threshold_p95)
validate_percentile("gpu_memory_threshold_p95", gpu_memory_threshold_p95)
validate_positive_integer("patience", patience)
validate_positive_integer("window", window)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(
cpu_threshold_p95=cpu_threshold_p95,
gpu_threshold_p95=gpu_threshold_p95,
gpu_memory_threshold_p95=gpu_memory_threshold_p95,
patience=patience,
window=window,
scan_interval_us=scan_interval_us,
)
|
(self, cpu_threshold_p95=70, gpu_threshold_p95=70, gpu_memory_threshold_p95=70, patience=1000, window=500, scan_interval_us=60000000)
|
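Since the constructor only validates its thresholds and forwards them to ProfilerRuleBase, configuring the rule amounts to instantiating it. A hedged sketch; the threshold values are illustrative and wiring the rule into a SageMaker estimator is outside this excerpt:

```python
# Sketch; values are illustrative, defaults are those shown in the signature above.
from smdebug_rulesconfig.profiler_rules.rules import BatchSize

rule = BatchSize(
    cpu_threshold_p95=60,         # fire when 95th-percentile CPU utilization stays below 60%
    gpu_threshold_p95=60,
    gpu_memory_threshold_p95=60,
    patience=1000,                # datapoints to collect before the first evaluation
    window=500,                   # window size for the quantile computation
)

# Each threshold is range-checked up front; an out-of-range value fails immediately.
try:
    BatchSize(cpu_threshold_p95=170)
except Exception as err:          # validate_percentile's exact exception type is not shown above
    print(err)
```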
52,480 |
smdebug_rulesconfig.profiler_rules.rules
|
CPUBottleneck
| null |
class CPUBottleneck(ProfilerRuleBase):
def __init__(
self,
threshold=50,
gpu_threshold=10,
cpu_threshold=90,
patience=1000,
scan_interval_us=60 * 1000 * 1000,
):
"""
This rule helps to detect if the GPU is underutilized due to CPU bottlenecks. The rule returns True if the number of CPU bottlenecks exceeds a predefined threshold.
:param threshold: defines the threshold beyond which the rule should return True. Default is 50 percent, so if there is a bottleneck more than 50% of the time during training, the rule will return True.
:param gpu_threshold: threshold that defines when the GPU is considered under-utilized. Default is 10%.
:param cpu_threshold: threshold that defines high CPU utilization. Default is above 90%.
:param patience: how many values to record before checking for CPU bottlenecks. During training initialization, GPU utilization is likely at 0 percent, so the rule should not check for underutilization immediately. Default is 1000.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_percentile("threshold", threshold)
validate_percentile("gpu_threshold", gpu_threshold)
validate_percentile("cpu_threshold", cpu_threshold)
validate_positive_integer("patience", patience)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(
threshold=threshold,
gpu_threshold=gpu_threshold,
cpu_threshold=cpu_threshold,
patience=patience,
scan_interval_us=scan_interval_us,
)
|
(threshold=50, gpu_threshold=10, cpu_threshold=90, patience=1000, scan_interval_us=60000000)
|
52,481 |
smdebug_rulesconfig.profiler_rules.rules
|
__init__
|
This rule helps to detect if the GPU is underutilized due to CPU bottlenecks. The rule returns True if the number of CPU bottlenecks exceeds a predefined threshold.
:param threshold: defines the threshold beyond which the rule should return True. Default is 50 percent, so if there is a bottleneck more than 50% of the time during training, the rule will return True.
:param gpu_threshold: threshold that defines when the GPU is considered under-utilized. Default is 10%.
:param cpu_threshold: threshold that defines high CPU utilization. Default is above 90%.
:param patience: how many values to record before checking for CPU bottlenecks. During training initialization, GPU utilization is likely at 0 percent, so the rule should not check for underutilization immediately. Default is 1000.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
|
def __init__(
self,
threshold=50,
gpu_threshold=10,
cpu_threshold=90,
patience=1000,
scan_interval_us=60 * 1000 * 1000,
):
"""
This rule helps to detect if the GPU is underutilized due to CPU bottlenecks. The rule returns True if the number of CPU bottlenecks exceeds a predefined threshold.
:param threshold: defines the threshold beyond which the rule should return True. Default is 50 percent, so if there is a bottleneck more than 50% of the time during training, the rule will return True.
:param gpu_threshold: threshold that defines when the GPU is considered under-utilized. Default is 10%.
:param cpu_threshold: threshold that defines high CPU utilization. Default is above 90%.
:param patience: how many values to record before checking for CPU bottlenecks. During training initialization, GPU utilization is likely at 0 percent, so the rule should not check for underutilization immediately. Default is 1000.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_percentile("threshold", threshold)
validate_percentile("gpu_threshold", gpu_threshold)
validate_percentile("cpu_threshold", cpu_threshold)
validate_positive_integer("patience", patience)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(
threshold=threshold,
gpu_threshold=gpu_threshold,
cpu_threshold=cpu_threshold,
patience=patience,
scan_interval_us=scan_interval_us,
)
|
(self, threshold=50, gpu_threshold=10, cpu_threshold=90, patience=1000, scan_interval_us=60000000)
|
52,482 |
smdebug_rulesconfig.actions.actions
|
Email
| null |
class Email(Action):
def __init__(self, email_address: str):
"""
Action for sending an email to the provided email address when the rule is fired. Note that a policy must be
created in the AWS account to allow the sagemaker role to send an email to the user:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"sns:Publish",
"sns:CreateTopic",
"sns:Subscribe"
],
"Resource": "arn:aws:sns:*:<account-id>:SMDebugRules"
}
]
}
```
:param email_address: Email address to send the email notification to.
"""
validate_email_address("email_address", email_address)
super(Email, self).__init__(endpoint=email_address)
|
(email_address: str)
|
52,483 |
smdebug_rulesconfig.actions.actions
|
__init__
|
Action for sending an email to the provided email address when the rule is fired. Note that a policy must be
created in the AWS account to allow the sagemaker role to send an email to the user:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"sns:Publish",
"sns:CreateTopic",
"sns:Subscribe"
],
"Resource": "arn:aws:sns:*:<account-id>:SMDebugRules"
}
]
}
```
:param email_address: Email address to send the email notification to.
|
def __init__(self, email_address: str):
"""
Action for sending an email to the provided email address when the rule is fired. Note that a policy must be
created in the AWS account to allow the sagemaker role to send an email to the user:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"sns:Publish",
"sns:CreateTopic",
"sns:Subscribe"
],
"Resource": "arn:aws:sns:*:<account-id>:SMDebugRules"
}
]
}
```
:param email_address: Email address to send the email notification to.
"""
validate_email_address("email_address", email_address)
super(Email, self).__init__(endpoint=email_address)
|
(self, email_address: str)
|
52,484 |
smdebug_rulesconfig.actions.actions
|
serialize
|
Serialize the action parameters as a string dictionary.
:return: Action parameters serialized as a string dictionary.
|
def serialize(self):
"""
Serialize the action parameters as a string dictionary.
:return: Action parameters serialized as a string dictionary.
"""
return (
"{"
+ ", ".join(
[f'\\"{key}\\": \\"{value}\\"' for key, value in self.action_parameters.items()]
)
+ "}"
)
|
(self)
|
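Combining the Email action with ActionList from earlier in this excerpt, serialize() joins each action's escaped parameter dictionary into a string list. A short sketch; the address is a placeholder and the exact keys in the output depend on the Action base class, which is not shown here:

```python
# Sketch; the address is a placeholder and must pass validate_email_address().
from smdebug_rulesconfig.actions.actions import ActionList, Email

actions = ActionList(Email("user@example.com"))
print(actions.serialize())
# e.g. [{\"endpoint\": \"user@example.com\", ...}]  (escaped string dictionaries; the
# full key set comes from the Action base class, which this excerpt does not include)
```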
52,485 |
smdebug_rulesconfig.profiler_rules.rules
|
GPUMemoryIncrease
| null |
class GPUMemoryIncrease(ProfilerRuleBase):
def __init__(self, increase=5, patience=1000, window=10, scan_interval_us=60 * 1000 * 1000):
"""
This rule helps to detect a large increase in memory usage on GPUs. The rule computes the moving average of continuous datapoints and compares it against the moving average of the previous iteration.
:param increase: defines the threshold for absolute memory increase. Default is 5%, so if the moving average increases from 5% to 6%, the rule will fire.
:param patience: defines how many continuous datapoints to capture before the rule runs its first evaluation. Default is 1000.
:param window: window size for computing the moving average of continuous datapoints.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_positive_integer("increase", increase)
validate_positive_integer("patience", patience)
validate_positive_integer("window", window)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(
increase=increase, patience=patience, window=window, scan_interval_us=scan_interval_us
)
|
(increase=5, patience=1000, window=10, scan_interval_us=60000000)
|
52,486 |
smdebug_rulesconfig.profiler_rules.rules
|
__init__
|
This rule helps to detect a large increase in memory usage on GPUs. The rule computes the moving average of continuous datapoints and compares it against the moving average of the previous iteration.
:param increase: defines the threshold for absolute memory increase. Default is 5%, so if the moving average increases from 5% to 6%, the rule will fire.
:param patience: defines how many continuous datapoints to capture before the rule runs its first evaluation. Default is 1000.
:param window: window size for computing the moving average of continuous datapoints.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
|
def __init__(self, increase=5, patience=1000, window=10, scan_interval_us=60 * 1000 * 1000):
"""
This rule helps to detect a large increase in memory usage on GPUs. The rule computes the moving average of continuous datapoints and compares it against the moving average of the previous iteration.
:param increase: defines the threshold for absolute memory increase. Default is 5%, so if the moving average increases from 5% to 6%, the rule will fire.
:param patience: defines how many continuous datapoints to capture before the rule runs its first evaluation. Default is 1000.
:param window: window size for computing the moving average of continuous datapoints.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_positive_integer("increase", increase)
validate_positive_integer("patience", patience)
validate_positive_integer("window", window)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(
increase=increase, patience=patience, window=window, scan_interval_us=scan_interval_us
)
|
(self, increase=5, patience=1000, window=10, scan_interval_us=60000000)
|
52,487 |
smdebug_rulesconfig.profiler_rules.rules
|
IOBottleneck
| null |
class IOBottleneck(ProfilerRuleBase):
def __init__(
self,
threshold=50,
gpu_threshold=10,
io_threshold=50,
patience=1000,
scan_interval_us=60 * 1000 * 1000,
):
"""
This rule helps to detect if the GPU is underutilized due to IO bottlenecks. The rule returns True if the number of IO bottlenecks exceeds a predefined threshold.
:param threshold: defines the threshold at which the rule should return True. Default is 50 percent. So if there is a bottleneck more than 50% of the time during training, the rule will return True.
:param gpu_threshold: threshold below which the GPU is considered under-utilized. Default is 10%.
:param io_threshold: threshold that defines high IO wait time. Default is 50%.
:param patience: how many values to record before checking for IO bottlenecks. During training initialization, the GPU is likely at 0 percent, so the rule should not check for underutilization immediately. Default is 1000.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_percentile("threshold", threshold)
validate_percentile("gpu_threshold", gpu_threshold)
validate_percentile("io_threshold", io_threshold)
validate_positive_integer("patience", patience)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(
threshold=threshold,
gpu_threshold=gpu_threshold,
io_threshold=io_threshold,
patience=patience,
scan_interval_us=scan_interval_us,
)
|
(threshold=50, gpu_threshold=10, io_threshold=50, patience=1000, scan_interval_us=60000000)
|
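A minimal usage sketch with tightened thresholds (values are illustrative):
```
from smdebug_rulesconfig.profiler_rules.rules import IOBottleneck

# Flag the job if IO bottlenecks occur more than 30% of the time while the
# GPU stays below 15% utilization.
rule_config = IOBottleneck(threshold=30, gpu_threshold=15, io_threshold=50)
```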
52,488 |
smdebug_rulesconfig.profiler_rules.rules
|
__init__
|
This rule helps to detect if the GPU is underutilized due to IO bottlenecks. The rule returns True if the number of IO bottlenecks exceeds a predefined threshold.
:param threshold: defines the threshold at which the rule should return True. Default is 50 percent. So if there is a bottleneck more than 50% of the time during training, the rule will return True.
:param gpu_threshold: threshold below which the GPU is considered under-utilized. Default is 10%.
:param io_threshold: threshold that defines high IO wait time. Default is 50%.
:param patience: how many values to record before checking for IO bottlenecks. During training initialization, the GPU is likely at 0 percent, so the rule should not check for underutilization immediately. Default is 1000.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
|
def __init__(
self,
threshold=50,
gpu_threshold=10,
io_threshold=50,
patience=1000,
scan_interval_us=60 * 1000 * 1000,
):
"""
This rule helps to detect if the GPU is underutilized due to IO bottlenecks. The rule returns True if the number of IO bottlenecks exceeds a predefined threshold.
:param threshold: defines the threshold at which the rule should return True. Default is 50 percent. So if there is a bottleneck more than 50% of the time during training, the rule will return True.
:param gpu_threshold: threshold below which the GPU is considered under-utilized. Default is 10%.
:param io_threshold: threshold that defines high IO wait time. Default is 50%.
:param patience: how many values to record before checking for IO bottlenecks. During training initialization, the GPU is likely at 0 percent, so the rule should not check for underutilization immediately. Default is 1000.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_percentile("threshold", threshold)
validate_percentile("gpu_threshold", gpu_threshold)
validate_percentile("io_threshold", io_threshold)
validate_positive_integer("patience", patience)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(
threshold=threshold,
gpu_threshold=gpu_threshold,
io_threshold=io_threshold,
patience=patience,
scan_interval_us=scan_interval_us,
)
|
(self, threshold=50, gpu_threshold=10, io_threshold=50, patience=1000, scan_interval_us=60000000)
|
52,489 |
smdebug_rulesconfig.profiler_rules.rules
|
LoadBalancing
| null |
class LoadBalancing(ProfilerRuleBase):
def __init__(self, threshold=0.5, patience=1000, scan_interval_us=60 * 1000 * 1000):
"""
This rule helps to detect issues in workload balancing between multiple GPUs.
It computes a histogram of utilization per GPU and measures the distance between those histograms.
If the distance exceeds a pre-defined threshold, then the rule triggers.
:param threshold: maximum allowed difference between two histograms. Default is 0.5.
:param patience: how many values to record before checking for load balancing issues. Default is 1000.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_percentile("threshold", threshold)
validate_positive_integer("patience", patience)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(threshold=threshold, patience=patience, scan_interval_us=scan_interval_us)
|
(threshold=0.5, patience=1000, scan_interval_us=60000000)
|
52,490 |
smdebug_rulesconfig.profiler_rules.rules
|
__init__
|
This rule helps to detect issues in workload balancing between multiple GPUs.
It computes a histogram of utilization per GPU and measures the distance between those histograms.
If the distance exceeds a pre-defined threshold, then the rule triggers.
:param threshold: maximum allowed difference between two histograms. Default is 0.5.
:param patience: how many values to record before checking for load balancing issues. Default is 1000.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
|
def __init__(self, threshold=0.5, patience=1000, scan_interval_us=60 * 1000 * 1000):
"""
This rule helps to detect issues in workload balancing between multiple GPUs.
It computes a histogram of utilization per GPU and measures the distance between those histograms.
If the distance exceeds a pre-defined threshold, then the rule triggers.
:param threshold: maximum allowed difference between two histograms. Default is 0.5.
:param patience: how many values to record before checking for load balancing issues. Default is 1000.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_percentile("threshold", threshold)
validate_positive_integer("patience", patience)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(threshold=threshold, patience=patience, scan_interval_us=scan_interval_us)
|
(self, threshold=0.5, patience=1000, scan_interval_us=60000000)
|
52,491 |
smdebug_rulesconfig.profiler_rules.rules
|
LowGPUUtilization
| null |
class LowGPUUtilization(ProfilerRuleBase):
def __init__(
self,
threshold_p95=70,
threshold_p5=10,
window=500,
patience=1000,
scan_interval_us=60 * 1000 * 1000,
):
"""
This rule helps to detect if GPU utilization is low or suffers from fluctuations. This is checked for each single GPU on each worker node.
The rule returns True if the 95th quantile is below threshold_p95, which indicates under-utilization.
The rule returns True if the 95th quantile is above threshold_p95 and the 5th quantile is below threshold_p5, which indicates fluctuations.
:param threshold_p95: threshold for the 95th quantile below which the GPU is considered to be underutilized. Default is 70 percent.
:param threshold_p5: threshold for the 5th quantile. Default is 10 percent.
:param window: number of past data points used to compute the quantiles.
:param patience: how many values to record before checking for underutilization/fluctuations. During training initialization, the GPU is likely at 0 percent, so the rule should not check for underutilization immediately. Default is 1000.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_percentile("threshold_p95", threshold_p95)
validate_percentile("threshold_p5", threshold_p5)
validate_positive_integer("window", window)
validate_positive_integer("patience", patience)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(
threshold_p95=threshold_p95,
threshold_p5=threshold_p5,
window=window,
patience=patience,
scan_interval_us=scan_interval_us,
)
|
(threshold_p95=70, threshold_p5=10, window=500, patience=1000, scan_interval_us=60000000)
|
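A minimal usage sketch (values are illustrative): raising threshold_p5 makes the rule more sensitive to utilization fluctuations.
```
from smdebug_rulesconfig.profiler_rules.rules import LowGPUUtilization

rule_config = LowGPUUtilization(threshold_p95=70, threshold_p5=20, window=500)
```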
52,492 |
smdebug_rulesconfig.profiler_rules.rules
|
__init__
|
This rule helps to detect if GPU utilization is low or suffers from fluctuations. This is checked for each single GPU on each worker node.
The rule returns True if the 95th quantile is below threshold_p95, which indicates under-utilization.
The rule returns True if the 95th quantile is above threshold_p95 and the 5th quantile is below threshold_p5, which indicates fluctuations.
:param threshold_p95: threshold for the 95th quantile below which the GPU is considered to be underutilized. Default is 70 percent.
:param threshold_p5: threshold for the 5th quantile. Default is 10 percent.
:param window: number of past data points used to compute the quantiles.
:param patience: how many values to record before checking for underutilization/fluctuations. During training initialization, the GPU is likely at 0 percent, so the rule should not check for underutilization immediately. Default is 1000.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
|
def __init__(
self,
threshold_p95=70,
threshold_p5=10,
window=500,
patience=1000,
scan_interval_us=60 * 1000 * 1000,
):
"""
This rule helps to detect if GPU utilization is low or suffers from fluctuations. This is checked for each single GPU on each worker node.
The rule returns True if the 95th quantile is below threshold_p95, which indicates under-utilization.
The rule returns True if the 95th quantile is above threshold_p95 and the 5th quantile is below threshold_p5, which indicates fluctuations.
:param threshold_p95: threshold for the 95th quantile below which the GPU is considered to be underutilized. Default is 70 percent.
:param threshold_p5: threshold for the 5th quantile. Default is 10 percent.
:param window: number of past data points used to compute the quantiles.
:param patience: how many values to record before checking for underutilization/fluctuations. During training initialization, the GPU is likely at 0 percent, so the rule should not check for underutilization immediately. Default is 1000.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_percentile("threshold_p95", threshold_p95)
validate_percentile("threshold_p5", threshold_p5)
validate_positive_integer("window", window)
validate_positive_integer("patience", patience)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(
threshold_p95=threshold_p95,
threshold_p5=threshold_p5,
window=window,
patience=patience,
scan_interval_us=scan_interval_us,
)
|
(self, threshold_p95=70, threshold_p5=10, window=500, patience=1000, scan_interval_us=60000000)
|
52,493 |
smdebug_rulesconfig.profiler_rules.rules
|
MaxInitializationTime
| null |
class MaxInitializationTime(ProfilerRuleBase):
def __init__(self, threshold=20, scan_interval_us=60 * 1000 * 1000):
"""
This rule helps to detect if the training initialization is taking too much time. The rule waits until the first
step is available.
:param threshold: defines the threshold in minutes to wait for the first step to become available. Default is 20 minutes.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_positive_integer("threshold", threshold)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(threshold=threshold, scan_interval_us=scan_interval_us)
|
(threshold=20, scan_interval_us=60000000)
|
52,494 |
smdebug_rulesconfig.profiler_rules.rules
|
__init__
|
This rule helps to detect if the training initialization is taking too much time. The rule waits until the first
step is available.
:param threshold: defines the threshold in minutes to wait for the first step to become available. Default is 20 minutes.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
|
def __init__(self, threshold=20, scan_interval_us=60 * 1000 * 1000):
"""
This rule helps to detect if the training initialization is taking too much time. The rule waits until the first
step is available.
:param threshold: defines the threshold in minutes to wait for the first step to become available. Default is 20 minutes.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_positive_integer("threshold", threshold)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(threshold=threshold, scan_interval_us=scan_interval_us)
|
(self, threshold=20, scan_interval_us=60000000)
|
52,495 |
smdebug_rulesconfig.profiler_rules.rules
|
OverallSystemUsage
| null |
class OverallSystemUsage(ProfilerRuleBase):
def __init__(self, scan_interval_us=60 * 1000 * 1000):
"""
This rule measures overall system usage per worker node. The rule currently only aggregates values per node
and computes their percentiles. The rule currently does not take any threshold parameters into account,
nor can it trigger. The reason is that other rules already cover cases such as underutilization, and
they do so at a more fine-grained level, e.g. per GPU. We may change this in the future.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(scan_interval_us=scan_interval_us)
|
(scan_interval_us=60000000)
|
52,496 |
smdebug_rulesconfig.profiler_rules.rules
|
__init__
|
This rule measures overall system usage per worker node. The rule currently only aggregates values per node
and computes their percentiles. The rule currently does not take any threshold parameters into account,
nor can it trigger. The reason is that other rules already cover cases such as underutilization, and
they do so at a more fine-grained level, e.g. per GPU. We may change this in the future.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
|
def __init__(self, scan_interval_us=60 * 1000 * 1000):
"""
This rule measures overall system usage per worker node. The rule currently only aggregates values per node
and computes their percentiles. The rule currently does not take any threshold parameters into account,
nor can it trigger. The reason is that other rules already cover cases such as underutilization, and
they do so at a more fine-grained level, e.g. per GPU. We may change this in the future.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(scan_interval_us=scan_interval_us)
|
(self, scan_interval_us=60000000)
|
52,497 |
smdebug_rulesconfig.profiler_rules.rules
|
ProfilerReport
| null |
class ProfilerReport(ProfilerRuleBase):
def __init__(self, **rule_parameters):
"""
This rule will create a profiler report after invoking all of the rules. The parameters
used in any of these rules can be customized by following this naming scheme:
<rule_name>_<parameter_name> : value
Validation is also done here to ensure that:
1. The key names follow the above format
2. rule_name corresponds to a valid rule name.
3. parameter_name corresponds to a valid parameter of this rule.
4. The value provided for this rule's parameter is valid.
:param rule_parameters: Dictionary mapping rule + parameter name to value.
"""
invalid_key_format_error = (
"Key ({0}) does not follow naming scheme: <rule_name>_<parameter_name>"
)
invalid_rule_error = (
"{0} is an invalid rule name! Accepted rule names (case insensitive) are: {1}"
)
invalid_param_error = (
"{0} is an invalid parameter name! Accepted parameter names for {1} are: {2}"
)
rule_classes = [
BatchSize,
CPUBottleneck,
Dataloader,
GPUMemoryIncrease,
IOBottleneck,
LoadBalancing,
LowGPUUtilization,
MaxInitializationTime,
OverallSystemUsage,
StepOutlier,
]
rule_names = [rule.__name__ for rule in rule_classes]
rule_classes_by_name = {rule.__name__.lower(): rule for rule in rule_classes}
for key, val in rule_parameters.items():
assert key.count("_") >= 1, invalid_key_format_error.format(key)
rule_name, *parameter_name = key.split("_")
rule_name = rule_name.lower()
parameter_name = "_".join(parameter_name).lower()
assert rule_name in rule_classes_by_name, invalid_rule_error.format(
rule_name, rule_names
)
rule_class = rule_classes_by_name[rule_name]
try:
rule_class(**{parameter_name: val})
except TypeError:
rule_signature = inspect.signature(rule_class.__init__)
raise TypeError(
invalid_param_error.format(parameter_name, rule_class.__name__, rule_signature)
)
super().__init__(**rule_parameters)
|
(**rule_parameters)
|
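A sketch of the <rule_name>_<parameter_name> naming scheme described in the docstring (values are illustrative; each key is validated against the corresponding rule's constructor):
```
from smdebug_rulesconfig.profiler_rules.rules import ProfilerReport

report = ProfilerReport(
    IOBottleneck_threshold=30,  # forwarded to IOBottleneck(threshold=30)
    StepOutlier_stddev=5,       # forwarded to StepOutlier(stddev=5)
)
```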
52,498 |
smdebug_rulesconfig.profiler_rules.rules
|
__init__
|
This rule will create a profiler report after invoking all of the rules. The parameters
used in any of these rules can be customized by following this naming scheme:
<rule_name>_<parameter_name> : value
Validation is also done here to ensure that:
1. The key names follow the above format
2. rule_name corresponds to a valid rule name.
3. parameter_name corresponds to a valid parameter of this rule.
4. The value provided for this rule's parameter is valid.
:param rule_parameters: Dictionary mapping rule + parameter name to value.
|
def __init__(self, **rule_parameters):
"""
This rule will create a profiler report after invoking all of the rules. The parameters
used in any of these rules can be customized by following this naming scheme:
<rule_name>_<parameter_name> : value
Validation is also done here to ensure that:
1. The key names follow the above format
2. rule_name corresponds to a valid rule name.
3. parameter_name corresponds to a valid parameter of this rule.
4. The value provided for this rule's parameter is valid.
:param rule_parameters: Dictionary mapping rule + parameter name to value.
"""
invalid_key_format_error = (
"Key ({0}) does not follow naming scheme: <rule_name>_<parameter_name>"
)
invalid_rule_error = (
"{0} is an invalid rule name! Accepted rule names (case insensitive) are: {1}"
)
invalid_param_error = (
"{0} is an invalid parameter name! Accepted parameter names for {1} are: {2}"
)
rule_classes = [
BatchSize,
CPUBottleneck,
Dataloader,
GPUMemoryIncrease,
IOBottleneck,
LoadBalancing,
LowGPUUtilization,
MaxInitializationTime,
OverallSystemUsage,
StepOutlier,
]
rule_names = [rule.__name__ for rule in rule_classes]
rule_classes_by_name = {rule.__name__.lower(): rule for rule in rule_classes}
for key, val in rule_parameters.items():
assert key.count("_") >= 1, invalid_key_format_error.format(key)
rule_name, *parameter_name = key.split("_")
rule_name = rule_name.lower()
parameter_name = "_".join(parameter_name).lower()
assert rule_name in rule_classes_by_name, invalid_rule_error.format(
rule_name, rule_names
)
rule_class = rule_classes_by_name[rule_name]
try:
rule_class(**{parameter_name: val})
except TypeError:
rule_signature = inspect.signature(rule_class.__init__)
raise TypeError(
invalid_param_error.format(parameter_name, rule_class.__name__, rule_signature)
)
super().__init__(**rule_parameters)
|
(self, **rule_parameters)
|
52,499 |
smdebug_rulesconfig.actions.actions
|
SMS
| null |
class SMS(Action):
def __init__(self, phone_number: str):
"""
Action for sending an SMS to the provided phone number when the rule is fired. Note that a policy must be
created in the AWS account to allow the sagemaker role to send an SMS to the user:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"sns:Publish",
"sns:CreateTopic",
"sns:Subscribe"
],
"Resource": "arn:aws:sns:*:<account-id>:SMDebugRules"
}
]
}
```
:param phone_number: Valid phone number that follows the E.164 format. See
https://docs.aws.amazon.com/sns/latest/dg/sms_publish-to-phone.html for more info.
"""
validate_phone_number("phone_number", phone_number)
super(SMS, self).__init__(endpoint=phone_number)
|
(phone_number: str)
|
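A minimal usage sketch (the number is a placeholder in E.164 format):
```
from smdebug_rulesconfig.actions.actions import SMS

sms_action = SMS("+15555550123")  # validated as an E.164 phone number
```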
52,500 |
smdebug_rulesconfig.actions.actions
|
__init__
|
Action for sending an SMS to the provided phone number when the rule is fired. Note that a policy must be
created in the AWS account to allow the sagemaker role to send an SMS to the user:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"sns:Publish",
"sns:CreateTopic",
"sns:Subscribe"
],
"Resource": "arn:aws:sns:*:<account-id>:SMDebugRules"
}
]
}
```
:param phone_number: Valid phone number that follows the E.164 format. See
https://docs.aws.amazon.com/sns/latest/dg/sms_publish-to-phone.html for more info.
|
def __init__(self, phone_number: str):
"""
Action for sending an SMS to the provided phone number when the rule is fired. Note that a policy must be
created in the AWS account to allow the sagemaker role to send an SMS to the user:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"sns:Publish",
"sns:CreateTopic",
"sns:Subscribe"
],
"Resource": "arn:aws:sns:*:<account-id>:SMDebugRules"
}
]
}
```
:param phone_number: Valid phone number that follows the E.164 format. See
https://docs.aws.amazon.com/sns/latest/dg/sms_publish-to-phone.html for more info.
"""
validate_phone_number("phone_number", phone_number)
super(SMS, self).__init__(endpoint=phone_number)
|
(self, phone_number: str)
|
52,502 |
smdebug_rulesconfig.profiler_rules.rules
|
StepOutlier
| null |
class StepOutlier(ProfilerRuleBase):
def __init__(self, stddev=3, mode=None, n_outliers=10, scan_interval_us=60 * 1000 * 1000):
"""
This rule helps to detect outliers in step durations. The rule returns True if a step duration is larger than stddev * standard deviation.
:param stddev: factor by which to multiply the standard deviation. Default is 3.
:param mode: selects the mode under which steps have been saved and on which the rule should run. By default, the rule runs on steps from the EVAL and TRAIN phases.
:param n_outliers: how many outliers to ignore before the rule returns True. Default is 10.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_positive_integer("stddev", stddev)
assert mode is None or isinstance(mode, str), "Mode must be a string if specified!"
validate_positive_integer("n_outliers", n_outliers)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(
stddev=stddev, mode=mode, n_outliers=n_outliers, scan_interval_us=scan_interval_us
)
|
(stddev=3, mode=None, n_outliers=10, scan_interval_us=60000000)
|
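A minimal usage sketch (values and the mode string are illustrative assumptions):
```
from smdebug_rulesconfig.profiler_rules.rules import StepOutlier

# Only flag steps whose duration exceeds 5 standard deviations, after
# 20 such outliers have been observed on TRAIN-mode steps.
rule_config = StepOutlier(stddev=5, mode="TRAIN", n_outliers=20)
```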
52,503 |
smdebug_rulesconfig.profiler_rules.rules
|
__init__
|
This rule helps to detect outliers in step durations. The rule returns True if a step duration is larger than stddev * standard deviation.
:param stddev: factor by which to multiply the standard deviation. Default is 3.
:param mode: selects the mode under which steps have been saved and on which the rule should run. By default, the rule runs on steps from the EVAL and TRAIN phases.
:param n_outliers: how many outliers to ignore before the rule returns True. Default is 10.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
|
def __init__(self, stddev=3, mode=None, n_outliers=10, scan_interval_us=60 * 1000 * 1000):
"""
This rule helps to detect outliers in step durations. The rule returns True if a step duration is larger than stddev * standard deviation.
:param stddev: factor by which to multiply the standard deviation. Default is 3.
:param mode: selects the mode under which steps have been saved and on which the rule should run. By default, the rule runs on steps from the EVAL and TRAIN phases.
:param n_outliers: how many outliers to ignore before the rule returns True. Default is 10.
:param scan_interval_us: interval with which timeline files are scanned. Default is 60000000 us.
"""
validate_positive_integer("stddev", stddev)
assert mode is None or isinstance(mode, str), "Mode must be a string if specified!"
validate_positive_integer("n_outliers", n_outliers)
validate_positive_integer("scan_interval_us", scan_interval_us)
super().__init__(
stddev=stddev, mode=mode, n_outliers=n_outliers, scan_interval_us=scan_interval_us
)
|
(self, stddev=3, mode=None, n_outliers=10, scan_interval_us=60000000)
|
52,504 |
smdebug_rulesconfig.actions.actions
|
StopTraining
| null |
class StopTraining(Action):
def __init__(self, training_job_prefix: Optional[str] = None):
"""
Action for stopping the training job when a rule is fired. Note that a policy must be created in the AWS
account to allow the sagemaker role to stop the training job:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": "sagemaker:StopTrainingJob",
"Resource": "arn:aws:sagemaker:*:<account_id>:training-job/*"
}
]
}
```
:param training_job_prefix: The prefix of the training job to stop if the rule is fired. This must only refer
to one active training job, otherwise no training job will be stopped.
"""
self.use_default_training_job_prefix = True
if training_job_prefix is not None:
validate_training_job_prefix("training_job_prefix", training_job_prefix)
self.use_default_training_job_prefix = False
super(StopTraining, self).__init__(training_job_prefix=training_job_prefix)
def update_training_job_prefix_if_not_specified(self, training_job_name: str):
"""
Update the training job prefix to be the training job name if the user has not already specified a custom
training job prefix. This is only meant to be called via the sagemaker SDK when `estimator.fit` is called by the
user. Validation is purposely excluded here so that any failures in validation of the training job name are
intentionally caught in the sagemaker SDK and not here.
:param training_job_name: Name of the training job, passed in when `estimator.fit` is called.
"""
if self.use_default_training_job_prefix:
self.action_parameters["training_job_prefix"] = training_job_name
|
(training_job_prefix: Optional[str] = None)
|
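A minimal usage sketch (the prefix is a placeholder): if no prefix is passed, the SageMaker SDK later fills it in via update_training_job_prefix_if_not_specified.
```
from smdebug_rulesconfig.actions.actions import StopTraining

stop_action = StopTraining(training_job_prefix="my-training-job")
```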
52,505 |
smdebug_rulesconfig.actions.actions
|
__init__
|
Action for stopping the training job when a rule is fired. Note that a policy must be created in the AWS
account to allow the sagemaker role to stop the training job:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": "sagemaker:StopTrainingJob",
"Resource": "arn:aws:sagemaker:*:<account_id>:training-job/*"
}
]
}
```
:param training_job_prefix: The prefix of the training job to stop if the rule is fired. This must only refer
to one active training job, otherwise no training job will be stopped.
|
def __init__(self, training_job_prefix: Optional[str] = None):
"""
Action for stopping the training job when a rule is fired. Note that a policy must be created in the AWS
account to allow the sagemaker role to stop the training job:
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": "sagemaker:StopTrainingJob",
"Resource": "arn:aws:sagemaker:*:<account_id>:training-job/*"
}
]
}
```
:param training_job_prefix: The prefix of the training job to stop if the rule is fired. This must only refer
to one active training job, otherwise no training job will be stopped.
"""
self.use_default_training_job_prefix = True
if training_job_prefix is not None:
validate_training_job_prefix("training_job_prefix", training_job_prefix)
self.use_default_training_job_prefix = False
super(StopTraining, self).__init__(training_job_prefix=training_job_prefix)
|
(self, training_job_prefix: Optional[str] = None)
|
52,507 |
smdebug_rulesconfig.actions.actions
|
update_training_job_prefix_if_not_specified
|
Update the training job prefix to be the training job name if the user has not already specified a custom
training job prefix. This is only meant to be called via the sagemaker SDK when `estimator.fit` is called by the
user. Validation is purposely excluded here so that any failures in validation of the training job name are
intentionally caught in the sagemaker SDK and not here.
:param training_job_name: Name of the training job, passed in when `estimator.fit` is called.
|
def update_training_job_prefix_if_not_specified(self, training_job_name: str):
"""
Update the training job prefix to be the training job name if the user has not already specified a custom
training job prefix. This is only meant to be called via the sagemaker SDK when `estimator.fit` is called by the
user. Validation is purposely excluded here so that any failures in validation of the training job name are
intentionally caught in the sagemaker SDK and not here.
:param training_job_name: Name of the training job, passed in when `estimator.fit` is called.
"""
if self.use_default_training_job_prefix:
self.action_parameters["training_job_prefix"] = training_job_name
|
(self, training_job_name: str)
|
52,510 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
all_zero
| null |
def all_zero():
rule_config = _get_rule_config("AllZero")
return rule_config
|
()
|
52,511 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
check_input_images
| null |
def check_input_images():
rule_config = _get_rule_config("CheckInputImages")
return rule_config
|
()
|
52,512 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
class_imbalance
| null |
def class_imbalance():
rule_config = _get_rule_config("ClassImbalance")
return rule_config
|
()
|
52,513 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
confusion
| null |
def confusion():
rule_config = _get_rule_config("Confusion")
return rule_config
|
()
|
52,514 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
create_xgboost_report
| null |
def create_xgboost_report():
rule_config = _get_rule_config("CreateXgboostReport")
return rule_config
|
()
|
52,515 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
dead_relu
| null |
def dead_relu():
rule_config = _get_rule_config("DeadRelu")
return rule_config
|
()
|
52,517 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
exploding_tensor
| null |
def exploding_tensor():
rule_config = _get_rule_config("ExplodingTensor")
return rule_config
|
()
|
52,518 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
feature_importance_overweight
| null |
def feature_importance_overweight():
rule_config = _get_rule_config("FeatureImportanceOverweight")
return rule_config
|
()
|
52,519 |
smdebug_rulesconfig.debugger_rules._collections
|
get_collection
| null |
def get_collection(collection_name):
return _get_collection_config(collection_name)
|
(collection_name)
|
52,520 |
smdebug_rulesconfig.actions.actions
|
is_valid_action_object
|
Helper function to be used by the sagemaker SDK to determine whether the provided object is a valid action object
or not (must be of type `Action` or `ActionList`).
:param actions: actions object specified by the user when calling `Rule.sagemaker` in the sagemaker SDK.
:return: Boolean for whether the provided actions object is valid or not.
|
def is_valid_action_object(actions: Union[Action, ActionList]):
"""
Helper function to be used by the sagemaker SDK to determine whether the provided object is a valid action object
or not (must be of type `Action` or `ActionList`).
:param actions: actions object specified by the user when calling `Rule.sagemaker` in the sagemaker SDK.
:return: Boolean for whether the provided actions object is valid or not.
"""
return isinstance(actions, Action) or isinstance(actions, ActionList)
|
(actions: Union[smdebug_rulesconfig.actions.actions.Action, smdebug_rulesconfig.actions.actions.ActionList])
|
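A minimal sketch of the accepted and rejected types (assumes ActionList accepts actions positionally, which is not shown in this entry):
```
from smdebug_rulesconfig.actions.actions import (
    ActionList,
    StopTraining,
    is_valid_action_object,
)

assert is_valid_action_object(StopTraining())
assert is_valid_action_object(ActionList(StopTraining()))  # assumed constructor form
assert not is_valid_action_object("not an action object")
```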
52,521 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
loss_not_decreasing
| null |
def loss_not_decreasing():
rule_config = _get_rule_config("LossNotDecreasing")
return rule_config
|
()
|
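A hedged sketch of how these zero-argument helpers are typically consumed: the returned rule configuration is passed to the SageMaker Python SDK when attaching a Debugger rule to an estimator (the SDK wiring below is an assumption, not part of this module):
```
from sagemaker.debugger import Rule, rule_configs  # rule_configs re-exports these helpers

rule = Rule.sagemaker(rule_configs.loss_not_decreasing())
```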
52,522 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
nlp_sequence_ratio
| null |
def nlp_sequence_ratio():
rule_config = _get_rule_config("NLPSequenceRatio")
return rule_config
|
()
|
52,523 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
overfit
| null |
def overfit():
rule_config = _get_rule_config("Overfit")
return rule_config
|
()
|
52,524 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
overtraining
| null |
def overtraining():
rule_config = _get_rule_config("Overtraining")
return rule_config
|
()
|
52,525 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
poor_weight_initialization
| null |
def poor_weight_initialization():
rule_config = _get_rule_config("PoorWeightInitialization")
return rule_config
|
()
|
52,527 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
saturated_activation
| null |
def saturated_activation():
rule_config = _get_rule_config("SaturatedActivation")
return rule_config
|
()
|
52,528 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
similar_across_runs
| null |
def similar_across_runs():
rule_config = _get_rule_config("SimilarAcrossRuns")
return rule_config
|
()
|
52,529 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
stalled_training_rule
| null |
def stalled_training_rule():
rule_config = _get_rule_config("StalledTrainingRule")
return rule_config
|
()
|
52,530 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
tensor_variance
| null |
def tensor_variance():
rule_config = _get_rule_config("TensorVariance")
return rule_config
|
()
|
52,531 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
tree_depth
| null |
def tree_depth():
rule_config = _get_rule_config("TreeDepth")
return rule_config
|
()
|
52,532 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
unchanged_tensor
| null |
def unchanged_tensor():
rule_config = _get_rule_config("UnchangedTensor")
return rule_config
|
()
|
52,533 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
vanishing_gradient
| null |
def vanishing_gradient():
rule_config = _get_rule_config("VanishingGradient")
return rule_config
|
()
|
52,534 |
smdebug_rulesconfig.debugger_rules.builtin_rules
|
weight_update_ratio
| null |
def weight_update_ratio():
rule_config = _get_rule_config("WeightUpdateRatio")
return rule_config
|
()
|
52,535 |
contextlib
|
AbstractContextManager
|
An abstract base class for context managers.
|
class AbstractContextManager(abc.ABC):
"""An abstract base class for context managers."""
__class_getitem__ = classmethod(GenericAlias)
def __enter__(self):
"""Return `self` upon entering the runtime context."""
return self
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
return None
@classmethod
def __subclasshook__(cls, C):
if cls is AbstractContextManager:
return _collections_abc._check_methods(C, "__enter__", "__exit__")
return NotImplemented
|
()
|
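A minimal subclassing sketch: only __exit__ must be implemented, since __enter__ already returns self.
```
from contextlib import AbstractContextManager

class Resource(AbstractContextManager):
    def __exit__(self, exc_type, exc_value, traceback):
        print("cleaning up")
        return None  # returning None/False does not suppress exceptions

with Resource() as r:
    print(r)  # the Resource instance itself, courtesy of the default __enter__
```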
52,536 |
contextlib
|
__enter__
|
Return `self` upon entering the runtime context.
|
def __enter__(self):
"""Return `self` upon entering the runtime context."""
return self
|
(self)
|
52,537 |
contextlib
|
__exit__
|
Raise any exception triggered within the runtime context.
|
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
return None
|
(self, exc_type, exc_value, traceback)
|
52,538 |
asyncache
|
IdentityFunction
|
Type for a function returning the same type as the one it received.
|
class IdentityFunction(Protocol): # pylint: disable=too-few-public-methods
"""
Type for a function returning the same type as the one it received.
"""
def __call__(self, __x: _T) -> _T:
...
|
(*args, **kwargs)
|
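An illustrative sketch (names are hypothetical): a protocol like this is typically used as the return type of a decorator factory, signalling that the decorator preserves the type of the function it wraps.
```
from asyncache import IdentityFunction

def noop_cache() -> IdentityFunction:
    # A real factory would return a caching decorator; this placeholder just
    # returns the function unchanged, which satisfies the protocol.
    def decorator(func):
        return func
    return decorator
```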
52,539 |
asyncache
|
__call__
| null |
def __call__(self, __x: _T) -> _T:
...
|
(self, _IdentityFunction__x: ~_T) -> ~_T
|
52,540 |
typing
|
_no_init_or_replace_init
| null |
def _no_init_or_replace_init(self, *args, **kwargs):
cls = type(self)
if cls._is_protocol:
raise TypeError('Protocols cannot be instantiated')
# Already using a custom `__init__`. No need to calculate correct
# `__init__` to call. This can lead to RecursionError. See bpo-45121.
if cls.__init__ is not _no_init_or_replace_init:
return
# Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`.
# The first instantiation of the subclass will call `_no_init_or_replace_init` which
# searches for a proper new `__init__` in the MRO. The new `__init__`
# replaces the subclass' old `__init__` (ie `_no_init_or_replace_init`). Subsequent
# instantiation of the protocol subclass will thus use the new
# `__init__` and no longer call `_no_init_or_replace_init`.
for base in cls.__mro__:
init = base.__dict__.get('__init__', _no_init_or_replace_init)
if init is not _no_init_or_replace_init:
cls.__init__ = init
break
else:
# should not happen
cls.__init__ = object.__init__
cls.__init__(self, *args, **kwargs)
|
(self, *args, **kwargs)
|
52,542 |
asyncache
|
NullContext
|
A class for noop context managers.
|
class NullContext:
"""A class for noop context managers."""
def __enter__(self):
"""Return ``self`` upon entering the runtime context."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
return None
async def __aenter__(self):
"""Return ``self`` upon entering the runtime context."""
return self
async def __aexit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
return None
|
()
|
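A minimal usage sketch: NullContext stands in where a lock (or any other context manager) is optional, in both sync and async code.
```
import asyncio
from asyncache import NullContext

lock = NullContext()  # swap in threading.Lock() / asyncio.Lock() when real locking is needed

with lock:
    pass  # synchronous use: __enter__/__exit__ are no-ops

async def main():
    async with lock:
        pass  # asynchronous use: __aenter__/__aexit__ are no-ops

asyncio.run(main())
```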
52,543 |
asyncache
|
__aenter__
|
Return ``self`` upon entering the runtime context.
|
async def __aenter__(self):
"""Return ``self`` upon entering the runtime context."""
return self
|
(self)
|
52,544 |
asyncache
|
__aexit__
|
Raise any exception triggered within the runtime context.
|
async def __aexit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
return None
|
(self, exc_type, exc_value, traceback)
|
52,545 |
asyncache
|
__enter__
|
Return ``self`` upon entering the runtime context.
|
def __enter__(self):
"""Return ``self`` upon entering the runtime context."""
return self
|
(self)
|