index | package | name | docstring | code | signature |
---|---|---|---|---|---|
36,833 | lizard | map_files_to_analyzer | null | def map_files_to_analyzer(files, analyzer, working_threads):
mapmethod = get_map_method(working_threads)
return mapmethod(analyzer, files)
| (files, analyzer, working_threads) |
36,834 | lizard | md5_hash_file | return md5 hash of a file | def md5_hash_file(full_path_name):
''' return md5 hash of a file '''
try:
with auto_open(full_path_name, mode='r') as source_file:
if sys.version_info[0] == 3:
code_md5 = hashlib.md5(source_file.read().encode('utf-8'))
else:
code_md5 = hashlib.md5(source_file.read())
return code_md5.hexdigest()
except IOError:
return None
except UnicodeDecodeError:
return None
except UnicodeEncodeError:
return None
| (full_path_name) |
36,835 | lizard | open_output_file | null | def open_output_file(path):
try:
return codecs.open(path, 'w', encoding='utf8')
except OSError:
msg = "Error: failed to open output file '{}'\n.".format(path)
sys.stderr.write(msg)
sys.exit(2)
| (path) |
36,837 | lizard | parse_args | null | def parse_args(argv):
def extend_parser(parser_to_extend):
from argparse import ArgumentParser
parser = ArgumentParser(add_help=False)
_extension_arg(parser)
opt, _ = parser.parse_known_args(args=argv[1:])
extensions = get_extensions(opt.extensions)
for ext in extensions:
if hasattr(ext, "set_args"):
ext.set_args(parser_to_extend) # pylint: disable=E1101
return parser_to_extend
parser = extend_parser(arg_parser(argv[0]))
opt = parser.parse_args(args=argv[1:])
opt.extensions = get_extensions(opt.extensions)
values = OutputScheme(opt.extensions).value_columns()
no_fields = (set(opt.sorting) | set(opt.thresholds.keys())) - set(values)
if no_fields:
error_message = "Wrong field name '%s'.\n" % ", ".join(no_fields)
error_message += "Candidates are: " + ', '.join(values) + "\n"
sys.stderr.write(error_message)
sys.exit(2)
if "cyclomatic_complexity" not in opt.thresholds:
opt.thresholds["cyclomatic_complexity"] = opt.CCN
if "max_nesting_depth" not in opt.thresholds and hasattr(opt, "ND"):
opt.thresholds["max_nesting_depth"] = opt.ND
if "max_nested_structures" not in opt.thresholds and hasattr(opt, "NS"):
opt.thresholds["max_nested_structures"] = opt.NS
if "length" not in opt.thresholds:
opt.thresholds["length"] = opt.length
if "nloc" not in opt.thresholds:
opt.thresholds["nloc"] = 1000000
if "parameter_count" not in opt.thresholds:
opt.thresholds["parameter_count"] = opt.arguments
if opt.output_file:
inferred_printer = infer_printer_from_file_ext(opt.output_file)
if inferred_printer:
if not opt.printer:
opt.printer = inferred_printer
else:
msg = "Warning: overriding output file extension.\n"
sys.stderr.write(msg)
return opt
| (argv) |
36,838 | lizard | preprocessing | null | def preprocessing(tokens, reader):
if hasattr(reader, "preprocess"):
return reader.preprocess(tokens)
return (t for t in tokens if not t.isspace() or t == '\n')
| (tokens, reader) |
36,839 | lizard | print_and_save_modules | null | def print_and_save_modules(all_fileinfos, scheme):
saved_fileinfos = []
print(scheme.function_info_head())
for module_info in all_fileinfos:
if module_info:
saved_fileinfos.append(module_info)
for fun in module_info.function_list:
try:
print(scheme.function_info(fun))
except UnicodeEncodeError:
print("Found ill-formatted unicode function name.")
print("%d file analyzed." % (len(saved_fileinfos)))
print("==============================================================")
print("NLOC " + scheme.average_captions() + " function_cnt file")
print("--------------------------------------------------------------")
for module_info in saved_fileinfos:
print((
"{module.nloc:7d}" +
scheme.average_formatter() +
"{function_count:10d}" +
" {module.filename}").format(
module=module_info,
function_count=len(module_info.function_list)))
return saved_fileinfos
| (all_fileinfos, scheme) |
36,840 | lizard | print_clang_style_warning | null | def print_clang_style_warning(code_infos, option, scheme, _):
count = 0
for warning in get_warnings(code_infos, option):
print(scheme.clang_warning_format().format(f=warning))
count += 1
return count
| (code_infos, option, scheme, _) |
36,841 | lizard_ext | print_csv | null | def print_csv(results, options, _, total_factory):
csv_output(total_factory(list(results)), options)
return 0
| (results, options, _, total_factory) |
36,842 | lizard | print_extension_results | null | def print_extension_results(extensions):
for extension in extensions:
if hasattr(extension, 'print_result'):
extension.print_result()
| (extensions) |
36,843 | lizard | print_msvs_style_warning | null | def print_msvs_style_warning(code_infos, option, scheme, _):
count = 0
for warning in get_warnings(code_infos, option):
print(scheme.msvs_warning_format().format(f=warning))
count += 1
return count
| (code_infos, option, scheme, _) |
36,844 | lizard | print_no_warnings | null | def print_no_warnings(option):
warn_str = "No thresholds exceeded ({0})".format(
' or '.join("{0} > {1}".format(
k, val) for k, val in option.thresholds.items()))
print("\n" + "=" * len(warn_str) + "\n" + warn_str)
| (option) |
36,845 | lizard | print_result | null | def print_result(result, option, scheme, total_factory):
result = print_and_save_modules(result, scheme)
warnings = get_warnings(result, option)
warning_count, warning_nloc = print_warnings(option, scheme, warnings)
print_total(warning_count, warning_nloc, total_factory(result), scheme)
return warning_count
| (result, option, scheme, total_factory) |
36,846 | lizard | print_total | null | def print_total(warning_count, warning_nloc, all_result, scheme):
print("=" * 90)
print("Total nloc " + scheme.average_captions() + " Fun Cnt Warning"
" cnt Fun Rt nloc Rt")
print("-" * 90)
print((
"{module.nloc:10d}" +
scheme.average_formatter() +
"{function_count:9d}{warning_count:13d}" +
"{function_rate:10.2f}{nloc_rate:8.2f}").format(
module=all_result.as_fileinfo(),
function_count=all_result.function_count(),
warning_count=warning_count,
function_rate=(warning_count/all_result.function_count()),
nloc_rate=(warning_nloc/all_result.nloc_in_functions())))
| (warning_count, warning_nloc, all_result, scheme) |
36,847 | lizard | print_warnings | null | def print_warnings(option, scheme, warnings):
warning_count = 0
warning_nloc = 0
warn_str = "!!!! Warnings ({0}) !!!!".format(
' or '.join("{0} > {1}".format(
k, val) for k, val in option.thresholds.items()))
for warning in warnings:
if warning_count == 0:
print("\n" + "=" * len(warn_str) + "\n" + warn_str)
print(scheme.function_info_head())
warning_count += 1
warning_nloc += warning.nloc
print(scheme.function_info(warning))
if warning_count == 0:
print_no_warnings(option)
return warning_count, warning_nloc
| (option, scheme, warnings) |
36,848 | lizard_ext | print_xml | null | def print_xml(results, options, _, total_factory):
print(xml_output(total_factory(list(results)), options.verbose))
return 0
| (results, options, _, total_factory) |
36,850 | lizard | silent_printer |
just to exhaust the result, no output.
| def silent_printer(result, *_):
'''
just to exhaust the result, no output.
'''
for _ in result:
pass
return 0
| (result, *_) |
36,852 | lizard | token_counter | null | def token_counter(tokens, reader):
context = reader.context
for token in tokens:
context.fileinfo.token_count += 1
context.current_function.token_count += 1
yield token
| (tokens, reader) |
36,853 | lizard | warning_filter | null | def warning_filter(option, module_infos):
for file_info in module_infos:
if file_info:
for fun in file_info.function_list:
if any(getattr(fun, attr) > limit for attr, limit in
option.thresholds.items()):
yield fun
| (option, module_infos) |
36,854 | lizard | whitelist_filter | null | def whitelist_filter(warnings, script=None, whitelist=None):
def _get_whitelist_item(script):
white = {}
pieces = script.replace('::', '##').split(':')
if len(pieces) > 1:
white['file_name'] = pieces[0]
script = pieces[1]
white['function_names'] = (
[x.strip().replace('##', '::') for x in script.split(',')])
return white
def _in_list(warning):
return any(_match_whitelist_item(white, warning)
for white in whitelist)
def _match_whitelist_item(white, warning):
return (warning.name in white['function_names'] and
warning.filename == white.get('file_name', warning.filename))
def get_whitelist(whitelist):
if os.path.isfile(whitelist):
return open(whitelist, mode='r').read()
if whitelist != DEFAULT_WHITELIST:
print("WARNING: !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print("WARNING: the whitelist \""+whitelist+"\" doesn't exist.")
return ''
if not script:
script = get_whitelist(whitelist)
whitelist = [
_get_whitelist_item(line.split('#')[0])
for line in script.splitlines()]
for warning in warnings:
if not _in_list(warning):
yield warning
| (warnings, script=None, whitelist=None) |
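The whitelist format parsed above is line-oriented: an optional `file_name:` prefix, comma-separated function names, and `#` comments. Below is a minimal sketch of calling `whitelist_filter` with an inline script; the module-level import and the namedtuple warnings are assumptions for illustration only.

```python
from collections import namedtuple
from lizard import whitelist_filter  # assumes the module-level helper is importable

# Placeholder warning objects; lizard's real warnings carry name/filename attributes.
Warning_ = namedtuple("Warning_", "name filename nloc")
all_warnings = [Warning_("func_a", "mymodule.c", 12), Warning_("other", "x.c", 5)]

script = """
some_long_function              # whitelist by function name only
mymodule.c: func_a, func_b      # restrict the entry to one file
"""
kept = list(whitelist_filter(all_warnings, script=script))
print([w.name for w in kept])   # ['other'] - func_a was whitelisted
```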
36,855 | smbprotocol | Dialects |
[MS-SMB2] v53.0 2017-09-15
2.2.3 SMB2 NEGOTIATE Request Dialects
16-bit integers specifying an SMB2 dialect that is supported. 0x02FF is
used in the SMBv1 negotiate request to say that dialects greater than
2.0.2 are supported.
| class Dialects:
"""
[MS-SMB2] v53.0 2017-09-15
2.2.3 SMB2 NEGOTIATE Request Dialects
16-bit integers specifying an SMB2 dialect that is supported. 0x02FF is
used in the SMBv1 negotiate request to say that dialects greater than
2.0.2 are supported.
"""
SMB_2_0_2 = 0x0202
SMB_2_1_0 = 0x0210
SMB_3_0_0 = 0x0300
SMB_3_0_2 = 0x0302
SMB_3_1_1 = 0x0311
SMB_2_WILDCARD = 0x02FF
| () |
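As a quick illustration, these constants can be listed or compared directly; the import path below (`smbprotocol.connection`) is an assumption about where the class lives.

```python
from smbprotocol.connection import Dialects  # import location assumed

preferred = [Dialects.SMB_2_0_2, Dialects.SMB_2_1_0, Dialects.SMB_3_1_1]
print([hex(d) for d in preferred])          # ['0x202', '0x210', '0x311']
print(Dialects.SMB_2_WILDCARD == 0x02FF)    # True - SMB1-negotiate wildcard for "2.???"
```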
36,857 | aws_assume_role_lib.aws_assume_role_lib | JSONFileCache | JSON file cache.
This provides a dict like interface that stores JSON serializable
objects.
The objects are serialized to JSON and stored in a file. These
values can be retrieved at a later time.
| class JSONFileCache(botocore.credentials.JSONFileCache):
"""JSON file cache.
This provides a dict like interface that stores JSON serializable
objects.
The objects are serialized to JSON and stored in a file. These
values can be retrieved at a later time.
"""
def __init__(self, dir_path):
super().__init__(working_dir=dir_path)
| (dir_path) |
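A minimal usage sketch of this cache, assuming the class is re-exported at the package level and using a placeholder directory; values round-trip through the JSON files created by the inherited `__setitem__`/`__getitem__` shown in the following rows.

```python
from aws_assume_role_lib import JSONFileCache  # package-level export assumed

cache = JSONFileCache("/tmp/assume-role-cache")          # placeholder directory
cache["demo"] = {"Expiration": "2030-01-01T00:00:00Z"}   # serialized to demo.json
print("demo" in cache)   # True
print(cache["demo"])     # {'Expiration': '2030-01-01T00:00:00Z'}
del cache["demo"]
```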
36,858 | botocore.utils | __contains__ | null | def __contains__(self, cache_key):
actual_key = self._convert_cache_key(cache_key)
return os.path.isfile(actual_key)
| (self, cache_key) |
36,859 | botocore.utils | __delitem__ | null | def __delitem__(self, cache_key):
actual_key = self._convert_cache_key(cache_key)
try:
key_path = Path(actual_key)
key_path.unlink()
except FileNotFoundError:
raise KeyError(cache_key)
| (self, cache_key) |
36,860 | botocore.utils | __getitem__ | Retrieve value from a cache key. | def __getitem__(self, cache_key):
"""Retrieve value from a cache key."""
actual_key = self._convert_cache_key(cache_key)
try:
with open(actual_key) as f:
return json.load(f)
except (OSError, ValueError):
raise KeyError(cache_key)
| (self, cache_key) |
36,861 | aws_assume_role_lib.aws_assume_role_lib | __init__ | null | def __init__(self, dir_path):
super().__init__(working_dir=dir_path)
| (self, dir_path) |
36,862 | botocore.utils | __setitem__ | null | def __setitem__(self, cache_key, value):
full_key = self._convert_cache_key(cache_key)
try:
file_content = self._dumps(value)
except (TypeError, ValueError):
raise ValueError(
f"Value cannot be cached, must be "
f"JSON serializable: {value}"
)
if not os.path.isdir(self._working_dir):
os.makedirs(self._working_dir)
with os.fdopen(
os.open(full_key, os.O_WRONLY | os.O_CREAT, 0o600), 'w'
) as f:
f.truncate()
f.write(file_content)
| (self, cache_key, value) |
36,863 | botocore.utils | _convert_cache_key | null | def _convert_cache_key(self, cache_key):
full_path = os.path.join(self._working_dir, cache_key + '.json')
return full_path
| (self, cache_key) |
36,864 | botocore.utils | _default_dumps | null | def _default_dumps(self, obj):
return json.dumps(obj, default=self._serialize_if_needed)
| (self, obj) |
36,865 | botocore.utils | _serialize_if_needed | null | def _serialize_if_needed(self, value, iso=False):
if isinstance(value, _DatetimeClass):
if iso:
return value.isoformat()
return value.strftime('%Y-%m-%dT%H:%M:%S%Z')
return value
| (self, value, iso=False) |
36,866 | aws_assume_role_lib.aws_assume_role_lib | assume_role | Produce a boto3 Session using the given role, assumed using the input session.
Unlike creating a session with the credentials returned directly
by sts.AssumeRole, the returned session will refresh the credentials
automatically when they expire by calling AssumeRole again.
By default, the parameters are checked so that errors can be raised
at this point, rather than more confusingly when the first call
is made using the child session.
This can be disabled by setting validate=False.
The parent session is available on the child session
in the property assume_role_parent_session.
Additional arguments for AssumeRole, if any are added in the future,
can be passed in additional_kwargs. | def assume_role(session: boto3.Session, RoleArn: str, *,
RoleSessionName: str=None,
PolicyArns: typing.Union[typing.List[typing.Dict[str, str]], typing.List[str]]=None,
Policy: typing.Union[str, typing.Dict]=None,
DurationSeconds: typing.Union[int, datetime.timedelta]=None,
Tags: typing.List[typing.Dict[str, str]]=None,
TransitiveTagKeys:typing.List[str]=None,
ExternalId: str=None,
SerialNumber: str=None,
TokenCode: str=None,
SourceIdentity: str=None,
region_name: typing.Union[str, bool]=None,
validate: bool=True,
cache: dict=None,
additional_kwargs: typing.Dict=None) -> boto3.Session:
"""Produce a boto3 Session using the given role, assumed using the input session.
Unlike creating a session with the credentials returned directly
by sts.AssumeRole, the returned session will refresh the credentials
automatically when they expire by calling AssumeRole again.
By default, the parameters are checked so that errors can be raised
at this point, rather than more confusingly when the first call
is made using the child session.
This can be disabled by setting validate=False.
The parent session is available on the child session
in the property assume_role_parent_session.
Additional arguments for AssumeRole, if any are added in the future,
can be passed in additional_kwargs."""
botocore_session = session._session
if not RoleSessionName and SourceIdentity:
RoleSessionName = SourceIdentity
elif RoleSessionName == AUTOMATIC_ROLE_SESSION_NAME:
RoleSessionName = None
if PolicyArns:
PolicyArns = [{"arn": value} if isinstance(value, str) else value for value in PolicyArns]
if Policy is not None and not isinstance(Policy, str):
Policy = json.dumps(Policy)
if isinstance(DurationSeconds, datetime.timedelta):
DurationSeconds = int(DurationSeconds.total_seconds())
extra_args = {}
if additional_kwargs:
extra_args.update(additional_kwargs)
for var_name in [
"RoleSessionName",
"PolicyArns",
"Policy",
"DurationSeconds",
"Tags",
"TransitiveTagKeys",
"ExternalId",
"SerialNumber",
"TokenCode",
"SourceIdentity"]:
value = locals()[var_name]
if value is not None:
extra_args[var_name] = value
credentials = botocore_session.get_credentials()
if not credentials:
raise botocore.exceptions.NoCredentialsError
if validate:
validate_args = extra_args.copy()
validate_args["RoleArn"] = RoleArn
if "RoleSessionName" not in validate_args:
# this gets generated later if it's not present
validate_args["RoleSessionName"] = "ToBeGenerated"
shape = session.client("sts")._service_model.shape_for("AssumeRoleRequest")
botocore.validate.validate_parameters(validate_args, shape)
assume_role_provider = ProgrammaticAssumeRoleProvider(
botocore_session.create_client,
credentials,
RoleArn,
extra_args=extra_args,
cache=cache,
)
assumed_role_botocore_session = botocore.session.Session()
assumed_role_botocore_session.register_component(
"credential_provider",
botocore.credentials.CredentialResolver([assume_role_provider])
)
if region_name is True:
region_name = session.region_name
elif region_name is False:
region_name = None
elif region_name is None:
try:
_set_parent_session_provider(
botocore_session,
assumed_role_botocore_session,
"region")
except Exception as e:
raise RuntimeError(
"Unexpected breakage of botocore config API. " +
"Fall back to setting region_name=True to use parent session region " +
"or region=False to use implicit region.") from e
session_kwargs = {
"botocore_session": assumed_role_botocore_session,
"region_name": region_name,
}
assumed_role_boto3_session = boto3.Session(**session_kwargs)
assumed_role_boto3_session.assume_role_parent_session = session
return assumed_role_boto3_session
| (session: boto3.session.Session, RoleArn: str, *, RoleSessionName: Optional[str] = None, PolicyArns: Union[List[Dict[str, str]], List[str], NoneType] = None, Policy: Union[str, Dict, NoneType] = None, DurationSeconds: Union[int, datetime.timedelta, NoneType] = None, Tags: Optional[List[Dict[str, str]]] = None, TransitiveTagKeys: Optional[List[str]] = None, ExternalId: Optional[str] = None, SerialNumber: Optional[str] = None, TokenCode: Optional[str] = None, SourceIdentity: Optional[str] = None, region_name: Union[str, bool, NoneType] = None, validate: bool = True, cache: Optional[dict] = None, additional_kwargs: Optional[Dict] = None) -> boto3.session.Session |
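A hedged usage sketch of `assume_role` based on the signature above; the role ARN is a placeholder, and the returned session refreshes its credentials automatically as described in the docstring.

```python
import boto3
from aws_assume_role_lib import assume_role

session = boto3.Session()
assumed = assume_role(
    session,
    "arn:aws:iam::123456789012:role/MyRole",   # placeholder role ARN
    RoleSessionName="example-session",
)
print(assumed.client("sts").get_caller_identity()["Arn"])
```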
36,868 | aws_assume_role_lib.aws_assume_role_lib | generate_lambda_session_name | For Lambda functions, generate a role session name that identifies the function.
The returned value is in one of the following forms:
{function_name}
{function_name}.{identifier}
{function_name}.{function_version}.{identifier}
The function name must be retrievable from the AWS_LAMBDA_FUNCTION_NAME
environment variable, or it must be provided.
The function version is looked for in the AWS_LAMBDA_FUNCTION_VERSION
environment variable by default. Function versions of $LATEST
are treated the same as missing function versions.
The identifier is taken from the log stream name in the
AWS_LAMBDA_LOG_STREAM_NAME environment variable by default; if it is not
provided and this cannot be found, it's a timestamp if the identifier can be
at least 14 characters long (to provide for second-level precision),
otherwise it is a random string.
The maximum role session name length is 64 characters. To ensure this, and
to provide at least 4 characters of the identifier when it is used, the
following rules apply, in order:
1. If the function name is longer than 59 characters, the session name is the
truncated function name.
2. If the function name plus the function version is longer than 59 characters,
the session name is the function name plus the identifier, truncated.
3. Otherwise, the session name is the function name plus the version (if one
is found and not $LATEST) plus the identifier, truncated.
| def generate_lambda_session_name(
function_name: str=None,
function_version: str=None,
identifier: str=None):
"""For Lambda functions, generate a role session name that identifies the function.
The returned value is in one of the following forms:
{function_name}
{function_name}.{identifier}
{function_name}.{function_version}.{identifier}
The function name must be retrievable from the AWS_LAMBDA_FUNCTION_NAME
environment variable, or it must be provided.
The function version is looked for in the AWS_LAMBDA_FUNCTION_VERSION
environment variable by default. Function versions of $LATEST
are treated the same as missing function versions.
The identifier is taken from the log stream name in the
AWS_LAMBDA_LOG_STREAM_NAME environment variable by default; if it is not
provided and this cannot be found, it's a timestamp if the identifier can be
at least 14 characters long (to provide for second-level precision),
otherwise it is a random string.
The maximum role session name length is 64 characters. To ensure this, and
to provide at least 4 characters of the identifier when it is used, the
following rules apply, in order:
1. If the function name is longer than 59 characters, the session name is the
truncated function name.
2. If the function name plus the function version is longer than 59 characters,
the session name is the function name plus the identifier, truncated.
3. Otherwise, the session name is the function name plus the version (if one
is found and not $LATEST) plus the identifier, truncated.
"""
if not function_name:
function_name = os.environ["AWS_LAMBDA_FUNCTION_NAME"]
name_component = function_name
if not function_version:
function_version = os.environ.get("AWS_LAMBDA_FUNCTION_VERSION", "")
if function_version and function_version != "$LATEST":
version_component = "." + str(function_version)
else:
version_component = ""
def _get_identifier(max_length):
if identifier:
return identifier
# the execution environment has a unique ID, but it's not exposed directly
# the log stream name (currently) includes it and looks like
# 2020/01/31/[$LATEST]3893xmpl7fac4485b47bb75b671a283c
log_stream_name = os.environ.get("AWS_LAMBDA_LOG_STREAM_NAME", "")
match = re.search(r"\w+$", log_stream_name)
if match:
return match.group()[:max_length]
elif max_length >= 14:
# enough for second-level precision
return datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S%f")[:max_length]
else:
chars = string.ascii_lowercase + string.digits
return ''.join(random.choice(chars) for _ in range(max_length))
# truncate for max role session name length of 64
if len(name_component) > 59:
# don't bother with the identifier unless we can get
# at least four characters of it
value = name_component[:64]
elif len(name_component) + len(version_component) > 59:
# don't bother with the version if we can't get it
max_length = 63 - len(name_component)
identifier_component = "." + _get_identifier(max_length)
value = f"{name_component}{identifier_component}"[:64]
else:
max_length = 63 - (len(name_component) + len(version_component))
identifier_component = "." + _get_identifier(max_length)
value = f"{name_component}{version_component}{identifier_component}"[:64]
clean_value = re.sub(r"[^a-zA-Z0-9_=,.@-]+", "_", value)
return clean_value
| (function_name: Optional[str] = None, function_version: Optional[str] = None, identifier: Optional[str] = None) |
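Outside a real Lambda runtime the function reads the environment variables named in the docstring, so a sketch can fake them; the values below are placeholders modeled on the comment in `_get_identifier`.

```python
import os
from aws_assume_role_lib import generate_lambda_session_name

# Placeholder values mimicking the Lambda runtime environment.
os.environ["AWS_LAMBDA_FUNCTION_NAME"] = "my-function"
os.environ["AWS_LAMBDA_FUNCTION_VERSION"] = "3"
os.environ["AWS_LAMBDA_LOG_STREAM_NAME"] = "2020/01/31/[3]3893xmpl7fac4485b47bb75b671a283c"

print(generate_lambda_session_name())
# my-function.3.3893xmpl7fac4485b47bb75b671a283c
```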
36,869 | aws_assume_role_lib.aws_assume_role_lib | get_assumed_role_session_arn | Get a correctly-formatted IAM assumed role session ARN.
Note these ARNs do not contain the role's path, if it has one.
If you provide the role name with path, it will be stripped off. | def get_assumed_role_session_arn(
account_id: typing.Union[str, int],
role_name: str,
role_session_name: str,
partition: str="aws"):
"""Get a correctly-formatted IAM assumed role session ARN.
Note these ARNs do not contain the role's path, if it has one.
If you provide the role name with path, it will be stripped off."""
if isinstance(account_id, numbers.Number):
account_id = str(int(account_id))
if isinstance(account_id, str) and len(account_id) < 12:
account_id = account_id.rjust(12, "0")
if "/" in role_name:
role_name = role_name.rsplit("/", 1)[1]
return f"arn:{partition}:sts::{account_id}:assumed-role/{role_name}/{role_session_name}"
| (account_id: Union[str, int], role_name: str, role_session_name: str, partition: str = 'aws') |
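For example, the placeholder values below produce the `arn:{partition}:sts::{account_id}:assumed-role/...` form built by the f-string above.

```python
from aws_assume_role_lib import get_assumed_role_session_arn

print(get_assumed_role_session_arn(123456789012, "MyRole", "example-session"))
# arn:aws:sts::123456789012:assumed-role/MyRole/example-session
```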
36,870 | aws_assume_role_lib.aws_assume_role_lib | get_role_arn | Get a correctly-formatted IAM role ARN.
You can provide the path separately or as part of the role name. | def get_role_arn(
account_id: typing.Union[str, int],
role_name: str,
path: str="",
partition: str="aws"):
"""Get a correctly-formatted IAM role ARN.
You can provide the path separately or as part of the role name."""
if isinstance(account_id, numbers.Number):
account_id = str(int(account_id))
if isinstance(account_id, str) and len(account_id) < 12:
account_id = account_id.rjust(12, "0")
if "/" in role_name and path:
raise ValueError("Path cannot be given in both role_name and path")
if "/" in role_name:
path, role_name = role_name.rsplit("/", 1)
if path == "/":
path = ""
if path.startswith("/"):
path = path[1:]
if path and not path.endswith("/"):
path = path + "/"
return f"arn:{partition}:iam::{account_id}:role/{path}{role_name}"
| (account_id: Union[str, int], role_name: str, path: str = '', partition: str = 'aws') |
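A short sketch of the path handling above, with placeholder account and role names; the path can be passed separately or embedded in the role name.

```python
from aws_assume_role_lib import get_role_arn

print(get_role_arn(123456789012, "MyRole"))
# arn:aws:iam::123456789012:role/MyRole
print(get_role_arn("123456789012", "service-role/MyRole"))
# arn:aws:iam::123456789012:role/service-role/MyRole
```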
36,871 | aws_assume_role_lib.aws_assume_role_lib | patch_boto3 | Add boto3.assume_role() and boto3.Session.assume_role().
Each has the same interface as assume_role() except they do not take
a session object as input. | def patch_boto3():
"""Add boto3.assume_role() and boto3.Session.assume_role().
Each has the same interface as assume_role() except they do not take
a session object as input."""
setattr(boto3.Session, assume_role.__name__, assume_role)
def wrapper(RoleArn: str, *,
RoleSessionName: str=None,
PolicyArns: typing.Union[typing.List[typing.Dict[str, str]], typing.List[str]]=None,
Policy: typing.Union[str, typing.Dict]=None,
DurationSeconds: typing.Union[int, datetime.timedelta]=None,
Tags: typing.List[typing.Dict[str, str]]=None,
TransitiveTagKeys:typing.List[str]=None,
ExternalId: str=None,
SerialNumber: str=None,
TokenCode: str=None,
SourceIdentity: str=None,
region_name: typing.Union[str, bool]=None,
validate: bool=True,
cache: dict=None,
additional_kwargs: typing.Dict=None) -> boto3.Session:
"""Produce a boto3 Session using the given role.
Unlike creating a session with the credentials returned directly
by sts.AssumeRole, the returned session will refresh the credentials
automatically when they expire by calling AssumeRole again.
By default, the parameters are checked so that errors can be raised
at this point, rather than more confusingly when the first call
is made using the child session.
This can be disabled by setting validate=False.
The parent session is available on the child session
in the property assume_role_parent_session.
Additional arguments for AssumeRole, if any are added in the future,
can be passed in additional_kwargs."""
session = boto3._get_default_session()
return assume_role(session, RoleArn,
RoleSessionName=RoleSessionName,
PolicyArns=PolicyArns,
Policy=Policy,
DurationSeconds=DurationSeconds,
Tags=Tags,
TransitiveTagKeys=TransitiveTagKeys,
ExternalId=ExternalId,
SerialNumber=SerialNumber,
TokenCode=TokenCode,
SourceIdentity=SourceIdentity,
region_name=region_name,
validate=validate,
cache=cache,
additional_kwargs=additional_kwargs
)
wrapper.__name__ = assume_role.__name__
setattr(boto3, assume_role.__name__, wrapper)
| () |
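After patching, the wrapper delegates to the default boto3 session, so usage reduces to the sketch below; the ARN is a placeholder and default credentials must already be configured.

```python
import boto3
from aws_assume_role_lib import patch_boto3

patch_boto3()
assumed = boto3.assume_role("arn:aws:iam::123456789012:role/MyRole")  # placeholder ARN
print(type(assumed))  # <class 'boto3.session.Session'>
```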
36,872 | numba.core.errors | NumbaDeprecationWarning |
Warning category for use of a deprecated feature.
| class NumbaDeprecationWarning(NumbaWarning, DeprecationWarning):
"""
Warning category for use of a deprecated feature.
"""
| (msg, loc=None, highlighting=True) |
36,873 | numba.core.errors | __init__ | null | def __init__(self, msg, loc=None, highlighting=True, ):
self.msg = msg
self.loc = loc
# If a warning is emitted during validation of env-vars in
# numba.core.config, highlighting will not be available.
if highlighting and _is_numba_core_config_loaded():
highlight = termcolor().errmsg
else:
def highlight(x):
return x
if loc:
super(NumbaWarning, self).__init__(
highlight("%s\n%s\n" % (msg, loc.strformat())))
else:
super(NumbaWarning, self).__init__(highlight("%s" % (msg,)))
| (self, msg, loc=None, highlighting=True) |
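Because the class also subclasses `DeprecationWarning`, it can be managed with the standard `warnings` filters; a small sketch:

```python
import warnings
from numba.core.errors import NumbaDeprecationWarning

# Escalate Numba deprecation notices to errors during a test run,
# or swap "error" for "ignore" to silence them.
warnings.simplefilter("error", category=NumbaDeprecationWarning)
```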
36,874 | ydata_profiling.profile_report | ProfileReport | Generate a profile report from a Dataset stored as a pandas `DataFrame`.
Used as is, it will output its content as an HTML report in a Jupyter notebook.
| class ProfileReport(SerializeReport, ExpectationsReport):
"""Generate a profile report from a Dataset stored as a pandas `DataFrame`.
Used as is, it will output its content as an HTML report in a Jupyter notebook.
"""
_description_set = None
_report = None
_html = None
_widgets = None
_json = None
config: Settings
def __init__(
self,
df: Optional[Union[pd.DataFrame, sDataFrame]] = None,
minimal: bool = False,
tsmode: bool = False,
sortby: Optional[str] = None,
sensitive: bool = False,
explorative: bool = False,
dark_mode: bool = False,
orange_mode: bool = False,
sample: Optional[dict] = None,
config_file: Optional[Union[Path, str]] = None,
lazy: bool = True,
typeset: Optional[VisionsTypeset] = None,
summarizer: Optional[BaseSummarizer] = None,
config: Optional[Settings] = None,
type_schema: Optional[dict] = None,
**kwargs,
):
"""Generate a ProfileReport based on a pandas or spark.sql DataFrame
Config processing order (in case of duplicate entries, entries later in the order are retained):
- config presets (e.g. `config_file`, `minimal` arguments)
- config groups (e.g. `explorative` and `sensitive` arguments)
- custom settings (e.g. `config` argument)
- custom settings **kwargs (e.g. `title`)
Args:
df: a pandas or spark.sql DataFrame
minimal: minimal mode is a default configuration with minimal computation
tsmode: activates time-series analysis for all the numerical variables from the dataset.
Only available for pd.DataFrame
sortby: ignored if tsmode=False. Order the dataset by a provided column.
sensitive: hides the values for categorical and text variables for report privacy
config_file: a config file (.yml), mutually exclusive with `minimal`
lazy: compute when needed
sample: optional dict(name="Sample title", caption="Caption", data=pd.DataFrame())
typeset: optional user typeset to use for type inference
summarizer: optional user summarizer to generate custom summary output
type_schema: optional dict containing pairs of `column name`: `type`
**kwargs: other arguments, for valid arguments, check the default configuration file.
"""
self.__validate_inputs(df, minimal, tsmode, config_file, lazy)
if config_file or minimal:
if not config_file:
config_file = get_config("config_minimal.yaml")
report_config = Settings().from_file(config_file)
elif config is not None:
report_config = config
else:
if isinstance(df, pd.DataFrame):
report_config = Settings()
else:
report_config = SparkSettings()
groups = [
(explorative, "explorative"),
(sensitive, "sensitive"),
(dark_mode, "dark_mode"),
(orange_mode, "orange_mode"),
]
if any(condition for condition, _ in groups):
cfg = Settings()
for condition, key in groups:
if condition:
cfg = cfg.update(Config.get_arg_groups(key))
report_config = cfg.update(report_config.dict(exclude_defaults=True))
if len(kwargs) > 0:
shorthands, kwargs = Config.shorthands(kwargs)
report_config = (
Settings()
.update(shorthands)
.update(report_config.dict(exclude_defaults=True))
)
if kwargs:
report_config = report_config.update(kwargs)
report_config.vars.timeseries.active = tsmode
if tsmode and sortby:
report_config.vars.timeseries.sortby = sortby
self.df = self.__initialize_dataframe(df, report_config)
self.config = report_config
self._df_hash = None
self._sample = sample
self._type_schema = type_schema
self._typeset = typeset
self._summarizer = summarizer
if not lazy:
# Trigger building the report structure
_ = self.report
@staticmethod
def __validate_inputs(
df: Optional[Union[pd.DataFrame, sDataFrame]],
minimal: bool,
tsmode: bool,
config_file: Optional[Union[Path, str]],
lazy: bool,
) -> None:
# Lazy profile cannot be set if no DataFrame is provided
if df is None and not lazy:
raise ValueError("Can init a not-lazy ProfileReport with no DataFrame")
if config_file is not None and minimal:
raise ValueError(
"Arguments `config_file` and `minimal` are mutually exclusive."
)
# Spark Dataframe validations
if isinstance(df, pd.DataFrame):
if df is not None and df.empty:
raise ValueError(
"DataFrame is empty. Please" "provide a non-empty DataFrame."
)
else:
if tsmode:
raise NotImplementedError(
"Time-Series dataset analysis is not yet supported for Spark DataFrames"
)
if (
df is not None and df.rdd.isEmpty()
): # df.isEmpty is only supported from pyspark version 3.3.0
raise ValueError(
"DataFrame is empty. Please" "provide a non-empty DataFrame."
)
@staticmethod
def __initialize_dataframe(
df: Optional[Union[pd.DataFrame, sDataFrame]], report_config: Settings
) -> Optional[Union[pd.DataFrame, sDataFrame]]:
logger.info_def_report(
dataframe=type(df), timeseries=report_config.vars.timeseries.active
)
if (
df is not None
and isinstance(df, pd.DataFrame)
and report_config.vars.timeseries.active
):
if report_config.vars.timeseries.sortby:
df = df.sort_values(by=report_config.vars.timeseries.sortby)
df = df.set_index(report_config.vars.timeseries.sortby, drop=False)
df.index.name = None
else:
df = df.sort_index()
return df
def invalidate_cache(self, subset: Optional[str] = None) -> None:
"""Invalidate report cache. Useful after changing setting.
Args:
subset:
- "rendering" to invalidate the html, json and widget report rendering
- "report" to remove the caching of the report structure
- None (default) to invalidate all caches
Returns:
None
"""
if subset is not None and subset not in ["rendering", "report"]:
raise ValueError(
"'subset' parameter should be None, 'rendering' or 'report'"
)
if subset is None or subset in ["rendering", "report"]:
self._widgets = None
self._json = None
self._html = None
if subset is None or subset == "report":
self._report = None
if subset is None:
self._description_set = None
@property
def typeset(self) -> Optional[VisionsTypeset]:
if self._typeset is None:
self._typeset = ProfilingTypeSet(self.config, self._type_schema)
return self._typeset
@property
def summarizer(self) -> BaseSummarizer:
if self._summarizer is None:
self._summarizer = PandasProfilingSummarizer(self.typeset)
return self._summarizer
@property
def description_set(self) -> BaseDescription:
if self._description_set is None:
self._description_set = describe_df(
self.config,
self.df,
self.summarizer,
self.typeset,
self._sample,
)
return self._description_set
@property
def df_hash(self) -> Optional[str]:
if self._df_hash is None and self.df is not None:
self._df_hash = hash_dataframe(self.df)
return self._df_hash
@property
def report(self) -> Root:
if self._report is None:
self._report = get_report_structure(self.config, self.description_set)
return self._report
@property
def html(self) -> str:
if self._html is None:
self._html = self._render_html()
return self._html
@property
def json(self) -> str:
if self._json is None:
self._json = self._render_json()
return self._json
@property
def widgets(self) -> Any:
if (
isinstance(self.description_set.table["n"], list)
and len(self.description_set.table["n"]) > 1
):
raise RuntimeError(
"Widgets interface not (yet) supported for comparing reports, please use the HTML rendering."
)
if self._widgets is None:
self._widgets = self._render_widgets()
return self._widgets
def get_duplicates(self) -> Optional[pd.DataFrame]:
"""Get duplicate rows and counts based on the configuration
Returns:
A DataFrame with the duplicate rows and their counts.
"""
return self.description_set.duplicates
def get_sample(self) -> dict:
"""Get head/tail samples based on the configuration
Returns:
A dict with the head and tail samples.
"""
return self.description_set.sample
def get_description(self) -> BaseDescription:
"""Return the description (a raw statistical summary) of the dataset.
Returns:
Dict containing a description for each variable in the DataFrame.
"""
return self.description_set
def get_rejected_variables(self) -> set:
"""Get variables that are rejected for analysis (e.g. constant, mixed data types)
Returns:
a set of column names that are unsupported
"""
return {
alert.column_name
for alert in self.description_set.alerts
if alert.alert_type == AlertType.REJECTED
}
def to_file(self, output_file: Union[str, Path], silent: bool = True) -> None:
"""Write the report to a file.
Args:
output_file: The name or the path of the file to generate including the extension (.html, .json).
silent: if False, opens the file in the default browser or download it in a Google Colab environment
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
pillow_version = pkg_resources.get_distribution("Pillow").version
version_tuple = tuple(map(int, pillow_version.split(".")))
if version_tuple < (9, 5, 0):
warnings.warn(
"Try running command: 'pip install --upgrade Pillow' to avoid ValueError"
)
if not isinstance(output_file, Path):
output_file = Path(str(output_file))
if output_file.suffix == ".json":
data = self.to_json()
else:
if not self.config.html.inline:
self.config.html.assets_path = str(output_file.parent)
if self.config.html.assets_prefix is None:
self.config.html.assets_prefix = str(output_file.stem) + "_assets"
create_html_assets(self.config, output_file)
data = self.to_html()
if output_file.suffix != ".html":
suffix = output_file.suffix
output_file = output_file.with_suffix(".html")
warnings.warn(
f"Extension {suffix} not supported. For now we assume .html was intended. "
f"To remove this warning, please use .html or .json."
)
disable_progress_bar = not self.config.progress_bar
with tqdm(
total=1, desc="Export report to file", disable=disable_progress_bar
) as pbar:
output_file.write_text(data, encoding="utf-8")
pbar.update()
if not silent:
try:
from google.colab import files # noqa: F401
files.download(output_file.absolute().as_uri())
except ModuleNotFoundError:
import webbrowser
webbrowser.open_new_tab(output_file.absolute().as_uri())
def _render_html(self) -> str:
from ydata_profiling.report.presentation.flavours import HTMLReport
report = self.report
with tqdm(
total=1, desc="Render HTML", disable=not self.config.progress_bar
) as pbar:
html = HTMLReport(copy.deepcopy(report)).render(
nav=self.config.html.navbar_show,
offline=self.config.html.use_local_assets,
inline=self.config.html.inline,
assets_prefix=self.config.html.assets_prefix,
primary_color=self.config.html.style.primary_colors[0],
logo=self.config.html.style.logo,
theme=self.config.html.style.theme,
title=self.description_set.analysis.title,
date=self.description_set.analysis.date_start,
version=self.description_set.package["ydata_profiling_version"],
)
if self.config.html.minify_html:
from htmlmin.main import minify
html = minify(html, remove_all_empty_space=True, remove_comments=True)
pbar.update()
return html
def _render_widgets(self) -> Any:
from ydata_profiling.report.presentation.flavours import WidgetReport
report = self.report
with tqdm(
total=1,
desc="Render widgets",
disable=not self.config.progress_bar,
leave=False,
) as pbar:
widgets = WidgetReport(copy.deepcopy(report)).render()
pbar.update()
return widgets
def _render_json(self) -> str:
def encode_it(o: Any) -> Any:
if is_dataclass(o):
o = asdict(o)
if isinstance(o, dict):
return {encode_it(k): encode_it(v) for k, v in o.items()}
else:
if isinstance(o, (bool, int, float, str)):
return o
elif isinstance(o, list):
return [encode_it(v) for v in o]
elif isinstance(o, set):
return {encode_it(v) for v in o}
elif isinstance(o, pd.Series):
return encode_it(o.to_list())
elif isinstance(o, pd.DataFrame):
return encode_it(o.to_dict(orient="records"))
elif isinstance(o, np.ndarray):
return encode_it(o.tolist())
elif isinstance(o, Sample):
return encode_it(o.dict())
elif isinstance(o, np.generic):
return o.item()
else:
return str(o)
description = self.description_set
with tqdm(
total=1, desc="Render JSON", disable=not self.config.progress_bar
) as pbar:
description_dict = format_summary(description)
description_dict = encode_it(description_dict)
description_dict = redact_summary(description_dict, self.config)
data = json.dumps(description_dict, indent=4)
pbar.update()
return data
def to_html(self) -> str:
"""Generate and return complete template as lengthy string
for using with frameworks.
Returns:
Profiling report html including wrapper.
"""
return self.html
def to_json(self) -> str:
"""Represent the ProfileReport as a JSON string
Returns:
JSON string
"""
return self.json
def to_notebook_iframe(self) -> None:
"""Used to output the HTML representation to a Jupyter notebook.
When config.notebook.iframe.attribute is "src", this function creates a temporary HTML file
in `./tmp/profile_[hash].html` and returns an Iframe pointing to that contents.
When config.notebook.iframe.attribute is "srcdoc", the same HTML is injected in the "srcdoc" attribute of
the Iframe.
Notes:
This construction solves problems with conflicting stylesheets and navigation links.
"""
from IPython.core.display import display
from ydata_profiling.report.presentation.flavours.widget.notebook import (
get_notebook_iframe,
)
# Ignore warning: https://github.com/ipython/ipython/pull/11350/files
with warnings.catch_warnings():
warnings.simplefilter("ignore")
display(get_notebook_iframe(self.config, self))
def to_widgets(self) -> None:
"""The ipython notebook widgets user interface."""
try:
from google.colab import files # noqa: F401
warnings.warn(
"Ipywidgets is not yet fully supported on Google Colab (https://github.com/googlecolab/colabtools/issues/60)."
"As an alternative, you can use the HTML report. See the documentation for more information."
)
except ModuleNotFoundError:
pass
from IPython.core.display import display
display(self.widgets)
def _repr_html_(self) -> None:
"""The ipython notebook widgets user interface gets called by the jupyter notebook."""
self.to_notebook_iframe()
def __repr__(self) -> str:
"""Override so that Jupyter Notebook does not print the object."""
return ""
def compare(
self, other: "ProfileReport", config: Optional[Settings] = None
) -> "ProfileReport":
"""Compare this report with another ProfileReport
Alias for:
```
ydata_profiling.compare([report1, report2], config=config)
```
See `ydata_profiling.compare` for details.
Args:
other: the ProfileReport to compare to
config: the settings object for the merged ProfileReport. If `None`, uses the caller's config
Returns:
Comparison ProfileReport
"""
from ydata_profiling.compare_reports import compare
return compare([self, other], config if config is not None else self.config)
| (df: Optional[pandas.core.frame.DataFrame] = None, minimal: bool = False, tsmode: bool = False, sortby: Optional[str] = None, sensitive: bool = False, explorative: bool = False, dark_mode: bool = False, orange_mode: bool = False, sample: Optional[dict] = None, config_file: Union[pathlib.Path, str, NoneType] = None, lazy: bool = True, typeset: Optional[visions.typesets.typeset.VisionsTypeset] = None, summarizer: Optional[ydata_profiling.model.summarizer.BaseSummarizer] = None, config: ydata_profiling.config.Settings = None, type_schema: Optional[dict] = None, **kwargs) |
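A minimal end-to-end sketch of the class, using a synthetic DataFrame and `minimal=True` so the computation stays light; the output file name is a placeholder.

```python
import pandas as pd
from ydata_profiling import ProfileReport

df = pd.DataFrame({"x": range(100), "y": [v % 7 for v in range(100)]})
report = ProfileReport(df, title="Example report", minimal=True)
report.to_file("example_report.html")   # or .json for the JSON rendering
```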
36,875 | ydata_profiling.profile_report | __initialize_dataframe | null | @staticmethod
def __initialize_dataframe(
df: Optional[Union[pd.DataFrame, sDataFrame]], report_config: Settings
) -> Optional[Union[pd.DataFrame, sDataFrame]]:
logger.info_def_report(
dataframe=type(df), timeseries=report_config.vars.timeseries.active
)
if (
df is not None
and isinstance(df, pd.DataFrame)
and report_config.vars.timeseries.active
):
if report_config.vars.timeseries.sortby:
df = df.sort_values(by=report_config.vars.timeseries.sortby)
df = df.set_index(report_config.vars.timeseries.sortby, drop=False)
df.index.name = None
else:
df = df.sort_index()
return df
| (df: Union[pandas.core.frame.DataFrame, ~sDataFrame, NoneType], report_config: ydata_profiling.config.Settings) -> Union[pandas.core.frame.DataFrame, ~sDataFrame, NoneType] |
36,876 | ydata_profiling.profile_report | __validate_inputs | null | @staticmethod
def __validate_inputs(
df: Optional[Union[pd.DataFrame, sDataFrame]],
minimal: bool,
tsmode: bool,
config_file: Optional[Union[Path, str]],
lazy: bool,
) -> None:
# Lazy profile cannot be set if no DataFrame is provided
if df is None and not lazy:
raise ValueError("Can init a not-lazy ProfileReport with no DataFrame")
if config_file is not None and minimal:
raise ValueError(
"Arguments `config_file` and `minimal` are mutually exclusive."
)
# Spark Dataframe validations
if isinstance(df, pd.DataFrame):
if df is not None and df.empty:
raise ValueError(
"DataFrame is empty. Please" "provide a non-empty DataFrame."
)
else:
if tsmode:
raise NotImplementedError(
"Time-Series dataset analysis is not yet supported for Spark DataFrames"
)
if (
df is not None and df.rdd.isEmpty()
): # df.isEmpty is only supported from pyspark version 3.3.0
raise ValueError(
"DataFrame is empty. Please" "provide a non-empty DataFrame."
)
| (df: Union[pandas.core.frame.DataFrame, ~sDataFrame, NoneType], minimal: bool, tsmode: bool, config_file: Union[pathlib.Path, str, NoneType], lazy: bool) -> NoneType |
36,877 | ydata_profiling.profile_report | __init__ | Generate a ProfileReport based on a pandas or spark.sql DataFrame
Config processing order (in case of duplicate entries, entries later in the order are retained):
- config presets (e.g. `config_file`, `minimal` arguments)
- config groups (e.g. `explorative` and `sensitive` arguments)
- custom settings (e.g. `config` argument)
- custom settings **kwargs (e.g. `title`)
Args:
df: a pandas or spark.sql DataFrame
minimal: minimal mode is a default configuration with minimal computation
tsmode: activates time-series analysis for all the numerical variables from the dataset.
Only available for pd.DataFrame
sortby: ignored if tsmode=False. Order the dataset by a provided column.
sensitive: hides the values for categorical and text variables for report privacy
config_file: a config file (.yml), mutually exclusive with `minimal`
lazy: compute when needed
sample: optional dict(name="Sample title", caption="Caption", data=pd.DataFrame())
typeset: optional user typeset to use for type inference
summarizer: optional user summarizer to generate custom summary output
type_schema: optional dict containing pairs of `column name`: `type`
**kwargs: other arguments, for valid arguments, check the default configuration file.
| def __init__(
self,
df: Optional[Union[pd.DataFrame, sDataFrame]] = None,
minimal: bool = False,
tsmode: bool = False,
sortby: Optional[str] = None,
sensitive: bool = False,
explorative: bool = False,
dark_mode: bool = False,
orange_mode: bool = False,
sample: Optional[dict] = None,
config_file: Optional[Union[Path, str]] = None,
lazy: bool = True,
typeset: Optional[VisionsTypeset] = None,
summarizer: Optional[BaseSummarizer] = None,
config: Optional[Settings] = None,
type_schema: Optional[dict] = None,
**kwargs,
):
"""Generate a ProfileReport based on a pandas or spark.sql DataFrame
Config processing order (in case of duplicate entries, entries later in the order are retained):
- config presets (e.g. `config_file`, `minimal` arguments)
- config groups (e.g. `explorative` and `sensitive` arguments)
- custom settings (e.g. `config` argument)
- custom settings **kwargs (e.g. `title`)
Args:
df: a pandas or spark.sql DataFrame
minimal: minimal mode is a default configuration with minimal computation
tsmode: activates time-series analysis for all the numerical variables from the dataset.
Only available for pd.DataFrame
sortby: ignored if tsmode=False. Order the dataset by a provided column.
sensitive: hides the values for categorical and text variables for report privacy
config_file: a config file (.yml), mutually exclusive with `minimal`
lazy: compute when needed
sample: optional dict(name="Sample title", caption="Caption", data=pd.DataFrame())
typeset: optional user typeset to use for type inference
summarizer: optional user summarizer to generate custom summary output
type_schema: optional dict containing pairs of `column name`: `type`
**kwargs: other arguments, for valid arguments, check the default configuration file.
"""
self.__validate_inputs(df, minimal, tsmode, config_file, lazy)
if config_file or minimal:
if not config_file:
config_file = get_config("config_minimal.yaml")
report_config = Settings().from_file(config_file)
elif config is not None:
report_config = config
else:
if isinstance(df, pd.DataFrame):
report_config = Settings()
else:
report_config = SparkSettings()
groups = [
(explorative, "explorative"),
(sensitive, "sensitive"),
(dark_mode, "dark_mode"),
(orange_mode, "orange_mode"),
]
if any(condition for condition, _ in groups):
cfg = Settings()
for condition, key in groups:
if condition:
cfg = cfg.update(Config.get_arg_groups(key))
report_config = cfg.update(report_config.dict(exclude_defaults=True))
if len(kwargs) > 0:
shorthands, kwargs = Config.shorthands(kwargs)
report_config = (
Settings()
.update(shorthands)
.update(report_config.dict(exclude_defaults=True))
)
if kwargs:
report_config = report_config.update(kwargs)
report_config.vars.timeseries.active = tsmode
if tsmode and sortby:
report_config.vars.timeseries.sortby = sortby
self.df = self.__initialize_dataframe(df, report_config)
self.config = report_config
self._df_hash = None
self._sample = sample
self._type_schema = type_schema
self._typeset = typeset
self._summarizer = summarizer
if not lazy:
# Trigger building the report structure
_ = self.report
| (self, df: Union[pandas.core.frame.DataFrame, ~sDataFrame, NoneType] = None, minimal: bool = False, tsmode: bool = False, sortby: Optional[str] = None, sensitive: bool = False, explorative: bool = False, dark_mode: bool = False, orange_mode: bool = False, sample: Optional[dict] = None, config_file: Union[pathlib.Path, str, NoneType] = None, lazy: bool = True, typeset: Optional[visions.typesets.typeset.VisionsTypeset] = None, summarizer: Optional[ydata_profiling.model.summarizer.BaseSummarizer] = None, config: Optional[ydata_profiling.config.Settings] = None, type_schema: Optional[dict] = None, **kwargs) |
36,878 | ydata_profiling.profile_report | __repr__ | Override so that Jupyter Notebook does not print the object. | def __repr__(self) -> str:
"""Override so that Jupyter Notebook does not print the object."""
return ""
| (self) -> str |
36,879 | ydata_profiling.profile_report | _render_html | null | def _render_html(self) -> str:
from ydata_profiling.report.presentation.flavours import HTMLReport
report = self.report
with tqdm(
total=1, desc="Render HTML", disable=not self.config.progress_bar
) as pbar:
html = HTMLReport(copy.deepcopy(report)).render(
nav=self.config.html.navbar_show,
offline=self.config.html.use_local_assets,
inline=self.config.html.inline,
assets_prefix=self.config.html.assets_prefix,
primary_color=self.config.html.style.primary_colors[0],
logo=self.config.html.style.logo,
theme=self.config.html.style.theme,
title=self.description_set.analysis.title,
date=self.description_set.analysis.date_start,
version=self.description_set.package["ydata_profiling_version"],
)
if self.config.html.minify_html:
from htmlmin.main import minify
html = minify(html, remove_all_empty_space=True, remove_comments=True)
pbar.update()
return html
| (self) -> str |
36,880 | ydata_profiling.profile_report | _render_json | null | def _render_json(self) -> str:
def encode_it(o: Any) -> Any:
if is_dataclass(o):
o = asdict(o)
if isinstance(o, dict):
return {encode_it(k): encode_it(v) for k, v in o.items()}
else:
if isinstance(o, (bool, int, float, str)):
return o
elif isinstance(o, list):
return [encode_it(v) for v in o]
elif isinstance(o, set):
return {encode_it(v) for v in o}
elif isinstance(o, pd.Series):
return encode_it(o.to_list())
elif isinstance(o, pd.DataFrame):
return encode_it(o.to_dict(orient="records"))
elif isinstance(o, np.ndarray):
return encode_it(o.tolist())
elif isinstance(o, Sample):
return encode_it(o.dict())
elif isinstance(o, np.generic):
return o.item()
else:
return str(o)
description = self.description_set
with tqdm(
total=1, desc="Render JSON", disable=not self.config.progress_bar
) as pbar:
description_dict = format_summary(description)
description_dict = encode_it(description_dict)
description_dict = redact_summary(description_dict, self.config)
data = json.dumps(description_dict, indent=4)
pbar.update()
return data
| (self) -> str |
36,881 | ydata_profiling.profile_report | _render_widgets | null | def _render_widgets(self) -> Any:
from ydata_profiling.report.presentation.flavours import WidgetReport
report = self.report
with tqdm(
total=1,
desc="Render widgets",
disable=not self.config.progress_bar,
leave=False,
) as pbar:
widgets = WidgetReport(copy.deepcopy(report)).render()
pbar.update()
return widgets
| (self) -> Any |
36,882 | ydata_profiling.profile_report | _repr_html_ | The ipython notebook widgets user interface gets called by the jupyter notebook. | def _repr_html_(self) -> None:
"""The ipython notebook widgets user interface gets called by the jupyter notebook."""
self.to_notebook_iframe()
| (self) -> NoneType |
36,883 | ydata_profiling.profile_report | compare | Compare this report with another ProfileReport
Alias for:
```
ydata_profiling.compare([report1, report2], config=config)
```
See `ydata_profiling.compare` for details.
Args:
other: the ProfileReport to compare to
config: the settings object for the merged ProfileReport. If `None`, uses the caller's config
Returns:
Comparison ProfileReport
| def compare(
self, other: "ProfileReport", config: Optional[Settings] = None
) -> "ProfileReport":
"""Compare this report with another ProfileReport
Alias for:
```
ydata_profiling.compare([report1, report2], config=config)
```
See `ydata_profiling.compare` for details.
Args:
other: the ProfileReport to compare to
config: the settings object for the merged ProfileReport. If `None`, uses the caller's config
Returns:
Comparison ProfileReport
"""
from ydata_profiling.compare_reports import compare
return compare([self, other], config if config is not None else self.config)
| (self, other: ydata_profiling.profile_report.ProfileReport, config: Optional[ydata_profiling.config.Settings] = None) -> ydata_profiling.profile_report.ProfileReport |
36,884 | ydata_profiling.serialize_report | dump |
Dump ProfileReport to file
| def dump(self, output_file: Union[Path, str]) -> None:
"""
Dump ProfileReport to file
"""
if not isinstance(output_file, Path):
output_file = Path(str(output_file))
output_file = output_file.with_suffix(".pp")
output_file.write_bytes(self.dumps())
| (self, output_file: Union[pathlib.Path, str]) -> NoneType |
36,885 | ydata_profiling.serialize_report | dumps |
Serialize ProfileReport and return bytes for reproducing ProfileReport or Caching.
Returns:
Bytes which contains hash of DataFrame, config, _description_set and _report
| def dumps(self) -> bytes:
"""
Serialize ProfileReport and return bytes for reproducing ProfileReport or Caching.
Returns:
Bytes which contains hash of DataFrame, config, _description_set and _report
"""
import pickle
# Note: _description_set and _report may be None if they haven't been computed
return pickle.dumps(
[
self.df_hash,
self.config,
self._description_set,
self._report,
]
)
| (self) -> bytes |
36,886 | ydata_profiling.profile_report | get_description | Return the description (a raw statistical summary) of the dataset.
Returns:
Dict containing a description for each variable in the DataFrame.
| def get_description(self) -> BaseDescription:
"""Return the description (a raw statistical summary) of the dataset.
Returns:
Dict containing a description for each variable in the DataFrame.
"""
return self.description_set
| (self) -> ydata_profiling.model.description.BaseDescription |
36,887 | ydata_profiling.profile_report | get_duplicates | Get duplicate rows and counts based on the configuration
Returns:
A DataFrame with the duplicate rows and their counts.
| def get_duplicates(self) -> Optional[pd.DataFrame]:
"""Get duplicate rows and counts based on the configuration
Returns:
A DataFrame with the duplicate rows and their counts.
"""
return self.description_set.duplicates
| (self) -> Optional[pandas.core.frame.DataFrame] |
36,888 | ydata_profiling.profile_report | get_rejected_variables | Get variables that are rejected for analysis (e.g. constant, mixed data types)
Returns:
a set of column names that are unsupported
| def get_rejected_variables(self) -> set:
"""Get variables that are rejected for analysis (e.g. constant, mixed data types)
Returns:
a set of column names that are unsupported
"""
return {
alert.column_name
for alert in self.description_set.alerts
if alert.alert_type == AlertType.REJECTED
}
| (self) -> set |
36,889 | ydata_profiling.profile_report | get_sample | Get head/tail samples based on the configuration
Returns:
A dict with the head and tail samples.
| def get_sample(self) -> dict:
"""Get head/tail samples based on the configuration
Returns:
A dict with the head and tail samples.
"""
return self.description_set.sample
| (self) -> dict |
36,890 | ydata_profiling.profile_report | invalidate_cache | Invalidate report cache. Useful after changing setting.
Args:
subset:
- "rendering" to invalidate the html, json and widget report rendering
- "report" to remove the caching of the report structure
- None (default) to invalidate all caches
Returns:
None
| def invalidate_cache(self, subset: Optional[str] = None) -> None:
"""Invalidate report cache. Useful after changing setting.
Args:
subset:
- "rendering" to invalidate the html, json and widget report rendering
- "report" to remove the caching of the report structure
- None (default) to invalidate all caches
Returns:
None
"""
if subset is not None and subset not in ["rendering", "report"]:
raise ValueError(
"'subset' parameter should be None, 'rendering' or 'report'"
)
if subset is None or subset in ["rendering", "report"]:
self._widgets = None
self._json = None
self._html = None
if subset is None or subset == "report":
self._report = None
if subset is None:
self._description_set = None
| (self, subset: Optional[str] = None) -> NoneType |
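A short sketch of the intended workflow; the report and the specific setting changed here are illustrative. After changing a rendering-related option, invalidating only the "rendering" subset forces to_html to re-render without recomputing the statistics.
import pandas as pd
from ydata_profiling import ProfileReport

report = ProfileReport(pd.DataFrame({"a": [1, 2, 3]}), title="Example")
_ = report.to_html()                      # first render populates the html/report/description caches
report.config.html.navbar_show = False    # hypothetical setting change affecting rendering only
report.invalidate_cache("rendering")      # drop cached html/json/widgets; keep report structure and stats
html = report.to_html()                   # re-rendered; statistics are not recomputed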
36,891 | ydata_profiling.serialize_report | load |
Load ProfileReport from file
Raises:
ValueError: if the DataFrame or Config do not match with the current ProfileReport
| def load(
self, load_file: Union[Path, str]
) -> Union["ProfileReport", "SerializeReport"]:
"""
Load ProfileReport from file
Raises:
ValueError: if the DataFrame or Config do not match with the current ProfileReport
"""
if not isinstance(load_file, Path):
load_file = Path(str(load_file))
self.loads(load_file.read_bytes())
return self
| (self, load_file: Union[pathlib.Path, str]) -> Union[ForwardRef('ProfileReport'), ForwardRef('SerializeReport')] |
36,892 | ydata_profiling.serialize_report | loads |
Deserialize the serialized report
Args:
data: The bytes of a serialized ProfileReport object.
Raises:
ValueError: if ignore_config is set to False and the configs do not match.
Returns:
self
| def loads(self, data: bytes) -> Union["ProfileReport", "SerializeReport"]:
"""
Deserialize the serialized report
Args:
data: The bytes of a serialized ProfileReport object.
Raises:
ValueError: if ignore_config is set to False and the configs do not match.
Returns:
self
"""
import pickle
try:
(
df_hash,
loaded_config,
loaded_description_set,
loaded_report,
) = pickle.loads(data)
except Exception as e:
raise ValueError("Failed to load data") from e
if not all(
(
df_hash is None or isinstance(df_hash, str),
isinstance(loaded_config, Settings),
loaded_description_set is None
or isinstance(loaded_description_set, BaseDescription),
loaded_report is None or isinstance(loaded_report, Root),
)
):
raise ValueError(
"Failed to load data: file may be damaged or from an incompatible version"
)
if (df_hash == self.df_hash) or (self.df is None):
# load to an empty ProfileReport
# Set description_set, report, sample if they are None, or raise a warning.
if self._description_set is None:
self._description_set = loaded_description_set
else:
warnings.warn(
"The description set of current ProfileReport is not None. It won't be loaded."
)
if self._report is None:
self._report = loaded_report
else:
warnings.warn(
"The report of current ProfileReport is not None. It won't be loaded."
)
# overwrite config
self.config = loaded_config
# warn if version not equal
if (
loaded_description_set is not None
and loaded_description_set.package["ydata_profiling_version"]
!= __version__
):
warnings.warn(
f"The package version specified in the loaded data is not equal to the version installed. "
f"Currently running on ydata-profiling {__version__} , while loaded data is generated by ydata_profiling, {loaded_description_set.package['ydata_profiling_version']}."
)
# set df_hash
self._df_hash = df_hash
else:
raise ValueError("DataFrame does not match with the current ProfileReport.")
return self
| (self, data: bytes) -> Union[ForwardRef('ProfileReport'), ForwardRef('SerializeReport')] |
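A sketch of the round trip, assuming the .pp file from the earlier dump sketch exists; loading into an empty ProfileReport (constructed with None) skips the DataFrame-hash check and restores the cached description set and report structure.
from ydata_profiling import ProfileReport

restored = ProfileReport(None).load("example_report.pp")   # file name from the earlier sketch
print(type(restored.description_set))                      # BaseDescription restored from the pickle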
36,893 | ydata_profiling.expectations_report | to_expectation_suite |
All parameters default to True to make it easier to access the full functionality of Great Expectations out of
the box.
Args:
suite_name: The name of your expectation suite
data_context: A user-specified data context
save_suite: Boolean to determine whether to save the suite to .json as part of the method
run_validation: Boolean to determine whether to run validation as part of the method
build_data_docs: Boolean to determine whether to build data docs, save the .html file, and open data docs in
your browser
handler: The handler to use for building expectations
Returns:
An ExpectationSuite
| def to_expectation_suite(
self,
suite_name: Optional[str] = None,
data_context: Optional[Any] = None,
save_suite: bool = True,
run_validation: bool = True,
build_data_docs: bool = True,
handler: Optional[Handler] = None,
) -> Any:
"""
All parameters default to True to make it easier to access the full functionality of Great Expectations out of
the box.
Args:
suite_name: The name of your expectation suite
data_context: A user-specified data context
save_suite: Boolean to determine whether to save the suite to .json as part of the method
run_validation: Boolean to determine whether to run validation as part of the method
build_data_docs: Boolean to determine whether to build data docs, save the .html file, and open data docs in
your browser
handler: The handler to use for building expectations
Returns:
An ExpectationSuite
"""
try:
import great_expectations as ge
except ImportError as ex:
raise ImportError(
"Please install great expectations before using the expectation functionality"
) from ex
# Use the report title if no suite name is given
if suite_name is None:
suite_name = slugify(self.config.title)
# Use the default handler if none
if handler is None:
handler = ExpectationHandler(self.typeset)
# Obtain the ge context and create the expectation suite
if not data_context:
data_context = ge.data_context.DataContext()
suite = data_context.add_expectation_suite(suite_name, overwrite_existing=True)
# Instantiate an in-memory pandas dataset
batch = ge.dataset.PandasDataset(self.df, expectation_suite=suite)
# Obtain the profiling summary
summary: BaseDescription = self.get_description() # type: ignore
# Dispatch to expectations per semantic variable type
for name, variable_summary in summary.variables.items():
handler.handle(variable_summary["type"], name, variable_summary, batch)
# We don't actually update the suite object on the batch in place, so need
# to get the populated suite from the batch
suite = batch.get_expectation_suite()
validation_result_identifier = None
if run_validation:
batch = ge.dataset.PandasDataset(self.df, expectation_suite=suite)
results = data_context.run_validation_operator(
"action_list_operator", assets_to_validate=[batch]
)
validation_result_identifier = results.list_validation_result_identifiers()[
0
]
# Write expectations and open data docs
if save_suite or build_data_docs:
data_context.save_expectation_suite(suite)
if build_data_docs:
data_context.build_data_docs()
data_context.open_data_docs(validation_result_identifier)
return batch.get_expectation_suite()
| (self, suite_name: Optional[str] = None, data_context: Optional[Any] = None, save_suite: bool = True, run_validation: bool = True, build_data_docs: bool = True, handler: Optional[ydata_profiling.model.handler.Handler] = None) -> Any |
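A hedged sketch of calling this method, assuming an existing ProfileReport named report (as in the earlier sketches) and a Great Expectations project already initialized in the working directory; the suite name and flags are illustrative.
suite = report.to_expectation_suite(
    suite_name="example_suite",   # hypothetical; defaults to a slug of the report title
    save_suite=False,             # skip writing the suite .json
    run_validation=False,         # skip the validation operator
    build_data_docs=False,        # skip building/opening data docs
)
print(len(suite.expectations))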
36,894 | ydata_profiling.profile_report | to_file | Write the report to a file.
Args:
output_file: The name or the path of the file to generate including the extension (.html, .json).
silent: if False, opens the file in the default browser or downloads it in a Google Colab environment
| def to_file(self, output_file: Union[str, Path], silent: bool = True) -> None:
"""Write the report to a file.
Args:
output_file: The name or the path of the file to generate including the extension (.html, .json).
silent: if False, opens the file in the default browser or downloads it in a Google Colab environment
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
pillow_version = pkg_resources.get_distribution("Pillow").version
version_tuple = tuple(map(int, pillow_version.split(".")))
if version_tuple < (9, 5, 0):
warnings.warn(
"Try running command: 'pip install --upgrade Pillow' to avoid ValueError"
)
if not isinstance(output_file, Path):
output_file = Path(str(output_file))
if output_file.suffix == ".json":
data = self.to_json()
else:
if not self.config.html.inline:
self.config.html.assets_path = str(output_file.parent)
if self.config.html.assets_prefix is None:
self.config.html.assets_prefix = str(output_file.stem) + "_assets"
create_html_assets(self.config, output_file)
data = self.to_html()
if output_file.suffix != ".html":
suffix = output_file.suffix
output_file = output_file.with_suffix(".html")
warnings.warn(
f"Extension {suffix} not supported. For now we assume .html was intended. "
f"To remove this warning, please use .html or .json."
)
disable_progress_bar = not self.config.progress_bar
with tqdm(
total=1, desc="Export report to file", disable=disable_progress_bar
) as pbar:
output_file.write_text(data, encoding="utf-8")
pbar.update()
if not silent:
try:
from google.colab import files # noqa: F401
files.download(output_file.absolute().as_uri())
except ModuleNotFoundError:
import webbrowser
webbrowser.open_new_tab(output_file.absolute().as_uri())
| (self, output_file: Union[str, pathlib.Path], silent: bool = True) -> NoneType |
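A minimal sketch of exporting the same report (from the earlier sketches) to the supported formats; the file names are hypothetical, and any other extension falls back to .html with the warning shown above.
report.to_file("profile.html")   # full HTML report, assets inlined by default
report.to_file("profile.json")   # JSON summary instead of HTML
report.to_file("summary.txt")    # warns and writes summary.html instead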
36,895 | ydata_profiling.profile_report | to_html | Generate and return the complete template as a lengthy string
for use with frameworks.
Returns:
Profiling report html including wrapper.
| def to_html(self) -> str:
"""Generate and return complete template as lengthy string
for using with frameworks.
Returns:
Profiling report html including wrapper.
"""
return self.html
| (self) -> str |
36,896 | ydata_profiling.profile_report | to_json | Represent the ProfileReport as a JSON string
Returns:
JSON string
| def to_json(self) -> str:
"""Represent the ProfileReport as a JSON string
Returns:
JSON string
"""
return self.json
| (self) -> str |
36,897 | ydata_profiling.profile_report | to_notebook_iframe | Used to output the HTML representation to a Jupyter notebook.
When config.notebook.iframe.attribute is "src", this function creates a temporary HTML file
in `./tmp/profile_[hash].html` and returns an Iframe pointing to that content.
When config.notebook.iframe.attribute is "srcdoc", the same HTML is injected in the "srcdoc" attribute of
the Iframe.
Notes:
This construction solves problems with conflicting stylesheets and navigation links.
| def to_notebook_iframe(self) -> None:
"""Used to output the HTML representation to a Jupyter notebook.
When config.notebook.iframe.attribute is "src", this function creates a temporary HTML file
in `./tmp/profile_[hash].html` and returns an Iframe pointing to that content.
When config.notebook.iframe.attribute is "srcdoc", the same HTML is injected in the "srcdoc" attribute of
the Iframe.
Notes:
This construction solves problems with conflicting stylesheets and navigation links.
"""
from IPython.core.display import display
from ydata_profiling.report.presentation.flavours.widget.notebook import (
get_notebook_iframe,
)
# Ignore warning: https://github.com/ipython/ipython/pull/11350/files
with warnings.catch_warnings():
warnings.simplefilter("ignore")
display(get_notebook_iframe(self.config, self))
| (self) -> NoneType |
36,898 | ydata_profiling.profile_report | to_widgets | The ipython notebook widgets user interface. | def to_widgets(self) -> None:
"""The ipython notebook widgets user interface."""
try:
from google.colab import files # noqa: F401
warnings.warn(
"Ipywidgets is not yet fully supported on Google Colab (https://github.com/googlecolab/colabtools/issues/60)."
"As an alternative, you can use the HTML report. See the documentation for more information."
)
except ModuleNotFoundError:
pass
from IPython.core.display import display
display(self.widgets)
| (self) -> NoneType |
36,899 | ydata_profiling.compare_reports | compare |
Compare Profile reports
Args:
reports: two reports to compare
input may either be a ProfileReport, or the summary obtained from report.get_description()
config: the settings object for the merged ProfileReport
compute: recompute the profile report using the given config or the left report's config;
recommended in cases where the reports were created using different settings
| def compare(
reports: Union[List[ProfileReport], List[BaseDescription]],
config: Optional[Settings] = None,
compute: bool = False,
) -> ProfileReport:
"""
Compare Profile reports
Args:
reports: two reports to compare
input may either be a ProfileReport, or the summary obtained from report.get_description()
config: the settings object for the merged ProfileReport
compute: recompute the profile report using the given config or the left report's config;
recommended in cases where the reports were created using different settings
"""
if len(reports) == 0:
raise ValueError("No reports available for comparison.")
report_dtypes = [type(r) for r in reports]
if len(set(report_dtypes)) > 1:
raise TypeError(
"The input must have the same data type for all reports. Comparing ProfileReport objects to summaries obtained from the get_description() method is not supported."
)
if isinstance(reports[0], ProfileReport):
all_configs = [r.config for r in reports] # type: ignore
else:
configs_str = [
json.loads(r.package["ydata_profiling_config"]) for r in reports # type: ignore
]
all_configs = []
for c_str in configs_str:
c_setting = Settings()
c_setting = c_setting.update(c_str)
all_configs.append(c_setting)
validate_reports(reports=reports, configs=all_configs)
if isinstance(reports[0], ProfileReport):
base_features = reports[0].df.columns # type: ignore
for report in reports[1:]:
cols_2_compare = [col for col in base_features if col in report.df.columns] # type: ignore
report.df = report.df.loc[:, cols_2_compare] # type: ignore
reports = [r for r in reports if not r.df.empty] # type: ignore
if len(reports) == 1:
return reports[0] # type: ignore
else:
base_features = list(reports[0].variables.keys())
non_empty_reports = 0
for report in reports[1:]:
cols_2_compare = [
col for col in base_features if col in list(report.variables.keys()) # type: ignore
]
if len(cols_2_compare) > 0:
non_empty_reports += 1
if non_empty_reports == 0:
profile = ProfileReport(None, config=all_configs[0])
profile._description_set = reports[0]
return profile
_config = None
if config is None:
_config = all_configs[0].copy()
else:
_config = config.copy()
if isinstance(reports[0], ProfileReport):
for report in reports:
tsmode = report.config.vars.timeseries.active # type: ignore
title = report.config.title # type: ignore
report.config = config.copy() # type: ignore
report.config.title = title # type: ignore
report.config.vars.timeseries.active = tsmode # type: ignore
if compute:
report._description_set = None # type: ignore
if all(isinstance(report, ProfileReport) for report in reports):
# Type ignore is needed as mypy does not pick up on the type narrowing
# Consider using TypeGuard (3.10): https://docs.python.org/3/library/typing.html#typing.TypeGuard
_update_titles(reports) # type: ignore
labels, descriptions = _compare_profile_report_preprocess(reports, _config) # type: ignore
elif all(isinstance(report, BaseDescription) for report in reports):
labels, descriptions = _compare_dataset_description_preprocess(reports) # type: ignore
else:
raise TypeError(
"The input must have the same data type for all reports. Comparing ProfileReport objects to summaries obtained from the get_description() method is not supported."
)
_config.html.style._labels = labels
_placeholders(descriptions)
descriptions_dict = [asdict(_apply_config(d, _config)) for d in descriptions]
res: dict = _update_merge(None, descriptions_dict[0])
for r in descriptions_dict[1:]:
res = _update_merge(res, r)
res["analysis"]["title"] = _compare_title(res["analysis"]["title"])
res["alerts"] = _create_placehoder_alerts(res["alerts"])
if not any(res["time_index_analysis"]):
res["time_index_analysis"] = None
profile = ProfileReport(None, config=_config)
profile._description_set = from_dict(data_class=BaseDescription, data=res)
return profile
| (reports: Union[List[ydata_profiling.profile_report.ProfileReport], List[ydata_profiling.model.description.BaseDescription]], config: Optional[ydata_profiling.config.Settings] = None, compute: bool = False) -> ydata_profiling.profile_report.ProfileReport |
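A sketch of comparing two profiles, assuming two DataFrames (an illustrative train/test split defined inline); the merged result is itself a ProfileReport and can be rendered like any other.
import pandas as pd
from ydata_profiling import ProfileReport, compare

train_df = pd.DataFrame({"a": [1, 2, 3, 4]})   # hypothetical split
test_df = pd.DataFrame({"a": [5, 6, 7, 8]})
comparison = compare([ProfileReport(train_df, title="Train"), ProfileReport(test_df, title="Test")])
comparison.to_file("comparison.html")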
36,916 | spacy_lookups_data | get_file | null | def get_file(filename):
if sys.version_info < (3, 9):
import pkg_resources
return pkg_resources.resource_filename(__name__, os.path.join("data", filename))
else:
import importlib.resources
return importlib.resources.files(__name__).joinpath("data", filename)
| (filename) |
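A sketch of resolving a bundled lookup table; the filename is an assumption and should be replaced with one of the JSON tables actually shipped in the package's data folder. Note the return type differs by interpreter version, as the branches above show.
from spacy_lookups_data import get_file

path = get_file("en_lemma_lookup.json")   # assumed filename of a bundled lookup table
print(path)   # str on Python < 3.9, an importlib.resources path object on 3.9+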
36,920 | validador_colab.loop.first_block | first_block | null | from validador_colab.infra.repositories import SmarketAnalyticsRepositoryPostgres, ColabCountersRepositoryPostgres
from validador_colab.core.usecases import SmarketService, ColabService
from validador_colab.core.entities import Sakura, NaturalOne, ValidationDetail
from validador_colab.core.errors import NoDataFound
from validador_colab.infra.database import DBConnectionHandlerPostgres, DBConnectionHandlerPostgresControle, DB_CONTROLE
from validador_colab.infra.utils import break_no_data_found
import pickle
import os
import asyncio
import datetime
temp_data_folder = os.path.join(os.getcwd(), 'temp_data')
db = DBConnectionHandlerPostgresControle()
async def first_block(industry_id: int, date_start, date_end, print_corretos=True, pickle_use=False):
if not os.path.exists(temp_data_folder):
os.makedirs(temp_data_folder)
if industry_id == 11: #Natural One
industry = NaturalOne()
elif industry_id == 13: #Sakura
industry = Sakura()
pickle_colab_name = "colab_data_" + industry.__repr__() + "_" + date_start + "_" + date_end + ".pkl"
pickle_smarket_name = "smarket_data_" + industry.__repr__() + "_" + date_start + "_" + date_end + ".pkl"
if pickle_use:
try:
if not os.path.exists(os.path.join(temp_data_folder, pickle_smarket_name)):
print("Getting data from smarket")
smarket_data = SmarketService(SmarketAnalyticsRepositoryPostgres()).get_smarket_counters(industry, date_start, date_end)
with open(os.path.join(temp_data_folder, pickle_smarket_name), 'wb') as f:
pickle.dump(smarket_data, f)
print("Data saved")
else:
print("Loading data from smarket")
with open(os.path.join(temp_data_folder, pickle_smarket_name), 'rb') as f:
smarket_data = pickle.load(f)
print("Data loaded")
if not os.path.exists(os.path.join(temp_data_folder, pickle_colab_name)):
print("Getting data from colab")
colab_data = ColabService(ColabCountersRepositoryPostgres()).get_colab_counters(industry_id, date_start, date_end)
with open(os.path.join(temp_data_folder, pickle_colab_name), 'wb') as f:
pickle.dump(colab_data, f)
print("Data saved")
else:
print("Loading data from colab")
with open(os.path.join(temp_data_folder, pickle_colab_name), 'rb') as f:
colab_data = pickle.load(f)
print("Data loaded")
except NoDataFound:
print(f"Sem dados cadastrados para a industria no dia {date_start} até o dia {date_end}")
else:
print("Getting data from smarket")
smarket_data = SmarketService(SmarketAnalyticsRepositoryPostgres()).get_smarket_counters(industry, date_start,
date_end)
print("Getting data from colab")
colab_data = ColabService(ColabCountersRepositoryPostgres()).get_colab_counters(industry_id, date_start,
date_end)
if len(smarket_data) == 0 or len(colab_data) == 0:
print("Erro na execução:")
print(f"Data de consulta: {date_start} - {date_end}")
print(f"len(smarket_data) = {len(smarket_data)}")
print(f"len(colab_data) = {len(colab_data)}")
print("Classificando dias como não aptos a atualização")
DB_CONTROLE.insert(break_no_data_found(industry, date_start, date_end, 'fat_cupom'))
print("Finalizando processo")
return 0
# This nested-loop approach is ugly, but it's what we have for now
print('Iniciando processamento')
count_produtos_certos = {}
count_produtos_errados = {}
validation_intel = []
# First validation: check whether product quantities match between fat_cupom and Analytics
for colab_unit in colab_data:
for smarket_unit in smarket_data:
if colab_unit.id_cliente == smarket_unit.client_id:
if colab_unit.date == smarket_unit.date:
if int(colab_unit.seqproduto) == int(smarket_unit.seqproduto):
if colab_unit.seqproduto == 164059:
print("Me gusta lá conchita")
if int(colab_unit.qtd_fatcupom) == int(smarket_unit.analytics):
if print_corretos:
print('-------------------')
print(f'Cliente {smarket_unit.client_id}')
print(f"Data {smarket_unit.date}")
print(f'Tudo certo no produto: {smarket_unit.seqproduto} - {smarket_unit.description}')
print(f'Quantitativo no analytics: {smarket_unit.analytics}')
print(f'Quantitativo na fatcupom colab: {colab_unit.qtd_fatcupom}')
print('-------------------')
if smarket_unit.client_id not in count_produtos_certos:
count_produtos_certos[smarket_unit.client_id] = {}
if smarket_unit.date not in count_produtos_certos[smarket_unit.client_id]:
count_produtos_certos[smarket_unit.client_id][smarket_unit.date] = 0
count_produtos_certos[smarket_unit.client_id][smarket_unit.date] += 1
else:
print('-------------------')
print(f'Cliente {smarket_unit.client_id}')
print(f"Data {smarket_unit.date}")
print(f'ERRO no produto: {smarket_unit.seqproduto} - {smarket_unit.description}')
print(f'Quantitativo no analytics: {smarket_unit.analytics}')
print(f'Quantitativo na fatcupom colab: {colab_unit.qtd_fatcupom}')
print('-------------------')
if smarket_unit.client_id not in count_produtos_errados:
count_produtos_errados[smarket_unit.client_id] = {}
if smarket_unit.date not in count_produtos_errados[smarket_unit.client_id]:
count_produtos_errados[smarket_unit.client_id][smarket_unit.date] = 0
count_produtos_errados[smarket_unit.client_id][smarket_unit.date] += 1
# Second validation - products present in the tables:
prod_cliente_data = {}
colab_cliente_data = {}
produtos_ausentes_analytics = {}
produtos_ausentes_colab = {}
datas_para_nao_subir = {}
for colab_unit in colab_data:
if colab_unit.id_cliente not in colab_cliente_data:
colab_cliente_data[colab_unit.id_cliente] = {}
if colab_unit.date not in colab_cliente_data[colab_unit.id_cliente]:
colab_cliente_data[colab_unit.id_cliente][colab_unit.date] = []
if colab_unit.seqproduto not in colab_cliente_data[colab_unit.id_cliente][colab_unit.date]:
colab_cliente_data[colab_unit.id_cliente][colab_unit.date].append(colab_unit.seqproduto)
for smarket_unit in smarket_data:
if smarket_unit.client_id not in prod_cliente_data:
prod_cliente_data[smarket_unit.client_id] = {}
if smarket_unit.date not in prod_cliente_data[smarket_unit.client_id]:
prod_cliente_data[smarket_unit.client_id][smarket_unit.date] = []
if smarket_unit.seqproduto not in prod_cliente_data[smarket_unit.client_id][smarket_unit.date]:
prod_cliente_data[smarket_unit.client_id][smarket_unit.date].append(smarket_unit.seqproduto)
# Find products missing from the colab base
for client_id, date_counter in prod_cliente_data.items():
for date, prod_list in date_counter.items():
for prod in prod_list:
if client_id in colab_cliente_data:
if date in colab_cliente_data[client_id]:
if prod not in colab_cliente_data[client_id][date]:
if client_id not in produtos_ausentes_colab:
produtos_ausentes_colab[client_id] = {}
if date not in produtos_ausentes_colab[client_id]:
produtos_ausentes_colab[client_id][date] = []
print(f"Produto {prod} não encontrado na base do colab para cliente: {client_id} na data: {date}, para a industria {industry_id}")
produtos_ausentes_colab[client_id][date].append(prod)
else:
print(f"Data: {date} não encontrada na base do colab para cliente: {client_id} para a industria {industry_id}")
if client_id not in datas_para_nao_subir:
datas_para_nao_subir[client_id] = []
if date not in datas_para_nao_subir[client_id]:
validation_intel.append(ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=False
))
datas_para_nao_subir[client_id].append(date)
break
else:
print(f"Cliente {client_id} não encontrado na base do colab para industria: {industry_id}")
if client_id not in datas_para_nao_subir:
datas_para_nao_subir[client_id] = []
if date not in datas_para_nao_subir[client_id]:
validation_intel.append(ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=False
))
datas_para_nao_subir[client_id].append(date)
# Find products missing from the analytics base
for client_id, date_counter in colab_cliente_data.items():
for date, prod_list in date_counter.items():
for prod in prod_list:
if client_id in prod_cliente_data:
if date in prod_cliente_data[client_id]:
if prod not in prod_cliente_data[client_id][date]:
if client_id not in produtos_ausentes_analytics:
produtos_ausentes_analytics[client_id] = {}
if date not in produtos_ausentes_analytics[client_id]:
produtos_ausentes_analytics[client_id][date] = []
produtos_ausentes_analytics[client_id][date].append(prod)
print(f"Produto {prod} não encontrado na base do analytics para cliente: {client_id} na data: {date} para a industria {industry_id}")
else:
print(f"Data: {date} não encontrada na base do analytics para cliente: {client_id} para a industria {industry_id}")
if client_id not in datas_para_nao_subir:
datas_para_nao_subir[client_id] = []
if date not in datas_para_nao_subir[client_id]:
validation_intel.append(ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=False
))
datas_para_nao_subir[client_id].append(date)
break
else:
print(f"Cliente {client_id} não encontrado na base do colab para industria: {industry_id}")
if client_id not in datas_para_nao_subir:
datas_para_nao_subir[client_id] = []
if date not in datas_para_nao_subir[client_id]:
validation_intel.append(ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=False
))
datas_para_nao_subir[client_id].append(date)
# Build the first table based on the mismatched values
if len(count_produtos_errados) > 0:
for client_id, counter in count_produtos_errados.items():
for date, count in counter.items():
if client_id in datas_para_nao_subir:
if date.strftime("%Y-%m-%d") not in [x.strftime("%Y-%m-%d") for x in datas_para_nao_subir[client_id]]:
if count > 0:
validation_intel.append(
ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=False
)
)
else:
validation_intel.append(
ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=True
)
)
else:
validation_intel.append(
ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=False
)
)
if client_id not in datas_para_nao_subir:
datas_para_nao_subir[client_id] = []
if date not in datas_para_nao_subir[client_id]:
datas_para_nao_subir[client_id].append(date)
if len(count_produtos_certos) > 0:
for client_id, counter in count_produtos_certos.items():
for date, count in counter.items():
if client_id in datas_para_nao_subir:
if date.strftime("%Y-%m-%d") not in [x.strftime("%Y-%m-%d") for x in datas_para_nao_subir[client_id]]:
if count > 0:
validation_intel.append(
ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=True
)
)
else:
validation_intel.append(
ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=True
)
)
# Insert the data into the validation database:
sql_insert = """INSERT INTO
PUBLIC.CONTROLE(id_industria, id_cliente, dt_venda, status_validacao, etapa)
VALUES
"""
for validation in validation_intel:
sql_insert += f""" ({int(validation.industry_id)}, {int(validation.client_id)}, '{validation.sale_date}', {bool(validation.validation_status)}, 'fat_cupom'),"""
sql_insert = sql_insert[:-1]
print(sql_insert)
DB_CONTROLE.insert(sql_insert)
| (industry_id: int, date_start, date_end, print_corretos=True, pickle_use=False) |
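A hedged sketch of driving this coroutine from a script, assuming the repositories' database connections are configured; the industry id (11 = Natural One, 13 = Sakura, per the branches above) and the date window are illustrative.
import asyncio
from validador_colab.loop import first_block

if __name__ == "__main__":
    # Validate Natural One (id 11) for an illustrative one-week window, reusing pickled data when available
    asyncio.run(first_block(11, "2024-01-01", "2024-01-07", print_corretos=False, pickle_use=True))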
36,923 | validador_colab.loop.reprocessing_first_block | reprocessing_first_block | null | from validador_colab.loop import first_block
import pickle
import os
import asyncio
import datetime
from validador_colab.core.entities import Sakura, NaturalOne
from validador_colab.infra.utils import clean_old_data
from validador_colab.infra.database import DB_CONTROLE
temp_data_folder = os.path.join(os.getcwd(), 'temp_data')
async def reprocessing_first_block(industry_id: int, date_start, date_end, print_corretos=True):
if industry_id == 11: #Natural One
industry = NaturalOne()
elif industry_id == 13: #Sakura
industry = Sakura()
sqls = clean_old_data(industry, date_start, date_end)
print(f"Limpado dado já cadastrados para a industria {industry.id} no periodo de {date_start} a {date_end}")
for sql in sqls:
DB_CONTROLE.insert(sql)
await first_block(industry_id, date_start, date_end, print_corretos)
| (industry_id: int, date_start, date_end, print_corretos=True) |
36,924 | validador_colab.loop.second_block | second_block | null | from validador_colab.infra.repositories import SmarketAnalyticsRepositoryPostgres, DatamartCountersRepositoryPostgres
from validador_colab.core.usecases import SmarketService, DatamartService
from validador_colab.core.entities import Sakura, NaturalOne, ValidationDetail
from validador_colab.core.errors import NoDataFound
from validador_colab.infra.database import DBConnectionHandlerPostgres, DBConnectionHandlerPostgresControle, DB_CONTROLE
from validador_colab.infra.utils import break_no_data_found
import pickle
import os
import asyncio
import datetime
temp_data_folder = os.path.join(os.getcwd(), 'temp_data')
db = DBConnectionHandlerPostgresControle()
async def second_block(industry_id: int, date_start, date_end, print_corretos=True, pickle_use=False):
if not os.path.exists(temp_data_folder):
os.makedirs(temp_data_folder)
if industry_id == 11: #Natural One
industry = NaturalOne()
elif industry_id == 13: #Sakura
industry = Sakura()
pickle_datamart_name = "datamart_data_" + industry.__repr__() + "_" + date_start + "_" + date_end + ".pkl"
pickle_smarket_name = "smarket_data_" + industry.__repr__() + "_" + date_start + "_" + date_end + ".pkl"
if pickle_use:
try:
if not os.path.exists(os.path.join(temp_data_folder, pickle_smarket_name)):
print("Getting data from smarket")
smarket_data = SmarketService(SmarketAnalyticsRepositoryPostgres()).get_smarket_counters(industry, date_start, date_end)
with open(os.path.join(temp_data_folder, pickle_smarket_name), 'wb') as f:
pickle.dump(smarket_data, f)
print("Data saved")
else:
print("Loading data from smarket")
with open(os.path.join(temp_data_folder, pickle_smarket_name), 'rb') as f:
smarket_data = pickle.load(f)
print("Data loaded")
if not os.path.exists(os.path.join(temp_data_folder, pickle_datamart_name)):
print("Getting data from datamart")
datamart_data = DatamartService(DatamartCountersRepositoryPostgres()).get_datamart_counters(industry_id, date_start, date_end)
with open(os.path.join(temp_data_folder, pickle_datamart_name), 'wb') as f:
pickle.dump(datamart_data, f)
print("Data saved")
else:
print("Loading data from datamart")
with open(os.path.join(temp_data_folder, pickle_datamart_name), 'rb') as f:
datamart_data = pickle.load(f)
print("Data loaded")
except NoDataFound:
print(f"Sem dados cadastrados para a industria no dia {date_start} até o dia {date_end}")
else:
print("Getting data from smarket")
smarket_data = SmarketService(SmarketAnalyticsRepositoryPostgres()).get_smarket_counters(industry, date_start,
date_end)
print("Getting data from datamart")
datamart_data = DatamartService(DatamartCountersRepositoryPostgres()).get_datamart_counters(industry_id, date_start,
date_end)
if len(smarket_data) == 0 or len(datamart_data) == 0:
print("Erro na execução:")
print(f"Data de consulta: {date_start} - {date_end}")
print(f"len(smarket_data) = {len(smarket_data)}")
print(f"len(colab_data) = {len(datamart_data)}")
print("Classificando dias como não aptos a atualização")
DB_CONTROLE.insert(break_no_data_found(industry, date_start, date_end, 'datamart'))
print("Finalizando processo")
return 0
# This nested-loop approach is ugly, but it's what we have for now
print('Iniciando processamento')
count_produtos_certos = {}
count_produtos_errados = {}
validation_intel = []
clientes_cadastrados_data = {}
# Check which clients are present for this industry in analytics on each sale date
for smarket_unit in smarket_data:
if smarket_unit.date.strftime("%Y-%m-%d") not in clientes_cadastrados_data:
clientes_cadastrados_data[smarket_unit.date.strftime("%Y-%m-%d")] = []
if smarket_unit.client_id not in clientes_cadastrados_data[smarket_unit.date.strftime("%Y-%m-%d")]:
clientes_cadastrados_data[smarket_unit.date.strftime("%Y-%m-%d")].append(smarket_unit.client_id)
# First validation: check whether product quantities match between the datamart and Analytics
for datamart_unit in datamart_data:
for smarket_unit in smarket_data:
if datamart_unit.id_cliente == smarket_unit.client_id:
if datamart_unit.date == smarket_unit.date:
if int(datamart_unit.seqproduto) == int(smarket_unit.seqproduto):
if int(datamart_unit.qtd_datamart) == int(smarket_unit.analytics):
if print_corretos:
print('-------------------')
print(f'Cliente {smarket_unit.client_id}')
print(f"Data {smarket_unit.date}")
print(f'Tudo certo no produto: {smarket_unit.seqproduto} - {smarket_unit.description}')
print(f'Quantitativo no analytics: {smarket_unit.analytics}')
print(f'Quantitativo no datamart: {datamart_unit.qtd_datamart}')
print('-------------------')
if smarket_unit.client_id not in count_produtos_certos:
count_produtos_certos[smarket_unit.client_id] = {}
if smarket_unit.date not in count_produtos_certos[smarket_unit.client_id]:
count_produtos_certos[smarket_unit.client_id][smarket_unit.date] = 0
count_produtos_certos[smarket_unit.client_id][smarket_unit.date] += 1
else:
print('-------------------')
print(f'Cliente {smarket_unit.client_id}')
print(f"Data {smarket_unit.date}")
print(f'ERRO no produto: {smarket_unit.seqproduto} - {smarket_unit.description}')
print(f'Quantitativo no analytics: {smarket_unit.analytics}')
print(f'Quantitativo no datamart: {datamart_unit.qtd_datamart}')
print('-------------------')
if smarket_unit.client_id not in count_produtos_errados:
count_produtos_errados[smarket_unit.client_id] = {}
if smarket_unit.date not in count_produtos_errados[smarket_unit.client_id]:
count_produtos_errados[smarket_unit.client_id][smarket_unit.date] = 0
count_produtos_errados[smarket_unit.client_id][smarket_unit.date] += 1
# Second validation - products present in the tables:
prod_cliente_data = {}
datamart_cliente_data = {}
produtos_ausentes_analytics = {}
produtos_ausentes_datamart = {}
datas_para_nao_subir = {}
for datamart_unit in datamart_data:
if datamart_unit.id_cliente not in datamart_cliente_data:
datamart_cliente_data[datamart_unit.id_cliente] = {}
if datamart_unit.date not in datamart_cliente_data[datamart_unit.id_cliente]:
datamart_cliente_data[datamart_unit.id_cliente][datamart_unit.date] = []
if datamart_unit.seqproduto not in datamart_cliente_data[datamart_unit.id_cliente][datamart_unit.date]:
datamart_cliente_data[datamart_unit.id_cliente][datamart_unit.date].append(datamart_unit.seqproduto)
for smarket_unit in smarket_data:
if smarket_unit.client_id not in prod_cliente_data:
prod_cliente_data[smarket_unit.client_id] = {}
if smarket_unit.date not in prod_cliente_data[smarket_unit.client_id]:
prod_cliente_data[smarket_unit.client_id][smarket_unit.date] = []
if smarket_unit.seqproduto not in prod_cliente_data[smarket_unit.client_id][smarket_unit.date]:
prod_cliente_data[smarket_unit.client_id][smarket_unit.date].append(smarket_unit.seqproduto)
# Find products missing from the datamart base
for client_id, date_counter in prod_cliente_data.items():
for date, prod_list in date_counter.items():
for prod in prod_list:
if client_id in datamart_cliente_data:
if date in datamart_cliente_data[client_id]:
if prod not in datamart_cliente_data[client_id][date]:
if client_id not in produtos_ausentes_datamart:
produtos_ausentes_datamart[client_id] = {}
if date not in produtos_ausentes_datamart[client_id]:
produtos_ausentes_datamart[client_id][date] = []
print(f"Produto {prod} não encontrado na base do colab para cliente: {client_id} na data: {date}, para a industria {industry_id}")
produtos_ausentes_datamart[client_id][date].append(prod)
else:
print(f"Data: {date} não encontrada na base do colab para cliente: {client_id} para a industria {industry_id}")
if client_id not in datas_para_nao_subir:
datas_para_nao_subir[client_id] = []
if date not in datas_para_nao_subir[client_id]:
validation_intel.append(ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=False
))
datas_para_nao_subir[client_id].append(date)
break
else:
print(f"Cliente {client_id} não encontrado na base do colab para industria: {industry_id}")
if client_id not in datas_para_nao_subir:
datas_para_nao_subir[client_id] = []
if date not in datas_para_nao_subir[client_id]:
validation_intel.append(ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=False
))
datas_para_nao_subir[client_id].append(date)
# Find products missing from the analytics base
for client_id, date_counter in datamart_cliente_data.items():
for date, prod_list in date_counter.items():
for prod in prod_list:
if client_id in prod_cliente_data:
if date in prod_cliente_data[client_id]:
if prod not in prod_cliente_data[client_id][date]:
if client_id not in produtos_ausentes_analytics:
produtos_ausentes_analytics[client_id] = {}
if date not in produtos_ausentes_analytics[client_id]:
produtos_ausentes_analytics[client_id][date] = []
produtos_ausentes_analytics[client_id][date].append(prod)
print(f"Produto {prod} não encontrado na base do analytics para cliente: {client_id} na data: {date} para a industria {industry_id}")
else:
print(f"Data: {date} não encontrada na base do analytics para cliente: {client_id} para a industria {industry_id}")
if client_id not in datas_para_nao_subir:
datas_para_nao_subir[client_id] = []
if date not in datas_para_nao_subir[client_id]:
validation_intel.append(ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=False
))
datas_para_nao_subir[client_id].append(date)
break
else:
print(f"Cliente {client_id} não encontrado na base do colab para industria: {industry_id}")
if client_id not in datas_para_nao_subir:
datas_para_nao_subir[client_id] = []
if date not in datas_para_nao_subir[client_id]:
validation_intel.append(ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=False
))
datas_para_nao_subir[client_id].append(date)
# Build the first table based on the mismatched values
if len(count_produtos_errados) > 0:
for client_id, counter in count_produtos_errados.items():
for date, count in counter.items():
if client_id in datas_para_nao_subir:
if date.strftime("%Y-%m-%d") not in [x.strftime("%Y-%m-%d") for x in datas_para_nao_subir[client_id]]:
if count > 0:
validation_intel.append(
ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=False
)
)
else:
validation_intel.append(
ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=True
)
)
else:
validation_intel.append(
ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=False
)
)
if client_id not in datas_para_nao_subir:
datas_para_nao_subir[client_id] = []
if date not in datas_para_nao_subir[client_id]:
datas_para_nao_subir[client_id].append(date)
if len(count_produtos_certos) > 0:
for client_id, counter in count_produtos_certos.items():
for date, count in counter.items():
if client_id in datas_para_nao_subir:
if date.strftime("%Y-%m-%d") not in [x.strftime("%Y-%m-%d") for x in datas_para_nao_subir[client_id]]:
if count > 0:
validation_intel.append(
ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=True
)
)
else:
validation_intel.append(
ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=client_id,
sale_date=date,
validation_status=True
)
)
# Validate the controls already registered
sql_controle_validate = f"""
select distinct dt_venda, id_industria, id_cliente from public.controle
where 0=0
and created_at >= current_date
and dt_venda between '{date_start}' and '{date_end}'
and etapa = 'fat_cupom'
and id_industria = {industry_id}
"""
controle_validate_data = DB_CONTROLE.get(sql_controle_validate)
controle_controle_data = {}
if len(controle_validate_data) > 0 :
for controle_validate_unit in controle_validate_data:
if controle_validate_unit[0] not in controle_controle_data:
controle_controle_data[controle_validate_unit[0]] = []
if controle_validate_unit[2] not in controle_controle_data[controle_validate_unit[0]]:
controle_controle_data[controle_validate_unit[0]].append(controle_validate_unit[2])
# Build the data used by the step below
validation_counter = {}
for validation_unit in validation_intel:
if validation_unit.sale_date not in validation_counter:
validation_counter[validation_unit.sale_date] = []
if validation_unit.client_id not in validation_counter[validation_unit.sale_date]:
validation_counter[validation_unit.sale_date].append(validation_unit.client_id)
# For every day where, for whatever reason, the client did not appear on a specific sale date, generate
# a validation record blocking the insertion
for data, clientes in controle_controle_data.items():
for cliente in clientes:
if cliente not in clientes_cadastrados_data.get(data.strftime("%Y-%m-%d"), []):  # .get avoids a KeyError when the sale date is absent from analytics
validation_intel.append(
ValidationDetail(
exec_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
industry_id=industry_id,
client_id=cliente,
sale_date=data,  # current sale date from this loop (was the stale 'date' variable from an earlier loop)
validation_status=False
)
)
print(f"Os dados do cliente {cliente}, para a industria {industry_id}, na data {date}, não está presente no datamart")
# Inserindo os dados no banco de validation:
sql_insert = """INSERT INTO
PUBLIC.CONTROLE(id_industria, id_cliente, dt_venda, status_validacao, etapa)
VALUES
"""
for validation in validation_intel:
sql_insert += f""" ({int(validation.industry_id)}, {int(validation.client_id)}, '{validation.sale_date}', {bool(validation.validation_status)}, 'datamart'),"""
sql_insert = sql_insert[:-1]
print(sql_insert)
DB_CONTROLE.insert(sql_insert)
| (industry_id: int, date_start, date_end, print_corretos=True, pickle_use=False) |
36,925 | nlpaug.util.action | Action | null | class Action:
INSERT = 'insert'
SUBSTITUTE = 'substitute'
DELETE = 'delete'
SWAP = 'swap'
SPLIT = 'split'
ALIGN = 'align'
CROP = 'crop'
SEQUENTIAL = 'sequential'
SOMETIMES = 'sometimes'
@staticmethod
def getall():
return [Action.INSERT, Action.SUBSTITUTE, Action.SWAP, Action.DELETE, Action.SPLIT, Action.CROP,
Action.SEQUENTIAL, Action.SOMETIMES, Action.ALIGN]
| () |
36,926 | nlpaug.util.action | getall | null | @staticmethod
def getall():
return [Action.INSERT, Action.SUBSTITUTE, Action.SWAP, Action.DELETE, Action.SPLIT, Action.CROP,
Action.SEQUENTIAL, Action.SOMETIMES, Action.ALIGN]
| () |
36,927 | nlpaug.base_augmenter | Augmenter | null | class Augmenter:
def __init__(self, name, method, action, aug_min, aug_max, aug_p=0.1, device='cpu',
include_detail=False, verbose=0):
self.name = name
self.action = action
self.method = method
self.aug_min = aug_min
self.aug_max = aug_max
self.aug_p = aug_p
self.device = device
self.verbose = verbose
self.include_detail = include_detail
self.parent_change_seq = 0
self._validate_augmenter(method, action)
@classmethod
def _validate_augmenter(cls, method, action):
if method not in Method.getall():
raise ValueError(
'Method must be one of {} while {} is passed'.format(Method.getall(), method))
if action not in Action.getall():
raise ValueError(
'Action must be one of {} while {} is passed'.format(Action.getall(), action))
def augment(self, data, n=1, num_thread=1):
"""
:param object/list data: Data for augmentation. It can be list of data (e.g. list
of string or numpy) or single element (e.g. string or numpy). Numpy format only
supports audio or spectrogram data. For text data, only support string or
list of string.
:param int n: Default is 1. Number of unique augmented outputs. Will be forced to 1
if input is a list of data
:param int num_thread: Number of threads for data augmentation. Use this option
when you are using CPU and n is larger than 1
:return: Augmented data
>>> augmented_data = aug.augment(data)
"""
max_retry_times = 3 # maximum number of retry loops used to reach the expected number of outputs
aug_num = 1 if isinstance(data, list) else n
expected_output_num = len(data) if isinstance(data, list) else aug_num
exceptions = self._validate_augment(data)
# TODO: Handle multiple exceptions
for exception in exceptions:
if isinstance(exception, WarningException):
if self.verbose > 0:
exception.output()
# Return empty value per data type
if isinstance(data, str):
return []
elif isinstance(data, list):
return []
elif isinstance(data, np.ndarray):
return np.array([])
return []
action_fx = None
clean_data = self.clean(data)
if self.action == Action.INSERT:
action_fx = self.insert
elif self.action == Action.SUBSTITUTE:
action_fx = self.substitute
elif self.action == Action.SWAP:
action_fx = self.swap
elif self.action == Action.DELETE:
action_fx = self.delete
elif self.action == Action.CROP:
action_fx = self.crop
elif self.action == Action.SPLIT:
action_fx = self.split
for _ in range(max_retry_times+1):
augmented_results = []
# By design, it is one-to-many
if self.__class__.__name__ in ['LambadaAug']:
augmented_results = action_fx(clean_data, n=n)
# PyTorch's augmenter
elif self.__class__.__name__ in ['AbstSummAug', 'BackTranslationAug', 'ContextualWordEmbsAug', 'ContextualWordEmbsForSentenceAug']:
for _ in range(aug_num):
result = action_fx(clean_data)
if isinstance(result, list):
augmented_results.extend(result)
else:
augmented_results.append(result)
# Multi inputs
elif isinstance(data, list):
# Single Thread
if num_thread == 1:
augmented_results = [action_fx(d) for d in clean_data]
# Multi Thread
else:
batch_data = [data[i:i+num_thread] for i in range(0, len(data), num_thread)]
for mini_batch_data in batch_data:
augmented_results.extend(self._parallel_augments(self.augment, mini_batch_data))
# Single input with/without multiple input
else:
# Single Thread
if num_thread == 1:
augmented_results = [action_fx(clean_data) for _ in range(n)]
# Multi Thread
else:
augmented_results = self._parallel_augment(action_fx, clean_data, n=n, num_thread=num_thread)
if len(augmented_results) >= expected_output_num:
break
if len(augmented_results) == 0:
# if there is no result, return the input itself
if isinstance(data, list):
return data
# Single input with/without multiple input
else:
return [data]
if isinstance(augmented_results, pd.DataFrame):
return augmented_results
else:
if isinstance(data, list):
return augmented_results
else:
return augmented_results[:n]
# return augmented_results
# def augments(self, data, num_thread=1):
# """
# :param list data: List of data
# :param int num_thread: Number of thread for data augmentation. Use this option when you are using CPU and
# n is larger than 1. Do NOT support GPU process.
# :return: Augmented data (Does not follow original order)
# >>> augmented_data = aug.augment(data)
# """
# n = 1
# augmented_results = []
# if num_thread == 1 or self.device == 'cuda':
# for d in data:
# augmented_result = self.augment(data=d, n=n, num_thread=1) # TODO: cuda does not support multithread
# if n == 1:
# augmented_results.append(augmented_result)
# else:
# augmented_results.extend(augmented_result)
# else:
# batch_data = [data[i:i+num_thread] for i in range(0, len(data), num_thread)]
# for i in range(n):
# for mini_batch_data in batch_data:
# augmented_results.extend(self._parallel_augments(self.augment, mini_batch_data))
# return augmented_results
@classmethod
def _validate_augment(cls, data):
if data is None or len(data) == 0:
return [WarningException(name=WarningName.INPUT_VALIDATION_WARNING,
code=WarningCode.WARNING_CODE_001, msg=WarningMessage.LENGTH_IS_ZERO)]
return []
@classmethod
def _parallel_augment(cls, action_fx, data, n, num_thread=2):
pool = ThreadPool(num_thread)
results = pool.map(action_fx, [data] * n)
pool.close()
pool.join()
return results
@classmethod
def _parallel_augments(cls, action_fx, data):
pool = ThreadPool(len(data))
results = pool.map(action_fx, data)
pool.close()
pool.join()
return results
def insert(self, data):
raise NotImplementedError
def substitute(self, data):
raise NotImplementedError
def swap(self, data):
raise NotImplementedError
def delete(self, data):
raise NotImplementedError
def crop(self, data):
raise NotImplementedError
def split(self, data):
raise NotImplementedError
def tokenizer(self, tokens):
raise NotImplementedError
def evaluate(self):
raise NotImplementedError
@classmethod
def is_duplicate(cls, dataset, data):
raise NotImplementedError
@classmethod
def prob(cls):
return np.random.random()
@classmethod
def sample(cls, x, num=None):
if isinstance(x, list):
return random.sample(x, num)
elif isinstance(x, int):
return np.random.randint(1, x-1)
@classmethod
def clean(cls, data):
raise NotImplementedError
def _generate_aug_cnt(self, size, aug_min, aug_max, aug_p=None):
if aug_p is not None:
percent = aug_p
elif self.aug_p:
percent = self.aug_p
else:
percent = 0.3
cnt = int(math.ceil(percent * size))
if aug_min and cnt < aug_min:
return aug_min
if aug_max and cnt > aug_max:
return aug_max
return cnt
def generate_aug_cnt(self, size, aug_p=None):
if size == 0:
return 0
return self._generate_aug_cnt(size, self.aug_min, self.aug_max, aug_p)
def generate_aug_idxes(self, inputs):
aug_cnt = self.generate_aug_cnt(len(inputs))
token_idxes = [i for i, _ in enumerate(inputs)]
aug_idxes = self.sample(token_idxes, aug_cnt)
return aug_idxes
def _get_random_aug_idxes(self, data):
aug_cnt = self.generate_aug_cnt(len(data))
idxes = self.pre_skip_aug(data)
if len(idxes) < aug_cnt:
aug_cnt = len(idxes)
aug_idxes = self.sample(idxes, aug_cnt)
return aug_idxes
def __str__(self):
return 'Name:{}, Action:{}, Method:{}'.format(self.name, self.action, self.method)
| (name, method, action, aug_min, aug_max, aug_p=0.1, device='cpu', include_detail=False, verbose=0) |
36,928 | nlpaug.base_augmenter | __init__ | null | def __init__(self, name, method, action, aug_min, aug_max, aug_p=0.1, device='cpu',
include_detail=False, verbose=0):
self.name = name
self.action = action
self.method = method
self.aug_min = aug_min
self.aug_max = aug_max
self.aug_p = aug_p
self.device = device
self.verbose = verbose
self.include_detail = include_detail
self.parent_change_seq = 0
self._validate_augmenter(method, action)
| (self, name, method, action, aug_min, aug_max, aug_p=0.1, device='cpu', include_detail=False, verbose=0) |
36,929 | nlpaug.base_augmenter | __str__ | null | def __str__(self):
return 'Name:{}, Action:{}, Method:{}'.format(self.name, self.action, self.method)
| (self) |
36,930 | nlpaug.base_augmenter | _generate_aug_cnt | null | def _generate_aug_cnt(self, size, aug_min, aug_max, aug_p=None):
if aug_p is not None:
percent = aug_p
elif self.aug_p:
percent = self.aug_p
else:
percent = 0.3
cnt = int(math.ceil(percent * size))
if aug_min and cnt < aug_min:
return aug_min
if aug_max and cnt > aug_max:
return aug_max
return cnt
| (self, size, aug_min, aug_max, aug_p=None) |
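A worked sketch of the count calculation above: with aug_p = 0.1 the count is ceil(0.1 * size), then clamped into [aug_min, aug_max] when those bounds are set (the values here are illustrative).
import math

size, aug_p, aug_min, aug_max = 25, 0.1, 1, 10
cnt = int(math.ceil(aug_p * size))        # ceil(2.5) -> 3
cnt = max(cnt, aug_min) if aug_min else cnt
cnt = min(cnt, aug_max) if aug_max else cnt
print(cnt)                                # 3 tokens would be augmented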
36,931 | nlpaug.base_augmenter | _get_random_aug_idxes | null | def _get_random_aug_idxes(self, data):
aug_cnt = self.generate_aug_cnt(len(data))
idxes = self.pre_skip_aug(data)
if len(idxes) < aug_cnt:
aug_cnt = len(idxes)
aug_idxes = self.sample(idxes, aug_cnt)
return aug_idxes
| (self, data) |
36,932 | nlpaug.base_augmenter | augment |
:param object/list data: Data for augmentation. It can be list of data (e.g. list
of string or numpy) or single element (e.g. string or numpy). Numpy format only
supports audio or spectrogram data. For text data, only support string or
list of string.
:param int n: Default is 1. Number of unique augmented outputs. Will be forced to 1
if input is a list of data
:param int num_thread: Number of threads for data augmentation. Use this option
when you are using CPU and n is larger than 1
:return: Augmented data
>>> augmented_data = aug.augment(data)
| def augment(self, data, n=1, num_thread=1):
"""
:param object/list data: Data for augmentation. It can be list of data (e.g. list
of string or numpy) or single element (e.g. string or numpy). Numpy format only
supports audio or spectrogram data. For text data, only support string or
list of string.
:param int n: Default is 1. Number of unique augmented outputs. Will be forced to 1
if input is a list of data
:param int num_thread: Number of threads for data augmentation. Use this option
when you are using CPU and n is larger than 1
:return: Augmented data
>>> augmented_data = aug.augment(data)
"""
max_retry_times = 3 # maximum number of retry loops used to reach the expected number of outputs
aug_num = 1 if isinstance(data, list) else n
expected_output_num = len(data) if isinstance(data, list) else aug_num
exceptions = self._validate_augment(data)
# TODO: Handle multiple exceptions
for exception in exceptions:
if isinstance(exception, WarningException):
if self.verbose > 0:
exception.output()
# Return empty value per data type
if isinstance(data, str):
return []
elif isinstance(data, list):
return []
elif isinstance(data, np.ndarray):
return np.array([])
return []
action_fx = None
clean_data = self.clean(data)
if self.action == Action.INSERT:
action_fx = self.insert
elif self.action == Action.SUBSTITUTE:
action_fx = self.substitute
elif self.action == Action.SWAP:
action_fx = self.swap
elif self.action == Action.DELETE:
action_fx = self.delete
elif self.action == Action.CROP:
action_fx = self.crop
elif self.action == Action.SPLIT:
action_fx = self.split
for _ in range(max_retry_times+1):
augmented_results = []
# By design, it is one-to-many
if self.__class__.__name__ in ['LambadaAug']:
augmented_results = action_fx(clean_data, n=n)
# PyTorch's augmenter
elif self.__class__.__name__ in ['AbstSummAug', 'BackTranslationAug', 'ContextualWordEmbsAug', 'ContextualWordEmbsForSentenceAug']:
for _ in range(aug_num):
result = action_fx(clean_data)
if isinstance(result, list):
augmented_results.extend(result)
else:
augmented_results.append(result)
# Multi inputs
elif isinstance(data, list):
# Single Thread
if num_thread == 1:
augmented_results = [action_fx(d) for d in clean_data]
# Multi Thread
else:
batch_data = [data[i:i+num_thread] for i in range(0, len(data), num_thread)]
for mini_batch_data in batch_data:
augmented_results.extend(self._parallel_augments(self.augment, mini_batch_data))
# Single input with/without multiple input
else:
# Single Thread
if num_thread == 1:
augmented_results = [action_fx(clean_data) for _ in range(n)]
# Multi Thread
else:
augmented_results = self._parallel_augment(action_fx, clean_data, n=n, num_thread=num_thread)
if len(augmented_results) >= expected_output_num:
break
if len(augmented_results) == 0:
# if there is no result, return the input itself
if isinstance(data, list):
return data
# Single input with/without multiple input
else:
return [data]
if isinstance(augmented_results, pd.DataFrame):
return augmented_results
else:
if isinstance(data, list):
return augmented_results
else:
return augmented_results[:n]
# return augmented_results
| (self, data, n=1, num_thread=1) |
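A sketch of the public entry point through a concrete word-level subclass, assuming nlpaug's RandomWordAug is available; the constructor arguments and the sample sentence are illustrative. A single string input with n=2 returns a list of up to two augmented variants.
import nlpaug.augmenter.word as naw

aug = naw.RandomWordAug(action="swap", aug_p=0.3)   # swap roughly 30% of the tokens
augmented = aug.augment("The quick brown fox jumps over the lazy dog", n=2)
print(augmented)   # list of up to 2 augmented sentences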
36,933 | nlpaug.base_augmenter | crop | null | def crop(self, data):
raise NotImplementedError
| (self, data) |
36,934 | nlpaug.base_augmenter | delete | null | def delete(self, data):
raise NotImplementedError
| (self, data) |
36,935 | nlpaug.base_augmenter | evaluate | null | def evaluate(self):
raise NotImplementedError
| (self) |
36,936 | nlpaug.base_augmenter | generate_aug_cnt | null | def generate_aug_cnt(self, size, aug_p=None):
if size == 0:
return 0
return self._generate_aug_cnt(size, self.aug_min, self.aug_max, aug_p)
| (self, size, aug_p=None) |
36,937 | nlpaug.base_augmenter | generate_aug_idxes | null | def generate_aug_idxes(self, inputs):
aug_cnt = self.generate_aug_cnt(len(inputs))
token_idxes = [i for i, _ in enumerate(inputs)]
aug_idxes = self.sample(token_idxes, aug_cnt)
return aug_idxes
| (self, inputs) |
36,938 | nlpaug.base_augmenter | insert | null | def insert(self, data):
raise NotImplementedError
| (self, data) |
36,939 | nlpaug.base_augmenter | split | null | def split(self, data):
raise NotImplementedError
| (self, data) |
36,940 | nlpaug.base_augmenter | substitute | null | def substitute(self, data):
raise NotImplementedError
| (self, data) |
36,941 | nlpaug.base_augmenter | swap | null | def swap(self, data):
raise NotImplementedError
| (self, data) |
36,942 | nlpaug.base_augmenter | tokenizer | null | def tokenizer(self, tokens):
raise NotImplementedError
| (self, tokens) |
36,943 | nlpaug.util.method | Method | null | class Method:
CHAR = 'char'
WORD = 'word'
SENTENCE = 'sentence'
SPECTROGRAM = 'spectrogram'
AUDIO = 'audio'
FLOW = 'flow'
@staticmethod
def getall():
return [Method.CHAR, Method.WORD, Method.SENTENCE, Method.AUDIO, Method.SPECTROGRAM, Method.FLOW]
| () |
36,944 | nlpaug.util.method | getall | null | @staticmethod
def getall():
return [Method.CHAR, Method.WORD, Method.SENTENCE, Method.AUDIO, Method.SPECTROGRAM, Method.FLOW]
| () |
36,945 | multiprocessing.dummy | Pool | null | def Pool(processes=None, initializer=None, initargs=()):
from ..pool import ThreadPool
return ThreadPool(processes, initializer, initargs)
| (processes=None, initializer=None, initargs=()) |
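As a quick aside on the record above: `multiprocessing.dummy.Pool` hands back a *thread*-based pool that mimics the `multiprocessing.Pool` interface, which is what the augmenter's multi-thread path builds on. A self-contained illustration:

```python
# multiprocessing.dummy.Pool returns a ThreadPool exposing the usual Pool API
# (map, close/join, context manager) but backed by threads, so it suits
# I/O-bound work such as calling out to tokenizers or remote models.
from multiprocessing.dummy import Pool

def text_length(text):
    return len(text)

with Pool(processes=4) as pool:
    lengths = pool.map(text_length, ['a', 'bb', 'ccc'])

print(lengths)  # [1, 2, 3]
```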
36,946 | nlpaug.util.exception.warning | WarningCode | null | class WarningCode:
WARNING_CODE_001 = 'W001'
WARNING_CODE_002 = 'W002'
| () |
36,947 | nlpaug.util.exception.warning | WarningException | null | class WarningException(ExceptionInfo):
def __init__(self, name, code, msg):
super(WarningException, self).__init__(name=name, exp_type=ExceptionType.WARNING, code=code, msg=msg)
| (name, code, msg) |
36,948 | nlpaug.util.exception.warning | __init__ | null | def __init__(self, name, code, msg):
super(WarningException, self).__init__(name=name, exp_type=ExceptionType.WARNING, code=code, msg=msg)
| (self, name, code, msg) |
36,949 | nlpaug.util.exception.exception_info | output | null | def output(self):
msg = '[{}] Name:{}, Code:{}, Message:{}'.format(self.exp_type, self.name, self.code, self.msg)
print(msg)
| (self) |
36,950 | nlpaug.util.exception.warning | WarningMessage | null | class WarningMessage:
LENGTH_IS_ZERO = 'Length of input is 0'
NO_WORD = 'No other word except stop words and OOV. Returning input data without augmentation'
DEPRECATED = 'Warning: {} will be removed after {} release. Change to use {}'
| () |
36,951 | nlpaug.util.exception.warning | WarningName | null | class WarningName:
INPUT_VALIDATION_WARNING = 'Input validation issue'
OUT_OF_VOCABULARY = 'Out of vocabulary issue'
| () |
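The warning helpers in the preceding records compose as sketched below; only names shown above are used, and the import path is taken from the module column of the records, so treat it as an assumption about the installed package layout. This is the path `augment()` takes when `_validate_augment` reports a problem and `verbose > 0`.

```python
# Sketch of how the nlpaug warning classes above fit together; the import path
# mirrors the module column of the records (nlpaug.util.exception.warning).
from nlpaug.util.exception.warning import (
    WarningCode, WarningException, WarningMessage, WarningName,
)

exc = WarningException(
    name=WarningName.INPUT_VALIDATION_WARNING,  # 'Input validation issue'
    code=WarningCode.WARNING_CODE_001,          # 'W001'
    msg=WarningMessage.LENGTH_IS_ZERO,          # 'Length of input is 0'
)

# output() prints a line of the form:
# [<warning type>] Name:Input validation issue, Code:W001, Message:Length of input is 0
exc.output()
```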
36,964 | sphinx_autodoc_typehints | InsertIndexInfo | InsertIndexInfo(insert_index: 'int', found_param: 'bool' = False, found_return: 'bool' = False, found_directive: 'bool' = False) | class InsertIndexInfo:
insert_index: int
found_param: bool = False
found_return: bool = False
found_directive: bool = False
| (insert_index: int, found_param: bool = False, found_return: bool = False, found_directive: bool = False) -> None |
36,965 | sphinx_autodoc_typehints | __eq__ | null | """Sphinx autodoc type hints."""
from __future__ import annotations
import ast
import importlib
import inspect
import re
import sys
import textwrap
import types
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, AnyStr, Callable, ForwardRef, NewType, TypeVar, get_type_hints
from docutils import nodes
from docutils.frontend import OptionParser
from sphinx.ext.autodoc.mock import mock
from sphinx.parsers import RSTParser
from sphinx.util import logging, rst
from sphinx.util.inspect import signature as sphinx_signature
from sphinx.util.inspect import stringify_signature
from .parser import parse
from .patches import install_patches
from .version import __version__
if TYPE_CHECKING:
from ast import FunctionDef, Module, stmt
from docutils.nodes import Node
from docutils.parsers.rst import states
from sphinx.application import Sphinx
from sphinx.config import Config
from sphinx.environment import BuildEnvironment
from sphinx.ext.autodoc import Options
_LOGGER = logging.getLogger(__name__)
_PYDATA_ANNOTATIONS = {"Any", "AnyStr", "Callable", "ClassVar", "Literal", "NoReturn", "Optional", "Tuple", "Union"}
# types has a bunch of things like ModuleType where ModuleType.__module__ is
# "builtins" and ModuleType.__name__ is "module", so we have to check for this.
_TYPES_DICT = {getattr(types, name): name for name in types.__all__}
# Prefer FunctionType to LambdaType (they are synonymous)
_TYPES_DICT[types.FunctionType] = "FunctionType"
def _get_types_type(obj: Any) -> str | None:
try:
return _TYPES_DICT.get(obj)
except Exception: # noqa: BLE001
# e.g. exception: unhashable type
return None
| (self, other) |
36,967 | sphinx_autodoc_typehints | __repr__ | null | def format_annotation(annotation: Any, config: Config) -> str: # noqa: C901, PLR0911, PLR0912, PLR0915, PLR0914
"""
Format the annotation.
:param annotation: the type annotation to render
:param config: the Sphinx config object (consulted for ``typehints_formatter`` and related options)
:return: a reST cross-reference string for the annotation
"""
typehints_formatter: Callable[..., str] | None = getattr(config, "typehints_formatter", None)
if typehints_formatter is not None:
formatted = typehints_formatter(annotation, config)
if formatted is not None:
return formatted
# Special cases
if isinstance(annotation, ForwardRef):
return annotation.__forward_arg__
if annotation is None or annotation is type(None):
return ":py:obj:`None`"
if annotation is Ellipsis:
return ":py:data:`...<Ellipsis>`"
if isinstance(annotation, tuple):
return format_internal_tuple(annotation, config)
try:
module = get_annotation_module(annotation)
class_name = get_annotation_class_name(annotation, module)
args = get_annotation_args(annotation, module, class_name)
except ValueError:
return str(annotation).strip("'")
# Redirect all typing_extensions types to the stdlib typing module
if module == "typing_extensions":
module = "typing"
if module == "_io":
module = "io"
full_name = f"{module}.{class_name}" if module != "builtins" else class_name
fully_qualified: bool = getattr(config, "typehints_fully_qualified", False)
prefix = "" if fully_qualified or full_name == class_name else "~"
role = "data" if module == "typing" and class_name in _PYDATA_ANNOTATIONS else "class"
args_format = "\\[{}]"
formatted_args: str | None = ""
always_use_bars_union: bool = getattr(config, "always_use_bars_union", True)
is_bars_union = full_name == "types.UnionType" or (
always_use_bars_union and type(annotation).__qualname__ == "_UnionGenericAlias"
)
if is_bars_union:
full_name = ""
# Some types require special handling
if full_name == "typing.NewType":
args_format = f"\\(``{annotation.__name__}``, {{}})"
role = "class" if sys.version_info >= (3, 10) else "func"
elif full_name in {"typing.TypeVar", "typing.ParamSpec"}:
params = {k: getattr(annotation, f"__{k}__") for k in ("bound", "covariant", "contravariant")}
params = {k: v for k, v in params.items() if v}
if "bound" in params:
params["bound"] = f" {format_annotation(params['bound'], config)}"
args_format = f"\\(``{annotation.__name__}``{', {}' if args else ''}"
if params:
args_format += "".join(f", {k}={v}" for k, v in params.items())
args_format += ")"
formatted_args = None if args else args_format
elif full_name == "typing.Optional":
args = tuple(x for x in args if x is not type(None))
elif full_name in {"typing.Union", "types.UnionType"} and type(None) in args:
if len(args) == 2: # noqa: PLR2004
full_name = "typing.Optional"
role = "data"
args = tuple(x for x in args if x is not type(None))
else:
simplify_optional_unions: bool = getattr(config, "simplify_optional_unions", True)
if not simplify_optional_unions:
full_name = "typing.Optional"
role = "data"
args_format = f"\\[:py:data:`{prefix}typing.Union`\\[{{}}]]"
args = tuple(x for x in args if x is not type(None))
elif full_name in {"typing.Callable", "collections.abc.Callable"} and args and args[0] is not ...:
fmt = [format_annotation(arg, config) for arg in args]
formatted_args = f"\\[\\[{', '.join(fmt[:-1])}], {fmt[-1]}]"
elif full_name == "typing.Literal":
formatted_args = f"\\[{', '.join(f'``{arg!r}``' for arg in args)}]"
elif is_bars_union:
return " | ".join([format_annotation(arg, config) for arg in args])
if args and not formatted_args:
try:
iter(args)
except TypeError:
fmt = [format_annotation(args, config)]
else:
fmt = [format_annotation(arg, config) for arg in args]
formatted_args = args_format.format(", ".join(fmt))
escape = "\\ " if formatted_args else ""
return f":py:{role}:`{prefix}{full_name}`{escape}{formatted_args}"
| (self) |
36,968 | typing | NewType | NewType creates simple unique types with almost zero
runtime overhead. NewType(name, tp) is considered a subtype of tp
by static type checkers. At runtime, NewType(name, tp) returns
a dummy callable that simply returns its argument. Usage::
UserId = NewType('UserId', int)
def name_by_id(user_id: UserId) -> str:
...
UserId('user') # Fails type check
name_by_id(42) # Fails type check
name_by_id(UserId(42)) # OK
num = UserId(5) + 1 # type: int
| class NewType:
"""NewType creates simple unique types with almost zero
runtime overhead. NewType(name, tp) is considered a subtype of tp
by static type checkers. At runtime, NewType(name, tp) returns
a dummy callable that simply returns its argument. Usage::
UserId = NewType('UserId', int)
def name_by_id(user_id: UserId) -> str:
...
UserId('user') # Fails type check
name_by_id(42) # Fails type check
name_by_id(UserId(42)) # OK
num = UserId(5) + 1 # type: int
"""
def __init__(self, name, tp):
self.__qualname__ = name
if '.' in name:
name = name.rpartition('.')[-1]
self.__name__ = name
self.__supertype__ = tp
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
def __repr__(self):
return f'{self.__module__}.{self.__qualname__}'
def __call__(self, x):
return x
def __reduce__(self):
return self.__qualname__
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
| (name, tp) |
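A short runnable sketch of the runtime behaviour the `NewType` docstring describes: the returned object is just an identity callable, so the "Fails type check" lines are flagged only by a static checker, never at runtime.

```python
# NewType has (almost) no runtime footprint -- __call__ simply returns its
# argument unchanged, and no wrapper class is created.
from typing import NewType

UserId = NewType('UserId', int)

uid = UserId(42)
print(uid, type(uid))   # 42 <class 'int'>
print(UserId('user'))   # 'user' -- only a static checker rejects this
print(UserId)           # e.g. '__main__.UserId' (see __repr__ above)
```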
36,969 | typing | __call__ | null | def __call__(self, x):
return x
| (self, x) |
36,970 | typing | __init__ | null | def __init__(self, name, tp):
self.__qualname__ = name
if '.' in name:
name = name.rpartition('.')[-1]
self.__name__ = name
self.__supertype__ = tp
def_mod = _caller()
if def_mod != 'typing':
self.__module__ = def_mod
| (self, name, tp) |
36,972 | typing | __reduce__ | null | def __reduce__(self):
return self.__qualname__
| (self) |