# [dataset artifact] The following residue ("python_code / stringlengths 0 /
# 108k / | / ---|") is a table-export column header, not part of the original
# Python sources concatenated below.
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`586`-compliant **type hint test data.**
'''
# ....................{ IMPORTS }....................
from beartype_test._util.mod.pytmodtyping import (
is_typing_attrs,
iter_typing_attrs,
)
from enum import Enum
# ....................{ ENUMERATIONS }....................
class _MasterlessDecreeVenomlessWhich(Enum):
'''
Arbitrary enumeration whose members are accessed below as literals.
'''
NOMENCLATURE_WEATHER_VANES_OF = 0
NOMINALLY_UNSWAIN_AUTODIDACTIC_IDIOCRACY_LESS_A = 1
# ....................{ ADDERS }....................
def add_data(data_module: 'ModuleType') -> None:
    '''
    Add :pep:`586`-compliant type hint test data to various global containers
    declared by the passed module.

    Parameters
    ----------
    data_module : ModuleType
        Module to be added to.
    '''

    # If *NO* typing module declares a "Literal" factory, the active Python
    # interpreter fails to support PEP 586. In this case, reduce to a noop.
    if not is_typing_attrs('Literal'):
        # print('Ignoring "Literal"...')
        return
    # print('Testing "Literal"...')
    # Else, this interpreter supports PEP 586.

    # ..................{ IMPORTS }..................
    # Defer attribute-dependent imports.
    from beartype.typing import List
    from beartype._data.hint.pep.sign.datapepsigns import (
        HintSignList,
        HintSignLiteral,
    )
    from beartype_test.a00_unit.data.hint.util.data_hintmetacls import (
        HintPepMetadata,
        HintPithSatisfiedMetadata,
        HintPithUnsatisfiedMetadata,
    )

    # ..................{ FACTORIES }..................
    # For each "Literal" type hint factory importable from a typing module
    # (i.e., either "typing" or "typing_extensions")...
    for Literal in iter_typing_attrs('Literal'):
        # Add PEP 586-specific test type hints to this tuple global.
        data_module.HINTS_PEP_META.extend((
            # ..............{ LITERALS }..............
            # Literal "None" singleton. Look, this is ridiculous. What can
            # you do?
            HintPepMetadata(
                hint=Literal[None],
                pep_sign=HintSignLiteral,
                is_args=True,
                piths_meta=(
                    # "None" singleton defined by the same syntax.
                    HintPithSatisfiedMetadata(None),
                    # "None" singleton defined by different syntax but
                    # semantically equal to the "None" singleton.
                    HintPithSatisfiedMetadata(
                        {}.get('Looting Uncouth, ruddy Bȴood and')),
                    # String constant.
                    HintPithUnsatisfiedMetadata(
                        pith='Worthily untrust-',
                        # Match that the exception message raised for this
                        # object embeds the representation of the expected
                        # type.
                        exception_str_match_regexes=(r'\bNone\b',),
                    ),
                ),
            ),

            # Literal arbitrary boolean. (Not that there are many of those...)
            HintPepMetadata(
                hint=Literal[True],
                pep_sign=HintSignLiteral,
                is_args=True,
                piths_meta=(
                    # Boolean constant defined by the same syntax.
                    HintPithSatisfiedMetadata(True),
                    # Boolean constant defined by different syntax but
                    # semantically equal to the same boolean.
                    HintPithSatisfiedMetadata(data_module is data_module),
                    # Boolean constant *NOT* equal to the same boolean.
                    HintPithUnsatisfiedMetadata(
                        pith=False,
                        # Match that the exception message raised for this
                        # object embeds the representation of the expected
                        # literal.
                        exception_str_match_regexes=(r'\bTrue\b',),
                    ),
                    # Integer constant semantically equal to the same boolean
                    # but of a differing type.
                    HintPithUnsatisfiedMetadata(
                        pith=1,
                        # Match that the exception message raised for this
                        # object embeds the representation of the expected
                        # type.
                        exception_str_match_regexes=(r'\bbool\b',),
                    ),
                ),
            ),

            # Literal arbitrary integer.
            HintPepMetadata(
                hint=Literal[0x2a],
                pep_sign=HintSignLiteral,
                is_args=True,
                piths_meta=(
                    # Integer constant defined by the same syntax.
                    HintPithSatisfiedMetadata(0x2a),
                    # Integer constant defined by different syntax but
                    # semantically equal to the same integer.
                    HintPithSatisfiedMetadata(42),
                    # Integer constant *NOT* equal to the same integer.
                    HintPithUnsatisfiedMetadata(
                        pith=41,
                        # Match that the exception message raised for this
                        # object embeds the representation of the expected
                        # literal.
                        exception_str_match_regexes=(r'\b42\b',),
                    ),
                    # Floating-point constant semantically equal to the same
                    # integer but of a differing type.
                    HintPithUnsatisfiedMetadata(
                        pith=42.0,
                        # Match that the exception message raised for this
                        # object embeds the representation of the expected
                        # type.
                        exception_str_match_regexes=(r'\bint\b',),
                    ),
                ),
            ),

            # Literal arbitrary byte string.
            HintPepMetadata(
                hint=Literal[
                    b"Worthy, 'vain truthiness of (very invective-elected)"],
                pep_sign=HintSignLiteral,
                is_args=True,
                piths_meta=(
                    # Byte string constant defined by the same syntax.
                    HintPithSatisfiedMetadata(
                        b"Worthy, 'vain truthiness of (very invective-elected)"),
                    # Byte string constant defined by different syntax but
                    # semantically equal to the same byte string.
                    HintPithSatisfiedMetadata(
                        b"Worthy, 'vain truthiness of "
                        b"(very invective-elected)"
                    ),
                    # Byte string constant *NOT* equal to the same byte string.
                    HintPithUnsatisfiedMetadata(
                        pith=b"Thanes within",
                        # Match that the exception message raised for this
                        # object embeds the representation of the expected
                        # literal.
                        exception_str_match_regexes=(r'\btruthiness\b',),
                    ),
                    # Unicode string constant semantically equal to the same
                    # byte string but of a differing type.
                    HintPithUnsatisfiedMetadata(
                        pith=(
                            "Worthy, 'vain truthiness of "
                            "(very invective-elected)"
                        ),
                        # Match that the exception message raised for this
                        # object embeds the representation of the expected
                        # type.
                        exception_str_match_regexes=(r'\bbytes\b',),
                    ),
                ),
            ),

            # Literal arbitrary Unicode string.
            HintPepMetadata(
                hint=Literal['Thanklessly classed, nominal'],
                pep_sign=HintSignLiteral,
                is_args=True,
                piths_meta=(
                    # Unicode string constant defined by the same syntax.
                    HintPithSatisfiedMetadata('Thanklessly classed, nominal'),
                    # Unicode string constant defined by different syntax but
                    # semantically equal to the same Unicode string.
                    HintPithSatisfiedMetadata(
                        'Thanklessly classed, '
                        'nominal'
                    ),
                    # Unicode string constant *NOT* equal to the same string.
                    HintPithUnsatisfiedMetadata(
                        pith='Mass and',
                        # Match that the exception message raised for this
                        # object embeds the representation of the expected
                        # literal.
                        exception_str_match_regexes=(r'\bnominal\b',),
                    ),
                    # Byte string constant semantically equal to the same
                    # Unicode string but of a differing type.
                    HintPithUnsatisfiedMetadata(
                        pith=b'Thanklessly classed, nominal',
                        # Match that the exception message raised for this
                        # object embeds the representation of the expected
                        # type.
                        exception_str_match_regexes=(r'\bstr\b',),
                    ),
                ),
            ),

            # Literal arbitrary enumeration member.
            HintPepMetadata(
                hint=Literal[
                    _MasterlessDecreeVenomlessWhich.
                    NOMENCLATURE_WEATHER_VANES_OF
                ],
                pep_sign=HintSignLiteral,
                is_args=True,
                piths_meta=(
                    # Enumeration member accessed by the same syntax.
                    HintPithSatisfiedMetadata(
                        _MasterlessDecreeVenomlessWhich.
                        NOMENCLATURE_WEATHER_VANES_OF
                    ),
                    # Enumeration member accessed by different syntax but
                    # semantically equal to the same enumeration member.
                    HintPithSatisfiedMetadata(
                        _MasterlessDecreeVenomlessWhich(0)),
                    # Enumeration member *NOT* equal to the same member.
                    HintPithUnsatisfiedMetadata(
                        pith=(
                            _MasterlessDecreeVenomlessWhich.
                            NOMINALLY_UNSWAIN_AUTODIDACTIC_IDIOCRACY_LESS_A
                        ),
                        # Match that the exception message raised for this
                        # object embeds the representation of the expected
                        # literal.
                        exception_str_match_regexes=(
                            r'\bNOMENCLATURE_WEATHER_VANES_OF\b',),
                    ),
                    # Integer constant semantically equal to the same index of
                    # this enumeration member but of a differing type.
                    HintPithUnsatisfiedMetadata(
                        pith=0,
                        # Match that the exception message raised for this
                        # object embeds the representation of the expected
                        # type.
                        exception_str_match_regexes=(
                            r'\b_MasterlessDecreeVenomlessWhich\b',),
                    ),
                ),
            ),

            # ..............{ LITERALS ~ nested }..............
            # List of literal arbitrary Unicode strings.
            HintPepMetadata(
                hint=List[Literal[
                    'ç‐omically gnomical whitebellied burden’s empathy of']],
                pep_sign=HintSignList,
                isinstanceable_type=list,
                piths_meta=(
                    # List of Unicode string constants semantically equal to
                    # the same Unicode string.
                    HintPithSatisfiedMetadata([
                        'ç‐omically gnomical whitebellied burden’s empathy of',
                        (
                            'ç‐omically gnomical '
                            'whitebellied burden’s '
                            'empathy of'
                        ),
                    ]),
                    # List of Unicode string constants *NOT* equal to the same
                    # Unicode string.
                    HintPithUnsatisfiedMetadata(
                        pith=[
                            'Earpiece‐piecemealed, mealy straw headpiece‐',
                            'Earned peace appeasement easements',
                        ],
                        # Match that the exception message raised for this
                        # object embeds the representation of the expected
                        # literal.
                        exception_str_match_regexes=(r'\bgnomical\b',),
                    ),
                    # List of byte string constants.
                    HintPithUnsatisfiedMetadata(
                        pith=[
                            b'Than',
                            b"Thankful strumpet's",
                        ],
                        # Match that the exception message raised for this
                        # object embeds the representation of the expected
                        # type.
                        exception_str_match_regexes=(r'\bstr\b',),
                    ),
                ),
            ),

            # ..............{ LITERALS ~ union }..............
            # Literal union of two or more arbitrary literal objects.
            HintPepMetadata(
                hint=Literal[
                    None,
                    True,
                    0x2a,
                    b"Worthy, 'vain truthiness of (very invective-elected)",
                    'Thanklessly classed, nominal',
                    (
                        _MasterlessDecreeVenomlessWhich.
                        NOMENCLATURE_WEATHER_VANES_OF
                    ),
                ],
                pep_sign=HintSignLiteral,
                is_args=True,
                piths_meta=(
                    # Literal objects subscripting this literal union.
                    HintPithSatisfiedMetadata(None),
                    HintPithSatisfiedMetadata(True),
                    HintPithSatisfiedMetadata(0x2a),
                    HintPithSatisfiedMetadata(
                        b"Worthy, 'vain truthiness of (very invective-elected)"
                    ),
                    HintPithSatisfiedMetadata('Thanklessly classed, nominal'),
                    HintPithSatisfiedMetadata(
                        _MasterlessDecreeVenomlessWhich.
                        NOMENCLATURE_WEATHER_VANES_OF
                    ),
                    # Arbitrary object of the same type as one or more literal
                    # objects subscripting this literal union but unequal to
                    # any objects subscripting this literal union.
                    HintPithUnsatisfiedMetadata(
                        pith='Empirism‐Tṙumpeted,',
                        # Match that the exception message raised for this
                        # object embeds the representation of all expected
                        # literals.
                        exception_str_match_regexes=(
                            r'\bNone\b',
                            r'\bTrue\b',
                            r'\b42\b',
                            r'\btruthiness\b',
                            r'\bnominal\b',
                            r'\bNOMENCLATURE_WEATHER_VANES_OF\b',
                        ),
                    ),
                    # Arbitrary object of a differing type from all literal
                    # objects subscripting this literal union.
                    HintPithUnsatisfiedMetadata(
                        pith=42.0,
                        # Match that the exception message raised for this
                        # object embeds the representation of all expected
                        # types.
                        exception_str_match_regexes=(
                            r'\bNone\b',
                            r'\bbool\b',
                            r'\bint\b',
                            r'\bbytes\b',
                            r'\bstr\b',
                            r'\b_MasterlessDecreeVenomlessWhich\b',
                        ),
                    ),
                ),
            ),
        ))
# [dataset artifact] Table-row separator between two concatenated source files.
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`589`-compliant **type hint test data.**
'''
# ....................{ IMPORTS }....................
from beartype_test._util.mod.pytmodtyping import (
import_typing_attr_or_none_safe)
# ....................{ ADDERS }....................
def add_data(data_module: 'ModuleType') -> None:
    '''
    Add :pep:`589`-compliant type hint test data to various global containers
    declared by the passed module.

    Parameters
    ----------
    data_module : ModuleType
        Module to be added to.
    '''

    # "TypedDict" type hint factory imported from either the "typing" or
    # "typing_extensions" modules if importable *OR* "None" otherwise.
    TypedDict = import_typing_attr_or_none_safe('TypedDict')

    # If this factory is unimportable, the active Python interpreter fails to
    # support PEP 589. In this case, reduce to a noop.
    if TypedDict is None:
        return
    # Else, this interpreter supports PEP 589.

    # ..................{ IMPORTS }..................
    # Defer attribute-dependent imports.
    from beartype._data.hint.pep.sign.datapepsigns import (
        HintSignList,
        HintSignTypedDict,
    )
    from beartype_test.a00_unit.data.hint.util.data_hintmetacls import (
        HintPepMetadata,
        HintPithSatisfiedMetadata,
        HintPithUnsatisfiedMetadata,
    )
    from typing import List, Type, Union

    # ..................{ SUBCLASSES }..................
    class ISeemAsInATranceSublimeAndStrange(TypedDict):
        '''
        Arbitrary empty typed dictionary annotated to require *NO* key-value
        pairs.

        While patently absurd, this dictionary exercises an uncommon edge case
        in :pep:`589`.
        '''

        pass


    class DizzyRavine(ISeemAsInATranceSublimeAndStrange):
        '''
        Arbitrary non-empty typed dictionary annotated to require arbitrary
        key-value pairs, intentionally subclassing the empty typed dictionary
        subclass :class:`ISeemAsInATranceSublimeAndStrange` to trivially
        exercise subclassability.
        '''

        # Arbitrary key whose value is annotated to be a PEP-noncompliant
        # instance of an isinstanceable type.
        and_when: str

        # Arbitrary key whose value is annotated to be a PEP-compliant union of
        # either a subclass of an issubclassable type or a PEP-noncompliant
        # instance of an isinstanceable type.
        I_gaze_on_thee: Union[bytes, Type[Exception]]

    #FIXME: Note that even this fails to suffice, thanks to *CRAY-CRAY*
    #subclassing logic that absolutely no one has ever exercised, but which
    #we'll nonetheless need to support. And I quoth:
    #    The totality flag only applies to items defined in the body of the
    #    TypedDict definition. Inherited items won't be affected, and instead
    #    use totality of the TypedDict type where they were defined. This makes
    #    it possible to have a combination of required and non-required keys in
    #    a single TypedDict type.
    #Ergo, we need to additionally declare yet another new class subclassing
    #"ToMuse" but *NOT* explicitly subclassed with a "total" keyword parameter.
    #This clearly gets *EXTREMELY* ugly *EXTREMELY* fast, as we'll now need to
    #iterate over "hint.__mro__" in our code generation algorithm. Well, I
    #suppose we technically needed to do that anyway... but still. Yikes!
    class ToMuse(TypedDict, total=False):
        '''
        Arbitrary non-empty typed dictionary annotated to require zero or more
        arbitrary key-value pairs.
        '''

        # Arbitrary key whose value is annotated to be a PEP-noncompliant
        # instance of an isinstanceable type.
        on_my_own: str

        # Arbitrary key whose value is annotated to be a PEP-compliant union of
        # either a subclass of an issubclassable type or a PEP-noncompliant
        # instance of an isinstanceable type.
        separate_fantasy: Union[Type[Exception], bytes]

    # ..................{ TUPLES }..................
    # Add PEP 589-specific test type hints to this tuple global.
    data_module.HINTS_PEP_META.extend((
        # ................{ TYPEDDICT }................
        # Empty typed dictionary. Look, this is ridiculous. What can you do?
        HintPepMetadata(
            hint=ISeemAsInATranceSublimeAndStrange,
            pep_sign=HintSignTypedDict,
            is_type_typing=False,
            piths_meta=(
                # Empty dictionary instantiated with standard Python syntax.
                HintPithSatisfiedMetadata({}),
                # Empty dictionary instantiated from this typed dictionary.
                HintPithSatisfiedMetadata(ISeemAsInATranceSublimeAndStrange()),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    pith='Hadithian bodies kindle Bodkin deathbeds',
                    # Match that the exception message raised for this object
                    # embeds the representation of the expected type.
                    exception_str_match_regexes=(r'\bMapping\b',),
                ),
                #FIXME: Uncomment *AFTER* deeply type-checking "TypedDict".
                # # Non-empty dictionary.
                # HintPithSatisfiedMetadata({
                #     'Corinthian bodachean kinslayers lay': (
                #         'wedded weal‐kith with in‐'),
                # }),
            ),
        ),

        # Non-empty totalizing typed dictionary.
        HintPepMetadata(
            hint=DizzyRavine,
            pep_sign=HintSignTypedDict,
            is_type_typing=False,
            piths_meta=(
                # Non-empty dictionary of the expected keys and values.
                HintPithSatisfiedMetadata({
                    'and_when': 'Corrigible‐ragged gun corruptions within',
                    'I_gaze_on_thee': b"Hatross-ev-olved eleven imp's",
                }),
                # Non-empty dictionary of the expected keys and values
                # instantiated from this typed dictionary.
                HintPithSatisfiedMetadata(DizzyRavine(
                    and_when=(
                        'Machiavellian‐costumed, tumid stock fonts of a'),
                    I_gaze_on_thee=RuntimeError,
                )),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    pith='Matross‐elevated elven velvet atrocities of',
                    # Match that the exception message raised for this object
                    # embeds the representation of the expected type.
                    exception_str_match_regexes=(r'\bMapping\b',),
                ),
                # #FIXME: Uncomment *AFTER* deeply type-checking "TypedDict".
                # # Empty dictionary.
                # HintPithUnsatisfiedMetadata(
                #     pith={},
                #     # Match that the exception message raised for this object
                #     # embeds the expected number of key-value pairs.
                #     exception_str_match_regexes=(r'\b2\b',),
                # ),
                # # Non-empty dictionary of the expected keys but *NOT* values.
                # HintPithUnsatisfiedMetadata(
                #     pith={
                #         'and_when': 'Matricidally',
                #         'I_gaze_on_thee': (
                #             'Hatchet‐cachepotting, '
                #             'Scossetting mock misrule by'
                #         ),
                #     },
                #     # Match that the exception message raised for this object
                #     # embeds:
                #     # * The name of the unsatisfied key.
                #     # * The expected types of this key's value.
                #     exception_str_match_regexes=(
                #         r'\bI_gaze_on_thee\b',
                #         r'\bbytes\b',
                #     ),
                # ),
            ),
        ),

        # Non-empty non-totalizing typed dictionary.
        HintPepMetadata(
            hint=ToMuse,
            pep_sign=HintSignTypedDict,
            is_type_typing=False,
            piths_meta=(
                # Empty dictionary.
                HintPithSatisfiedMetadata({}),
                # Non-empty dictionary defining only one of the expected keys.
                HintPithSatisfiedMetadata({
                    'on_my_own': (
                        'Spurned Court‐upturned, upper gladness, '
                        'edifyingly humidifying'),
                }),
                # Non-empty dictionary defining *ALL* of the expected keys,
                # instantiated from this typed dictionary.
                HintPithSatisfiedMetadata(ToMuse(
                    on_my_own=(
                        'Sepulchral epic‐âpostatizing home tombs metem‐'),
                    separate_fantasy=b'Macroglia relics',
                )),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    pith=(
                        'Psychotically tempered Into temporal '
                        'afterwork‐met portals portending a'
                    ),
                    # Match that the exception message raised for this object
                    # embeds the representation of the expected type.
                    exception_str_match_regexes=(r'\bMapping\b',),
                ),
                # #FIXME: Uncomment *AFTER* deeply type-checking "TypedDict".
                # # Non-empty dictionary of the expected keys but *NOT* values.
                # HintPithUnsatisfiedMetadata(
                #     pith={
                #         'on_my_own': (
                #             'Psyche’s Maidenly‐enladened, '
                #             'aidful Lads‐lickspittling Potenc‐ies —',
                #         ),
                #         'separate_fantasy': (
                #             'Psychedelic metal‐metastasized, glib'),
                #     },
                #     # Match that the exception message raised for this object
                #     # embeds:
                #     # * The name of the unsatisfied key.
                #     # * The expected types of this key's value.
                #     exception_str_match_regexes=(
                #         r'\bseparate_fantasy\b',
                #         r'\bbytes\b',
                #     ),
                # ),
            ),
        ),

        # ................{ LITERALS ~ nested }................
        # List of non-empty totalizing typed dictionaries.
        HintPepMetadata(
            hint=List[DizzyRavine],
            pep_sign=HintSignList,
            isinstanceable_type=list,
            piths_meta=(
                # List of dictionaries of the expected keys and values.
                HintPithSatisfiedMetadata([
                    {
                        'and_when': (
                            'Matriculating ‘over‐sized’ '
                            'research urchin Haunts of',
                        ),
                        'I_gaze_on_thee': b"Stands - to",
                    },
                    {
                        'and_when': (
                            'That resurrected, Erectile reptile’s '
                            'pituitary capitulations to',
                        ),
                        'I_gaze_on_thee': b"Strand our under-",
                    },
                ]),
                # List of string constants.
                HintPithUnsatisfiedMetadata(
                    pith=[
                        'D-as K-apital, '
                        'notwithstanding Standard adiós‐',
                    ],
                    # Match that the exception message raised for this object
                    # embeds the representation of the expected type.
                    exception_str_match_regexes=(r'\bMapping\b',),
                ),
                # #FIXME: Uncomment *AFTER* deeply type-checking "TypedDict".
                # # List of empty dictionaries.
                # HintPithUnsatisfiedMetadata(
                #     pith=[{}, {},],
                #     # Match that the exception message raised for this object
                #     # embeds the expected number of key-value pairs.
                #     exception_str_match_regexes=(r'\b2\b',),
                # ),
                # # List of non-empty dictionaries, only one of which fails to
                # # define both the expected keys and values.
                # HintPithUnsatisfiedMetadata(
                #     pith=[
                #         {
                #             'and_when': (
                #                 'Diased capitalization of (or into)'),
                #             'I_gaze_on_thee': (
                #                 b'Witheringly dithering, dill husks of'),
                #         },
                #         {
                #             'and_when': (
                #                 'Will, like Whitewash‐ed, musky'),
                #             'I_gaze_on_thee': 'Likenesses injecting',
                #         },
                #     ],
                #     # Match that the exception message raised for this object
                #     # embeds:
                #     # * The index of the unsatisfied dictionary.
                #     # * The name of the unsatisfied key.
                #     # * The expected types of this key's value.
                #     exception_str_match_regexes=(
                #         r'\b1\b',
                #         r'\bI_gaze_on_thee\b',
                #         r'\bbytes\b',
                #     ),
                # ),
            ),
        ),
    ))
# [dataset artifact] Table-row separator between two concatenated source files.
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`484`-compliant **type hint test data.**
Caveats
----------
Note that:
* The :pep:`484`-compliant annotated builtin containers created and returned by
the :func:`typing.NamedTuple` and :func:`typing.TypedDict` factory functions
are *mostly* indistinguishable from PEP-noncompliant types and thus
intentionally tested in the
:mod:`beartype_test.a00_unit.data.hint.nonpep.proposal._data_nonpep484`
submodule rather than here despite being standardized by :pep:`484`.
* The ``typing.Supports*`` family of abstract base classes (ABCs) are
intentionally tested in the
:mod:`beartype_test.a00_unit.data.hint.pep.proposal._data_pep544`
submodule rather than here despite being specified by :pep:`484` and
available under Python < 3.8. Why? Because the implementation of these ABCs
under Python < 3.8 is unusable at runtime, which is nonsensical and awful,
but that's :mod:`typing` for you. What you goin' do?
'''
# ....................{ IMPORTS }....................
import contextlib, re
from beartype._cave._cavefast import (
RegexMatchType,
RegexCompiledType,
)
from beartype_test.a00_unit.data.data_type import (
Class,
Subclass,
SubclassSubclass,
OtherClass,
OtherSubclass,
# OtherSubclassSubclass,
context_manager_factory,
)
from beartype_test.a00_unit.data.hint.util.data_hintmetacls import (
HintPepMetadata,
HintPithSatisfiedMetadata,
HintPithUnsatisfiedMetadata,
)
from collections import abc as collections_abc
from typing import (
Any,
AnyStr,
BinaryIO,
ByteString,
Callable,
Container,
ContextManager,
Dict,
Generic,
Hashable,
IO,
Iterable,
List,
Match,
MutableSequence,
NewType,
Pattern,
Sequence,
Sized,
TextIO,
Tuple,
Type,
TypeVar,
Optional,
Union,
)
# ....................{ TYPEVARS ~ unbounded              }....................
S = TypeVar('S')
'''
Arbitrary unbounded (i.e., universally applicable) type variable.
'''


T = TypeVar('T')
'''
Arbitrary unbounded (i.e., universally applicable) type variable.
'''

# ....................{ TYPEVARS ~ bounded                }....................
T_BOUNDED = TypeVar('T_BOUNDED', bound=int)
'''
Arbitrary **bounded type variable** (i.e., type variable parametrized by a
PEP-compliant child type hint passed as the ``bound`` keyword argument).
'''


T_CONSTRAINED = TypeVar('T_CONSTRAINED', str, bytes)
'''
Arbitrary **constrained type variable** (i.e., type variable parametrized by
two or more PEP-compliant child type hints passed as positional arguments).
'''

# ....................{ GENERICS ~ io                     }....................
PEP484_GENERICS_IO = frozenset((BinaryIO, IO, TextIO,))
'''
Frozen set of all :pep:`484`-compliant :mod:`typing` IO generic base classes.
'''
# ....................{ GENERICS ~ single }....................
class Pep484GenericTypevaredSingle(Generic[S, T]):
    '''
    :pep:`484`-compliant user-defined generic subclassing a single
    :mod:`typing` type parametrized by two type variables.
    '''

    # Intentionally empty: only the class declaration itself is under test.
    pass
# ....................{ PRIVATE ~ generics : single }....................
class _Pep484GenericUnsubscriptedSingle(List):
'''
:pep:`484`-compliant user-defined generic subclassing a single
unsubscripted :mod:`typing` type.
'''
pass
class _Pep484GenericUntypevaredShallowSingle(List[str]):
'''
:pep:`484`-compliant user-defined generic subclassing a single
unparametrized :mod:`typing` type.
'''
pass
class _Pep484GenericUntypevaredDeepSingle(List[List[str]]):
'''
:pep:`484`-compliant user-defined generic subclassing a single
unparametrized :mod:`typing` type, itself subclassing a single
unparametrized :mod:`typing` type.
'''
pass
# ....................{ PRIVATE ~ generics : multiple }....................
class _Pep484GenericUntypevaredMultiple(
collections_abc.Callable, ContextManager[str], Sequence[str]):
'''
:pep:`484`-compliant user-defined generic subclassing multiple
unparametrized :mod:`typing` types *and* a non-:mod:`typing` abstract base
class (ABC).
'''
# ..................{ INITIALIZERS }..................
def __init__(self, sequence: tuple) -> None:
'''
Initialize this generic from the passed tuple.
'''
assert isinstance(sequence, tuple), f'{repr(sequence)} not tuple.'
self._sequence = sequence
# ..................{ ABCs }..................
# Define all protocols mandated by ABCs subclassed by this generic above.
def __call__(self) -> int:
return len(self)
def __contains__(self, obj: object) -> bool:
return obj in self._sequence
def __enter__(self) -> object:
return self
def __exit__(self, *args, **kwargs) -> bool:
return False
def __getitem__(self, index: int) -> object:
return self._sequence[index]
def __iter__(self) -> bool:
return iter(self._sequence)
def __len__(self) -> bool:
return len(self._sequence)
def __reversed__(self) -> object:
return self._sequence.reverse()
class _Pep484GenericTypevaredShallowMultiple(Iterable[T], Container[T]):
    '''
    :pep:`484`-compliant user-defined generic subclassing multiple directly
    parametrized :mod:`typing` types.
    '''

    # ..................{ INITIALIZERS }..................
    def __init__(self, iterable: tuple) -> None:
        '''
        Initialize this generic from the passed tuple.

        Parameters
        ----------
        iterable : tuple
            Tuple wrapped by this generic.
        '''
        assert isinstance(iterable, tuple), f'{repr(iterable)} not tuple.'
        self._iterable = iterable

    # ..................{ ABCs }..................
    # Define all protocols mandated by ABCs subclassed by this generic above.
    def __contains__(self, obj: object) -> bool:
        return obj in self._iterable

    # Note: returns an iterator, *NOT* a boolean (annotation corrected).
    def __iter__(self) -> collections_abc.Iterator:
        return iter(self._iterable)
class _Pep484GenericTypevaredDeepMultiple(
    collections_abc.Sized, Iterable[Tuple[S, T]], Container[Tuple[S, T]]):
    '''
    :pep:`484`-compliant user-defined generic subclassing multiple indirectly
    parametrized :mod:`typing` types *and* a non-:mod:`typing` abstract base
    class (ABC).
    '''

    # ..................{ INITIALIZERS }..................
    def __init__(self, iterable: tuple) -> None:
        '''
        Initialize this generic from the passed tuple.

        Parameters
        ----------
        iterable : tuple
            Tuple wrapped by this generic.
        '''
        assert isinstance(iterable, tuple), f'{repr(iterable)} not tuple.'
        self._iterable = iterable

    # ..................{ ABCs }..................
    # Define all protocols mandated by ABCs subclassed by this generic above.
    def __contains__(self, obj: object) -> bool:
        return obj in self._iterable

    # Note: returns an iterator, *NOT* a boolean (annotation corrected).
    def __iter__(self) -> collections_abc.Iterator:
        return iter(self._iterable)

    # Note: returns an integer, *NOT* a boolean (annotation corrected).
    def __len__(self) -> int:
        return len(self._iterable)
# ....................{ PRIVATE ~ forwardref }....................
_TEST_PEP484_FORWARDREF_CLASSNAME = (
'beartype_test.a00_unit.data.data_type.Subclass')
'''
Fully-qualified classname of an arbitrary class guaranteed to be importable.
'''
_TEST_PEP484_FORWARDREF_TYPE = Subclass
'''
Arbitrary class referred to by :data:`_PEP484_FORWARDREF_CLASSNAME`.
'''
# ....................{ ADDERS }....................
def add_data(data_module: 'ModuleType') -> None:
'''
Add :pep:`484`-compliant type hint test data to various global containers
declared by the passed module.
Parameters
----------
data_module : ModuleType
Module to be added to.
'''
# ..................{ IMPORTS }..................
# Defer fixture-specific imports.
from beartype.door import (
CallableTypeHint,
NewTypeTypeHint,
TypeVarTypeHint,
UnionTypeHint,
)
from beartype._data.hint.pep.sign.datapepsigns import (
HintSignAny,
HintSignByteString,
HintSignCallable,
HintSignContextManager,
HintSignDict,
HintSignForwardRef,
HintSignGeneric,
HintSignHashable,
HintSignList,
HintSignMatch,
HintSignMutableSequence,
HintSignNewType,
HintSignNone,
HintSignOptional,
HintSignPattern,
HintSignSequence,
HintSignSized,
HintSignTuple,
HintSignType,
HintSignTypeVar,
HintSignUnion,
)
from beartype._util.hint.pep.proposal.pep484.utilpep484ref import (
HINT_PEP484_FORWARDREF_TYPE)
from beartype._util.py.utilpyversion import (
IS_PYTHON_AT_MOST_3_10,
IS_PYTHON_AT_LEAST_3_11,
IS_PYTHON_AT_LEAST_3_10,
IS_PYTHON_AT_LEAST_3_9,
)
# ..................{ BOOLEANS }..................
# True only if unsubscripted typing attributes (i.e., public attributes of
# the "typing" module without arguments) are parametrized by one or more
# type variables under the active Python interpreter.
#
# This boolean is true for Python interpreters targeting Python < 3.9.
# Prior to Python 3.9, the "typing" module parametrized most unsubscripted
# typing attributes by default. Python 3.9 halted that barbaric practice by
# leaving unsubscripted typing attributes unparametrized by default.
_IS_TYPEVARS_HIDDEN = not IS_PYTHON_AT_LEAST_3_9
# True only if unsubscripted typing attributes (i.e., public attributes of
# the "typing" module without arguments) are actually subscripted by one or
# more type variables under the active Python interpreter.
#
# This boolean is true for Python interpreters targeting 3.6 < Python <
# 3.9, oddly. (We don't make the rules. We simply complain about them.)
_IS_ARGS_HIDDEN = False
# ..................{ SETS }..................
# Add PEP 484-specific shallowly ignorable test type hints to that set
# global.
data_module.HINTS_PEP_IGNORABLE_SHALLOW.update((
# The "Generic" superclass imposes no constraints and is thus also
# semantically synonymous with the ignorable PEP-noncompliant
# "beartype.cave.AnyType" and hence "object" types. Since PEP
# 484 stipulates that *ANY* unsubscripted subscriptable PEP-compliant
# singleton including "typing.Generic" semantically expands to that
# singelton subscripted by an implicit "Any" argument, "Generic"
# semantically expands to the implicit "Generic[Any]" singleton.
Generic,
))
# Add PEP 484-specific deeply ignorable test type hints to this set global.
data_module.HINTS_PEP_IGNORABLE_DEEP.update((
# Parametrizations of the "typing.Generic" abstract base class (ABC).
Generic[S, T],
# New type aliasing any ignorable type hint.
NewType('TotallyNotAny', Any),
NewType('TotallyNotObject', object),
# Optionals containing any ignorable type hint.
Optional[Any],
Optional[object],
# Unions containing any ignorable type hint.
Union[Any, float, str,],
Union[complex, int, object,],
))
# ..................{ TUPLES }..................
# Add PEP 484-specific test type hints to this dictionary global.
data_module.HINTS_PEP_META.extend((
# ................{ UNSUBSCRIPTED }................
# Note that the PEP 484-compliant unsubscripted "NoReturn" type hint is
# permissible *ONLY* as a return annotation and *MUST* thus be
# exercised independently with special-purposed unit tests.
# Unsubscripted "Any" singleton.
HintPepMetadata(
hint=Any,
pep_sign=HintSignAny,
is_ignorable=True,
),
# Unsubscripted "ByteString" singleton. Bizarrely, note that:
# * "collections.abc.ByteString" is subscriptable under PEP 585.
# * "typing.ByteString" is *NOT* subscriptable under PEP 484.
# Since neither PEP 484 nor 585 comment on "ByteString" in detail (or
# at all, really), this non-orthogonality remains inexplicable,
# frustrating, and utterly unsurprising. We elect to merely shrug.
HintPepMetadata(
hint=ByteString,
pep_sign=HintSignByteString,
isinstanceable_type=collections_abc.ByteString,
piths_meta=(
# Byte string constant.
HintPithSatisfiedMetadata(
b'By nautical/particle consciousness'),
# Byte array initialized from a byte string constant.
HintPithSatisfiedMetadata(
bytearray(b"Hour's straight fates, (distemperate-ly)")),
# String constant.
HintPithUnsatisfiedMetadata(
'At that atom-nestled canticle'),
),
),
# Unsubscripted "Hashable" attribute.
HintPepMetadata(
hint=Hashable,
pep_sign=HintSignHashable,
isinstanceable_type=collections_abc.Hashable,
piths_meta=(
# String constant.
HintPithSatisfiedMetadata(
"Oh, importunate Θ Fortuna'd afforded"),
# Tuple of string constants.
HintPithSatisfiedMetadata((
'Us vis‐a‐vis conduit fjords',
'Of weal‐th, and well‐heeled,',
)),
# List of string constants.
HintPithUnsatisfiedMetadata([
'Oboes‐obsoleting tines',
'Of language',
]),
),
),
# Unsubscripted "Sized" attribute.
HintPepMetadata(
hint=Sized,
pep_sign=HintSignSized,
isinstanceable_type=collections_abc.Sized,
piths_meta=(
# String constant.
HintPithSatisfiedMetadata('Faire, a'),
# Tuple of string constants.
HintPithSatisfiedMetadata((
'Farthing scrap',
'Of comfort’s ‘om’‐Enwrapped, rapt appeal — that',
)),
# Boolean constant.
HintPithUnsatisfiedMetadata(False),
),
),
# ................{ UNSUBSCRIPTED ~ forwardref }................
# Forward references defined below are *ONLY* intended to shallowly
# exercise support for types of forward references across the codebase;
# they are *NOT* intended to deeply exercise resolution of forward
# references to undeclared classes, which requires more finesse.
#
# See the "data_hintref" submodule for the latter.
# Unsubscripted forward reference defined as a simple string.
HintPepMetadata(
hint=_TEST_PEP484_FORWARDREF_CLASSNAME,
pep_sign=HintSignForwardRef,
is_type_typing=False,
piths_meta=(
# Instance of the class referred to by this reference.
HintPithSatisfiedMetadata(_TEST_PEP484_FORWARDREF_TYPE()),
# String object.
HintPithUnsatisfiedMetadata(
'Empirical Ṗath after‐mathematically harvesting agro‐'),
),
),
# Unsubscripted forward reference defined as a typing object.
HintPepMetadata(
hint=HINT_PEP484_FORWARDREF_TYPE(
_TEST_PEP484_FORWARDREF_CLASSNAME),
pep_sign=HintSignForwardRef,
piths_meta=(
# Instance of the class referred to by this reference.
HintPithSatisfiedMetadata(_TEST_PEP484_FORWARDREF_TYPE()),
# String object.
HintPithUnsatisfiedMetadata('Silvicultures of'),
),
),
# ................{ UNSUBSCRIPTED ~ none }................
# Unsubscripted "None" singleton, which transparently reduces to
# "types.NoneType". While not explicitly defined by the "typing" module,
# PEP 484 explicitly supports this singleton:
# When used in a type hint, the expression None is considered
# equivalent to type(None).
HintPepMetadata(
hint=None,
pep_sign=HintSignNone,
is_type_typing=False,
piths_meta=(
# "None" singleton.
HintPithSatisfiedMetadata(None),
# String constant.
HintPithUnsatisfiedMetadata('Betossing Bilious libidos, and'),
),
),
# ................{ UNSUBSCRIPTED ~ typevar : unbound }................
# Unbounded type variable.
HintPepMetadata(
hint=T,
pep_sign=HintSignTypeVar,
typehint_cls=TypeVarTypeHint,
#FIXME: Remove after fully supporting type variables.
is_ignorable=True,
is_typing=False,
piths_meta=(
# Builtin "int" class itself.
HintPithSatisfiedMetadata(int),
# String constant.
HintPithSatisfiedMetadata('Oblate weapon Stacks (actually'),
                # By definition, all objects satisfy all unbounded type
                # variables. Ergo, we define *NO* "HintPithUnsatisfiedMetadata"
                # objects here.
),
),
# ................{ UNSUBSCRIPTED ~ typevar : bound }................
# Constrained type variable declared by the "typing" module.
HintPepMetadata(
hint=AnyStr,
pep_sign=HintSignTypeVar,
typehint_cls=TypeVarTypeHint,
#FIXME: Remove after fully supporting type variables.
is_ignorable=True,
piths_meta=(
# String constant.
HintPithSatisfiedMetadata('We were mysteries, unwon'),
# Byte string constant.
HintPithSatisfiedMetadata(b'We donned apportionments'),
# Integer constant.
HintPithUnsatisfiedMetadata(0x8BADF00D), # <-- 2343432205
# List of string constants.
HintPithUnsatisfiedMetadata([
'Of Politico‐policed diction maledictions,',
'Of that numeral addicts’ “—Additive game,” self‐',
]),
),
),
# User-defined constrained type variable.
HintPepMetadata(
hint=T_CONSTRAINED,
pep_sign=HintSignTypeVar,
typehint_cls=TypeVarTypeHint,
#FIXME: Remove after fully supporting type variables.
is_ignorable=True,
is_typing=False,
piths_meta=(
# String constant.
HintPithSatisfiedMetadata('Prim (or'),
# Byte string constant.
HintPithSatisfiedMetadata(
b'Primely positional) Quality inducements'),
# Integer constant.
HintPithUnsatisfiedMetadata(0xABADBABE), # <-- 2880289470
# List of string constants.
HintPithUnsatisfiedMetadata([
'Into lavishly crested, crestfallen ',
'epaulette‐cross‐pollinated st‐Ints,',
]),
),
),
# User-defined bounded type variable.
HintPepMetadata(
hint=T_BOUNDED,
pep_sign=HintSignTypeVar,
typehint_cls=TypeVarTypeHint,
#FIXME: Remove after fully supporting type variables.
is_ignorable=True,
is_typing=False,
piths_meta=(
# Integer constant.
HintPithSatisfiedMetadata(0x0B00B135), # <-- 184594741
# String constant.
HintPithUnsatisfiedMetadata(
'Light‐expectorating, aspectant '
'thujone‐inspecting enswathement of'
),
# List of integer constants.
HintPithUnsatisfiedMetadata([0xBAAAAAAD, 0xBADDCAFE,]),
),
),
# ................{ CALLABLE }................
# Callable accepting no parameters and returning a string.
HintPepMetadata(
hint=Callable[[], str],
pep_sign=HintSignCallable,
typehint_cls=CallableTypeHint,
isinstanceable_type=collections_abc.Callable,
piths_meta=(
# Lambda function returning a string constant.
HintPithSatisfiedMetadata(lambda: 'Eudaemonia.'),
# String constant.
HintPithUnsatisfiedMetadata('...grant we heal'),
),
),
# ................{ CONTEXTMANAGER }................
# Context manager yielding strings.
HintPepMetadata(
hint=ContextManager[str],
pep_sign=HintSignContextManager,
isinstanceable_type=contextlib.AbstractContextManager,
piths_meta=(
# Context manager.
HintPithSatisfiedMetadata(
pith=lambda: context_manager_factory(
'We were mysteries, unwon'),
is_context_manager=True,
is_pith_factory=True,
),
# String constant.
HintPithUnsatisfiedMetadata('We donned apportionments'),
),
),
# ................{ DICT }................
# Unsubscripted "Dict" attribute.
HintPepMetadata(
hint=Dict,
pep_sign=HintSignDict,
is_args=_IS_ARGS_HIDDEN,
is_typevars=_IS_TYPEVARS_HIDDEN,
isinstanceable_type=dict,
piths_meta=(
# Dictionary containing arbitrary key-value pairs.
HintPithSatisfiedMetadata({
'Of': 'our disappointment’s purse‐anointed ire',
'Offloading': '1. Coffer‐bursed statehood ointments;',
}),
# Set containing arbitrary items.
HintPithUnsatisfiedMetadata({
'2. Disjointly jade‐ and Syndicate‐disbursed retirement funds,',
'Untiringly,'
}),
),
),
# Flat dictionary.
HintPepMetadata(
hint=Dict[int, str],
pep_sign=HintSignDict,
isinstanceable_type=dict,
piths_meta=(
# Dictionary mapping integer keys to string values.
HintPithSatisfiedMetadata({
1: 'For taxing',
2: "To a lax and golden‐rendered crucifixion, affix'd",
}),
# String constant.
HintPithUnsatisfiedMetadata(
'To that beep‐prattling, LED‐ and lead-rattling crux'),
),
),
# Generic dictionary.
HintPepMetadata(
hint=Dict[S, T],
pep_sign=HintSignDict,
isinstanceable_type=dict,
is_typevars=True,
piths_meta=(
# Dictionary mapping string keys to integer values.
HintPithSatisfiedMetadata({
'Less-ons"-chastened': 2,
'Chanson': 1,
}),
# String constant.
HintPithUnsatisfiedMetadata('Swansong.'),
),
),
# ................{ GENERATOR }................
# Note that testing generators requires creating generators, which
# require a different syntax to that of standard callables; ergo,
# generator type hints are tested elsewhere.
# ................{ GENERICS ~ single }................
# Generic subclassing a single unsubscripted "typing" type.
HintPepMetadata(
hint=_Pep484GenericUnsubscriptedSingle,
pep_sign=HintSignGeneric,
generic_type=_Pep484GenericUnsubscriptedSingle,
is_type_typing=False,
piths_meta=(
# Subclass-specific generic list of string constants.
HintPithSatisfiedMetadata(_Pep484GenericUnsubscriptedSingle((
'Ibid., incredibly indelible, edible craws a',
'Of a liturgically upsurging, Θṙgiast‐ic holiness, and',
))),
# String constant.
HintPithUnsatisfiedMetadata(
'To pare their geognostic screeds'),
# List of string constants.
HintPithUnsatisfiedMetadata([
'Of voluntary simplicities, Creed‐crinkled cities',
'Of a liberal quiet, and',
]),
),
),
# Generic subclassing a single shallowly unparametrized "typing" type.
HintPepMetadata(
hint=_Pep484GenericUntypevaredShallowSingle,
pep_sign=HintSignGeneric,
generic_type=_Pep484GenericUntypevaredShallowSingle,
is_type_typing=False,
piths_meta=(
# Subclass-specific generic list of string constants.
HintPithSatisfiedMetadata(
_Pep484GenericUntypevaredShallowSingle((
'Forgive our Vocation’s vociferous publications',
'Of',
))
),
# String constant.
HintPithUnsatisfiedMetadata(
'Hourly sybaritical, pub sabbaticals'),
# List of string constants.
HintPithUnsatisfiedMetadata([
'Materially ostracizing, itinerant‐',
'Anchoretic digimonks initiating',
]),
),
),
# Generic subclassing a single deeply unparametrized "typing" type.
HintPepMetadata(
hint=_Pep484GenericUntypevaredDeepSingle,
pep_sign=HintSignGeneric,
generic_type=_Pep484GenericUntypevaredDeepSingle,
is_type_typing=False,
piths_meta=(
# Subclass-specific generic list of list of string constants.
HintPithSatisfiedMetadata(
_Pep484GenericUntypevaredDeepSingle([
[
'Intravenous‐averse effigy defamations, traversing',
'Intramurally venal-izing retro-',
],
[
'Versions of a ',
"Version 2.2.a‐excursioned discursive Morningrise's ravenous ad-",
],
])
),
# String constant.
HintPithUnsatisfiedMetadata('Vent of'),
# List of string constants.
HintPithUnsatisfiedMetadata([
"Ventral‐entrailed rurality's cinder-",
'Block pluralities of',
]),
# Subclass-specific generic list of string constants.
HintPithUnsatisfiedMetadata(
_Pep484GenericUntypevaredDeepSingle([
'Block-house stockade stocks, trailer',
'Park-entailed central heating, though those',
])
),
),
),
# Generic subclassing a single parametrized "typing" type.
HintPepMetadata(
hint=Pep484GenericTypevaredSingle,
pep_sign=HintSignGeneric,
generic_type=Pep484GenericTypevaredSingle,
is_typevars=True,
is_type_typing=False,
piths_meta=(
# Subclass-specific generic.
HintPithSatisfiedMetadata(Pep484GenericTypevaredSingle()),
# String constant.
HintPithUnsatisfiedMetadata(
'An arterially giving, triage nature — '
'like this agat‐adzing likeness'
),
),
),
# Generic subclassing a single parametrized "typing" type, itself
# parametrized by the same type variables in the same order.
HintPepMetadata(
hint=Pep484GenericTypevaredSingle[S, T],
pep_sign=HintSignGeneric,
generic_type=Pep484GenericTypevaredSingle,
is_typevars=True,
is_type_typing=True,
is_typing=False,
piths_meta=(
# Subclass-specific generic.
HintPithSatisfiedMetadata(Pep484GenericTypevaredSingle()),
# String constant.
HintPithUnsatisfiedMetadata(
'Token welfare’s malformed keening fare, keenly despaired'
),
),
),
# ................{ GENERICS ~ multiple }................
# Generic subclassing multiple unparametrized "typing" types *AND* a
# non-"typing" abstract base class (ABC).
HintPepMetadata(
hint=_Pep484GenericUntypevaredMultiple,
pep_sign=HintSignGeneric,
generic_type=_Pep484GenericUntypevaredMultiple,
is_type_typing=False,
piths_meta=(
# Subclass-specific generic 2-tuple of string constants.
HintPithSatisfiedMetadata(_Pep484GenericUntypevaredMultiple((
'Into a viscerally Eviscerated eras’ meditative hallways',
'Interrupting Soul‐viscous, vile‐ly Viceroy‐insufflating',
))),
# String constant.
HintPithUnsatisfiedMetadata('Initiations'),
# 2-tuple of string constants.
HintPithUnsatisfiedMetadata((
"Into a fat mendicant’s",
'Endgame‐defendant, dedicate rants',
)),
),
),
# Generic subclassing multiple parametrized "typing" types.
HintPepMetadata(
hint=_Pep484GenericTypevaredShallowMultiple,
pep_sign=HintSignGeneric,
generic_type=_Pep484GenericTypevaredShallowMultiple,
# is_args=False,
is_typevars=True,
is_type_typing=False,
piths_meta=(
# Subclass-specific generic iterable of string constants.
HintPithSatisfiedMetadata(
_Pep484GenericTypevaredShallowMultiple((
"Of foliage's everliving antestature —",
'In us, Leviticus‐confusedly drunk',
)),
),
# String constant.
HintPithUnsatisfiedMetadata("In Usufructose truth's"),
),
),
# Generic subclassing multiple indirectly parametrized "typing" types
# *AND* a non-"typing" abstract base class (ABC).
HintPepMetadata(
hint=_Pep484GenericTypevaredDeepMultiple,
pep_sign=HintSignGeneric,
generic_type=_Pep484GenericTypevaredDeepMultiple,
# is_args=False,
is_typevars=True,
is_type_typing=False,
piths_meta=(
# Subclass-specific generic iterable of 2-tuples of string
# constants.
HintPithSatisfiedMetadata(
_Pep484GenericTypevaredDeepMultiple((
(
'Inertially tragicomipastoral, pastel anticandour —',
'remanding undemanding',
),
(
'Of a',
'"hallow be Thy nameless',
),
)),
),
# String constant.
HintPithUnsatisfiedMetadata('Invitations'),
),
),
# Nested list of PEP 484-compliant generics.
HintPepMetadata(
hint=List[_Pep484GenericUntypevaredMultiple],
pep_sign=HintSignList,
isinstanceable_type=list,
piths_meta=(
# List of subclass-specific generic 2-tuples of string
# constants.
HintPithSatisfiedMetadata([
_Pep484GenericUntypevaredMultiple((
'Stalling inevit‐abilities)',
'For carbined',
)),
_Pep484GenericUntypevaredMultiple((
'Power-over (than',
'Power-with)',
)),
]),
# String constant.
HintPithUnsatisfiedMetadata(
'that forced triforced, farcically carcinogenic Obelisks'),
# List of 2-tuples of string constants.
HintPithUnsatisfiedMetadata([
(
'Obliterating their literate decency',
'Of a cannabis‐enthroning regency',
),
]),
),
),
# ................{ LIST }................
# Unsubscripted "List" attribute.
HintPepMetadata(
hint=List,
pep_sign=HintSignList,
isinstanceable_type=list,
is_args=_IS_ARGS_HIDDEN,
is_typevars=_IS_TYPEVARS_HIDDEN,
piths_meta=(
# Empty list, which satisfies all hint arguments by definition.
HintPithSatisfiedMetadata([]),
                # List containing arbitrary items.
HintPithSatisfiedMetadata([
'Of an Autos‐respirating, ăutonomies‐gnashing machineries‐',
'Laxity, and taxonomic attainment',
3,
]),
# String constant.
HintPithUnsatisfiedMetadata('Of acceptance.'),
# Tuple containing arbitrary items.
HintPithUnsatisfiedMetadata((
'Of their godliest Tellurion’s utterance —“Șuper‐ior!”;',
'3. And Utter‐most, gutterly gut‐rending posts, glutton',
3.1415,
)),
),
),
# List of ignorable objects.
HintPepMetadata(
hint=List[object],
pep_sign=HintSignList,
isinstanceable_type=list,
piths_meta=(
# Empty list, which satisfies all hint arguments by definition.
HintPithSatisfiedMetadata([]),
# List of arbitrary objects.
HintPithSatisfiedMetadata([
'Of philomathematically bliss‐postulating Seas',
'Of actuarial postponement',
23.75,
]),
# String constant.
HintPithUnsatisfiedMetadata(
'Of actual change elevating alleviation — that'),
),
),
# List of non-"typing" objects.
HintPepMetadata(
hint=List[str],
pep_sign=HintSignList,
isinstanceable_type=list,
piths_meta=(
# Empty list, which satisfies all hint arguments by definition.
HintPithSatisfiedMetadata([]),
# List of strings.
HintPithSatisfiedMetadata([
'Ously overmoist, ov‐ertly',
'Deverginating vertigo‐originating',
]),
# String constant.
HintPithUnsatisfiedMetadata('Devilet‐Sublet cities waxing'),
# List containing exactly one integer. Since list items are
# only randomly type-checked, only a list of exactly one item
# enables us to match the explicit index at fault below.
HintPithUnsatisfiedMetadata(
pith=[1010011010,], # <-- oh, we've done it now
# Match that the exception message raised for this
# object...
exception_str_match_regexes=(
# Declares the index of the random list item *NOT*
# satisfying this hint.
r'\b[Ll]ist index \d+ item\b',
# Preserves the value of this item as is.
r'\s1010011010\s',
),
),
),
),
# Generic list.
HintPepMetadata(
hint=List[T],
pep_sign=HintSignList,
isinstanceable_type=list,
is_typevars=True,
piths_meta=(
# Empty list, which satisfies all hint arguments by definition.
HintPithSatisfiedMetadata([]),
# List of strings.
HintPithSatisfiedMetadata([
'Lesion this ice-scioned',
'Legion',
]),
# String constant.
HintPithUnsatisfiedMetadata(
'Lest we succumb, indelicately, to'),
),
),
# ................{ NEWTYPE }................
# New type aliasing a non-ignorable type.
HintPepMetadata(
hint=NewType('TotallyNotAStr', str),
pep_sign=HintSignNewType,
typehint_cls=NewTypeTypeHint,
# "typing.NewType" type hints are always declared by that module.
is_typing=True,
# If the active Python interpreter targets:
# * Python >= 3.10, "typing.NewType" type hints are instances of
# that class -- which is thus declared by the "typing" module.
# * Else, "typing.NewType" type hints are merely pure-Python
# closures of the pure-Python function type -- which is *NOT*
# declared by the "typing" module.
is_type_typing=IS_PYTHON_AT_LEAST_3_10,
piths_meta=(
# String constant.
HintPithSatisfiedMetadata('Ishmælite‐ish, aberrant control'),
# Tuple of string constants.
HintPithUnsatisfiedMetadata((
'Of Common Street‐harrying barrens',
'Of harmony, harm’s abetting Harlem bedlam, and',
)),
),
),
# ................{ REGEX ~ match }................
# Regular expression match of either strings or byte strings.
HintPepMetadata(
hint=Match,
pep_sign=HintSignMatch,
isinstanceable_type=RegexMatchType,
is_args=_IS_ARGS_HIDDEN,
is_typevars=_IS_TYPEVARS_HIDDEN,
piths_meta=(
# Regular expression match of one or more string constants.
HintPithSatisfiedMetadata(re.search(
r'\b[a-z]+ance[a-z]+\b',
'æriferous Elements’ dance, entranced',
)),
# String constant.
HintPithUnsatisfiedMetadata(
'Formless, demiurgic offerings, preliminarily,'),
),
),
# Regular expression match of only strings.
HintPepMetadata(
hint=Match[str],
pep_sign=HintSignMatch,
isinstanceable_type=RegexMatchType,
piths_meta=(
# Regular expression match of one or more string constants.
HintPithSatisfiedMetadata(re.search(
r'\b[a-z]+itiat[a-z]+\b',
'Vitiating novitiate Succubæ – a',
)),
# String constant.
HintPithUnsatisfiedMetadata('Into Elitistly'),
),
),
# ................{ REGEX ~ pattern }................
# Regular expression pattern of either strings or byte strings.
HintPepMetadata(
hint=Pattern,
pep_sign=HintSignPattern,
isinstanceable_type=RegexCompiledType,
is_args=_IS_ARGS_HIDDEN,
is_typevars=_IS_TYPEVARS_HIDDEN,
piths_meta=(
# Regular expression string pattern.
HintPithSatisfiedMetadata(
re.compile(r'\b[A-Z]+ANCE[A-Z]+\b')),
# String constant.
HintPithUnsatisfiedMetadata('Legal indiscretions'),
),
),
# Regular expression pattern of only strings.
HintPepMetadata(
hint=Pattern[str],
pep_sign=HintSignPattern,
isinstanceable_type=RegexCompiledType,
piths_meta=(
# Regular expression string pattern.
HintPithSatisfiedMetadata(
re.compile(r'\b[A-Z]+ITIAT[A-Z]+\b')),
# String constant.
HintPithUnsatisfiedMetadata('Obsessing men'),
),
),
# ................{ SUBCLASS }................
# Unsubscripted "Type" singleton.
HintPepMetadata(
hint=Type,
pep_sign=HintSignType,
isinstanceable_type=type,
is_args=_IS_ARGS_HIDDEN,
is_typevars=_IS_TYPEVARS_HIDDEN,
piths_meta=(
# Transitive superclass of all superclasses.
HintPithSatisfiedMetadata(object),
# Arbitrary class.
HintPithSatisfiedMetadata(str),
# String constant.
HintPithUnsatisfiedMetadata('Samely:'),
),
),
# Any type, semantically equivalent under PEP 484 to the unsubscripted
# "Type" singleton.
HintPepMetadata(
hint=Type[Any],
pep_sign=HintSignType,
isinstanceable_type=type,
piths_meta=(
# Arbitrary class.
HintPithSatisfiedMetadata(bool),
# String constant.
HintPithUnsatisfiedMetadata('Coulomb‐lobed lobbyist’s Ģom'),
),
),
# "type" superclass, semantically equivalent to the unsubscripted
# "Type" singleton.
HintPepMetadata(
hint=Type[type],
pep_sign=HintSignType,
isinstanceable_type=type,
piths_meta=(
# Arbitrary class.
HintPithSatisfiedMetadata(complex),
# String constant.
HintPithUnsatisfiedMetadata('Had al-'),
),
),
# Specific class.
HintPepMetadata(
hint=Type[Class],
pep_sign=HintSignType,
isinstanceable_type=type,
piths_meta=(
# Subclass of this class.
HintPithSatisfiedMetadata(Subclass),
# String constant.
HintPithUnsatisfiedMetadata('Namely,'),
# Non-subclass of this class.
HintPithUnsatisfiedMetadata(str),
),
),
# Specific class deferred with a forward reference.
HintPepMetadata(
hint=Type[_TEST_PEP484_FORWARDREF_CLASSNAME],
pep_sign=HintSignType,
isinstanceable_type=type,
piths_meta=(
# Subclass of this class.
HintPithSatisfiedMetadata(SubclassSubclass),
# String constant.
HintPithUnsatisfiedMetadata('Jabbar‐disbarred'),
# Non-subclass of this class.
HintPithUnsatisfiedMetadata(dict),
),
),
# Two or more specific classes.
HintPepMetadata(
hint=Type[Union[Class, OtherClass,]],
pep_sign=HintSignType,
isinstanceable_type=type,
piths_meta=(
# Arbitrary subclass of one class subscripting this hint.
HintPithSatisfiedMetadata(Subclass),
# Arbitrary subclass of another class subscripting this hint.
HintPithSatisfiedMetadata(OtherSubclass),
# String constant.
HintPithUnsatisfiedMetadata('Jabberings'),
# Non-subclass of any classes subscripting this hint.
HintPithUnsatisfiedMetadata(set),
),
),
# Generic class.
HintPepMetadata(
hint=Type[T],
pep_sign=HintSignType,
isinstanceable_type=type,
is_typevars=True,
piths_meta=(
# Arbitrary class.
HintPithSatisfiedMetadata(int),
# String constant.
HintPithUnsatisfiedMetadata('Obligation, and'),
),
),
# ................{ TUPLE }................
# Unsubscripted "Tuple" attribute. Note that this attribute is *NOT*
# parametrized by one or more type variables under any Python version,
# unlike most other unsubscripted "typing" attributes originating from
# container types. Non-orthogonality, thy name is the "typing" module.
HintPepMetadata(
hint=Tuple,
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
piths_meta=(
# Tuple containing arbitrary items.
HintPithSatisfiedMetadata((
'a Steely dittied',
'Steel ‘phallus’ ballast',
)),
# List containing arbitrary items.
HintPithUnsatisfiedMetadata([
'In this Tellus‐cloistered, pre‐mature pop nomenclature',
'Of irremediable Media mollifications',
]),
),
),
# ................{ TUPLE ~ fixed }................
# Empty tuple. Yes, this is ridiculous, useless, and non-orthogonal
# with standard sequence syntax, which supports no comparable notion of
# an "empty {insert-type-here}" (e.g., empty list). For example:
# >>> from typing import List
# >>> List[()]
# TypeError: Too few parameters for List; actual 0, expected 1
# >>> List[[]]
# TypeError: Parameters to generic types must be types. Got [].
HintPepMetadata(
hint=Tuple[()],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
piths_meta=(
# Empty tuple.
HintPithSatisfiedMetadata(()),
# Non-empty tuple containing arbitrary items.
HintPithUnsatisfiedMetadata(
pith=(
'They shucked',
'(Or huckstered, knightly rupturing veritas)',
),
# Match that the exception message raised for this object...
exception_str_match_regexes=(
# Identify this tuple as non-empty.
r'\bnon-empty\b',
),
),
),
),
# Fixed-length tuple of only ignorable child hints.
HintPepMetadata(
hint=Tuple[Any, object,],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
piths_meta=(
# Tuple containing arbitrary items.
HintPithSatisfiedMetadata((
'Surseance',
'Of sky, the God, the surly',
)),
# Tuple containing fewer items than required.
HintPithUnsatisfiedMetadata(
pith=('Obeisance',),
# Match that the exception message raised for this object...
exception_str_match_regexes=(
# Compare this tuple's length to the expected length.
r'\b1 != 2\b',
),
),
),
),
# Fixed-length tuple of at least one ignorable child hint.
HintPepMetadata(
hint=Tuple[float, Any, str,],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
piths_meta=(
# Tuple containing a floating-point number, string, and integer
# (in that exact order).
HintPithSatisfiedMetadata((
20.09,
'Of an apoptosic T.A.R.P.’s torporific‐riven ecocide',
"Nightly tolled, pindololy, ol'",
)),
# String constant.
HintPithUnsatisfiedMetadata(
'Jangling (brinkmanship “Ironside”) jingoisms'),
# Tuple containing fewer items than required.
HintPithUnsatisfiedMetadata(
pith=(
999.888,
'Obese, slipshodly muslin‐shod priests had maudlin solo',
),
# Match that the exception message raised for this object...
exception_str_match_regexes=(
# Compare this tuple's length to the expected length.
r'\b2 != 3\b',
),
),
# Tuple containing a floating-point number, a string, and a
# boolean (in that exact order).
HintPithUnsatisfiedMetadata(
pith=(
75.83,
'Unwholesome gentry ventings',
False,
),
# Match that the exception message raised for this object...
exception_str_match_regexes=(
# Declares the index and expected type of this fixed
# tuple item *NOT* satisfying this hint.
r'\b[Tt]uple index 2 item\b',
r'\bstr\b',
),
),
),
),
# Nested fixed-length tuple of at least one ignorable child hint.
HintPepMetadata(
hint=Tuple[Tuple[float, Any, str,], ...],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
piths_meta=(
# Tuple containing tuples containing a floating-point number,
# string, and integer (in that exact order).
HintPithSatisfiedMetadata((
(
90.02,
'Father — "Abstracted, OH WE LOVE YOU',
'Farther" — that',
),
(
2.9,
'To languidly Ent‐wine',
'Towards a timely, wines‐enticing gate',
),
)),
# Tuple containing a tuple containing fewer items than
# required.
HintPithUnsatisfiedMetadata((
(
888.999,
'Oboes‐obsoleting tines',
),
)),
# Tuple containing a tuple containing a floating-point number,
# string, and boolean (in that exact order).
HintPithUnsatisfiedMetadata(
pith=(
(
75.83,
'Vespers’ hymnal seance, invoking',
True,
),
),
# Match that the exception message raised for this
# object...
exception_str_match_regexes=(
                        # Declares the index and expected type of a random
# tuple item of a fixed tuple item *NOT* satisfying
# this hint.
r'\b[Tt]uple index \d+ item tuple index 2 item\b',
r'\bstr\b',
),
),
),
),
# Generic fixed-length tuple.
HintPepMetadata(
hint=Tuple[S, T],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
is_typevars=True,
piths_meta=(
# Tuple containing a floating-point number and string (in that
# exact order).
HintPithSatisfiedMetadata((
33.77,
'Legal indiscretions',
)),
# String constant.
HintPithUnsatisfiedMetadata('Leisurely excreted by'),
# Tuple containing fewer items than required.
HintPithUnsatisfiedMetadata((
'Market states‐created, stark abscess',
)),
),
),
# ................{ TUPLE ~ variadic }................
# Variadic tuple.
HintPepMetadata(
hint=Tuple[str, ...],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
piths_meta=(
# Tuple containing arbitrarily many string constants.
HintPithSatisfiedMetadata((
'Of a scantly raptured Overture,'
'Ur‐churlishly',
)),
# String constant.
HintPithUnsatisfiedMetadata(
'Of Toll‐descanted grant money'),
# Tuple containing exactly one integer. Since tuple items are
# only randomly type-checked, only a tuple of exactly one item
# enables us to match the explicit index at fault below.
HintPithUnsatisfiedMetadata(
pith=((53,)),
# Match that the exception message raised for this
# object...
exception_str_match_regexes=(
# Declares the index and expected type of a random
# tuple item *NOT* satisfying this hint.
r'\b[Tt]uple index \d+ item\b',
r'\bstr\b',
),
),
),
),
# Generic variadic tuple.
HintPepMetadata(
hint=Tuple[T, ...],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
is_typevars=True,
piths_meta=(
# Tuple containing arbitrarily many string constants.
HintPithSatisfiedMetadata((
'Loquacious s‐age, salaciously,',
'Of regal‐seeming, freemen‐sucking Hovels, a',
)),
# String constant.
HintPithUnsatisfiedMetadata(
'Concubine enthralling contractually novel'),
),
),
# ................{ UNION }................
# Note that unions of one argument (e.g., "Union[str]") *CANNOT* be
# listed here, as the "typing" module implicitly reduces these unions
# to only that argument (e.g., "str") on our behalf.
#
        # Thanks. Thanks a lot, "typing".
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# CAUTION: The Python < 3.7.0-specific implementations of "Union"
# are defective, in that they silently filter out various subscripted
# arguments that they absolutely should *NOT*, including "bool": e.g.,
# $ python3.6
# >>> import typing
# >>> Union[bool, float, int, Sequence[
# ... Union[bool, float, int, Sequence[str]]]]
# Union[float, int, Sequence[Union[float, int, Sequence[str]]]]
# For this reason, these arguments *MUST* be omitted below.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Ignorable unsubscripted "Union" attribute.
HintPepMetadata(
hint=Union,
pep_sign=HintSignUnion,
typehint_cls=UnionTypeHint,
is_ignorable=True,
),
# Union of one non-"typing" type and an originative "typing" type,
# exercising a prominent edge case when raising human-readable
# exceptions describing the failure of passed parameters or returned
# values to satisfy this union.
HintPepMetadata(
hint=Union[int, Sequence[str]],
pep_sign=HintSignUnion,
typehint_cls=UnionTypeHint,
piths_meta=(
# Integer constant.
HintPithSatisfiedMetadata(21),
# Sequence of string items.
HintPithSatisfiedMetadata((
'To claim all ͼarth a number, penumbraed'
'By blessed Pendragon’s flagon‐bedraggling constancies',
)),
# Floating-point constant.
#
# Note that a string constant is intentionally *NOT* listed
# here, as strings are technically sequences of strings of
# length one commonly referred to as Unicode code points or
# simply characters.
HintPithUnsatisfiedMetadata(
pith=802.11,
# Match that the exception message raised for this object
# declares the types *NOT* satisfied by this object.
exception_str_match_regexes=(
r'\bSequence\b',
r'\bint\b',
),
# Match that the exception message raised for this object
# does *NOT* contain a newline or bullet delimiter.
exception_str_not_match_regexes=(
r'\n',
r'\*',
),
),
# Tuple of integers.
HintPithUnsatisfiedMetadata(
pith=(1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89,),
# Match that the exception message raised for this
# object...
exception_str_match_regexes=(
# Contains a bullet point declaring the non-"typing"
# type *NOT* satisfied by this object.
r'\n\*\s.*\bint\b',
# Contains a bullet point declaring the index of the
# random tuple item *NOT* satisfying this hint.
r'\n\*\s.*\b[Tt]uple index \d+ item\b',
),
),
),
),
# Union of three non-"typing" types and an originative "typing" type of
# a union of three non-"typing" types and an originative "typing" type,
# exercising a prominent edge case when raising human-readable
# exceptions describing the failure of passed parameters or returned
# values to satisfy this union.
HintPepMetadata(
hint=Union[dict, float, int,
Sequence[Union[dict, float, int, MutableSequence[str]]]],
pep_sign=HintSignUnion,
typehint_cls=UnionTypeHint,
piths_meta=(
# Empty dictionary.
HintPithSatisfiedMetadata({}),
# Floating-point number constant.
HintPithSatisfiedMetadata(777.777),
# Integer constant.
HintPithSatisfiedMetadata(777),
# Sequence of dictionary, floating-point number, integer, and
# sequence of string constant items.
HintPithSatisfiedMetadata((
# Non-empty dictionary.
{
'Of': 'charnal memories,',
'Or': 'coterminously chordant‐disarmed harmonies',
},
# Floating-point number constant.
666.666,
# Integer constant.
666,
# Mutable sequence of string constants.
[
'Ansuded scientifically pontifical grapheme‐',
'Denuded hierography, professedly, to emulate ascen-',
],
)),
# Complex number constant.
HintPithUnsatisfiedMetadata(
pith=356+260j,
# Match that the exception message raised for this object
# declares the types *NOT* satisfied by this object.
exception_str_match_regexes=(
r'\bSequence\b',
r'\bdict\b',
r'\bfloat\b',
r'\bint\b',
),
# Match that the exception message raised for this object
# does *NOT* contain a newline or bullet delimiter.
exception_str_not_match_regexes=(
r'\n',
r'\*',
),
),
# Sequence of bytestring items.
HintPithUnsatisfiedMetadata(
pith=(b"May they rest their certainties' Solicitousness to",),
# Match that the exception message raised for this
# object...
exception_str_match_regexes=(
# Contains a bullet point declaring one of the
# non-"typing" types *NOT* satisfied by this object.
r'\n\*\s.*\bint\b',
# Contains a bullet point declaring the index of the
# random tuple item *NOT* satisfying this hint.
r'\n\*\s.*\b[Tt]uple index \d+ item\b',
),
),
# Sequence of mutable sequences of bytestring items.
HintPithUnsatisfiedMetadata(
pith=([b'Untaint these ties',],),
# Match that the exception message raised for this
# object...
exception_str_match_regexes=(
# Contains an unindented bullet point declaring one of
# the non-"typing" types unsatisfied by this object.
r'\n\*\s.*\bfloat\b',
# Contains an indented bullet point declaring one of
# the non-"typing" types unsatisfied by this object.
r'\n\s+\*\s.*\bint\b',
# Contains an unindented bullet point declaring the
# index of the random tuple item *NOT* satisfying
# this hint.
r'\n\*\s.*\b[Tt]uple index \d+ item\b',
# Contains an indented bullet point declaring the index
# of the random list item *NOT* satisfying this hint.
r'\n\s+\*\s.*\b[Ll]ist index \d+ item\b',
),
),
),
),
#FIXME: Actually list two or more "piths_meta" here, please.
# Union of one non-"typing" type and one concrete generic.
HintPepMetadata(
hint=Union[str, Iterable[Tuple[S, T]]],
pep_sign=HintSignUnion,
typehint_cls=UnionTypeHint,
is_typevars=True,
),
# ................{ UNION ~ nested }................
# Nested unions exercising edge cases induced by Python >= 3.8
# optimizations leveraging PEP 572-style assignment expressions.
# Nested union of multiple non-"typing" types.
HintPepMetadata(
hint=List[Union[int, str,]],
pep_sign=HintSignList,
isinstanceable_type=list,
piths_meta=(
# List containing a mixture of integer and string constants.
HintPithSatisfiedMetadata([
'Un‐seemly preening, pliant templar curs; and',
272,
]),
# String constant.
HintPithUnsatisfiedMetadata(
pith='Un‐seemly preening, pliant templar curs; and',
# Match that the exception message raised for this object
# declares the types *NOT* satisfied by this object.
exception_str_match_regexes=(
r'\bint\b',
r'\bstr\b',
),
# Match that the exception message raised for this object
# does *NOT* contain a newline or bullet delimiter.
exception_str_not_match_regexes=(
r'\n',
r'\*',
),
),
# List of bytestring items.
HintPithUnsatisfiedMetadata(
pith=[
b'Blamelessly Slur-chastened rights forthwith, affrighting',
b"Beauty's lurid, beleaguered knolls, eland-leagued and",
],
# Match that the exception message raised for this
# object...
exception_str_match_regexes=(
# Declares all non-"typing" types *NOT* satisfied by a
# random list item *NOT* satisfying this hint.
r'\bint\b',
r'\bstr\b',
# Declares the index of the random list item *NOT*
# satisfying this hint.
r'\b[Ll]ist index \d+ item\b',
),
),
),
),
# Nested union of one non-"typing" type and one "typing" type.
HintPepMetadata(
hint=Sequence[Union[str, ByteString]],
pep_sign=HintSignSequence,
isinstanceable_type=collections_abc.Sequence,
piths_meta=(
# Sequence of string and bytestring constants.
HintPithSatisfiedMetadata((
b'For laconically formulaic, knavish,',
u'Or sordidly sellsword‐',
f'Horded temerities, bravely unmerited',
)),
# Integer constant.
HintPithUnsatisfiedMetadata(
pith=7898797,
# Match that the exception message raised for this object
# declares the types *NOT* satisfied by this object.
exception_str_match_regexes=(
r'\bByteString\b',
r'\bstr\b',
),
# Match that the exception message raised for this object
# does *NOT* contain a newline or bullet delimiter.
exception_str_not_match_regexes=(
r'\n',
r'\*',
),
),
# Sequence of integer items.
HintPithUnsatisfiedMetadata(
pith=((144, 233, 377, 610, 987, 1598, 2585, 4183, 6768,)),
# Match that the exception message raised for this object...
exception_str_match_regexes=(
# Declares all non-"typing" types *NOT* satisfied by a
# random tuple item *NOT* satisfying this hint.
r'\bByteString\b',
r'\bstr\b',
# Declares the index of the random tuple item *NOT*
# satisfying this hint.
r'\b[Tt]uple index \d+ item\b',
),
),
),
),
# Nested union of *NO* isinstanceable type and multiple "typing" types.
HintPepMetadata(
hint=MutableSequence[Union[ByteString, Callable]],
pep_sign=HintSignMutableSequence,
isinstanceable_type=collections_abc.MutableSequence,
piths_meta=(
# Mutable sequence of string and bytestring constants.
HintPithSatisfiedMetadata([
b"Canonizing Afrikaans-kennelled Mine canaries,",
lambda: 'Of a floridly torrid, hasty love — that league',
]),
# String constant.
HintPithUnsatisfiedMetadata(
pith='Effaced.',
# Match that the exception message raised for this object
# declares the types *NOT* satisfied by this object.
exception_str_match_regexes=(
r'\bByteString\b',
r'\bCallable\b',
),
# Match that the exception message raised for this object
# does *NOT* contain a newline or bullet delimiter.
exception_str_not_match_regexes=(
r'\n',
r'\*',
),
),
# Mutable sequence of string constants.
HintPithUnsatisfiedMetadata(
pith=[
'Of genteel gentle‐folk — that that Ƹsper',
'At my brand‐defaced, landless side',
],
# Match that the exception message raised for this
# object...
exception_str_match_regexes=(
# Declares all non-"typing" types *NOT* satisfied by a
# random list item *NOT* satisfying this hint.
r'\bByteString\b',
r'\bCallable\b',
# Declares the index of the random list item *NOT*
# satisfying this hint.
r'\b[Ll]ist index \d+ item\b',
),
),
),
),
# ................{ UNION ~ optional }................
# Ignorable unsubscripted "Optional" attribute.
HintPepMetadata(
hint=Optional,
pep_sign=HintSignOptional,
typehint_cls=UnionTypeHint,
is_ignorable=True,
),
# Optional isinstance()-able "typing" type.
HintPepMetadata(
hint=Optional[Sequence[str]],
# Subscriptions of the "Optional" attribute reduce to
# fundamentally different unsubscripted typing attributes depending
# on Python version. Specifically, under:
# * Python >= 3.9, the "Optional" and "Union" attributes are
# distinct.
# * Python < 3.9, the "Optional" and "Union" attributes are *NOT*
# distinct. The "typing" module implicitly reduces *ALL*
# subscriptions of the "Optional" attribute by the corresponding
# "Union" attribute subscripted by both that argument and
# "type(None)". Ergo, there effectively exists *NO*
# "Optional" attribute under older Python versions.
pep_sign=(
HintSignOptional if IS_PYTHON_AT_LEAST_3_9 else HintSignUnion),
typehint_cls=UnionTypeHint,
piths_meta=(
# None singleton.
HintPithSatisfiedMetadata(None),
# Sequence of string items.
HintPithSatisfiedMetadata((
'Of cuticular currents (...wide, wildly articulate,',
'And canting free, physico-stipulatingly) -',
)),
# Floating-point constant.
#
# Note that a string constant is intentionally *NOT* listed
# here, as strings are technically sequences of strings of
# length one commonly referred to as Unicode code points or
# simply characters.
HintPithUnsatisfiedMetadata(
pith=802.2,
# Match that the exception message raised for this object
# declares the types *NOT* satisfied by this object.
exception_str_match_regexes=(
r'\bNoneType\b',
r'\bSequence\b',
),
# Match that the exception message raised for this object
# does *NOT* contain a newline or bullet delimiter.
exception_str_not_match_regexes=(
r'\n',
r'\*',
),
),
),
),
))
# PEP-compliant type hints conditionally dependent on the major version of
# Python targeted by the active Python interpreter.
if IS_PYTHON_AT_LEAST_3_9:
data_module.HINTS_PEP_META.extend((
# ..............{ GENERICS ~ user }..............
# Subscripted generic subclassing a single unsubscripted "typing"
# type. Note that these types constitute an edge case supported
# *ONLY* under Python >= 3.9, which implements these tests in an
# ambiguous (albeit efficient) manner effectively indistinguishable
# from PEP 585-compliant type hints.
HintPepMetadata(
hint=_Pep484GenericUnsubscriptedSingle[str],
pep_sign=HintSignGeneric,
generic_type=_Pep484GenericUnsubscriptedSingle,
is_type_typing=False,
piths_meta=(
# Subclass-specific generic list of string constants.
HintPithSatisfiedMetadata(
_Pep484GenericUnsubscriptedSingle((
'Volubly vi‐brant libations',
'To blubber‐lubed Bacchus — hustling',
))
),
# String constant.
HintPithUnsatisfiedMetadata('O’ the frock'),
# List of string constants.
HintPithUnsatisfiedMetadata([
'O’ Friday’s squealing — Sounding',
'Freedom’s unappealing, Passive delights',
]),
),
),
))
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`604`-compliant **type hint test data.**
'''
# ....................{ IMPORTS }....................
from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_10
# ....................{ ADDERS }....................
def add_data(data_module: 'ModuleType') -> None:
    '''
    Add :pep:`604`-compliant type hint test data to various global containers
    declared by the passed module.
    Parameters
    ----------
    data_module : ModuleType
        Module to be added to.
    '''
    # If the active Python interpreter targets Python < 3.10, this interpreter
    # fails to support PEP 604. In this case, reduce to a noop.
    if not IS_PYTHON_AT_LEAST_3_10:
        return
    # Else, this interpreter supports PEP 604.
    # ..................{ IMPORTS }..................
    # Defer attribute-dependent imports.
    from beartype._data.hint.pep.sign.datapepsigns import (
        HintSignList,
        HintSignUnion,
    )
    from beartype_test.a00_unit.data.hint.util.data_hintmetacls import (
        HintPepMetadata,
        HintPithSatisfiedMetadata,
        HintPithUnsatisfiedMetadata,
    )
    from typing import (
        Any,
    )
    # ..................{ SETS }..................
    # Add PEP 604-specific deeply ignorable test type hints to this set global.
    data_module.HINTS_PEP_IGNORABLE_DEEP.update((
        # "|"-style unions containing any ignorable type hint.
        #
        # Note that including *ANY* "typing"-based type hint (including
        # "typing.Any") in an "|"-style union causes Python to implicitly
        # produce a PEP 484- rather than PEP 604-compliant union (e.g.,
        # "typing.Union[Any, float, str]" in this case). Since that is more or
        # less fine in this context, we intentionally list such a deeply
        # ignorable hint here.
        Any | float | str,
        complex | int | object,
    ))
    # ..................{ TUPLES }..................
    # Add PEP 604-specific test type hints to this tuple global.
    data_module.HINTS_PEP_META.extend((
        # ................{ |-UNION }................
        # Union of one non-"typing" type and an originative "typing" type,
        # exercising a prominent edge case when raising human-readable
        # exceptions describing the failure of passed parameters or returned
        # values to satisfy this union.
        #
        # Interestingly, Python preserves this union as a PEP 604-compliant
        # |-style union rather than implicitly coercing this into a PEP
        # 484-compliant union: e.g.,
        #     >>> int | list[str]
        #     int | list[str]
        HintPepMetadata(
            hint=int | list[str],
            pep_sign=HintSignUnion,
            # PEP 604-compliant |-style unions do *NOT* originate from the
            # "typing" module and are thus *NOT* "typing"-based attributes.
            is_type_typing=False,
            piths_meta=(
                # Integer constant.
                HintPithSatisfiedMetadata(87),
                # List of string items.
                HintPithSatisfiedMetadata([
                    'Into, my myopic mandrake‐manhandling, panhandling slakes of',
                    'Televisual, dis‐informative Lakes, unsaintly, of',
                ]),
                # Floating-point constant.
                HintPithUnsatisfiedMetadata(
                    pith=10100.00101,
                    # Match that the exception message raised for this object
                    # declares the types *NOT* satisfied by this object.
                    exception_str_match_regexes=(
                        r'\blist\b',
                        r'\bint\b',
                    ),
                    # Match that the exception message raised for this object
                    # does *NOT* contain a newline or bullet delimiter.
                    exception_str_not_match_regexes=(
                        r'\n',
                        r'\*',
                    ),
                ),
                # List of integers.
                HintPithUnsatisfiedMetadata(
                    pith=([1, 10, 271, 36995]),
                    # Match that the exception message raised for this
                    # object...
                    exception_str_match_regexes=(
                        # Contains a bullet point declaring the non-"typing"
                        # type *NOT* satisfied by this object.
                        r'\n\*\s.*\bint\b',
                        # Contains a bullet point declaring the index of the
                        # random list item *NOT* satisfying this hint.
                        r'\n\*\s.*\b[Ll]ist index \d+ item\b',
                    ),
                ),
            ),
        ),
        # ................{ UNION ~ nested }................
        # Nested unions exercising edge cases induced by Python >= 3.8
        # optimizations leveraging PEP 572-style assignment expressions.
        # Nested union of multiple non-"typing" types.
        HintPepMetadata(
            hint=list[int | str],
            pep_sign=HintSignList,
            isinstanceable_type=list,
            # "list[...]" subscriptions are PEP 585-compliant builtin hints,
            # even when subscripted by a PEP 604-compliant |-style union.
            is_pep585_builtin=True,
            piths_meta=(
                # List containing a mixture of integer and string constants.
                HintPithSatisfiedMetadata([
                    'Telemarketing affirmative‐mined Ketamine’s',
                    470,
                ]),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    pith='Apolitically',
                    # Match that the exception message raised for this object
                    # declares the types *NOT* satisfied by this object.
                    exception_str_match_regexes=(
                        r'\bint\b',
                        r'\bstr\b',
                    ),
                    # Match that the exception message raised for this object
                    # does *NOT* contain a newline or bullet delimiter.
                    exception_str_not_match_regexes=(
                        r'\n',
                        r'\*',
                    ),
                ),
                # List of bytestring items.
                HintPithUnsatisfiedMetadata(
                    pith=[
                        b'Apoplectic hints of',
                        b'Stenographically',
                    ],
                    # Match that the exception message raised for this
                    # object...
                    exception_str_match_regexes=(
                        # Declares all non-"typing" types *NOT* satisfied by a
                        # random list item *NOT* satisfying this hint.
                        r'\bint\b',
                        r'\bstr\b',
                        # Declares the index of the random list item *NOT*
                        # satisfying this hint.
                        r'\b[Ll]ist index \d+ item\b',
                    ),
                ),
            ),
        ),
        # ................{ UNION ~ optional }................
        # Optional isinstance()-able "typing" type.
        HintPepMetadata(
            hint=tuple[str, ...] | None,
            # Note that although Python >= 3.10 distinguishes equivalent
            # PEP 484-compliant "typing.Union[...]" and "typing.Optional[...]"
            # type hints via differing machine-readable representations, the
            # same does *NOT* apply to PEP 604-compliant |-style unions, which
            # remain PEP 604-compliant and thus unions rather than optional.
            # This includes PEP 604-compliant |-style unions including the
            # "None" singleton signifying an optional type hint. Go figure.
            pep_sign=HintSignUnion,
            is_type_typing=False,
            piths_meta=(
                # None singleton.
                HintPithSatisfiedMetadata(None),
                # Tuple of string items.
                HintPithSatisfiedMetadata((
                    'Stentorian tenor of',
                    'Stunted numbness (in',
                )),
                # Floating-point constant.
                HintPithUnsatisfiedMetadata(
                    pith=2397.7932,
                    # Match that the exception message raised for this object
                    # declares the types *NOT* satisfied by this object.
                    exception_str_match_regexes=(
                        r'\bNoneType\b',
                        r'\btuple\b',
                    ),
                    # Match that the exception message raised for this object
                    # does *NOT* contain a newline or bullet delimiter.
                    exception_str_not_match_regexes=(
                        r'\n',
                        r'\*',
                    ),
                ),
            ),
        ),
    ))
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`585`-compliant **type hint test data.**
'''
# ....................{ IMPORTS }....................
from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_9
# ....................{ ADDERS }....................
def add_data(data_module: 'ModuleType') -> None:
'''
Add :pep:`585`-compliant type hint test data to various global containers
declared by the passed module.
Parameters
----------
data_module : ModuleType
Module to be added to.
'''
    # If the active Python interpreter targets Python < 3.9, this
    # interpreter fails to support PEP 585. In this case, reduce to a noop.
if not IS_PYTHON_AT_LEAST_3_9:
return
# Else, the active Python interpreter targets at least Python >= 3.9 and
# thus supports PEP 585.
# ..................{ IMPORTS }..................
# Defer Python >= 3.9-specific imports.
import re
from beartype._cave._cavefast import IntType
from beartype._data.hint.pep.sign.datapepsigns import (
HintSignByteString,
HintSignCallable,
HintSignContextManager,
HintSignDict,
HintSignGeneric,
HintSignList,
HintSignMatch,
HintSignMutableSequence,
HintSignPattern,
HintSignSequence,
HintSignTuple,
HintSignType,
)
from beartype_test.a00_unit.data.data_type import (
Class,
Subclass,
SubclassSubclass,
OtherClass,
OtherSubclass,
# OtherSubclassSubclass,
context_manager_factory,
)
from beartype_test.a00_unit.data.hint.util.data_hintmetacls import (
HintPepMetadata,
HintPithSatisfiedMetadata,
HintPithUnsatisfiedMetadata,
)
from collections.abc import (
ByteString,
Callable,
Container,
Iterable,
MutableSequence,
Sequence,
Sized,
)
from contextlib import AbstractContextManager
from re import Match, Pattern
from typing import Any, TypeVar, Union
# ..................{ TYPEVARS }..................
S = TypeVar('S')
'''
User-defined generic :mod:`typing` type variable.
'''
T = TypeVar('T')
'''
User-defined generic :mod:`typing` type variable.
'''
# ..................{ GENERICS ~ single }..................
# Note we intentionally do *NOT* declare unsubscripted PEP 585-compliant
# generics (e.g., "class _Pep585GenericUnsubscriptedSingle(list):"). Why?
# Because PEP 585-compliant generics are necessarily subscripted; when
# unsubscripted, the corresponding subclasses are simply standard types.
class _Pep585GenericTypevaredSingle(list[T]):
'''
:pep:`585`-compliant user-defined generic subclassing a single
parametrized builtin type.
'''
# Redefine this generic's representation for debugging purposes.
def __repr__(self) -> str:
return f'{self.__class__.__name__}({super().__repr__()})'
class _Pep585GenericUntypevaredShallowSingle(list[str]):
'''
:pep:`585`-compliant user-defined generic subclassing a single
subscripted (but unparametrized) builtin type.
'''
# Redefine this generic's representation for debugging purposes.
def __repr__(self) -> str:
return f'{self.__class__.__name__}({super().__repr__()})'
    class _Pep585GenericUntypevaredDeepSingle(list[list[str]]):
        '''
        :pep:`585`-compliant user-defined generic subclassing a single
        subscripted (but unparametrized) builtin type, itself subscripted by a
        subscripted (but unparametrized) builtin type.
        '''
        pass
# ..................{ GENERICS ~ multiple }..................
class _Pep585GenericUntypevaredMultiple(
Callable, AbstractContextManager[str], Sequence[str]):
'''
:pep:`585`-compliant user-defined generic subclassing multiple
subscripted (but unparametrized) :mod:`collection.abc` abstract base
classes (ABCs) *and* an unsubscripted :mod:`collection.abc` ABC.
'''
# ................{ INITIALIZERS }................
def __init__(self, sequence: tuple) -> None:
'''
Initialize this generic from the passed tuple.
'''
assert isinstance(sequence, tuple), f'{repr(sequence)} not tuple.'
self._sequence = sequence
# ................{ ABCs }................
# Define all protocols mandated by ABCs subclassed by this generic.
def __call__(self) -> int:
return len(self)
def __contains__(self, obj: object) -> bool:
return obj in self._sequence
def __enter__(self) -> object:
return self
def __exit__(self, *args, **kwargs) -> bool:
return False
def __getitem__(self, index: int) -> object:
return self._sequence[index]
def __iter__(self) -> bool:
return iter(self._sequence)
def __len__(self) -> bool:
return len(self._sequence)
def __reversed__(self) -> object:
return self._sequence.reverse()
    class _Pep585GenericTypevaredShallowMultiple(Iterable[T], Container[T]):
        '''
        :pep:`585`-compliant user-defined generic subclassing multiple directly
        parametrized :mod:`collections.abc` abstract base classes (ABCs).
        '''
        # ................{ INITIALIZERS }................
        def __init__(self, iterable: tuple) -> None:
            '''
            Initialize this generic from the passed tuple.
            '''
            assert isinstance(iterable, tuple), f'{repr(iterable)} not tuple.'
            self._iterable = iterable
        # ................{ ABCs }................
        # Define all protocols mandated by ABCs subclassed by this generic.
        def __contains__(self, obj: object) -> bool:
            return obj in self._iterable
        # Note: return type stringified to avoid importing "Iterator" at
        # runtime; this annotation is never evaluated.
        def __iter__(self) -> 'Iterator[T]':
            return iter(self._iterable)
    class _Pep585GenericTypevaredDeepMultiple(
        Sized, Iterable[tuple[S, T]], Container[tuple[S, T]]):
        '''
        :pep:`585`-compliant user-defined generic subclassing multiple
        indirectly parametrized (but unsubscripted) :mod:`collections.abc`
        abstract base classes (ABCs) *and* an unsubscripted and unparametrized
        :mod:`collections.abc` ABC.
        '''
        # ................{ INITIALIZERS }................
        def __init__(self, iterable: tuple) -> None:
            '''
            Initialize this generic from the passed tuple.
            '''
            assert isinstance(iterable, tuple), f'{repr(iterable)} not tuple.'
            self._iterable = iterable
        # ................{ ABCs }................
        # Define all protocols mandated by ABCs subclassed by this generic.
        def __contains__(self, obj: object) -> bool:
            return obj in self._iterable
        # Note: return type stringified to avoid importing "Iterator" at
        # runtime; this annotation is never evaluated.
        def __iter__(self) -> 'Iterator[tuple[S, T]]':
            return iter(self._iterable)
        def __len__(self) -> int:
            return len(self._iterable)
# ..................{ PRIVATE ~ forwardref }..................
_TEST_PEP585_FORWARDREF_CLASSNAME = (
'beartype_test.a00_unit.data.data_type.Subclass')
'''
Fully-qualified classname of an arbitrary class guaranteed to be
importable.
'''
_TEST_PEP585_FORWARDREF_TYPE = Subclass
    '''
    Arbitrary class referred to by :data:`_TEST_PEP585_FORWARDREF_CLASSNAME`.
    '''
    # ..................{ TUPLES }..................
    # Add PEP 585-specific test type hints to this tuple global.
data_module.HINTS_PEP_META.extend((
# ................{ BYTESTRING }................
# Byte string of integer constants satisfying the builtin "int" type.
#
# Note that *ALL* byte strings necessarily contain only integer
# constants, regardless of whether those byte strings are instantiated
# as "bytes" or "bytearray" instances. Ergo, subscripting
# "collections.abc.ByteString" by any class other than those satisfying
# the standard integer protocol raises a runtime error from @beartype.
# Yes, this means that subscripting "collections.abc.ByteString"
# conveys no information and is thus nonsensical. Welcome to PEP 585.
HintPepMetadata(
hint=ByteString[int],
pep_sign=HintSignByteString,
isinstanceable_type=ByteString,
is_pep585_builtin=True,
piths_meta=(
# Byte string constant.
HintPithSatisfiedMetadata(b'Ingratiatingly'),
# String constant.
HintPithUnsatisfiedMetadata('For an Ǽeons’ æon.'),
),
),
# Byte string of integer constants satisfying the stdlib
# "numbers.Integral" protocol.
HintPepMetadata(
hint=ByteString[IntType],
pep_sign=HintSignByteString,
isinstanceable_type=ByteString,
is_pep585_builtin=True,
piths_meta=(
# Byte array initialized from a byte string constant.
HintPithSatisfiedMetadata(bytearray(b'Cutting Wit')),
# String constant.
HintPithUnsatisfiedMetadata(
'Of birch‐rut, smut‐smitten papers and'),
),
),
# ................{ CALLABLE }................
# Callable accepting no parameters and returning a string.
HintPepMetadata(
hint=Callable[[], str],
pep_sign=HintSignCallable,
isinstanceable_type=Callable,
is_pep585_builtin=True,
piths_meta=(
# Lambda function returning a string constant.
HintPithSatisfiedMetadata(lambda: 'Eudaemonia.'),
# String constant.
HintPithUnsatisfiedMetadata('...grant we heal'),
),
),
# ................{ CONTEXTMANAGER }................
# Context manager yielding strings.
HintPepMetadata(
hint=AbstractContextManager[str],
pep_sign=HintSignContextManager,
isinstanceable_type=AbstractContextManager,
is_pep585_builtin=True,
piths_meta=(
# Context manager.
HintPithSatisfiedMetadata(
pith=lambda: context_manager_factory(
'We were mysteries, unwon'),
is_context_manager=True,
is_pith_factory=True,
),
# String constant.
HintPithUnsatisfiedMetadata('We donned apportionments'),
),
),
# ................{ DICT }................
# Flat dictionary.
HintPepMetadata(
hint=dict[int, str],
pep_sign=HintSignDict,
isinstanceable_type=dict,
is_pep585_builtin=True,
piths_meta=(
# Dictionary mapping integer keys to string values.
HintPithSatisfiedMetadata({
1: 'For taxing',
2: "To a lax and golden‐rendered crucifixion, affix'd",
}),
# String constant.
HintPithUnsatisfiedMetadata(
'To that beep‐prattling, LED‐ and lead-rattling crux'),
),
),
# Generic dictionary.
HintPepMetadata(
hint=dict[S, T],
pep_sign=HintSignDict,
isinstanceable_type=dict,
is_typevars=True,
is_pep585_builtin=True,
piths_meta=(
# Dictionary mapping string keys to integer values.
HintPithSatisfiedMetadata({
'Less-ons"-chastened': 2,
'Chanson': 1,
}),
# String constant.
HintPithUnsatisfiedMetadata('Swansong.'),
),
),
# ................{ GENERATOR }................
# Note that testing generators requires creating generators, which
# require a different syntax to that of standard callables; ergo,
# generator type hints are tested elsewhere.
# ................{ GENERICS ~ single }................
# Note that PEP 585-compliant generics are *NOT* explicitly detected as
# PEP 585-compliant due to idiosyncrasies in the CPython implementation
# of these generics. Ergo, we intentionally do *NOT* set
# "is_pep585_builtin=True," below.
# Generic subclassing a single shallowly unparametrized builtin
# container type.
HintPepMetadata(
hint=_Pep585GenericUntypevaredShallowSingle,
pep_sign=HintSignGeneric,
generic_type=_Pep585GenericUntypevaredShallowSingle,
is_pep585_generic=True,
piths_meta=(
# Subclass-specific generic list of string constants.
HintPithSatisfiedMetadata(
_Pep585GenericUntypevaredShallowSingle((
'Forgive our Vocation’s vociferous publications',
'Of',
))
),
# String constant.
HintPithUnsatisfiedMetadata(
'Hourly sybaritical, pub sabbaticals'),
# List of string constants.
HintPithUnsatisfiedMetadata([
'Materially ostracizing, itinerant‐',
'Anchoretic digimonks initiating',
]),
),
),
# Generic subclassing a single deeply unparametrized builtin container
# type.
HintPepMetadata(
hint=_Pep585GenericUntypevaredDeepSingle,
pep_sign=HintSignGeneric,
generic_type=_Pep585GenericUntypevaredDeepSingle,
is_pep585_generic=True,
piths_meta=(
# Subclass-specific generic list of list of string constants.
HintPithSatisfiedMetadata(
_Pep585GenericUntypevaredDeepSingle([
[
'Intravenous‐averse effigy defamations, traversing',
'Intramurally venal-izing retro-',
],
[
'Versions of a ',
"Version 2.2.a‐excursioned discursive Morningrise's ravenous ad-",
],
])
),
# String constant.
HintPithUnsatisfiedMetadata('Vent of'),
# List of string constants.
HintPithUnsatisfiedMetadata([
"Ventral‐entrailed rurality's cinder-",
'Block pluralities of',
]),
# Subclass-specific generic list of string constants.
HintPithUnsatisfiedMetadata(
_Pep585GenericUntypevaredDeepSingle([
'Block-house stockade stocks, trailer',
'Park-entailed central heating, though those',
])
),
),
),
# Generic subclassing a single parametrized builtin container type.
HintPepMetadata(
hint=_Pep585GenericTypevaredSingle,
pep_sign=HintSignGeneric,
generic_type=_Pep585GenericTypevaredSingle,
is_pep585_generic=True,
is_typevars=True,
piths_meta=(
# Subclass-specific generic list of string constants.
HintPithSatisfiedMetadata(_Pep585GenericTypevaredSingle((
'Pleasurable, Raucous caucuses',
'Within th-in cannon’s cynosure-ensuring refectories',
))),
# String constant.
HintPithUnsatisfiedMetadata(
'We there-in leather-sutured scriptured books'),
# List of string constants.
HintPithUnsatisfiedMetadata([
'We laboriously let them boringly refactor',
'Of Meme‐hacked faith’s abandonment, retroactively',
]),
),
),
# Generic subclassing a single parametrized builtin container, itself
# parametrized by the same type variables in the same order.
HintPepMetadata(
hint=_Pep585GenericTypevaredSingle[S, T],
pep_sign=HintSignGeneric,
generic_type=_Pep585GenericTypevaredSingle,
is_pep585_generic=True,
is_typevars=True,
piths_meta=(
# Subclass-specific generic list of string constants.
HintPithSatisfiedMetadata(_Pep585GenericTypevaredSingle((
'Bandage‐managed',
'Into Faithless redaction’s didact enactment — crookedly',
))),
# String constant.
HintPithUnsatisfiedMetadata('Down‐bound'),
# List of string constants.
HintPithUnsatisfiedMetadata([
'To prayer',
'To Ɯṙaith‐like‐upwreathed ligaments',
]),
),
),
# ................{ GENERICS ~ multiple }................
# Generic subclassing multiple unparametrized "collection.abc" abstract
# base class (ABCs) *AND* an unsubscripted "collection.abc" ABC.
HintPepMetadata(
hint=_Pep585GenericUntypevaredMultiple,
pep_sign=HintSignGeneric,
generic_type=_Pep585GenericUntypevaredMultiple,
is_pep585_generic=True,
piths_meta=(
# Subclass-specific generic 2-tuple of string constants.
HintPithSatisfiedMetadata(_Pep585GenericUntypevaredMultiple((
'Into a viscerally Eviscerated eras’ meditative hallways',
'Interrupting Soul‐viscous, vile‐ly Viceroy‐insufflating',
))),
# String constant.
HintPithUnsatisfiedMetadata('Initiations'),
# 2-tuple of string constants.
HintPithUnsatisfiedMetadata((
"Into a fat mendicant’s",
'Endgame‐defendant, dedicate rants',
)),
),
),
# Generic subclassing multiple parametrized "collections.abc" abstract
# base classes (ABCs).
HintPepMetadata(
hint=_Pep585GenericTypevaredShallowMultiple,
pep_sign=HintSignGeneric,
generic_type=_Pep585GenericTypevaredShallowMultiple,
is_pep585_generic=True,
is_typevars=True,
piths_meta=(
# Subclass-specific generic iterable of string constants.
HintPithSatisfiedMetadata(
_Pep585GenericTypevaredShallowMultiple((
"Of foliage's everliving antestature —",
'In us, Leviticus‐confusedly drunk',
)),
),
# String constant.
HintPithUnsatisfiedMetadata("In Usufructose truth's"),
),
),
# Generic subclassing multiple indirectly parametrized
# "collections.abc" abstract base classes (ABCs) *AND* an
# unparametrized "collections.abc" ABC.
HintPepMetadata(
hint=_Pep585GenericTypevaredDeepMultiple,
pep_sign=HintSignGeneric,
generic_type=_Pep585GenericTypevaredDeepMultiple,
is_pep585_generic=True,
is_typevars=True,
piths_meta=(
# Subclass-specific generic iterable of 2-tuples of string
# constants.
HintPithSatisfiedMetadata(
_Pep585GenericTypevaredDeepMultiple((
(
'Inertially tragicomipastoral, pastel ',
'anticandour — remanding undemanding',
),
(
'Of a',
'"hallow be Thy nameless',
),
)),
),
# String constant.
HintPithUnsatisfiedMetadata('Invitations'),
),
),
# Nested list of PEP 585-compliant generics.
HintPepMetadata(
hint=list[_Pep585GenericUntypevaredMultiple],
pep_sign=HintSignList,
isinstanceable_type=list,
is_pep585_builtin=True,
piths_meta=(
# List of subclass-specific generic 2-tuples of string
# constants.
HintPithSatisfiedMetadata([
_Pep585GenericUntypevaredMultiple((
'Stalling inevit‐abilities)',
'For carbined',
)),
_Pep585GenericUntypevaredMultiple((
'Power-over (than',
'Power-with)',
)),
]),
# String constant.
HintPithUnsatisfiedMetadata(
'that forced triforced, farcically carcinogenic Obelisks'),
# List of 2-tuples of string constants.
HintPithUnsatisfiedMetadata([
(
'Obliterating their literate decency',
'Of a cannabis‐enthroning regency',
),
]),
),
),
# ................{ LIST }................
# List of ignorable objects.
HintPepMetadata(
hint=list[object],
pep_sign=HintSignList,
isinstanceable_type=list,
is_pep585_builtin=True,
piths_meta=(
# Empty list, which satisfies all hint arguments by definition.
HintPithSatisfiedMetadata([]),
# List of arbitrary objects.
HintPithSatisfiedMetadata([
'Of philomathematically bliss‐postulating Seas',
'Of actuarial postponement',
23.75,
]),
# String constant.
HintPithUnsatisfiedMetadata(
'Of actual change elevating alleviation — that'),
),
),
# List of non-"typing" objects.
HintPepMetadata(
hint=list[str],
pep_sign=HintSignList,
isinstanceable_type=list,
is_pep585_builtin=True,
piths_meta=(
# Empty list, which satisfies all hint arguments by definition.
HintPithSatisfiedMetadata([]),
# List of strings.
HintPithSatisfiedMetadata([
'Ously overmoist, ov‐ertly',
'Deverginating vertigo‐originating',
]),
# String constant.
HintPithUnsatisfiedMetadata('Devilet‐Sublet cities waxing'),
# List containing exactly one integer. Since list items are
# only randomly type-checked, only a list of exactly one item
# enables us to match the explicit index at fault below.
HintPithUnsatisfiedMetadata(
pith=[73,],
# Match that the exception message raised for this
# object...
exception_str_match_regexes=(
# Declares the index of a random list item *NOT*
# satisfying this hint.
r'\b[Ll]ist index \d+ item\b',
# Preserves the value of this item unquoted.
r'\s73\s',
),
),
),
),
# Generic list.
HintPepMetadata(
hint=list[T],
pep_sign=HintSignList,
isinstanceable_type=list,
is_typevars=True,
is_pep585_builtin=True,
piths_meta=(
# Empty list, which satisfies all hint arguments by definition.
HintPithSatisfiedMetadata([]),
# List of strings.
HintPithSatisfiedMetadata([
'Lesion this ice-scioned',
'Legion',
]),
# String constant.
HintPithUnsatisfiedMetadata(
'Lest we succumb, indelicately, to'),
),
),
# ................{ REGEX ~ match }................
# Regular expression match of only strings.
HintPepMetadata(
hint=Match[str],
pep_sign=HintSignMatch,
isinstanceable_type=Match,
is_pep585_builtin=True,
piths_meta=(
# Regular expression match of one or more string constants.
HintPithSatisfiedMetadata(re.search(
r'\b[a-z]+itiat[a-z]+\b',
'Vitiating novitiate Succubæ – a',
)),
# String constant.
HintPithUnsatisfiedMetadata('Into Elitistly'),
),
),
# ................{ REGEX ~ pattern }................
# Regular expression pattern of only strings.
HintPepMetadata(
hint=Pattern[str],
pep_sign=HintSignPattern,
isinstanceable_type=Pattern,
is_pep585_builtin=True,
piths_meta=(
# Regular expression string pattern.
HintPithSatisfiedMetadata(
re.compile(r'\b[A-Z]+ITIAT[A-Z]+\b')),
# String constant.
HintPithUnsatisfiedMetadata('Obsessing men'),
),
),
# ................{ SUBCLASS }................
# Any type, semantically equivalent under PEP 484 to the unsubscripted
# "Type" singleton.
HintPepMetadata(
hint=type[Any],
pep_sign=HintSignType,
isinstanceable_type=type,
is_pep585_builtin=True,
piths_meta=(
# Arbitrary class.
HintPithSatisfiedMetadata(float),
# String constant.
HintPithUnsatisfiedMetadata('Coulomb‐lobed lobbyist’s Ģom'),
),
),
# "type" superclass, semantically equivalent to the unsubscripted
# "Type" singleton.
HintPepMetadata(
hint=type[type],
pep_sign=HintSignType,
isinstanceable_type=type,
is_pep585_builtin=True,
piths_meta=(
# Arbitrary class.
HintPithSatisfiedMetadata(complex),
# String constant.
HintPithUnsatisfiedMetadata('Had al-'),
),
),
# Specific class.
HintPepMetadata(
hint=type[Class],
pep_sign=HintSignType,
isinstanceable_type=type,
is_pep585_builtin=True,
piths_meta=(
# Subclass of this class.
HintPithSatisfiedMetadata(Subclass),
# String constant.
HintPithUnsatisfiedMetadata('Namely,'),
# Non-subclass of this class.
HintPithUnsatisfiedMetadata(str),
),
),
# Specific class deferred with a forward reference.
HintPepMetadata(
hint=type[_TEST_PEP585_FORWARDREF_CLASSNAME],
pep_sign=HintSignType,
isinstanceable_type=type,
is_pep585_builtin=True,
piths_meta=(
# Subclass of this class.
HintPithSatisfiedMetadata(SubclassSubclass),
# String constant.
HintPithUnsatisfiedMetadata('Jabbar‐disbarred'),
# Non-subclass of this class.
HintPithUnsatisfiedMetadata(dict),
),
),
# Two or more specific classes.
HintPepMetadata(
hint=type[Union[Class, OtherClass,]],
pep_sign=HintSignType,
isinstanceable_type=type,
is_pep585_builtin=True,
piths_meta=(
# Arbitrary subclass of one class subscripting this hint.
HintPithSatisfiedMetadata(Subclass),
# Arbitrary subclass of another class subscripting this hint.
HintPithSatisfiedMetadata(OtherSubclass),
# String constant.
HintPithUnsatisfiedMetadata('Jabberings'),
# Non-subclass of any classes subscripting this hint.
HintPithUnsatisfiedMetadata(set),
),
),
# Generic class.
HintPepMetadata(
hint=type[T],
pep_sign=HintSignType,
isinstanceable_type=type,
is_pep585_builtin=True,
is_typevars=True,
piths_meta=(
# Arbitrary class.
HintPithSatisfiedMetadata(int),
# String constant.
HintPithUnsatisfiedMetadata('Obligation, and'),
),
),
# ................{ TUPLE ~ fixed }................
# Empty tuple. Yes, this is ridiculous, useless, and non-orthogonal
# with standard sequence syntax, which supports no comparable notion of
# an "empty {insert-standard-sequence-here}" (e.g., empty list): e.g.,
# >>> import typing
# >>> List[()]
# TypeError: Too few parameters for List; actual 0, expected 1
# >>> List[[]]
# TypeError: Parameters to generic types must be types. Got [].
HintPepMetadata(
hint=tuple[()],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
is_pep585_builtin=True,
piths_meta=(
# Empty tuple.
HintPithSatisfiedMetadata(()),
# Non-empty tuple containing arbitrary items.
HintPithUnsatisfiedMetadata(
pith=(
'They shucked',
'(Or huckstered, knightly rupturing veritas)',
),
# Match that the raised exception message...
exception_str_match_regexes=(
# Identifies this tuple as non-empty.
r'\bnon-empty\b',
),
),
),
),
# Fixed-length tuple of only ignorable child hints.
HintPepMetadata(
hint=tuple[Any, object,],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
is_pep585_builtin=True,
piths_meta=(
# Tuple containing arbitrary items.
HintPithSatisfiedMetadata((
'Surseance',
'Of sky, the God, the surly',
)),
# Tuple containing fewer items than required.
HintPithUnsatisfiedMetadata(
pith=('Obeisance',),
# Match that the raised exception message...
exception_str_match_regexes=(
# Compares this tuple's length to the expected length.
r'\b1 != 2\b',
),
),
),
),
# Fixed-length tuple of at least one ignorable child hint.
HintPepMetadata(
hint=tuple[float, Any, str,],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
is_pep585_builtin=True,
piths_meta=(
# Tuple containing a floating-point number, string, and integer
# (in that exact order).
HintPithSatisfiedMetadata((
20.09,
'Of an apoptosic T.A.R.P.’s torporific‐riven ecocide',
"Nightly tolled, pindololy, ol'",
)),
# String constant.
HintPithUnsatisfiedMetadata(
'Jangling (brinkmanship “Ironside”) jingoisms'),
# Tuple containing fewer items than required.
HintPithUnsatisfiedMetadata(
pith=(
999.888,
'Obese, slipshodly muslin‐shod priests had maudlin solo',
),
# Match that the raised exception message...
exception_str_match_regexes=(
# Compares this tuple's length to the expected length.
r'\b2 != 3\b',
),
),
# Tuple containing a floating-point number, a string, and a
# boolean (in that exact order).
HintPithUnsatisfiedMetadata(
pith=(
75.83,
'Unwholesome gentry ventings',
False,
),
# Match that the raised exception message...
exception_str_match_regexes=(
# Declares the index and expected type of a fixed tuple
# item *NOT* satisfying this hint.
r'\b[Tt]uple index 2 item\b',
r'\bstr\b',
),
),
),
),
# Nested fixed-length tuple of at least one ignorable child hint.
HintPepMetadata(
hint=tuple[tuple[float, Any, str,], ...],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
is_pep585_builtin=True,
piths_meta=(
# Tuple containing tuples containing a floating-point number,
# string, and integer (in that exact order).
HintPithSatisfiedMetadata((
(
90.02,
'Father — "Abstracted, OH WE LOVE YOU',
'Farther" — that',
),
(
2.9,
'To languidly Ent‐wine',
'Towards a timely, wines‐enticing gate',
),
)),
# Tuple containing a tuple containing fewer items than needed.
HintPithUnsatisfiedMetadata((
(
888.999,
'Oboes‐obsoleting tines',
),
)),
# Tuple containing a tuple containing a floating-point number,
# string, and boolean (in that exact order).
HintPithUnsatisfiedMetadata(
pith=(
(
75.83,
'Vespers’ hymnal seance, invoking',
True,
),
),
# Match that the raised exception message...
exception_str_match_regexes=(
# Declares the index and expected type of a random
# tuple item of a fixed tuple item *NOT* satisfying
# this hint.
r'\b[Tt]uple index \d+ item tuple index 2 item\b',
r'\bstr\b',
),
),
),
),
# Generic fixed-length tuple.
HintPepMetadata(
hint=tuple[S, T],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
is_pep585_builtin=True,
is_typevars=True,
piths_meta=(
# Tuple containing a floating-point number and string (in that
# exact order).
HintPithSatisfiedMetadata((
33.77,
'Legal indiscretions',
)),
# String constant.
HintPithUnsatisfiedMetadata('Leisurely excreted by'),
# Tuple containing fewer items than required.
HintPithUnsatisfiedMetadata((
'Market states‐created, stark abscess',
)),
),
),
# ................{ TUPLE ~ variadic }................
# Variadic tuple.
HintPepMetadata(
hint=tuple[str, ...],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
is_pep585_builtin=True,
piths_meta=(
# Tuple containing arbitrarily many string constants.
HintPithSatisfiedMetadata((
'Of a scantly raptured Overture,'
'Ur‐churlishly',
)),
# String constant.
HintPithUnsatisfiedMetadata(
'Of Toll‐descanted grant money'),
# Tuple containing exactly one integer. Since tuple items are
# only randomly type-checked, only a tuple of exactly one item
# enables us to match the explicit index at fault below.
HintPithUnsatisfiedMetadata(
pith=((53,)),
# Match that the raised exception message...
exception_str_match_regexes=(
# Declares the index and expected type of this tuple's
# problematic item.
r'\b[Tt]uple index 0 item\b',
r'\bstr\b',
),
),
),
),
# Generic variadic tuple.
HintPepMetadata(
hint=tuple[T, ...],
pep_sign=HintSignTuple,
isinstanceable_type=tuple,
is_pep585_builtin=True,
is_typevars=True,
piths_meta=(
# Tuple containing arbitrarily many string constants.
HintPithSatisfiedMetadata((
'Loquacious s‐age, salaciously,',
'Of regal‐seeming, freemen‐sucking Hovels, a',
)),
# String constant.
HintPithUnsatisfiedMetadata(
'Concubine enthralling contractually novel'),
),
),
# ................{ UNION ~ nested }................
# Nested unions exercising edge cases induced by Python >= 3.8
# optimizations leveraging PEP 572-style assignment expressions.
# Nested union of multiple non-"typing" types.
HintPepMetadata(
hint=list[Union[int, str,]],
pep_sign=HintSignList,
isinstanceable_type=list,
is_pep585_builtin=True,
piths_meta=(
# List containing a mixture of integer and string constants.
HintPithSatisfiedMetadata([
'Un‐seemly preening, pliant templar curs; and',
272,
]),
# String constant.
HintPithUnsatisfiedMetadata(
pith='Un‐seemly preening, pliant templar curs; and',
# Match that the exception message raised for this object
# declares the types *NOT* satisfied by this object.
exception_str_match_regexes=(
r'\bint\b',
r'\bstr\b',
),
# Match that the exception message raised for this object
# does *NOT* contain a newline or bullet delimiter.
exception_str_not_match_regexes=(
r'\n',
r'\*',
),
),
# List of bytestring items.
HintPithUnsatisfiedMetadata(
pith=[
b'Blamelessly Slur-chastened rights forthwith, affrighting',
b"Beauty's lurid, beleaguered knolls, eland-leagued and",
],
# Match that the exception message raised for this
# object...
exception_str_match_regexes=(
# Declares all non-"typing" types *NOT* satisfied by a
# random list item *NOT* satisfying this hint.
r'\bint\b',
r'\bstr\b',
# Declares the index of the random list item *NOT*
# satisfying this hint.
r'\b[Ll]ist index \d+ item\b',
),
),
),
),
# Nested union of one non-"typing" type and one "typing" type.
HintPepMetadata(
hint=Sequence[Union[str, ByteString]],
pep_sign=HintSignSequence,
isinstanceable_type=Sequence,
is_pep585_builtin=True,
piths_meta=(
# Sequence of string and bytestring constants.
HintPithSatisfiedMetadata((
b'For laconically formulaic, knavish,',
u'Or sordidly sellsword‐',
f'Horded temerities, bravely unmerited',
)),
# Integer constant.
HintPithUnsatisfiedMetadata(
pith=7898797,
# Match that the exception message raised for this object
# declares the types *NOT* satisfied by this object.
exception_str_match_regexes=(
r'\bByteString\b',
r'\bstr\b',
),
# Match that the exception message raised for this object
# does *NOT* contain a newline or bullet delimiter.
exception_str_not_match_regexes=(
r'\n',
r'\*',
),
),
# Sequence of integer items.
HintPithUnsatisfiedMetadata(
pith=((144, 233, 377, 610, 987, 1598, 2585, 4183, 6768,)),
# Match that the exception message raised for this
# object...
exception_str_match_regexes=(
# Declares all non-"typing" types *NOT* satisfied by a
# random tuple item *NOT* satisfying this hint.
r'\bByteString\b',
r'\bstr\b',
# Declares the index of the random tuple item *NOT*
# satisfying this hint.
r'\b[Tt]uple index \d+ item\b',
),
),
),
),
# Nested union of no non-"typing" type and multiple "typing" types.
HintPepMetadata(
hint=MutableSequence[Union[ByteString, Callable]],
pep_sign=HintSignMutableSequence,
isinstanceable_type=MutableSequence,
is_pep585_builtin=True,
piths_meta=(
# Mutable sequence of string and bytestring constants.
HintPithSatisfiedMetadata([
b"Canonizing Afrikaans-kennelled Mine canaries,",
lambda: 'Of a floridly torrid, hasty love — that league',
]),
# String constant.
HintPithUnsatisfiedMetadata(
pith='Effaced.',
# Match that the exception message raised for this object
# declares the types *NOT* satisfied by this object.
exception_str_match_regexes=(
r'\bByteString\b',
r'\bCallable\b',
),
# Match that the exception message raised for this object
# does *NOT* contain a newline or bullet delimiter.
exception_str_not_match_regexes=(
r'\n',
r'\*',
),
),
# Mutable sequence of string constants.
HintPithUnsatisfiedMetadata(
pith=[
'Of genteel gentle‐folk — that that Ƹsper',
'At my brand‐defaced, landless side',
],
# Match that the exception message raised for this
# object...
exception_str_match_regexes=(
# Declares all non-"typing" types *NOT* satisfied by a
# random list item *NOT* satisfying this hint.
r'\bByteString\b',
r'\bCallable\b',
# Declares the index of the random list item *NOT*
# satisfying this hint.
r'\b[Ll]ist index \d+ item\b',
),
),
),
),
))
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**NumPy-specific PEP-noncompliant type hints** (i.e., unofficial type hints
declared by the third-party :mod:`numpy` package) test data.
These hints include:
* **Typed NumPy arrays** (i.e., subscriptions of the
:attr:`numpy.typing.NDArray` type hint factory).
Caveats
----------
Although NumPy-specific type hints are technically PEP-noncompliant, the
:mod:`beartype` codebase currently treats these hints as PEP-compliant to
dramatically simplify code generation for these hints. Ergo, so we do.
'''
# ....................{ IMPORTS }....................
from beartype_test._util.mod.pytmodtest import (
is_package_numpy_typing_ndarray_deep)
# ....................{ ADDERS }....................
def add_data(data_module: 'ModuleType') -> None:
    '''
    Add :mod:`numpy`-specific type hint test data to various global containers
    declared by the passed module.
    Reduces to a noop unless :mod:`beartype` deeply supports
    ``numpy.typing.NDArray`` type hints under the active Python interpreter.
    Parameters
    ----------
    data_module : ModuleType
        Module to be added to.
    '''
    # ..................{ UNSUPPORTED }..................
    # If beartype does *NOT* deeply support "numpy.typing.NDArray" type hints
    # under the active Python interpreter, silently reduce to a noop.
    if not is_package_numpy_typing_ndarray_deep():
        return
    # Else, beartype deeply supports "numpy.typing.NDArray" type hints under
    # the active Python interpreter.
    # ..................{ IMPORTS }..................
    # Defer attribute-dependent imports.
    from beartype.typing import Tuple
    from beartype.vale import Is
    from beartype._data.hint.pep.sign.datapepsigns import (
        HintSignNumpyArray,
        HintSignTuple,
    )
    from beartype._util.mod.lib.utiltyping import import_typing_attr
    from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_9
    from beartype_test.a00_unit.data.hint.util.data_hintmetacls import (
        HintPepMetadata,
        HintPithSatisfiedMetadata,
        HintPithUnsatisfiedMetadata,
    )
    # Defer NumPy-specific imports.
    from numpy import asarray, dtype, float32, float64, floating
    from numpy.typing import NDArray
    from typing import Any
    # ..................{ TUPLES }..................
    # Add NumPy-specific test type hints to this tuple global.
    data_module.HINTS_PEP_META.extend((
        # ................{ NUMPY ~ array }................
        # Untyped unsubscripted NumPy array.
        HintPepMetadata(
            hint=NDArray,
            pep_sign=HintSignNumpyArray,
            # "NDArray" is implemented as:
            # * Under Python >= 3.9, a PEP 585-compliant generic.
            # * Under Python < 3.9, a pure-Python generic backport -- which is
            #   why this flag is conditional on the interpreter version.
            is_pep585_builtin=IS_PYTHON_AT_LEAST_3_9,
            is_type_typing=False,
            is_typing=False,
            # Oddly, NumPy implicitly parametrizes the "NDArray[Any]" type hint
            # by expanding that hint to "numpy.typing.NDArray[+ScalarType]",
            # where "+ScalarType" is a public type variable declared by the
            # "numpy" package bounded above by the "numpy.generic" abstract
            # base class for NumPy scalars. *sigh*
            is_typevars=True,
            piths_meta=(
                # NumPy array containing only 64-bit integers.
                HintPithSatisfiedMetadata(asarray((
                    1, 0, 3, 5, 2, 6, 4, 9, 2, 3, 8, 4, 1, 3, 7, 7, 5, 0,))),
                # NumPy array containing only 64-bit floats.
                HintPithSatisfiedMetadata(asarray((
                    1.3, 8.23, 70.222, 726.2431, 8294.28730, 100776.357238,))),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    pith='Ine Gerrymander‐consigned electorate sangu‐',
                    # Match that the exception message raised for this object
                    # embeds the representation of the expected class.
                    exception_str_match_regexes=(r'\bnumpy\.ndarray\b',),
                ),
            ),
        ),
        # Untyped subscripted NumPy array.
        HintPepMetadata(
            hint=NDArray[Any],
            pep_sign=HintSignNumpyArray,
            is_pep585_builtin=IS_PYTHON_AT_LEAST_3_9,
            is_type_typing=False,
            is_typing=False,
            piths_meta=(
                # NumPy array containing only 64-bit integers.
                HintPithSatisfiedMetadata(asarray((
                    1, 7, 39, 211, 1168, 6728, 40561, 256297, 1696707,))),
                # NumPy array containing only 64-bit floats.
                HintPithSatisfiedMetadata(asarray((
                    1.1, 2.4, -4.4, 32.104, 400.5392, -3680.167936,))),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    pith=(
                        'Inity my guinea‐konsealed Ğuinness’ pint '
                        'glockenspieled spells',
                    ),
                    # Match that the exception message raised for this object
                    # embeds the representation of the expected class.
                    exception_str_match_regexes=(r'\bnumpy\.ndarray\b',),
                ),
            ),
        ),
        # ................{ NUMPY ~ array : dtype : equals }................
        # Typed NumPy array subscripted by an actual data type (i.e., instance
        # of the "numpy.dtype" class).
        HintPepMetadata(
            hint=NDArray[dtype(float64)],
            pep_sign=HintSignNumpyArray,
            is_pep585_builtin=IS_PYTHON_AT_LEAST_3_9,
            is_type_typing=False,
            is_typing=False,
            piths_meta=(
                # NumPy array containing only 64-bit floats.
                HintPithSatisfiedMetadata(
                    asarray((1.0, 1.5, 1.8333, 2.08333, 2.28333, 2.45,)),
                ),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    pith='Aggrandizing strifes with‐in',
                    # Match that the exception message raised for this object
                    # embeds the representation of the expected class.
                    exception_str_match_regexes=(r'\bnumpy\.ndarray\b',),
                ),
                # NumPy array containing only 64-bit integers.
                HintPithUnsatisfiedMetadata(
                    pith=asarray((4, 36, 624, 3744, 5108, 10200, 54912,)),
                    # Match that the exception message raised for this object
                    # embeds the representation of the expected data type.
                    exception_str_match_regexes=(r'\bfloat64\b',),
                ),
            ),
        ),
        # Typed NumPy array subscripted by a scalar data type. Since scalar
        # data types are *NOT* actual data types, this exercises an edge case.
        HintPepMetadata(
            hint=NDArray[float64],
            pep_sign=HintSignNumpyArray,
            is_pep585_builtin=IS_PYTHON_AT_LEAST_3_9,
            is_type_typing=False,
            is_typing=False,
            piths_meta=(
                # NumPy array containing only 64-bit floats.
                HintPithSatisfiedMetadata(asarray(
                    (2.0, 2.5, 2.6, 2.7083, 2.716, 2.71805,))),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    pith='Silver, ore, and almost dazedly aggro‐',
                    # Match that the exception message raised for this object
                    # embeds the representation of the expected class.
                    exception_str_match_regexes=(r'\bnumpy\.ndarray\b',),
                ),
                # NumPy array containing only 64-bit integers.
                HintPithUnsatisfiedMetadata(
                    pith=asarray((1, 1, 1, 1, 2, 3, 6, 11, 23, 47, 106, 235,)),
                    # Match that the exception message raised for this object
                    # embeds the representation of the expected data type.
                    exception_str_match_regexes=(r'\bfloat64\b',),
                ),
            ),
        ),
        # ................{ NUMPY ~ array : dtype : subclass }................
        # Typed NumPy array subscripted by a data type superclass (i.e., the
        # abstract "numpy.floating" scalar base class matching all precisions).
        HintPepMetadata(
            hint=NDArray[floating],
            pep_sign=HintSignNumpyArray,
            is_pep585_builtin=IS_PYTHON_AT_LEAST_3_9,
            is_type_typing=False,
            is_typing=False,
            piths_meta=(
                # NumPy array containing only 32-bit floats.
                HintPithSatisfiedMetadata(asarray(
                    (1.2, 2.4, 3.0, 3.6, 4.0, 4.5, 4.8, 5.6, 6.0, 6.3, 7.0,),
                    dtype=float32,
                )),
                # NumPy array containing only 64-bit floats.
                HintPithSatisfiedMetadata(asarray(
                    (3.2, 5, 1, 2, 1, 8, 2, 5, 1, 3, 1, 2.8, 1, 1.5, 1, 1, 4,),
                    dtype=float64,
                )),
                # String constant.
                HintPithUnsatisfiedMetadata('Then, and'),
                # NumPy array containing only 64-bit integers.
                HintPithUnsatisfiedMetadata(asarray(
                    (3, 6, 5, 12, 7, 18, 9, 12, 11, 30, 13, 16, 15, 18, 17,))),
            ),
        ),
    ))
    # ..................{ VALIDATORS ~ hints }..................
    # "typing.Annotated" type hint factory imported from either the "typing" or
    # "typing_extensions" modules if importable *OR* "None" otherwise. By prior
    # validation, this factory *MUST* be non-"None" here.
    Annotated = import_typing_attr('Annotated')
    # Validator matching one-dimensional NumPy arrays of floats of 64-bit
    # precision, combining both validator and NumPy type hinting syntax. This
    # exercises an edge case previously generating syntactically invalid code.
    Numpy1DFloat64Array = Annotated[
        NDArray[float64], Is[lambda array: array.ndim == 1]]
    # ..................{ VALIDATORS ~ tuples }..................
    # Add NumPy-specific test type hints to this tuple global.
    data_module.HINTS_PEP_META.extend((
        # ................{ NUMPY ~ array : nested }................
        # 2-tuple of one-dimensional typed NumPy arrays of 64-bit floats.
        HintPepMetadata(
            hint=Tuple[Numpy1DFloat64Array, Numpy1DFloat64Array],
            pep_sign=HintSignTuple,
            isinstanceable_type=tuple,
            is_pep585_builtin=Tuple is tuple,
            piths_meta=(
                # 2-tuple of NumPy arrays containing only 64-bit floats.
                HintPithSatisfiedMetadata((
                    asarray((0.5, 0.75, 0.875, 0.9375, 0.96875, 0.984375)),
                    asarray((1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5)),
                )),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    pith=(
                        "A Spherically clerical,"
                        "cylindroid‐cindered cleft, and",
                    ),
                    # Match that the exception message raised for this object
                    # embeds the representation of the expected class.
                    exception_str_match_regexes=(r'\bnumpy\.ndarray\b',),
                ),
                # 2-tuple of NumPy arrays containing only integers.
                HintPithUnsatisfiedMetadata(
                    pith=(
                        asarray((1, 1, 4, 6, 14, 23, 45, 72, 126, 195, 315,)),
                        asarray((1, 0, 1, 1, 2, 2, 5, 4, 9, 10, 16, 19, 31,)),
                    ),
                    # Match that the exception message raised for this object
                    # embeds the representation of the expected data type.
                    exception_str_match_regexes=(r'\bfloat64\b',),
                ),
            ),
        ),
    ))
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **PEP-noncompliant type hints test data.**
This submodule predefines low-level global constants whose values are
PEP-noncompliant type hints, exercising known edge cases on behalf of
higher-level unit test submodules.
'''
# ....................{ TUPLES }....................
# Initialized by the _init() function below.
HINTS_NONPEP_META = []
'''
Tuple of **PEP-noncompliant type hint metadata** (i.e.,
:class:`HintNonpepMetadata` instances describing test-specific PEP-noncompliant
type hints with metadata leveraged by various testing scenarios).
'''
# ....................{ INITIALIZERS }....................
def _init() -> None:
    '''
    Initialize this submodule by populating the :data:`HINTS_NONPEP_META` list
    global via sibling data submodules and then coercing that list into a
    tuple.
    Raises
    ----------
    AssertionError
        If this global remains empty *or* contains a non-
        :class:`HintNonpepMetadata` item after initialization.
    '''
    # Defer function-specific imports.
    import sys
    from beartype_test.a00_unit.data.hint.nonpep.mod import (
        _data_nonpepbeartype,
    )
    from beartype_test.a00_unit.data.hint.nonpep.proposal import (
        _data_nonpep484,
    )
    from beartype_test.a00_unit.data.hint.util.data_hintmetacls import (
        HintNonpepMetadata)
    # Submodule globals to be redefined below.
    global HINTS_NONPEP_META
    # Current submodule, obtained via the standard idiom. See also:
    # https://stackoverflow.com/a/1676860/2809027
    CURRENT_SUBMODULE = sys.modules[__name__]
    # Tuple of all private submodules of this subpackage to be initialized.
    DATA_HINT_NONPEP_SUBMODULES = (
        _data_nonpep484,
        _data_nonpepbeartype,
    )
    # Initialize all private submodules of this subpackage.
    for data_hint_nonpep_submodule in DATA_HINT_NONPEP_SUBMODULES:
        data_hint_nonpep_submodule.add_data(CURRENT_SUBMODULE)
    # Assert these global to have been initialized by these private submodules.
    assert HINTS_NONPEP_META, 'Tuple global "HINTS_NONPEP_META" empty.'
    # Assert this global to contain only instances of its expected dataclass.
    # Note that all() is required here: asserting a bare generator expression
    # (as a prior version of this code did) is vacuously true, since generator
    # objects are themselves always truthy regardless of what they yield.
    assert all(
        isinstance(hint_nonpep_meta, HintNonpepMetadata)
        for hint_nonpep_meta in HINTS_NONPEP_META
    ), (f'{repr(HINTS_NONPEP_META)} not iterable of '
        f'"HintNonpepMetadata" instances.')
    # Coerce this list into a tuple *AFTER* initializing these private
    # submodules and thus the lower-level items required by this tuple.
    HINTS_NONPEP_META = tuple(HINTS_NONPEP_META)
# Initialize this submodule.
_init()
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`484`-compliant PEP-noncompliant type hint test data.
:pep:`484`-compliant type hints *mostly* indistinguishable from
PEP-noncompliant type hints include:
* :func:`typing.NamedTuple`, a high-level factory function deferring to the
lower-level :func:`collections.namedtuple` factory function creating and
returning :class:`tuple` instances annotated by PEP-compliant type hints.
* :func:`typing.TypedDict`, a high-level factory function creating and
returning :class:`dict` instances annotated by PEP-compliant type hints.
'''
# ....................{ TODO }....................
#FIXME: *WOOPS.* We should have read the standards a bit closer. Neither
#"typing.NamedTuple" nor "typing.TypedDict" is intended for direct use as a
#type hint. To quote official "typing" documentation:
# These are not used in annotations. They are building blocks for declaring
# types.
#
#Of course, all types *ARE* valid type hints. "typing.NamedTuple" and
#"typing.TypedDict" subclasses are types and thus also valid type hints. So, the
#superficial testing we perform below is certainly useful; we just don't need to
#do anything further, really. Phew!
# ....................{ ADDERS }....................
def add_data(data_module: 'ModuleType') -> None:
    '''
    Add :pep:`484`-compliant PEP-noncompliant type hint test data to various
    global containers declared by the passed module.
    Parameters
    ----------
    data_module : ModuleType
        Module to be added to.
    '''
    # ..................{ IMPORTS }..................
    # Defer test-specific imports.
    import sys
    from beartype import BeartypeConf
    from beartype.typing import (
        NamedTuple,
    )
    from beartype._cave._cavefast import (
        EllipsisType,
        FunctionType,
        FunctionOrMethodCType,
        MethodBoundInstanceOrClassType,
        ModuleType,
        NoneType,
        NotImplementedType,
    )
    from beartype_test.a00_unit.data.data_type import Class
    from beartype_test.a00_unit.data.hint.util.data_hintmetacls import (
        HintNonpepMetadata,
        HintPithSatisfiedMetadata,
        HintPithUnsatisfiedMetadata,
    )
    # ....................{ LOCALS }....................
    # PEP-compliant user-defined "collections.namedtuple" instance typed with
    # PEP-compliant type hints.
    NamedTupleType = NamedTuple(
        'NamedTupleType', [('fumarole', str), ('enrolled', int)])
    # ..................{ TUPLES }..................
    # Add PEP 484-specific PEP-noncompliant test type hints to this list
    # global.
    data_module.HINTS_NONPEP_META.extend((
        # ................{ NAMEDTUPLE }................
        # "NamedTuple" instances transparently reduce to standard tuples and
        # *MUST* thus be handled as non-"typing" type hints.
        HintNonpepMetadata(
            hint=NamedTupleType,
            piths_meta=(
                # Named tuple containing correctly typed items.
                HintPithSatisfiedMetadata(
                    NamedTupleType(fumarole='Leviathan', enrolled=37)),
                # String constant.
                HintPithUnsatisfiedMetadata('Of ͼarthen concordance that'),
                #FIXME: Uncomment after implementing "NamedTuple" support.
                # # Named tuple containing incorrectly typed items.
                # HintPithUnsatisfiedMetadata(
                #     pith=NamedTupleType(fumarole='Leviathan', enrolled=37),
                #     # Match that the exception message raised for this object...
                #     exception_str_match_regexes=(
                #         # Declares the name of this tuple's problematic item.
                #         r'\s[Ll]ist item 0\s',
                #     ),
                # ),
            ),
        ),
        # ................{ TYPEDDICT }................
        # "TypedDict" instances transparently reduce to dicts.
        #FIXME: Implement us up, but note when doing so that:
        #* We currently unconditionally reduce "TypedDict" to "Mapping".
        #* "TypedDict" was first introduced with Python 3.8.
        # ................{ TYPE ~ builtin }................
        # Integer.
        HintNonpepMetadata(
            hint=int,
            piths_meta=(
                # Integer constant.
                HintPithSatisfiedMetadata(42),  # <-- we went there, folks
                # String constant.
                HintPithUnsatisfiedMetadata(
                    pith='Introspectively ‘allein,’ dealigning consangui-',
                    # Match that the exception message raised for this pith
                    # contains...
                    exception_str_match_regexes=(
                        # The type *NOT* satisfied by this object.
                        r'\bint\b',
                    ),
                    # Match that the exception message raised for this pith
                    # does *NOT* contain...
                    exception_str_not_match_regexes=(
                        # A newline.
                        r'\n',
                        # A bullet delimiter.
                        r'\*',
                        # The double-quoted name of this builtin type.
                        r'"int"',
                    ),
                ),
            ),
        ),
        # Unicode string.
        HintNonpepMetadata(
            hint=str,
            piths_meta=(
                # String constant.
                HintPithSatisfiedMetadata('Glassily lassitudinal bȴood-'),
                # Byte-string constant.
                HintPithUnsatisfiedMetadata(
                    pith=b'Stains, disdain-fully ("...up-stairs!"),',
                    # Match that the exception message raised for this pith
                    # contains...
                    exception_str_match_regexes=(
                        # The type *NOT* satisfied by this object.
                        r'\bstr\b',
                        # The representation of this object preserved as is.
                        r'\sb\'Stains, disdain-fully \("...up-stairs!"\),\'\s',
                    ),
                    # Match that the exception message raised for this pith
                    # does *NOT* contain...
                    exception_str_not_match_regexes=(
                        # A newline.
                        r'\n',
                        # A bullet delimiter.
                        r'\*',
                        # The double-quoted name of this builtin type.
                        r'"str"',
                    ),
                ),
                # Integer constant.
                HintPithUnsatisfiedMetadata(
                    pith=666,  # <-- number of the beast, yo
                    # Match that the exception message raised for this pith
                    # contains...
                    exception_str_match_regexes=(
                        # The type *NOT* satisfied by this object.
                        r'\bstr\b',
                        # The representation of this object preserved as is.
                        r'\s666\s',
                    ),
                    # Match that the exception message raised for this pith
                    # does *NOT* contain...
                    exception_str_not_match_regexes=(
                        # A newline.
                        r'\n',
                        # A bullet delimiter.
                        r'\*',
                        # The double-quoted name of this builtin type.
                        r'"str"',
                    ),
                ),
            ),
        ),
        # ................{ TYPE ~ builtin : tower }................
        # Types implicated in the PEP 484-compliant implicit numeric tower only
        # optionally supported by enabling the
        # "beartype.BeartypeConf.is_pep484_tower" parameter, which expands:
        # * "float" as an alias for "float | int".
        # * "complex" as an alias for "complex | float | int".
        # Floating-point number with the implicit numeric tower disabled.
        HintNonpepMetadata(
            hint=float,
            conf=BeartypeConf(is_pep484_tower=False),
            piths_meta=(
                # Floating-point number constant.
                HintPithSatisfiedMetadata(0.110001),
                # Integer constant.
                HintPithUnsatisfiedMetadata(
                    pith=110001,
                    # Match that the exception message raised for this pith
                    # contains...
                    exception_str_match_regexes=(
                        # The type *NOT* satisfied by this object.
                        r'\bfloat\b',
                    ),
                    # Match that the exception message raised for this pith
                    # does *NOT* contain...
                    exception_str_not_match_regexes=(
                        # A newline.
                        r'\n',
                        # A bullet delimiter.
                        r'\*',
                        # The double-quoted name of this builtin type.
                        r'"float"',
                    ),
                ),
            ),
        ),
        # Floating-point number with the implicit numeric tower enabled.
        HintNonpepMetadata(
            hint=float,
            conf=BeartypeConf(is_pep484_tower=True),
            piths_meta=(
                # Floating-point number constant.
                HintPithSatisfiedMetadata(0.577215664901532860606512090082),
                # Integer constant, also satisfying "float" under the tower.
                HintPithSatisfiedMetadata(5772),
                # Complex number constant.
                HintPithUnsatisfiedMetadata(
                    pith=(1566 + 4901j),
                    # Match that the exception message raised for this pith
                    # contains...
                    exception_str_match_regexes=(
                        # The type *NOT* satisfied by this object.
                        r'\bfloat\b',
                    ),
                    # Match that the exception message raised for this pith
                    # does *NOT* contain...
                    exception_str_not_match_regexes=(
                        # A newline.
                        r'\n',
                        # A bullet delimiter.
                        r'\*',
                        # The double-quoted name of this builtin type.
                        r'"float"',
                    ),
                ),
            ),
        ),
        # Complex number with the implicit numeric tower disabled.
        HintNonpepMetadata(
            hint=complex,
            conf=BeartypeConf(is_pep484_tower=False),
            piths_meta=(
                # Complex number constant.
                HintPithSatisfiedMetadata(1.787 + 2316.5j),
                # Floating-point number constant.
                HintPithUnsatisfiedMetadata(
                    pith=0.300330000000000330033,
                    # Match that the exception message raised for this pith
                    # contains...
                    exception_str_match_regexes=(
                        # The type *NOT* satisfied by this object.
                        r'\bcomplex\b',
                    ),
                    # Match that the exception message raised for this pith
                    # does *NOT* contain...
                    exception_str_not_match_regexes=(
                        # A newline.
                        r'\n',
                        # A bullet delimiter.
                        r'\*',
                        # The double-quoted name of this builtin type.
                        r'"complex"',
                    ),
                ),
            ),
        ),
        # Complex number with the implicit numeric tower enabled.
        HintNonpepMetadata(
            hint=complex,
            conf=BeartypeConf(is_pep484_tower=True),
            piths_meta=(
                # Complex number constant.
                HintPithSatisfiedMetadata(2.622 + 575.5j),
                # Floating-point number constant, satisfying "complex" under
                # the tower.
                HintPithSatisfiedMetadata(0.8346268),
                # Integer constant, satisfying "complex" under the tower.
                HintPithSatisfiedMetadata(1311),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    pith='Park-ed trails',
                    # Match that the exception message raised for this pith
                    # contains...
                    exception_str_match_regexes=(
                        # The type *NOT* satisfied by this object.
                        r'\bcomplex\b',
                    ),
                    # Match that the exception message raised for this pith
                    # does *NOT* contain...
                    exception_str_not_match_regexes=(
                        # A newline.
                        r'\n',
                        # A bullet delimiter.
                        r'\*',
                        # The double-quoted name of this builtin type.
                        r'"complex"',
                    ),
                ),
            ),
        ),
        # ................{ TYPE ~ builtin : fake }................
        # Fake builtin types (i.e., types that are *NOT* builtin but which
        # nonetheless erroneously masquerade as being builtin), exercising edge
        # cases in @beartype code generation.
        #
        # See also:
        # * The "beartype._data.cls.datacls.TYPES_BUILTIN_FAKE" set.
        # Fake builtin ellipsis type.
        HintNonpepMetadata(
            hint=EllipsisType,
            piths_meta=(
                # Ellipsis singleton.
                HintPithSatisfiedMetadata(...),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    'Masterless decree, venomless, which'),
            ),
        ),
        # Fake builtin pure-Python function type.
        HintNonpepMetadata(
            hint=FunctionType,
            piths_meta=(
                # Pure-Python function (this very function, conveniently).
                HintPithSatisfiedMetadata(add_data),
                # String constant.
                HintPithUnsatisfiedMetadata('Nomenclature weather‐vanes of'),
            ),
        ),
        # Fake builtin C-based function type.
        HintNonpepMetadata(
            hint=FunctionOrMethodCType,
            piths_meta=(
                # C-based function.
                HintPithSatisfiedMetadata(len),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    'Nominally unswain, autodidactic idiocracy, less a'),
            ),
        ),
        # Fake builtin bound method type.
        HintNonpepMetadata(
            hint=MethodBoundInstanceOrClassType,
            piths_meta=(
                # Bound method.
                HintPithSatisfiedMetadata(Class().instance_method),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    'ç‐omically gnomical whitebellied burden’s empathy of'),
            ),
        ),
        # Fake builtin module type.
        HintNonpepMetadata(
            hint=ModuleType,
            piths_meta=(
                # Imported module.
                HintPithSatisfiedMetadata(sys),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    'Earpiece‐piecemealed, mealy straw headpiece-'),
            ),
        ),
        # Fake builtin "None" singleton type.
        HintNonpepMetadata(
            hint=NoneType,
            piths_meta=(
                # "None" singleton.
                HintPithSatisfiedMetadata(None),
                # String constant.
                HintPithUnsatisfiedMetadata(
                    'Earned peace appeasement easements'),
            ),
        ),
        # Fake builtin "NotImplemented" type.
        HintNonpepMetadata(
            hint=NotImplementedType,
            piths_meta=(
                # "NotImplemented" singleton.
                HintPithSatisfiedMetadata(NotImplemented),
                # String constant.
                HintPithUnsatisfiedMetadata('Than'),
            ),
        ),
    ))
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype-specific PEP-noncompliant type hints** (i.e., unofficial type hints
supported *only* by the :mod:`beartype.beartype` decorator) test data.
These hints include:
* **Fake builtin types** (i.e., types that are *not* builtin but which
nonetheless erroneously masquerade as being builtin).
* **Tuple unions** (i.e., tuples containing *only* standard classes and
forward references to standard classes).
'''
# ....................{ IMPORTS }....................
from beartype_test.a00_unit.data.hint.util.data_hintmetacls import (
HintNonpepMetadata,
HintPithSatisfiedMetadata,
HintPithUnsatisfiedMetadata,
)
# ....................{ ADDERS }....................
def add_data(data_module: 'ModuleType') -> None:
    '''
    Add :mod:`beartype`-specific PEP-noncompliant type hint test data to
    various global containers declared by the passed module.
    Parameters
    ----------
    data_module : ModuleType
        Module to be added to.
    '''
    # ..................{ TUPLES }..................
    # Add beartype-specific PEP-noncompliant test type hints to this list
    # global. (The "HINTS_NONPEP_META" attribute is extended in place and is
    # thus assumed to be a mutable sequence — TODO confirm against the data
    # module declaring it.)
    data_module.HINTS_NONPEP_META.extend((
        # ................{ TUPLE UNION }................
        # Beartype-specific tuple unions (i.e., tuples containing one or more
        # isinstanceable classes).
        # Tuple union of one isinstanceable class. Note that a 1-tuple is
        # intentionally exercised here, as it is syntactically distinct from
        # (but semantically equivalent to) annotating with that class directly.
        HintNonpepMetadata(
            hint=(str,),
            piths_meta=(
                # String constant.
                HintPithSatisfiedMetadata('Pinioned coin tokens'),
                # Byte-string constant.
                HintPithUnsatisfiedMetadata(
                    pith=b'Murkily',
                    # Match that the exception message raised for this pith
                    # declares the types *NOT* satisfied by this object.
                    exception_str_match_regexes=(
                        r'\bstr\b',
                    ),
                    # Match that the exception message raised for this pith
                    # does *NOT* contain a newline or bullet delimiter.
                    exception_str_not_match_regexes=(
                        r'\n',
                        r'\*',
                    ),
                ),
            ),
        ),
        # Tuple union of two or more isinstanceable classes.
        HintNonpepMetadata(
            hint=(int, str),
            piths_meta=(
                # Integer constant.
                HintPithSatisfiedMetadata(12),
                # String constant.
                HintPithSatisfiedMetadata('Smirk‐opined — openly'),
                # Byte-string constant.
                HintPithUnsatisfiedMetadata(
                    pith=b'Betokening',
                    # Match that the exception message raised for this object
                    # declares the types *NOT* satisfied by this object.
                    exception_str_match_regexes=(
                        r'\bint\b',
                        r'\bstr\b',
                    ),
                    # Match that the exception message raised for this object
                    # does *NOT* contain a newline or bullet delimiter.
                    exception_str_not_match_regexes=(
                        r'\n',
                        r'\*',
                    ),
                ),
            ),
        ),
    ))
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Testing-specific **type hint metadata class hierarchy** (i.e., hierarchy of
classes encapsulating sample type hints instantiated by the
:mod:`beartype_test.a00_unit.data.hint` submodules).
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype._conf.confcls import (
BEARTYPE_CONF_DEFAULT,
BeartypeConf,
)
from typing import Optional
from collections.abc import Iterable
# ....................{ PRIVATE }....................
_EXCEPTION_STR_MATCH_REGEXES_MANDATORY = (
# Ensure *ALL* exception messages contain the substring "type hint".
# Exception messages *NOT* containing this substring are overly ambiguous
# and thus effectively erroneous.
r'\btype hint\b',
)
'''
Tuple of all **mandatory exception matching regexes** (i.e., r''-style
uncompiled regular expression strings, each unconditionally matching a
substring of the exception message expected to be raised by wrapper functions
when either passed or returning *any* possible pith).
'''
_NoneTypeOrType = (type, type(None))
'''
2-tuple matching both classes and the ``None`` singleton.
'''
# ....................{ CLASSES ~ hint : [un]satisfied }....................
class HintPithSatisfiedMetadata(object):
    '''
    **Type hint-satisfying pith metadata** (i.e., dataclass whose instance
    variables describe an object satisfying a type hint when either passed as a
    parameter *or* returned as a value annotated by that hint).

    Attributes
    ----------
    pith : object
        Arbitrary object satisfying this hint when either passed as a
        parameter *or* returned as a value annotated by this hint. (The prior
        docstring erroneously stated the opposite, copy-pasted from the
        "HintPithUnsatisfiedMetadata" subclass.)
    is_context_manager : bool
        If this pith is a **context manager** (i.e., object defining both the
        ``__exit__`` and ``__enter__`` dunder methods required to satisfy the
        context manager protocol), this boolean is either:
        * ``True`` if callers should preserve this context manager as is (e.g.,
          by passing this context manager to the decorated callable).
        * ``False`` if callers should safely open and close this context
          manager to its context *and* replace this context manager with that
          context (e.g., by passing this context to the decorated callable).
        If this pith is *not* a context manager, this boolean is ignored.
        Defaults to ``False``.
    is_pith_factory : bool
        ``True`` only if this pith is actually a **pith factory** (i.e.,
        callable accepting *no* parameters and dynamically creating and
        returning the value to be used as the desired pith, presumably by
        passing this value to the decorated callable). Defaults to ``False``.
    '''

    # ..................{ INITIALIZERS }..................
    def __init__(
        self,

        # Mandatory parameters.
        pith: object,

        # Optional parameters.
        is_context_manager: bool = False,
        is_pith_factory: bool = False,
    ) -> None:
        # Validate the optional boolean parameters. The "pith" parameter is
        # intentionally unvalidated: any object whatsoever is a valid pith.
        assert isinstance(is_context_manager, bool), (
            f'{repr(is_context_manager)} not boolean.')
        assert isinstance(is_pith_factory, bool), (
            f'{repr(is_pith_factory)} not boolean.')

        # Classify all passed parameters.
        self.pith = pith
        self.is_context_manager = is_context_manager
        self.is_pith_factory = is_pith_factory

    # ..................{ STRINGIFIERS }..................
    def __repr__(self) -> str:
        return '\n'.join((
            f'{self.__class__.__name__}(',
            f' pith={repr(self.pith)},',
            f' is_context_manager={repr(self.is_context_manager)},',
            f' is_pith_factory={repr(self.is_pith_factory)},',
            f')',
        ))
class HintPithUnsatisfiedMetadata(HintPithSatisfiedMetadata):
    '''
    **Type hint-unsatisfying pith metadata** (i.e., dataclass whose instance
    variables describe an object *not* satisfying a type hint when either
    passed as a parameter *or* returned as a value annotated by that hint).

    Attributes
    ----------
    exception_str_match_regexes : Iterable[str]
        Iterable of zero or more r''-style uncompiled regular expression
        strings, each matching a substring of the exception message expected
        to be raised by wrapper functions when either passed or returning this
        ``pith``. Defaults to the empty tuple.
    exception_str_not_match_regexes : Iterable[str]
        Iterable of zero or more r''-style uncompiled regular expression
        strings, each *not* matching a substring of the exception message
        expected to be raised by wrapper functions when either passed or
        returning this ``pith``. Defaults to the empty tuple.
    '''

    # ..................{ INITIALIZERS }..................
    def __init__(
        self,
        *args,

        # Optional keyword-only parameters, defaulting to empty tuples.
        exception_str_match_regexes: 'Iterable[str]' = (),
        exception_str_not_match_regexes: 'Iterable[str]' = (),
        **kwargs
    ) -> None:
        # Validate that both parameters are iterables of strings.
        assert isinstance(exception_str_match_regexes, Iterable), (
            f'{repr(exception_str_match_regexes)} not iterable.')
        assert isinstance(exception_str_not_match_regexes, Iterable), (
            f'{repr(exception_str_not_match_regexes)} not iterable.')
        assert all(
            isinstance(regex, str)
            for regex in exception_str_match_regexes
        ), f'{repr(exception_str_match_regexes)} not iterable of regexes.'
        assert all(
            isinstance(regex, str)
            for regex in exception_str_not_match_regexes
        ), f'{repr(exception_str_not_match_regexes)} not iterable of regexes.'

        # Initialize our superclass with all variadic parameters.
        super().__init__(*args, **kwargs)

        # Classify the passed non-match regexes as is.
        self.exception_str_not_match_regexes = exception_str_not_match_regexes

        # Classify the passed match regexes suffixed by the mandatory match
        # regexes required of *ALL* unsatisfied piths. Note this concatenation
        # implicitly assumes the caller passed a tuple rather than an arbitrary
        # iterable.
        self.exception_str_match_regexes = (
            exception_str_match_regexes +
            _EXCEPTION_STR_MATCH_REGEXES_MANDATORY
        )

    # ..................{ STRINGIFIERS }..................
    def __repr__(self) -> str:
        return '\n'.join((
            f'{self.__class__.__name__}(',
            f' pith={repr(self.pith)},',
            f' is_context_manager={repr(self.is_context_manager)},',
            f' is_pith_factory={repr(self.is_pith_factory)},',
            f' exception_str_match_regexes={repr(self.exception_str_match_regexes)},',
            f' exception_str_not_match_regexes={repr(self.exception_str_not_match_regexes)},',
            f')',
        ))
# ....................{ CLASSES ~ hint : superclass }....................
class HintNonpepMetadata(object):
    '''
    **PEP-noncompliant type hint metadata** (i.e., dataclass whose instance
    variables describe a type hint that is either PEP-noncompliant or *mostly*
    indistinguishable from a PEP-noncompliant type hint with metadata
    applicable to various testing scenarios).

    Examples of PEP-compliant type hints *mostly* indistinguishable from
    PEP-noncompliant type hints include:
    * :func:`typing.NamedTuple`, a high-level factory function deferring to the
      lower-level :func:`collections.namedtuple` factory function creating and
      returning :class:`tuple` instances annotated by PEP-compliant type hints.
    * :func:`typing.TypedDict`, a high-level factory function creating and
      returning :class:`dict` instances annotated by PEP-compliant type hints.

    Attributes
    ----------
    hint : object
        Type hint to be tested.
    conf : BeartypeConf
        **Beartype configuration** (i.e., self-caching dataclass encapsulating
        all settings configuring type-checking for this type hint).
    is_ignorable : bool
        ``True`` only if this hint is safely ignorable by the
        :func:`beartype.beartype` decorator. Defaults to ``False``.
    is_supported : bool
        ``True`` only if this hint is currently supported by
        the :func:`beartype.beartype` decorator. Defaults to ``True``.
    piths_meta : Iterable[HintPithSatisfiedMetadata]
        Iterable of zero or more **(un)satisfied metadata objects** (i.e.,
        :class:`HintPithSatisfiedMetadata` and
        :class:`HintPithUnsatisfiedMetadata` instances), each describing an
        arbitrary object either satisfying or violating this hint when passed
        as a parameter *or* returned as a value annotated by this hint.
        Defaults to the empty tuple.
    '''

    # ..................{ INITIALIZERS }..................
    def __init__(
        self,
        *,

        # Mandatory keyword-only parameters.
        hint: object,

        # Optional keyword-only parameters.
        conf: BeartypeConf = BEARTYPE_CONF_DEFAULT,
        is_ignorable: bool = False,
        is_supported: bool = True,
        piths_meta: 'Iterable[HintPithSatisfiedMetadata]' = (),
    ) -> None:
        # Validate passed non-variadic parameters.
        assert isinstance(conf, BeartypeConf), (
            f'{repr(conf)} not beartype configuration.')
        assert isinstance(is_ignorable, bool), (
            f'{repr(is_ignorable)} not bool.')
        assert isinstance(is_supported, bool), (
            f'{repr(is_supported)} not bool.')
        assert isinstance(piths_meta, Iterable), (
            f'{repr(piths_meta)} not iterable.')

        # Note that the loop variable is intentionally named "pith_meta"
        # rather than "piths_meta", avoiding the prior shadowing of the
        # parameter being validated by its own loop variable. Note also that
        # "HintPithUnsatisfiedMetadata" subclasses
        # "HintPithSatisfiedMetadata", so this accepts both.
        assert all(
            isinstance(pith_meta, HintPithSatisfiedMetadata)
            for pith_meta in piths_meta
        ), (
            f'{repr(piths_meta)} not iterable of '
            f'"HintPithSatisfiedMetadata" and '
            f'"HintPithUnsatisfiedMetadata" instances.')

        # Classify all passed parameters.
        self.hint = hint
        self.conf = conf
        self.is_ignorable = is_ignorable
        self.is_supported = is_supported
        self.piths_meta = piths_meta

    # ..................{ STRINGIFIERS }..................
    def __repr__(self) -> str:
        return '\n'.join((
            f'{self.__class__.__name__}(',
            f' hint={repr(self.hint)},',
            f' conf={repr(self.conf)},',
            f' is_ignorable={repr(self.is_ignorable)},',
            f' is_supported={repr(self.is_supported)},',
            f' piths_meta={repr(self.piths_meta)},',
            f')',
        ))
# ....................{ CLASSES ~ hint : subclass }....................
class HintPepMetadata(HintNonpepMetadata):
    '''
    **PEP-compliant type hint metadata** (i.e., dataclass whose instance
    variables describe a PEP-compliant type hint with metadata applicable to
    various testing scenarios).

    Attributes
    ----------
    pep_sign : HintSign
        **Sign** (i.e., arbitrary object uniquely identifying this
        PEP-compliant type hint) if this hint is uniquely identified by such a
        sign *or* ``None`` otherwise. Examples of PEP-compliant type hints
        *not* uniquely identified by such attributes include those reducing to
        standard builtins on instantiation such as:
        * :class:`typing.NamedTuple` reducing to :class:`tuple`.
        * :class:`typing.TypedDict` reducing to :class:`dict`.
    is_args : bool, optional
        ``True`` only if this hint is subscripted by one or more **arguments**
        (i.e., PEP-compliant type hints that are *not* type variables) and/or
        **type variables** (i.e., :class:`typing.TypeVar` instances). Defaults
        to ``True`` only if the machine-readable representation of this hint
        contains one or more "[" delimiters.
    is_pep585_builtin : bool, optional
        ``True`` only if this hint is a `PEP 585`-compliant builtin. If
        ``True``, then :attr:`is_type_typing` *must* be ``False``. Defaults to
        ``True`` only if this hint originates from an isinstanceable origin
        type whose unqualified name prefixes the machine-readable
        representation of this hint (e.g., "list[str]" is prefixed by "list").
    is_pep585_generic : bool, optional
        ``True`` only if this hint is a `PEP 585`-compliant generic. If
        ``True``, then :attr:`is_type_typing` *must* be ``False``. Defaults to
        ``False``.
    is_typevars : bool, optional
        ``True`` only if this hint is subscripted by one or more **type
        variables** (i.e., :class:`typing.TypeVar` instances). Defaults to
        ``False``.
    is_type_typing : bool, optional
        ``True`` only if this hint's class is defined by the :mod:`typing`
        module. If ``True``, then :attr:`is_pep585_builtin` and
        :attr:`is_pep585_generic` *must* both be ``False``. Defaults to
        either:
        * If either :attr:`is_pep585_builtin` *or* :attr:`is_pep585_generic`
          are ``True``, ``False``.
        * Else, ``True``.
    is_typing : bool, optional
        ``True`` only if this hint itself is defined by the :mod:`typing`
        module. Defaults to :attr:`is_type_typing`.
    isinstanceable_type : Optional[type]
        **Origin type** (i.e., non-:mod:`typing` class such that *all* objects
        satisfying this hint are instances of this class) originating this hint
        if this hint originates from a non-:mod:`typing` class *or* ``None``
        otherwise (i.e., if this hint does *not* originate from such a class).
        Defaults to ``None``.
    generic_type : Optional[type]
        Subscripted origin type associated with this hint if any *or* ``None``
        otherwise (i.e., if this hint is associated with *no* such type).
        Defaults to either:
        * If this hint is subscripted, :attr:`isinstanceable_type`.
        * Else, ``None``.
    typehint_cls : Optional[Type[beartype.door.TypeHint]]
        Concrete :class:`beartype.door.TypeHint` subclass responsible for
        handling this hint if any *or* ``None`` otherwise (e.g., if the
        :mod:`beartype.door` submodule has yet to support this hint).

    All remaining keyword arguments are passed as is to the superclass
    :meth:`HintNonpepMetadata.__init__` method.
    '''

    # ..................{ INITIALIZERS }..................
    def __init__(
        self,
        *,

        # Mandatory keyword-only parameters.
        pep_sign: 'beartype._data.hint.pep.sign.datapepsigncls.HintSign',

        # Optional keyword-only parameters.
        is_args: Optional[bool] = None,
        is_pep585_builtin: Optional[bool] = None,
        is_pep585_generic: Optional[bool] = None,
        is_typevars: bool = False,
        is_type_typing: Optional[bool] = None,
        is_typing: Optional[bool] = None,
        isinstanceable_type: Optional[type] = None,
        generic_type: Optional[type] = None,
        typehint_cls: 'Optional[Type[beartype.door.TypeHint]]' = None,
        **kwargs
    ) -> None:

        # Defer test-specific imports.
        from beartype._data.hint.pep.sign.datapepsigncls import HintSign
        from beartype.door import TypeHint

        # Validate passed non-variadic parameters.
        assert isinstance(is_typevars, bool), (
            f'{repr(is_typevars)} not bool.')
        assert isinstance(pep_sign, HintSign), f'{repr(pep_sign)} not sign.'
        assert isinstance(isinstanceable_type, _NoneTypeOrType), (
            f'{repr(isinstanceable_type)} neither class nor "None".')

        # Initialize our superclass with all remaining variadic parameters.
        super().__init__(**kwargs)

        # Machine-readable representation of this hint.
        hint_repr = repr(self.hint)

        # Conditionally default all unpassed parameters.
        if is_args is None:
            # Default this parameter to true only if the machine-readable
            # representation of this hint contains "[": e.g., "List[str]".
            is_args = '[' in hint_repr
        if is_pep585_builtin is None:
            # Default this parameter to true only if...
            is_pep585_builtin = (
                # This hint originates from an origin type *AND*...
                isinstanceable_type is not None and
                # The machine-readable representation of this hint is prefixed
                # by the unqualified name of this origin type (e.g., "list[str]
                # " is prefixed by "list"), suggesting this hint to be a PEP
                # 585-compliant builtin.
                hint_repr.startswith(isinstanceable_type.__name__)
            )
        if is_pep585_generic is None:
            # Default this parameter to false, because we can't think of
            # anything better.
            is_pep585_generic = False
        if is_type_typing is None:
            # Default this parameter to the negation of all PEP 585-compliant
            # boolean parameters. By definition, PEP 585-compliant type hints
            # are *NOT* defined by the "typing" module and vice versa.
            is_type_typing = not (is_pep585_builtin or is_pep585_generic)
        if is_typing is None:
            # Default this parameter to true only if this hint's class is
            # defined by the "typing" module.
            is_typing = is_type_typing
        if generic_type is None:
            # Default this parameter to this hint's type origin only if this
            # hint is subscripted.
            generic_type = isinstanceable_type if is_args else None

        # Defer validating parameters defaulting to "None" until *AFTER*
        # initializing these parameters above.
        assert isinstance(is_args, bool), (
            f'{repr(is_args)} not bool.')
        assert isinstance(is_pep585_builtin, bool), (
            f'{repr(is_pep585_builtin)} not bool.')
        assert isinstance(is_pep585_generic, bool), (
            f'{repr(is_pep585_generic)} not bool.')
        assert isinstance(is_type_typing, bool), (
            f'{repr(is_type_typing)} not bool.')
        assert isinstance(is_typing, bool), (
            f'{repr(is_typing)} not bool.')
        assert isinstance(generic_type, _NoneTypeOrType), (
            f'{repr(generic_type)} neither class nor "None".')

        # Note that a stray trailing comma previously terminated the second
        # operand of this "and", reducing the parenthesized expression to a
        # non-empty (and hence unconditionally truthy) 1-tuple and thus
        # silently disabling this validation entirely.
        assert typehint_cls is None or (
            isinstance(typehint_cls, type) and
            issubclass(typehint_cls, TypeHint)
        ), (
            f'{repr(typehint_cls)} neither '
            f'"beartype.door.TypeHint" subclass nor "None".'
        )

        # Validate that the "is_pep585_builtin" and "is_type_typing" parameters
        # are *NOT* both true. Note, however, that both can be false (e.g., for
        # PEP 484-compliant user-defined generics).
        assert not (
            (is_pep585_builtin or is_pep585_generic) and is_type_typing), (
            f'Mutually incompatible boolean parameters '
            f'is_type_typing={repr(is_type_typing)} and either '
            f'is_pep585_builtin={repr(is_pep585_builtin)} or '
            f'is_pep585_generic={repr(is_pep585_generic)} enabled.'
        )

        # Classify all passed parameters.
        self.generic_type = generic_type
        self.is_args = is_args
        self.is_pep585_builtin = is_pep585_builtin
        self.is_pep585_generic = is_pep585_generic
        self.is_typevars = is_typevars
        self.is_type_typing = is_type_typing
        self.is_typing = is_typing
        self.isinstanceable_type = isinstanceable_type
        self.pep_sign = pep_sign
        self.typehint_cls = typehint_cls

    # ..................{ STRINGIFIERS }..................
    def __repr__(self) -> str:
        return '\n'.join((
            f'{self.__class__.__name__}(',
            f' hint={repr(self.hint)},',
            f' conf={repr(self.conf)},',
            f' pep_sign={repr(self.pep_sign)},',
            f' typehint_cls={repr(self.typehint_cls)},',
            f' generic_type={repr(self.generic_type)},',
            f' isinstanceable_type={repr(self.isinstanceable_type)},',
            f' is_args={repr(self.is_args)},',
            f' is_ignorable={repr(self.is_ignorable)},',
            f' is_pep585_builtin={repr(self.is_pep585_builtin)},',
            f' is_pep585_generic={repr(self.is_pep585_generic)},',
            f' is_supported={repr(self.is_supported)},',
            f' is_typevars={repr(self.is_typevars)},',
            f' is_type_typing={repr(self.is_type_typing)},',
            f' is_typing={repr(self.is_typing)},',
            f' piths_meta={repr(self.piths_meta)},',
            f')',
        ))
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
:mod:`pytest` **PEP-agnostic type hint utilities.**
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ....................{ CLASSES }....................
class HintPithMetadata(object):
    '''
    Dataclass aggregating all relevant type hint- and pith-specific metadata
    yielded by each iteration of the :func:`iter_hints_piths_meta` generator.

    Attributes
    ----------
    hint_meta : HintNonpepMetadata
        Metadata describing the currently iterated type hint.
    pith_meta : HintPithSatisfiedMetadata
        Metadata describing this pith.
    pith : object
        Object either satisfying or violating this hint.
    '''

    # ..................{ INITIALIZERS }..................
    def __init__(
        self,
        hint_meta: 'HintNonpepMetadata',
        pith_meta: 'HintPithSatisfiedMetadata',
        pith: object,
    ) -> None:
        '''
        Initialize this dataclass.

        Parameters
        ----------
        hint_meta : HintNonpepMetadata
            Metadata describing the currently iterated type hint.
        pith_meta : Union[HintPithSatisfiedMetadata, HintPithUnsatisfiedMetadata]
            Metadata describing this pith.
        pith : object
            Object either satisfying or violating this hint.
        '''
        # This is a trivial test-time carrier: intentionally classify all
        # passed parameters as is *WITHOUT* validation.
        self.hint_meta, self.pith_meta, self.pith = hint_meta, pith_meta, pith
# ....................{ ITERATORS }....................
def iter_hints_piths_meta() -> 'Generator[HintPithMetadata]':
    '''
    Generator iteratively yielding test-specific type hints with metadata
    leveraged by various testing scenarios -- including both PEP-compliant and
    -noncompliant type hints.
    Each supported hint is iterated twice per pith (see "RANGE_2" below),
    exercising @beartype memoization across repeated decorations.
    Yields
    ----------
    HintPithMetadata
        Metadata describing the currently iterated hint.
    '''
    # Defer test-specific imports.
    from beartype._util.utilobject import is_object_context_manager
    from beartype_test.a00_unit.data.hint.data_hint import HINTS_META
    from beartype_test.a00_unit.data.hint.util.data_hintmetacls import (
        HintPithSatisfiedMetadata,
        HintPithUnsatisfiedMetadata,
    )
    from beartype_test._util.pytcontext import noop_context_manager
    # Tuple of two arbitrary values used to trivially iterate twice below.
    RANGE_2 = (None, None)
    # For each predefined PEP-compliant type hint and associated metadata...
    for hint_meta in HINTS_META:
        # print(f'Type-checking PEP type hint {repr(hint_meta.hint)}...')
        # If this hint is currently unsupported, continue to the next.
        if not hint_meta.is_supported:
            continue
        # Else, this hint is currently supported.
        # Repeat the following logic twice. Why? To exercise memoization across
        # repeated @beartype decorations on different callables annotated by
        # the same hints.
        for _ in RANGE_2:
            # For each pith either satisfying or violating this hint...
            for pith_meta in hint_meta.piths_meta:
                # Assert this metadata is an instance of the desired dataclass.
                # Note that "HintPithUnsatisfiedMetadata" subclasses
                # "HintPithSatisfiedMetadata", so this accepts both.
                assert isinstance(pith_meta, HintPithSatisfiedMetadata)
                # Pith to be type-checked against this hint, defined as...
                pith = (
                    # If this pith is actually a pith factory (i.e., callable
                    # accepting *NO* parameters and dynamically creating and
                    # returning the value to be used as the desired pith), call
                    # this factory and localize its return value.
                    pith_meta.pith()
                    if pith_meta.is_pith_factory else
                    # Else, localize this pith as is.
                    pith_meta.pith
                )
                # print(f'Type-checking PEP type hint {repr(hint_meta.hint)} against {repr(pith)}...')
                # Context manager under which to validate this pith against
                # this hint, defined as either...
                pith_context_manager = (
                    # This pith itself if both...
                    pith
                    if (
                        # This pith is a context manager *AND*...
                        is_object_context_manager(pith) and
                        # This pith should be safely opened and closed as a
                        # context rather than preserved as a context manager...
                        not pith_meta.is_context_manager
                    ) else
                    # Else, the noop context manager yielding this pith.
                    noop_context_manager(pith)
                )
                # With this pith safely opened and closed as a context...
                with pith_context_manager as pith_context:
                    # If this pith does *NOT* satisfy this hint...
                    if isinstance(pith_meta, HintPithUnsatisfiedMetadata):
                        # Assert that iterables of uncompiled regular
                        # expression expected to match and *NOT* match this
                        # message are *NOT* strings, as commonly occurs when
                        # accidentally omitting a trailing comma in tuples
                        # containing only one string: e.g.,
                        # * "('This is a tuple, yo.',)" is a 1-tuple containing
                        # one string.
                        # * "('This is a string, bro.')" is a string *NOT*
                        # contained in a 1-tuple.
                        assert not isinstance(
                            pith_meta.exception_str_match_regexes, str)
                        assert not isinstance(
                            pith_meta.exception_str_not_match_regexes, str)
                    # Yield this metadata to the caller. Note that the yielded
                    # "pith" is "pith_context": either this pith as is *OR*
                    # the value produced by entering this pith as a context.
                    yield HintPithMetadata(
                        hint_meta=hint_meta,
                        pith_meta=pith_meta,
                        pith=pith_context,
                    )
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`570` **data submodule.**
This submodule exercises :pep:`570` support for positional-only parameters
implemented in the :func:`beartype.beartype` decorator by declaring callables
accepting one or more positional-only parameters. For safety, these callables
are intentionally isolated from the main codebase.
Caveats
----------
**This submodule requires the active Python interpreter to target at least
Python 3.8.0.** If this is *not* the case, importing this submodule raises a
:class:`SyntaxError` exception.
'''
# ....................{ IMPORTS }....................
from typing import Union
# ....................{ CALLABLES }....................
def pep570_posonly(
    now_take_away_that_flesh: Union[bytearray, str],
    take_away_the_teeth: Union[bool, str] = ('and the tongue'),
    /,
) -> Union[list, str]:
    '''
    Arbitrary :pep:`570`-compliant callable passed a mandatory and optional
    positional-only parameter, all annotated with PEP-compliant type hints.
    '''
    # Delimit both positional-only parameters with a newline.
    return '\n'.join((now_take_away_that_flesh, take_away_the_teeth))
def pep570_posonly_flex_varpos_kwonly(
    all_of_your_nightmares: Union[bytearray, str],
    for_a_time_obscured: Union[bool, str] = (
        'As by a shining brainless beacon'),
    /,
    or_a_blinding_eclipse: Union[bytes, str] = (
        'Or a blinding eclipse of the many terrible shapes of this world,'),
    *you_are_calm_and_joyful: Union[float, str],
    your_special_plan: Union[int, str],
) -> Union[list, str]:
    '''
    Arbitrary :pep:`570`-compliant callable passed a mandatory positional-only
    parameter, optional positional-only parameter, flexible parameter, variadic
    positional parameter, and keyword-only parameter, all annotated with
    PEP-compliant type hints.
    '''
    # Delimit all passed parameters with newlines. Note that the variadic
    # positional parameters are pre-joined into a single segment, preserving
    # the empty segment (and thus consecutive newlines) when none are passed.
    return '\n'.join((
        all_of_your_nightmares,
        for_a_time_obscured,
        or_a_blinding_eclipse,
        '\n'.join(you_are_calm_and_joyful),
        your_special_plan,
    ))
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`563` **cultural data submodule.**
This submodule exercises edge-case :pep:`563` support implemented in the
:func:`beartype.beartype` decorator against a `recently submitted issue <issue
#49_>`__. For reproducibility, this edge case is intentionally isolated from
the comparable :mod:`beartype_test.a00_unit.data.pep.pep563.data_pep563_poem`
submodule.
Caveats
----------
**This submodule requires the active Python interpreter to target at least
Python 3.7.0.** If this is *not* the case, importing this submodule raises an
:class:`AttributeError` exception.
.. _issue #49:
https://github.com/beartype/beartype/issues/49
'''
# ....................{ IMPORTS }....................
from __future__ import annotations
from beartype import beartype
from beartype.typing import Union
# ....................{ CONSTANTS }....................
COLORS = 'red, gold, and green'
'''
Arbitrary string constant returned by the
:meth:`Chameleon.like_my_dreams` class method.
'''
CLING = 'our love is strong'
'''
Arbitrary string constant returned by the :meth:`Karma.when_we_cling` class
method.
'''
DREAMS = 'Loving would be easy'
'''
Arbitrary string constant returned by the :meth:`Karma.if_your_colors` class
method.
'''
Karma = 'you come and go'
'''
Arbitrary string constant whose attribute name intentionally conflicts with
that of a subsequently declared class.
'''
# ....................{ CLASSES }....................
@beartype
class Chameleon(object):
    '''
    Arbitrary class declaring arbitrary methods.
    Attributes
    ----------
    colors : str
        Arbitrary string.
    '''
    # ..................{ INITIALIZER }..................
    def __init__(self, colors: str) -> None:
        '''
        Arbitrary object initializer.
        '''
        self.colors = colors
    # ..................{ METHODS }..................
    # NOTE: The direct method decoration below is intentionally commented out;
    # this class is instead decorated by @beartype above, which validates
    # that @beartype resolves directly self-referential type hints (i.e., type
    # hints that are directly self-references to the declaring class).
    # @beartype
    @classmethod
    def like_my_dreams(cls) -> Chameleon:
        '''
        Arbitrary class method decorated by the :mod:`beartype.beartype`
        decorator creating and returning an arbitrary instance of this class
        and thus annotated as returning the same class, exercising a pernicious
        edge case unique to :pep:`563`-specific forward references.
        Superficially, the PEP-compliant type hint annotating this return
        appears to recursively (and thus erroneously) refer to the class
        currently being declared. Had :pep:`563` *not* been conditionally
        enabled above via the ``from __future__ import annotations`` statement,
        this recursive reference would have induced a low-level parse-time
        exception from the active Python interpreter.
        In actuality, this recursive reference is silently elided away at
        runtime by the active Python interpreter. Under :pep:`563`-specific
        postponement (i.e., type hint unparsing), this interpreter internally
        stringifies this type hint into a relative forward reference to this
        class, thus obviating erroneous recursion at method declaration time.
        Ergo, this method's signature is actually the following:
            def like_my_dreams(cls) -> 'Chameleon':
        '''
        return Chameleon(COLORS)
    # Intentionally avoid decorating this static method directly by @beartype to
    # validate that @beartype resolves indirectly self-referential type hints
    # (i.e., parent type hints subscripted by one or more child type hints that
    # are self-references to the declaring class).
    #
    # Note that indirectly self-referential type hints *CANNOT* be properly
    # resolved for methods directly decorated by @beartype. Due to
    # decoration-time constraints, this class itself *MUST* be decorated.
    @staticmethod
    def when_we_cling() -> Union[Chameleon, complex]:
        '''
        Arbitrary static method decorated by the :mod:`beartype.beartype`
        decorator creating and returning an arbitrary instance of this class
        and thus annotated as returning a union containing the same class and
        one or more arbitrary child type hints, exercising a pernicious
        edge case unique to :pep:`563`-specific self-referential types.
        Note that this and the comparable :meth:`like_my_dreams` class method
        exercise different edge cases. That method exercises an edge case
        concerning forward references, as a method annotated as returning the
        type to which this method is bound under :pep:`563` is syntactically
        indistinguishable from a standard forward reference without :pep:`563`.
        This method, on the other hand, exercises an edge case concerning
        self-referential types, as a method annotated as returning an arbitrary
        type hint subscripted by the type to which this method is bound under
        :pep:`563` is syntactically *distinguishable* from a standard forward
        reference without :pep:`563`.
        Specifically, this method exercises a `recently submitted issue <issue
        #152_>`__.
        .. _issue #152:
           https://github.com/beartype/beartype/issues/152
        '''
        return Chameleon(CLING)
class Karma(object):
    '''
    Arbitrary class whose name intentionally conflicts with that of a
    previously declared global of this submodule, declaring arbitrary methods.

    Attributes
    ----------
    dreams : str
        Arbitrary string.
    '''

    # ..................{ INITIALIZER }..................
    def __init__(self, dreams: str):
        '''
        Arbitrary object initializer.
        '''
        self.dreams = dreams

    # ..................{ METHODS ~ class }..................
    @classmethod
    @beartype
    def if_your_colors(cls) -> Karma:
        '''
        Arbitrary class method decorated by the :func:`beartype.beartype`
        decorator creating and returning an arbitrary instance of this class
        and thus annotated as returning the same class, exercising a pernicious
        edge case unique to :pep:`563`-specific forward references.

        See Also
        ----------
        :meth:`Chameleon.like_my_dreams`
        '''
        # Instantiate this class with the "DREAMS" global declared elsewhere
        # in this submodule.
        return Karma(DREAMS)
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`563` **poetic data submodule.**
This submodule exercises :pep:`563` support implemented in the
:func:`beartype.beartype` decorator by enabling this support with a leading
``from __future__ import annotations`` statement and then declaring a callable
decorated by that decorator. External unit tests are expected to conditionally
import this submodule if the active Python interpreter targets at least
Python 3.7.0 and then call that callable.
Caveats
----------
**This submodule requires the active Python interpreter to target at least
Python 3.7.0.** If this is *not* the case, importing this submodule raises an
:class:`AttributeError` exception.
'''
# ....................{ IMPORTS }....................
from __future__ import annotations
from beartype import beartype
from beartype.typing import List
from beartype._cave._cavefast import IntType
from beartype_test.a00_unit.data.data_type import decorator
from collections.abc import Callable
from typing import Union
# ....................{ CONSTANTS }....................
# Tuple of all stanzas comprising the "End Poem" closing the game Minecraft,
# each containing zero or more "{player_name}" format substrings. Sample
# callables declared below interpolate player names into these stanzas to
# exercise PEP 563 edge cases against realistically long string data.
_MINECRAFT_END_TXT_STANZAS = (
    'I see the player you mean.',
    '{player_name}?',
    'Yes. Take care. It has reached a higher level now. It can read our thoughts.',
    "That doesn't matter. It thinks we are part of the game.",
    'I like this player. It played well. It did not give up.',
    'It is reading our thoughts as though they were words on a screen.',
    'That is how it chooses to imagine many things, when it is deep in the dream of a game.',
    'Words make a wonderful interface. Very flexible. And less terrifying than staring at the reality behind the screen.',
    'They used to hear voices. Before players could read. Back in the days when those who did not play called the players witches, and warlocks. And players dreamed they flew through the air, on sticks powered by demons.',
    'What did this player dream?',
    'This player dreamed of sunlight and trees. Of fire and water. It dreamed it created. And it dreamed it destroyed. It dreamed it hunted, and was hunted. It dreamed of shelter.',
    'Hah, the original interface. A million years old, and it still works. But what true structure did this player create, in the reality behind the screen?',
    'It worked, with a million others, to sculpt a true world in a fold of the [scrambled], and created a [scrambled] for [scrambled], in the [scrambled].',
    'It cannot read that thought.',
    'No. It has not yet achieved the highest level. That, it must achieve in the long dream of life, not the short dream of a game.',
    'Does it know that we love it? That the universe is kind?',
    'Sometimes, through the noise of its thoughts, it hears the universe, yes.',
    'But there are times it is sad, in the long dream. It creates worlds that have no summer, and it shivers under a black sun, and it takes its sad creation for reality.',
    'To cure it of sorrow would destroy it. The sorrow is part of its own private task. We cannot interfere.',
    'Sometimes when they are deep in dreams, I want to tell them, they are building true worlds in reality. Sometimes I want to tell them of their importance to the universe. Sometimes, when they have not made a true connection in a while, I want to help them to speak the word they fear.',
    'It reads our thoughts.',
    'Sometimes I do not care. Sometimes I wish to tell them, this world you take for truth is merely [scrambled] and [scrambled], I wish to tell them that they are [scrambled] in the [scrambled]. They see so little of reality, in their long dream.',
    'And yet they play the game.',
    'But it would be so easy to tell them...',
    'Too strong for this dream. To tell them how to live is to prevent them living.',
    'I will not tell the player how to live.',
    'The player is growing restless.',
    'I will tell the player a story.',
    'But not the truth.',
    'No. A story that contains the truth safely, in a cage of words. Not the naked truth that can burn over any distance.',
    'Give it a body, again.',
    'Yes. Player...',
    'Use its name.',
    '{player_name}. Player of games.',
    'Good.',
    'Take a breath, now. Take another. Feel air in your lungs. Let your limbs return. Yes, move your fingers. Have a body again, under gravity, in air. Respawn in the long dream. There you are. Your body touching the universe again at every point, as though you were separate things. As though we were separate things.',
    'Who are we? Once we were called the spirit of the mountain. Father sun, mother moon. Ancestral spirits, animal spirits. Jinn. Ghosts. The green man. Then gods, demons. Angels. Poltergeists. Aliens, extraterrestrials. Leptons, quarks. The words change. We do not change.',
    "We are the universe. We are everything you think isn't you. You are looking at us now, through your skin and your eyes. And why does the universe touch your skin, and throw light on you? To see you, player. To know you. And to be known. I shall tell you a story.",
    'Once upon a time, there was a player.',
    'The player was you, {player_name}.',
    'Sometimes it thought itself human, on the thin crust of a spinning globe of molten rock. The ball of molten rock circled a ball of blazing gas that was three hundred and thirty thousand times more massive than it. They were so far apart that light took eight minutes to cross the gap. The light was information from a star, and it could burn your skin from a hundred and fifty million kilometres away.',
    'Sometimes the player dreamed it was a miner, on the surface of a world that was flat, and infinite. The sun was a square of white. The days were short; there was much to do; and death was a temporary inconvenience.',
    'Sometimes the player dreamed it was lost in a story.',
    'Sometimes the player dreamed it was other things, in other places. Sometimes these dreams were disturbing. Sometimes very beautiful indeed. Sometimes the player woke from one dream into another, then woke from that into a third.',
    'Sometimes the player dreamed it watched words on a screen.',
    "Let's go back.",
    'The atoms of the player were scattered in the grass, in the rivers, in the air, in the ground. A woman gathered the atoms; she drank and ate and inhaled; and the woman assembled the player, in her body.',
    "And the player awoke, from the warm, dark world of its mother's body, into the long dream.",
    'And the player was a new story, never told before, written in letters of DNA. And the player was a new program, never run before, generated by a sourcecode a billion years old. And the player was a new human, never alive before, made from nothing but milk and love.',
    'You are the player. The story. The program. The human. Made from nothing but milk and love.',
    "Let's go further back.",
    "The seven billion billion billion atoms of the player's body were created, long before this game, in the heart of a star. So the player, too, is information from a star. And the player moves through a story, which is a forest of information planted by a man called Julian, on a flat, infinite world created by a man called Markus, that exists inside a small, private world created by the player, who inhabits a universe created by...",
    'Shush. Sometimes the player created a small, private world that was soft and warm and simple. Sometimes hard, and cold, and complicated. Sometimes it built a model of the universe in its head; flecks of energy, moving through vast empty spaces. Sometimes it called those flecks "electrons" and "protons".',
    'Sometimes it called them "planets" and "stars".',
    'Sometimes it believed it was in a universe that was made of energy that was made of offs and ons; zeros and ones; lines of code. Sometimes it believed it was playing a game. Sometimes it believed it was reading words on a screen.',
    'You are the player, reading words...',
    'Shush... Sometimes the player read lines of code on a screen. Decoded them into words; decoded words into meaning; decoded meaning into feelings, emotions, theories, ideas, and the player started to breathe faster and deeper and realised it was alive, it was alive, those thousand deaths had not been real, the player was alive',
    'You. You. You are alive.',
    'and sometimes the player believed the universe had spoken to it through the sunlight that came through the shuffling leaves of the summer trees',
    "and sometimes the player believed the universe had spoken to it through the light that fell from the crisp night sky of winter, where a fleck of light in the corner of the player's eye might be a star a million times as massive as the sun, boiling its planets to plasma in order to be visible for a moment to the player, walking home at the far side of the universe, suddenly smelling food, almost at the familiar door, about to dream again",
    'and sometimes the player believed the universe had spoken to it through the zeros and ones, through the electricity of the world, through the scrolling words on a screen at the end of a dream',
    'and the universe said I love you',
    'and the universe said you have played the game well',
    'and the universe said everything you need is within you',
    'and the universe said you are stronger than you know',
    'and the universe said you are the daylight',
    'and the universe said you are the night',
    'and the universe said the darkness you fight is within you',
    'and the universe said the light you seek is within you',
    'and the universe said you are not alone',
    'and the universe said you are not separate from every other thing',
    'and the universe said you are the universe tasting itself, talking to itself, reading its own code',
    'and the universe said I love you because you are love.',
    'And the game was over and the player woke up from the dream. And the player began a new dream. And the player dreamed again, dreamed better. And the player was the universe. And the player was love.',
    'You are the player.',
    'Wake up.',
)
# ....................{ CALLABLES ~ module }....................
# Callables exercising module-scoped edge cases under PEP 563.
def get_minecraft_end_txt(player_name: str) -> str:
    '''
    Callable *not* decorated by :func:`beartype.beartype`.

    The ``test_pep_563()`` unit test tests that :func:`beartype.beartype`
    silently accepts callables with one or more non-postponed annotations under
    PEP 563 by manually resolving all postponed annotations on this callable
    and then manually passing this callable to :func:`beartype.beartype`.

    Parameters
    ----------
    player_name : str
        Name interpolated into each "{player_name}" format substring.

    Returns
    ----------
    str
        Full poem text with the passed name interpolated throughout.
    '''

    # Concatenate all stanzas into a single format template.
    template = ''.join(_MINECRAFT_END_TXT_STANZAS)

    # Interpolate the passed player name into that template.
    return template.format(player_name=player_name)
@beartype
def get_minecraft_end_txt_stanza(
    player_name: str, stanza_index: IntType) -> str:
    '''
    Callable decorated by :func:`beartype.beartype`.

    Parameters
    ----------
    player_name : str
        Name interpolated into the selected stanza.
    stanza_index : IntType
        0-based index of the stanza to be returned.

    Returns
    ----------
    str
        Stanza at this index with the passed name interpolated.
    '''

    # Stanza residing at this index, containing zero or more "{player_name}"
    # format substrings.
    stanza = _MINECRAFT_END_TXT_STANZAS[stanza_index]

    # Interpolate the passed player name into this stanza.
    return stanza.format(player_name=player_name)
# ....................{ CALLABLES ~ closure }....................
# Callables exercising closure-scoped edge cases under PEP 563.
@beartype
def get_minecraft_end_txt_closure(player_name: str) -> Callable:
    '''
    Callable decorated by :func:`beartype.beartype`, internally declaring and
    returning a closure also decorated by :func:`beartype.beartype` and
    annotated by PEP-compliant type hints accessible only as local variables.

    Parameters
    ----------
    player_name : str
        Name interpolated into each matching stanza by the returned closure.

    Returns
    ----------
    Callable
        Closure returning all stanzas containing a passed substring.
    '''

    # PEP-compliant type hints accessible only as local variables to the
    # following closure, exercising a significant edge case in PEP 563 support.
    StringLike = Union[str, int, bytes]
    ListOfStrings = List[str]

    # Intentionally delimited by one layer of decoration to exercise edges.
    @decorator
    @beartype
    @decorator
    def get_minecraft_end_txt_substr(substr: StringLike) -> ListOfStrings:
        '''
        Closure decorated by both :func:`beartype.beartype` and one or more
        decorators that are *not* :func:`beartype.beartype`, annotated by
        PEP-compliant type hints accessible only as local variables.
        '''
        # Return all stanzas containing this substring (stringified if
        # needed), with the player name interpolated into each such stanza.
        return [
            stanza.format(player_name=player_name)
            for stanza in _MINECRAFT_END_TXT_STANZAS
            if str(substr) in stanza
        ]
    # print(f'mc.__qualname__: {get_minecraft_end_txt_substr.__qualname__}')

    # Return this closure.
    return get_minecraft_end_txt_substr
@beartype
def get_minecraft_end_txt_closure_factory(player_name: str) -> Callable:
    '''
    Callable decorated by :func:`beartype.beartype`, internally declaring and
    returning a closure also decorated by :func:`beartype.beartype` and
    annotated by PEP-compliant type hints accessible only as local variables,
    internally declaring and returning *another* nested closure also decorated
    by :func:`beartype.beartype` and annotated by PEP-compliant type hints
    accessible only as local variables in a manner exercising edge case
    precedence in scope aggregation.

    Parameters
    ----------
    player_name : str
        Name interpolated into each matching stanza by the innermost closure.

    Returns
    ----------
    Callable
        Outer closure, itself returning the inner closure.
    '''

    # PEP-compliant type hints accessible only as local variables to the
    # following closure, exercising a significant edge case in PEP 563 support.
    IntLike = Union[float, int]
    ReturnType = Callable
    InnerReturnType = List[str]

    # Intentionally delimited by two layers of decoration to exercise edges.
    @decorator
    @decorator
    @beartype
    @decorator
    @decorator
    def get_minecraft_end_txt_closure_outer(
        stanza_len_min: IntLike) -> ReturnType:
        '''
        Outer closure decorated by :func:`beartype.beartype` and one or more
        decorators that are *not* :func:`beartype.beartype`, annotated by
        PEP-compliant type hints accessible only as local variables, internally
        declaring and returning *another* nested closure also decorated by
        :func:`beartype.beartype` and annotated by PEP-compliant type hints
        accessible only as local variables in a manner exercising edge case
        precedence in scope aggregation.
        '''

        # PEP-compliant type hints accessible only as local variables to the
        # following closure, overriding those declared above and again
        # exercising a significant edge case in PEP 563 support.
        StringLike = Union[str, bytes]
        ReturnType = InnerReturnType

        # Intentionally delimited by no layers of decoration to exercise edges.
        @beartype
        def get_minecraft_end_txt_closure_inner(
            stanza_len_max: IntLike,
            substr: StringLike,
        ) -> ReturnType:
            '''
            Inner closure decorated only by :func:`beartype.beartype` (note
            the intentional absence of non-:func:`beartype.beartype`
            decorators above), annotated by PEP-compliant type hints
            accessible only as local variables.
            '''
            # Return all stanzas whose lengths reside in the passed closed
            # range and which contain the passed substring, with the player
            # name interpolated into each such stanza.
            return [
                stanza.format(player_name=player_name)
                for stanza in _MINECRAFT_END_TXT_STANZAS
                if (
                    len(stanza) >= int(stanza_len_min) and
                    len(stanza) <= int(stanza_len_max) and
                    str(substr) in stanza
                )
            ]

        # Return this closure.
        return get_minecraft_end_txt_closure_inner
    # print(f'mc.__qualname__: {get_minecraft_end_txt_substr.__qualname__}')

    # Return this closure.
    return get_minecraft_end_txt_closure_outer
# ....................{ CLASSES }....................
# Classes exercising module-scoped edge cases under PEP 563.
#FIXME: We should probably nest this class in a function to exercise
#everything, but this would seem to suffice for now as an initial foray.
class MinecraftEndTxtUnscrambler(object):
    '''
    Class declaring a method decorated by :func:`beartype.beartype` annotated
    by PEP-compliant type hints accessible only as class variables.
    '''

    # PEP-compliant type hints accessible only as class variables to the
    # following method, exercising a significant edge case in PEP 563 support.
    NoneIsh = None
    TextIsh = Union[str, bytes]

    @beartype
    def __init__(self, unscrambling: TextIsh) -> NoneIsh:
        '''
        Method decorated by :func:`beartype.beartype`, annotated by
        PEP-compliant type hints accessible only as class variables.

        Parameters
        ----------
        unscrambling : TextIsh
            Text replacing each "[scrambled]" placeholder substring.
        '''
        # List of all placeholder-bearing stanzas, with each "[scrambled]"
        # placeholder replaced by the passed text.
        _minecraft_end_txt_stanzas_unscrambled = [
            minecraft_end_txt_stanza.replace('[scrambled]', unscrambling)
            for minecraft_end_txt_stanza in _MINECRAFT_END_TXT_STANZAS
            if '[scrambled]' in minecraft_end_txt_stanza
        ]

        # PEP-compliant type hints accessible only as local variables to the
        # following closure, exercising an edge case in PEP 563 support.
        BoolIsh = Union[bool, float, int]

        @beartype
        def get_minecraft_end_txt_unscrambled_stanza_closure(
            self, is_stanza_last: BoolIsh) -> self.TextIsh:
            '''
            Closure decorated by :func:`beartype.beartype`, annotated by
            PEP-compliant type hints accessible only as both class and local
            variables.
            '''
            # Return either the first or second unscrambled stanza, selected
            # by coercing the passed bool-like value into the indices 0 or 1.
            return _minecraft_end_txt_stanzas_unscrambled[
                int(bool(is_stanza_last))]

        # Reuse this closure as a bound method.
        self.get_minecraft_end_txt_unscrambled_stanza = (
            get_minecraft_end_txt_unscrambled_stanza_closure)
# ....................{ CALLABLES ~ limit }....................
#FIXME: Hilariously, we can't even unit test whether the
#beartype._decor._pep.pep563._die_if_hint_repr_exceeds_child_limit() function
#behaves as expected. Why not? Because some combination of the "typing" module
#and/or PEP 563 were implemented so space-inefficiently that even attempting to
#instantiate a PEP-compliant type hint that would violate the child limit
#(i.e., the maximum size for fixed lists used by the @beartype decorator to
#implement its breadth-first search (BFS) across child hints) induces a memory
#error from the CPython parser -- complete with non-human-readable debug
#"stderr" output that I highly doubt CPython is even supposed to publicly emit:
#
# beartype_test/unit/data/data_pep563.py:180: MemoryError
# ---------------------------------------------------- Captured stderr call -----------------------------------------------------
# s_push: parser stack overflow
#
#"s_push: parser stack overflow"? Really? What the pablum is this nonsense?
#
#Naturally, this implies that end users are by definition prohibited from
#violating our package-specific child limit without our ever needing to even
#explicitly validate this limit. This is ridiculous, absurd, and yet another
#proverbial nail in the coffin for annotation-centric PEPs. I don't know who
#was tasked with implementing this API, but they clearly had little to no
#coherent idea of what they were doing.
# from beartype._util.cache.pool.utilcachepoollistfixed import FIXED_LIST_SIZE_MEDIUM
# from typing import List, Union
#
# # This global is defined below for sanity.
# _HINT_BIG = None
# '''
# PEP-compliant type hint guaranteed to raise an exception from the private
# :func:`beartype._decor._pep.pep563._die_if_hint_repr_exceeds_child_limit`
# function, which imposes strict limits on the number of child hints permitted to
# be transitively nested in any top-level PEP-compliant type hint.
# '''
#
#
# def _init() -> None:
# '''
# Define the :data:`_HINT_BIG` global declared above.
# '''
#
# # Enable this global to be defined.
# global _HINT_BIG
#
# # This fixed length subtracted by 1 divided by 3. Just 'cause.
# SIZE_LESS_BIG = (FIXED_LIST_SIZE_MEDIUM-1) / 3
#
# # Assert the fixed length of the cached fixed lists constraining the number
# # of child hints permitted to be transitively nested in any top-level
# # PEP-compliant type hint is evenly divisible by 3 when subtracted by 1,
# # thus producing whole integers when subject to the above operation.
# #
# # Oddly, this condition applies to a surprising number of powers of two:
# # >>> (1024 - 1) % 3
# # 341
# # >>> (256 - 1) % 3
# # 85
# assert SIZE_LESS_BIG.is_integer(), (
# '{} not integer.'.format(SIZE_LESS_BIG))
#
# # Constrain this length to an integer as expected by the range() builtin.
# SIZE_LESS_BIG = int(SIZE_LESS_BIG)
#
# # Python expression used to dynamically define this global below.
# _HINT_BIG_EXPR = '{}{}{}'.format(
# # Substring prefixing this hint.
# ''.join('Union[int, List[' for _ in range(SIZE_LESS_BIG)),
# # Substring subscripting the last "List" child hint of this hint.
# 'str',
# # Substring suffixing this hint.
# ''.join(']]' for _ in range(SIZE_LESS_BIG)),
# )
#
# # Dynamically define this global, as "FIXED_LIST_SIZE_MEDIUM" is typically too large to
# # allow this global to be statically defined.
# _HINT_BIG = eval(_HINT_BIG_EXPR, globals())
#
#
# # Define the "_HINT_BIG" global declared above.
# _init()
#
#
# # Callable annotated by this global *AFTER* defining this global above.
# #
# # Note that this callable is intentionally *NOT* decorated by @beartype here,
# # as doing so would immediately raise an exception that we would rather
# # explicitly test for elsewhere.
# def player_was_love(player_was_the_universe: _HINT_BIG) -> _HINT_BIG:
# return player_was_the_universe
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **importable data submodule.**
This submodule exercises dynamic importability by providing an importable
submodule defining an arbitrary attribute. External unit tests are expected to
dynamically import this attribute from this submodule.
'''
# ....................{ ATTRIBUTES }....................
attrgood = (
"I started to see human beings as little lonesome, water based, pink "
"meat, life forms pushing air through themselves and making noises that "
"the other little pieces of meat seemed to understand. I was thinking to "
"myself, \"There's five billion people here but we've never been more "
"isolated.\" The only result of the aggressive individualism we pursue is "
"that you lose sight of your compassion and we go to bed at night "
"thinking \"Is this all there is?\" because we don't feel fulfilled."
)
'''
Arbitrary module-scope attribute.
'''
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **unimportable data submodule.**
This submodule exercises dynamic importability by providing an unimportable
submodule defining an arbitrary attribute. External unit tests are expected to
dynamically import this attribute from this submodule.
'''
# ....................{ EXCEPTIONS }....................
# Unconditionally raise an exception at importation time, guaranteeing this
# submodule to be unimportable. External unit tests exercise graceful handling
# of unimportable modules by attempting to dynamically import this submodule.
raise ValueError(
    'Can you imagine a fulfilled society? '
    'Whoa, what would everyone do?'
)
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype module getter data submodule.**
This submodule predefines module-scoped objects of various types with well-known
line numbers guaranteed to remain constant, exercising issues with respect to
line numbers in higher-level test submodules.
'''
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# CAUTION: For completeness, unit tests test the *EXACT* contents of this file.
# Changes to this file must thus be synchronized with those tests.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ....................{ CALLABLES ~ non-lambda }....................
def like_snakes_that_watch_their_prey():
    '''
    Arbitrary non-lambda function physically declared on-disk by this submodule.
    '''
    return 'from their far fountains,'
# ....................{ CALLABLES ~ lambda }....................
ozymandias = lambda: 'I met a traveller from an antique land,'
'''
Arbitrary lambda function declared on-disk.
'''
which_yet_survive = eval("lambda: 'stamped on these lifeless things'")
'''
Arbitrary lambda function declared in-memory.
'''
# ....................{ CLASSES }....................
class SlowRollingOn(object):
    '''
    Arbitrary class physically declared on-disk by this submodule.
    '''

    THERE = 'many a precipice'
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype generic callable code data submodule.**
This submodule predefines low-level class constants exercising known edge
cases on behalf of the higher-level
:mod:`beartype_test.a00_unit.a20_util.func.test_utilfunccode` submodule. Unit
tests defined in that submodule are sufficiently fragile that *no* other
submodule should import from this submodule.
'''
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# CAUTION: For completeness, unit tests test the *EXACT* contents of this file.
# Changes to this file must thus be synchronized with those tests.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ....................{ IMPORTS }....................
from beartype._util.func.utilfuncmake import make_func
# ....................{ CALLABLES ~ dynamic }....................
of_vapours = make_func(
func_name='vaulted_with_all_thy_congregated_might',
func_code='''
def vaulted_with_all_thy_congregated_might():
return 'Of vapours, from whose solid atmosphere'
''',
func_doc='''
Arbitrary callable dynamically declared in-memory.
''')
# ....................{ CALLABLES ~ physical }....................
def will_be_the_dome():
    '''
    Arbitrary non-lambda function physically declared on-disk by this submodule.
    '''
    return 'of a vast sepulchre'
# ....................{ CALLABLES ~ physical : lambda }....................
thou_dirge = lambda: 'Of the dying year, to which this closing night'
'''
Arbitrary lambda function physically declared by this submodule.
'''
yellow = lambda: 'and black,', lambda: 'and pale,', lambda: 'and hectic red,'
'''
3-tuple of three arbitrary lambda functions physically declared by this
submodule, intentionally declared on the same line so as to induce edge cases
in lambda function detection code.
'''
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`570` **data submodule.**
This submodule exercises :pep:`570` support implemented in the
:func:`beartype.beartype` decorator by declaring callables accepting one or
more **positional-only parameters** (i.e., parameters that *must* be passed
positionally, syntactically followed in the signatures of their callables by
the :pep:`570`-compliant ``/,`` pseudo-parameter).
Caveats
----------
**This submodule requires the active Python interpreter to target Python >=
3.8.** If this is *not* the case, importing this submodule raises a
:class:`SyntaxError` exception. In particular, this submodule *must* not be
imported from module scope. If this submodule is imported from module scope
*and* the active Python interpreter targets Python < 3.8, :mod:`pytest` raises
non-human-readable exceptions at test collection time resembling:
/usr/lib64/python3.6/site-packages/_pytest/python.py:578: in _importtestmodule
mod = import_path(self.fspath, mode=importmode)
/usr/lib64/python3.6/site-packages/_pytest/pathlib.py:531: in import_path
importlib.import_module(module_name)
/usr/lib64/python3.6/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
<frozen importlib._bootstrap>:994: in _gcd_import
???
<frozen importlib._bootstrap>:971: in _find_and_load
???
<frozen importlib._bootstrap>:955: in _find_and_load_unlocked
???
<frozen importlib._bootstrap>:665: in _load_unlocked
???
/usr/lib64/python3.6/site-packages/_pytest/assertion/rewrite.py:161: in exec_module
source_stat, co = _rewrite_test(fn, self.config)
/usr/lib64/python3.6/site-packages/_pytest/assertion/rewrite.py:354: in _rewrite_test
tree = ast.parse(source, filename=fn_)
/usr/lib64/python3.6/ast.py:35: in parse
return compile(source, filename, mode, PyCF_ONLY_AST)
E File "/home/leycec/py/beartype/beartype_test/a00_unit/a20_util/func/test_utilfuncarg.py", line 237
E /,
E ^
E SyntaxError: invalid syntax
'''
# ....................{ IMPORTS }....................
from typing import Union
# ....................{ CALLABLES }....................
def func_args_2_posonly_mixed(
    before_spreading_his_black_wings: Union[bytearray, str],
    reaching_for_the_skies: Union[bool, str] = 'in this forest',
    /,
) -> Union[list, str]:
    '''
    Arbitrary :pep:`570`-compliant callable passed a mandatory and optional
    positional-only parameter, all annotated with PEP-compliant type hints.

    Returns
    ----------
    Union[list, str]
        Both positional-only values concatenated with a delimiting newline.
    '''

    # Concatenate both positional-only values left-to-right, delimited by a
    # newline, preserving the original order of "+" operations.
    first_half = before_spreading_his_black_wings + '\n'
    return first_half + reaching_for_the_skies
def func_args_10_all_except_flex_mandatory(
    in_solitude_i_wander,
    through_the_vast_enchanted_forest,
    the_surrounding_skies='are one',
    /,
    torn_apart_by='the phenomenon of lightning',
    rain_is_pouring_down='my now shivering shoulders',
    *in_the_rain_my_tears_are_forever_lost,
    the_darkened_oaks_are_my_only_shelter,
    red_leaves_are_blown_by='the wind',
    an_ebony_raven_now_catches='my eye.',
    **sitting_in_calmness,
) -> str:
    '''
    Arbitrary :pep:`570`-compliant callable accepting all possible kinds of
    parameters, including both mandatory and optional variants of these kinds
    except mandatory flexible parameters.

    Since callables cannot by definition accept both optional positional-only
    parameters *and* mandatory flexible parameters, this callable necessarily
    omits the latter in favour of the former.

    Returns
    ----------
    str
        Arbitrary string, ignoring all passed parameters.
    '''
    # Arbitrary local variable declared in the body of this callable.
    before_spreading_his_black_wings = 'Reaching for the skies.'
    return before_spreading_his_black_wings
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **sample callable** submodule.
This submodule predefines sample pure-Python callables exercising known edge
cases on behalf of higher-level unit test submodules.
'''
# ....................{ IMPORTS }....................
from typing import Union
# ....................{ CALLABLES }....................
def func_args_0() -> str:
    '''
    Arbitrary callable accepting *no* parameters.
    '''

    # Fixed sample text unconditionally returned by this callable.
    wilderness = 'And so there grew great tracts of wilderness,'
    return wilderness
def func_args_1_flex_mandatory(had_one_fair_daughter: str) -> str:
    '''
    Arbitrary callable accepting one mandatory flexible parameter.
    '''

    # Fixed sample text returned regardless of the passed parameter.
    verse = 'But man was less and less, till Arthur came.'
    return verse
def func_args_1_varpos(*and_in_her_his_one_delight: str) -> str:
    '''
    Arbitrary callable accepting one variadic positional parameter.
    '''

    # Fixed sample text returned regardless of the passed positionals.
    verse = 'Wherein the beast was ever more and more,'
    return verse
def func_args_2_flex_mandatory(
    thick_with_wet_woods: str, and_many_a_beast_therein: str) -> str:
    '''
    Arbitrary callable accepting two or more mandatory flexible parameters.
    '''

    # Fixed sample text returned regardless of the passed parameters.
    verse = 'For here between the man and beast we die.'
    return verse
def func_args_3_flex_mandatory_optional_varkw(
    and_the_wolf_tracks_her_there: str,
    how_hideously: str = "Its shapes are heap'd around!",
    **rude_bare_and_high
) -> str:
    '''
    Arbitrary callable accepting one mandatory flexible parameter, one optional
    flexible parameter, and one variadic keyword parameter.

    This test exercises a recent failure in our pre-0.10.0 release cycle:
        https://github.com/beartype/beartype/issues/78
    '''

    # Fixed sample text returned regardless of the passed parameters.
    closing_line = "Ghastly, and scarr'd, and riven.—Is this the scene"
    return closing_line
# ....................{ TESTS ~ pep 3102                  }....................
# Keyword-only keywords require PEP 3102 compliance, which has thankfully been
# available since Python >= 3.0.
def func_args_1_kwonly_mandatory(
    *, when_can_I_take_you_from_this_place: str) -> str:
    '''
    Arbitrary callable accepting one mandatory keyword-only parameter.
    '''

    # Fixed sample text returned regardless of the passed keyword.
    lament = 'When is the word but a sigh?'
    return lament
def func_args_2_kwonly_mixed(
    *,
    white_summer: Union[dict, str] = 'So far I have gone to see you again.',
    hiding_your_face_in_the_palm_of_your_hands: Union[set, str],
) -> Union[tuple, str]:
    '''
    Arbitrary callable passed one optional keyword-only parameter and one
    mandatory keyword-only parameter (in that non-standard and quite
    counter-intuitive order), each annotated with PEP-compliant type hints.

    Returns
    ----------
    Union[tuple, str]
        Both keyword-only values concatenated with a delimiting newline.
    '''

    # Concatenate both keyword-only values left-to-right, delimited by a
    # newline, preserving the original order of "+" operations.
    first_half = white_summer + '\n'
    return first_half + hiding_your_face_in_the_palm_of_your_hands
def func_args_5_flex_mandatory_varpos_kwonly_varkw(
    we_are_selfish_men,
    oh_raise_us_up,
    *and_give_us,
    return_to_us_again='Of inward happiness.',
    **manners_virtue_freedom_power,
) -> str:
    '''
    Arbitrary callable accepting two mandatory flexible parameters, one
    variadic positional parameter, one optional keyword-only parameter (defined
    implicitly by appearing *after* the variadic positional parameter rather
    than after an explicit "*" delimiter), and one variadic keyword parameter.
    '''

    # Arbitrary local variable declared in the body of this callable.
    #
    # NOTE(review): presumably exercised by callers introspecting this
    # callable's code object (e.g., its local variable names) -- confirm
    # against callers before renaming or inlining this variable.
    thy_soul_was_like_a_star = 'and dwelt apart:'
    return thy_soul_was_like_a_star
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Project-wide functional static type-checker tests.**
This submodule functionally tests the this project's compliance with
third-party static type-checkers and hence :pep:`561`.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype_test._util.mark.pytskip import (
skip_if_ci,
skip_if_pypy,
skip_unless_package,
skip_unless_pathable,
)
# ....................{ TESTS ~ mypy }....................
#FIXME: Consider submitting as a StackOverflow post. Dis iz l33t, yo!
#FIXME: Sadly, diz iz no longer l33t. Mypy broke its runtime API. This test now
#spuriously fails under CI with a non-human-readable exception message
#resembling:
# TypeError: 'mypy' is not a package
#
#The solution? Refactor this test ala the existing test_pep561_pyright() test,
#which thankfully already does everything we want here. Rejoice!
# If the third-party "mypy" package is unavailable, skip this test. Note that:
# * "mypy" is the reference standard for static type-checking in Python.
# * Unbelievably, "mypy" violates PEP 8 versioning standards by failing to
# define the "mypy.__init__.__version__" attribute, which means that passing
# the optional "minimum_version" parameter to the skip_unless_package()
# decorator fails on failing to find that attribute. While we *COULD* instead
# explicitly test the "mypy.version.__version__" attribute, doing so would
# require defining a new and *MUCH* less trivial
# @skip_unless_module_attribute decorator. For sanity, we instead currently
# accept the importability of the "mypy" package as sufficient, which it
# absolutely isn't, but what you gonna do, right?
#
# Skip this "mypy"-specific functional test unless all of the following apply:
# * The "mypy" package is importable under the active Python interpreter.
# * The active Python interpreter is *NOT* PyPy. mypy is currently incompatible
# with PyPy for inscrutable reasons that should presumably be fixed at some
# future point. See also:
# https://mypy.readthedocs.io/en/stable/faq.html#does-it-run-on-pypy
@skip_unless_package('mypy')
@skip_if_pypy()
def test_pep561_mypy() -> None:
    '''
    Functional test testing this project's compliance with :pep:`561` by
    externally running :mod:`mypy` (i.e., the most popular third-party static
    type checker as of this test) against this project's top-level package.
    '''

    # ....................{ IMPORTS }....................
    # Defer test-specific imports.
    from beartype._util.py.utilpyinterpreter import get_interpreter_filename
    from beartype_test._util.cmd.pytcmdrun import (
        run_command_return_stdout_stderr)
    from beartype_test._util.path.pytpathmain import (
        get_main_mypy_config_file,
        get_main_package_dir,
    )

    # ....................{ COMMAND }....................
    # Tuple of all shell words with which to run the external "mypy" command.
    MYPY_ARGS = (
        # Absolute filename of the executable running the active Python process.
        get_interpreter_filename(),

        # Fully-qualified name of the "mypy" package to be run.
        '-m', 'mypy',

        # Absolute dirname of this project's top-level mypy configuration.
        # Since our "tox" configuration isolates testing to a temporary
        # directory, mypy is unable to find its configuration without help.
        '--config-file', str(get_main_mypy_config_file()),

        # Absolute dirname of this project's top-level package.
        str(get_main_package_dir()),
    )

    # Run the external "mypy" command in the current ${PATH} with these
    # options and arguments, capturing and returning (rather than forwarding)
    # all standard output and error emitted by this subprocess so that both
    # may be asserted against below.
    #
    # Note that we intentionally do *NOT* assert that call to have exited with
    # a successful exit code. Although mypy does exit with success on local
    # developer machines, it inexplicably does *NOT* under remote GitHub
    # Actions-based continuous integration despite "mypy_stderr" being empty.
    # Ergo, we conveniently ignore the former in favour of the latter.
    mypy_stdout, mypy_stderr = run_command_return_stdout_stderr(
        command_words=MYPY_ARGS)

    # ....................{ ASSERT }....................
    # Assert "mypy" to have emitted *NO* warnings or errors to either standard
    # output or error.
    #
    # Note that "mypy" predominantly emits both warnings and errors to "stdout"
    # rather than "stderr", despite this contravening sane POSIX semantics.
    # They did this because some guy complained about not being able to
    # trivially grep "mypy" output, regardless of the fact that redirecting
    # stderr to stdout is a trivial shell fragment (e.g., "2>&1"), but let's
    # break everything just because some guy can't shell. See also:
    #     https://github.com/python/mypy/issues/1051
    #
    # Assert "mypy" to have emitted *NO* warnings or errors to "stdout".
    # Unfortunately, doing so is complicated by the failure of "mypy" to
    # conform to sane POSIX semantics. Specifically:
    # * If "mypy" succeeds, "mypy" emits to "stdout" a single line resembling:
    #       Success: no issues found in 83 source files
    # * If "mypy" fails, "mypy" emits to "stdout" *ANY* other line(s).
    #
    # Ergo, asserting this string to start with "Success:" suffices. Note this
    # assertion depends on "mypy" internals and is thus fragile, but that we
    # have *NO* sane alternative. Specifically, if either...
    if (
        # Mypy emitted one or more characters to standard error *OR*...
        mypy_stderr or
        # Mypy emitted standard output that does *NOT* start with this prefix...
        not mypy_stdout.startswith('Success: ')
    ):
        # Print this string to standard output for debuggability, which pytest
        # will then implicitly capture and reprint on the subsequent assertion
        # failure.
        print(mypy_stdout)

        # Force an unconditional assertion failure.
        assert False
#FIXME: Preserved for posterity. Sadly, diz iz no longer l33t. Mypy broke
#its runtime API. This test now spuriously fails under CI with a
#non-human-readable exception message resembling:
# TypeError: 'mypy' is not a package
#
#Submit an upstream issue reporting this to mypy devs, please.
# from mypy import api
#
# # List of all shell words with which to run the external "mypy" command.
# #
# # Note this iterable *MUST* be defined as a list rather than tuple. If a
# # tuple, the function called below raises an exception. Hot garbage!
# MYPY_ARGS = [
# # Absolute dirname of this project's top-level mypy configuration.
# # Since our "tox" configuration isolates testing to a temporary
# # directory, mypy is unable to find its configuration without help.
# '--config-file', str(get_main_mypy_config_file()),
#
# # Absolute dirname of this project's top-level package.
# str(get_main_package_dir()),
# ]
#
# # Note that we intentionally do *NOT* assert that call to have exited with
# # a successful exit code. Although mypy does exit with success on local
# # developer machines, it inexplicably does *NOT* under remote GitHub
# # Actions-based continuous integration despite "mypy_stderr" being empty.
# # Ergo, we conveniently ignore the former in favour of the latter.
# mypy_stdout, mypy_stderr, _ = api.run(MYPY_OPTIONS + MYPY_ARGUMENTS)
# # mypy_stdout, mypy_stderr, mypy_exit = api.run(MYPY_OPTIONS + MYPY_ARGUMENTS)
#
# # Assert "mypy" to have emitted *NO* warnings or errors to "stderr".
# #
# # Note that "mypy" predominantly emits both warnings and errors to "stdout"
# # rather than "stderr", despite this contravening sane POSIX semantics.
# # They did this because some guy complained about not being able to
# # trivially grep "mypy" output, regardless of the fact that redirecting
# # stderr to stdout is a trivial shell fragment (e.g., "2>&1"), but let's
# # break everything just because some guy can't shell. See also:
# # https://github.com/python/mypy/issues/1051
# assert not mypy_stderr
#
# # Assert "mypy" to have emitted *NO* warnings or errors to "stdout".
# # Unfortunately, doing so is complicated by the failure of "mypy" to
# # conform to sane POSIX semantics. Specifically:
# # * If "mypy" succeeds, "mypy" emits to "stdout" a single line resembling:
# # Success: no issues found in 83 source files
# # * If "mypy" fails, "mypy" emits to "stdout" *ANY* other line(s).
# #
# # Ergo, asserting this string to start with "Success:" suffices. Note this
# # assertion depends on "mypy" internals and is thus fragile, but that we
# # have *NO* sane alternative.
# assert mypy_stdout.startswith('Success: ')
# ....................{ TESTS ~ pyright }....................
# If the external third-party "pyright" command is *NOT* pathable (i.e., an
# executable command residing in the ${PATH} of the local filesystem), skip this
# test. Note that:
# * "pyright" is the most popular static type-checker for Python, mostly due to
# "pylance" (i.e., the most popular Python language plugin for VSCode, itself
# the most popular integrated development environment (IDE)) both bundling
# *AND* enabling "pyright" by default.
# * "pyright" is implemented in pure-TypeScript (i.e., JavaScript augmented with
# type hints transpiling down to pure-JavaScript at compilation time).
# * There exists a largely unrelated "pyright" Python package shim unofficially
# published at:
# https://github.com/RobertCraigie/pyright-python
# Sadly, that package does fundamentally unsafe things like:
# * Violating privacy encapsulation of "pytest", repeatedly.
# * Performing online "npm"-based auto-installation of the "pyright"
# JavaScript package if currently not installed. Currently, there exists
# *NO* means of disabling that dubious behavior.
# Ergo, we resoundingly ignore that high-level package in favour of the
# low-level "pyright" command. Such is quality assurance. It always hurts.
#
# Skip this "pyright"-specific functional test unless all of the following
# apply:
# * The "pyright" command is in the current "${PATH}".
# * Tests are *NOT* running remotely under GitHub Actions-based continuous
# integration (CI). Since the only sane means of installing "pyright" under
# GitHub Actions is via the third-party "jakebailey/pyright-action" action
# (which implicitly exercises this package against "pyright"), explicitly
# exercising this package against "pyright" yet again would only needlessly
# complicate CI workflows and consume excess CI minutes for *NO* gain.
@skip_unless_pathable('pyright')
@skip_if_ci()
def test_pep561_pyright() -> None:
    '''
    Functional test testing this project's compliance with :pep:`561` by
    externally running the ``pyright`` command (i.e., the most popular
    third-party static type checker as of this test) against this project's
    top-level package.

    See Also
    ----------
    :mod:`pytest_pyright`
        Third-party :mod:`pytest` plugin automating this integration. Since
        this integration is trivial *and* since :mod:`beartype` assiduously
        avoids *all* mandatory dependencies, we perform this integration
        manually. Moreover, this plugin:

        * Internally violates privacy encapsulation in :mod:`pytest` by widely
          importing private :mod:`pytest` attributes.
        * Explicitly admonishes downstream dependencies *not* to depend upon
          this plugin:

              This project was created for internal use within another project
              of mine, support will be minimal.
    '''

    # ....................{ IMPORTS }....................
    # Defer test-specific imports.
    from beartype.meta import PACKAGE_NAME
    from beartype._util.py.utilpyversion import get_python_version_major_minor
    from beartype_test._util.cmd.pytcmdrun import run_command_forward_output

    # ....................{ COMMAND }....................
    #FIXME: Note that we *COULD* additionally pass the "--verifytypes" option,
    #which exposes further "pyright" complaints. Let's avoid doing so until
    #someone explicitly requests we do so, please. This has dragged on long
    #enough, people!

    # "major.minor" version of the active Python interpreter, deliberately
    # omitting the patch component of that version.
    python_version = get_python_version_major_minor()

    # Tuple of all shell words with which to run the external "pyright"
    # command, starting with the basename of that command itself.
    pyright_command_words = (
        'pyright',

        # Type-check against the version of the active Python interpreter.
        '--pythonversion', python_version,

        # Relative basename of this project's top-level package. Ideally, the
        # absolute dirname of this package would instead be passed as:
        #     str(get_main_package_dir())
        #
        # Doing so succeeds when manually running tests via our top-level
        # "pytest" script but fails when automatically running tests via the
        # "tox" command, presumably due to "pyright" failing to recognize that
        # that dirname encapsulates a Python package. *sigh*
        PACKAGE_NAME,
    )

    # Run the external "pyright" command in the current ${PATH} with these
    # options and arguments, raising an exception on subprocess failure while
    # forwarding all standard output and error output by this subprocess to the
    # standard output and error file handles of the active Python process.
    run_command_forward_output(command_words=pyright_command_words)
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Project-wide nuitka integration tests.**
This submodule functionally tests that the third-party ``nuitka`` compiler
successfully compiles this pure-Python project.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype_test._util.mark.pytskip import (
skip_unless_os_linux,
skip_unless_package,
skip_unless_pathable,
)
# ....................{ TESTS }....................
# Skip this "nuitka"-specific functional test unless all of the following apply:
# * The "nuitka" package is importable under the active Python interpreter.
# * The third-party GCC compiler is in the current ${PATH}.
# * The current platform is *NOT* a Linux distribution. Although "nuitka"
# technically *CAN* be used under non-Linux platforms, doing so is typically
# non-trivial and likely to unexpectedly explode with catastrophic errors.
@skip_unless_os_linux()
@skip_unless_package('nuitka')
@skip_unless_pathable('gcc')
def test_nuitka(capsys, tmp_path) -> None:
    '''
    Functional test testing that the third-party ``nuitka`` compiler
    successfully compiles a minimal-length example (MLE) extensively leveraging
    this project.

    Parameters
    ----------
    capsys
        Builtin :mod:`pytest` fixture permitting pytest's default capturing of
        standard output and error to be temporarily disabled below.
    tmp_path : pathlib.Path
        Abstract path encapsulating a temporary directory unique to this unit
        test, created in the base temporary directory.
    '''

    # ....................{ IMPORTS }....................
    # Defer test-specific imports.
    from beartype._util.py.utilpyinterpreter import get_interpreter_filename
    from beartype_test._util.cmd.pytcmdrun import (
        run_command_forward_output,
        run_command_forward_stderr_return_stdout,
    )
    from beartype_test._util.os.pytosshell import shell_quote
    from beartype_test._util.path.pytpathtest import (
        get_test_func_data_lib_nuitka_file)

    # ....................{ COMMAND }....................
    #FIXME: "nuitka" occasionally attempts to download external third-party
    #tooling. By default, it interactively prompts the user before doing so.
    #This is obviously bad for automated testing. Thankfully, "nuitka" avoids
    #doing so when it detects that stdin has been redirected to "/dev/null".
    #Ergo, improve the run_command_forward_output() function called below to
    #*ALWAYS* redirect stdin to "/dev/null". We *ALWAYS* want that everywhere.

    # Tuple of all shell words with which to run the external "nuitka" command.
    NUITKA_ARGS = (
        # Absolute filename of the executable running the active Python process.
        get_interpreter_filename(),

        # Fully-qualified name of the "nuitka" package to be run.
        '-m', 'nuitka',

        # Do *NOT* attempt to cache the compilation of intermediate C artifacts
        # with the third-party "ccache" command, which may not even necessarily
        # be installed into the current ${PATH}.
        '--disable-ccache',

        #FIXME: Currently disabled as enabling even just this incurs *FAR* too
        #much time and space. Sadness ensues.
        # # Instruct "nuitka" to additionally compile the entirety of the
        # # "beartype" package. By default, "nuitka" only compiles the script
        # # (specified below).
        # '--follow-import-to=beartype',

        # Absolute or relative dirname of a test-specific temporary directory to
        # which "nuitka" will generate ignorable compilation artifacts.
        f'--output-dir={shell_quote(str(tmp_path))}',

        # Absolute filename of a minimal-length example (MLE) leveraging this
        # project to be compiled by "nuitka".
        str(get_test_func_data_lib_nuitka_file()),
    )

    # With pytest's default capturing of standard output and error temporarily
    # disabled...
    #
    # Technically, this is optional. Pragmatically, "nuitka" is sufficiently
    # slow that failing to do this renders this test silent for several tens of
    # seconds to minutes, which is significantly worse than printing progress.
    with capsys.disabled():
        # Run the "nuitka" command in the current ${PATH} with these options and
        # arguments, raising an exception on subprocess failure while forwarding
        # all standard output and error output by this subprocess to the
        # standard output and error file handles of the active Python process.
        run_command_forward_output(command_words=NUITKA_ARGS)

    # ....................{ ASSERT }....................
    # Absolute or relative filename of the executable binary generated by
    # "nuitka" after running the above command.
    #
    # NOTE(review): the "beartype_nuitka.bin" basename is assumed to derive
    # from the basename of the MLE script returned by
    # get_test_func_data_lib_nuitka_file() -- confirm against that data file.
    COMPILED_FILENAME = str(tmp_path / 'beartype_nuitka.bin')

    # Standard output emitted by running this executable binary.
    COMPILED_STDOUT = run_command_forward_stderr_return_stdout(
        COMPILED_FILENAME)

    # Assert that this is the expected output. Note this multiline literal is
    # intentionally *NOT* indented: the second line must match stdout exactly.
    assert COMPILED_STDOUT == (
        '''TypeHint(<class 'int'>)
TypeHint(<class 'float'>)''')
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Project-wide functional beartype-in-Sphinx tests.**
This submodule functionally tests the :func:`beartype.beartype` decorator to
conditionally reduce to a noop when the active Python interpreter is building
documentation for the third-party :mod:`sphinx` package.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype_test._util.mark.pytskip import (
skip,
skip_unless_package,
)
# ....................{ TESTS }....................
#FIXME: Reenable this once we resolve how to actually do so. Currently, the
#make_app() call below is failing with a low-level exception raised from the
#third-party "autoapi" Sphinx extension, seemingly due to a deprecation:
# sphinx.errors.ExtensionError: Handler <function run_autoapi at 0x7f82568552d0> for event 'builder-inited' threw an exception (exception: The 'Module.doc' attribute is deprecated, use 'Module.doc_node' instead.)
#FIXME: For the benefit of the community, externally document how to do this
#for others at this open issue:
# https://github.com/sphinx-doc/sphinx/issues/7008
#Note the trivial "conftest" submodule in this directory. Since this is all
#surprisingly easy, a simple comment describing this should easily suffice.
@skip('Currently broken by Sphinx "autoapi" deprecations.')
@skip_unless_package('sphinx')
def test_sphinx_build(make_app, tmp_path) -> None:
    '''
    Functional test validating that the third-party :mod:`sphinx` package
    successfully builds documentation for this project (i.e., residing in the
    top-level ``doc/`` subdirectory).

    To do so, this test externally runs the ``sphinx-build`` command against
    our ``doc/source/`` Sphinx document tree.

    Parameters
    ----------
    make_app : sphinx.testing.util.SphinxTestApp
        Factory fixture creating and returning a :mod:`pytest`-friendly Sphinx
        object encapsulating the process of running the ``sphinx-build``
        command with the passed parameters.
    tmp_path : pathlib.Path
        Abstract path encapsulating a temporary directory unique to this unit
        test, created in the base temporary directory.
    '''

    # Defer test-specific imports.
    from beartype_test._util.path.pytpathmain import get_main_sphinx_source_dir
    from sphinx.testing.path import path

    #FIXME: Pass "parallel=CPUS_LEN" as well, where "CPUS_LEN" is the number of
    #CPU cores available to the active Python interpreter. We can't be bothered
    #to decide how to query that at the moment. It's probably trivial. *shrug*

    # "sphinx.testing"-specific path object wrapping the absolute or relative
    # dirname of a test-specific temporary directory to which Sphinx will emit
    # ignorable rendered documentation files.
    #
    # Yes, this is *INSANE.* Sphinx should *ABSOLUTELY* be leveraging the
    # portable and standard "pathlib.Path" class rather than attempting to
    # roll its own non-standard (and thus probably non-portable) class.
    sphinx_build_dir = path(str(tmp_path))

    # "pytest"-friendly Sphinx object encapsulating the process of running the
    # "sphinx-build" command with the passed parameters. For reproducibility,
    # emulate the options passed to the root "sphinx" script locally building
    # this project's documentation as much as allowed by this API.
    sphinx_app = make_app(
        # Enable the HTML mode, rendering HTML-specific documentation files.
        buildername='html',
        # Absolute dirname of this project's top-level Sphinx document tree.
        srcdir=str(get_main_sphinx_source_dir()),
        builddir=sphinx_build_dir,
        # Instruct Sphinx to cache as little as possible.
        freshenv=True,
    )

    #FIXME: Re-enable this once Sphinx no longer emits warnings. Currently, the
    #third-party "autoapi" extension is emitting a considerable number of
    #warnings.
    # # Instruct Sphinx to raise a fatal exception rather than emitting a
    # # non-fatal warning on the first warning (identical to the "-W" option).
    # sphinx_app.warningiserror = True

    # Assert that building this project's documentation succeeds *WITHOUT*
    # raising any exceptions or emitting any warnings.
    sphinx_app.build()
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
:mod:`pytest` **Sphinx test plugin** (i.e., early-time configuration guaranteed
to be run by :mod:`pytest` *after* passed command-line arguments are parsed).
:mod:`pytest` implicitly imports *all* functionality defined by this module
into *all* submodules of this subpackage.
'''
# ....................{ IMPORTS }....................
# Attempt to...
try:
    # Import the Sphinx-specific make_app() fixture required to portably test
    # Sphinx documentation builds.
    #
    # Note that the Sphinx-specific test_params() fixture is imported *ONLY* to
    # expose that fixture to make_app(), which requires that fixture.
    from sphinx.testing.fixtures import (
        make_app,
        test_params,
    )
# If Sphinx is unimportable, silently continue; tests requiring these fixtures
# are expected to skip themselves. Note that we intentionally catch *ONLY*
# "ImportError" here. The prior bare "except:" clause silently swallowed *ALL*
# exceptions -- including "KeyboardInterrupt" and "SystemExit" -- masking real
# failures during test collection.
except ImportError:
    pass
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Project-wide functional beartype-in-Sphinx tests.**
This submodule functionally tests the :func:`beartype.beartype` decorator to
conditionally reduce to a noop when the active Python interpreter is building
documentation for the third-party :mod:`sphinx` package.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype_test._util.mark.pytskip import (
skip_if_python_version_greater_than_or_equal_to,
skip_unless_package,
)
# ....................{ TESTS }....................
#FIXME: *NON-IDEAL.* This test manually invokes Sphinx internals. Instead, this
#test should be fundamentally refactored from the ground up to leverage the
#public (and increasingly documented) "sphinx.testing" subpackage.
#FIXME: This test is currently skipped under Python >= 3.11, due to both Sphinx
#itself *AND* Sphinx dependencies (e.g., Babel) importing from multiple modules
#deprecated by Python 3.11. Since safely ignoring the specific
#"DeprecationWarning" warnings while *NOT* ignoring all other warnings is
#non-trivial and thus a waste of volunteer time, we prefer to simply avoid
#Sphinx altogether under Python >= 3.11 for the moment. Revisit this in >= 2023
#once the dust has settled and Sphinx & friends have corrected themselves.
@skip_if_python_version_greater_than_or_equal_to('3.11.0')
@skip_unless_package('sphinx')
def test_beartype_in_sphinx(tmp_path) -> None:
    '''
    Functional test validating that the :func:`beartype.beartype` decorator
    conditionally reduces to a noop when the active Python interpreter is
    building documentation for the third-party :mod:`sphinx` package.

    To do so, this test externally runs the ``sphinx-build`` command against a
    minimal-length Sphinx document tree exercising all known edge cases.

    Parameters
    ----------
    tmp_path : pathlib.Path
        Abstract path encapsulating a temporary directory unique to this unit
        test, created in the base temporary directory.
    '''

    # ..................{ IMPORTS }..................
    # Defer test-specific imports.
    from beartype import beartype
    from beartype._util.mod.lib.utilsphinx import (
        _SPHINX_AUTODOC_SUBPACKAGE_NAME)
    from beartype_test._util.cmd.pytcmdexit import is_success
    from beartype_test._util.path.pytpathtest import (
        get_test_func_data_lib_sphinx_dir)
    from sys import modules as modules_imported_name

    # Entry-point (i.e., pure-Python function accepting a list of zero or more
    # command-line arguments) underlying the external "sphinx-build" command.
    # Calling this function runs "sphinx-build" in-process rather than forking
    # a subprocess.
    from sphinx.cmd.build import main as sphinx_build

    # ..................{ SPHINX-BUILD }..................
    # List of all command-line options (i.e., "-"-prefixed strings) to be
    # effectively passed to the external "sphinx-build" command.
    #
    # Note this iterable *MUST* be defined as a list rather than tuple. If a
    # tuple, the function called below raises an exception. Hot garbage!
    SPHINX_OPTIONS = [
        # Avoid caching data into a "{OUTPUTDIR}/.doctrees/" subdirectory.
        # Although typically advisable, "{OUTPUTDIR}" is an ignorable temporary
        # test-specific directory deleted immediately after completion of this
        # test. Caching data would only needlessly consume time and space.
        '-E',

        # Enable the HTML mode, rendering HTML-specific documentation files.
        # Although technically arbitrary, this is the typical default mode.
        '-b', 'html',

        # Treat non-fatal warnings as fatal errors. This is *CRITICAL.* By
        # default, Sphinx insanely emits non-fatal warnings for fatal "autodoc"
        # errors resembling:
        #     WARNING: autodoc: failed to import module 'beartype_sphinx'; the following exception was raised:
        #     No module named 'beartype_sphinx'
        '-W',
    ]

    # List of all command-line arguments (i.e., non-options) to be effectively
    # passed to the external "sphinx-build" command.
    #
    # Note this iterable *MUST* be defined as a list rather than tuple. If a
    # tuple, the function called below raises an exception. Steaming trash!
    SPHINX_ARGUMENTS = [
        # Absolute or relative dirname of a test-specific subdirectory
        # containing a sample Sphinx structure exercising edge cases in the
        # @beartype decorator.
        str(get_test_func_data_lib_sphinx_dir()),

        # Absolute or relative dirname of a test-specific temporary directory to
        # which Sphinx will emit ignorable rendered documentation files.
        str(tmp_path),
    ]

    # Run "sphinx-build" to build documentation for this fake project.
    sphinx_build_exit_code = sphinx_build(SPHINX_OPTIONS + SPHINX_ARGUMENTS)

    # Assert that "sphinx-build" successfully builds documentation for this
    # fake project *WITHOUT* raising an exception.
    assert is_success(sphinx_build_exit_code), (
        f'"sphinx-build" exit code {sphinx_build_exit_code} != 0.')

    # ..................{ VALIDATION }..................
    def thou_art_there() -> str:
        '''
        Arbitrary callable *not* decorated by the :func:`beartype.beartype`
        decorator intentionally annotated by one or more arbitrary unignorable
        type hints to prevent that decorator from silently reducing to a noop.
        '''

        return 'From which they fled recalls them'

    # That callable decorated by @beartype *AFTER* the in-process
    # "sphinx-build" invocation above has completed, at which point @beartype
    # must no longer believe Sphinx to be autogenerating documentation.
    thou_art_there_beartyped = beartype(thou_art_there)

    # Assert @beartype decorated that callable with runtime type-checking
    # rather than erroneously reducing to a noop.
    assert thou_art_there_beartyped is not thou_art_there

    # ..................{ OPTIMIZATION }..................
    # Crudely unimport the Sphinx "autodoc" extension. Doing so optimizes
    # subsequent invocations of the @beartype decorator by reducing the
    # beartype._util.mod.lib.utilsphinx.is_sphinx_autodocing() tester
    # internally called by that decorator from an O(n) test with non-negligible
    # constants to an O(1) test with negligible constants.
    del modules_imported_name[_SPHINX_AUTODOC_SUBPACKAGE_NAME]
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Project ``README.rst`` functional tests.**
This submodule functionally tests the syntactic validity of this project's
top-level ``README.rst`` file.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype_test._util.mark.pytskip import skip_unless_package
# ....................{ TESTS }....................
#FIXME: Consider submitting as a StackOverflow post. Dis iz l33t, yo!
# If the third-party "docutils" package satisfying this minimum version is
# unavailable, skip this test. Note that:
#
# * "docutils" is the reference standard for parsing reStructuredText (reST).
# Unsurprisingly, even Sphinx parses reST with "docutils".
# * This test makes assumptions about the "docutils" public API satisfied
# *ONLY* by this minimum version.
@skip_unless_package(package_name='docutils', minimum_version='0.15')
def test_doc_readme(monkeypatch) -> None:
    '''
    Functional test asserting this project's top-level ``README.rst`` file to
    be syntactically valid reStructuredText (reST), by monkeypatching the
    public :mod:`docutils` reporter responsible for emitting warnings and
    errors to instead convert those warnings and errors into a test failure.

    Parameters
    ----------
    monkeypatch : MonkeyPatch
        Builtin fixture object permitting object attributes to be safely
        modified for the duration of this test.
    '''

    # Defer test-specific imports.
    from docutils.core import publish_parts
    from docutils.utils import Reporter
    from beartype_test._util.path.pytpathmain import get_main_readme_file

    # Decoded plaintext contents of this project's readme file.
    #
    # Note that the "utf-8" encoding *MUST* be passed explicitly here. While
    # macOS and Linux both default to that encoding, Windows defaults to the
    # single-byte "cp1252" encoding for backward compatibility. Omitting this
    # encoding thus yields a non-human-readable failure under Windows:
    #     UnicodeDecodeError: 'charmap' codec can't decode byte 0x9d in
    #     position 1495: character maps to <undefined>
    readme_text = get_main_readme_file().read_text(encoding='utf-8')

    # All warning and error messages collected by the closure defined below
    # while "docutils" parses this project's top-level "README.rst" file.
    bad_messages = []

    # Original unpatched "Reporter.system_message" method, preserved *BEFORE*
    # monkey-patching so that the closure below may defer to it.
    system_message_original = Reporter.system_message

    def system_message_collector(reporter, level, message, *args, **kwargs):
        '''
        Closure wrapping the original :meth:`Reporter.system_message` method,
        additionally collecting each emitted warning and error message into
        the ``bad_messages`` list defined by the enclosing test.
        '''

        # Defer to the original unpatched method with all passed parameters.
        result = system_message_original(
            reporter, level, message, *args, **kwargs)

        # Collect this message only if of at least warning severity; lesser
        # (e.g., purely informational) messages are silently ignored.
        if level >= reporter.WARNING_LEVEL:
            bad_messages.append(message)

        # Preserve the original method's return value as is.
        return result

    # Install this monkey-patch for the remaining duration of this test.
    monkeypatch.setattr(
        Reporter,
        name='system_message',
        value=system_message_collector,
    )

    # Render this readme as reST, implicitly invoking this monkey-patch.
    publish_parts(source=readme_text, writer_name='html4css1')

    # Assert "docutils" to have emitted *NO* warnings or errors.
    assert not bad_messages
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# Note that this is mandatory. If absent, the "autodoc" extension enabled below
# fails with the following build-time error:
# autodoc: failed to import module 'beartype_sphinx'; the following exception was raised:
# No module named 'beartype_sphinx'
import os
import sys

# Prepend this configuration script's directory to the module search path so
# that the "sphinx.ext.autodoc" extension enabled below can import the
# "beartype_sphinx" module residing in this same directory (see the note in
# the "Path setup" header above).
sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------

# Human-readable name of this (test-only) project.
project = 'beartype_sphinx'

# Copyright notice. Note that "copyright" intentionally shadows the builtin of
# the same name, as mandated by the Sphinx configuration format.
copyright = '2021, @leycec'

# Author of this project.
author = '@leycec'

# The full version, including alpha/beta/rc tags.
release = '0.0.1'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    # ..................{ BUILTIN                          }..................
    # Builtin extensions unconditionally available under *ALL* reasonably
    # modern versions of Sphinx uniquely prefixed by "sphinx.ext.".

    # Builtin extension autogenerating reStructuredText documentation from
    # class, callable, and variable docstrings embedded in Python modules,
    # documenting this project's public (and optionally also private) API.
    'sphinx.ext.autodoc',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Sphinx-specific functional test module** (i.e., module intended to be
imported *only* by Sphinx's bundled :mod:`sphinx.ext.autodoc` extension from a
``".. automodule:: beartype_sphin"`` statement in the top-level ``index.rst``
file governing this test).
This module exercises the expected relationship between the
:mod:`beartype.beartype` decorator and the :mod:`sphinx.ext.autodoc` extension
by ensuring our decorator reduces to the **identity decorator** (i.e.,
decorator preserving the decorated callable as is) when imported by that
extension. Why? Because of mocking. When :mod:`beartype.beartype`-decorated
callables are annotated with one more classes mocked by
``autodoc_mock_imports``, our decorator frequently raises exceptions at
decoration time. Why? Because mocking subverts our assumptions and expectations
about classes used as annotations.
'''
# ....................{ IMPORTS }....................
from beartype import beartype
#FIXME: Uncomment to debug that this module is actually being imported.
# print('Some phantom, some faint image; till the breast')
# ....................{ VALIDATION }....................
def till_the_breast() -> str:
    '''
    Arbitrary callable deliberately left undecorated by the
    :func:`beartype.beartype` decorator while still being annotated by an
    unignorable return type hint, preventing that decorator from silently
    reducing to a noop when explicitly applied to this callable below.
    '''

    # Arbitrary string constant trivially satisfying the return annotation.
    phantom_image = 'Some phantom, some faint image;'
    return phantom_image
# That callable explicitly decorated by @beartype. Because this module is only
# ever imported by "sphinx.ext.autodoc" (see the module docstring), @beartype
# is expected to detect that fact and return the passed callable unmodified.
till_the_breast_beartyped = beartype(till_the_breast)

# If beartype did *NOT* correctly detect itself to be running during Sphinx
# autodocumentation by preserving that callable as is, raise an exception.
#
# Note that an identity comparison ("is not") suffices here: reduction to the
# identity decorator means returning the *same* callable object, not merely an
# equal one.
if till_the_breast_beartyped is not till_the_breast:
    raise ValueError(
        '@beartype failed to reduce to the identity decorator during '
        'automatic Sphinx document generation by "sphinx.ext.autodoc".'
    )
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Nuitka-specific functional test script** (i.e., script intended to be
compiled by the third-party :mod:`nuitka` compiler).
This script exercises the expected relationship between the
:mod:`beartype.beartype` decorator and the :mod:`nuitka` compiler by ensuring
:mod:`nuitka` successfully compiles scripts leveraging that decorator.
'''
# ....................{ IMPORTS }....................
from beartype import beartype
from beartype.door import TypeHint
from beartype.typing import Tuple, Union
# ....................{ FUNCTIONS }....................
@beartype
def make_type_hints() -> Tuple[TypeHint, ...]:
    '''
    Non-empty tuple of one or more :class:`beartype.door.TypeHint` instances,
    exercising that :mod:`nuitka` successfully compiles code instantiating and
    iterating those instances.
    '''

    # Wrap an arbitrary union type hint in the object-oriented "TypeHint" API.
    union_hint = TypeHint(Union[int, float])

    # Materialize the child type hints of this union into a tuple.
    return tuple(child_hint for child_hint in union_hint)
# ....................{ MAIN                               }....................
# Print the representations of these "TypeHint" instances for verifiability
# (presumably captured and inspected by whatever harness compiles and runs
# this script under nuitka — TODO confirm against the calling test).
for type_hint in make_type_hints():
    print(type_hint)
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Project-wide functional importation tests.**
This submodule functionally tests this project's behaviour with respect to
imports of both internal subpackages and submodules (unique to this project)
*and* external third-party packages and modules.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ....................{ TESTS }....................
def test_package_import_isolation() -> None:
    '''
    Test that importing the top-level lightweight :mod:`beartype` package does
    *not* accidentally import from one or more heavyweight (sub)packages.

    This test ensures that the fixed cost of the first importation of the
    :mod:`beartype` package itself remains low -- if not ideally negligible.
    '''

    # Defer test-specific imports.
    from beartype._util.py.utilpyinterpreter import get_interpreter_filename
    from beartype_test._util.cmd.pytcmdrun import (
        run_command_forward_stderr_return_stdout)
    from re import (
        compile as re_compile,
        search as re_search,
    )

    #FIXME: *FRAGILE.* Manually hardcoding module names here invites
    #desynchronization, particularly with optional third-party dependencies.
    #Instead, the names of optional third-party packages should be dynamically
    #constructed from the contents of the "beartype.meta" submodule.

    # Uncompiled regular expressions, each matching the fully-qualified name
    # of *ANY* heavyweight (sub)module or package whose importation violates
    # our self-imposed constraint of fast importation of the core
    # @beartype.beartype decorator.
    #
    # Note that the third-party "typing_extensions" module has been profiled
    # across all supported CPython versions to import either faster or only
    # slightly slower than the standard "typing" module. Both import rapidly
    # enough to be ignorable with respect to importation costs here. See also
    # @posita's stunning profiling work at:
    #     https://github.com/beartype/beartype/pull/103#discussion_r815027198
    heavy_module_patterns = (
        r'beartype\.abby',
        r'beartype\.cave',
        r'beartype\.vale',
        r'numpy',
    )

    # Single uncompiled alternation synthesized from these patterns.
    heavy_module_pattern = '|'.join(heavy_module_patterns)

    # Compiled regular expression anchored to match any such name.
    heavy_module_regex = re_compile(fr'^({heavy_module_pattern})\b')
    # print(f'heavy_module_regex: {heavy_module_regex}')

    # Python code printing all imports (i.e., the contents of the standard
    # "sys.modules" list) *AFTER* importing the @beartype decorator.
    python_code = '''
# Import the core @beartype decorator and requisite "sys" machinery.
from beartype import beartype
from sys import modules
# Print a newline-delimited list of the fully-qualified names of all modules
# transitively imported by the prior "import" statements.
print('\\n'.join(module_name for module_name in modules.keys()))
'''

    # Run this code isolated to a Python subprocess, raising an exception on
    # subprocess failure while both forwarding all standard error output by
    # this subprocess to the standard error file handle of the active Python
    # process *AND* capturing and returning all subprocess stdout.
    imported_module_names_str = run_command_forward_stderr_return_stdout(
        command_words=(
            # Absolute filename of the executable running this Python process,
            # guaranteeing the subprocess runs under the same interpreter.
            get_interpreter_filename(),
            '-c',
            python_code,
        ))

    # For the fully-qualified name of each module transitively imported by
    # importing @beartype in that isolated interpreter, assert this name is
    # *NOT* that of a heavyweight (sub)module or package enumerated above.
    #
    # Note that this iteration could, of course, be more efficiently
    # implemented as a single composite regex match against the
    # newline-delimited string captured above. Doing so, however, would defeat
    # the purpose of this iteration: to offer unambiguous and human-readable
    # error messages in the common event of an importation violation.
    for imported_module_name in imported_module_names_str.splitlines():
        assert re_search(heavy_module_regex, imported_module_name) is None, (
            f'@beartype.beartype improperly imports heavyweight module '
            f'"{imported_module_name}" from global scope.'
        )
|
import streamlit as st

# Configure browser-tab metadata. Note that "set_page_config" must be the
# first Streamlit command executed by a page.
st.set_page_config(
    page_title="Home",
)

# Landing-page title and a summary of the frameworks covered by this app.
st.title('Annotated Transformers')
st.write("A collection of annotated transformer architectures in Meta's [PyTorch](https://pytorch.org/) and DeepMind's "
         "[Haiku](https://github.com/deepmind/dm-haiku), [Optax](https://github.com/deepmind/optax), and Google's [JAX](https://jax.readthedocs.io/en/latest/index.html).")

# Link to the Vision Transformer page of this multipage app, together with
# pointers to the original paper and reference implementation.
st.header("[Vision Transformer](Vision_Transformer)")
st.markdown('''
- [Research Paper](https://arxiv.org/abs/2010.11929)
- [Official Repository](https://github.com/google-research/vision_transformer)
''')

# BibTeX citation for the Vision Transformer paper, rendered verbatim.
st.header("Citations")
st.markdown('''
@article{DBLP:journals/corr/abs-2010-11929,
author = {Alexey Dosovitskiy and
Lucas Beyer and
Alexander Kolesnikov and
Dirk Weissenborn and
Xiaohua Zhai and
Thomas Unterthiner and
Mostafa Dehghani and
Matthias Minderer and
Georg Heigold and
Sylvain Gelly and
Jakob Uszkoreit and
Neil Houlsby},
title = {An Image is Worth 16x16 Words: Transformers for Image Recognition
at Scale},
journal = {CoRR},
volume = {abs/2010.11929},
year = {2020},
url = {https://arxiv.org/abs/2010.11929},
eprinttype = {arXiv},
eprint = {2010.11929},
timestamp = {Fri, 20 Nov 2020 14:04:05 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-2010-11929.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}''')
|
import streamlit as st
# Global Variables
language='python'
# Section for Title
st.title('Vision Transformer')
st.image(
"https://media.giphy.com/media/lr9Xd6IkR8F0mAv0bL/giphy.gif",
caption="Animated Vision Transformer",
#width=400, # The actual size of most gifs on GIPHY are really small, and using the column-width parameter would make it weirdly big. So I would suggest adjusting the width manually!
)
# Section for Table of Contents
st.header('Table of contents')
st.markdown('''
- [Introduction](#Introduction)
- [Paper Abstract](#PaperAbstract)
- [Acknowledgement](#Acknowledgement)
- [Prerequisites](#Prerequisites)
- [Installation](#Installs)
- [Imports](#Imports)
- [Configuration](#Configuration)
- [Helper Functions](#Helpers)
- [Model Architecture](#Model)
- [Pre-Normalization](#prenorm)
- [Multilayer Perceptron](#feedforward)
- [Attention Mechanism](#attention)
- [Transformer Network](#transformer)
- [Vision Transformer Model](#visiontransformer)
- [Training](#Training)
- [Initialize Model](#InitializeModel)
- [Image Augmentation](#ImageAugmentation)
- [CIFAR10 Dataset](#Dataset)
- [Dataloader](#Dataloader)
- [Loss Function](#LossFunction)
- [Optimizer](#Optimizer)
- [Learning Rate Scheduler](#LRS)
- [Train Step](#TrainStep)
- [Validation Step](#ValidationStep)
- [Train the Model](#TrainingLoop)
- [Save Trained Model](#SaveModel)
- [Load Saved Model](#LoadModel)
- [Make Predictions](#MakePredictions)
- [References](#References)
- [Citations](#Citations)
''')
# Section for Introduction
st.header('Introduction', anchor='Introduction')
st.subheader('Paper Abstract', anchor='PaperAbstract')
st.markdown('''"
While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.
" - Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby''')
# Section for acknowledgement
st.subheader('Acknowledgement', anchor='Acknowledgement')
st.write('''
''')
# Section for Prereqs
st.header('Prerequisites', anchor='Prerequisites')
st.write('''It is expected that you at least have some basic working knowledge of Python and PyTorch or Haiku.
Deep learning greatly benefits from GPU or TPU acceleration.
You will want to have access to a machine with one or many accelerated devices.
If you do not have access to a GPU or TPU you can still use a CPU, although training times will be significantly longer.
You can check the version of CUDA from the command line with:
`nvcc --version`
Or check the devices available on your machine with:
`nvidia-smi`
''')
# Installation section
st.subheader('Installation', anchor='Installs')
st.write("You will first need to install either DeepMind's Haiku and Google's Jax, or Facebook's PyTorch.")
installs_tab_1, installs_tab_2 = st.tabs(["PyTorch", "Haiku"])
with installs_tab_2:
st.write('Install Haiku:')
haiku_installs = '''
$ pip3 install -U jax jaxlib dm-haiku
'''
st.code(haiku_installs, language='bash')
with installs_tab_1:
st.write('Install PyTorch:')
pytorch_installs = '''
$ pip3 install -U torch torchvision torchaudio
'''
st.code(pytorch_installs, language='bash')
st.write("Check if PyTorch was successfully installed from the command line with:")
st.code('python3 -c "import torch; print(torch.__version__)"', language='bash')
# Imports section
st.subheader('Imports', anchor='Imports')
st.write("You will need to import the necessary libraries in your Python file or Jupyter Notebook.")
imports_tab_1, imports_tab_2 = st.tabs(["PyTorch", "Haiku"])
with imports_tab_2:
haiku_imports = '''
from functools import partial
import haiku as hk
from haiku import PRNGSequence
import jax
from jax import random, nn
import jax.numpy as jnp
import optax
from einops import rearrange, repeat
'''
st.code(haiku_imports, language)
with imports_tab_1:
pytorch_imports = '''
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms as T
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
'''
st.code(pytorch_imports, language)
# Configuration for Global Variables
st.subheader('Configuration', anchor='Configuration')
st.write("""A configuration class for defining global variables to be used for training the model.
Each of these variables are explained in detail in the relevant sections below.
""")
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_2:
haiku = '''
class CFG:
learning_rate = 0.001
'''
st.code(haiku, language)
with tab_1:
pytorch = '''
class CFG:
learning_rate = 0.001
policy = T.AutoAugmentPolicy.CIFAR10
image_size = 224
num_classes = 10
batch_size = 4
device = 'cuda'
seed = 42
'''
st.code(pytorch, language)
# Helper functions section
st.subheader('Helper Functions', anchor='Helpers')
st.write('Define some basic helper functions for the Model.')
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_2:
haiku = '''
def pair(t):
return t if isinstance(t, tuple) else (t, t)
'''
st.code(haiku, language)
st.write('Haiku does not possess an Identity Layer class so we will want to define one as well.')
identity_layer_class = '''
class IdentityLayer(hk.Module):
def __call__(self, x):
x = hk.Sequential([])
return x
'''
st.code(identity_layer_class, language)
with tab_1:
pytorch = '''
def seed_environment(seed):
torch.manual_seed(seed)
seed_environment(CFG.seed)
def pair(t):
return t if isinstance(t, tuple) else (t, t)
'''
st.code(pytorch, language)
# Section for Model
st.header('Model Architecture', anchor='Model')
st.subheader('Pre-Normalization', anchor='prenorm')
st.write('''
Layer normalisation explicitly controls the mean and variance of individual neural network activations
Next, the output reinforced by residual connections goes through a layer normalization layer. Layer normalization,
similar to batch normalization is a way to reduce the “covariate shift” in neural networks allowing them to be trained
faster and achieve better performance. Covariate shift refers to changes in the distribution of neural network
activations (caused by changes in the data distribution), that transpires as the model goes through model training.
Such changes in the distribution hurts consistency during model training and negatively impact the model. It was
introduced in the paper, “Layer Normalization” by Ba et. al. (https://arxiv.org/pdf/1607.06450.pdf).
However, layer normalization computes mean and variance (i.e. the normalization terms) of the activations in such a way
that, the normalization terms are the same for every hidden unit. In other words, layer normalization has a single mean
and a variance value for all the hidden units in a layer. This is in contrast to batch normalization that maintains
individual mean and variance values for each hidden unit in a layer. Moreover, unlike batch normalization, layer
normalization does not average over the samples in the batch, rather leave the averaging out and have different
normalization terms for different inputs. By having a mean and variance per-sample, layer normalization gets rid of the
dependency on the mini-batch size. For more details about this method, please refer the original paper.
''')
st.latex(r'''
\mu^{l} = {\frac 1 H} \displaystyle\sum_{i=1}^H a_i^l
''')
st.latex(r'''
\sigma^l = \sqrt{{\frac 1 H} \displaystyle\sum_{i=1}^H (a_i^l - \mu^l)^2}
''')
st.latex(r'''
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
''')
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_2:
haiku = '''
LayerNorm = partial(hk.LayerNorm, create_scale=True, create_offset=False, axis=-1)
class PreNorm(hk.Module):
def __init__(self, fn):
super(PreNorm, self).__init__()
self.norm = LayerNorm()
self.fn = fn
def __call__(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
'''
st.code(haiku, language)
with tab_1:
pytorch = '''
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
'''
st.code(pytorch, language)
st.write("""
This is a class implementation of a pre-normalization layer, which is a composite layer consisting of a layer
normalization layer followed by another layer or function. The `__init__` method initializes the pre-normalization layer
with a given input dimension, `dim`, and a function, `fn`, which represents the layer or function that will be applied
after the normalization layer.
The `forward` method defines the forward pass of the pre-normalization layer, where the input, `x`, is first passed through
a layer normalization layer with `dim` dimensions. The output of the normalization layer is then passed through the
function `fn`, along with any additional keyword arguments provided in `kwargs`. The output of the `forward` method is the
result of applying the `fn` function to the normalized input.
The pre-normalization layer allows the input to be normalized before it is passed to the `fn` function, which can help
improve the performance and stability of the model. This is especially useful when the `fn` function is a non-linear
function, such as an activation function, that can benefit from input normalization.
""")
st.write("""
`self.norm` is an instance of the `nn.LayerNorm` class from PyTorch. This class represents a layer normalization operation,
which is a type of normalization that is applied to the inputs of a layer in a neural network. The `nn.LayerNorm` class
takes a single argument `dim`, which specifies the dimensions of the input data that will be normalized. In this case, the
value of `dim` is passed directly to the `nn.LayerNorm` class, so the dimensions of the input data will be the same as the
value of `dim`.
""")
st.subheader('Multilayer Perceptron', anchor='feedforward')
st.write("""
We propose the Gaussian Error Linear Unit (GELU), a high-performing neuralnetwork activation function.
The GELU activation function is xΦ(x), where Φ(x) the standard Gaussian cumulative distribution function.
The GELU nonlinearity weights inputs by their value
""")
st.write("The Gaussian Error Linear Unit (GELU) is defined as:")
st.latex(r"""
\text{GELU}(x) = xP(X \leq x) = x\Phi(x) = x * {\frac 1 2} \big[1 + \text{erf}\big(x/\sqrt2\big)\big]
""")
st.write("GELU is approximated with if greater feedforward speed is worth the cost of exactness.:")
st.latex(r'''
\text{GELU}(x) = 0.5 * x * \bigg(1 + \tanh\bigg(\sqrt{\frac 2 \pi} * (x + 0.044715 * x^3)\bigg)\bigg)
''')
st.write("""
The GELU (Gaussian Error Linear Unit) function is a type of activation function used in neural networks. It is defined
as a function of the input, `x`, as shown in the equation above. The GELU function is a smooth approximation of the
rectified linear unit (ReLU) activation function and has been shown to improve the performance of neural networks in
some cases. Unlike ReLU, GELU weights inputs by their value rather than gating strictly on sign: large positive inputs
pass through nearly unchanged, large negative inputs are mapped toward 0, and small inputs are smoothly attenuated.
This allows the GELU function to
retain information about the magnitude of the input, which can be useful for certain types of learning tasks.
""")
st.write("""
`nn.GELU()` is a PyTorch function that creates a GELU activation function. The GELU activation function is a
differentiable function that takes as input a tensor with any shape and returns a tensor with the same shape. The
function applies the GELU function elementwise to the input tensor, resulting in a tensor of the same shape. This
allows the GELU activation function to be used in the forward pass of a neural network,
allowing the network to learn non-linear transformations of the input data.
""")
st.write('dropout')
st.write("""
During training, randomly zeroes some of the elements of the input tensor with probability p using samples from a
Bernoulli distribution. Each channel will be zeroed out independently on every forward call.
This has proven to be an effective technique for regularization and preventing the co-adaptation of neurons as
described in the paper Improving neural networks by preventing co-adaptation of feature detectors .
Furthermore, the outputs are scaled by a factor of \frac{1}{1-p}
during training. This means that during evaluation the module simply computes an identity function.
p – probability of an element to be zeroed. Default: 0.5
""")
st.write("""
`nn.Dropout(dropout)` is a PyTorch function that creates a dropout layer with a given dropout rate. The dropout layer is
a regularization technique that randomly sets a fraction of the input elements to 0 during the forward pass, with the
fraction determined by the dropout rate. This has the effect of reducing the dependence of each output element on a
specific subset of the input elements, making the model less susceptible to overfitting and improving generalization
performance. The `dropout` argument determines the dropout rate, which is the fraction of input elements that will be set
to 0. A dropout rate of 0 means that no elements will be dropped, while a dropout rate of 1 means that all elements
will be dropped. The default value for the `dropout` argument is 0, which means that no elements will be dropped by the
dropout layer.
""")
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_2:
haiku = '''
class MLP(hk.Module):
def __init__(self, dim, hidden_dim):
super(MLP, self).__init__()
self.linear1 = hk.Linear(hidden_dim)
self.linear2 = hk.Linear(dim)
def __call__(self, x):
x = self.linear1(x)
x = jax.nn.gelu(x)
x = hk.dropout(hk.next_rng_key(), rate = 0.0, x = x)
x = self.linear2(x)
x = hk.dropout(hk.next_rng_key(), rate = 0.0, x = x)
return x
'''
st.code(haiku, language)
with tab_1:
pytorch = '''
class MLP(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
'''
st.code(pytorch, language)
st.write("""
This is a class implementation of a multi-layer perceptron (MLP), a type of neural network. The `__init__` method
initializes the MLP with a given input dimension, `dim`, and hidden dimension, `hidden_dim`, as well as a dropout rate,
`dropout`, which is set to 0 by default.
The `forward` method defines the forward pass of the MLP, where the input, `x`, is passed through a series of linear layers
followed by a GELU non-linearity and dropout regularization. The output of the forward pass is the result of passing
the input through the defined sequence of layers.
The `__init__` method initializes the MLP by defining the sequence of layers that make up the network.
The first layer is a linear layer with `dim` input dimensions and `hidden_dim` output dimensions. This layer is followed by
a GELU activation function and a dropout layer with a dropout rate of `dropout`. The next layer is another linear layer
with `hidden_dim` input dimensions and `dim` output dimensions, followed by another dropout layer with the same dropout rate.
The sequence of layers is then stored in the `net` attribute of the MLP. This sequence of layers defines the architecture
of the MLP and determines how the input data is transformed as it passes through the network.
The `nn.Sequential` class is a PyTorch class that allows a sequence of layers to be defined and treated as a single,
composite layer. In this case, the `nn.Sequential` class is used to define a sequence of five layers: a linear layer,
a GELU activation function, a dropout layer, another linear layer, and another dropout layer. This sequence of layers
is then treated as a single, composite layer that can be used in the forward pass of the MLP.
""")
# --- MLP block: plain-English walkthrough of the nn.Sequential feed-forward stack ---
st.write("""
This code creates a neural network using PyTorch, which is a popular deep learning framework. The network consists of a
sequence of five layers, which are defined using the `nn.Sequential` class. The first layer is a linear layer, which
applies a linear transformation to the input data. The second layer is a GELU (Gaussian Error Linear Unit) activation
layer, which applies the GELU nonlinearity to the output of the previous layer. The GELU nonlinearity is a smooth,
monotonic function that has been shown to improve the performance of deep learning models. The third layer is a dropout
layer, which randomly sets some of the output values to zero. This is a regularization technique that helps to prevent
the model from overfitting to the training data. The fourth layer is another linear layer, which applies another linear
transformation to the output of the previous layer. The fifth and final layer is another dropout layer, which again
randomly sets some of the output values to zero. The resulting network takes an input vector of size `dim`, applies a
series of linear and nonlinear transformations to it, and produces an output vector of the same size.
""")
# --- Attention section: overview of scaled dot-product attention and the Attention module's API ---
st.subheader('Attention Mechanism', anchor='attention')
st.write("""
An attention function can be described as mapping a query and a set of key-value pairs to an output,
where the query, keys, values, and output are all vectors. The output is computed as a weighted sum
of the values, where the weight assigned to each value is computed by a compatibility function of the
query with the corresponding key.
This code defines a class called `Attention` which extends the `nn.Module` class from the PyTorch library. The `Attention`
class is a neural network module for computing attention. It has several key components:
- `__init__`: the constructor function for the class, which initializes the various layers and submodules of the
network, such as a softmax layer for computing attention, dropout layers for regularization, and linear layers for
projecting the input tensor into different subspaces.
- `forward`: the forward propagation function, which takes an input tensor x and applies the various layers of the
network in sequence to produce the output. This includes computing dot products between the query, key, and value
tensors, applying softmax to the dot products to compute the attention weights, and then using these attention weights
to compute the weighted sum of the values.
""")
# Side-by-side Attention implementations rendered in two tabs (PyTorch vs. Haiku).
# The snippets below are display-only string literals shown via st.code.
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_2:
haiku = '''
class Attention(hk.Module):
def __init__(self, dim, heads = 8, dim_head = 64):
super(Attention, self).__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.to_qkv = hk.Linear(output_size = inner_dim * 3, with_bias = False)
self.to_out = hk.Linear(dim) if project_out else IdentityLayer()
def __call__(self, x):
qkv = self.to_qkv(x)
qkv = jnp.split(qkv, 3, axis = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.heads), qkv)
dots = jnp.einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = nn.softmax(dots, axis = -1)
out = jnp.einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
out = hk.dropout(hk.next_rng_key(), rate = 0.0, x = out)
return out
'''
# NOTE(review): `language` is presumably a module-level constant (e.g. 'python')
# defined earlier in the file — confirm against the file header.
st.code(haiku, language)
with tab_1:
pytorch = '''
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.dropout = nn.Dropout(dropout)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
def forward(self, x):
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
attn = self.attend(dots)
attn = self.dropout(attn)
out = torch.matmul(attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
'''
st.code(pytorch, language)
# Line-by-line prose walkthrough of Attention.__init__ and Attention.forward,
# with the relevant snippets repeated via st.code for reference.
st.write("""
The `Attention` class can be used as a building block for more complex neural networks that need to compute attention
over some input. By specifying different values for the hyperparameters `dim`, `heads`, `dim_head`, and `dropout`, the behavior
of the attention mechanism can be customized to suit different tasks and applications.
""")
st.write("""
The `__init__` function of the `Attention` class is the constructor function, which is called when a new instance of
the class is created. It initializes the various layers and submodules of the attention network, such as the softmax
layer, dropout layers, and linear layers.
""")
st.code("""
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.dropout = nn.Dropout(dropout)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
""", language='python')
st.write("""
Here is a detailed breakdown of what happens in each line of the `__init__` function:
1. The `super().__init__()` line calls the constructor of the `nn.Module` class, which is the base class for all neural
network modules in PyTorch. This initializes the `nn.Module` class with the `Attention` class as its child.
2. The `inner_dim` variable is set to the product of the `dim_head` and `heads` hyperparameters. This will be the size of the
inner subspaces that the input tensor is projected into by the `self.to_qkv` layer.
3. The `project_out` variable is set to `True` if the number of heads is not equal to 1 or the dimension of the head is not
equal to the original dimension of the input tensor. This will be used to determine whether the output tensor should be
projected back into the original space of the input tensor.
4. The `self.heads` attribute is set to the value of the heads hyperparameter. This specifies the number of heads in the
attention mechanism.
5. The `self.scale` attribute is set to the inverse square root of the dimension of the head tensor. This will be used to
scale the dot products of the query and key tensors.
6. The `self.attend` attribute is set to a new `nn.Softmax` layer, which will be used to compute the attention weights from
the dot products of the query and key tensors.
7. The `self.dropout` attribute is set to a new `nn.Dropout` layer, which will be used to apply dropout regularization to
the attention weights tensor.
8. The `self.to_qkv` attribute is set to a new linear layer, which will be used to project the input tensor into the
query, key, and value subspaces.
9. The `self.to_out` attribute is set to either a new `nn.Sequential` module containing a linear layer and a dropout layer,
or an `nn.Identity` layer depending on the value of the `project_out` variable. This will be used to project the output
tensor back into the original space of the input tensor if necessary.
""")
st.write("""
The `forward` function of the `Attention` class takes an input tensor `x` and applies the attention mechanism to it.
""")
st.code("""
def forward(self, x):
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
attn = self.attend(dots)
attn = self.dropout(attn)
out = torch.matmul(attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
""", language)
st.write("""
Here is a detailed breakdown of what happens in each line of the `forward` function:
1. The input tensor `x` is projected into three subspaces using a linear layer `self.to_qkv`. These subspaces correspond to
the query, key, and value tensors used in the attention mechanism. The resulting tensor is then split into three parts
along the last dimension, using the `chunk` method.
2. The query, key, and value tensors are rearranged using the `rearrange` function, which applies a specified reshaping
operation to a tensor. In this case, the reshaping operation is defined by the string `'b n (h d) -> b h n d'`, which
specifies that the tensors should be reshaped such that the batch and head dimensions are interleaved.
3. The query and key tensors are multiplied together using the `torch.matmul` function, and then scaled by the value
`self.scale`, which is the inverse square root of the dimensions of the head tensor. This produces a tensor of dot
products, which can be interpreted as the similarity between each query and key.
4. The dot products tensor is passed through the softmax function using the `self.attend` layer, which produces a tensor
of attention weights. These weights represent the importance of each value in the output.
5. The attention weights tensor is passed through the `self.dropout` layer, which applies dropout regularization to
prevent overfitting.
6. The attention weights tensor is multiplied by the value tensor, using the `torch.matmul` function, to compute the
weighted sum of the values. This is the output of the attention mechanism.
7. The output tensor is reshaped using the `rearrange` function and then passed through the `self.to_out layer`, which
projects it back into the original space of the input tensor. This is the final output of the `forward` function.
""")
# --- Transformer Encoder section: quotes the architecture descriptions from
# Dosovitskiy et al. (ViT, 2020) and Vaswani et al. (Attention Is All You Need, 2017) ---
st.subheader('Transformer Encoder', anchor='transformer')
#st.image('')
st.write("""
The Transformer encoder (Vaswani et al., 2017) consists of alternating layers of multiheaded selfattention (MSA, see Appendix A) and MLP blocks (Eq. 2, 3). Layernorm (LN) is applied before
every block, and residual connections after every block (Wang et al., 2019; Baevski & Auli, 2019).
Encoder: The encoder is composed of a stack of N = 6 identical layers. Each layer has two
sub-layers. The first is a multi-head self-attention mechanism, and the second is a simple, positionwise fully connected
feed-forward network. We employ a residual connection [11] around each of
the two sub-layers, followed by layer normalization [1]. That is, the output of each sub-layer is
LayerNorm(x + Sublayer(x)), where Sublayer(x) is the function implemented by the sub-layer
itself. To facilitate these residual connections, all sub-layers in the model, as well as the embedding
layers, produce outputs of dimension dmodel = 512.
""")
# Overview of the TransformerEncoder module shown in the next tabs.
# Fixed a factual error: a transformer encoder is not a recurrent network —
# it processes the whole sequence in parallel via self-attention.
st.write("""
The `TransformerEncoder` class extends the `nn.Module` class from the PyTorch library. It is a neural network module that
implements a transformer encoder, which is a type of neural network that uses self-attention to compute a
weighted sum of its inputs. It has several key components:
- `__init__`: the constructor function for the class, which initializes the various layers and submodules of the network,
such as the `Attention` and `MLP` layers. It also creates a list of `PreNorm` layers, which are used to normalize the inputs
to the attention and `MLP` layers.
- `forward`: the forward propagation function, which takes an input tensor `x` and applies the various layers of the network
in sequence to produce the output. This includes applying the attention and MLP layers, adding the output of each layer
to the input, and then returning the final result.
The `TransformerEncoder` class can be used as a building block for more complex neural networks that need to compute
self-attention over some input. By specifying different values for the hyperparameters `dim`, `depth`, `heads`, `dim_head`,
`mlp_dim`, and `dropout`, the behavior of the transformer encoder can be customized to suit different tasks and applications.
""")
# Transformer encoder implementations in two tabs, followed by a prose
# walkthrough of __init__ and the (undocumented) forward snippet.
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_2:
haiku = '''
class Transformer(hk.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim):
super(Transformer, self).__init__()
self.layers = []
for _ in range(depth):
self.layers.append([
PreNorm(Attention(dim, heads=heads, dim_head=dim_head)),
PreNorm(MLP(dim, mlp_dim))
])
def __call__(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
'''
st.code(haiku, language)
with tab_1:
pytorch = '''
class TransformerEncoder(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
PreNorm(dim, MLP(dim, mlp_dim, dropout = dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
'''
st.code(pytorch, language)
st.code("""
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
PreNorm(dim, MLP(dim, mlp_dim, dropout = dropout))
]))
""", language)
st.write("""
The `__init__` function of the `TransformerEncoder` class is the constructor function, which is called when a new instance
of the class is created. It initializes the various layers and submodules of the transformer encoder, such as the
`Attention` and `MLP` layers. Here is a detailed breakdown of what happens in each line of the `__init__` function:
1. The `super().__init__()` line calls the constructor of the `nn.Module` class, which is the base class for all neural
network modules in PyTorch. This initializes the nn.Module class with the `TransformerEncoder` class as its child.
2. The `self.layers` attribute is set to a new `nn.ModuleList` object, which is a list of neural network modules. This list
will be used to store the `PreNorm` layers that normalize the inputs to the attention and MLP layers.
3. A `for` loop iterates over the range of the `depth` hyperparameter, which specifies the number of layers in the transformer
encoder. For each iteration of the loop, a new `PreNorm` layer is created for the attention and MLP layers, and then
appended to the `self.layers` list.
4. The `PreNorm` layers are created using the `dim` hyperparameter, which specifies the dimension of the input and output
tensors, and the `Attention` and `MLP` layers, which are initialized with the specified hyperparameters. The `PreNorm` layers
are used to normalize the inputs to the attention and MLP layers, which helps improve the stability and performance of
the transformer encoder.
""")
# NOTE(review): the forward snippet below is shown without an accompanying
# explanation paragraph — the page jumps straight to the next subheader.
st.code("""
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
""", language)
# --- Vision Transformer section: intro prose plus full ViT implementations
# (Haiku tab includes the hk.transform wrapper ViT; PyTorch tab shows the nn.Module). ---
st.subheader('Vision Transformer Model', anchor='visiontransformer')
st.write("""
We split an image into fixed-size patches, linearly embed each of them,
add position embeddings, and feed the resulting sequence of vectors to a standard Transformer
encoder. In order to perform classification, we use the standard approach of adding an extra learnable
“classification token” to the sequence.
""")
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_2:
haiku = '''
class VitBase(hk.Module):
def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64):
super(VitBase, self).__init__()
image_height, image_width = pair(image_size)
patch_height, patch_width = pair(patch_size)
self.patch_height = patch_height
self.patch_width = patch_width
assert image_height % patch_height == 0 and image_width % patch_width == 0
num_patches = (image_height // patch_height) * (image_width // patch_width)
assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
self.to_patch_embedding = hk.Linear(dim)
self.pos_embedding = hk.get_parameter('pos_embedding', shape = [1, num_patches + 1, dim], init = jnp.zeros)
self.cls_token = hk.get_parameter('cls_token', shape = [1, 1, dim], init = jnp.zeros)
self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim)
self.pool = pool
self.mlp_head = hk.Sequential([
LayerNorm(),
hk.Linear(num_classes)
])
def __call__(self, img):
img = rearrange(img, 'b (h p1) (w p2) c -> b (h w) (p1 p2 c)', p1 = self.patch_height, p2 = self.patch_width)
x = self.to_patch_embedding(img)
b, n, _ = x.shape
cls_tokens = repeat(self.cls_token, '() n d -> b n d', b=b)
x = jnp.concatenate([cls_tokens, x], axis = 1)
x += self.pos_embedding[:, :(n + 1)]
x = hk.dropout(hk.next_rng_key(), rate = 0.0, x = x)
x = self.transformer(x)
if self.pool == 'mean':
x = jnp.mean(x, axis = 1)
else:
x = x[:, 0]
x = self.mlp_head(x)
return x
'''
st.code(haiku, language)
# NOTE(review): sentence below is truncated ("Haiku requires ") — presumably
# meant to say Haiku modules must be wrapped with hk.transform; confirm intent.
st.write('Haiku requires ')
haiku_transform = '''
def ViT(**kwargs):
@hk.transform
def inner(img):
return VitBase(**kwargs)(img)
return inner
'''
st.code(haiku_transform, language)
with tab_1:
pytorch = '''
class ViT(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
num_classes,
dim,
depth,
heads,
mlp_dim,
pool = 'cls',
channels = 3,
dim_head = 64,
dropout = 0.,
emb_dropout = 0.
):
super().__init__()
image_height, image_width = pair(image_size)
patch_height, patch_width = pair(patch_size)
assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_height // patch_height) * (image_width // patch_width)
patch_dim = channels * patch_height * patch_width
assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
self.to_patch_embedding = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width),
nn.Linear(patch_dim, dim),
)
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
self.dropout = nn.Dropout(emb_dropout)
self.transformer = TransformerEncoder(dim, depth, heads, dim_head, mlp_dim, dropout)
self.pool = pool
self.to_latent = nn.Identity()
self.mlp_head = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_classes)
)
def forward(self, img):
x = self.to_patch_embedding(img)
b, n, _ = x.shape
cls_tokens = repeat(self.cls_token, '1 1 d -> b 1 d', b = b)
x = torch.cat((cls_tokens, x), dim=1)
x += self.pos_embedding[:, :(n + 1)]
x = self.dropout(x)
x = self.transformer(x)
x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]
x = self.to_latent(x)
return self.mlp_head(x)
'''
st.code(pytorch, language)
# Prose explanation of the ViT class: first a high-level summary, then a
# hyperparameter-by-hyperparameter and layer-by-layer description of __init__.
st.write("""
This is a class implementation of a vision transformer (ViT), a type of neural network that uses self-attention
mechanisms to process visual data. The `__init__` method initializes the ViT with several hyperparameters, including the
size of the input images, the patch size, the number of classes, the dimension of the hidden layers, the depth of the
transformer encoder, the number of attention heads, the dimension of the MLP layers, the pooling method, the number of
channels in the input images, and the dropout rate.
The `forward` method defines the forward pass of the ViT, where the input image is first split into patches and
transformed into patch embeddings using a linear layer. The patch embeddings are then concatenated with a special
"class" token and passed through a transformer encoder, which applies self-attention mechanisms to the input. The
output of the transformer encoder is then either pooled using mean pooling or reduced to a single vector using the
"class" token, depending on the `pool` parameter. The final output of the ViT is the result of passing the pooled or
reduced vector through a linear layer and a layer normalization layer.
The ViT model is a flexible and powerful model that can be used for a wide range of computer vision tasks. It has the
ability to process inputs of arbitrary size and to capture long-range dependencies in data, which makes it well-suited
for many types of visual data. However, it also has a large number of hyperparameters, which can make it challenging to
train and optimize.
""")
st.write("""
The `__init__` method initializes the ViT with several hyperparameters that determine the architecture and behavior of
the model. The `image_size` parameter determines the size of the input images, which should be a tuple of the form
`(image_height, image_width)`. The `patch_size` parameter determines the size of the patches into which the input images
will be split, which should also be a tuple of the form `(patch_height, patch_width)`. The `num_classes` parameter
determines the number of classes that the ViT will be trained to predict. The `dim` parameter determines the dimension of
the hidden layers in the ViT.
The `depth` parameter determines the number of layers in the transformer encoder, which is the core component of the ViT
that applies self-attention mechanisms to the input. The `heads` parameter determines the number of attention heads that
will be used in the transformer encoder. The `mlp_dim` parameter determines the dimension of the MLP layers that are used
in the transformer encoder. The `pool` parameter determines the pooling method that will be used to reduce the output of
the transformer encoder, which can be either 'cls' (class token pooling) or 'mean' (mean pooling).
The `channels` parameter determines the number of channels in the input images, which should be 3 for color images and 1
for grayscale images. The `dim_head` parameter determines the dimension of the attention heads used in the transformer
encoder. The `dropout` parameter determines the dropout rate that will be used in the transformer encoder.
The `emb_dropout` parameter determines the dropout rate that will be used on the patch embeddings after they are
concatenated with the "class" token.
After the hyperparameters are set, the `__init__` method performs some checks to ensure that the input image dimensions
are divisible by the patch size and that the pool parameter is set to a valid value. If these checks fail, an error
message is printed.
Next, the `__init__` method defines several layers that will be used in the forward pass of the ViT. The
`to_patch_embedding` layer is a sequential layer that first rearranges the input tensor to group the patches together and
then applies a linear layer to transform the patches into patch embeddings. The `pos_embedding` layer is a parameter
tensor that is used to add positional information to the patch embeddings. The `cls_token` layer is a parameter tensor
that represents the "class" token that will be concatenated with the patch embeddings. The `dropout` layer is a dropout
layer that will be applied to the patch embeddings after they are concatenated with the "class" token.
The `transformer` layer is a transformer encoder that will be applied to the concatenated patch embeddings and "class"
token. The transformer encoder applies self-attention mechanisms to the input using the specified number of layers,
attention heads, and MLP dimensions.
The `pool` variable is used to store the value of the `pool` parameter, which determines the pooling method that will be
used on the output of the transformer encoder. The `to_latent` layer is an identity layer that will be applied to the
output of the transformer encoder before it is passed to the final linear layer.
Finally, the `mlp_head` layer is a sequential layer that consists of a layer normalization layer followed by a linear
layer that maps the output of the transformer encoder to the predicted class probabilities.
Once the layers have been defined, the __init__ method is complete and the ViT is ready to process input images.
""")
# Section for Training
# Shows how the ViT is instantiated with concrete hyperparameters.
# NOTE(review): CFG (image_size, num_classes, device, ...) is presumably a
# configuration object defined earlier in the file — confirm.
st.header('Training', anchor='Training')
st.subheader('Initialize Vision Transformer Model', anchor='InitializeModel')
st.write('''
''')
tab_pytorch, tab_haiku = st.tabs(["PyTorch", "Haiku"])
with tab_pytorch:
pytorch = '''
model = ViT(
image_size = CFG.image_size,
patch_size = 16,
num_classes = CFG.num_classes,
dim = 1024,
depth = 6,
heads = 16,
mlp_dim = 2048,
dropout = 0.1,
emb_dropout = 0.1
).to(CFG.device)
'''
st.code(pytorch, language)
with tab_haiku:
# Haiku example not written yet — tab intentionally shows an empty snippet.
haiku = '''
'''
st.code(haiku, language)
# Prose walkthrough of the model-initialization snippet above.
# Fixed leftover conversational phrasing ("The code you provided") that does
# not belong on a published tutorial page.
st.write("""
This code creates a new instance of the ViT class using the specified hyperparameters. The image_size
parameter is set to the CFG.image_size variable, which is assumed to be defined elsewhere in the code. The patch_size
parameter is set to 16, which means that the input images will be split into patches of size 16x16 pixels. The
num_classes parameter is set to CFG.num_classes, which is again assumed to be defined elsewhere.
The dim parameter is set to 1024, which determines the dimension of the hidden layers in the ViT. The depth parameter
is set to 6, which determines the number of layers in the transformer encoder. The heads parameter is set to 16, which
determines the number of attention heads that will be used in the transformer encoder. The mlp_dim parameter is set to
2048, which determines the dimension of the MLP layers used in the transformer encoder.
The dropout parameter is set to 0.1, which determines the dropout rate that will be used in the transformer encoder.
The emb_dropout parameter is set to 0.1, which determines the dropout rate that will be applied to the patch embeddings
after they are concatenated with the "class" token.
After the ViT is created, the to method is called on the instance, passing in the CFG.device variable as an argument.
This is assumed to be a PyTorch device, such as a CPU or a GPU, which determines where the ViT will be run. This allows
the ViT to be run on different hardware, depending on the availability and capabilities of the device.
""")
# --- Image Augmentation section: torchvision transform pipelines for
# train / validation / test (AutoAugment only on the training set;
# normalization uses the standard ImageNet mean/std). ---
st.subheader('Image Augmentation', anchor='ImageAugmentation')
st.write('''
''')
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_2:
# Haiku example not written yet — tab intentionally shows an empty snippet.
haiku = '''
'''
st.code(haiku, language)
with tab_1:
pytorch = '''
train_transforms = T.Compose([
T.Resize((CFG.image_size, CFG.image_size)),
T.AutoAugment(policy = CFG.policy),
T.ToTensor(),
T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
validation_transforms = T.Compose([
T.Resize(CFG.image_size),
T.ToTensor(),
T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
test_transforms = T.Compose([
T.Resize(CFG.image_size),
T.ToTensor(),
T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
'''
st.code(pytorch, language)
# Section for Dataset
# CIFAR-10 description plus the parameters used when loading it from torchvision.
# Fixed typo: "randomnly" -> "randomly".
st.subheader('Dataset', anchor='Dataset')
st.write('''
The CIFAR-10 (Canadian Institute For Advanced Research) dataset is one of the most widely used datasets for benchmarking research in computer vision and machine learning. It is a subset of the 80 million tiny images dataset. The dataset consists of 60000 (32x32) labeled color images in 10 different classes. There are 6000 images per class. The classes are airplane, automobile, bird, cat, deer, dog, frog, horse, ship, and truck. The data is split into 50000 training images (five batches of 10000) and 10000 test images. The training batches contain 5000 randomly selected images from each class while the test set includes 1000 from each.
Load the CIFAR-10 dataset from TorchVision with the following parameters:
- `root`: a path to where the dataset is stored. We define a directory, `'./cifar_data/'`, that will be created in this case.
- `train`: specifies whether the dataset is for training or not. The train parameter should only be set to `True` for `train_dataset`. It should be set to `False` in `test_dataset`.
- `download`: whether or not to download the dataset from the internet, if it is not already available in root. If you do not already have the dataset you will want this set to `True`.
- `transform`: apply image augmentations and transformations to the dataset. Previously we defined transformations using `AutoAugment` for CIFAR-10.
''')
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_1:
pytorch = '''
train_dataset = CIFAR10(
root = './cifar_data/',
train = True,
download = True,
transform = train_transform,
)
test_dataset = CIFAR10(
root = './cifar_data/',
train = False,
download = True,
transform = test_transform,
)
'''
st.code(pytorch, language)
# Validation split: 80/20 random_split of the CIFAR-10 test set, seeded for
# reproducibility via a torch.Generator.
st.write('''
In order to create a validation split, we will use PyTorch's `torch.utils.data.random_split` to randomly split the CIFAR10 `test_dataset` into a new non-overlapping `validation_dataset` and `test_dataset`. The `validation_dataset` will be 80% of the data in the original test set. The new `test_dataset` will encompass the remaining 20%. Additionally, we will set a generator with the seed from the configuration to reproduce the same results.
PyTorch's `torch.utils.data.random_split` takes the parameters:
- `dataset`: The dataset which will be split. In our case we will want to split the previously defined `test_dataset`.
- `length`: The lengths for the dataset split. Here we will use an 80:20 split.
- `generator`: Used to reproduce the same split results when set with a manual seed. Use the `seed` variable defined in the configuration.
''')
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_1:
pytorch = '''
validation_dataset_size = int(len(test_dataset) * 0.8)
test_dataset_size = len(test_dataset) - validation_dataset_size
validation_dataset, test_dataset = torch.utils.data.random_split(
test_dataset,
[validation_dataset_size, test_dataset_size],
generator=torch.Generator().manual_seed(CFG.seed)
)
'''
st.code(pytorch, language)
with tab_2:
# Haiku example not written yet — tab intentionally shows an empty snippet.
haiku = '''
'''
st.code(haiku, language)
# Section for Dataloader
# Prose summarizing torch.utils.data.DataLoader and its main parameters
# (adapted from the PyTorch documentation).
st.subheader('Dataloader', anchor='Dataloader')
st.write('''
Data loader. Combines a dataset and a sampler, and provides an iterable over the given dataset.
The DataLoader supports both map-style and iterable-style datasets with single- or multi-process loading, customizing loading order and optional automatic batching (collation) and memory pinning.
See torch.utils.data documentation page for more details.
dataset (Dataset) – dataset from which to load the data.
batch_size (int, optional) – how many samples per batch to load (default: 1).
shuffle (bool, optional) – set to True to have the data reshuffled at every epoch (default: False).
PyTorch provides two data primitives: torch.utils.data.DataLoader and torch.utils.data.Dataset that allow you to use pre-loaded datasets as well as your own data. Dataset stores the samples and their corresponding labels, and DataLoader wraps an iterable around the Dataset to enable easy access to the samples.
''')
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_1:
pytorch = '''
train_loader = Dataloader(
train_dataset,
batch_size = CFG.batch_size,
shuffle = True,
)
validation_loader = Dataloader(
validation_dataset,
batch_size = CFG.batch_size,
shuffle = True,
)
test_loader = Dataloader(
test_dataset,
batch_size = CFG.batch_size,
shuffle = True,
)
'''
st.code(pytorch, language)
with tab_2:
haiku = '''
'''
st.code(haiku, language)
# section for loss function
st.subheader('Loss function')
st.write('''
''')
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_2:
haiku = '''
criterion = optax.softmax_cross_entropy()
'''
st.code(haiku, language)
with tab_1:
pytorch = '''
criterion = nn.CrossEntropyLoss()
'''
st.code(pytorch, language)
# section for optimizer
# Adam in both frameworks; the optax call spells out the default betas.
st.subheader('Optimizer')
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_2:
haiku = '''
optimizer = optax.adam(learning_rate=CFG.learning_rate, b1=0.9, b2=0.99)
'''
st.code(haiku, language)
with tab_1:
pytorch = '''
optimizer = optim.Adam(model.parameters(), lr=CFG.learning_rate)
'''
st.code(pytorch, language)
# section for learning rate scheduler
st.subheader('Learning Rate Scheduler')
tab_1, tab_2 = st.tabs(["PyTorch", "Haiku"])
with tab_2:
haiku = '''
'''
st.code(haiku, language)
with tab_1:
# NOTE(review): `gamma` in the snippet is not defined anywhere on this page —
# presumably meant to come from CFG; confirm before publishing.
pytorch = '''
scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
'''
st.code(pytorch, language)
# --- Placeholder sections ---
# The remaining sections (train step, validation step, training loop,
# save/load model, predictions, distributed training, citations) are scaffolded
# with empty snippets to be filled in; each renders an empty code box per tab.
# Section for training step
st.subheader('Train Step', anchor='TrainStep')
st.write('''
''')
tab_pytorch, tab_haiku = st.tabs(["PyTorch", "Haiku"])
with tab_pytorch:
pytorch = '''
'''
st.code(pytorch, language)
with tab_haiku:
haiku = '''
'''
st.code(haiku, language)
# Section for Validation Step
st.subheader('Validation Step', anchor='ValidationStep')
st.write('''
''')
tab_pytorch, tab_haiku = st.tabs(["PyTorch", "Haiku"])
with tab_pytorch:
pytorch = '''
'''
st.code(pytorch, language)
with tab_haiku:
haiku = '''
'''
st.code(haiku, language)
# Section for Training Loop
st.subheader('Training Loop', anchor='TrainingLoop')
st.write('''
''')
tab_pytorch, tab_haiku = st.tabs(["PyTorch", "Haiku"])
with tab_pytorch:
pytorch = '''
'''
st.code(pytorch, language)
with tab_haiku:
haiku = '''
'''
st.code(haiku, language)
# Section for Saving Model
st.subheader('Save Trained Model', anchor='SaveModel')
tab_pytorch, tab_haiku = st.tabs(["PyTorch", "Haiku"])
with tab_pytorch:
pytorch = '''
'''
st.code(pytorch, language)
with tab_haiku:
haiku = '''
'''
st.code(haiku, language)
# Section for Loading Model
st.subheader('Load Trained Model', anchor='LoadModel')
tab_pytorch, tab_haiku = st.tabs(["PyTorch", "Haiku"])
with tab_pytorch:
pytorch = '''
'''
st.code(pytorch, language)
with tab_haiku:
haiku = '''
'''
st.code(haiku, language)
# Section for Making Predictions
st.subheader('Make Predictions', anchor='MakePredictions')
tab_pytorch, tab_haiku = st.tabs(["PyTorch", "Haiku"])
with tab_pytorch:
pytorch = '''
'''
st.code(pytorch, language)
with tab_haiku:
haiku = '''
'''
st.code(haiku, language)
# Section for distributed training
st.header('Distributed Training', anchor='DistributedTraining')
st.subheader('')
# Section for References
st.header('References', anchor='References')
st.write('https://www.cs.toronto.edu/~kriz/cifar.html')
# Section for Citations
st.header('Citations', anchor='Citations')
tab_pytorch, tab_haiku = st.tabs(["PyTorch", "Haiku"])
with tab_pytorch:
pytorch = '''
'''
st.code(pytorch, language)
with tab_haiku:
haiku = '''
'''
st.code(haiku, language)
|
import streamlit as st
# Page section: prose explanation of torch.nn.Linear.
st.header('Linear')
st.write("""
`nn.Linear()` is a class in PyTorch. This class represents a linear transformation of the input data. In other words, it
applies a linear function to the input data, which can be used to map the input data to a different space. This is often
used in the first few layers of a neural network, where it helps to extract features from the input data and compress it
into a more compact representation that is better suited for subsequent processing by the rest of the network.
""")
# NOTE(review): this paragraph refers to "this equation" but no equation is
# rendered between the two st.write calls — presumably an st.latex call is
# missing here; confirm with the author.
st.write("""
In this equation, **x** is a vector of input data, **A** is a matrix of weights, **b** is a vector of biases, and **y** is the output of
the linear layer. The equation says that the output of the linear layer is obtained by first multiplying the input
vector **x** by the weight matrix **A**, which applies the linear transformation to the input data. The result is then added to
the bias vector **b**, which shifts the output of the linear layer. The transpose of the weight matrix **A** is used in the
equation because the dimensions of **x** and **A** must be compatible for the multiplication to be performed. The transpose of a
matrix simply flips the matrix over its diagonal, so the rows and columns are switched, which allows the multiplication
to be performed.
""")
# Page section: prose explanation of torch.nn.LayerNorm.
st.header('LayerNorm')
st.write("""
`nn.LayerNorm` is a class in PyTorch, which is a popular deep learning framework. This class represents a layer
normalization operation, which is a type of normalization that is applied to the inputs of a layer in a neural network.
Normalization is a common technique used in deep learning to improve the performance and stability of a neural network.
It helps to standardize the inputs to a layer, which can speed up training and improve the generalization of the model.
The `nn.LayerNorm` class normalizes the input data across the specified dimensions, which can help to reduce the variance
of the data and prevent the network from overfitting. It can be used as part of a larger model, such as a deep neural
network, to improve its performance.
""")
st.write("""
This equation describes the layer normalization operation, where **x** is the input data, **E[x]** is the mean of the input
data, **Var[x]** is the variance of the input data, epsilon is a small constant added to the variance to avoid division by
zero, **gamma** and **beta** are learnable parameters of the normalization operation, and **y** is the output of the normalization.
The layer normalization operation first subtracts the mean of the input data from each element of the input, which
centers the data around zero. It then divides the centered data by the square root of the variance of the input data,
which scales the data so that it has unit variance. This helps to standardize the input data and make it more consistent,
which can improve the performance of the neural network.
The **gamma** and **beta** parameters are learnable, which means that they can be adjusted during training to further improve
the performance of the normalization operation. The **gamma** parameter is used to scale the normalized data, and the **beta**
parameter is used to shift the normalized data. This allows the normalization operation to be adjusted to better suit
the specific needs of the network.
The **epsilon** constant is added to the variance to avoid division by zero. It is a very small value, such as 1e-5, which
has a negligible effect on the normalization operation but prevents numerical instability.
""")
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from setuptools import setup # type: ignore
# Package definition for cc_net; `pip install .` uses this metadata.
setup(
    name="cc_net",
    version="1.0.0",
    packages=["cc_net"],
    # metadata to display on PyPI
    author="Guillaume Wenzek",
    author_email="[email protected]",
    description="Tools to download and clean Common Crawl",
    keywords="common crawl dataset",
    url="https://github.com/facebookresearch/cc_net",
    license="CC-BY-NC-4.0",
    # Reuse the README as the PyPI long description.
    long_description=Path("README.md").read_text(),
    long_description_content_type="text/markdown",
    project_urls={
        "Bug Tracker": "https://github.com/facebookresearch/cc_net/issues",
        "Source Code": "https://github.com/facebookresearch/cc_net",
    },
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3.7",
    ],
    python_requires=">=3.7",
    install_requires=[
        "beautifulsoup4>=4.7.1",
        "pandas>=0.23.4",
        "requests>=2.22.0",
        "fasttext>=0.9.1",
        "sentencepiece>=0.1.82",
        # kenlm is not on PyPI; installed straight from git.
        "kenlm @ git+https://github.com/kpu/kenlm.git@master",
        "func_argparse>=1.1.1",
        "psutil>=5.6.3",
        "sacremoses",
        "submitit>=1.0.0",
        "typing_extensions",
    ],
    extras_require={
        "dev": ["mypy==0.790", "pytest", "black==19.3b0", "isort==5.6.4"],
        # To use scripts inside cc_net/tools
        "tools": ["lxml", "sentence_splitter"],
        # Memory-efficient hashset.
        # This fork only compiles the kind of dict used by cc_net.
        # Full version is at https://github.com/atom-moyer/getpy
        "getpy": ["getpy @ git+https://github.com/gwenzek/[email protected]"],
    },
    # Ship the data files (e.g. cutoff.csv) inside the wheel.
    package_data={"cc_net": ["data/*"]},
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Main script to download a CC dump, remove duplicates, split by language and
filter the documents.
The pipeline parameters are described in the `Config` class.
"""
import hashlib
import json
import time
import warnings
from argparse import ArgumentParser
from collections import defaultdict
from itertools import repeat
from pathlib import Path
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Sequence, Tuple
import func_argparse
# Local scripts
from cc_net import dedup, execution, jsonql, minify, perplexity, process_wet_file
from cc_net import regroup as regroup_module
from cc_net import split_by_lang
from cc_net.execution import Executor
# Constants.
FILE_DIR = Path(__file__).parent  # directory containing this module
CUTOFF_CSV = FILE_DIR / "data" / "cutoff.csv"  # perplexity cutoffs used by "pp_bucket"
# Default sequence of mining steps; a Config may restrict/reorder it
# (step names are resolved in `_mine_shard`).
DEFAULT_PIPELINE = [
    "dedup",
    "lid",
    "keep_lang",
    "sp",
    "lm",
    "pp_bucket",
    "drop",
    "split_by_lang",
]
class Config(NamedTuple):
    """
    Mine Common Crawl with the given settings.

    config_name: name of this configuration (used in job names and output layout)
    dump: CC dump id
    output_dir: working directory
    mined_dir: name of the destination folder, full path will be {output_dir}/{mined_dir}/{dump_id}
    execution: choose how to parallelize the execution
    num_shards: number of shards to split the dump
    num_segments_per_shard: allow to download a small portion of CC (eg for tests)
    metadata: base URL of pre-computed metadata (used by the "fetch_metadata" step)
    min_len: remove documents shorter than this (in chars)
    hash_in_mem: number of shards hashes to use for dedup
    lang_whitelist: only treat those languages
    lang_blacklist: ignore those languages
    lang_threshold: remove docs whose top language score is lower than this
    keep_bucket: keep only those perplexity buckets, chosen from (head, middle, tail, all)
    lm_dir: folder containing LMs
    lm_languages: only use LMs for the following languages
    cutoff: cutoff file to use for split in head/middle/tail
    mine_num_processes: number of processes to use for mining
    target_size: size of final files produced during the `regroup` stage
    cleanup_after_regroup: delete intermediary files after regroup
    task_parallelism: max number of tasks to run in parallel
    pipeline: restricts the mining pipeline to the given steps. Order is important !
    experiments: (HACK) enable specific experiments in the code
    cache_dir: where to cache downloaded WET files (no caching when None)
    """

    config_name: str = "base"
    dump: str = "2017-51"
    output_dir: Path = Path("data")
    mined_dir: str = "mined"
    execution: str = "auto"
    num_shards: int = 1600
    num_segments_per_shard: int = -1
    metadata: Optional[str] = None
    min_len: int = 300
    hash_in_mem: int = 50
    lang_whitelist: Sequence[str] = []
    lang_blacklist: Sequence[str] = []
    lang_threshold: float = 0.5
    keep_bucket: Sequence[str] = []
    lm_dir: Path = Path("data/lm_sp")
    cutoff: Path = CUTOFF_CSV
    lm_languages: Optional[Sequence[str]] = None
    mine_num_processes: int = 16
    target_size: str = "4G"
    cleanup_after_regroup: bool = True
    task_parallelism: int = -1
    pipeline: Sequence[str] = DEFAULT_PIPELINE
    experiments: Sequence[str] = []
    cache_dir: Optional[Path] = None

    def get_executor(
        self, name: str, timeout_hour: int = 1, mem_gb: int = 1, cpus: int = 1
    ) -> Executor:
        """Build an executor whose job name encodes the config and experiments,
        so runs with different settings don't collide in the logs."""
        name = "_".join((name, self.config_name, *self.experiments))
        return execution.get_executor(
            name,
            self.output_dir / "logs",
            self.execution,
            timeout_hour=timeout_hour,
            mem_gb=mem_gb,
            cpus=cpus,
            task_parallelism=self.task_parallelism,
        )

    def get_cc_shard(self, shard: int) -> process_wet_file.CCShardReader:
        """Build a reader over one shard of the configured CC dump,
        optionally backed by the on-disk cache."""
        dump_cache: Optional[Path] = None
        if self.cache_dir:
            self.cache_dir.mkdir(exist_ok=True)
            dump_cache = self.cache_dir / self.dump
            dump_cache.mkdir(exist_ok=True)
        return process_wet_file.CCShardReader(
            self.dump,
            shard=shard,
            num_shards=self.num_shards,
            num_segments_per_shard=self.num_segments_per_shard,
            min_len=self.min_len,
            cache_dir=dump_cache,
        )

    @classmethod
    def from_json(cls, json_file: Path) -> "Config":
        """Load a Config from a JSON file; "//" line comments are stripped first."""
        raw_lines = json_file.read_text().splitlines()
        raw_lines = [l for l in raw_lines if not l.strip().startswith("//")]
        json_config = json.loads("".join(raw_lines))
        # Convert string fields that are actually filesystem paths.
        path_keys = ["cache_dir", "lm_dir", "output_dir"]
        for key in path_keys:
            if key in json_config:
                json_config[key] = Path(json_config[key])
        return Config(**json_config)

    @property
    def will_split(self) -> bool:
        # True when the pipeline writes one directory per shard instead of one file.
        return "split_by_lang" in self.pipeline or "split_by_segment" in self.pipeline

    def get_lm_languages(self) -> Sequence[str]:
        """Languages to load LMs for: the explicit list, else the whitelist,
        else every model found in `lm_dir` minus the blacklist."""
        if self.lm_languages is not None:
            return self.lm_languages

        if self.lang_whitelist:
            return self.lang_whitelist

        languages = [m.name.split(".")[0] for m in self.lm_dir.glob("*.arpa.bin")]
        if self.lang_blacklist:
            languages = [l for l in languages if l not in self.lang_blacklist]
        return languages

    def get_mined_dir(self, regroup: bool = False) -> Path:
        # Shard directories carry a "_split" suffix until they are regrouped.
        if self.will_split and not regroup:
            return self.output_dir / f"{self.mined_dir}_split" / self.dump
        return self.output_dir / self.mined_dir / self.dump
# Pre-defined configurations, selectable on the CLI with `--config <name>`.
BASE_CONFIG = Config()

BYLANG_CONFIG = Config(
    config_name="by_lang",
    mined_dir="mined_by_lang",
    # Same pipeline as base, but the final step splits per language.
    pipeline=list(BASE_CONFIG.pipeline[:-1]) + ["split_by_lang"],
)

REPRODUCE_CONFIG = Config(
    config_name="reproduce",
    dump="2019-09",
    mined_dir="reproduce",
    pipeline=["fetch_metadata", "keep_lang", "keep_bucket", "split_by_lang"],
    metadata="https://dl.fbaipublicfiles.com/cc_net/1.0.0",
    # Optional filtering:
    # It won't change much the execution speed, but decreases the disk requirement.
    # Restrict languages
    lang_whitelist=["fr"],
    # Restrict perplexity buckets
    # Top languages have been split in perplexity buckets according
    # to a Wikipedia trained LM.
    # The buckets from low perplexity (good) to high (bad) are:
    # ["head", "middle", "tail"]
    # Languages without a LM have only one bucket "all".
    # It won't change much the execution speed, but decreases the disk requirement.
    keep_bucket=["head", "all"],
    mine_num_processes=1,
)

# Small local run over 4 shards, used by the test suite.
TEST_CONFIG = BASE_CONFIG._replace(
    config_name="test",
    dump="2019-09",
    output_dir=Path("test_data"),
    execution="local",
    num_shards=4,
    num_segments_per_shard=1,
    hash_in_mem=2,
    mine_num_processes=2,
    lang_whitelist=["de", "it", "fr"],
    target_size="32M",
    cleanup_after_regroup=False,
    cache_dir=Path("test_data/wet_cache"),
)

# Mapping from `--config` name to the actual configuration.
PREDEF_CONFIGS = {
    "base": BASE_CONFIG,
    "by_lang": BYLANG_CONFIG,
    "test": TEST_CONFIG,
    "test_slurm": TEST_CONFIG._replace(execution="slurm,partition=dev"),
    "debug": TEST_CONFIG._replace(config_name="debug", mine_num_processes=0),
    "reproduce": REPRODUCE_CONFIG,
    "augment": BASE_CONFIG._replace(
        config_name="augment", dump="2019-13", lang_blacklist=["en"]
    ),
}
def tmp(output: Path) -> Path:
    """Return the sibling work-in-progress path for *output*.

    ``dir/name.ext`` becomes ``dir/name.tmp.ext`` — the ``.tmp`` marker is
    inserted before the last suffix.
    """
    return output.with_name(f"{output.stem}.tmp{output.suffix}")
def finalize(tmp_output: Path, output: Path) -> None:
    """Promote *tmp_output* to *output*, along with its ``.index`` sidecar.

    Does nothing (besides a warning) when the tmp file is absent.
    """
    if not tmp_output.exists():
        warnings.warn(f"Targeted tmp output {tmp_output} doesn't exists.")
        return

    index_sidecar = tmp_output.with_name(tmp_output.name + ".index")
    tmp_output.rename(output)
    if index_sidecar.exists():
        index_sidecar.rename(output.with_name(output.name + ".index"))
def _transpose(iterable: Sequence[Tuple[Any, ...]], n=-1) -> Tuple[List, ...]:
if n < 0:
n = len(iterable[0])
columns: tuple = tuple([] for _ in range(n))
for row in iterable:
assert len(row) == n, f"Found tuple of len({len(row)}, expected {n}: {row}"
for i in range(n):
columns[i].append(row[i])
return columns
def hashes(conf: Config) -> List[Path]:
    """Computes hashes for each shard.

    Returns the list of per-shard hash files (one .bin per shard), scheduling
    jobs for any missing ones on the configured executor first.
    """
    hashes_dir = conf.output_dir / "hashes" / conf.dump
    outputs = [hashes_dir / f"{shard:04d}.bin" for shard in range(conf.num_shards)]
    # Only (re)compute shards whose output file doesn't exist yet.
    missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]

    if not missing_outputs:
        return outputs

    hashes_dir.mkdir(parents=True, exist_ok=True)
    # With FlatHashSet we need ~2Gb of RAM / shard, but we need to account for
    # overhead due to how the dynamic allocation works.
    ex = conf.get_executor(f"hashes_{conf.dump}", mem_gb=4, timeout_hour=6, cpus=2)
    ex(_hashes_shard, repeat(conf), *_transpose(missing_outputs))

    # Wait a bit so that files appears on the disk.
    time.sleep(20)
    assert all(o.exists() for o in outputs)
    return outputs
def _hashes_shard(conf: Config, shard: int, output: Path):
    """One job: collect the content hashes of a single CC shard into `output`.

    Writes to a tmp file first, then promotes it with `finalize`.
    """
    tmp_output = tmp(output)
    jsonql.run_pipes(
        dedup.HashesCollector(field="raw_content", output=tmp_output),
        inputs=conf.get_cc_shard(shard),
    )
    finalize(tmp_output, output)
    return f"Hashed {output}"


# Hash-shard counts used by the "hashes" experiment (paper figures):
# shard i of that experiment deduplicates against HASHES_IN_MEM[i] hash files.
HASHES_IN_MEM = [0, 1, 2, 5, 10, 20, 50, 100, 200, 400]
def mine(conf: Config) -> List[Path]:
    """Remove dups, run LID and LMs, and split by lang and quality.

    Returns the expected output paths (dirs when splitting, files otherwise),
    scheduling `_mine_shard` jobs for the missing ones.
    """
    mined_dir = conf.get_mined_dir()
    if conf.will_split:
        # Give a directories when splitting
        outputs = [mined_dir / f"{shard:04d}" for shard in range(conf.num_shards)]
    else:
        # Files otherwise
        outputs = [
            mined_dir / f"{shard:04d}.json.gz" for shard in range(conf.num_shards)
        ]

    if "mini_again" in conf.experiments:
        # HACK: experiment-specific output layout.
        mined_dir = conf.output_dir / "mini_again" / conf.dump
        outputs = [mined_dir / f"{shard:04d}" for shard in range(conf.num_shards)]

    # TODO: try to reduce this / make it a function of "hash_in_mem" / num_langs
    mem_gb = 60 + 1 * conf.hash_in_mem
    timeout_hour = 5
    if "hashes" in conf.experiments:
        # HACK: used for generating paper figures
        outputs = [
            conf.output_dir / f"hashes_exp/{conf.dump}_0000_dedup{h:03d}.json.gz"
            for h in HASHES_IN_MEM
        ]
        mem_gb = int(max(HASHES_IN_MEM) * 1.2)
        timeout_hour = 8

    missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]

    if "mini_again" in conf.experiments:
        # HACK: only re-mine two hand-picked shards for this experiment.
        missing_outputs = [
            (shard, o)
            for shard, o in enumerate(outputs)
            if shard in [5, 139] and not o.exists()
        ]

    if not missing_outputs:
        return outputs

    mined_dir.mkdir(parents=True, exist_ok=True)
    ex = conf.get_executor(
        f"mine_{conf.dump}",
        mem_gb=mem_gb,
        timeout_hour=timeout_hour,
        cpus=conf.mine_num_processes + 1,
    )

    # Compute hashes firsts.
    if "dedup" in conf.pipeline:
        # Each mining job deduplicates against a group of `hash_in_mem` hash files.
        hashes_groups = list(jsonql.grouper(hashes(conf), conf.hash_in_mem))
        hashes_files: Iterable[List[Path]] = [
            hashes_groups[shard // conf.hash_in_mem] for shard, o in missing_outputs
        ]
    else:
        hashes_files = repeat([])

    ex(_mine_shard, repeat(conf), hashes_files, *_transpose(missing_outputs))

    assert all(o.exists() for o in outputs)
    return outputs
def _get_segment(tmp_output: Path, doc: dict) -> str:
segment: str = doc["cc_segment"].split("/")[-1]
return str(tmp_output / segment.replace(".warc.wet.gz", ".json.gz"))
def _mine_shard(conf: Config, hashes: List[Path], shard: int, output: Path) -> str:
    """One job: run the configured pipeline steps over a single CC shard.

    Builds every known step, then executes only those named in
    `conf.pipeline`, in that order.
    """
    assert conf.pipeline
    tmp_output = tmp(output)
    if "hashes" in conf.experiments:
        # HACK: used for generating paper figures
        # The shard index selects how many hash files to dedup against;
        # every job then reads shard 0 of the actual dump.
        hashes_in_mem = shard
        hashes = hashes[: HASHES_IN_MEM[hashes_in_mem]]
        shard = 0
    cc_shard = conf.get_cc_shard(shard)

    steps: Dict[str, Optional[jsonql.Transformer]] = {}
    lang_id = Path("bin") / "lid.bin"
    steps["lid_before_dedup"] = split_by_lang.Classifier(
        model=lang_id, field="raw_content", out_field="lid_before_dedup", top=5
    )
    steps["dedup"] = dedup.DuplicatesRemover(field="raw_content", hashes_files=hashes)
    steps["lid"] = split_by_lang.Classifier(
        model=lang_id,
        field="raw_content",
        out_field="language",
        top=1,
        threshold=conf.lang_threshold,
    )
    steps["lid_after_dedup"] = split_by_lang.Classifier(
        model=lang_id, field="raw_content", out_field="lid_after_dedup", top=5
    )

    # Language filter: blacklist takes precedence over whitelist; no-op otherwise.
    if conf.lang_blacklist:
        steps["keep_lang"] = jsonql.where(
            [lambda doc: doc.get("language") not in set(conf.lang_blacklist)]
        )
    elif conf.lang_whitelist:
        steps["keep_lang"] = jsonql.where(
            [lambda doc: doc.get("language") in set(conf.lang_whitelist)]
        )
    else:
        steps["keep_lang"] = None

    tok_field = "tokenized"
    steps["sp"] = perplexity.MultiSentencePiece(
        {l: conf.lm_dir / f"{l}.sp.model" for l in conf.get_lm_languages()},
        field="raw_content",
        output_field=tok_field,
        normalize=True,
    )
    steps["lm"] = perplexity.DocLM(
        {l: conf.lm_dir / f"{l}.arpa.bin" for l in conf.get_lm_languages()},
        field=tok_field,
        output_field="perplexity",
        normalize=False,  # Normalization is done before SentencePiece
        # load_method=kenlm.LoadMethod.PARALLEL_READ,
    )
    steps["pp_bucket"] = perplexity.PerplexityBucket(CUTOFF_CSV)
    steps["drop"] = perplexity.DropKeys(tok_field)

    steps["keep_bucket"] = None
    if conf.keep_bucket:
        # Docs without a LM land in the implicit "all" bucket.
        steps["keep_bucket"] = jsonql.where(
            [lambda doc: doc.get("bucket", "all") in conf.keep_bucket]
        )

    if "fetch_metadata" in conf.pipeline:
        # TODO: better default
        assert conf.metadata is not None
        steps["fetch_metadata"] = minify.MetadataFetcher(
            f"{conf.metadata}/{conf.dump}/"
        )

    steps["minify"] = minify.Minifier()

    pattern = str(tmp_output / "{language}_{bucket}.json.gz")
    steps["split_by_lang"] = jsonql.split(pattern=str(pattern), mkdir=True)

    steps["split_by_segment"] = jsonql.split(
        split_fn=lambda doc: _get_segment(tmp_output, doc), mkdir=True
    )

    # Steps set to None (disabled filters) are dropped by filter().
    pipeline = filter(None, (steps[s] for s in conf.pipeline))

    jsonql.run_pipes(
        *pipeline,
        inputs=cc_shard,
        processes=conf.mine_num_processes,
        chunksize=100,
        # The splitter takes care of writing to files.
        output=tmp_output if not conf.will_split else None,
    )
    finalize(tmp_output, output)
    return f"Mined {output}"
def regroup(conf: Config, all_dirs: List[Path]) -> Path:
    """Reshards each language/quality after 'mine'.

    Groups the per-shard files by split name ("{lang}_{bucket}") and schedules
    `_regroup` jobs producing files of roughly `conf.target_size` each.
    """
    regroup_dir = conf.get_mined_dir(regroup=True)
    assert all_dirs
    all_files = [f for d in all_dirs for f in d.glob("*.json.gz")]
    if not all_files:
        # NOTE(review): execution continues with an empty file list here —
        # presumably an intentional no-op regroup; confirm.
        print(f"No .json.gz file found in {all_dirs[0]}")

    # Bucket files by their split name (text before the first dot).
    splits: Dict[str, List[Path]] = defaultdict(list)
    for f in all_files:
        split = f.name.split(".")[0]
        splits[split].append(f)

    print(f"Identified {len(all_files)} files to regroup from {len(splits)} splits.")
    inputs: List[List[Path]] = []
    outputs: List[Path] = []
    target_size = jsonql.parse_size(conf.target_size)
    for split, files in splits.items():
        cuts = list(regroup_module.determine_groups(files, target_size=target_size))
        if not cuts:
            continue
        pattern = f"{split}_????.json.gz"
        existing_outputs = sorted(regroup_dir.glob(pattern))

        if not conf.cleanup_after_regroup:
            # We still have all the inputs so it is safe to overwrite existing outputs.
            assert len(existing_outputs) <= len(cuts)
            existing_outputs = []

        if len(existing_outputs) > 0 and len(cuts) == 1:
            # append to existing file if size allows it.
            new_size = (
                sum(f.stat().st_size for f in cuts[0])
                + existing_outputs[-1].stat().st_size
            )
            if new_size < target_size:
                print(f"Will append {cuts[0]} to {existing_outputs[-1]}")
                cuts[0].insert(0, existing_outputs.pop(-1))

        n_existing = len(existing_outputs)
        for i, cut in enumerate(cuts):
            # avoid overwriting existing files.
            j = i + n_existing
            output = regroup_dir / f"{split}_{j:04}.json.gz"
            inputs.append(cut)
            outputs.append(output)
        print(
            str(regroup_dir / pattern),
            "->",
            len(cuts),
            f"shards ({n_existing} already there).",
        )

    ex = conf.get_executor(f"regroup_{conf.dump}", mem_gb=1, timeout_hour=12, cpus=2)
    ex(_regroup, repeat(conf), inputs, outputs)

    return regroup_dir
def _regroup(conf: Config, inputs: List[Path], output: Path) -> str:
    """One job: concatenate one group of shard files into `output`.

    Original inputs are removed when `cleanup_after_regroup` is set.
    """
    output.parent.mkdir(parents=True, exist_ok=True)
    regroup_module.fast_reshard(
        inputs, output, tmp=tmp(output), rm_original=conf.cleanup_after_regroup
    )
    return f"Regrouped {output}"
def move_segments(conf: Config, all_dirs: Sequence[Path]) -> Path:
    """Reshards each language/quality after 'mine'.

    With `split_by_segment` every segment lives in exactly one shard dir, so
    "regrouping" is just moving files into the final directory.
    """
    # check that mining is over.
    regroup_dir = conf.get_mined_dir(regroup=True)
    assert all_dirs, "Received no dirs to move"
    assert all(
        d.is_dir() for d in all_dirs
    ), f"move_segments was expecting dirs received files: {all_dirs[:10]}..."

    regroup_dir.parent.mkdir(exist_ok=True)
    regroup_dir.mkdir(exist_ok=True)
    ex = conf.get_executor(f"moveseg_{conf.dump}", mem_gb=1, timeout_hour=1, cpus=2)

    def _move_segments(subdir: Path, regroup_dir: Path) -> str:
        # One job: move every regular file of `subdir` into `regroup_dir`,
        # leaving a symlink behind in its place.
        n = 0
        for f in subdir.iterdir():
            # Skip directories and already-moved entries (symlinks).
            if not f.is_file() or f.is_symlink():
                continue
            n += f.name.endswith(".json.gz")
            new_name = regroup_dir / f.name
            target = new_name.resolve()
            assert f.resolve() != target
            # this make the job idempotent.
            f.rename(new_name)
            f.symlink_to(target)

        if n == 0:
            return ""

        return f"Moved {n} .json.gz files from {subdir} to {regroup_dir}"

    ex(_move_segments, all_dirs, repeat(regroup_dir))
    print(f"Results are in {regroup_dir}")
    return regroup_dir
def _validate_test(conf: Config, output_dir: Path, generate: bool = False):
    """Compare size/sha1 of the mined test outputs against reference stats.

    Args:
        conf: test configuration (kept for interface compatibility; unused).
        output_dir: directory holding the mined ``*.json.gz`` files.
        generate: when True, (re)write the reference stats file instead of
            comparing against it.
    """
    stats: Dict[str, dict] = {}
    for file in sorted(output_dir.glob("*.json.gz")):
        fname = "/".join((file.parent.name, file.name))
        # The order of documents is not guaranteed inside a shard,
        # so sort the lines to obtain a stable checksum.
        lines = sorted(jsonql.open_read(file))
        content = "\n".join(lines)
        size = len(content)
        checksum = hashlib.sha1(bytes(content, encoding="utf-8")).hexdigest()
        stats[fname] = {"size": size, "checksum": checksum}

    def dump(x):
        return json.dumps(x, indent=2, ensure_ascii=False)

    print("*** Stats ***")
    stats_raw = dump(stats)
    stats_file = FILE_DIR / "data" / "test_stats.json"
    if generate:
        print("Saving stats to", stats_file)
        stats_file.write_text(stats_raw)
        return

    expected_stats: Dict[str, dict] = {}
    if stats_file.exists():
        expected_stats = json.loads(stats_file.read_text())

    if expected_stats == stats:
        print("Everything looks good !")
        return

    stats_file.with_suffix(".actual.json").write_text(stats_raw)
    print("*** Expected Stats ***")
    print(dump(expected_stats))

    print("*** Diff ***")
    for fname in sorted(expected_stats.keys()):
        print(fname)
        # Bug fix: the original asserted `fname in expected_stats`, which is
        # always true when iterating over expected_stats' own keys (and would
        # then KeyError on `stats[fname]`). The intent is to flag expected
        # files missing from the freshly computed stats.
        assert fname in stats, "missing file " + fname
        if expected_stats[fname]["size"] != stats[fname]["size"]:
            print(
                " - Expected size",
                expected_stats[fname]["size"],
                ", size",
                stats[fname]["size"],
            )
        if expected_stats[fname]["checksum"] != stats[fname]["checksum"]:
            print(
                " - Expected checksum",
                expected_stats[fname]["checksum"],
                ", checksum",
                stats[fname]["checksum"],
            )
def get_main_parser() -> ArgumentParser:
    """Build the CLI parser: one flag per Config field, plus `--config`."""
    # Generates the 'main' parser by patching a 'Config' parser
    p = func_argparse.func_argparser(Config)

    # Override defaults value to None, so we know what was set by the user.
    # Note that it will keep the original default values in the help message.
    p.set_defaults(**{f: None for f in Config._fields})
    p.add_argument("--config", type=str, default="base")
    p.set_defaults(__command=main)
    return p
def main(config: str = "base", **config_as_dict: Any) -> None:
    """Entry point: resolve `config` (predefined name or JSON file path),
    apply the CLI overrides, then run mining and regrouping."""
    # Use the given 'config' as default value.
    config_base = config
    if config_base in PREDEF_CONFIGS:
        conf = PREDEF_CONFIGS[config_base]
    elif Path(config_base).exists():
        conf = Config.from_json(Path(config_base))
    else:
        raise ValueError(
            f"Invalid value {config_base} for --config. "
            f"Choose from ({', '.join(PREDEF_CONFIGS)}) or give an existing .json file."
        )
    # Only apply the CLI flags the user actually set (parser defaults are None).
    conf = conf._replace(**{k: v for (k, v) in config_as_dict.items() if v is not None})

    print(f"Will run cc_net.mine.main with the following config:", conf)

    all_files = mine(conf)
    if conf.will_split:
        assert all_files
        assert all(d.is_dir() for d in all_files)
        all_dirs = all_files
        if "split_by_lang" in conf.pipeline:
            # Only try regrouping if we split the shards.
            regroup(conf, all_dirs)
        elif "split_by_segment" in conf.pipeline:
            # If we split by segment then regrouping is trivial, since segments appear in only one shard.
            move_segments(conf, all_dirs)

    if conf.config_name == "test":
        _validate_test(conf, conf.get_mined_dir(regroup=True))


if __name__ == "__main__":
    func_argparse.parse_and_call(get_main_parser())
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Creates mono-lingual corpus from Wikipedia.
"""
import functools
import re
import subprocess
import urllib.request
from pathlib import Path
from typing import Dict
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql, text_normalizer
# Index page listing the available Cirrus dumps.
CIRRUS_URL = "https://dumps.wikimedia.org/other/cirrussearch"
# Matches dump filenames such as "frwiki-20190101-cirrussearch-content.json.gz";
# group(1) captures the wiki/language prefix.
CIRRUS_DUMP_RE = re.compile(r"^(.*)wiki-\d+-cirrussearch-content\.json\.gz")
def tmp(file: Path) -> Path:
    """Return the download-in-progress sibling of *file* (``tmp.<name>``)."""
    return file.with_name(f"tmp.{file.name}")
def opening(file: Path, output: Path = None, n_docs: int = 1_000_000):
    """Will dump the tokenized opening text of the given Wikipedia.

    Args:
        - file: File containing the Wikipedia dump.
        - output: Output file.
        - n_docs: How many docs to parse
    """
    assert file.exists()
    # Bug fix: the original `return`ed here, which made the tmp-file
    # promotion below unreachable — results stayed in "tmp.<output>".
    result = jsonql.run_pipes(
        functools.partial(extract_opening_text, n_docs=n_docs),
        file=file,
        output=tmp(output) if output else None,
    )
    if output:
        tmp(output).replace(output)
    return result
def extract_opening_text(source, n_docs: int = 10_000):
    """Yield the normalized "opening_text" field of up to *n_docs* documents.

    Documents that are falsy or lack a non-empty "opening_text" are skipped
    and do not count towards the limit.
    """
    emitted = 0
    for doc in jsonql.read_jsons(source):
        text = doc.get("opening_text") if doc else None
        if not text:
            continue

        yield text_normalizer.normalize(text)
        emitted += 1
        if emitted >= n_docs:
            break
def dl(lang: str, output_dir: Path, date: str = None):
    """Download the cirrus extract for the given lang.

    See https://dumps.wikimedia.org/other/cirrussearch for the full list of files.

    Args:
        - lang: The Wikipedia code for the language.
        - output_dir: Output directory. File will be `{lang}.json.gz`
        - date: Date of a specific Cirrus dump (latest safe one when omitted).
    """
    urls = get_cirrus_urls(date)
    assert (
        lang in urls
    ), f"--lang {lang} not found. Available languages are: {urls.keys()}"

    assert output_dir, "--output_dir folder needed."
    output_dir.mkdir(exist_ok=True)
    output = output_dir / (lang + ".json.gz")
    print(f"Downloading {lang} wiki from {urls[lang]} to {output}")
    wget(urls[lang], output)
def get_cirrus_urls(date: str = None) -> Dict[str, str]:
    """Scrape the Cirrus index and return {wiki prefix: download url} for `date`."""
    if date is None:
        cirrus_page = BeautifulSoup(
            urllib.request.urlopen(CIRRUS_URL), features="html.parser"
        )
        dumps = [a.get("href").strip("/") for a in cirrus_page.findAll("a")]
        # Drop the navigation links that aren't dump dates.
        dumps.remove("..")
        dumps.remove("current")
        # We take the oldest dump since the most recent might be incomplete.
        # The page only link to the N latest dumps so the dump won't be too old.
        date = min(dumps)

    cirrus_url = "/".join((CIRRUS_URL, date))
    print("Will use the Wikipedia dump from:", date, cirrus_url)
    cirrus_page = BeautifulSoup(
        urllib.request.urlopen(cirrus_url), features="html.parser"
    )
    urls = {}
    for link in cirrus_page.findAll("a"):
        # Keep only "-content" dump files; group(1) is the wiki prefix.
        match = CIRRUS_DUMP_RE.match(link.get("href"))
        if not match:
            continue

        urls[match.group(1)] = "/".join([cirrus_url, link.get("href")])
    assert urls, f"No valid download urls found at {cirrus_url}"
    return urls
def wget(url: str, output: Path):
    """Download `url` to `output` via wget, going through a tmp file first."""
    # List-form argv (shell=False): safe against shell injection in `url`.
    subprocess.run(["wget", url, "-O", tmp(output), "-q"], check=True)
    tmp(output).replace(output)
    # Sanity check: a real dump is far bigger than 10 kB.
    assert (
        output.stat().st_size > 10_000
    ), f"File {output} downloaded from {url} looks too small"


if __name__ == "__main__":
    func_argparse.main(dl, opening)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Manipulate files containing one json per line.
"""
import argparse
import collections
import contextlib
import functools
import glob
import gzip
import importlib
import inspect
import io
import itertools
import json
import logging
import multiprocessing
import os
import re
import sys
import tempfile
import time
import typing as tp
import warnings
import zlib
from pathlib import Path
from typing import (
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
TextIO,
Tuple,
Union,
)
import numpy as np
import psutil # type: ignore
import requests
from typing_extensions import Protocol
# Module-wide logging setup (timestamped, includes the process id so
# parallel workers can be told apart).
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(process)d:%(name)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M",
)

# Sentinel used by the select/merge commands to round-trip newlines
# through one-line records.
NEWLINE = " N3WL1N3 "

# Type aliases used throughout this module.
FilterFn = Callable[[dict], bool]
FileDescriptor = Union[Path, List[Path], str]
WritableFileLike = Union[FileDescriptor, TextIO, "SimpleIO", None]
ReadableFileLike = Union[Iterable[str], FileDescriptor, None]
def io_parser():
    """Parser shared by all commands to get input/output files."""
    file_help = """File to read from. Can be specified several times for several files.
Be careful that bash will expand glob patterns **before** sending the args
to python. To use globs put it inside single quotes:
jsonql where --file 'data/perplexity/*.json' '{length} > 100' | head -1
jsonql --file 'data/perplexity/*.json' where '{length} > 100' | head -1
[Invalid] jsonql where '{length} > 100' --file data/perplexity/*.json | head -1
[Invalid] jsonql where --file data/perplexity/*.json '{length} > 100' | head -1
"""
    shared = argparse.ArgumentParser(add_help=False)
    shared.add_argument("-f", "--file", type=Path, action="append", help=file_help)
    shared.add_argument("-o", "--output", type=Path, default="-")
    shared.add_argument("--processes", type=int, default=1)
    return shared
def get_parser():
    """Build the top-level CLI parser: one subcommand per jsonql operation
    (select/where/merge/describe/split/shard, defined later in this module)."""
    parser = argparse.ArgumentParser(
        description="Read a set of json files and allow to query them"
    )
    subparsers = parser.add_subparsers()

    def add_subparser(function, arguments):
        # The first docstring line of the command function is its help text.
        doc = function.__doc__.split("\n")[0]
        p = subparsers.add_parser(function.__name__, help=doc, parents=[io_parser()])
        p.set_defaults(command=function)
        for k, v in arguments.items():
            p.add_argument(k, **v)

    add_subparser(
        select,
        {
            "columns": dict(nargs="+", help="Extract the value of the given fields"),
            "--skip_empty": dict(
                action="store_true", help="Skip lines without the requested fields"
            ),
            "--separator": dict(
                default="\t", help="Separator to use between the different columns"
            ),
            "--newline": dict(
                default=NEWLINE,
                help="Replace newlines found in the text by the given string",
            ),
        },
    )

    add_subparser(
        where,
        {
            "clauses": dict(nargs="+", help=""),
            "--requires": dict(
                action="append", help="Python module required by the clauses code."
            ),
        },
    )

    add_subparser(
        merge,
        {
            "columns": dict(nargs="+", help=""),
            "--separator": dict(
                default="\t", help="Separator to use between the different columns"
            ),
            "--newline": dict(
                default=NEWLINE, help="Replace the given string by actual newlines"
            ),
        },
    )

    add_subparser(
        describe,
        {
            "columns": dict(nargs="*", help=""),
            "--bins": dict(
                default="auto", help="Number of bins for computing the histograms"
            ),
            "--cumulative": dict(
                action="store_true", help="Compute cumulative histograms"
            ),
            "--weights": dict(type=str, help="Column used to weight histograms"),
        },
    )

    add_subparser(split, {"--pattern": dict(type=str)})
    add_subparser(shard, {})
    return parser
def _split_array(array, sep):
last = 0
for i, x in enumerate(array):
if x != sep:
continue
yield array[last:i]
last = i + 1
if last != len(array):
yield array[last:]
def main(raw_args):
    """CLI entry point: split `raw_args` on "--" into successive commands and
    chain them into one pipeline run.

    The last occurrence of -f/-o/--processes across the groups wins.
    """
    parser = get_parser()
    pipeline = []
    file = "-"
    output = "-"
    processes = 1
    for args_group in _split_array(raw_args, "--"):
        args = vars(parser.parse_args(args_group))
        command = args.pop("command")
        file = args.pop("file") or file
        output = args.pop("output") or output
        processes = args.pop("processes") or processes
        pipeline.append(as_pipe(command, args))

    if not pipeline:
        parser.print_help()
        return

    # Bug fix: "--file" is an append action, so `file` is a list of Paths when
    # the user supplied it; the original wrapped it in Path() unconditionally,
    # which raises TypeError on a list. run_pipes accepts lists directly
    # (see ReadableFileLike / FileDescriptor above).
    inputs = file if isinstance(file, list) else Path(file)
    run_pipes(*pipeline, file=inputs, output=Path(output), processes=processes)
class Transformer:
    """
    Wrapper around functions transforming documents.

    This allows `run_pipes` to automatically parallelize the pipeline.
    Provides:
    * Automatic logging. Logging can be changed with the `summary` method.
      Logging frequency with _log_freq (in second) or $JSONQL_LOG_FREQ env variable.
    * Automatic parallelization without pickling. The transformers are shared
      across processes, and the object is usually not pickled.
    * Basic pickling / unpickling in case it's still needed.
      By default will only pickle the arguments passed to the constructor.
    * Delayed initialization. Internal state which is not pickable should be set
      inside the `_prepare` function.
    """

    # Class-level defaults; subclasses may override.
    parallelisable: bool = True
    expect_json: bool = False
    warn_when_pickling: bool = False
    ready: bool = False

    def __init_subclass__(cls, expect_json: bool = None):
        """Detects if the subclass expects json as input."""
        # If the first real argument of `do` is annotated as `dict`, the
        # subclass consumes parsed json documents (see run_pipes).
        spec = inspect.getfullargspec(cls.do)
        if expect_json is None:
            expect_json = spec.annotations.get(spec.args[1], None) == dict
        cls.expect_json = expect_json

    def __new__(cls, *args, **kwargs):
        """Creates the transformer and save the arguments passed to the constructor."""
        t = super().__new__(cls)
        # Stash constructor arguments on the instance so __getstate__ /
        # __setstate__ can re-create an equivalent object after pickling.
        Transformer.__init__(t, args, kwargs)
        return t

    def __init__(self, state_args: tuple = None, state_kwargs: dict = None):
        """
        Init the transformer counters.

        If state_args/state_kwargs are set they will override whatever was
        originally passed to the subclass constructor.
        """
        if state_args is not None:
            self.__args = state_args
        if state_kwargs is not None:
            self.__kwargs = state_kwargs

        self.start_time = time.time()
        self.__last_log = self.start_time
        self.processed = 0
        # Log every 5 min unless specified other wise.
        self._log_freq = int(os.environ.get("JSONQL_LOG_FREQ", 5 * 60))
        self.__cls = type(self)
        self._logger = logging.getLogger(self.__cls.__name__)

    def __call__(self, x):
        """Apply `do` to one document, updating counters and logging periodically."""
        assert self.ready, f"{self} is not ready."
        if x is None:
            return
        y = self.do(x)
        self.processed += 1
        if time.time() - self.__last_log > self._log_freq:
            self.log_summary()
        return y

    def do(self, x):
        """Transform one document. Subclasses must implement this."""
        raise NotImplementedError(f"'do' not implemented in {type(self)}")

    def summary(self) -> List[str]:
        """Lines logged by `log_summary`; subclasses may extend the list."""
        return [self.speed_summary()]

    def speed_summary(self) -> str:
        """Human-readable throughput since `start_time`."""
        delay = time.time() - self.start_time
        h = delay / 3600
        s = self.processed / delay
        return f"Processed {self.processed:_} documents in {h:.2}h ({s:5.1f} doc/s)."

    def log(self, message):
        self._logger.info(message)

    def log_summary(self) -> None:
        """Log the current `summary()` lines and reset the periodic-log timer."""
        if not self.ready:
            self.log("Not ready.")
            return
        summ = self.summary() or []
        for line in summ:
            self.log(line)
        self.__last_log = time.time()

    def map(self, source: Iterable) -> Iterator:
        """Lazily apply the transformer to every document of `source`."""
        if self.ready:
            for x in source:
                yield self(x)
            # since we have been prepared by caller,
            # caller is also responsible for calling `close`.
            return
        else:
            with self:
                for x in source:
                    yield self(x)

    def __getstate__(self) -> Tuple[tuple, dict, bool]:
        # Only the constructor arguments are pickled (see class docstring).
        return (self.__args, self.__kwargs, self.expect_json)

    def __setstate__(self, state: Tuple[tuple, dict, bool]):
        if self.warn_when_pickling:
            warnings.warn(f"Unpickling transformer: {type(self)}. This can be slow.")
        (args, kwargs, expect_json) = state
        # When unpickling `__new__` isn't called so we have to doit ourselves.
        Transformer.__init__(self, state_args=args, state_kwargs=kwargs)
        type(self).__init__(self, *args, **kwargs)
        assert self.expect_json == expect_json
        # __setstate__ is called by multiprocessing right before calling
        # the object so we need to initialize everything.
        self.__enter__()

    def _prepare(self) -> None:
        """Hook for subclasses: build non-picklable state here."""
        pass

    def __enter__(self) -> "Transformer":
        # In multiprocessing __enter__ is always called twice, so we are idempotent.
        # Because we call __enter__ when deserializing this transformer and
        # also when the parent transformer is deserialized.
        self.start_time = time.time()
        if self.ready:
            return self
        self._prepare()
        self.ready = True
        return self

    def __exit__(self, *args) -> None:
        self.close()
        self.log_summary()

    def close(self) -> None:
        """Hook for subclasses: release resources created in `_prepare`."""
        pass
def as_pipe(transformer, kwargs):
    """Turn a command into a pipeline stage.

    A Transformer class is instantiated with `kwargs`; a plain generator
    function is bound to its keyword arguments.
    """
    if not isinstance(transformer, type):
        return lambda source: transformer(source, **kwargs)
    return transformer(**kwargs)
def compose(fns: List[Transformer]) -> Transformer:
    """Merge a pipeline into a single transformer (a singleton is returned as-is)."""
    if len(fns) > 1:
        return MultiTransformer(fns)
    return fns[0]
class MultiTransformer(Transformer):
    """Chain several transformers, applying them in order to each document."""

    def __init__(self, transformers: List[Transformer]):
        super().__init__()
        self.transformers = transformers

    def __repr__(self) -> str:
        stages = " | ".join(type(stage).__name__ for stage in self.transformers)
        return f"<{stages}>"

    def do(self, x):
        # Feed the output of each stage into the next one.
        for stage in self.transformers:
            x = stage(x)
        return x

    def _prepare(self):
        for stage in self.transformers:
            stage.__enter__()
        return self

    def __exit__(self, *args):
        for stage in self.transformers:
            stage.__exit__(*args)

    def summary(self):
        # Concatenate the summaries of every stage.
        return itertools.chain(*(stage.summary() for stage in self.transformers))
class Mapper(Transformer):
    """Transformer that applies a plain function to every document."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def do(self, x):
        return self.fn(x)
def run_pipe(
    command,
    kwargs: dict = None,
    file: ReadableFileLike = None,
    output: WritableFileLike = None,
):
    """Run a single command end-to-end (convenience wrapper around `run_pipes`).

    `kwargs` may also be an ArgumentParser, in which case sys.argv is parsed
    and "file"/"output" entries in the parsed kwargs act as fallbacks.
    """
    kwargs = kwargs or {}
    if isinstance(kwargs, argparse.ArgumentParser):
        kwargs = vars(kwargs.parse_args())
    file = file or Path(kwargs.pop("file", "-"))
    output = output or Path(kwargs.pop("output", "-"))
    return run_pipes(as_pipe(command, kwargs), file=file, output=output)
def run_pipes(
    *fns: Union[Transformer, Callable[[Iterable], Iterable]],
    inputs: Iterable[dict] = None,
    file: ReadableFileLike = None,
    output: WritableFileLike = None,
    processes: int = 1,
    chunksize: int = 10_000,
):
    """
    Run full document processing pipeline.

    - fns: list of functions to run over the documents. Can be:
        * `Iterable -> Iterable` function
        * jsonql.Transformer instance
        Using transformers allow the pipeline to process documents in parallel.
    - inputs: iterable to read the documents from
    - file: if inputs is not given, will read documents from this file.
    - output: writable file like.
    - processes: number of processes to use. -1 means all CPU available.
    - chunksize: chunksize for multiprocessing.Pool.imap_unordered
    """
    expect_json = len(fns) and isinstance(fns[0], Transformer) and fns[0].expect_json
    if expect_json and inputs is None:
        # The first transformer consumes parsed documents: prepend a json parser.
        fns = (JsonReader(),) + fns

    # Split the pipeline: the leading run of parallelisable Transformers can go
    # into a process pool; the remaining stages run sequentially in-process.
    transformers = []
    for t in fns:
        if not isinstance(t, Transformer):
            break
        if not t.parallelisable:
            break
        transformers.append(t)
    pipes = fns[len(transformers) :]

    log = logging.getLogger(__name__).info
    if inputs is None:
        data: Iterable = open_read(file)
    else:
        data = inputs

    if processes == -1:
        processes = os.cpu_count() or 0

    # BrokenPipeError is suppressed so downstream consumers closing early
    # (e.g. piping into `head`) don't produce a traceback.
    with contextlib.suppress(BrokenPipeError), contextlib.ExitStack() as stack:
        if transformers:
            log(f"preparing {transformers}")
            transform = stack.enter_context(compose(transformers))
            if processes <= 1:
                data = transform.map(data)
            else:
                p = multiprocessing.current_process()
                log(f"Will start {processes} processes from {p.name}, Pid: {p.pid}")
                # The prepared transformer is handed to workers through the
                # pool initializer instead of being pickled per task.
                pool = stack.enter_context(
                    multiprocessing.Pool(
                        processes=processes,
                        initializer=_set_global_transformer,
                        initargs=(transform,),
                    )
                )
                data = pool.imap_unordered(
                    _global_transformer, data, chunksize=chunksize
                )

        for fn in pipes:
            if isinstance(fn, Transformer):
                data = fn.map(data)
            else:
                data = fn(data)

        write_jsons(data, output)
# Allows to share a transformer across subprocesses without pickling it:
# each pool worker gets it installed by `_set_global_transformer`.
# Used by `run_pipes`.
_GLOBAL_TRANSFORMER: Optional[Transformer] = None
def _set_global_transformer(transformer: Transformer):
    """Pool initializer: install the already-prepared transformer in this worker."""
    global _GLOBAL_TRANSFORMER
    p = multiprocessing.current_process()
    logging.info(
        f"Started subprocess {p.name}:{p.pid} from {os.getppid()} for {transformer}"
    )
    assert transformer.ready, f"{transformer} isn't ready"
    _GLOBAL_TRANSFORMER = transformer
def _global_transformer(document: str) -> Optional[dict]:
    """Worker-side entry point: apply the shared transformer to one document."""
    assert _GLOBAL_TRANSFORMER is not None
    return _GLOBAL_TRANSFORMER(document)
def lines(file: ReadableFileLike) -> Iterator[str]:
    """Open `file` with `open_read` and yield its lines without trailing newlines."""
    return map(lambda raw: raw.strip("\n"), open_read(file))
def read_jsons(file: ReadableFileLike, strict=False) -> Iterator[dict]:
    """Parse each line of `file` as a json document, skipping None lines.

    With strict=True invalid json raises instead of being skipped.
    """
    parser = JsonReader(strict=strict)
    for raw in open_read(file):
        if raw is None:
            continue
        yield parser(raw)
    # Only reached when the generator is fully consumed.
    parser.log_summary()
def write_jsons(source: Iterable[dict], file: WritableFileLike) -> None:
    """Serialize documents to `file`: dicts as json lines, strings verbatim.

    None entries are dropped.
    """
    eol = os.linesep
    with open_write(file) as out:
        for doc in source:
            if doc is None:
                continue
            if isinstance(doc, dict):
                json.dump(doc, out, ensure_ascii=False)
                out.write(eol)
            else:
                if isinstance(doc, str):
                    doc = doc.rstrip("\n")
                print(doc, file=out)
class JsonReader(Transformer):
    """Parse json lines into dicts, logging (and optionally raising on) bad input."""

    def __init__(self, strict: bool = False):
        super().__init__()
        self.ready = True  # stateless: no `_prepare` step needed
        self.strict = strict  # re-raise on invalid json instead of skipping
        self.num_errors = 0

    def do(self, line: str) -> Optional[dict]:
        """Return the parsed document, or None for empty/None/invalid lines."""
        if line is None:
            return None
        if isinstance(line, dict):
            # Already parsed upstream: pass through unchanged.
            return line
        line = line.rstrip("\n")
        if not line:
            return None
        try:
            return json.loads(line)
        except json.decoder.JSONDecodeError as e:
            self.log_error(e)
            if self.strict:
                raise
            return None

    def log_error(self, e: json.decoder.JSONDecodeError):
        """Log an invalid-json warning with a caret pointing at the error column."""
        self.num_errors += 1
        if self.num_errors > 10:
            # Avoid flooding the logs: only report the first 10 errors.
            return
        MAX_LEN = 80
        snippet, snippet_len = e.doc, len(e.doc)
        col = e.pos
        # Trim long documents to a short snippet around the error position.
        if snippet_len > MAX_LEN:
            if col < MAX_LEN:
                start = 0
            elif snippet_len - col < MAX_LEN:
                start = snippet_len - MAX_LEN
            else:
                start = col - MAX_LEN // 2
            snippet = e.doc[start : start + MAX_LEN]
            col = col - start
        logging.warning(
            "\n".join(
                [
                    f"Invalid json (length={len(e.doc)}) {e}",
                    snippet,
                    " " * (col - 1) + "^",
                ]
            )
        )

    def summary(self):
        summ = super().summary()
        if self.num_errors > 0:
            summ.append(f"Skipped {self.num_errors} invalid json.")
        return summ
def compile_column(column, newline):
    """Turn a column spec into a `doc -> value` extractor.

    Accepted specs: a callable (returned as-is), "*" (whole document as json),
    a plain field name, or a python expression handled by `compile_expr`.
    Newlines inside string values are replaced by `newline`.
    """
    if callable(column):
        return column
    if column == "*":
        return json.dumps

    if re.match(r"[_a-z][_a-z0-9]*", column):

        def field_getter(doc):
            value = doc.get(column, "")
            if isinstance(value, str) and newline != "\n":
                value = value.rstrip("\n").replace("\n", newline)
            return value

        return field_getter

    return compile_expr(column)
def select(lines, columns, skip_empty=False, separator="\t", newline="\n"):
    """Yields the content of the requested columns."""
    parsers = [compile_column(c, newline) for c in columns]
    for doc in read_jsons(lines):
        extracted = [parse(doc) for parse in parsers]
        # A line is "empty" when every requested field resolved to None.
        if skip_empty and all(v is None for v in extracted):
            continue
        yield separator.join(str(v) or "" for v in extracted)
def compile_expr(clause: Union[str, FilterFn], requires: List[str] = None):
    """Compile a clause like "{language}=='en'" into a `doc -> value` function.

    `{field}` placeholders become `doc.get('field', None)` lookups.
    `requires` lists module names made available to the expression.
    A non-string `clause` is assumed already callable and returned as-is.
    NOTE: the clause is passed to `eval` — only compile trusted expressions.
    """
    if not isinstance(clause, str):
        return clause

    args_re = r"(?i:\{([_a-z][_a-z0-9]*)\})"
    args_list = list(re.findall(args_re, clause))
    if not args_list:
        # This is only a warning because you may want to have eg random sampling
        # that doesn't depend on the document.
        # (`logging.warn` is a deprecated alias; use `logging.warning`.)
        logging.warning(
            f"Warning: No variable found in expression: <{clause}>\n"
            "Variables should be written inside braces, eg: {language}=='en'"
        )
    python_like = re.sub(args_re, r"doc.get('\1', None)", clause)
    requires = requires or []
    modules = {r: importlib.import_module(r) for r in requires}
    return eval(f"lambda doc: {python_like}", modules)
class where(Transformer):
    """Filters the data using python code.
    Ex: `jsonql where 'len({text}) > 100'`
    """

    def __init__(
        self, clauses: Sequence[Union[str, FilterFn]], requires: Optional[List[str]] = None
    ):
        """
        clauses: expressions with `{field}` placeholders; all must be truthy.
        requires: module names needed by the clauses' code.
        """
        super().__init__()
        self.raw_clauses = clauses
        # Fixed: the previous `requires: List[str] = []` default was a shared
        # mutable default argument.
        self.requires = requires if requires is not None else []
        self.n_selected = 0
        self.clauses: List[FilterFn] = []

    def _prepare(self):
        # Compilation is delayed to here: compiled lambdas are not picklable.
        self.clauses = [compile_expr(c, self.requires) for c in self.raw_clauses]

    def do(self, doc: dict) -> Optional[dict]:
        """Return `doc` when every clause holds, else None (document filtered out)."""
        assert self.clauses
        if not doc or not all(c(doc) for c in self.clauses):
            return None
        self.n_selected += 1
        return doc

    def summary(self):
        n_selected, n_docs = self.n_selected, self.processed
        selectivity = n_selected / n_docs if n_docs else 0
        return [f"Selected {n_selected} documents out of {n_docs} ({selectivity:5.1%})"]
def merge(lines, columns, separator="\t", newline=NEWLINE):
    """Reads tab separated columns and output a json using the given headers.

    Headers are of form {key}[%{type}]
    {type} can be one of {"f": float, "i": int, "b": bool, "s": string}.
    Default type is string.
    A special header "_" means interpret this column as json, and append all other
    columns to it. Must appear only once and on last position.

    Ex:
    `echo '1\thello' | jsonql merge n t` --> `{"n": "1", "t": "hello"}`
    `echo '1\thello" | jsonql merge n%i t` --> `{"n": 1, "t": "hello"}`
    `echo '1\thello\t{"f": "bar"}' | jsonql merge n%i t _` --> `{"n": 1, "t": "hello", "f": "bar"}`
    """
    # Restore real newlines that `select` replaced by the `newline` marker.
    handle_newlines = lambda s: s.replace(newline, "\n")
    type_mapping: Dict[str, Callable] = {
        "f": float,
        "i": int,
        "b": bool,
        "s": handle_newlines,
    }
    # One parser per column, chosen from the "%{type}" suffix (default: string).
    type_parsing = [
        type_mapping.get(f.split("%")[-1], handle_newlines) for f in columns
    ]
    # Strip the "%{type}" annotation to get the plain column names.
    columns = [f.split("%")[0] for f in columns]
    doc_index = columns.index("_") if "_" in columns else -1
    read_json = JsonReader()

    def parse(line):
        # Split into at most len(columns) parts so the last column may contain
        # the separator (needed for the trailing "_" json column).
        parts = line.split(separator, len(columns) - 1)
        doc: Dict[str, tp.Any] = {}
        for i, value in enumerate(parts):
            if columns[i] == "_":
                # Parse the json column and merge its fields into the document.
                doc.update(read_json(parts[doc_index]))
            else:
                try:
                    doc[columns[i]] = type_parsing[i](value)
                except ValueError:
                    logging.error(
                        f"Error when parsing column {i} of line: {line[:100]}..."
                    )
        return doc

    for line in lines:
        yield json.dumps(parse(line))
class split(Transformer):
    """Split a files in several smaller files based on the value of a field."""

    # Not parallelisable since we are writing to files.
    parallelisable = False

    def __init__(
        self,
        pattern: Union[Path, str] = None,
        split_fn: Callable[[dict], str] = None,
        mkdir: bool = False,
    ):
        """
        pattern: filename template with `{field}` placeholders, eg "out/{lang}.json".
        split_fn: alternative to `pattern`; maps a document to its output filename.
        mkdir: create parent directories of output files as needed.
        """
        super().__init__()
        assert not (
            pattern and split_fn
        ), "split can't have both a pattern and a split_fn"
        if split_fn is not None:
            self.split_fn = split_fn
        else:
            assert pattern, "split need either a pattern or a split_fn"
            self.split_fn = self.make_split_fn(str(pattern))
        self.mkdir = mkdir
        self.o: dict = {}  # filename -> open output file handle

    def make_split_fn(self, pattern: str) -> Callable[[dict], str]:
        """Build a doc -> filename function from a `{field}` template."""
        candidates = list(re.findall(r"(?i:\{([_a-z][_a-z0-9]*)\})", pattern))
        return lambda doc: pattern.format(**{c: doc[c] for c in candidates})

    def do(self, doc):
        """Route `doc` to the file chosen by `split_fn`, opening it on first use."""
        filename = self.split_fn(doc)
        if not filename:
            return
        o = self.o.get(filename, None)
        if o is None:
            if self.mkdir:
                Path(filename).parent.mkdir(parents=True, exist_ok=True)
            self.o[filename] = open_write(filename)
        print(json.dumps(doc, ensure_ascii=False), file=self.o[filename], flush=True)

    def summary(self):
        summ = super().summary()
        summ.append(f"Found {len(self.o)} splits.")
        return summ

    def close(self):
        for file in self.o.values():
            file.close()
def histogram(values, bins, weights):
    """Compute np.histogram(values), optionally weighted.

    Bins can't be auto-determined when weights are supplied, so the bin edges
    are first fixed without weights, then the histogram is recomputed with them.
    """
    hist, bin_edges = np.histogram(values, bins=bins)
    if weights is not None:
        hist, bin_edges = np.histogram(values, bins=bin_edges, weights=weights)
    return hist, bin_edges
def _parse_bins(bins):
try:
if isinstance(bins, str):
if "," in bins:
bins = [int(b) for b in bins.split(",")]
else:
bins = int(bins)
except ValueError:
pass
return bins
# Pseudo-key under which `describe` counts the total number of documents.
ALL_DOCUMENTS = "<ALL_DOCUMENTS>"
# Strings longer than this are not treated as categorical labels by `describe`.
MAX_LABEL_LEN = 100
def bar_chart(hist, bins):
    """Render a histogram as text lines of '█' bars (max width 80 chars).

    Bars that would be invisible, or whose height (scaled) equals the
    previous bar's, are skipped; the final line shows the last bin edge.
    """
    total = sum(hist)
    tallest = max(hist)
    rows = []
    for i, count in enumerate(hist):
        width = 80 * count // tallest
        delta = 80 * (count - hist[i - 1]) // tallest
        if width == 0 or delta == 0:
            continue
        bar = "█" * width
        rows.append(
            f"{bins[i]:8.3f} {bar:80} ({count:5d}, {count / total:5.1%}) {bins[i+1]:8.3f}"
        )
    rows.append(f"{bins[-1]:8.3f}")
    return rows
def display_stats(stats, key, weights=None, bins="auto", cumulative=False):
    """Format the statistics collected by `describe` for one field `key`.

    stats: dict filled by `describe` (counts plus ".length"/".val"/".cnt" entries).
    weights: name of a numeric column used to weight the histogram.
    bins / cumulative: forwarded to the histogram rendering.
    Returns a list of printable lines.
    """
    out = []
    documents = stats[ALL_DOCUMENTS]
    count = stats.get(key, 0)
    r = count / documents if documents else 0
    out.append(f"Field {key} saw {count} times ({r:5.1%})")

    length = stats.get(key + ".length", None)
    avg_length = length // count if length else 0
    if length is not None:
        out[-1] += f", average length is {length // count}"

    values = stats.get(key + ".val", None)
    if values:
        out[-1] += f", histogram is: (bins={bins})"
        if weights:
            # `logging.warn` is a deprecated alias; use `logging.warning`.
            if weights not in stats:
                logging.warning(f"Warning: weights column {weights} not found.")
            if weights + ".val" not in stats:
                logging.warning(
                    f"Warning: weights column {weights} is not a numeric column."
                )
            weights = stats.get(weights + ".val")
        hist, bins = histogram(values, _parse_bins(bins), weights)
        if cumulative:
            hist = np.cumsum(hist)
        out += bar_chart(hist, bins)

    cnt = stats.get(key + ".cnt", None)
    # Only list label counts for short strings that actually repeat.
    if avg_length < MAX_LABEL_LEN and cnt and max(cnt.values()) > 1:
        cnt = sorted(cnt.items(), key=lambda kv: kv[1], reverse=True)
        out[-1] += ", top 100 labels:"
        for label, n in cnt[:100]:
            if n < 5:
                continue
            out.append(f"{label:25}: {n:6} ({n / count:5.1%})")

    return out
def describe(source, columns=None, weights=None, **kwargs):
    """Compute some statistics about a dataset.

    Stats can be restricted to a subset of columns.
    Yields printable lines (one report per column, built by `display_stats`).
    """
    MAX_HIST_SIZE = 100_000_000  # cap on collected numeric values per column
    MAX_CNT_SIZE = 1000  # cap on distinct labels tracked per column
    stats = {ALL_DOCUMENTS: 0}
    needed = columns + [weights] if columns else None

    for doc in read_jsons(source):
        stats[ALL_DOCUMENTS] += 1
        for k, v in doc.items():
            if needed and k not in needed:
                continue
            stats[k] = get_or_set(stats, k, 0) + 1
            if isinstance(v, str):
                stats[k + ".length"] = get_or_set(stats, k + ".length", 0) + len(v)
                if len(v) > MAX_LABEL_LEN:  # Don't treat too long string as labels
                    continue
                cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
                if v in cnt or len(cnt) < MAX_CNT_SIZE:
                    cnt[v] += 1
            elif type(v) in (int, float):
                # Numeric column: keep raw values for the histogram.
                values = get_or_set(stats, k + ".val", [])
                if len(values) < MAX_HIST_SIZE:
                    values.append(v)
            elif type(v) is list and len(v) and type(v[0]) in (int, float):
                # List of numbers: flatten into the histogram values.
                values = get_or_set(stats, k + ".val", [])
                if len(values) < MAX_HIST_SIZE:
                    values += v
            elif type(v) is dict:
                # Dict column: count the keys as labels.
                cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
                for label in v:
                    if label in cnt or len(cnt) < MAX_CNT_SIZE:
                        cnt[label] += 1

    documents = stats[ALL_DOCUMENTS]
    yield f"Stats computed on {documents} documents:"
    for k in stats:
        if columns and k not in columns:
            continue
        if "." in k or k == ALL_DOCUMENTS:
            # Skip derived entries (".length", ".val", ".cnt") and the total.
            continue
        for line in display_stats(stats, k, weights=weights, **kwargs):
            yield line
def shard(lines):
    """Shard a file in several smaller ones.

    This is the identity transform: the actual sharding of the output is
    handled generically by `open_write` ("?" patterns in the output filename).
    """
    return lines
# *** Utils ***
def get_or_set(dictionary, key, default):
    """Return dictionary[key], inserting `default` first when the key is missing.

    Equivalent to the original look-then-insert code, using the stdlib idiom.
    """
    return dictionary.setdefault(key, default)
class SimpleIO(Protocol):
    """A subset of methods from TextIO.

    Structural interface implemented by MultiFile and satisfied by any
    writable text stream (close/write + context-manager protocol).
    """

    def close(self) -> None:
        ...

    def write(self, line: str) -> int:
        ...

    def __enter__(self) -> "SimpleIO":
        ...

    def __exit__(self, exc_type, exc_value, traceback):
        ...
def open_read(filename: ReadableFileLike) -> Iterable[str]:
    """Open the given file, list of files or files matching the given glob and read lines.

    `filename` is None or "-" -> reads from stdin
    `filename` is a Path / str -> interprets filename as a glob and open files matching it
    `filename` is a list -> opens sequentially all files from the list using `open_read`
    `filename` is something else -> returns the object wrapped in a `nullcontext`
        This allows to pass already openened files or iterables.

    `open_read` will decompress gzip files, given they have ".gz" suffix.
    """
    if filename is None:
        return sys.stdin

    if isinstance(filename, list):
        assert isinstance(filename[0], Path)
        if len(filename) == 0:
            return []
        if len(filename) > 1:
            return _yield_from(filename)
        filename = tp.cast(Path, filename[0])

    if isinstance(filename, str):
        if filename.startswith("http://") or filename.startswith("https://"):
            return open_remote_file(filename)
        filename = Path(filename)

    if not isinstance(filename, Path):
        # we might have received an iterable, return it unmodified.
        return filename  # type: ignore

    # Expand glob patterns only when reading
    files = [Path(f) for f in sorted(glob.glob(str(filename)))]
    if len(files) > 1:
        return _yield_from(files)
    if len(files) == 1:
        filename = files[0]

    assert isinstance(filename, Path)

    if filename.name.endswith("]"):
        # "file.gz[i/n]" selects one block slice of a blocked gzip file.
        return block_reader(filename)

    logging.getLogger(__name__).info(f"Opening (unknown) with mode 'rt'")
    if filename.suffix == ".gz":
        file: TextIO = gzip.open(filename, "rt")  # type: ignore
    else:
        file = open(filename, "rt")

    # Wrap so the handle is closed once the caller exhausts the lines.
    return _close_when_exhausted(file)
def _close_when_exhausted(file: TextIO) -> Iterable[str]:
with file:
yield from file
def _yield_from(files: list) -> Iterable[str]:
    """Chain the lines of several files, opening each one with `open_read`."""
    for current in files:
        yield from open_read(current)
def open_write(
    filename: WritableFileLike, max_size: str = "4G"
) -> tp.ContextManager[TextIO]:
    """Open the given file, list of files or files matching the given glob.

    The return value is a ContextManager meant to be used inside a `with` block:
    ```
    with open_write("foo.txt") as o:
        ...
    ```

    Write mode:
    replaces "?" from filename by numbers ranging from 0 to 9, generatings files of size `max_size`.
    If filename ends with ".gz", creates a blocked gzip file with random access.
    """
    if filename is None:
        return contextlib.nullcontext(sys.stdout)

    if isinstance(filename, list):
        if len(filename) > 1:
            return MultiFile(filename, "w", max_size)
        else:
            filename = tp.cast(Path, filename[0])
    if isinstance(filename, str):
        filename = Path(filename)
    if not isinstance(filename, Path):
        assert hasattr(filename, "write"), f"(unknown) doesn't have a .write method."
        # We return a 'TextIO' even though we only check for `.write` method,
        # this works better with eg `print`.
        return contextlib.nullcontext(tp.cast(TextIO, filename))

    mode = "wt"
    if "?" in filename.name:
        # Shard the output across numbered files of at most `max_size` each.
        return sharded_file(filename, mode, max_size)

    logging.getLogger(__name__).info(f"Opening (unknown) with mode {mode}")
    # TODO: should we use another format ?
    if filename.suffix == ".gz":
        return BlockedGzipWriter(Path(filename), mode, block_size="64M")

    return open(filename, "wt")
def parse_size(size):
    """Parse a human size string ("4G", "64M", "10K", "512B") into bytes."""
    unit_map = {"B": 1, "K": 1024, "M": 1024 ** 2, "G": 1024 ** 3}
    unit = size[-1].upper()
    assert (
        unit in unit_map
    ), f"Unsupported size unit for {size}. Use one of: {unit_map.keys()}."
    amount = int(size[:-1])
    return amount * unit_map[unit]
class MultiFile(SimpleIO):
    """Write sequentially to several files, advancing once `max_size` is reached."""

    def __init__(self, files: Iterable[Path], mode="w", max_size="4G"):
        self.name = str(files)
        self.mode = mode
        self.files = iter(files)  # consumed lazily, one file at a time
        self.max_size = parse_size(max_size)
        self.current_handle: Optional[TextIO] = None
        self.current_block_size = 0
        self._open_next_handle()  # Opening 1st handle allows to write directly.

    def write(self, content) -> int:
        """Write `content`, rolling over to the next file once `max_size` is hit."""
        # Avoid splitting newlines to a new file.
        # use current_block_size since it's faster than `tell()`
        if content != "\n" and self.current_block_size >= self.max_size:
            self._open_next_handle()
        if self.current_handle is None:
            raise Exception("No more files to write to...")
        written = self.current_handle.write(content)
        self.current_block_size += written
        return written

    def _open_next_handle(self) -> bool:
        """Close the current file and open the next one; False when exhausted."""
        self.close()
        file = next(self.files, None)
        if file is None:
            return False

        self.current_handle = open_write(file).__enter__()
        self.current_block_size = 0
        return True

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    @property
    def closed(self):
        return self.current_handle is None

    def close(self):
        if self.current_handle is None:
            return

        # log("Closing", self.current_handle.name, "with mode", self.current_handle.mode)
        self.current_handle.__exit__(None, None, None)
        self.current_handle = None
# Cache one shared `requests.Session` per process (connection reuse).
# Not sure it helps since connections are reset anyway.
_session = functools.lru_cache()(requests.Session)
def request_get_content(url: str, n_retry: int = 3) -> bytes:
    """Retrieve the binary content at url.

    Retry on connection errors (up to `n_retry` attempts); HTTP client
    errors ("Client Error" in the message) are raised immediately.
    """
    t0 = time.time()
    logging.info(f"Starting download of {url}")
    for i in range(1, n_retry + 1):
        try:
            r = _session().get(url)
            r.raise_for_status()
            break
        except requests.exceptions.RequestException as e:
            # Sleep and try again on error, unless it's a 404.
            message = e.args[0] if isinstance(e.args[0], str) else ""
            if i == n_retry or "Client Error" in message:
                raise e
            warnings.warn(
                f"Swallowed error {e} while downloading {url} ({i} out of {n_retry})"
            )
            # Exponential backoff: 20s, 40s, 80s, ...
            time.sleep(10 * 2 ** i)
    dl_time = time.time() - t0
    dl_speed = len(r.content) / dl_time / 1024
    logging.info(
        f"Downloaded {url} [{r.status_code}] took {dl_time:.0f}s ({dl_speed:.1f}kB/s)"
    )
    return r.content
def open_remote_file(url: str, cache: Path = None) -> Iterable[str]:
    """Download the files at the given url to memory and opens it as a file.

    Assumes that the file is small, and fetch it when this function is called.
    A ".gz" url is transparently decompressed. When `cache` is given, the raw
    bytes are also written there for later reads via `open_read`.
    """
    if cache and cache.exists():
        return open_read(cache)

    # TODO: open the remote file in streaming mode.
    # The hard part is that we need to write the content on disk at the same time,
    # to implement disk caching.
    raw_bytes = request_get_content(url)
    content = io.BytesIO(raw_bytes)
    if url.endswith(".gz"):
        f: TextIO = gzip.open(content, mode="rt")  # type: ignore
    else:
        f = io.TextIOWrapper(content)

    if cache and not cache.exists():
        # The file might have been created while downloading/writing.
        tmp_cache = _tmp(cache)
        tmp_cache.write_bytes(raw_bytes)
        if not cache.exists():
            # Atomic rename; the loser of a concurrent race discards its copy.
            tmp_cache.replace(cache)
        else:
            tmp_cache.unlink()

    return _close_when_exhausted(f)
def sharded_file(file_pattern: Path, mode: str, max_size: str = "4G") -> MultiFile:
    """Write to numbered shards: "out.??.gz" -> out.00.gz, out.01.gz, ...

    Each shard holds at most `max_size` bytes (see MultiFile).
    """
    folder, name = file_pattern.parent, file_pattern.name
    assert "?" in name, f"Can't expand give file_pattern: {file_pattern}"
    n = name.count("?")
    assert 0 < n < 8
    assert "?" * n in name, f"The '?' need to be adjacents in {file_pattern}"
    assert "r" not in mode
    # Generator of candidate filenames: MultiFile only opens the next shard
    # when the previous one is full, so this stays lazy.
    files = (folder / name.replace("?" * n, f"%0{n}d" % i) for i in range(10 ** n))

    return MultiFile(files, mode, max_size)
class SplitFile:
    """Read the `chunk`-th of `n_chunks` byte-slices of a text file, on line boundaries."""

    def __init__(self, filename: Path, chunk: int, n_chunks: int, mode: str = "r"):
        assert mode == "r"
        size = os.path.getsize(filename)
        self.handle = open(filename, mode)
        start = chunk * size // n_chunks
        self.end: int = (chunk + 1) * size // n_chunks

        if start > 0:
            self.handle.seek(start - 1)
            # Skip incomplete line. This avoid crashing when reading eg the middle
            # of a unicode char. `self.handle.buffer` is a binary file reader.
            self.handle.buffer.readline()  # type: ignore

    def __enter__(self):
        return self

    def __iter__(self):
        """Yield lines until the slice boundary is passed (the line straddling
        the boundary belongs to this chunk, not the next one)."""
        while True:
            line = self.handle.readline()
            if not line:
                return
            yield line
            if self.handle.tell() >= self.end:
                return

    def readlines(self):
        return list(self.__iter__())

    def close(self):
        self.handle.close()

    def __exit__(self, *args):
        self.close()
def get_block_readers(filename: Path, n_readers, mode="t"):
    """Split a blocked gzip file into up to `n_readers` line iterators.

    Falls back to a single plain gzip reader when no ".index" file exists.
    """
    index_filename = filename.parent / (filename.name + ".index")
    if not index_filename.exists():
        return [gzip.open(filename, "r" + mode)]
    # The index stores the decompressed end-offset of each block
    # (written by BlockedGzipWriter).
    index: List[int] = np.load(index_filename)
    n_chunks = len(index)
    chunk_per_reader = int(np.ceil(n_chunks / n_readers))
    n_readers = int(np.ceil(n_chunks / chunk_per_reader))

    start = 0
    readers = []
    for i in range(n_readers):
        end = index[min((i + 1) * chunk_per_reader - 1, n_chunks - 1)]
        r = _blocked_gzip_reader(filename, start, end, mode)
        readers.append(r)
        start = end
    return readers
def block_reader(filename: Path) -> Iterable[str]:
    """Read one block slice of a blocked gzip file, given as "file.gz[i/n]"."""
    root, pattern = str(filename)[:-1].split("[", 1)
    assert root.endswith(".gz"), "Can only read block of a .gz file for now."

    ii, nn = pattern.strip().split("/")
    i, n_readers = int(ii), int(nn)

    index_filename = root + ".index"
    assert os.path.exists(
        index_filename
    ), f"Index {index_filename} not found for (unknown)"
    index: List[int] = np.load(index_filename)
    n_chunks = len(index)
    chunk_per_reader = int(np.ceil(n_chunks / n_readers))
    n_readers = int(np.ceil(n_chunks / chunk_per_reader))

    # I'm not sure how to handle the case where there is less reader than expected.
    # Currently we return empty readers.

    start = 0
    if i > 0:
        start = index[min((i - 1) * chunk_per_reader, n_chunks - 1)]
    end = index[min(i * chunk_per_reader, n_chunks - 1)]
    return _blocked_gzip_reader(root, start, end, mode="t")
def _blocked_gzip_reader(filename, start, end, mode="t") -> Iterable[str]:
handle = gzip.open(filename, "r" + mode)
handle.seek(start)
try:
while handle.tell() < end:
line = handle.readline()
if not line:
break
yield line
finally:
handle.close()
class BlockedGzipWriter(MultiFile):
    """Writes a Gzip files which can be read by block.

    Decreasing the block size may hurt compression, but provides more split points.
    The decompressed offset of every block boundary is saved to "<file>.index"
    on close, so readers can seek to a block without decompressing everything.
    """

    def __init__(self, filename: Path, mode: str, block_size: str = "256M"):
        assert "w" in mode
        self.filename = Path(filename)
        self.index: List[int] = []  # decompressed offsets of block boundaries
        self.zipfile: Optional[gzip.GzipFile] = None
        super().__init__([], mode, block_size)

    def _open_next_handle(self) -> bool:
        """Here we never actually close/open handles,
        we just write the end of block sequence."""
        if not self.current_handle:
            mode = self.mode + "t"
            self.current_handle = tp.cast(TextIO, gzip.open(self.filename, mode))
            assert isinstance(self.current_handle.buffer, gzip.GzipFile)
            self.zipfile = self.current_handle.buffer
            return True

        # Use Z_FULL_FLUSH to allow random access:
        # https://github.com/madler/zlib/blob/cacf7f1d4e3d44d871b605da3b647f07d718623f/zlib.h#L313
        self.current_handle.buffer.flush(zlib_mode=zlib.Z_FULL_FLUSH)  # type: ignore
        self.index.append(self.current_handle.tell())
        self.current_block_size = 0
        return True

    def flush(self):
        assert self.current_handle is not None
        self.current_handle.flush()

    def close(self):
        if self.current_handle is None:
            return
        self.current_handle.flush()
        self.index.append(self.current_handle.tell())
        self.current_handle.close()
        self.current_handle = None
        # Persist the block index next to the data file.
        index = np.array(self.index, dtype=np.uint64)
        with open(str(self.filename) + ".index", "wb") as o:
            np.save(o, index)
def grouper(iterable, n):
    """Yield lists of `n` consecutive items; the final group may be shorter."""
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == n:
            yield batch
            batch = []
    if batch:
        yield batch
# Handle on the current process, used to report memory usage.
PROCESS = psutil.Process()


def mem_footprint_gb(pid=None):
    """Return the resident memory (RSS) of the current process, in GB.

    `pid` is accepted for backward compatibility but is unused.
    """
    rss = PROCESS.memory_info().rss
    return rss / 1_000_000_000
def _tmp(output: Path) -> Path:
suffix = "".join(output.suffixes)
suffix = ".tmp" + suffix
prefix = output.name[: -len(suffix)]
_, tmp_path = tempfile.mkstemp(dir=output.parent, prefix=prefix, suffix=suffix)
return Path(tmp_path)
@functools.lru_cache()
def _tmp_dir() -> Path:
    """Pick a scratch directory: slurm job tmpdir, then /checkpoint/$USER/tmp, then /tmp."""
    job_id = os.environ.get("SLURM_JOB_ID")
    if job_id:
        return Path("/scratch/slurm_tmpdir") / job_id

    checkpoint = Path("/checkpoint") / os.environ.get("USER", "")
    if checkpoint.exists():
        tmp = checkpoint / "tmp"
        tmp.mkdir(exist_ok=True)
        return tmp

    return Path("/tmp")
if __name__ == "__main__":
    # "fork" lets worker processes inherit the prepared transformers without
    # pickling them (see Transformer / run_pipes).
    multiprocessing.set_start_method("fork")
    main(sys.argv[1:])
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import functools
import itertools
import logging
import os
import sys
import time
import warnings
from pathlib import Path
from typing import Callable, Dict, Iterable, List, Optional, Sequence, Sized
import submitit
from typing_extensions import Protocol
class Executor(Protocol):
    """Callable that runs `function` once per tuple of the zipped `args` (a job array)."""

    def __call__(self, function: Callable[..., str], *args: Iterable) -> None:
        ...
class SubmititRetryOnTimeout(submitit.helpers.Checkpointable):
    """Function wrapper inheriting submitit's Checkpointable so that, per the
    submitit contract, the job is resubmitted on timeout/preemption."""

    def __init__(self, fn: Callable):
        self.fn = fn
        # Preserve the wrapped function's name (used for job naming/logging).
        self.__name__ = fn.__name__

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)
def get_executor(
    name: str,
    log_dir: Path,
    execution: str,
    timeout_hour: float = 1.0,
    mem_gb: int = 1,
    cpus: int = 1,
    task_parallelism: int = -1,
    options: dict = None,
) -> Executor:
    """Build an Executor for the given execution mode.

    execution: "slurm" / "local" / "debug" / "auto" / deprecated "mp",
        optionally followed by comma-separated key=value executor options,
        eg "slurm,partition=dev".
    options: extra executor parameters; overridden by options from `execution`.
    """
    # Work on a private dict: the previous `options: dict = {}` default was
    # mutated by `options.update(...)`, leaking overrides across calls (and
    # into caller-owned dicts).
    options = dict(options) if options else {}
    execution_mode = execution.split(",")[0]
    options.update(
        {kv.split("=", 1)[0]: kv.split("=", 1)[1] for kv in execution.split(",")[1:]}
    )
    if execution_mode == "mp":
        warnings.warn("Execution mode 'mp' is deprecated, use 'local'.")
        execution_mode = "local"
    # "auto" lets submitit detect which executor is available.
    cluster = None if execution_mode == "auto" else execution_mode
    ex = submitit.AutoExecutor(log_dir, cluster=cluster)

    if ex.cluster == "local":
        # LocalExecutor doesn't respect task_parallelism
        return functools.partial(custom_map_array, ex, task_parallelism)
    if ex.cluster == "debug":
        return debug_executor

    # We are on slurm
    if task_parallelism == -1:
        task_parallelism = 500
    ex.update_parameters(
        name=name,
        timeout_min=int(timeout_hour * 60),
        mem_gb=mem_gb,
        cpus_per_task=cpus,
        slurm_array_parallelism=task_parallelism,
        **options,
    )
    return functools.partial(map_array_and_wait, ex)
def map_array_and_wait(
    ex: submitit.AutoExecutor, function: Callable[..., str], *args: Iterable
):
    """Submit one slurm job per zipped argument tuple and wait for them all.

    Progress is printed as jobs complete; raises an Exception summarising
    failures if any job failed.
    """
    f_name = function.__name__
    assert len(args) > 0, f"No arguments passed to {f_name}"
    n_jobs = _approx_length(*args)
    print(f"Submitting {f_name} in a job array ({n_jobs} jobs)")

    jobs = ex.map_array(function, *args)
    if not jobs:
        return
    total = len(jobs)
    array_id = jobs[0].job_id.split("_")[0]
    print(f"Started {f_name} in job array {array_id} ({total} jobs).")

    failed_jobs = []
    done = 0
    for done, job in enumerate(submitit.helpers.as_completed(jobs), start=1):
        e = job.exception()
        if not e:
            print(f"Finished job {job.job_id} ({done} / {total}).", job.result())
        else:
            print(f"Failed job {job.job_id} ({done} / {total}):", e)
            failed_jobs.append(job)

    if not failed_jobs:
        return
    n_failures = 10
    message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
    print(message)
    for job in failed_jobs[:n_failures]:
        print(f"Failed {job.job_id} -> {job.paths.stderr}")
    if len(failed_jobs) > n_failures:
        print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
    raise Exception(message)
def debug_executor(function: Callable[..., Optional[str]], *args: Iterable) -> None:
    """Run every job sequentially in-process; on the first failure, drop into
    (i)pdb for inspection and then exit with status 1."""
    logging.getLogger().setLevel(logging.DEBUG)
    total = _approx_length(*args)
    for i, job_args in enumerate(zip(*args)):
        try:
            message = function(*job_args)
        except Exception:
            # Prefer ipdb when installed, else fall back to the stdlib debugger.
            try:
                import ipdb as pdb  # type: ignore
            except ImportError:
                import pdb  # type: ignore
            import traceback

            traceback.print_exc()
            print("")
            pdb.post_mortem()
            sys.exit(1)
        else:
            if message is not None:
                print(message, f"({i + 1} / {total})")
def _approx_length(*args: Iterable):
for a in args:
if isinstance(a, Sized):
return len(a)
return -1
def custom_map_array(
    ex: submitit.AutoExecutor,
    parallelism: int,
    function: Callable[..., Optional[str]],
    *args: Iterable,
) -> None:
    """Run one job per zipped argument tuple, keeping at most ``parallelism``
    jobs in flight (submitit's LocalExecutor does not honor array
    parallelism itself). Raises if any job failed.
    """
    f_name = function.__name__
    assert len(args) > 0, f"No arguments passed to {f_name}"

    jobs_args = list(zip(*args))
    total = len(jobs_args)
    if parallelism < 0:
        # NOTE(review): os.cpu_count() may return None; the 0 fallback passes
        # the assert below but then no job can ever be queued, which triggers
        # the "No more running jobs" warning — confirm this is intended.
        parallelism = os.cpu_count() or 0
    assert parallelism >= 0, f"Can't run any jobs with task_parallelism={parallelism}"
    print(f"Submitting {total} jobs for {f_name}, with task_parallelism={parallelism}")
    enqueued = 0
    done = 0
    running_jobs: List[submitit.Job] = []
    failed_jobs: List[submitit.Job] = []

    while done < len(jobs_args):
        # Try to queue more job if we have some bandwidth.
        if enqueued < total and len(running_jobs) < parallelism:
            running_jobs.append(ex.submit(function, *jobs_args[enqueued]))
            enqueued += 1
            continue

        # Else wait for some job to finish
        if not running_jobs:
            warnings.warn(
                f"No more running jobs, yet we submitted only {enqueued} / {total} and finished {done} / {total}"
            )
            break

        job = get_next_job(running_jobs)
        running_jobs.remove(job)
        done += 1
        e = job.exception()
        if not e:
            print(f"Finished job {job.job_id} ({done} / {total}).", job.result())
            continue
        print(f"Failed job {job.job_id} ({done} / {total}):", e)
        failed_jobs.append(job)

    # Same failure report as map_array_and_wait: show up to 10 stderr paths.
    if failed_jobs:
        n_failures = 10
        message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
        print(message)
        for job in failed_jobs[:n_failures]:
            print(f"Failed {job.job_id} -> {job.paths.stderr}")
        if len(failed_jobs) > n_failures:
            print(f"... ({len(failed_jobs) - n_failures} failed job skipped)")
        raise Exception(message)
def get_next_job(
    jobs: Sequence[submitit.Job], poll_frequency: float = 10
) -> submitit.Job:
    """Block until any of the given jobs finishes and return it.

    jobs: running jobs to watch
    poll_frequency: seconds between two status sweeps
    """
    announced = False
    while True:
        finished = next((job for job in jobs if job.done()), None)
        if finished is not None:
            return finished
        if not announced:
            # Print a one-time status line with (up to) the first 4 job ids.
            preview = [j.job_id for j in jobs[:4]]
            suffix = "..." if len(jobs) > 4 else ""
            print(
                f"Waiting on {len(jobs)} running jobs. Job ids: {','.join(preview)}{suffix}"
            )
            announced = True
        time.sleep(poll_frequency)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import time
import warnings
from typing import Iterable, Iterator, Sequence, Sized, Tuple, Type
import numpy as np
# 64-bit hash type shared by every dedup hash-set implementation below.
HASH_TYPE: Type[np.uint64] = np.uint64

# Flipped to True when `getpy` fails to import; NaiveHashSet warns once.
GETPY_WARNING = False
class AbstractDedupHashSet(Sized, Iterable[np.uint64]):
    """A dict-like that returns `True` for keys that have been added more than once.

    The API is batched and expect np.array as input. This batching grants better
    perf when using the C++ implementation.
    """

    dtype: Type[np.uint64] = HASH_TYPE

    def __repr__(self):
        implementation = type(self).__name__
        # Bug fix: the original f-string opened "[" but never closed it.
        return f"[{implementation}, len: {len(self)}]"

    def __len__(self) -> int:
        ...

    def __contains__(self, values: Sequence[np.uint64]) -> np.ndarray:
        ...

    def __getitem__(self, values) -> np.ndarray:
        ...

    def __setitem__(self, keys, values) -> None:
        ...

    def items(self) -> Iterable[Tuple[np.uint64, np.uint8]]:
        ...

    def keys(self) -> Iterable[np.uint64]:
        ...

    def __iter__(self) -> Iterator[np.uint64]:
        return iter(self.keys())

    def add(self, h, contains=None):
        """Add the given keys. First time a key is added the value is set to 0,
        then it's set to one."""
        if not isinstance(h, np.ndarray):
            h = np.array(h, dtype=HASH_TYPE)
        if contains is None:
            contains = self.__contains__(h)

        self.__setitem__(h, contains)
        return contains

    def merge(self, keys, values):
        # OR-merge: a key ends up truthy if it was a duplicate in either set.
        contains = self.__contains__(keys)
        self.__setitem__(keys, contains | values)

    def dump(self, filename):
        return self.dump_np(filename)

    def load(self, filename):
        return self.load_np(filename)

    def dump_np(self, filename):
        # One structured array holding (key, value) pairs.
        kv_type = np.dtype([("k", HASH_TYPE), ("v", np.uint8)])
        items = np.fromiter(self.items(), dtype=kv_type, count=len(self))
        with open(filename, "wb") as f:
            np.save(f, items)

    def load_np(self, filename):
        items = np.load(str(filename))
        keys = items["k"].copy()
        values = items["v"].copy()
        self.merge(keys, values)

    def dump_np2(self, filename):
        # Two flat arrays: keys in `filename`, values in `filename + ".val"`.
        keys = np.fromiter(
            (k for (k, v) in self.items()), dtype=HASH_TYPE, count=len(self)
        )
        with open(filename, "wb") as f:
            np.save(f, keys)

        values = np.fromiter(
            (v for (k, v) in self.items()), dtype=np.uint8, count=len(self)
        )
        with open(str(filename) + ".val", "wb") as f:
            np.save(f, values)

    def load_np2(self, filename):
        keys = np.load(filename)
        values = np.load(str(filename) + ".val")
        self.merge(keys, values)
class NaiveHashSet(dict, AbstractDedupHashSet):
    """Pure python implementation of AbstractDedupHashSet.

    Backed by a plain dict, which Python optimizes heavily, so this remains
    reasonably fast despite the per-key Python-level loops.
    """

    def __init__(self, iterable=None):
        super().__init__()
        global GETPY_WARNING
        if GETPY_WARNING:
            warnings.warn(
                "Module 'getpy' not found. Deduplication will take more RAM."
                " Try `pip install cc_net[getpy]"
            )
            GETPY_WARNING = False

    def __contains__(self, values):
        """1 for keys that were added at least once, 0 otherwise."""
        seen = super().__contains__
        return np.fromiter(
            (seen(v) for v in values), count=len(values), dtype=np.uint8
        )

    def __getitem__(self, values):
        """1 for keys that were added at least twice, 0 otherwise."""
        lookup = super().get
        return np.fromiter(
            (lookup(v, False) for v in values),
            count=len(values),
            dtype=np.uint8,
        )

    def __setitem__(self, keys, values):
        assert len(keys) == len(values)
        for key, value in zip(keys, values):
            dict.__setitem__(self, key, value)
# Prefer the C++-backed getpy implementation when installed; otherwise fall
# back to the pure-Python dict and arm the one-time RAM warning.
try:
    import getpy as gp  # type: ignore

    class _FlatHashSet(gp.Dict, AbstractDedupHashSet):
        """C++ backed implementation of AbstractDedupHashSet.

        This implementation is slightly slower than the Python one but uses
        3x less RAM.
        See https://github.com/atom-moyer/getpy.
        """

        def __init__(self):
            super().__init__(HASH_TYPE, np.uint8, default_value=False)

        def __contains__(self, h):
            """Returns `True` if the object has been added at least once."""
            if not isinstance(h, np.ndarray):
                h = np.array(h, dtype=HASH_TYPE)
            c = gp.Dict.__contains__(self, h)
            # Reinterpret getpy's bool array as uint8 in place (no copy).
            c.dtype = np.uint8
            return c

        def dump(self, filename):
            return self.dump_gp(filename)

        def load(self, filename):
            return self.load_gp(filename)

        def dump_gp(self, filename):
            return gp.Dict.dump(self, str(filename))

        def load_gp(self, filename):
            """Override gp.Dict.load, to correctly merge values instead of overwriting."""
            other = gp.Dict(HASH_TYPE, np.uint8, default_value=False)
            other.load(str(filename))
            n = len(other)
            keys = np.fromiter(
                (k for (k, v) in other.items()), dtype=HASH_TYPE, count=n
            )
            values = np.fromiter(
                (v for (k, v) in other.items()), dtype=np.uint8, count=n
            )
            self.merge(keys, values)

    FlatHashSet: Type[AbstractDedupHashSet] = _FlatHashSet
except ImportError:
    GETPY_WARNING = True
    FlatHashSet = NaiveHashSet
def timeit(message, function, *args):
    """Run ``function(*args)`` and print ``message`` with the elapsed time."""
    t0 = time.time()
    function(*args)
    elapsed = time.time() - t0
    print(message, f"took {elapsed:.0f}s")
def compare_load(*filenames):
    """Benchmark the dump/load strategies against existing shard files."""
    assert filenames, "No file given"

    def load_list():
        # Load each shard into its own hash set.
        hashes = []
        for f in filenames:
            h = FlatHashSet()
            h.load(f)
            print(f"Loaded {h} from {f}.")
            hashes.append(h)
        return hashes

    def load_all(load, ext):
        # Merge every shard (with the given extension) into one hash set.
        hashes = FlatHashSet()
        for f in filenames:
            load(hashes, f + ext)

    def dump_all(hashes, dump, ext):
        for h, f in zip(hashes, filenames):
            dump(h, f + ext)

    hashes = load_list()
    # Bug fix: getattr without a default raises AttributeError when getpy is
    # missing (FlatHashSet is then NaiveHashSet, which has no dump_gp/load_gp);
    # the `is not None` checks below clearly expect None in that case.
    dump_gp = getattr(FlatHashSet, "dump_gp", None)
    if dump_gp is not None:
        timeit("Dumping using gp.dump", dump_all, hashes, dump_gp, ".gp.test")
    timeit("Dumping using dump_np", dump_all, hashes, FlatHashSet.dump_np, ".npy.test")
    timeit(
        "Dumping using dump_np2", dump_all, hashes, FlatHashSet.dump_np2, ".npy2.test"
    )

    load_gp = getattr(FlatHashSet, "load_gp", None)
    if load_gp is not None:
        timeit("Loading using gp.load", load_all, load_gp, ".gp.test")
    timeit("Loading using load_np", load_all, FlatHashSet.load_np, ".npy.test")
    timeit("Loading using load_np2", load_all, FlatHashSet.load_np2, ".npy2.test")
# Loading 10 shards:
# [dedup] Dumping using gp.dump took 52s
# [dedup] Dumping using dump_np took 270s
# [dedup] Dumping using dump_np2 took 483s
#
# [dedup] Loading using gp.load took 654s
# [dedup] Loading using load_np took 82s
# [dedup] Loading using load_np2 took 76s
if __name__ == "__main__":
    # CLI: benchmark dump/load strategies on the given shard files.
    compare_load(*sys.argv[1:])
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import base64
import hashlib
import itertools
import urllib.parse
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Sequence, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.execution import get_executor
from cc_net.jsonql import mem_footprint_gb
# Minified files only need approximate doc identity, so 32-bit hashes suffice.
HASH_SIZE = 4
HASH_TYPE = np.uint32

# Fields copied verbatim from CC metadata vs. fields computed by the pipeline.
PUBLIC_FIELDS = ["url", "digest"]
COMPUTED_FIELDS = ["cc_segment", "language", "language_score", "bucket", "perplexity"]
DATA = Path(__file__).parent.parent / "data"
# This is similar to the dedup methods but uses 32-bit hashes.
def _b2i(b: bytes) -> int:
    # Reinterpret the first HASH_SIZE bytes as one native-endian HASH_TYPE int.
    return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def _str_hash(s: str) -> int:
    """Stable 32-bit integer hash of ``s`` (prefix of its SHA1 digest)."""
    digest = hashlib.sha1(s.encode("utf-8"))
    return _b2i(digest.digest())
def get_hashes(lines: Iterable[str]) -> List[bytes]:
    """Truncated (HASH_SIZE bytes) SHA1 digest of every line."""
    size = HASH_SIZE
    return [hashlib.sha1(line.encode("utf-8")).digest()[:size] for line in lines]
def encode_hashes(hashes: Iterable[bytes]) -> str:
    """Concatenate raw hashes and base64-encode them into one ASCII string."""
    blob = b"".join(hashes)
    return base64.b64encode(blob).decode("ascii")
def encode_as_hashes(lines: Iterable[str]) -> str:
    # Convenience: hash every line, then pack the hashes into one b64 string.
    return encode_hashes(get_hashes(lines))
def decode_hashes(compact: str) -> List[bytes]:
    """Invert ``encode_hashes``: split the b64 blob back into raw hashes."""
    all_hashes = base64.b64decode(compact)
    assert len(all_hashes) % HASH_SIZE == 0
    n = len(all_hashes) // HASH_SIZE
    return [all_hashes[i * HASH_SIZE : (i + 1) * HASH_SIZE] for i in range(n)]
def encode_line_ids(line_ids: Sequence[int]) -> str:
    """Pack line ids as little-endian uint16 and base64-encode the bytes."""
    packed = np.array(line_ids, dtype="<u2").tobytes()
    return base64.b64encode(packed).decode("ascii")
def decode_line_ids(compact: str) -> List[int]:
    """Invert ``encode_line_ids``.

    Bug fix: ids are encoded as unsigned ("<u2") but were decoded as signed
    ("<i2"), so any id >= 32768 came back negative and was then silently
    dropped by ``MetadataFetcher.clean`` (its ``l < 0`` guard). Decode with
    the matching unsigned dtype.
    """
    ids_bytes = bytearray(base64.b64decode(compact))
    return np.ndarray(len(ids_bytes) // 2, dtype="<u2", buffer=ids_bytes)
def get_doc_key(digest: str) -> int:
    """32-bit lookup key for a document from its "sha1:<base32>" digest."""
    assert digest.startswith("sha1:")
    h = base64.b32decode(digest[5:])
    return _b2i(h[:HASH_SIZE])
class Minifier(jsonql.Transformer):
    """Strip documents down to the public/computed fields, replacing the
    kept-line list by its compact base64 encoding."""

    ready = True

    def __init__(self):
        self.fields = frozenset(COMPUTED_FIELDS + PUBLIC_FIELDS)

    def do(self, doc: dict) -> Optional[dict]:
        line_ids: List[int] = doc.pop("line_ids")
        keep = self.fields
        for key in list(doc.keys()):
            if key not in keep:
                doc.pop(key, None)
        perplexity = doc.get("perplexity", 0)
        doc["line_ids"] = encode_line_ids(line_ids)
        if perplexity:
            doc["perplexity"] = round(perplexity, 1)
        score = doc.get("language_score", 0)
        if score:
            doc["language_score"] = round(score, 2)
        return doc
class MetadataFetcher(jsonql.Transformer):
    """Reads documents from CC snapshot and join precomputed metadata.

    CC snapshots are split in segments. Each segment is 64Mb long.
    The metadata must also be stored in segments of the same size and names.
    """

    def __init__(self, folder: Union[Path, str]):
        self.ready = True
        # Metadata of the currently loaded segment, keyed by 32-bit doc key.
        self.metadata: Dict[int, dict] = {}

        self._segments: Set[str] = set()
        # NOTE(review): read_doc is never incremented in this class; the
        # "Read" figure in summary() stays 0 unless the base class updates it.
        self.read_doc = 0
        self.missed_doc = 0
        self.missed_par = 0
        self.processed_par = 0

        if isinstance(folder, str):
            # detect path passed as string
            if urllib.parse.urlparse(folder).scheme == "":
                folder = Path(folder)
                assert folder.exists(), f"Metadata folder not found: {folder}"

        self.folder = folder
        self.segment: str = ""
        self.segments_read_twice = 0

    def meta_file(self, segment: str) -> str:
        """Metadata file (URL or local path) matching the given WET segment."""
        file_name = segment.split("/")[-1]
        assert file_name.endswith(".warc.wet.gz") or file_name.endswith(".warc.wet")
        if isinstance(self.folder, str):
            # Remote folder: keep it as a URL join.
            return urllib.parse.urljoin(
                self.folder, file_name.replace(".warc.wet", ".json")
            )
        meta_file = self.folder / file_name.replace(".warc.wet", ".json")
        assert (
            meta_file.exists()
        ), f"Couldn't find metadata file for segment {segment} at {meta_file}"
        return str(meta_file)

    def fetch_metadata(self, segment: str) -> None:
        """Load all metadata of ``segment`` into self.metadata (replacing the
        previous segment's cache)."""
        meta_file = self.meta_file(segment)
        k = get_doc_key
        self.metadata = {}
        collision = 0
        for m in jsonql.read_jsons(meta_file):
            key = k(m["digest"])
            if key in self.metadata:
                collision += 1
            self.metadata[key] = m

        self.log(f"Loaded {len(self.metadata)} metadatas from {meta_file}")
        if collision > 0:
            self._logger.warning(f"Found {collision} collisions !")

        self.segment = segment
        if segment in self._segments:
            # NOTE(review): this logs "Cache miss" when a segment is read a
            # *second* time — the message looks inverted; confirm intent.
            self.log("Cache miss")
            self.segments_read_twice += 1
        self._segments.add(segment)

    def do(self, doc: dict) -> Optional[dict]:
        """Join ``doc`` with its cached metadata; None if no metadata found."""
        if self.segment != doc["cc_segment"]:
            self.fetch_metadata(doc["cc_segment"])
        digest = doc["digest"]
        key = get_doc_key(digest)
        if key not in self.metadata:
            return None

        metadata = self.metadata.pop(key)
        return self.clean(metadata, doc)

    def clean(self, metadata: dict, full_doc: dict) -> Optional[dict]:
        """Keep only the lines selected in ``metadata["line_ids"]`` and copy
        the remaining metadata fields onto the document."""
        line_ids = decode_line_ids(metadata.pop("line_ids"))
        lines = full_doc["raw_content"].split("\n")
        cleaned = []
        for l in line_ids:
            # Out-of-range ids (e.g. from a re-crawled segment) are skipped.
            if l >= len(lines) or l < 0:
                self.missed_par += 1
                continue
            cleaned.append(lines[l])

        self.processed_par += len(line_ids)
        if not cleaned:
            self.missed_doc += 1
            return None

        full_doc["raw_content"] = "\n".join(cleaned)
        full_doc["original_nlines"] = full_doc["nlines"]
        full_doc["original_length"] = full_doc["length"]
        full_doc["nlines"] = len(cleaned)
        full_doc["length"] = len(full_doc["raw_content"])
        for key, value in metadata.items():
            full_doc[key] = value
        return full_doc

    def summary(self) -> List[str]:
        summ = super().summary()
        mem = mem_footprint_gb()
        len_cache = len(self.metadata)
        summ.append(
            f"Read {self.read_doc:_}, stocking {len_cache:_} doc in {mem:.1f}g."
        )
        if self.missed_doc:
            r = self.missed_doc / self.processed
            summ.append(f"! Missed {self.missed_doc} documents ({r:.1%}) !")

        if self.missed_par:
            r = self.missed_par / self.processed
            summ.append(f"! Missed {self.missed_par} paragraphs ({r:.1%}) !")
        return summ
def _expand_files(files: List[Path]) -> List[Path]:
if len(files) == 1 and files[0].is_dir():
folder = files[0]
files = sorted(folder.glob("*.json.gz"))
print(f"Found {len(files)} files under {folder}/*.json.gz")
assert files, "No files found"
return files
def minify_file(file: Path, output: Path) -> str:
    """Minify the given file.

    Streams ``file`` through the Minifier transformer into ``output`` and
    returns a human-readable completion message.
    """
    jsonql.run_pipes(Minifier(), file=file, output=output)
    return f"Minified {output}"
def minify(
    files: List[Path], output_dir: Path, execution: str = "mp", parallelism: int = -1
):
    """Minify all the files in the given folder."""
    files = _expand_files(files)
    output_dir.mkdir(exist_ok=True)
    # Record the shard names so downstream tooling knows what to expect.
    with open(output_dir / "files.txt", "w") as listing:
        listing.write("".join(f.name + "\n" for f in files))
    outputs = [output_dir / f.name for f in files]
    ex = get_executor(
        "minify",
        output_dir / "logs",
        execution,
        timeout_hour=2,
        cpus=1,
        task_parallelism=parallelism,
    )
    ex(minify_file, files, outputs)
def fetch_metadata_file(
    file: Union[Path, str],
    metadata_dir: Union[Path, str],
    output: Path,
    cache_dir: Optional[Path] = None,
):
    """Rehydrate one minified file by joining it with its metadata.

    Writes to a sibling tmp file first, then renames atomically to ``output``.
    NOTE(review): ``cache_dir`` is accepted but unused here — presumably kept
    so the signature matches what ``fetch_metadata`` submits; confirm.
    """
    unminifier = MetadataFetcher(metadata_dir)
    tmp = output.with_name("tmp." + output.name)
    jsonql.run_pipes(unminifier, file=file, output=tmp)
    tmp.rename(output)
    return f"Fetched metadata for {file}. Results at {output}."
def fetch_metadata(
    files: List[str],
    metadata_dir: Union[Path, str],
    output_dir: Path,
    execution: str = "mp",
    parallelism: int = -1,
    cache_dir: Optional[Path] = None,
):
    """Rehydrate a batch of minified files, one executor job per file.

    Pass ``cache_dir="none"`` (the literal string path) to disable the
    default ``<output_dir>/wet_cache`` directory. Already-existing outputs
    are skipped.
    """
    if len(files) == 1 and Path(files[0]).is_dir():
        folder = Path(files[0])
        files = [str(f) for f in sorted(folder.glob("*.json.gz"))]
        print(f"Found {len(files)} files under {folder}/*.json.gz")

    assert len(files) > 0, "No files given."
    output_dir.mkdir(exist_ok=True)

    outputs = [output_dir / str(f).split("/")[-1] for f in files]
    if cache_dir is None:
        cache_dir = output_dir / "wet_cache"
        cache_dir.mkdir(exist_ok=True)
    if str(cache_dir) == "none":
        cache_dir = None
    # Skip files whose output already exists (cheap resume support).
    files = [f for f, o in zip(files, outputs) if not o.exists()]
    outputs = [o for o in outputs if not o.exists()]
    if not files:
        return
    ex = get_executor(
        "unminify",
        output_dir / "logs",
        execution,
        timeout_hour=8,
        cpus=1,
        task_parallelism=parallelism,
        mem_gb=32,
    )
    ex(fetch_metadata_file, files, outputs, itertools.repeat(cache_dir))
if __name__ == "__main__":
    # Expose each top-level command as a CLI subcommand via func_argparse.
    import func_argparse

    func_argparse.main(minify_file, minify, fetch_metadata, fetch_metadata_file)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import unicodedata
# Mapping from exotic/full-width unicode punctuation to an ASCII equivalent.
UNICODE_PUNCT = {
    ",": ",",
    "。": ".",
    "、": ",",
    "„": '"',
    "”": '"',
    "“": '"',
    "«": '"',
    "»": '"',
    # NOTE(review): mapping the digit "1" to '"' looks like a typo (a quote
    # character lost in transcription?) — confirm against upstream before use.
    "1": '"',
    "」": '"',
    "「": '"',
    "《": '"',
    "》": '"',
    "´": "'",
    "∶": ":",
    ":": ":",
    "?": "?",
    "!": "!",
    "(": "(",
    ")": ")",
    ";": ";",
    "–": "-",
    "—": " - ",
    ".": ". ",
    "~": "~",
    "’": "'",
    "…": "...",
    "━": "-",
    "〈": "<",
    "〉": ">",
    "【": "[",
    "】": "]",
    "%": "%",
    "►": "-",
}

# Character class matching any key above (none of them needs regex escaping).
UNICODE_PUNCT_RE = re.compile(f"[{''.join(UNICODE_PUNCT.keys())}]")
def replace_unicode_punct(text: str) -> str:
    """Replace exotic unicode punctuation by its ASCII equivalent."""
    return "".join((UNICODE_PUNCT.get(c, c) for c in text))
def remove_unicode_punct(text: str) -> str:
    """More aggressive version of replace_unicode_punct but also faster."""
    # Deletes (rather than replaces) every character listed in UNICODE_PUNCT.
    return UNICODE_PUNCT_RE.sub("", text)
def strip_accents(line: str) -> str:
    """Strips accents from a piece of text.

    Decomposes to NFD, then drops combining marks (category "Mn").
    """
    nfd = unicodedata.normalize("NFD", line)
    # Bug fix: the original guard `if len(output) == line: return line`
    # compared an int to a str, which is always False — dead code — so the
    # join below ran unconditionally anyway. The dead branch is removed;
    # behavior is unchanged. (Do NOT "fix" it to compare lengths: that would
    # wrongly return precomposed input like "café" unstripped.)
    return "".join(c for c in nfd if unicodedata.category(c) != "Mn")
# Build a regex matching all control characters.
NON_PRINTING_CHARS_RE = re.compile(
    f"[{''.join(map(chr, list(range(0,32)) + list(range(127,160))))}]"
)
DIGIT_RE = re.compile(r"\d")
# Merge the two character classes into one by deleting the inner "][" seam.
PUNCT_OR_NON_PRINTING_CHARS_RE = re.compile(
    (UNICODE_PUNCT_RE.pattern + NON_PRINTING_CHARS_RE.pattern).replace("][", "")
)
def remove_non_printing_char(text: str) -> str:
    """Delete ASCII / Latin-1 control characters from ``text``."""
    return NON_PRINTING_CHARS_RE.sub("", text)
def normalize_spacing_for_tok(text: str, language: str = "en") -> str:
    """Normalize spacing and quote/number punctuation before tokenization.

    Port of the Moses `normalize-punctuation.perl` heuristics; the exact
    replacement order matters and is preserved as-is.
    """
    res = (
        text.replace("\r", "")
        # remove extra spaces
        .replace("(", " (")
        .replace(")", ") ")
        .replace(" +", " ")
    )
    res = re.sub(r"\) ([\.\!\:\?\;\,])", r"\)\1", res)
    res = res.replace("( ", "(").replace(" )", ")")
    res = re.sub(r"(\d) \%", r"\1\%", res)
    res = res.replace(" :", ":").replace(" ;", ";")
    res = res.replace("`", "'").replace("''", ' " ')

    res = (
        res.replace("„", '"')
        .replace("“", '"')
        .replace("”", '"')
        .replace("–", "-")
        .replace("—", " - ")
        .replace(" +", " ")
        .replace("´", "'")
        # NOTE(review): the next two .replace calls receive regex-looking
        # patterns as *plain string literals*, so they almost never match real
        # text. This quirk is inherited from the Moses port — confirm before
        # converting them to re.sub, as that would change behavior.
        .replace("([a-z])‘([a-z])", r"\1'\2/")
        .replace("([a-z])’([a-z])", r"\1'\2/")
        .replace("‘", '"')
        .replace("‚", '"')
        .replace("’", '"')
        .replace("''", '"')
        .replace("´´", '"')
        .replace("…", "...")
        # French quotes
        .replace(" « ", ' "')
        .replace("« ", '"')
        .replace("«", '"')
        .replace(" » ", '" ')
        .replace(" »", '"')
        .replace("»", '"')
        # handle pseudo-spaces
        .replace(" %", "%")
        .replace("nº ", "nº ")
        .replace(" :", ":")
        .replace(" ºC", " ºC")
        .replace(" cm", " cm")
        .replace(" ?", "?")
        .replace(" !", "!")
        .replace(" ;", ";")
        .replace(", ", ", ")
        .replace(" +", " ")
        .replace(".", ". ")
    )
    # English "quotation," followed by comma, style
    if language == "en":
        res = re.sub(r"\"([,\.]+)", r"\1\"", res)
    # Czech is confused
    elif language == "cs" or language == "cz":
        pass
    # German/Spanish/French "quotation", followed by comma, style
    else:
        res = res.replace(',"', '",')
        res = re.sub(
            r"(\.+)\"(\s*[^<])", r"\"\1\2", res
        )  # don't fix period at end of sentence

    # Thousands separators: comma for de/es/cz/cs/fr, period otherwise.
    if (
        language == "de"
        or language == "es"
        or language == "cz"
        or language == "cs"
        or language == "fr"
    ):
        res = re.sub(r"(\d) (\d)", r"\1,\2", res)
    else:
        res = re.sub(r"(\d) (\d)", r"\1.\2", res)
    return res
def normalize(line: str, accent=True, case=True, numbers=True, punct=1) -> str:
    """Normalize one line of text for language modeling.

    Optionally lowercases, strips accents, maps digits to "0", and either
    replaces (punct=1) or removes (punct=2) unicode punctuation; control
    characters are always removed.
    """
    line = line.strip()
    if not line:
        return line
    if case:
        line = line.lower()
    if accent:
        line = strip_accents(line)
    if numbers:
        line = DIGIT_RE.sub("0", line)
    if punct == 1:
        line = replace_unicode_punct(line)
    elif punct == 2:
        line = remove_unicode_punct(line)
    return remove_non_printing_char(line)
def slow_normalize_for_dedup(line: str) -> str:
    # Reference implementation of normalize_for_dedup via the generic path.
    return normalize(line, accent=False, case=True, numbers=True, punct=2)
def normalize_for_dedup(line: str) -> str:
    """Fast normalization for dedup: lowercase, digits -> "0", and drop
    punctuation plus control characters in one regex pass."""
    stripped = line.strip()
    if not stripped:
        return stripped
    lowered = stripped.lower()
    no_digits = DIGIT_RE.sub("0", lowered)
    return PUNCT_OR_NON_PRINTING_CHARS_RE.sub("", no_digits)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import subprocess
from pathlib import Path
from typing import List
import func_argparse
import numpy as np
from cc_net import jsonql
def get_index(file: Path) -> Path:
    """Sidecar index path: ``<name>.index`` in the same directory."""
    indexed_name = file.name + ".index"
    return file.parent / indexed_name
def _get_tmp(output: Path) -> Path:
return output.parent / (output.stem + ".tmp" + output.suffix)
def reshard(
    inputs: List[Path],
    output: Path,
    tmp: Path = None,
    free_original: bool = False,
    rm_original: bool = False,
) -> Path:
    """Read the given files and concatenate them to the output file.

    Can remove original files on completion, or just write dummy content into them to free disk.
    """
    # NOTE(review): `tmp` annotation should be Optional[Path]; Optional is not
    # imported in this module, so only flagging it here.
    if tmp is None:
        tmp = _get_tmp(output)
    logging.info(f"Resharding {inputs} to {tmp}, will move later to {output}")
    # Write into the tmp file first, then atomically move into place.
    jsonql.run_pipes(file=inputs, output=tmp)
    tmp.replace(output)
    tmp_index = get_index(tmp)
    if tmp_index.exists():
        tmp_index.replace(get_index(output))

    if not (free_original or rm_original):
        return output
    for _input in inputs:
        if rm_original:
            _input.unlink()
        elif free_original:
            # Overwrite the previous file.
            # This frees up disk space and allows doit to properly track the success.
            _input.write_text(f"Resharded into {output}")
        if get_index(_input).is_file():
            get_index(_input).unlink()
    return output
def fast_reshard(
    inputs: List[Path],
    output: Path,
    tmp: Path = None,
    free_original: bool = False,
    rm_original: bool = False,
) -> Path:
    """Same as reshard but don't re-compress the output.

    This will lead to a bigger output file, especially if the shards are very small.
    """
    if tmp is None:
        tmp = _get_tmp(output)
    # Raw byte-level concatenation (valid for gzip members appended back to
    # back). NOTE(review): relies on the Unix `cat` binary being available.
    with open(tmp, "wb") as o:
        subprocess.run(["cat"] + [str(f) for f in inputs], stdout=o)

    tmp.replace(output)
    indexes_files = [get_index(i) for i in inputs]
    existing_indexes = sum(i.exists() for i in indexes_files)
    # Either every input has an index or none does; a mix is unrecoverable.
    assert (
        existing_indexes == len(indexes_files) or existing_indexes == 0
    ), "some indexes don't exist."
    if existing_indexes > 0:
        indexes = [np.load(idx) for idx in indexes_files]
        # Offsets are cumulative: shift each shard's offsets by the previous
        # shard's last offset before concatenating.
        for i in range(len(indexes) - 1):
            indexes[i + 1] += indexes[i][-1]
        with open(str(output) + ".index", "wb") as o:
            np.save(o, np.concatenate(indexes))

    if not (free_original or rm_original):
        return output
    for _input in inputs:
        if rm_original:
            _input.unlink()
        elif free_original:
            # Overwrite the previous file.
            # This frees up disk space and allows doit to properly track the success.
            _input.write_text(f"Resharded into {output}")
        if get_index(_input).is_file():
            get_index(_input).unlink()
    return output
def determine_groups(
    inputs: List[Path], target_size: int = 4 * 1024 ** 3
) -> List[List[Path]]:
    """Partition ``inputs`` into groups of files totalling ~``target_size``
    bytes, estimating the per-file size from the first 10 files."""
    if len(inputs) == 0:
        return []

    sample = inputs[:10]
    typical_size = sum(s.stat().st_size for s in sample) / len(sample)
    # Bug fix: `typical_size` is a float (an average), so
    # `target_size // typical_size` is a float as well; group sizes must be
    # ints before being handed to the grouper (islice-style slicing rejects
    # floats). Cast explicitly.
    group_size = min(int(target_size // typical_size), len(inputs))
    group_size = max(group_size, 1)

    return jsonql.grouper(inputs, group_size)
if __name__ == "__main__":
    # CLI entry point: expose `reshard`'s arguments via func_argparse.
    func_argparse.single_main(reshard)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import time
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Tuple, Union
import kenlm # type: ignore
import numpy as np # type: ignore
import pandas as pd # type: ignore
import sentencepiece # type: ignore
from cc_net import jsonql, text_normalizer
LMDescriptor = Union[Dict[str, Path], Union[Path, str]]
def get_args():
    """Command-line arguments for running this module as a script,
    on top of the shared jsonql input/output options."""
    parser = argparse.ArgumentParser(
        description="Compute the score of each sentences of a document",
        parents=[jsonql.io_parser()],
    )
    parser.add_argument("--models", type=str)
    parser.add_argument("--sentences", action="store_true", default=False)
    parser.add_argument(
        "--languages", type=str, help="Ignore doc with another language"
    )
    parser.add_argument("--field", type=str, default=None)
    parser.add_argument("--newline", type=str, default="\n")
    return vars(parser.parse_args())
def pp(log_score, length):
    """Perplexity for a total log10 score over ``length`` tokens."""
    avg_neg_log = -log_score / length
    return 10.0 ** avg_neg_log
class SentencePiece(jsonql.Transformer):
    """Tokenize one field of each document with a single SentencePiece model."""

    # Sentence Pieces model have to be read back from disk.
    warning_when_pickling = True

    def __init__(
        self,
        model: Path,
        field: str,
        output_field: str = "tokenized",
        normalize: bool = False,
    ):
        super().__init__()
        self.model = model
        self.field = field
        self.output_field = output_field
        self.normalize = normalize
        # Lazily loaded in _prepare (models can't travel through pickling).
        self.sp: sentencepiece.SentencePieceProcessor = None

    def _prepare(self):
        if self.sp is not None:
            return
        self.sp = sentencepiece.SentencePieceProcessor()
        self.sp.load(str(self.model))
        return self

    def do(self, document: dict) -> dict:
        text = document[self.field]
        if self.normalize:
            text = text_normalizer.normalize(text)
        tokenized = self.sp.encode_as_pieces(text)
        document[self.output_field] = " ".join(tokenized)
        return document
class MultiSentencePiece(jsonql.Transformer):
    """Tokenize documents with a per-language SentencePiece model, loaded
    lazily on first use of each language."""

    warning_when_pickling = True

    def __init__(
        self,
        models: Union[Path, Dict[str, Path]],
        field: str,
        output_field: str = "tokenized",
        normalize: bool = False,
    ):
        super().__init__()
        self.field = field
        self.output_field = output_field
        self.normalize = normalize
        self._prefetch: Sequence[str] = []

        if isinstance(models, Path):
            # Glob pattern: language is the first dot-part of the file name.
            self.models = {
                m.name.split(".")[0]: m for m in models.parent.glob(models.name)
            }
        else:
            self.models = models
            self._prefetch = list(models.keys())
        self.sp: Dict[str, sentencepiece.SentencePieceProcessor] = {}

    def _prepare(self) -> None:
        for lang in self._prefetch:
            assert (
                self.get_sp(lang) is not None
            ), f"No model found for {lang} at {self.models.get(lang)}."

    def get_sp(self, lang) -> Optional[sentencepiece.SentencePieceProcessor]:
        """Cached SentencePiece model for ``lang``; None if no model exists."""
        sp = self.sp.get(lang)
        if sp is not None:
            return sp
        if lang not in self.models:
            return None

        start_load = time.time()
        self.log(f"Loading {self.models[lang]}...")
        sp = sentencepiece.SentencePieceProcessor()
        sp.load(str(self.models[lang]))
        self.sp[lang] = sp
        load_time = time.time() - start_load
        self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
        return sp

    def do(self, document: dict) -> Optional[dict]:
        text = document[self.field]
        if self.normalize:
            text = text_normalizer.normalize(text)
        sp = self.get_sp(document.get("language"))
        if sp is None:
            # Unknown language: pass the document through untokenized.
            return document
        tokenized = sp.encode_as_pieces(text)
        document[self.output_field] = " ".join(tokenized)
        return document
class DocLM(jsonql.Transformer):
    """Score documents with per-language KenLM models: writes the rounded
    document-level perplexity into ``output_field``."""

    def __init__(
        self,
        models: Union[Path, Dict[str, Path]],
        field: str,
        output_field: str = "perplexity",
        newline: str = "\n",
        normalize: bool = True,
        load_method: int = 2,
    ):
        super().__init__()
        self.field = field
        self.output_field = output_field
        self.newline = newline
        self.normalize = normalize
        self._prefetch: Sequence[str] = []
        self.lm_config = kenlm.Config()
        # This is the default settings
        # POPULATE will mmap the models and populate the pages.
        # Maybe that's not the best way when the models are on a network disk.
        # TODO: try copying models file, try READ or PARALLEL_READ
        self.lm_config.load_method = load_method

        if isinstance(models, Path):
            # Glob pattern: language is the first dot-part of the file name.
            self.models = {
                m.name.split(".")[0]: m for m in models.parent.glob(models.name)
            }
        else:
            self.models = models
            self._prefetch = list(models.keys())
        self.lm: Dict[str, kenlm.Model] = {}
        self.n_lines = 0

    def _prepare(self) -> None:
        for lang in self._prefetch:
            assert (
                self.get_lm(lang) is not None
            ), f"No model found for {lang} at {self.models.get(lang)}."

    def get_lines(self, document: dict) -> List[str]:
        """Lines to score; empty if the language is missing/unsupported or
        the field is empty."""
        lang = document.get("language")
        if not lang:
            return []

        if lang not in self.models:
            return []

        content = document.get(self.field)
        if not content:
            return []

        lines = content.split(self.newline)
        self.n_lines += len(lines)
        return lines

    def get_lm(self, lang: Optional[str]) -> Optional[kenlm.Model]:
        """Cached KenLM model for ``lang``; None if no model exists."""
        if lang is None:
            return None
        lm = self.lm.get(lang)
        if lm is not None:
            return lm
        model = self.models.get(lang)
        if model is None:
            return None
        start_load = time.time()
        self.log(f"Loading {self.models[lang]}...")
        lm = kenlm.Model(str(model), self.lm_config)
        self.lm[lang] = lm
        load_time = time.time() - start_load
        self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
        return lm

    def do(self, document: dict) -> dict:
        lines = self.get_lines(document)
        model = self.get_lm(document.get("language"))
        if not lines or not model:
            return document

        # Aggregate log10 scores over all lines, then convert to perplexity.
        doc_log_score, doc_length = 0, 0
        for line in lines:
            if self.normalize:
                line = text_normalizer.normalize(line)
            log_score = model.score(line)
            # +1 accounts for the end-of-sentence token.
            length = len(line.split()) + 1

            doc_log_score += log_score
            doc_length += length

        document[self.output_field] = round(pp(doc_log_score, doc_length), 1)
        return document

    def summary(self):
        delay = time.time() - self.start_time
        h = delay / 3600
        s = self.n_lines / delay

        summ = super().summary()
        summ.append(f"Processed {self.n_lines:_} lines in {h:.2}h ({s:.1} lines/s).")
        return summ
class SentencesLM(DocLM):
    """Returns the score of each individual paragraph."""

    # Unlike DocLM.do, this returns a "<perplexity>\t<line>" string per line
    # (joined with newlines) instead of mutating the document.
    def do(self, document: dict) -> Optional[str]:  # type: ignore
        lines = self.get_lines(document)
        model = self.get_lm(document.get("language"))
        if not lines or not model:
            return None

        sentences = []
        for line in lines:
            if self.normalize:
                line = text_normalizer.normalize(line)
            log_score = model.score(line)
            length = len(line.split()) + 1

            sentences.append(f"{pp(log_score, length)}\t{line}")
        return "\n".join(sentences)
class PerplexityBucket(jsonql.Transformer):
    """Tag documents with a quality "bucket" (head/middle/tail/all) based on
    per-language perplexity percentile cutoffs read from a CSV file."""

    def __init__(
        self, cutoff_csv: Path, percentile_head: int = 30, percentile_tail: int = 60
    ):
        super().__init__()
        self.cutoff_csv = cutoff_csv
        self.percentile_head = percentile_head
        self.percentile_tail = percentile_tail
        self.cutoffs: Dict[str, Tuple[float, float]] = {}

    def _prepare(self) -> None:
        table = pd.read_csv(self.cutoff_csv, index_col=0)
        head, tail = self.percentile_head, self.percentile_tail
        self.cutoffs = {
            lang: (table[lang][head], table[lang][tail]) for lang in table.columns
        }

    def get_bucket(self, doc: dict) -> str:
        perplexity = doc.get("perplexity", -1)
        lang = doc.get("language")
        if lang not in self.cutoffs or perplexity < 0:
            return "all"
        pp_head, pp_tail = self.cutoffs[lang]
        if perplexity < pp_head:
            return "head"
        return "middle" if perplexity < pp_tail else "tail"

    def do(self, doc: dict) -> dict:
        doc["bucket"] = self.get_bucket(doc)
        return doc
class DropKeys(jsonql.Transformer):
    """Remove the given keys from every document (missing keys are ignored)."""

    def __init__(self, *keys):
        super().__init__()
        self.keys = keys

    def do(self, document: dict) -> Optional[dict]:
        if not document:
            return None
        for key in self.keys:
            document.pop(key, None)
        return document
class RemoveSmall(jsonql.Transformer):
    """Drop documents whose `field` content is missing or shorter than `min_len`."""

    def __init__(self, field, min_len):
        super().__init__()
        self.field = field
        self.min_len = min_len
        self.removed = 0

    def do(self, document: dict) -> Optional[dict]:
        if not document:
            return None
        content = document.get(self.field)
        if content and len(content) >= self.min_len:
            return document
        self.removed += 1
        return None

    def summary(self):
        r, n = self.removed, self.processed
        ratio = r / n if n else 0
        return [f"Removed {r} small documents out of {n} ({ratio:.1%})"]
def perplexity_to_bin(file: Path, output: Path, models, tok_field: str):
    """Score every document of `file` with a LM and append the perplexities to
    `output` as a flat float32 array (one value per document).

    NOTE(review): `stats` accumulates up to `max_stats` perplexities but is
    never returned or used afterwards -- confirm whether it can be removed.
    """
    pp_field = "perplexity"
    lm = DocLM(models, tok_field, output_field=pp_field)
    stats: List[float] = []
    max_stats = 1_000_000
    batch_size = 100_000
    i = 0
    batch = []
    with open(output, "wb") as o:
        for doc in jsonql.read_jsons(file):
            i += 1
            # Applying the transformer writes the perplexity into the doc.
            pp = lm(doc)[pp_field]
            if len(stats) < max_stats:
                stats.append(pp)
            batch.append(pp)
            # Flush in fixed-size chunks to bound memory usage.
            if len(batch) >= batch_size:
                np.array(batch, dtype=np.float32).tofile(o)
                batch = []
        if len(batch) > 0:
            np.array(batch, dtype=np.float32).tofile(o)
# Script entry point: dump raw float32 perplexities when the output ends in
# ".bin", otherwise run DocLM as a regular jsonql pipe.
if __name__ == "__main__":
    args = get_args()
    output = Path(args["output"])
    if output.suffix == ".bin":
        perplexity_to_bin(args["file"], output, args["models"], args["field"])
    else:
        jsonql.run_pipe(DocLM, args)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from typing import Dict, Optional
import sacremoses # type: ignore
from cc_net import jsonql, text_normalizer
class RobustTokenizer(jsonql.Transformer):
    """Moses tokenizer with the expected preprocessing."""

    # Languages for which accent stripping is enabled before tokenization.
    # NOTE(review): the name reads "without accent" but membership turns accent
    # normalization *on* -- confirm the intended semantics of this set.
    LANG_WITHOUT_ACCENT = {"en", "my"}

    def __init__(self, lang: str):
        super().__init__()
        self.lang = lang
        self.moses = sacremoses.MosesTokenizer(lang)
        self.rm_accent = lang in self.LANG_WITHOUT_ACCENT
        # No lazy state to prepare: the transformer is usable immediately.
        self.ready = True

    def do(self, text: str):
        # Normalize unicode & punctuation (case and numbers preserved), then fix
        # spacing quirks that confuse the Moses tokenizer.
        text = text_normalizer.normalize(
            text, accent=self.rm_accent, case=False, numbers=False, punct=True
        )
        text = text_normalizer.normalize_spacing_for_tok(text, language=self.lang)
        return self.moses.tokenize(text, return_str=True, escape=False)
class DocTokenizer(jsonql.Transformer):
    """Tokenize the text found in `field` and store the result in `output_field`."""

    def __init__(
        self,
        field: str,
        output_field: str = "tokenized",
        language_field: str = "language",
    ):
        super().__init__()
        self.field = field
        self.output_field = output_field
        self.language_field = language_field
        self.n_docs = 0
        # One tokenizer per language, built lazily and cached.
        self.tokenizers: Dict[str, RobustTokenizer] = {}

    def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]:
        """Return (and cache) the tokenizer for `lang`, or None when unsupported."""
        cache = self.tokenizers
        if lang in cache:
            return cache[lang]
        if lang in ("th", "zh", "ja"):
            # TODO find a tokenizer for those languages
            return None

        cache[lang] = RobustTokenizer(lang)
        return cache[lang]

    def do(self, document):
        lang = document[self.language_field]
        tok = self.get_tokenizer(lang)
        if not tok:
            # Unsupported language: pass the document through unchanged.
            return document

        self.n_docs += 1
        lines = document[self.field].split("\n")
        tokenized = "\n".join(tok(l) for l in lines)
        document[self.output_field] = tokenized
        return document

    def summary(self):
        delay = (time.time() - self.start_time) / 3600
        speed = self.n_docs / delay
        return [
            f"Tokenized {self.n_docs:_} documents in {delay:.2}h ({speed:.1} doc/s)."
        ]
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to remove duplicate paragraphs across one or several shards.
"""
import argparse
import gc
import hashlib
import logging
import multiprocessing
import os
import tempfile
import time
from pathlib import Path
from typing import Iterable, List, Optional, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.flat_hash_set import HASH_TYPE, AbstractDedupHashSet, FlatHashSet
from cc_net.jsonql import mem_footprint_gb
from cc_net.text_normalizer import normalize_for_dedup
BYTE_ORDER = "little"
HASH_SIZE = HASH_TYPE(0).nbytes
DISABLE_MULTI_PROCESSING = False
FilesOrDir = Union[List[Path], Path]
def get_args():
    """Build the CLI flags of the deduplication tool and return them as a dict.

    Inherits the common input/output flags from `jsonql.io_parser()`.
    """
    parser = argparse.ArgumentParser(
        description="Read a set of json files and allow to query them",
        parents=[jsonql.io_parser()],
    )
    parser.add_argument("--field", type=str, default="raw_content")
    parser.add_argument("--output_hashes", type=str)
    # --no_finalize stores False into "finalize" (finalize defaults to True).
    parser.add_argument("--no_finalize", action="store_false", dest="finalize")
    # parser.add_argument("--mem_gb", type=int)
    parser.add_argument("--hashes", type=str)
    return vars(parser.parse_args())
def _b2i(b: bytes) -> int:
    """Reinterpret the first HASH_SIZE bytes of `b` as one HASH_TYPE integer
    (native byte order, per numpy.frombuffer)."""
    return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def str_hash(s: str) -> int:
    """Hash a unicode string down to a HASH_TYPE integer via SHA-1."""
    digest = hashlib.sha1(s.encode("utf-8")).digest()
    return _b2i(digest)
# Module-wide shorthand: emit info-level records through this module's logger.
log = logging.getLogger(__name__).info
def run_par(processes):
    """Run (function, args, kwargs) triples, in subprocesses when possible.

    This is different from multiprocessing.map since it allows for kwargs.
    A single task (or DISABLE_MULTI_PROCESSING) falls back to sequential
    in-process execution.
    """
    processes = list(processes)
    if len(processes) == 1 or DISABLE_MULTI_PROCESSING:
        for fn, fn_args, fn_kwargs in processes:
            fn(*fn_args, **fn_kwargs)
        return

    log(f"Starting {len(processes)} subprocess")
    workers = [
        multiprocessing.Process(target=fn, args=fn_args, kwargs=fn_kwargs)
        for fn, fn_args, fn_kwargs in processes
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    failed = 0
    for worker in workers:
        if worker.exitcode != 0:
            log(f"Process failed with code {worker.exitcode}: {worker}")
            failed += 1
    assert failed == 0, f"{failed} processes failed..."
def split_file(file, n_splits):
    """Yield `n_splits` SplitFile views over `file`, one per chunk index."""
    return (jsonql.SplitFile(file, i, n_splits) for i in range(n_splits))
def merge(hashes_1, hashes_2, output):
    """Merge two hash sets (FlatHashSet instances or paths to dumped sets).

    Keys present in both sets are marked as duplicates (value 1) in `h1`;
    keys unique to `h2` are copied into `h1` with their value. Optionally
    dumps the merged set to `output`. Returns the merged FlatHashSet.
    """
    if isinstance(hashes_1, str):
        h1 = FlatHashSet()
        h1.load(hashes_1)
    else:
        h1 = hashes_1

    if isinstance(hashes_2, str):
        h2 = FlatHashSet()
        h2.load(hashes_2)
    else:
        h2 = hashes_2

    # Vectorized membership test of every h2 key against h1.
    h2_np = np.fromiter(h2.keys(), dtype=FlatHashSet.dtype, count=len(h2))
    dup = h1.__contains__(h2_np)

    # Dups between h1 and h2 will be set to 1, keys unique to h2 are copied to
    # h1 with their value.
    h1[h2_np] = dup
    if output:
        h1.dump(output)
    return h1
def merge_shard(hash_files, output):
    """Fold several hash dump files into one set and save it to `output`."""
    merged = FlatHashSet()
    merged.load(hash_files[0])
    for hash_file in hash_files[1:]:
        merged = merge(merged, hash_file, output=None)
        print(f"Merged {hash_file}. We now have {len(merged)} hashes.")

    merged.dump(output)
    print(f"Saved {len(merged)} hashes to {output}.")
def _dump_sentence_hashes(source: Path, output: Path, field: str):
    """Hash every line of `field` for each document of `source`, appending the
    raw HASH_TYPE arrays to the binary file `output`.

    Hashes are written back-to-back with no framing: readers must recover
    document boundaries from line counts (see `finalize` / `finalize_doc`).
    """
    treated = 0
    started = time.time()
    with open(output, "wb") as o:
        for doc in jsonql.read_jsons(source):
            content = doc.get(field)
            if not content:
                continue
            h = compute_hashes(content)
            if h is None:
                continue
            h.tofile(o)
            treated += 1
            if treated % 100_000 == 0:
                delay = time.time() - started
                log(
                    f"Computed {treated} documents hashes in {delay / 3600:.2f}h ({treated / delay} doc / s)"
                )
def _remove_duplicate_hashes(duplicates, source, output):
    """Stream the binary hash file `source`, zero-out the hashes marked as
    duplicates in `duplicates`, and write the result to `output`.

    A zeroed slot is later interpreted as "drop this line" (see finalize_doc);
    the file layout (one slot per line) is preserved.
    """
    batch_size = 100_000
    n_lines, n_lines_kept = 0, 0
    with open(source, "rb") as f, open(output, "wb") as o:
        log(f"Opening {source} with mode rb")
        log(f"Opening {output} with mode wb")
        while True:
            hashes = np.fromfile(f, dtype=HASH_TYPE, count=batch_size)
            if hashes.size == 0:
                break
            # duplicates[h] >= 1 flags a duplicate; keep the rest.
            keep = duplicates[hashes] < 1
            kept = keep.sum()
            # Multiplying by the boolean mask zeroes duplicate hashes in place.
            hashes *= keep
            hashes.tofile(o)

            n_lines += hashes.size
            n_lines_kept += kept

    removed = n_lines - n_lines_kept
    selectivity = n_lines_kept / n_lines if n_lines else 0
    log(f"Removed {removed} duplicate hashes with selectivity: {selectivity:3.1%}")
def remove_duplicates_sharded(
    files: List[Path],
    outputs: List[Path],
    hashes_dir: FilesOrDir,
    field: str,
    group_hashes: int = 1,
    tmp_dir: Path = None,
    min_len: int = 0,
):
    """Remove duplicates in several passes, when all hashes don't fit in RAM.

    Note: The current implementation is not doing a 'perfect' deduplication.
    If a hash appear exactly once in each shard of hashes it won't be detected
    as a duplicate. This can be fixed if hashes are fully dedup beforehand.
    """
    assert len(files) == len(outputs)

    if isinstance(hashes_dir, list):
        hashes_files = hashes_dir
    else:
        hashes_files = sorted(
            h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
        )
    assert len(hashes_files) > 0, f"no hashes files found in: {hashes_dir}"

    # Fast path: all hash shards fit in one pass.
    if len(hashes_files) <= group_hashes:
        log(f"All hashes can be done in one pass, using DuplicatesRemover on {files}")
        rm_dups = DuplicatesRemover(field, hashes_files)
        rm_dups._prepare()
        run_par(
            (jsonql.run_pipes, (rm_dups,), dict(file=f, output=o))
            for f, o in zip(files, outputs)
        )
        return

    log(f"Starting deduplicate_sharded on {files}.")
    tmp_directory = tempfile.TemporaryDirectory(dir=str(tmp_dir) if tmp_dir else None)

    def tmp_files(i):
        # Per-pass intermediate hash files, one per input file.
        return [
            Path(tmp_directory.name) / (f.name.split(".")[0] + f".{i}.bin")
            for f in files
        ]

    # Pass 0: dump one hash per line for every input file.
    last = tmp_files(0)
    run_par((_dump_sentence_hashes, (f, tmp, field), {}) for f, tmp in zip(files, last))

    # NOTE(review): this recomputes `hashes_files` exactly as above -- the
    # second computation appears redundant; confirm it can be removed.
    if isinstance(hashes_dir, list):
        hashes_files = hashes_dir
    else:
        hashes_files = sorted(
            h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
        )

    # One pass per group of hash shards: zero-out duplicated line hashes.
    for i, group in enumerate(jsonql.grouper(hashes_files, group_hashes)):
        hashes = FlatHashSet()
        for h in group:
            hashes.load(h)
            log(f"Loaded {h}, up to {len(hashes)} hashes ({mem_footprint_gb()}GB)")

        intermediates = tmp_files(i + 1)
        # Remove hashes in parallel. Since modern OS have "copy-on-write" and
        # `hashes` is read-only, we will only have one version of it in RAM.
        run_par(
            (_remove_duplicate_hashes, (hashes, f, tmp), {})
            for f, tmp in zip(last, intermediates)
        )
        # Force hashes to be freed, before we start allocating a new one.
        del hashes
        gc.collect()

        for tmp in last:
            os.remove(tmp)
        last = intermediates

    def finalize(source, dedup_hashes, min_len):
        # Re-read the documents alongside their (possibly zeroed) hashes and
        # strip the duplicated lines.
        n_chars, n_chars_kept = 0, 0
        with open(dedup_hashes, "rb") as hashes:
            for doc in jsonql.read_jsons(source):
                content = doc.get(field)
                if not content or len(content) < min_len:
                    continue
                sentences = content.split("\n")
                doc_hashes = np.fromfile(hashes, dtype=HASH_TYPE, count=len(sentences))
                chars, kept_chars = finalize_doc(doc, field, doc_hashes)
                n_chars += chars
                n_chars_kept += kept_chars
                yield doc
        selectivity = n_chars_kept / n_chars if n_chars else 0
        log(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")

    dedup_hashes = last
    run_par(
        [
            (
                jsonql.run_pipe,
                (finalize,),
                dict(kwargs=dict(dedup_hashes=h, min_len=min_len), file=f, output=o),
            )
            for h, f, o in zip(dedup_hashes, files, outputs)
        ]
    )

    tmp_directory.cleanup()
def compute_hashes(content) -> Optional[np.ndarray]:
    """Return one HASH_TYPE hash per line of `content`, or None for empty input.

    Lines are normalized with `normalize_for_dedup` before hashing so that
    near-identical lines collide.
    """
    if not content:
        return None
    lines = content.split("\n")
    # save hashes as bytes but reinterpret them as uint64.
    hashes = np.fromiter(
        (
            hashlib.sha1(bytes(normalize_for_dedup(l), encoding="utf-8")).digest()[
                :HASH_SIZE
            ]
            for l in lines
        ),
        dtype=np.dtype((bytes, HASH_SIZE)),
        count=len(lines),
    )
    # Zero-copy reinterpretation of the fixed-width byte strings as integers.
    return np.ndarray(dtype=HASH_TYPE, buffer=hashes.data, shape=hashes.shape)
def finalize_doc(doc, field, hashes=None):
    """Drop the lines of doc[field] whose hash is zero or already seen.

    Updates the doc in place (content, nlines, length, line_ids and the
    original_* bookkeeping fields) and returns (n_chars, n_chars_kept).
    When `hashes` is None they are popped from doc[field + "_hash"].
    """
    content = doc.get(field)
    lines = content.split("\n")
    n_chars = len(content)

    # Remember the pre-dedup size the first time we touch this doc.
    if "original_nlines" not in doc:
        doc["original_nlines"] = doc.get("nlines", len(lines))
    if "original_length" not in doc:
        doc["original_length"] = doc.get("length", n_chars)

    if hashes is None:
        hashes = doc.pop(field + "_hash")

    # Remove duplicates inside doc: keep the first occurrence of each hash and
    # skip lines whose hash was zeroed-out (cross-shard duplicates).
    seen = set()
    original_line_ids = doc.get("line_ids", range(len(hashes)))
    kept_ids, kept_lines = [], []
    for line_id, line, line_hash in zip(original_line_ids, lines, hashes):
        if line_hash != 0 and line_hash not in seen:
            kept_ids.append(line_id)
            kept_lines.append(line)
            seen.add(line_hash)

    doc[field] = "\n".join(kept_lines)
    doc["nlines"] = len(kept_ids)
    n_chars_kept = len(doc[field])
    doc["length"] = n_chars_kept
    doc["line_ids"] = kept_ids
    return n_chars, n_chars_kept
class HashesCollector(jsonql.Transformer):
    """
    Collect all hashes found of lines found in the `field` of the source documents.
    """

    # The hash set is shared mutable state, so this stage can't run in parallel.
    parallelisable = False

    def __init__(
        self, field: str, output: Path = None, hashes: AbstractDedupHashSet = None
    ):
        super().__init__()
        self.n_lines = 0
        self.field = field
        self.output = output
        self.hashes = FlatHashSet() if hashes is None else hashes
        self.num_hashes_end = 0
        # Pre-existing hashes are excluded from the summary's "found" count.
        self.num_hashes_start = len(self.hashes)

    def summary(self) -> List[str]:
        summ = super().summary()
        # After close() the set is freed; fall back to the saved final count.
        h = self.num_hashes_end if self.hashes is None else len(self.hashes)
        h = (h - self.num_hashes_start) // 1000
        max_mem = mem_footprint_gb()
        n = self.n_lines // 1000
        summ.append(
            f"Found {h:_}k unique hashes over {n:_}k lines. Using {max_mem:.1f}GB of RAM."
        )
        return summ

    def do(self, doc: dict) -> None:
        doc_hashes = compute_hashes(doc.get(self.field))
        if doc_hashes is None:
            return
        self.hashes.add(doc_hashes)
        self.n_lines += doc_hashes.size

    def close(self):
        if self.output and self.hashes:
            self.hashes.dump(self.output)
            self.log(f"Saved {len(self.hashes)} hashes to {self.output}")
            # Save the number of hashes.
            self.num_hashes_end = len(self.hashes)
            # Free up mem even if the transformer is kept somewhere else.
            self.hashes = None  # type: ignore
class DuplicatesRemover(jsonql.Transformer):
    """Drop from each document the lines whose hash is already known.

    With collect=True, hashes are added on the fly so only the first
    occurrence of a line survives; otherwise hashes are preloaded from
    `hashes_files`.
    """

    # The hashes can't be pickled so they will have to be read back from disk.
    warn_when_pickling = True

    def __init__(self, field: str, hashes_files: List[Path], collect: bool = False):
        """
        Remove duplicates
        """
        super().__init__()
        self.field = field
        self.collect = collect

        self.hashes_files = hashes_files
        self.duplicates: Optional[AbstractDedupHashSet] = None

        self.n_lines, self.n_lines_kept = 0, 0
        self.n_chars, self.n_chars_kept = 0, 0

    def _prepare(self):
        """Lazily load every hash shard into one in-memory FlatHashSet."""
        if self.duplicates is not None:
            return
        self.duplicates = FlatHashSet()

        start = time.time()
        for h in self.hashes_files:
            shard_start = time.time()
            self.duplicates.load(str(h))
            delay = time.time() - shard_start
            self.log(
                f"Loaded hashes from {h} ({mem_footprint_gb():.3f}GB total, took {delay / 60:.1}m)"
            )

        delay = time.time() - start
        self.log(
            f"Loaded {len(self.duplicates):_d} hashes from {len(self.hashes_files)} files. ({mem_footprint_gb():.1f}GB total, took {delay / 60:.1}m)"
        )

    def do(self, doc: dict) -> Optional[dict]:
        content = doc.get(self.field)
        if not content:
            return None
        doc_hashes = compute_hashes(content)

        assert self.duplicates is not None
        seen = (
            self.duplicates.add(doc_hashes)
            if self.collect
            else self.duplicates[doc_hashes]
        )
        # `seen < True` keeps the lines flagged 0 (i.e. not previously seen)
        # -- presumably `seen` is a 0/1 array; confirm in AbstractDedupHashSet.
        keep = seen < True
        kept = keep.sum()
        if kept == 0:
            return None
        # Zero-out the hashes of dropped lines so finalize_doc removes them.
        doc_hashes = doc_hashes * keep

        self.n_lines += keep.size
        self.n_lines_kept += kept
        chars, kept_chars = finalize_doc(doc, self.field, hashes=doc_hashes)
        self.n_chars += chars
        self.n_chars_kept += kept_chars
        return doc

    def summary(self) -> List[str]:
        summ = super().summary()
        end_time = time.time()
        n_lines_kept, n_lines, n_docs = self.n_lines_kept, self.n_lines, self.processed
        speed = n_docs / (end_time - self.start_time)
        summ.append(
            f"Processed {self.n_lines} lines in {n_docs} docs. [{speed:.1f} doc/s]"
        )
        selectivity = self.n_lines_kept / self.n_lines if n_lines else 0
        summ.append(f"Kept {n_lines_kept} lines out of {n_lines} ({selectivity:.1%}).")

        n_chars_kept, n_chars = self.n_chars_kept, self.n_chars
        selectivity = n_chars_kept / n_chars if n_chars else 0
        summ.append(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
        return summ
def deduplicate(
    file: jsonql.ReadableFileLike, field: str = "raw_content"
) -> Iterable[dict]:
    """Remove duplicates of the given file (but keep the first occurence)."""
    remover = DuplicatesRemover(field, [], collect=True)
    documents = jsonql.read_jsons(file)
    return remover.map(documents)
def deduplicate_two_pass(
    file: jsonql.FileDescriptor, field: str = "raw_content"
) -> Iterable[dict]:
    """Remove duplicates of the given file (even removing the first occurence).

    This is what is done in the paper, and in mine.py
    """
    try:
        if isinstance(file, Path):
            hash_file: Path = file.with_suffix(".bin")
        else:
            hash_file = jsonql._tmp(Path("hashes.bin"))
        # First pass: collect every line hash into `hash_file`.
        jsonql.run_pipes(
            jsonql.JsonReader(), HashesCollector(field, output=hash_file), file=file
        )
        # Second pass: drop every line whose hash was seen (including firsts).
        dup_remover = DuplicatesRemover(field, [hash_file])
        return dup_remover.map(jsonql.read_jsons(file))
    finally:
        # NOTE(review): this `finally` runs as soon as the function returns,
        # i.e. possibly before the returned (lazy) iterator is consumed --
        # confirm DuplicatesRemover loads `hash_file` before the unlink.
        if hash_file.exists():
            hash_file.unlink()
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import logging
import re
import tempfile
import time
import urllib.request
from pathlib import Path
from typing import ContextManager, Iterable, Iterator, List, Optional, Sequence
from urllib.parse import urlparse
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql
WET_URL_ROOT = "https://commoncrawl.s3.amazonaws.com"
logger = logging.getLogger(__name__)
def cc_wet_paths_url(dump_id: str) -> str:
    """URL of the wet.paths.gz index file for one Common Crawl dump."""
    return f"{WET_URL_ROOT}/crawl-data/CC-MAIN-{dump_id}/wet.paths.gz"
@functools.lru_cache()
def cc_segments(dump_id: str, cache_dir: Path = None) -> List[str]:
    """List the WET segment paths of one dump.

    The index file is cached on disk under `cache_dir` (or a tmp dir) and the
    parsed result is additionally memoized in-process by lru_cache.
    """
    wet_paths = cc_wet_paths_url(dump_id)
    cache_dir = cache_dir or jsonql._tmp_dir()
    wet_paths_cache = cache_dir / f"wet_{dump_id}.paths.gz"
    f = jsonql.open_remote_file(wet_paths, cache=wet_paths_cache)
    return [segment.strip() for segment in f]
def list_dumps() -> List[str]:
    """Scrape index.commoncrawl.org and return the available dump ids, sorted."""
    index_page = BeautifulSoup(
        urllib.request.urlopen("http://index.commoncrawl.org/"), features="html.parser"
    )
    hrefs = (a.get("href").strip("/") for a in index_page.findAll("a"))
    # Keep only "CC-MAIN-YYYY-WW" entries, stripped of the "CC-MAIN-" prefix.
    dump_ids = [h[8:] for h in hrefs if re.match(r"^CC-MAIN-\d\d\d\d-\d\d$", h)]
    return sorted(dump_ids)
def ls():
    """Print every known dump id together with its wet.paths.gz URL."""
    for dump_id in list_dumps():
        print(dump_id, "->", cc_wet_paths_url(dump_id))
def parse_doc(headers: List[str], doc: List[str]) -> Optional[dict]:
    """Parse one WET record into a document dict, or None when unusable.

    Headers format is:
    WARC/1.0
    WARC-Type: conversion
    WARC-Target-URI: [url]
    WARC-Date: [crawldate: 2019-02-15T19:15:59Z]
    WARC-Record-ID: <urn:uuid:8865156e-d5f1-4734-9c68-4b46eaf2bb7e>
    WARC-Refers-To: <urn:uuid:340152e2-65cf-4143-b522-8ce4e2d069d7>
    WARC-Block-Digest: sha1:S3DTWCONT2L6ORTGCY2KXEZ37LNBB7V2
    Content-Type: text/plain
    Content-Length: 7743
    """
    if not headers or not doc:
        return None

    try:
        # Fields are addressed positionally, per the header layout above.
        warc_type = headers[1].split()[1]
        if warc_type != "conversion":
            return None
        url = headers[2].split()[1]
        date = headers[3].split()[1]
        digest = headers[6].split()[1]
        length = int(headers[8].split()[1])
    except Exception as e:
        # Bug fix: logging uses %-style lazy formatting. The previous
        # print-style call (message, e, headers, doc) passed positional args
        # with no placeholders, which errors inside the logging module.
        logger.warning("Can't parse header: %s %s %s", e, headers, doc)
        return None

    # Docs are separated by two empty lines.
    last = None
    if not doc[-1] and not doc[-2]:
        last = -2
    title, doc = doc[0], doc[1:last]

    return {
        "url": url,
        "date_download": date,
        "digest": digest,
        "length": length,
        "nlines": len(doc),
        "source_domain": urlparse(url).netloc,
        "title": title,
        "raw_content": "\n".join(doc),
    }
def group_by_docs(warc_lines: Iterable[str]) -> Iterable[dict]:
    """Split a stream of WET lines into parsed documents.

    A document is a header section (terminated by an empty line) followed by
    content lines; a "WARC/1.0" marker starts the next document.
    """
    doc: List[str] = []
    headers: List[str] = []
    read_headers = True
    for raw in warc_lines:
        line = raw.strip()
        if read_headers:
            headers.append(line)
            # An empty line terminates the header section.
            read_headers = line != ""
            continue

        if line == "WARC/1.0":
            # We reached the beginning of the new doc.
            parsed = parse_doc(headers, doc)
            if parsed is not None:
                yield parsed
            headers, doc, read_headers = [line], [], True
            continue

        doc.append(line)

    # Return the last document
    if doc:
        parsed = parse_doc(headers, doc)
        if parsed is not None:
            yield parsed
def parse_warc_file(lines: Iterable[str], min_len: int = 1) -> Iterator[dict]:
    """Yield the documents of a WET file whose raw_content has >= min_len chars."""
    total = 0
    kept = 0
    for doc in group_by_docs(lines):
        total += 1
        if doc and len(doc["raw_content"]) >= min_len:
            kept += 1
            yield doc

    if total > 0:
        logger.info(f"Kept {kept:_d} documents over {total:_d} ({kept / total:.1%}).")
    else:
        logger.info(f"Found no documents")
def dl(
    dump: str,
    shard: int,
    num_shards: int,
    output: Path = None,
    num_segments_per_shard: int = 0,
):
    """Download a shard of the common crawl, and export it to json.

    Arguments:
        output: filename of the output file
        dump: CC dump id
        shard: id of the shard
        num_shards: total number of shards
        num_segments_per_shard: manual control of the number of segment per shard.
    """
    reader = CCShardReader(dump, shard, num_shards, num_segments_per_shard)
    # Streams the parsed documents straight into the (json) output file.
    jsonql.run_pipes(inputs=reader, output=output)
    logger.info(f"Done. {output} is ready.")
class CCSegmentsReader(Iterable[dict]):
    """Iterate over the parsed documents of a list of Common Crawl WET segments.

    Segments are optionally cached on disk under `cache_dir`.
    """

    def __init__(
        self, segments: Sequence[str], min_len: int = 0, cache_dir: Path = None
    ):
        self._segments = segments
        self.min_len = min_len
        if cache_dir is not None:
            cache_dir = Path(cache_dir)
            cache_dir.mkdir(exist_ok=True)
        self.cache_dir = cache_dir
        # Number of segments actually fetched (cache misses).
        self.retrieved_segments = 0

    def segment_url(self, segment: str):
        return "/".join((WET_URL_ROOT, segment))

    @property
    def segments(self) -> Sequence[str]:
        return self._segments

    def open_segment(self, segment: str) -> Iterable[str]:
        """Open one segment, downloading (and optionally caching) it when needed."""
        url = self.segment_url(segment)
        file: Optional[Path] = None
        if self.cache_dir:
            file = self.cache_dir / segment.split("/")[-1]
        if not file or not file.exists():
            self.retrieved_segments += 1

        return jsonql.open_remote_file(url, cache=file)

    def __iter__(self) -> Iterator[dict]:
        n = len(self.segments)
        for i, segment in enumerate(self.segments):
            start = time.time()
            # TODO: start downloading the next segment in the background
            for doc in parse_warc_file(self.open_segment(segment), self.min_len):
                doc["cc_segment"] = segment
                yield doc

            if i + 1 >= n:
                continue
            end = time.time()
            # Remaining-time estimate extrapolated from the last segment alone.
            delay = (end - start) / 3600 * (n - 1 - i)
            logger.info(
                f"Parsed {i + 1} / {n} files. Estimated remaining time: {delay:.1f}h"
            )
class CCShardReader(CCSegmentsReader):
    def __init__(
        self,
        dump: str,
        shard: int,
        num_shards: int = -1,
        num_segments_per_shard: int = 40,
        min_len: int = 300,
        cache_dir: Path = None,
    ):
        """Downloads a shard of Common Crawl, and yields dict.

        Arguments:
            dump: CC dump id
            shard: id of the shard
            num_shards: total number of shards
            num_segments_per_shard: if set will limit the number of files by shard.
                Useful for testing.
        """
        super().__init__([], min_len=min_len, cache_dir=cache_dir)
        self.dump = dump
        self.shard = shard
        # Either an explicit shard count or a per-shard segment budget is needed
        # to derive the segment slice below.
        assert num_shards > 0 or num_segments_per_shard > 0
        self.num_shards = num_shards
        self.num_segments_per_shard = num_segments_per_shard

    @property
    def segments(self) -> Sequence[str]:
        # Delaying the initialization allows to delay the looking up of the WET files
        if self._segments:
            return self._segments
        segments = cc_segments(self.dump, self.cache_dir)
        n = len(segments)
        if self.num_shards < 0:
            # Derive the shard count from the per-shard segment budget.
            self.num_shards = n // self.num_segments_per_shard
        i_min = (self.shard * n) // self.num_shards
        i_max = ((self.shard + 1) * n) // self.num_shards
        if self.num_segments_per_shard > 0:
            i_max = min(i_max, i_min + self.num_segments_per_shard)
        self._segments = segments[i_min:i_max]
        return self._segments
def _tmp(prefix: str = None, suffix: str = None, dir: Path = None) -> Path:
_, tmp_path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
return Path(tmp_path)
@contextlib.contextmanager
def timer(name: str = "-"):
    """Context manager that prints the wall-clock duration of its body."""
    start = time.time()
    yield None
    elapsed = time.time() - start
    print(f"{name} took {elapsed:.1f}s")
def benchmark(tmp_path: Path):
    """Compare segment reading speed: network, network + cache, then local disk."""
    segments = [
        "crawl-data/CC-MAIN-2019-09/segments/1550249406966.99/wet/CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"
    ]
    seg_file = tmp_path / "CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"

    with timer("from network"):
        list(CCSegmentsReader(segments))
    with timer("from network, with caching"):
        list(CCSegmentsReader(segments, cache_dir=tmp_path))
    assert seg_file.exists()
    with timer("from disk"):
        # Bug fix: the reader must actually be consumed (list) -- the previous
        # code only constructed it, so the "from disk" timing measured nothing.
        list(CCSegmentsReader(segments, cache_dir=tmp_path))
    seg_file.unlink()
# Command line entry point: expose `ls` and `dl` as sub-commands.
if __name__ == "__main__":
    func_argparse.main(ls, dl)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import func_argparse
import cc_net.mine
def main():
    """Dispatch the cc_net mining sub-commands through func_argparse."""
    func_argparse.parse_and_call(cc_net.mine.get_main_parser())


if __name__ == "__main__":
    main()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
from pathlib import Path
from typing import Dict, Optional
import fasttext # type: ignore
from cc_net import jsonql
def get_args():
    """Build the CLI flags of the fastText classification tool as a dict.

    Inherits the common input/output flags from `jsonql.io_parser()`.
    """
    parser = argparse.ArgumentParser(
        description="Read a list of json files and split them ",
        parents=[jsonql.io_parser()],
    )
    parser.add_argument("--pattern", type=str)
    parser.add_argument("--field", type=str, default="raw_content")
    parser.add_argument("--threshold", type=float, default=0)
    parser.add_argument("--model", type=str, required=True)
    parser.add_argument("--out_field", type=str, default="language")
    parser.add_argument("--top", type=int, default=1)
    return vars(parser.parse_args())
def predict(model, text: str, k: int = 1):
    """Return the model's top-k labels (stripped of the "__label__" prefix) and scores."""
    raw_labels, scores = model.predict(text, k=k)
    clean_labels = [label.replace("__label__", "") for label in raw_labels]
    return clean_labels, scores
def avg_predict(model, text):
    """Predict one label for a multi-line text by averaging per-line scores,
    weighted by line length. Returns (label, avg_score), or (None, 0) for
    empty input.
    """
    # Overall gives the same results than predict(model, text.replace("\n", ""))
    lines = text.split("\n")
    text_len = sum(len(line) for line in lines)
    if text_len == 0:
        return None, 0
    scores_by_label: Dict[str, float] = collections.defaultdict(float)
    for line in lines:
        labels, line_scores = predict(model, line)
        # Bug fix: predict returns parallel sequences; the previous code used
        # the whole labels *list* as the dict key (unhashable -> TypeError).
        # With the default k=1 the top prediction is element 0 of each.
        scores_by_label[labels[0]] += line_scores[0] * len(line)

    label, score = max(scores_by_label.items(), key=lambda kv: kv[1])
    return label, score / text_len
class Classifier(jsonql.Transformer):
    """Annotate documents with fastText predictions over `field`.

    With top=1 the best label goes to `out_field` (score in `out_field`_score);
    with top>1 a label -> score dict is stored. Documents whose scores all fall
    below `threshold` are dropped.
    """

    def __init__(
        self,
        model: Path,
        field: str,
        out_field: str,
        threshold: float = 0,
        top: int = 1,
        language: str = None,
        rounding: int = 2,
    ):
        super().__init__()
        self.model = model
        assert model.exists(), f"Model {model} doesn't exist."
        self.field = field
        self.out_field = out_field
        self.threshold = threshold
        self.top = top
        self.language = language
        self.rounding = rounding
        # Fasttext model is a C object and can't be pickled
        self.fasttext_model: fasttext._FastText = None
        self.n_doc, self.n_accepted, self.n_ignored, self.n_disagreement = 0, 0, 0, 0
        self.cnt: Dict[str, int] = {}

    def _prepare(self):
        # Loaded lazily (after any fork) because the model can't be pickled.
        self.log(f"Loading {self.model}")
        self.fasttext_model = fasttext.load_model(str(self.model))

    def predict(self, text):
        # Newlines are stripped: fastText predicts over a single line.
        return predict(self.fasttext_model, text.replace("\n", ""), k=self.top)

    def do(self, doc: dict) -> Optional[dict]:
        text = doc.get(self.field, None)
        if not text:
            return None

        if self.language and doc.get("language") != self.language:
            # Only classify documents of the configured language.
            self.n_ignored += 1
            return doc

        self.n_doc += 1
        labels, scores = self.predict(text)
        scores.round(self.rounding, out=scores)
        for l in labels:
            self.cnt[l] = self.cnt.get(l, 0) + 1

        if self.top == 1:
            # Track disagreement with any label previously stored in out_field.
            existing_label = doc.get(self.out_field, None)
            if existing_label and labels[0] != existing_label:
                self.n_disagreement += 1

        if all(s < self.threshold for s in scores):
            return None

        self.n_accepted += 1
        if self.top == 1:
            doc[self.out_field] = labels[0]
            doc[self.out_field + "_score"] = scores[0]
        else:
            doc[self.out_field] = {l: s for l, s in zip(labels, scores)}
        return doc

    def summary(self):
        n_doc, n_accepted, n_disagreement, cnt, out_field = (
            self.n_doc,
            self.n_accepted,
            self.n_disagreement,
            self.cnt,
            self.out_field,
        )
        summ = super().summary()
        if self.threshold > 0:
            ratio = n_accepted / n_doc if n_doc else 0
            summ.append(f"Kept {n_accepted} docs over {n_doc} ({ratio :.1%})")
        summ.append(f"Found {len(cnt)} {out_field} labels: {cnt}")

        disagreement = n_disagreement / n_doc if n_doc else 0
        if disagreement:
            summ.append(f"{out_field} disagreement is at {disagreement:.1%}.")
        return summ

    def __repr__(self):
        return f"Classifier({self.model})"
def classify_and_split(file, output, pattern, **kwargs):
    """Classify the documents, then split the stream into files following `pattern`."""
    classifier = Classifier(**kwargs)
    splitter = jsonql.split(pattern)
    jsonql.run_pipes(classifier, splitter, file=file, output=output)
# Script entry point: with --pattern, classify then split into per-label files;
# otherwise run the Classifier as a plain jsonql pipe.
if __name__ == "__main__":
    args = get_args()
    pattern = args.get("pattern")
    if pattern:
        classify_and_split(**args)
    else:
        args.pop("pattern")
        jsonql.run_pipe(Classifier, args)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import gzip
import logging
import multiprocessing
from collections import defaultdict
from pathlib import Path
from typing import Callable, Dict, Iterator, List, NamedTuple, Optional, Tuple
import cc_net
from cc_net import jsonql
from cc_net.process_wet_file import CCSegmentsReader
# Set this to a directory to use as cache for intermediary files.
# This helps for debugging.
WET_CACHE = None
# WET_CACHE = Path("wet_cache")
S3_BUCKET = "https://dl.fbaipublicfiles.com/cc100"
VERSION = "1.0.0"
CC_100_SNAPSHOTS = [
"2018-05",
"2018-09",
"2018-13",
"2018-17",
"2018-22",
"2018-26",
"2018-30",
"2018-34",
"2018-39",
"2018-43",
"2018-47",
"2018-51",
]
BIG_LANGUAGES = {
"es_XX",
"fr_XX",
"de_DE",
"ja_XX",
"ru_RU",
"zh_CN",
"en_XX",
"it_IT",
"ar_AR",
"nl_XX",
"pl_PL",
"pt_XX",
"tr_TR",
"zh_TW",
}
class Paragraph(NamedTuple):
    # One scored paragraph extracted from a CC100 document.
    lang: str  # language id, e.g. "en_XX" (see the metadata sample above)
    text: str  # paragraph text
    lm_score: float  # language-model score of the paragraph
def _dl_shard(snapshot: str, shard: int) -> Iterator[Paragraph]:
    """
    Download metadata from a shards.

    Sample metadata:

    {
        "cc_segment": "crawl-data/CC-MAIN-2018-51/segments/1544376823009.19/wet/CC-MAIN-20181209185547-20181209211547-00000.warc.wet.gz",
        "digest": "sha1:222LWNHN5FM26XGS7WJSMI6IISTVWBKJ",
        "url": "http://personals.gearplay.com/ads/DRJONES.htm",
        "line_ids": [10],
        "languages": ["en_XX"],
        "lm_scores": [-2.658],
    }
    """
    # Bug fix: keep the original dump id. The previous code rewrote `snapshot`
    # with underscores up-front, so the `snapshot != "2018-51"` test below
    # could never match and big languages were always skipped.
    name = f"snap_{snapshot.replace('-', '_')}_batch_{shard}.json.gz"
    url = "/".join([S3_BUCKET, VERSION, name])
    shard_metadata: Dict[str, Dict[str, dict]] = defaultdict(dict)
    try:
        cache_file: Optional[Path] = None
        if WET_CACHE is not None:
            cache_file = WET_CACHE / name
        metadata_file = jsonql.open_remote_file(url, cache_file)
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt / SystemExit.
        logging.warning(f"Couldn't open {url}")
        return

    # Group the metadata by segment, then by document digest.
    for meta in jsonql.read_jsons(metadata_file):
        shard_metadata[meta["cc_segment"]][meta["digest"]] = meta

    found_pars, missed_pars = 0, 0
    for seg, segment_metadata in shard_metadata.items():
        # Re-read the original WET segment and join it with the metadata.
        for doc in CCSegmentsReader([seg], cache_dir=WET_CACHE):
            if doc["digest"] not in segment_metadata:
                continue
            meta = segment_metadata[doc["digest"]]
            # line_ids index into the title followed by the content lines.
            full_pars = [doc["title"]] + doc["raw_content"].split("\n")

            assert len(meta["line_ids"]) == len(meta["languages"])
            assert len(meta["line_ids"]) == len(meta["lm_scores"])
            for i, lang, score in zip(
                meta["line_ids"], meta["languages"], meta["lm_scores"]
            ):
                if snapshot != "2018-51" and lang in BIG_LANGUAGES:
                    # Big languages only come from "2018-51" snapshot
                    continue
                if i >= len(full_pars):
                    # This is because CC100 was created by saving only urls.
                    # Some urls appears in different snapshot with slightly different
                    # versions, but we don't know which one is correct.
                    # Here we read both versions, but some index may end up
                    # being incorrect.
                    # This impact ~3% documents.
                    missed_pars += 1
                    continue

                yield Paragraph(lang, full_pars[i], score)
                found_pars += 1
    if missed_pars > 0:
        logging.warning(
            f"Missed {missed_pars} ({missed_pars / found_pars:%}) paragraphes."
        )
def _split_by_par(
    paragraphes: Iterator[Paragraph], snapshot: str, shard: int, outdir: Path
) -> int:
    """Write each paragraph into a per-language gzip file under `outdir`.

    Returns the number of paragraphs written.
    """
    outdir.mkdir(exist_ok=True)
    outfiles = {}
    num_pars = 0
    try:
        for par in paragraphes:
            # MODIFY ME: filter paragraph if needed (languages, score, ...)
            if par.lang not in outfiles:
                # Lazily open one output file per language.
                (outdir / par.lang).mkdir(exist_ok=True)
                outfile = outdir / par.lang / f"snap_{snapshot}_batch_{shard}.gz"
                outfiles[par.lang] = gzip.open(outfile, "wt")

            print(par.text, file=outfiles[par.lang])
            num_pars += 1
    finally:
        # Always close the gzip handles, even when an error interrupts the loop.
        for o in outfiles.values():
            o.close()

    logging.info(f"Extracted {num_pars:_d} paragraphs from shard {snapshot}_{shard}")
    return num_pars
def dl_shard(snapshot: str, shard: int, outdir: Path) -> int:
    """Download one shard and split it per language; return the paragraph count."""
    paragraphs = _dl_shard(snapshot, shard)
    return _split_by_par(paragraphs, snapshot, shard, outdir)
@contextlib.contextmanager
def unordered_map(processes: int):
    """Yield a map-like callable: builtin `map` when processes == 0, otherwise
    the `imap_unordered` of a freshly created pool."""
    if processes:
        with multiprocessing.Pool(processes) as pool:
            yield pool.imap_unordered
    else:
        yield map
def dl_snapshot(snapshot: str, outdir: Path, processes: int = 1) -> None:
    """Download all 500 shards of one snapshot, optionally in parallel."""
    # Bind snapshot/outdir so the mapped worker only takes the shard number.
    _dl_shard = functools.partial(dl_shard, snapshot, outdir=outdir)

    with unordered_map(processes) as umap:
        num_pars = sum(umap(_dl_shard, range(500)))

    logging.info(f"Extracted {num_pars:_d} paragraphs from snapshot {snapshot}.")
def dl(
    snapshot: str = None, outdir: Path = Path("data_cc100"), processes: int = 1
) -> None:
    """
    Download CC100 corpus.
    Will create one text file per language and CC snapshot.
    - snapshot: restrict to one snapshot. Useful for parallelization.
    - outdir: output directory
    - processes: number of processes to use
    """
    # NOTE(review): docstring doubles as the func_argparse CLI help — keep in sync.
    if snapshot is None:
        snapshots = CC_100_SNAPSHOTS
    else:
        # A comma-separated list of snapshots is accepted.
        snapshots = snapshot.split(",")
    invalids = [s for s in snapshots if s not in CC_100_SNAPSHOTS]
    assert not invalids, f"Invalid snapshots {invalids}, chose from {CC_100_SNAPSHOTS}"

    for snapshot in snapshots:
        dl_snapshot(snapshot, outdir, processes)
if __name__ == "__main__":
import func_argparse
func_argparse.single_main(dl)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This code is used to train a fastText classifier to label document with DMOZ categories.
The data, distributed under the cc-by 3.0 license
(https://web.archive.org/web/20140605215533/http://www.dmoz.org/license.html),
can be downloaded from
https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz.
"""
import urllib.request
from io import StringIO
from pathlib import Path
from typing import Dict, Set
from urllib.parse import urlparse
import func_argparse
from lxml import etree # type: ignore
from cc_net import jsonql
TaggedUrls = Dict[str, Set[str]]
DMOZ_TAGS_URL = "https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz"
def add_tags(url: str, tags: Set[str], url2tags: TaggedUrls):
    """Record *tags* for *url*; a url seen several times keeps only the
    intersection of all its tag sets."""
    existing = url2tags.get(url)
    if existing is None:
        url2tags[url] = tags
    else:
        # In-place intersection, same as the original `&=` on the stored set.
        existing &= tags
def load_tags(filename: Path = None) -> TaggedUrls:
    """Parse a DMOZ RDF dump into a mapping from url/domain to its set of tags."""
    if filename is None:
        # No local file: stream the dump from the web archive.
        with StringIO("".join(jsonql.open_remote_file(DMOZ_TAGS_URL))) as dmoz:
            tree = etree.parse(dmoz)
    else:
        tree = etree.parse(str(filename))

    root = tree.getroot()
    url2tags: Dict[str, Set[str]] = {}
    for external_page in root.iterfind("{http://dmoz.org/rdf/}ExternalPage"):
        url = external_page.get("about")
        domain = urlparse(url).netloc
        for topic in external_page.iterfind("{http://dmoz.org/rdf/}topic"):
            # print(url, topic.text)
            # Tags looks like Top/Arts/Animation/Anime/Collectibles
            tags = set(topic.text.split("/")[1:])
            # Tag both the full url and its bare domain; repeated entries keep
            # only the intersection of their tags (see add_tags).
            add_tags(url, tags, url2tags)
            add_tags(domain, tags, url2tags)
    return url2tags
def dl(output: Path) -> None:
    """Download the DMOZ RDF dump to *output* (network I/O only)."""
    urllib.request.urlretrieve(DMOZ_TAGS_URL, output)
def make_corpus(file: Path, tags_file: Path = None, output: Path = None) -> None:
    """
    Loads a tags file and create a training dataset using the given webpages.
    Arguments:
    - file: CC shard file
    - tags_file: dmoz tagging file, (like the one produced by `dl`)
    - output: ""
    """
    url2tags = load_tags(tags_file)
    with jsonql.open_write(output) as o:
        for document in jsonql.read_jsons(file):
            if not document:
                continue
            url = document["url"]
            domain = document["source_domain"]

            # Prefer tags for the exact url, fall back to the whole domain.
            if url in url2tags:
                tags = url2tags[url]
            elif domain in url2tags:
                tags = url2tags[domain]
            else:
                continue

            if len(tags) == 0:
                continue

            fasttext_tags = ["__label__" + tag for tag in tags]
            content = document["tokenized"].replace("\n", " ").lower()
            # Keep only documents longer than 200 characters.
            if len(content) > 200:
                print(" ".join(fasttext_tags), content, file=o)  # type: ignore
if __name__ == "__main__":
func_argparse.single_main(make_corpus)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to search sentences in CC similar to sentences in another corpus.
"""
import functools
import logging
import math
import subprocess
from collections import Counter
from pathlib import Path
from typing import Iterable, List, Optional, Set, Tuple
import func_argparse
import submitit
from kenlm import Model as KenlmModel # type: ignore
from sentence_splitter import SentenceSplitter # type: ignore
from sentencepiece import SentencePieceProcessor # type: ignore
from cc_net import dedup, jsonql, perplexity, text_normalizer
KENLM = Path("./bin/lmplz")
KENLM_BUILD = Path("./bin/build_binary")
VOCAB_SIZE = 2 ** 16 - 10
PROCESSES = 16
def normalize(corpus: Path, output_dir: Path) -> Path:
    """Normalize *corpus* into `<stem>.normalized`; cached if it already exists."""
    normalized = output_dir / (corpus.stem + ".normalized")
    if normalized.exists():
        return normalized

    print("Will normalize", corpus, "to", normalized)
    jsonql.run_pipes(
        jsonql.Mapper(text_normalizer.normalize),
        file=corpus,
        output=normalized,
        processes=PROCESSES,
    )
    return normalized
# TODO use classic files directory.
def sp_model(lang: str) -> Path:
    """Path of the pretrained SentencePiece model for *lang*."""
    return Path("/checkpoint/guw/cc_clean/lm_sp") / f"{lang}.sp.model"
def _dataset(dataset: Optional[Path], lang: str) -> Path:
return (
dataset
or Path("/datasets01_101/common_crawl/020919") / f"{lang}_head_*.json.gz"
)
class SentencePiece(jsonql.Transformer):
    """Tokenize each input line into space-separated SentencePiece pieces."""

    def __init__(self, model: Path):
        super().__init__()
        self.model = model
        # Loaded lazily in _prepare — presumably invoked by jsonql.Transformer
        # before the first do() call; confirm against jsonql.
        self.sp: SentencePieceProcessor = None  # type: ignore

    def _prepare(self):
        self.sp = SentencePieceProcessor()
        self.sp.load(str(self.model))

    def do(self, line: str) -> str:
        return " ".join(self.sp.encode_as_pieces(line))
class ExtractSentences(jsonql.Transformer):
    """Split documents into sentences, dedup them across all documents seen,
    score each with a KenLM model, and emit one tab-separated
    "perplexity<TAB>sentence" line per sentence kept under `threshold`."""

    def __init__(
        self,
        sp_model: Path,
        lm_model: Path,
        field: str = "raw_content",
        threshold: float = float("+inf"),
    ):
        super().__init__()
        self.sp_model = sp_model
        self.lm_model = lm_model
        self.field = field
        self.threshold = threshold
        # Heavy resources are loaded lazily in _prepare, not in __init__.
        self.sp: SentencePieceProcessor = None
        self.lm: KenlmModel = None
        self.splitter: SentenceSplitter = None
        # Hashes of every sentence already emitted by this transformer instance.
        self.hashes: Set[int] = set()

    def _prepare(self):
        self.sp = SentencePieceProcessor()
        self.sp.load(str(self.sp_model))
        self.splitter = SentenceSplitter("en")
        self.lm = KenlmModel(str(self.lm_model))

    def do(self, document: dict) -> Optional[str]:
        content: Optional[str] = document.get(self.field)
        if not content:
            return None
        all_sentences = [
            s for l in content.split("\n") if l for s in self.splitter.split(text=l)
        ]
        # Keep only sentences never seen before (across documents).
        unique_sentences = []
        for s in all_sentences:
            if not s:
                continue
            h = dedup.str_hash(s)
            if h in self.hashes:
                continue
            self.hashes.add(h)
            unique_sentences.append(s)

        scores = []
        for sentence in unique_sentences:
            normalized = text_normalizer.normalize(sentence)
            pieces = self.sp.encode_as_pieces(normalized)
            log_score = self.lm.score(" ".join(pieces))
            # pp stays -1 for empty tokenizations, filtered out below (pp > 0).
            pp = -1
            if len(pieces):
                pp = perplexity.pp(log_score, len(pieces))
            scores.append(pp)

        # Keep (perplexity, sentence) pairs strictly between 0 and threshold.
        res = filter(
            lambda pp_s: self.threshold > pp_s[0] > 0, zip(scores, unique_sentences)
        )
        return "\n".join(f"{pp}\t{s}" for (pp, s) in res) or None
def tokenize(corpus: Path, output_dir: Path, lang: str) -> Path:
    """SentencePiece-tokenize *corpus* into `<stem>.tokenized`; cached if present."""
    tokenized = output_dir / (corpus.stem + ".tokenized")
    if tokenized.exists():
        return tokenized

    print("Will SentencePiece", corpus, "to", tokenized)
    jsonql.run_pipes(
        SentencePiece(sp_model(lang)),
        # Input is normalized first (also cached).
        file=normalize(corpus, output_dir),
        output=tokenized,
        processes=PROCESSES,
    )
    return tokenized
def train_lm(
    corpus: Path,
    output_dir: Path,
    lang: str = "en",
    vocab_size: int = VOCAB_SIZE,
    ngrams: int = 5,
):
    """Train a KenLM *ngrams*-gram model on *corpus*, binarize it, and return
    the `.arpa.bin` path; cached if the binary model already exists."""
    lm_text_file = output_dir / (corpus.stem + ".arpa")
    lm_bin_file = output_dir / (corpus.stem + ".arpa.bin")
    if lm_bin_file.exists():
        return lm_bin_file

    assert KENLM.exists(), f"{KENLM} binary to train kenlm model not found."

    normalized = normalize(corpus, output_dir)
    # NOTE(review): tokenize() normalizes its input again, so this creates a
    # ".normalized.normalized" intermediate — presumably harmless; confirm.
    tokenized = tokenize(normalized, output_dir, lang)

    print("Will train LM", lm_text_file, "on", tokenized)
    kenlm_cmd = [
        str(KENLM),
        f"--order={ngrams}",
        "--memory=8G",
        f"--temp_prefix={jsonql._tmp_dir()}",
        f"--text={tokenized}",
        f"--arpa={lm_text_file}",
        f"--vocab_estimate={vocab_size}",
        "--discount_fallback",
    ]
    subprocess.run(kenlm_cmd, check=True)
    print("Will create binary model", lm_bin_file, "from", lm_text_file)
    subprocess.run([str(KENLM_BUILD), str(lm_text_file), str(lm_bin_file)], check=True)
    return lm_bin_file
def uniform_sampling_wrt_perplexity(
    paragraphes: Iterable[str],
    rounding: float = 100.0,
    cut: float = 1000.0,
    samples: int = 20,
) -> Iterable[str]:
    """Yield lines so that every perplexity bucket of width *rounding* (up to
    *cut*) contributes roughly the same number of samples.

    Input lines look like "<perplexity>\\t<sentence>".
    """
    max_samples = math.floor(cut / rounding * samples)
    n = 0
    buckets = Counter([0.0])
    logging.info(f"Will sample {max_samples} sentences.")
    for chunk in paragraphes:
        for line in chunk.split("\n"):
            if not line:
                continue
            # Bucket by rounding the leading perplexity down to a multiple
            # of `rounding`.
            pp = math.floor(float(line[: line.find("\t")]) / rounding) * rounding
            if pp > cut or buckets[pp] > samples:
                continue
            yield line
            buckets[pp] += 1
            if buckets[pp] > samples:
                logging.info(f"Bucket {pp} is full ({samples} samples, {n} total)")
            n += 1
            if n > max_samples:
                return
def sample(
    corpus: Path,
    output_dir: Path,
    dataset: Path = None,
    n: int = 10_000,
    lang: str = "en",
) -> Path:
    """Sample ~*n* CC sentences, uniformly w.r.t. the perplexity of an LM
    trained on *corpus*; returns the sorted tsv sample file (cached)."""
    sample_file = output_dir / (corpus.stem + ".pp_sample.tsv")
    if sample_file.exists():
        return sample_file
    dataset = _dataset(dataset, lang)
    extractor = ExtractSentences(
        sp_model(lang), train_lm(corpus, output_dir), field="raw_content"
    )
    sampling = functools.partial(
        uniform_sampling_wrt_perplexity, rounding=100.0, cut=1000.0, samples=n // 10
    )

    print(f"Will sample data from {dataset} to {sample_file}")
    try:
        jsonql.run_pipes(
            extractor, sampling, file=dataset, output=sample_file, processes=PROCESSES
        )
    except Exception:
        # Don't leave a partial sample behind — but only unlink when the file
        # was actually created, otherwise FileNotFoundError would mask the
        # original error.
        if sample_file.exists():
            sample_file.unlink()
        raise

    subprocess.run(["sort", "-n", "-o", sample_file, sample_file], check=True)
    subprocess.run(["head", sample_file], check=True)
    return sample_file
def mine(
    corpus: Path,
    output_dir: Path,
    threshold: float,
    dataset: Path = None,
    lang: str = "en",
) -> List[Path]:
    """Search sentences in CC similar to the one in the given corpus.
    Args:
    - corpus: corpus to train the LM one. Assumes one sentence per line.
    - output_dir: where to store the results
    - threshold: maximum perplexity to have
    - dataset: glob pattern matching CC shards.
    - lang: search in the files of this language
    """
    dataset = _dataset(dataset, lang)
    files = list(dataset.parent.glob(dataset.name))
    outputs = [output_dir / (f.stem + ".tsv") for f in files]
    # Cached: skip submission when every shard output already exists.
    if all(o.exists() for o in outputs):
        return outputs

    # One job per shard; sp/lm/threshold are broadcast to every job.
    n = len(outputs)
    sp = [sp_model(lang)] * n
    lm = [train_lm(corpus, output_dir)] * n
    thresholds = [threshold] * n

    ex = submitit.AutoExecutor(output_dir / "mining_logs")
    ex.update_parameters(
        name="mine",
        cpus_per_task=PROCESSES,
        timeout_min=60 * 24 // PROCESSES,
        mem_gb=10,
    )
    jobs = ex.map_array(_mine, files, outputs, sp, lm, thresholds)
    print("Submited job array:", jobs[0])

    for j in submitit.helpers.as_completed(jobs):
        (i, o) = j.result()
        print("Mined sentences from", i, "to", o)

    return outputs
def _mine(
    file: Path, output: Path, sp: Path, lm: Path, threshold: float
) -> Tuple[Path, Path]:
    """Worker for mine(): extract sentences under *threshold* perplexity from one shard."""
    extractor = ExtractSentences(sp, lm, field="raw_content", threshold=threshold)
    jsonql.run_pipes(extractor, file=file, output=output, processes=PROCESSES)
    return (file, output)
if __name__ == "__main__":
func_argparse.main(sample, mine)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
from typing import Iterable, Sequence
from cc_net import dedup, jsonql
from cc_net.dedup import str_hash
from cc_net.flat_hash_set import FlatHashSet
def text(*args: str) -> str:
    """Build a document string by joining the given sentences with newlines."""
    sep = "\n"
    return sep.join(args)
def write_docs(file: Path, docs: Iterable[Sequence[str]]):
    """Write one JSON document per line, each with a "text" field built from
    the corresponding sentence list."""
    file.parent.mkdir(exist_ok=True)
    with open(file, "w") as out:
        for sentences in docs:
            print(json.dumps({"text": text(*sentences)}), file=out)
def as_dict(hash_set):
    """Return *hash_set* itself if it is a dict, else a plain-dict copy of its items."""
    if isinstance(hash_set, dict):
        return hash_set
    return dict(hash_set.items())
def load_hashes(file):
    """Load a dumped FlatHashSet from *file* and return its content as a dict."""
    results = dedup.FlatHashSet()
    results.load(file)
    return as_dict(results)
LENGTHS = ["original_length", "length"]
def assert_documents_equal(expected, actual, ignoring={}):
    """Assert two document lists are equal, ignoring the fields in *ignoring*."""
    expected = [{k: doc[k] for k in doc if k not in ignoring} for doc in expected]
    # Bug fix: this comprehension previously iterated `expected` again, so
    # `actual` was never actually compared and the assert always passed.
    actual = [{k: doc[k] for k in doc if k not in ignoring} for doc in actual]
    assert expected == actual
def test_simple_dedup(tmp_path: Path) -> None:
    """End-to-end dedup: lines repeated across docs (case-insensitive) are dropped."""
    write_docs(
        tmp_path / "docs.json",
        [
            ["_Hello", "_World", "I'm so original"],
            ["_world", "I'm originaler", "_Hello"],
        ],
    )
    results = list(dedup.deduplicate(tmp_path / "docs.json", field="text"))
    expected = [
        # First document is untouched
        dict(
            text=text("_Hello", "_World", "I'm so original"),
            original_nlines=3,
            nlines=3,
            line_ids=[0, 1, 2],
        ),
        # Second documents loses several lines
        dict(text="I'm originaler", original_nlines=3, nlines=1, line_ids=[1]),
    ]
    assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_with_dump(tmp_path: Path):
    """HashesCollector dumps every line hash, flagging duplicated lines with True."""
    hashes = tmp_path / "hashes.bin"
    documents = [
        dict(text=text("_Hello", "_World", "I'm so original")),
        dict(text=text("_world", "I'm originaler", "_Hello")),
    ]
    collector = dedup.HashesCollector(field="text", output=hashes)
    list(collector.map(documents))
    results = load_hashes(hashes)
    # The "_"-prefixed fixture lines are exactly the ones appearing twice.
    expected = {
        str_hash(l): l.startswith("_")
        for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
    }
    assert expected == results
def test_dedup_with_np_dump(tmp_path: Path):
    """Hashes dumped via the context manager reload (numpy format) with all keys."""
    hashes = tmp_path / "hashes.bin"
    documents = [
        dict(text=text("_Hello", "_World", "I'm so original")),
        dict(text=text("_world", "I'm originaler", "_Hello")),
    ]
    with dedup.HashesCollector(field="text", output=hashes) as d:
        list(d.map(documents))

    results = FlatHashSet()
    results.load_np(hashes)
    expected = set(
        str_hash(l) for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
    )
    assert expected == set(results.keys())
def test_dedup_from_hashes(tmp_path: Path):
    """DuplicatesRemover drops only lines whose hash was added at least twice."""
    documents = [
        dict(text=text("_Hello", "World", "I'm so original")),
        dict(text=text("Good morning", "World", "I'm originaler")),
    ]
    seen = ["_hello", "i'm originaler", "world"]
    hashes = [str_hash(h) for h in seen]
    h = dedup.FlatHashSet()
    h.add(hashes)
    # Note: 'world' appears only once and won't be treated as a duplicate.
    h.add(hashes[:-1])
    h.dump(tmp_path / "hashes.bin")

    results = list(
        dedup.DuplicatesRemover("text", [tmp_path / "hashes.bin"]).map(documents)
    )
    expected = [
        dict(
            text=text("World", "I'm so original"),
            original_nlines=3,
            nlines=2,
            line_ids=[1, 2],
        ),
        dict(
            text=text("Good morning", "World"),
            original_nlines=3,
            nlines=2,
            line_ids=[0, 1],
        ),
    ]
    assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_fast(tmp_path: Path):
    """Two-pass dedup: collect hashes over all parts, then remove dups per part."""
    data = tmp_path / "data"
    part_0 = [["Hello", "_World", "I'm so original"]]
    write_docs(data / "part_0.json", part_0)
    part_1 = [["Good morning", "_World", "I'm originaler"]]
    write_docs(data / "part_1.json", part_1)
    parts = [data / "part_0.json", data / "part_1.json"]

    res = tmp_path / "res"
    res.mkdir()
    h = tmp_path / "hashes.bin"
    field = "text"
    jsonql.run_pipes(dedup.HashesCollector(field, output=h), file=parts)
    for part in parts:
        jsonql.run_pipes(
            dedup.DuplicatesRemover(field, [h]), file=part, output=res / part.name
        )
        # NOTE(review): removal is run twice — presumably to check it is
        # idempotent, since the output file is simply overwritten; confirm.
        jsonql.run_pipes(
            dedup.DuplicatesRemover(field, [h]), file=part, output=res / part.name
        )

    results_0 = list(jsonql.read_jsons(res / "part_0.json"))
    expected_0 = [
        dict(
            text=text("Hello", "I'm so original"),
            original_nlines=3,
            nlines=2,
            line_ids=[0, 2],
        )
    ]
    assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)

    results_1 = list(jsonql.read_jsons(res / "part_1.json"))
    expected_1 = [
        dict(
            text=text("Good morning", "I'm originaler"),
            original_nlines=3,
            nlines=2,
            line_ids=[0, 2],
        )
    ]
    assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)

    words = [w for part in [part_0, part_1] for doc in part for w in doc]
    expected = {str_hash(s.lower()): s.startswith("_") for s in words}
    assert expected == load_hashes(h)
def test_remove_duplicates_sharded(tmp_path: Path):
    """Sharded removal applies each hash shard in turn to the input files."""
    data = tmp_path / "data"
    part_0 = [["Hello", "_World", "I'm so original"]]
    write_docs(data / "part_0.json", part_0)
    part_1 = [["_Good morning", "_World", "I'm originaler"]]
    write_docs(data / "part_1.json", part_1)

    h = tmp_path / "hashes"
    h.mkdir()
    # Shard 0 marks "_world" as duplicated (added twice).
    h0 = FlatHashSet()
    h0.add([str_hash(s.lower()) for doc in part_0 for s in doc])
    h0.add([str_hash("_world")])
    h0.dump(h / "part_0.bin")
    assert {
        str_hash("hello"): False,
        str_hash("_world"): True,
        str_hash("i'm so original"): False,
    } == as_dict(h0)

    # Shard 1 marks "_good morning" as duplicated.
    h1 = FlatHashSet()
    h1.add([str_hash(s.lower()) for doc in part_1 for s in doc])
    h1.add([str_hash("_good morning")])
    h1.dump(h / "part_1.bin")
    assert {
        str_hash("_good morning"): True,
        str_hash("_world"): False,
        str_hash("i'm originaler"): False,
    } == as_dict(h1)

    res = tmp_path / "res"
    res.mkdir()
    # dedup.DISABLE_MULTI_PROCESSING = True  # Simplifies debugging
    dedup.remove_duplicates_sharded(
        files=[data / "part_0.json", data / "part_1.json"],
        outputs=[res / "part_0.json", res / "part_1.json"],
        field="text",
        hashes_dir=h,
    )

    results_0 = list(jsonql.read_jsons(res / "part_0.json"))
    expected_0 = [
        dict(
            text=text("Hello", "I'm so original"),
            original_nlines=3,
            nlines=2,
            line_ids=[0, 2],
        )
    ]
    assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)

    # First pass removes "_world", second "_good morning".
    results_1 = list(jsonql.read_jsons(res / "part_1.json"))
    expected_1 = [
        dict(text=text("I'm originaler"), original_nlines=3, nlines=1, line_ids=[2])
    ]
    assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import cc_net.text_normalizer as txt
def test_unicode_punct():
    """Unicode punctuation is mapped to an ASCII equivalent, or fully stripped."""
    weird = ",。、„”“«»1」「《》´∶:?!();–—.~’…━〈〉【】%"
    replaced = ',.,""""""""""\'::?!();- - . ~\'...-<>[]%'
    assert txt.replace_unicode_punct(weird) == replaced
    assert txt.remove_unicode_punct(weird) == ""
def test_numbers():
    """numbers=True maps every digit to 0; numbers=False leaves them untouched."""
    weird = "023456789 | 0123456789"
    normalized = "000000000 | 0000000000"
    assert txt.normalize(weird, numbers=True) == normalized
    assert txt.normalize(weird, numbers=False) == weird
def test_normalize_for_dedup():
    """The fast and slow dedup normalizations must produce the same output."""
    weird = "023´∶:\x10 | ;012 hèllo"
    normalized = "000 | ;000 hèllo"
    assert normalized == txt.slow_normalize_for_dedup(weird)
    assert normalized == txt.normalize_for_dedup(weird)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from cc_net import process_wet_file
def test_parsing():
    """parse_warc_file yields documents with url, domain, date, title and content."""
    sample = Path(__file__).parent / "data" / "sample.warc.txt"
    with open(sample) as f:
        documents = list(process_wet_file.parse_warc_file(f))

    expected_urls = [
        "http://sample_english.com",
        "http://sample_chinese.zh",
        "http://sample_russian.ru",
    ]
    assert expected_urls == [d["url"] for d in documents]
    expected_domains = ["sample_english.com", "sample_chinese.zh", "sample_russian.ru"]
    assert expected_domains == [d["source_domain"] for d in documents]
    expected_date = [
        "2019-03-18T00:00:00Z",
        "2019-03-18T00:00:01Z",
        "2019-03-18T00:00:02Z",
    ]
    assert expected_date == [d["date_download"] for d in documents]
    expected_title = [
        "Famous Mark Twain Quotes",
        "馬克·吐溫名言",
        "Цитаты знаменитого Марка Твена",
    ]
    assert expected_title == [d["title"] for d in documents]
    # The first line of each record becomes the title; the rest is raw_content.
    expected_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't.
"""
    assert expected_quotes == documents[0]["raw_content"]
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import pytest
from cc_net.flat_hash_set import HASH_TYPE, FlatHashSet, NaiveHashSet
def as_dict(flat_hash_set) -> dict:
    """Materialize the hash set's (key, value) pairs into a plain dict."""
    return dict(flat_hash_set.items())
# Skip marker: FlatHashSet degrades to NaiveHashSet when getpy is not installed,
# in which case getpy-specific tests are meaningless.
need_getpy = pytest.mark.skipif(
    FlatHashSet == NaiveHashSet, reason="getpy isn't installed"
)
def same_behavior(test_case):
    """Decorator: run *test_case* with both hash set implementations and
    assert the resulting contents are identical."""
    def run_case():
        flat = as_dict(test_case(FlatHashSet))
        naive = as_dict(test_case(NaiveHashSet))
        assert flat == naive
    return need_getpy(run_case)
@same_behavior
def test_setitem(hash_set_cls):
    """Vectorized __setitem__: later assignments overwrite earlier values."""
    h = hash_set_cls()
    h[np.arange(10, dtype=h.dtype)] = np.zeros(10, dtype=np.uint8)
    h[np.arange(5, dtype=h.dtype)] = np.ones(5, dtype=np.uint8)
    return h
@same_behavior
def test_add_dup(hash_set_cls):
    """add() flags keys added more than once with the value True."""
    h = hash_set_cls()
    h.add(np.arange(10, dtype=h.dtype))
    h.add(np.arange(5, dtype=h.dtype))

    expected = {i: i < 5 for i in range(10)}
    assert expected == as_dict(h), f"add_dup with {hash_set_cls.__name__}"
    return h
@need_getpy
def test_gp_dict():
    """Sanity-check the raw getpy Dict behaves as the wrappers assume."""
    import getpy as gp  # type: ignore

    h = gp.Dict(HASH_TYPE, np.uint8)
    h[np.arange(10, dtype=HASH_TYPE)] = np.zeros(10, dtype=np.uint8)
    h[np.arange(5, dtype=HASH_TYPE)] = np.ones(5, dtype=np.uint8)
    expected = {i: i < 5 for i in range(10)}
    assert expected == as_dict(h)
def check_reload(h, dump, load, tmp_path):
    """Round-trip *h* through the given dump/load pair and check equality."""
    dump_path = tmp_path / dump.__name__
    dump(h, dump_path)
    h2 = type(h)()
    load(h2, dump_path)
    assert as_dict(h) == as_dict(h2)
@pytest.mark.parametrize("hash_set_cls", [FlatHashSet, NaiveHashSet])
def test_loading(tmp_path, hash_set_cls):
    """Every dump/load format pair of the hash set classes must round-trip."""
    h = hash_set_cls()
    x = np.random.randint(0, 2 ** 32, (100,), dtype=h.dtype)
    h.add(x)

    check_reload(h, hash_set_cls.dump, hash_set_cls.load, tmp_path)
    check_reload(h, hash_set_cls.dump_np, hash_set_cls.load_np, tmp_path)
    # The getpy-backed dump only exists when getpy is installed.
    if hasattr(hash_set_cls, "dump_gp"):
        check_reload(h, hash_set_cls.dump_gp, hash_set_cls.load_gp, tmp_path)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import pytest
def _request_is_disabled(self, *args, **kwargs):
raise Exception(
f"Your code tried to call 'request' with: {args}, {kwargs}. Unit test aren't allowed to reach internet."
)
@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
    """Remove requests.sessions.Session.request for all tests."""
    # autouse=True: applies to every test in the suite without opt-in.
    monkeypatch.setattr("requests.sessions.Session.request", _request_is_disabled)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
#
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from cc_net import jsonql, regroup
def check_regroup(tmp_path, regroup_fn, check_blocks_boundaries=False):
    """Regroup 4 shards into one file and verify the content (and, optionally,
    the block boundaries) survive the round trip."""
    n_shards = 4
    n_docs = 20
    shards = [
        [dict(id=i, shard=s, raw_content="hello world") for i in range(n_docs)]
        for s in range(n_shards)
    ]
    shards_files = [tmp_path / f"{s:04d}.json.gz" for s in range(n_shards)]
    for shard, shard_file in zip(shards, shards_files):
        jsonql.run_pipes(inputs=shard, output=shard_file)
    regroup_file = tmp_path / "regroup.json.gz"
    start = time.time()
    regroup_fn(shards_files, regroup_file)
    duration = time.time() - start
    print(f"{regroup_fn.__module__}.{regroup_fn.__name__} took {duration}s")

    regrouped = list(jsonql.read_jsons(regroup_file))
    assert [doc for shard in shards for doc in shard] == regrouped

    readers = jsonql.get_block_readers(regroup_file, n_shards)
    if not check_blocks_boundaries:
        # Blocks need not align with the original shards: only check the union.
        assert [doc for shard in shards for doc in shard] == [
            doc for reader in readers for doc in jsonql.read_jsons(reader)
        ]
        return

    for shard, reader in zip(shards, readers):
        block = [doc for doc in jsonql.read_jsons(reader)]
        assert shard == block
def test_regroup(tmp_path):
    """Plain reshard: content preserved, block boundaries unconstrained."""
    # With regroup boundaries will be every 256Mb.
    check_regroup(tmp_path, regroup.reshard, check_blocks_boundaries=False)
def test_fast_regroup(tmp_path):
    """Fast reshard: each output block must match one input shard exactly."""
    # With fast regroup boundaries should match the shards.
    check_regroup(tmp_path, regroup.fast_reshard, check_blocks_boundaries=True)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import io
from pathlib import Path
from typing import Sequence
import numpy as np
import pytest
from cc_net import jsonql
def bar(small_bar: str) -> str:
    """Expand each histogram bar cell (block or space) to the 10-char width printed by jsonql."""
    widened = small_bar.replace(" ", " " * 10)
    return widened.replace("█", "█" * 10)
def get_output(transformer, data, **kwargs):
    """Run one pipeline stage over *data* and return what it writes as a string."""
    with io.StringIO() as output:
        # Convert data to a generator so that it's not interpreted as a file list.
        jsonql.run_pipe(transformer, kwargs, file=(x for x in data), output=output)
        return output.getvalue()
def test_split(tmp_path: Path):
    """split() routes each document to the file named by its formatted pattern."""
    data = [
        dict(text="Hello world", lang="en"),
        dict(text="Boujour les amis", lang="fr"),
        dict(text="Rock your boat", lang="en"),
    ]
    with jsonql.split(tmp_path / "{lang}.json") as split:
        list(split.map(data))
        summary = split.summary()
    assert "Found 2 splits." in summary
    en_docs = list(jsonql.read_jsons(tmp_path / "en.json"))
    assert [data[0], data[2]] == en_docs

    fr_docs = list(jsonql.read_jsons(tmp_path / "fr.json"))
    assert [data[1]] == fr_docs
def test_split_bad_pattern(tmp_path: Path):
    """A split pattern referencing a field missing from the docs raises KeyError."""
    data = [dict(text="Hello world", lang="en")]
    with pytest.raises(KeyError):
        with jsonql.split(tmp_path / "{language}.json") as split:
            list(split.map(data))
def test_histogram():
    """jsonql.histogram matches hand-computed bin edges and counts."""
    data = [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]
    hist, bins = jsonql.histogram(data, bins=8, weights=None)
    np.testing.assert_almost_equal(bins, [0.1 * x for x in range(1, 10)])
    np.testing.assert_almost_equal(hist, [4, 0, 0, 2, 0, 0, 0, 2])

    data = [0, 0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.8, 0.8, 1]
    hist, bins = jsonql.histogram(data, bins=10, weights=None)
    np.testing.assert_almost_equal(bins, [0.1 * x for x in range(11)])
    np.testing.assert_almost_equal(hist, [1, 4, 0, 0, 2, 0, 0, 0, 2, 1])
def test_display_stats():
    """display_stats renders counts, average lengths and (cumulative) histograms."""
    stats = {
        jsonql.ALL_DOCUMENTS: 100,
        "title": 80,
        "title.length": 80 * 50,
        "text": 100,
        "text.length": 100 * 1000,
        "popularity": 8,
        "popularity.val": [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9],
    }

    (title,) = jsonql.display_stats(stats, "title")
    assert "title" in title
    assert "saw 80 times" in title
    assert "average length is" in title
    assert "\n" not in title

    (text,) = jsonql.display_stats(stats, "text")
    assert "text" in text
    assert "saw 100 times" in text
    assert "average length is" in text
    assert "\n" not in text

    histogram = jsonql.display_stats(
        stats, "popularity", bins=[x / 10 for x in range(1, 10)]
    )
    assert "popularity" in histogram[0]
    assert "saw 8 times" in histogram[0]
    assert "histogram is" in histogram[0]
    assert "0.100 " + bar("████████") in histogram[1]
    assert "0.400 " + bar("████ ") in histogram[2]
    assert "0.800 " + bar("████ ") in histogram[3]

    cum_histogram = jsonql.display_stats(stats, "popularity", bins=8, cumulative=True)
    assert "popularity" in cum_histogram[0]
    assert "saw 8 times" in cum_histogram[0]
    assert "histogram is" in cum_histogram[0]
    assert "0.100 " + bar("████ ") in cum_histogram[1]
    assert "0.400 " + bar("██████ ") in cum_histogram[2]
    assert "0.800 " + bar("████████") in cum_histogram[3]
def test_describe():
    """describe prints per-field statistics, honoring the columns filter."""
    def sample(pop):
        return dict(title="Lorem", text="Lorem ipsum dolor sit amet.", popularity=pop)

    data = [sample(pop) for pop in [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]]
    desc = get_output(
        jsonql.describe, data, columns=None, bins=[x / 10 for x in range(1, 10)]
    )

    assert "Field title saw 8 times (100.0%), average length is 5" in desc
    assert "Field text saw 8 times (100.0%), average length is 27" in desc
    assert "Field popularity saw 8 times (100.0%), histogram is" in desc
    assert "0.100 " + bar("████████") in desc
    assert "0.400 " + bar("████ ") in desc
    assert "0.800 " + bar("████ ") in desc

    # Restricting to one column hides the others.
    desc = get_output(jsonql.describe, data, columns=["text"])
    assert "Field title saw 8 times (100.0%), average length is 5" not in desc
    assert "Field text saw 8 times (100.0%), average length is 27" in desc
    assert "Field popularity, histogram is:" not in desc
def test_custom_pipe():
    """A plain generator function can serve directly as a pipeline stage."""
    def transformer(source, sep=" "):
        for i, line in enumerate(source):
            res = f"{i}{sep}{line}"
            yield res

    data = ["hello", "world"]
    assert get_output(transformer, data) == "0 hello\n1 world\n"
    # Extra kwargs are forwarded to the transformer.
    assert get_output(transformer, data, sep="_") == "0_hello\n1_world\n"
def test_open_read_write(tmp_path: Path):
    """open_write handles plain files, gzip, file lists, size splits and patterns."""
    def _lines(filename: Path) -> Sequence[str]:
        # jsonql.lines calls open_read
        return list(jsonql.lines(filename))

    tmp = tmp_path
    with jsonql.open_write(tmp / "a.txt") as o:
        print("a", file=o)
    assert _lines(tmp / "a.txt") == ["a"]

    jsonql.write_jsons([{"a": 1}], tmp / "a.txt")
    assert _lines(tmp / "a.txt") == ['{"a": 1}']

    # Compression is inferred from the .gz extension.
    with jsonql.open_write(tmp / "a.gz") as o:
        print("a", file=o)
    assert _lines(tmp / "a.gz") == ["a"]

    # With a file list, later files are only created once needed.
    with jsonql.open_write([tmp / "a0.txt", tmp / "a1.txt"]) as o:
        print("a", file=o)
    assert _lines(tmp / "a0.txt") == ["a"]
    assert not (tmp / "a1.txt").is_file()

    with jsonql.open_write([tmp / "b0.txt", tmp / "b1.txt"], max_size="1k") as o:
        print("0" * 2000, file=o)
        print("1" * 2000, file=o)
    assert _lines(tmp / "b0.txt") == ["0" * 2000]
    assert _lines(tmp / "b1.txt") == ["1" * 2000]

    # "?" patterns generate numbered files on demand.
    with jsonql.open_write(tmp / "a_????.json") as o:
        print("a", file=o)
    assert _lines(tmp / "a_0000.json") == ["a"]
    assert not (tmp / "a_0001.json").is_file()
    assert _lines(tmp / "a_*.json") == ["a"]

    with jsonql.open_write(tmp / "b_??.json", max_size="1k") as o:
        print("0" * 2000, file=o)
        print("1" * 2000, file=o)
    assert _lines(tmp / "b_00.json") == ["0" * 2000]
    assert _lines(tmp / "b_01.json") == ["1" * 2000]
    assert _lines(tmp / "b_*.json") == ["0" * 2000, "1" * 2000]
def test_split_file(tmp_path: Path):
    """SplitFile partitions a file into n_chunks, each seeing whole lines."""
    file = tmp_path / "test.txt"
    content = "Hello\nWorld\n"
    with open(file, "w") as o:
        o.write(content)

    with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
        assert f.readlines() == ["Hello\n"]

    with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
        assert f.readlines() == ["World\n"]
def test_split_file_middle_of_line(tmp_path: Path):
    """A byte boundary falling mid-line must not split that line across chunks."""
    file = tmp_path / "test.txt"
    content = "Hello _|_\nWorld\n"
    # split is here       ^
    with open(file, "w") as o:
        o.write(content)

    with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
        assert f.readlines() == ["Hello _|_\n"]

    with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
        assert f.readlines() == ["World\n"]
def test_split_file_middle_of_char(tmp_path: Path):
    """A byte boundary inside a multi-byte UTF-8 char must not corrupt it."""
    file = tmp_path / "test.txt"
    content = "Hello\U0001F40D\nWorld\n"
    # split is here       ^^
    with open(file, "w") as o:
        o.write(content)

    with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
        assert f.readlines() == ["Hello🐍\n"]

    with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
        assert f.readlines() == ["World\n"]
def test_blocked_gzip(tmp_path: Path):
    """BlockedGzipWriter output is readable whole, per block index, or via block readers."""
    file = tmp_path / "test.gz"
    f = str(file)
    # Each object is 10/11 bytes long. We have 2 of them by block.
    content = ['{"xx": %d}' % i for i in range(80)]
    with jsonql.BlockedGzipWriter(file, "wt", block_size="20B") as o:
        for line in content:
            print(line, file=o)

    jr = jsonql.JsonReader(strict=True)
    expected = list(jr.map(content))
    # read as one file
    assert expected == list(jsonql.read_jsons(file))
    # read first block
    assert expected[:2] == list(jsonql.read_jsons(f + "[0/40]"))
    # read last block
    assert expected[-2:] == list(jsonql.read_jsons(f + "[39/40]"))

    readers = jsonql.get_block_readers(file, 9)
    read_as_several_files = [list(jsonql.read_jsons(r)) for r in readers]
    # 40 splits of 2 docs, 9 readers -> 5 splits, 10 docs per reader
    assert list(jsonql.grouper(expected, 10)) == read_as_several_files
def test_enter_exit(capsys):
    """Transformers are entered before the pipe runs and exited after it drains."""
    class MyTransformer(jsonql.Transformer):
        def __enter__(self):
            print("trans: started")
            self.ready = True
            return self

        def __exit__(self, *args):
            print("trans: done")

        def do(self, x):
            return (x, x)

    def acc(values):
        print("acc: started")
        res = 0
        for (x, _) in values:
            res += int(x)
        print("acc: done")
        yield f"acc: result={res}"

    t = MyTransformer()
    data = (str(x) for x in range(10))
    print("pipeline: started")
    # Print to stdout.
    jsonql.run_pipes(t, acc, file=data)
    print("pipeline: done")
    out = capsys.readouterr().out
    assert (
        "\n".join(
            [
                "pipeline: started",
                "trans: started",
                "acc: started",
                "acc: done",
                f"acc: result=45",
                # Transformers are closed at the very end.
                "trans: done",
                "pipeline: done\n",
            ]
        )
        == out
    )
def test_write_to_stdout(capsys):
    """run_pipes with no transformer copies input lines to stdout, newline-separated."""
    lines = [str(i) for i in range(10)]
    jsonql.run_pipes(file=iter(lines))
    captured = capsys.readouterr().out
    assert captured == "\n".join(lines) + "\n"
def test_write_to_stdout_handle_newlines(capsys):
    """Lines that already end with a newline must not receive a second one."""
    lines = [f"{i}\n" for i in range(10)]
    jsonql.run_pipes(file=iter(lines))
    captured = capsys.readouterr().out
    assert captured == "".join(lines)
def test_multiprocess(capsys):
    """Mapping with two worker processes still yields every output line (order-free)."""
    doubler = jsonql.Mapper(lambda x: f"2x = {2 * int(x)}")
    jsonql.run_pipes(doubler, processes=2, file=(str(x) for x in range(10)))
    produced = set(capsys.readouterr().out.strip("\n").split("\n"))
    assert produced == {f"2x = {2 * x}" for x in range(10)}
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
import pytest
import cc_net
import cc_net.minify as minify
from cc_net import jsonql, process_wet_file
from cc_net.minify import (
HASH_SIZE,
decode_hashes,
encode_hashes,
encode_line_ids,
get_hashes,
)
def test_encode_decode():
    """encode_hashes followed by decode_hashes must be the identity."""
    sentences = ["Hello world !", "Is everyone happy in here ?"]
    hashes = get_hashes(sentences)
    assert all(len(h) == HASH_SIZE for h in hashes)
    as_ints = [minify._b2i(h) for h in hashes]

    decoded = decode_hashes(encode_hashes(hashes))
    assert all(len(d) == HASH_SIZE for d in decoded)
    assert [minify._b2i(d) for d in decoded] == as_ints
    assert decoded == hashes
def test_minify():
    """Minifier drops raw content and encodes line_ids, keeping only metadata."""
    doc = {
        "raw_content": "Hello world !\nIs everyone happy in here ?",
        "language": "en",
        "perplexity": 120.0,
        "line_ids": [0, 4],
    }
    minified = minify.Minifier()(doc)
    assert minified == {"line_ids": "AAAEAA==", "language": "en", "perplexity": 120.0}
@pytest.fixture
def http_from_disk(monkeypatch):
    """Serve the sample WET segment from disk instead of fetching it over HTTP."""

    def read_sample_file(url: str, n_retry: int = 3) -> bytes:
        # Only the sample segment URL may be requested by the code under test.
        assert url == process_wet_file.WET_URL_ROOT + "/crawl-data/sample.warc.wet"
        sample = Path(__file__).parent / "data" / "sample.warc.txt"
        return sample.read_bytes()

    monkeypatch.setattr(cc_net.jsonql, "request_get_content", read_sample_file)
def test_minify_and_fetch(http_from_disk, tmp_path: Path):
    """Minify a processed document, then restore it from the raw CC segment."""
    full_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't."""
    # We don't need no education.
    chosen_quotes = "\n".join(
        l for l in full_quotes.splitlines() if "Education" not in l
    )

    # The raw document as it comes out of Common Crawl.
    cc_doc = {
        "url": "http://sample_english.com",
        "date_download": "2019-03-18T00:00:00Z",
        "digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
        "source_domain": "sample_english.com",
        "title": "Famous Mark Twain Quotes",
        "raw_content": full_quotes,
        "cc_segment": "crawl-data/sample.warc.wet",
        "nlines": 4,
        "length": 353,
    }
    # Metadata the cc_net pipeline adds: line 1 ("Education...") was filtered out,
    # hence line_ids [0, 2, 3] and the shortened raw_content.
    ccnet_metadata = {
        "language": "en",
        "language_score": 0.99,
        "perplexity": 151.5,
        "bucket": "head",
        "raw_content": chosen_quotes,
        "nlines": 3,
        "length": len(chosen_quotes),
        "original_nlines": 4,
        "original_length": 353,
        "line_ids": [0, 2, 3],
    }
    ccnet_doc = dict(cc_doc, **ccnet_metadata)
    mini = minify.Minifier()(ccnet_doc.copy())
    # The minifier must not mutate/return the document it was given.
    assert mini is not ccnet_doc

    # Minification keeps exactly these fields (with line_ids base64-encoded).
    important_fields = [
        "url",
        "digest",
        "cc_segment",
        "language",
        "language_score",
        "perplexity",
        "bucket",
        "line_ids",
    ]
    expected = {k: ccnet_doc[k] for k in important_fields}
    expected["line_ids"] = encode_line_ids(expected["line_ids"])  # type: ignore
    assert expected == mini

    with jsonql.open_write(tmp_path / "sample.json") as o:
        print(json.dumps(mini), file=o)
    fetcher = minify.MetadataFetcher(tmp_path)
    # line_ids is removed when unminifying.
    ccnet_doc.pop("line_ids")
    # Fetching the raw cc_doc + stored metadata must reconstruct the full doc.
    assert ccnet_doc == fetcher(cc_doc)
def test_fetch(http_from_disk, tmp_path: Path):
    """MetadataFetcher restores documents from minified metadata in one pass."""
    mini_docs = [
        {
            "url": "http://sample_chinese.com",
            "digest": "sha1:Y4E6URVYGIAFNVRTPZ5S3J64RTZTP6HJ",
            "cc_segment": "crawl-data/sample.warc.wet",
            "line_ids": encode_line_ids([2]),
            "bucket": "not_that_great",
        },
        {
            "url": "http://sample_english.com",
            "digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
            "cc_segment": "crawl-data/sample.warc.wet",
            "line_ids": encode_line_ids([3]),
            "bucket": "top_notch",
        },
    ]
    with jsonql.open_write(tmp_path / "sample.json") as out:
        for mini in mini_docs:
            print(json.dumps(mini), file=out)

    fetcher = minify.MetadataFetcher(tmp_path)
    segments = process_wet_file.CCSegmentsReader(["crawl-data/sample.warc.wet"])
    docs = [d for d in fetcher.map(segments) if d is not None]
    assert segments.retrieved_segments == 1

    # Note: documents are retrieved as they are ordered in the .warc.wet file.
    assert [d["raw_content"] for d in docs] == [
        "Facts are stubborn things, but statistics are more pliable.",
        "事實是固執的東西,但統計數字卻比較柔和。",
    ]
    assert [d["bucket"] for d in docs] == ["top_notch", "not_that_great"]
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import inspect
import pickle
from pathlib import Path
import pytest
from cc_net import dedup, jsonql, perplexity, split_by_lang, tokenizer
def get_transformers(module):
    """Collect every concrete Transformer subclass defined in *module*."""
    candidates = vars(module).values()
    return [
        obj
        for obj in candidates
        if type(obj) is type
        and issubclass(obj, jsonql.Transformer)
        and obj is not jsonql.Transformer
    ]
# Every Transformer implementation shipped with cc_net, gathered from all
# pipeline modules, used to parametrize the conformance test below.
ALL_TRANSFORMERS = (
    get_transformers(jsonql)
    + get_transformers(dedup)
    + get_transformers(perplexity)
    + get_transformers(tokenizer)
    + get_transformers(split_by_lang)
)
def check_transformer_is_calling_super_init(cls: type):
    """Assert that *cls* is a Transformer whose __init__ calls the parent initializer."""
    assert issubclass(cls, jsonql.Transformer)
    # Accessing __init__ directly is normally a smell, but here we really do
    # want to inspect its source code.
    source = inspect.getsource(cls.__init__)  # type: ignore
    # Strip spaces so formatting variations don't hide the call.
    source = source.replace(" ", "")
    assert "super().__init__()" in source
def test_bad_transformers_are_caught():
    """A Transformer whose __init__ skips the parent initializer must be rejected."""

    class BadTransformer(jsonql.Transformer):
        def __init__(self, arg):
            # Deliberately skips the parent initializer /!\
            self.arg = arg

    with pytest.raises(AssertionError):
        check_transformer_is_calling_super_init(BadTransformer)
@pytest.mark.parametrize("transformer", ALL_TRANSFORMERS)
def test_transformer_is_correctly_implemented(transformer):
    """Every shipped Transformer must call the parent initializer in __init__."""
    check_transformer_is_calling_super_init(transformer)
@pytest.mark.skipif(
    not Path("bin/lid.bin").exists(), reason="bin/lid.bin not found, run `make install`"
)
def test_can_pickle_transformer(tmp_path):
    """A live Classifier must survive pickling — twice, to cover the unpickled copy."""
    model = Path("bin/lid.bin")
    if not model.exists():
        return
    classifier = split_by_lang.Classifier(model, "text", "lang")
    classifier.__enter__()
    doc = dict(text="Hello world ! This is English btw.")
    expected = classifier(doc)

    # Round-trip through pickle twice: once with the live object, once with the
    # freshly unpickled copy, so unpickling itself yields a picklable object.
    for _ in range(2):
        with open(tmp_path / "transformer.pkl", "wb") as out:
            pickle.dump(classifier, out)
        with open(tmp_path / "transformer.pkl", "rb") as inp:
            classifier = pickle.load(inp)
        assert classifier(doc) == expected
|
from cross_vit_flax.cross_vit_flax import CrossViT
|
import flax.linen as nn
import jax
import jax.numpy as jnp
from jax.numpy import einsum
from typing import Any, Callable
from einops import rearrange, repeat
def exists(val):
    """Return True when *val* is anything other than None."""
    return val is not None
def default(val, d):
    """Return *val* unless it is None, in which case return the fallback *d*."""
    if val is None:
        return d
    return val
class PreNorm(nn.Module):
    """Apply a bias-free LayerNorm to the input before calling the wrapped function."""
    fn: Callable

    @nn.compact
    def __call__(self, x, **kwargs):
        normed = nn.LayerNorm(epsilon = 1e-5, use_bias = False)(x)
        return self.fn(normed, **kwargs)
class FeedForward(nn.Module):
    """Two-layer MLP: Dense -> GELU -> Dropout -> Dense -> Dropout."""
    dim: int
    hidden_dim: int
    dropout: float = 0.

    @nn.compact
    def __call__(self, x):
        hidden = nn.Dense(features = self.hidden_dim)(x)
        hidden = nn.gelu(hidden)
        hidden = nn.Dropout(rate = self.dropout)(hidden, deterministic = False)
        out = nn.Dense(features = self.dim)(hidden)
        out = nn.Dropout(rate = self.dropout)(out, deterministic = False)
        return out
class Attention(nn.Module):
    """Multi-head self/cross attention.

    Fix: the ``training`` flag was accepted but ignored — dropout always ran
    with ``deterministic=False``, so evaluation was noisy. It now drives the
    dropout determinism; the default (``training=True``) behaves exactly as
    before, so existing callers are unaffected.
    """
    dim: int
    heads: int = 8
    dim_head: int = 64
    dropout: float = 0.

    @nn.compact
    def __call__(self, x, context=None, kv_include_self=False, training=True):
        inner_dim = self.dim_head * self.heads
        scale = self.dim_head ** -0.5

        context = default(context, x)
        if kv_include_self:
            # Cross attention requires the CLS token to include itself as key/value.
            context = jnp.concatenate([x, context], axis = 1)

        # Queries come from x; keys and values from the (possibly extended) context.
        q = nn.Dense(features = inner_dim, use_bias = False)(x)
        kv = nn.Dense(features = inner_dim * 2, use_bias = False)(context)
        k, v = jnp.split(kv, 2, axis = -1)
        q, k, v = map(
            lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads),
            (q, k, v),
        )

        dots = einsum('b h i d, b h j d -> b h i j', q, k) * scale
        attn = nn.softmax(dots, axis = -1)

        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = nn.Dense(features = self.dim)(out)
        # Honour the training flag instead of hard-coding deterministic=False.
        out = nn.Dropout(rate = self.dropout)(out, deterministic = not training)
        return out
class Transformer(nn.Module):
    """Stack of pre-norm attention + feed-forward blocks with a final LayerNorm."""
    dim: int
    depth: int
    heads: int
    dim_head: int
    mlp_dim: int
    dropout: float = 0.

    @nn.compact
    def __call__(self, x):
        blocks = [
            (
                PreNorm(Attention(self.dim, self.heads, self.dim_head, self.dropout)),
                PreNorm(FeedForward(self.dim, self.mlp_dim, self.dropout)),
            )
            for _ in range(self.depth)
        ]
        for attn, ff in blocks:
            # Residual connections around both sub-layers.
            x = attn(x) + x
            x = ff(x) + x
        return nn.LayerNorm(epsilon = 1e-5, use_bias = False)(x)
# Projects CLS tokens in and out, for when the small- and large-patch streams
# have different embedding dimensions.
class ProjectInOut(nn.Module):
    dim_in: int
    dim_out: int
    fn: Callable

    @nn.compact
    def __call__(self, x, *args, **kwargs):
        if self.dim_in == self.dim_out:
            # Dimensions already agree: no projection layers are created.
            return self.fn(x, *args, **kwargs)
        projected = nn.Dense(features = self.dim_out)(x)
        result = self.fn(projected, *args, **kwargs)
        return nn.Dense(features = self.dim_in)(result)
# Cross-attention transformer: the two token streams exchange information
# exclusively through their CLS tokens.
class CrossTransformer(nn.Module):
    sm_dim: int
    lg_dim: int
    depth: int
    heads: int
    dim_head: int
    dropout: float

    @nn.compact
    def __call__(self, inputs):
        blocks = []
        for _ in range(self.depth):
            sm_attend_lg = ProjectInOut(self.sm_dim, self.lg_dim, PreNorm(Attention(self.lg_dim, heads = self.heads, dim_head = self.dim_head, dropout = self.dropout)))
            lg_attend_sm = ProjectInOut(self.lg_dim, self.sm_dim, PreNorm(Attention(self.sm_dim, heads = self.heads, dim_head = self.dim_head, dropout = self.dropout)))
            blocks.append((sm_attend_lg, lg_attend_sm))

        sm_tokens, lg_tokens = inputs
        # Split each stream into its CLS token and its patch tokens.
        sm_cls, sm_patches = sm_tokens[:, :1], sm_tokens[:, 1:]
        lg_cls, lg_patches = lg_tokens[:, :1], lg_tokens[:, 1:]

        for sm_attend_lg, lg_attend_sm in blocks:
            # Each CLS attends to the *other* stream's patches (and itself), residually.
            sm_cls = sm_attend_lg(sm_cls, context = lg_patches, kv_include_self = True) + sm_cls
            lg_cls = lg_attend_sm(lg_cls, context = sm_patches, kv_include_self = True) + lg_cls

        sm_tokens = jnp.concatenate([sm_cls, sm_patches], axis = 1)
        lg_tokens = jnp.concatenate([lg_cls, lg_patches], axis = 1)
        return sm_tokens, lg_tokens
# Multi-scale encoder: alternates per-scale transformer encoding with
# cross-scale CLS attention.
class MultiScaleEncoder(nn.Module):
    depth: int
    sm_dim: int
    lg_dim: int
    sm_enc_params: Any
    lg_enc_params: Any
    cross_attn_heads: int
    cross_attn_depth: int
    cross_attn_dim_head: int = 64
    dropout: float = 0.

    @nn.compact
    def __call__(self, inputs):
        stages = []
        for _ in range(self.depth):
            stages.append((
                Transformer(dim = self.sm_dim, dropout = self.dropout, **self.sm_enc_params),
                Transformer(dim = self.lg_dim, dropout = self.dropout, **self.lg_enc_params),
                CrossTransformer(
                    sm_dim = self.sm_dim,
                    lg_dim = self.lg_dim,
                    depth = self.cross_attn_depth,
                    heads = self.cross_attn_heads,
                    dim_head = self.cross_attn_dim_head,
                    dropout = self.dropout,
                ),
            ))

        sm_tokens, lg_tokens = inputs
        for sm_enc, lg_enc, cross_attend in stages:
            sm_tokens = sm_enc(sm_tokens)
            lg_tokens = lg_enc(lg_tokens)
            sm_tokens, lg_tokens = cross_attend([sm_tokens, lg_tokens])
        return sm_tokens, lg_tokens
# Patch-based image to token embedder: flattens patches, prepends a CLS token,
# and adds a learned position embedding.
class ImageEmbedder(nn.Module):
    dim: int
    image_size: int
    patch_size: int
    dropout: float = 0.

    @nn.compact
    def __call__(self, x):
        assert self.image_size % self.patch_size == 0, 'Image dimensions must be divisible by the patch size.'
        num_patches = (self.image_size // self.patch_size) ** 2

        pos_embedding = self.param('pos_embedding', nn.initializers.zeros, [1, num_patches + 1, self.dim])
        cls_token = self.param('cls', nn.initializers.zeros, [1, 1, self.dim])

        # Unfold the image into flattened patches, then project to the model dim.
        x = rearrange(x, 'b (h p1) (w p2) c -> b (h w) (p1 p2 c)', p1 = self.patch_size, p2 = self.patch_size)
        x = nn.Dense(features = self.dim)(x)

        batch, seq_len, _ = x.shape
        cls_tokens = repeat(cls_token, '() n d -> b n d', b = batch)
        x = jnp.concatenate([cls_tokens, x], axis = 1)
        x = x + pos_embedding[:, :(seq_len + 1)]
        return nn.Dropout(rate = self.dropout)(x, deterministic = False)
# cross ViT class
class CrossViT(nn.Module):
    """Dual-branch Vision Transformer (CrossViT).

    A small-patch (high resolution) branch and a large-patch (low resolution)
    branch are encoded separately and repeatedly exchange information through
    cross attention on their CLS tokens; the final logits are the sum of the
    two branch classification heads.
    """
    image_size: int
    num_classes: int
    sm_dim: int
    lg_dim: int
    sm_patch_size: int = 12
    sm_enc_depth: int = 1
    sm_enc_heads: int = 8
    sm_enc_mlp_dim: int = 2048
    sm_enc_dim_head: int = 64
    lg_patch_size: int = 16
    lg_enc_depth: int = 4
    lg_enc_heads: int = 8
    lg_enc_mlp_dim: int = 2048
    lg_enc_dim_head: int = 64
    cross_attn_depth: int = 2
    cross_attn_heads: int = 8
    cross_attn_dim_head: int = 64
    depth: int = 3
    dropout: float = 0.1
    emb_dropout: float = 0.1

    @nn.compact
    def __call__(self, img):
        # Core dual-branch encoder with cross-scale CLS attention.
        multi_scale_encoder = MultiScaleEncoder(
            depth = self.depth,
            sm_dim = self.sm_dim,
            lg_dim = self.lg_dim,
            cross_attn_heads = self.cross_attn_heads,
            cross_attn_dim_head = self.cross_attn_dim_head,
            cross_attn_depth = self.cross_attn_depth,
            sm_enc_params = dict(
                depth = self.sm_enc_depth,
                heads = self.sm_enc_heads,
                mlp_dim = self.sm_enc_mlp_dim,
                dim_head = self.sm_enc_dim_head
            ),
            lg_enc_params = dict(
                depth = self.lg_enc_depth,
                heads = self.lg_enc_heads,
                mlp_dim = self.lg_enc_mlp_dim,
                dim_head = self.lg_enc_dim_head
            ),
            dropout = self.dropout
        )

        # Patch-embed the same image at both scales (each adds its own CLS token).
        sm_tokens = ImageEmbedder(dim = self.sm_dim, image_size = self.image_size, patch_size = self.sm_patch_size, dropout = self.emb_dropout)(img)
        lg_tokens = ImageEmbedder(dim = self.lg_dim, image_size = self.image_size, patch_size = self.lg_patch_size, dropout = self.emb_dropout)(img)

        sm_tokens, lg_tokens = multi_scale_encoder([sm_tokens, lg_tokens])

        # Classification heads read only the CLS token of each branch.
        sm_cls, lg_cls = map(lambda t: t[:, 0], (sm_tokens, lg_tokens))

        sm_logits = nn.LayerNorm(epsilon = 1e-5, use_bias = False)(sm_cls)
        sm_logits = nn.Dense(features = self.num_classes)(sm_logits)
        lg_logits = nn.LayerNorm(epsilon = 1e-5, use_bias = False)(lg_cls)
        lg_logits = nn.Dense(features = self.num_classes)(lg_logits)

        # Final prediction is the sum of both branch logits.
        x = sm_logits + lg_logits
        return x
if __name__ == '__main__':
    import numpy

    # Smoke test: random NHWC image through a small CrossViT configuration.
    key = jax.random.PRNGKey(0)
    img = jax.random.normal(key, (1, 256, 256, 3))

    v = CrossViT(
        image_size = 256,
        num_classes = 1000,
        depth = 4,               # number of multi-scale encoding blocks
        sm_dim = 192,            # high res dimension
        sm_patch_size = 16,      # high res patch size (should be smaller than lg_patch_size)
        sm_enc_depth = 2,        # high res depth
        sm_enc_heads = 8,        # high res heads
        sm_enc_mlp_dim = 2048,   # high res feedforward dimension
        lg_dim = 384,            # low res dimension
        lg_patch_size = 64,      # low res patch size
        lg_enc_depth = 3,        # low res depth
        lg_enc_heads = 8,        # low res heads
        lg_enc_mlp_dim = 2048,   # low res feedforward dimensions
        cross_attn_depth = 2,    # cross attention rounds
        cross_attn_heads = 8,    # cross attention heads
        dropout = 0.1,
        emb_dropout = 0.1
    )

    # Separate RNG streams for parameter init and the dropout layers.
    init_rngs = {'params': jax.random.PRNGKey(1),
                 'dropout': jax.random.PRNGKey(2),
                 'emb_dropout': jax.random.PRNGKey(3)}

    params = v.init(init_rngs, img)
    output = v.apply(params, img, rngs=init_rngs)
    print(output.shape)

    # NOTE(review): jax.tree_leaves / jax.tree_map are deprecated aliases of
    # jax.tree_util.* in recent JAX releases — confirm the pinned JAX version
    # still ships them.
    n_params_flax = sum(
        jax.tree_leaves(jax.tree_map(lambda x: numpy.prod(x.shape), params))
    )
    print(f"Number of parameters in Flax model: {n_params_flax}")
|
import torch
import colossalai
from colossalai.trainer import Trainer, hooks
from colossalai.utils import MultiTimer
from colossalai.logging import disable_existing_loggers, get_dist_logger
from colossalai.core import global_context as gpc
from colossalai.zero.init_ctx import ZeroInitContext
from utils.build_dataloader import build_dataloaders
from utils.hugging_face_config import CFG
from transformers import BloomForCausalLM
def BloomCoder(cfg: CFG):
    """Fine-tune a pretrained BLOOM causal LM with Colossal-AI.

    Parameters
    ----------
    cfg : CFG
        Dataloader configuration (dataset names, tokenizer, batch size, ...).
    """
    # Launch from torch.distributed; pulls EPOCHS/LEARNING_RATE/zero/etc.
    # from the config file into gpc.config.
    colossalai.launch_from_torch(config='./utils/colossalai_config.py')

    # Fail fast if the config file is missing required settings.
    assert hasattr(gpc.config, "EPOCHS"), "Please provide EPOCHS in your configuration"
    assert hasattr(gpc.config, "LEARNING_RATE"), "Please provide LEARNING_RATE in your configuration"
    assert hasattr(gpc.config, "gradient_accumulation"), "Please provide gradient_accumulation in your configuration"
    assert hasattr(gpc.config, "clip_grad_norm"), "Please provide clip_grad_norm in your configuration"

    # When ZeRO is configured, build the model inside ZeroInitContext so its
    # parameters are sharded at construction time.
    if hasattr(gpc.config, "zero"):
        with ZeroInitContext(
            target_device = torch.cuda.current_device(),
            shard_strategy = gpc.config.zero.model_config.shard_strategy,
            shard_param = True
        ):
            model = BloomForCausalLM.from_pretrained("bigscience/bloom-1b3")
    else:
        model = BloomForCausalLM.from_pretrained("bigscience/bloom-1b3")

    # build dataloaders
    train_dataloader, eval_dataloader = build_dataloaders(cfg)

    # optimizer
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=gpc.config.LEARNING_RATE
    )

    # Initialize the Colossal-AI engine (criterion is None: the HF model
    # computes its own loss from the `labels` key in the batch).
    engine, train_dataloader, eval_dataloader, _ = colossalai.initialize(
        model,
        optimizer,
        None,
        train_dataloader,
        eval_dataloader
    )

    steps = 0
    # training loop
    for _ in range(gpc.config.EPOCHS):
        engine.train()
        for step, batch in enumerate(train_dataloader):
            batch = {k: v.cuda() for k, v in batch.items()}
            engine.zero_grad()
            # NOTE(review): the forward pass goes through the raw `model`, not
            # `engine` — with ZeRO/AMP the engine wrapper is usually required
            # for correct sharded execution; confirm this is intentional.
            output = model(**batch)
            loss = output.loss
            engine.backward(loss)
            engine.step()
            steps += 1

    # validation loop (currently disabled)
    # engine.eval()
    # for step, batch in enumerate(eval_dataloader):
    #     with torch.no_grad():
    #         batch = {k: v.cuda() for k, v in batch.items()}
    #         output = model(**batch)
    #         eval_loss = output.loss
if __name__ == "__main__":
    # Fix: this call previously ran unconditionally at import time, so merely
    # importing the module would launch distributed training. Guard it.
    BloomCoder(CFG())
|
import copy
from itertools import chain
from datasets import load_dataset
from torch.utils.data import DataLoader, DistributedSampler
from torch.distributed import get_world_size
from transformers import BloomTokenizerFast, default_data_collator
from .hugging_face_config import CFG
def build_dataloaders(cfg: CFG):
    """
    Build train and validation dataloaders for the Bloom Coder model.

    Two bugs are fixed relative to the original:
    * ``Dataset.set_format`` works in place and returns ``None``; the original
      kept the return value and handed ``None`` to ``DistributedSampler``.
    * ``DataLoader`` raises when ``shuffle=True`` is combined with an explicit
      ``sampler``; shuffling is now requested only when no sampler is used.
    """
    # Load the training dataset and drop columns the model does not consume.
    load_train_data = load_dataset(cfg.train_dataset_name, split = cfg.choose_train_split)
    load_train_data = load_train_data.remove_columns(cfg.remove_train_columns)

    # Load the validation dataset and drop its unused columns.
    load_eval_data = load_dataset(cfg.eval_dataset_name, split = cfg.choose_eval_split)
    load_eval_data = load_eval_data.remove_columns(cfg.remove_eval_columns)

    # Shuffle both datasets deterministically.
    shuffled_train_files = load_train_data.shuffle(seed = cfg.seed)
    shuffled_eval_files = load_eval_data.shuffle(seed = cfg.seed)

    tokenizer = BloomTokenizerFast.from_pretrained(cfg.tokenizer_name)

    def tokenize(examples):
        """
        Concatenate tokenized examples and cut them into sequences of exactly
        ``cfg.tokenizer_seq_length`` tokens: no padding tokens, but examples
        may be split in the middle.

        Tokenize function reference:
        https://github.com/hpcaitech/PaLM-colossalai/blob/main/data/wikitext.py
        """
        seq_length = cfg.tokenizer_seq_length
        examples = tokenizer(examples[cfg.select_input_string])
        concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        if total_length >= seq_length:
            # Drop the tail so every sequence has exactly seq_length tokens.
            total_length = (total_length // seq_length) * seq_length
        result = {
            k: [t[i : i + seq_length] for i in range(0, total_length, seq_length)]
            for k, t in concatenated_examples.items()
        }
        # Causal LM: labels are a copy of the inputs.
        result["labels"] = copy.deepcopy(result["input_ids"])
        return result

    # Tokenize both splits; dropping the raw text column leaves only
    # input_ids, attention_mask, and labels.
    tokenized_train_dataset = shuffled_train_files.map(tokenize, batched = True, remove_columns = [cfg.select_input_string])
    tokenized_eval_dataset = shuffled_eval_files.map(tokenize, batched = True, remove_columns = [cfg.select_input_string])

    # set_format mutates the dataset in place and returns None — do not keep
    # its return value.
    tokenized_train_dataset.set_format(type = "torch")
    tokenized_eval_dataset.set_format(type = "torch")

    # Distributed samplers are only needed when running with more than one process.
    sample_train_dataset = DistributedSampler(tokenized_train_dataset, shuffle = True) if get_world_size() > 1 else None
    sample_eval_dataset = DistributedSampler(tokenized_eval_dataset, shuffle = False) if get_world_size() > 1 else None

    # shuffle and sampler are mutually exclusive in DataLoader: shuffle locally
    # only when no DistributedSampler is in charge of ordering.
    train_dataloader = DataLoader(
        tokenized_train_dataset,
        shuffle = sample_train_dataset is None,
        sampler = sample_train_dataset,
        drop_last = True,
        collate_fn = default_data_collator,
        batch_size = cfg.batch_size,
    )
    eval_dataloader = DataLoader(
        tokenized_eval_dataset,
        sampler = sample_eval_dataset,
        drop_last = True,
        collate_fn = default_data_collator,
        batch_size = cfg.batch_size,
    )

    # Return the training and validation dataloaders to be used in the model.
    print('Done building dataloaders')
    return train_dataloader, eval_dataloader
if __name__ == '__main__':
    # Smoke-test dataloader construction with the default configuration.
    data_loader_args = CFG()
    train_loader, eval_loader = build_dataloaders(cfg = data_loader_args)
    # Peek at a batch (two fresh iterators, so these may be different batches).
    print(next(iter(train_loader))['input_ids'])
    print(next(iter(train_loader))['input_ids'].shape)
|
from typing import Optional, ClassVar
from dataclasses import dataclass, field
@dataclass
class CFG:
    """
    Configuration for the Bloom Coder data loader.

    Bug fix: ``remove_train_columns`` / ``remove_eval_columns`` were annotated
    ``ClassVar`` but assigned ``field(default=[...])``. Dataclasses skip
    ``ClassVar`` annotations entirely, so the attribute stayed bound to the raw
    ``dataclasses.Field`` object instead of the list, breaking
    ``remove_columns(cfg.remove_train_columns)``. They are now plain
    class-level lists.
    """
    train_dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-train-v2-near-dedup",
        metadata={"help": "Path to Hugging Face training dataset."}
    )
    eval_dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-valid-v2-near-dedup",
        metadata={"help": "Path to Hugging Face validation dataset."}
    )
    choose_train_split: Optional[str] = field(
        default="train",
        metadata={"help": "Choose Hugging Face training dataset split."}
    )
    choose_eval_split: Optional[str] = field(
        default="train",
        metadata={"help": "Choose Hugging Face validation dataset split."}
    )
    # Train dataset columns to remove (shared by both splits below).
    remove_train_columns: ClassVar[list[str]] = [
        'copies',
        'path',
        'repo_name',
        'size',
        'license',
        'hash',
        'line_mean',
        'line_max',
        'alpha_frac',
        'autogenerated',
        'ratio',
        'config_test',
        'has_no_keywords',
        'few_assignments',
    ]
    # Validation dataset columns to remove.
    remove_eval_columns: ClassVar[list[str]] = [
        'copies',
        'path',
        'repo_name',
        'size',
        'license',
        'hash',
        'line_mean',
        'line_max',
        'alpha_frac',
        'autogenerated',
        'ratio',
        'config_test',
        'has_no_keywords',
        'few_assignments',
    ]
    seed: Optional[int] = field(
        default=42,
        metadata={"help": "Random seed used for reproducibility."}
    )
    tokenizer_name: Optional[str] = field(
        default="bigscience/bloom-1b3",
        metadata={"help": "Tokenizer name."}
    )
    tokenizer_seq_length: Optional[int] = field(
        default=1024,
        metadata={"help": "Sequence lengths used for tokenizing examples."}
    )
    select_input_string: Optional[str] = field(
        default="content",
        metadata={"help": "Select the key to used as the input string column."}
    )
    batch_size: Optional[int] = field(
        default=16,
        metadata={"help": "Batch size for training and validation."}
    )
    save_to_path: Optional[str] = field(
        default="''",
        metadata={"help": "Save the dataset to local disk."}
    )
|
from colossalai.amp import AMP_TYPE
from colossalai.zero.shard_utils import TensorShardStrategy
# Colossal AI Global Config
EPOCHS = 1
LEARNING_RATE = 0.001

# ZeRO configuration: shard parameter tensors, let Colossal-AI place tensors
# automatically, and reuse fp16 shards to save memory.
zero = dict(
    model_config = dict(
        shard_strategy = TensorShardStrategy(),
        tensor_placement_policy = 'auto',
        reuse_fp16_shard = True
    ),
)

# NOTE(review): gradient_accumulation is a step count and is usually an int —
# confirm Colossal-AI accepts a float here.
gradient_accumulation = 1.0
clip_grad_norm = 1.0
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the text contents of *fname*, resolved relative to this file's directory.

    Fix: the original left the file handle open until garbage collection; a
    context manager closes it deterministically.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as handle:
        return handle.read()
# Package metadata. The host's CUDA version is baked into the distribution
# name so users install the wheel matching their toolkit
# (e.g. bitsandbytes-cuda113).
setup(
    name = f"bitsandbytes-cuda{os.environ['CUDA_VERSION']}",
    version = "0.26.0",
    author = "Tim Dettmers",
    author_email = "[email protected]",
    description = ("8-bit optimizers and quantization routines."),
    license = "MIT",
    keywords = "gpu optimizers optimization 8-bit quantization compression",
    url = "http://packages.python.org/bitsandbytes",
    packages=find_packages(),
    # Ship the prebuilt CUDA shared library alongside the Python sources.
    package_data={'': ['libbitsandbytes.so']},
    long_description=read('README.md'),
    long_description_content_type = 'text/markdown',
    classifiers=[
        "Development Status :: 4 - Beta",
        'Topic :: Scientific/Engineering :: Artificial Intelligence'
    ],
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import bitsandbytes as bnb
from itertools import product
from bitsandbytes import functional as F
def setup():
    """Module-level pytest setup hook; nothing to prepare."""
    pass


def teardown():
    """Module-level pytest teardown hook; nothing to clean up."""
    pass
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['float', 'half'])
def test_estimate_quantiles(dtype):
    """Estimated quantiles must track true quantiles for uniform and normal data."""
    A = torch.rand(1024, 1024, device='cuda').to(dtype)
    code = F.estimate_quantiles(A)
    percs = torch.linspace(1/512, 511/512, 256, device=A.device)
    # Uniform data: the quantiles are the percentiles themselves.
    torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)

    A = torch.randn(1024, 1024, device='cuda').to(dtype)
    code = F.estimate_quantiles(A)
    quantiles = torch.quantile(A.float(), percs)
    # No estimate may be off by more than 5e-2 from the exact quantile.
    assert (torch.abs(code - quantiles) > 5e-02).sum().item() == 0
def test_quantile_quantization():
    """Quantile quantization round-trip error stays small for normal and uniform data."""
    for _ in range(100):
        A1 = torch.randn(1024, 1024, device='cuda')
        code = F.estimate_quantiles(A1)
        A2 = F.dequantize_no_absmax(F.quantize_no_absmax(A1, code), code)
        assert torch.abs(A1 - A2).mean().item() < 0.0075

        # Uniform data quantizes far more accurately than normal data.
        A1 = torch.rand(1024, 1024, device='cuda')
        code = F.estimate_quantiles(A1)
        A2 = F.dequantize_no_absmax(F.quantize_no_absmax(A1, code), code)
        diff = torch.abs(A1 - A2).mean().item()
        torch.testing.assert_allclose(A1, A2, atol=5e-3, rtol=0)
        assert diff < 0.001
def test_dynamic_quantization():
    """Dynamic (non-blockwise) quantization round-trip error bounds."""
    diffs = []
    reldiffs = []
    for _ in range(100):
        A1 = torch.randn(1024, 1024, device='cuda')
        C, S = F.quantize(A1)
        A2 = F.dequantize(C, S)
        diff = torch.abs(A1 - A2)
        reldiff = diff / torch.abs(A1 + 1e-8)
        diffs.append(diff.mean().item())
        reldiffs.append(reldiff.mean().item())
        assert diff.mean().item() < 0.0135
    print(sum(diffs) / len(diffs))
    print(sum(reldiffs) / len(reldiffs))

    for _ in range(100):
        A1 = torch.rand(1024, 1024, device='cuda')
        C, S = F.quantize(A1)
        A2 = F.dequantize(C, S)
        diff = torch.abs(A1 - A2).mean().item()
        torch.testing.assert_allclose(A1, A2, atol=1e-2, rtol=0)
        assert diff < 0.004
def test_dynamic_blockwise_quantization():
    """Blockwise quantization round-trip error bounds for normal and uniform data."""
    diffs = []
    reldiffs = []
    for _ in range(100):
        A1 = torch.randn(1024, 1024, device='cuda')
        C, S = F.quantize_blockwise(A1)
        A2 = F.dequantize_blockwise(C, S)
        diff = torch.abs(A1 - A2)
        reldiff = diff / torch.abs(A1 + 1e-8)
        diffs.append(diff.mean().item())
        reldiffs.append(reldiff.mean().item())
        assert diffs[-1] < 0.011
    print(sum(diffs) / len(diffs))
    print(sum(reldiffs) / len(reldiffs))

    diffs = []
    for _ in range(100):
        A1 = torch.rand(1024, 1024, device='cuda')
        C, S = F.quantize_blockwise(A1)
        A2 = F.dequantize_blockwise(C, S)
        diff = torch.abs(A1 - A2).mean().item()
        assert diff < 0.0033
        diffs.append(diff)
        torch.testing.assert_allclose(A1, A2, atol=1e-2, rtol=0)
    #print(sum(diffs)/len(diffs))
def test_dynamic_blockwise_stochastic_quantization():
    """Stochastic rounding shifts codes by at most one bin, with balanced direction."""
    rand = torch.rand(1024).cuda()
    for _ in range(100):
        A1 = torch.randn(1024, 1024, device='cuda')
        C1, S1 = F.quantize_blockwise(A1, rand=rand)
        C2, S2 = F.quantize_blockwise(A1)
        # A maximum distance of 1 between stochastic and deterministic codes.
        torch.testing.assert_allclose(C1, C2, atol=1, rtol=0)
        frac_smaller = (C1 < C2).float().sum() / C1.numel()
        frac_larger = (C1 > C2).float().sum() / C1.numel()
        # Rounding up and down should happen about equally often.
        torch.testing.assert_allclose(frac_larger, frac_smaller, atol=0.01, rtol=0)
@pytest.mark.parametrize("gtype", [torch.float32, torch.float16], ids=['float', 'half'])
def test_percentile_clipping(gtype):
    """percentile_clipping must track a 100-step gradient-norm history.

    Bug fix: the original scale check read
    ``assert gnorm_scale == 1.0 if gnorm1 < clip2 else clip2/gnorm1``, which
    Python parses as ``assert (gnorm_scale == 1.0) if ... else (clip2/gnorm1)``
    — on the clipped branch it only asserted that ``clip2/gnorm1`` is truthy
    and never compared it against ``gnorm_scale``.
    """
    gnorm_vec1 = torch.zeros(100, device='cuda')
    gnorm_vec2 = torch.zeros(100, device='cuda')
    n = 4
    step = 0
    percentile = 5
    for i in range(1000):
        step += 1
        g = torch.randn(n, n, dtype=gtype, device='cuda')
        gnorm1, clip2, gnorm_scale = F.percentile_clipping(g, gnorm_vec2, step, percentile=percentile)
        # Scale is 1 below the clip threshold, clip2/gnorm1 above it.
        expected_scale = 1.0 if gnorm1 < clip2 else clip2 / gnorm1
        assert gnorm_scale == expected_scale

        # Maintain a reference history of gradient norms.
        gnorm2 = torch.norm(g.float())
        if step == 1:
            gnorm_vec1[:] = gnorm2
        else:
            gnorm_vec1[step % 100] = gnorm2

        vals, idx = torch.sort(gnorm_vec1)
        clip1 = vals[percentile]

        torch.testing.assert_allclose(gnorm_vec1, torch.sqrt(gnorm_vec2))
        torch.testing.assert_allclose(clip1, clip2)
        torch.testing.assert_allclose(gnorm1, gnorm2)
def test_stable_embedding():
    """StableEmbedding can be constructed and its parameters re-initialized."""
    embedding = bnb.nn.StableEmbedding(1024, 1024)
    embedding.reset_parameters()
def test_dynamic_blockwise_quantization_cpu():
    """CPU blockwise quantization should match the GPU path and stay within error bounds."""
    #A1 = torch.randn(1024, 1024, device='cpu')
    #code = F.create_dynamic_map()
    #for i in range(1000):
    #    C, S = F.quantize_blockwise(A1, code=code)
    #    A2 = F.dequantize_blockwise(C, S)
    for i in range(10):
        # equivalence with GPU blockwise quantization
        A1 = torch.randn(1024, 1024, device='cpu')
        C1, S1 = F.quantize_blockwise(A1)
        C2, S2 = F.quantize_blockwise(A1.cuda())
        torch.testing.assert_allclose(S1[0], S2[0].cpu())
        # there seems to be some issues with precision in CUDA vs CPU
        # not all elements are usually close, with couple off elements in a million
        idx = torch.isclose(C1, C2.cpu())
        assert (idx==0).sum().item() < 15

    # Normal data: per-iteration mean error must stay below 0.011.
    diffs = []
    reldiffs = []
    for i in range(10):
        A1 = torch.randn(1024, 1024, device='cpu')
        C, S = F.quantize_blockwise(A1)
        A2 = F.dequantize_blockwise(C, S)
        diff = torch.abs(A1-A2)
        reldiff = diff/torch.abs(A1+1e-8)
        diffs.append(diff.mean().item())
        reldiffs.append(reldiff.mean().item())
        assert diffs[-1] < 0.011
    #print(sum(diffs)/len(diffs))
    #print(sum(reldiffs)/len(reldiffs))

    # Uniform data: tighter error bound.
    diffs = []
    for i in range(10):
        A1 = torch.rand(1024, 1024, device='cpu')
        C, S = F.quantize_blockwise(A1)
        A2 = F.dequantize_blockwise(C, S)
        diff = torch.abs(A1-A2).mean().item()
        assert diff < 0.0033
        diffs.append(diff)
        torch.testing.assert_allclose(A1, A2, atol=1e-2, rtol=0)
    #print(sum(diffs)/len(diffs))
def test_histogram():
    """histogram_scatter_add_2d must match a naive element-wise scatter-add."""
    dim1, dim2 = 32, 32
    source = torch.rand(dim1, dim2, device='cuda')
    idx1 = torch.randint(0, 255, size=(dim1, dim2), device='cuda').int()
    idx2 = torch.randint(0, 255, size=(dim1, dim2), device='cuda').int()
    expected = torch.zeros((256, 256)).cuda()
    actual = torch.zeros((256, 256)).cuda()

    F.histogram_scatter_add_2d(actual, idx1, idx2, source)
    # Reference: scatter-add one element at a time in Python.
    for row in range(dim1):
        for col in range(dim2):
            expected[idx1[row, col].item(), idx2[row, col].item()] += source[row, col]

    torch.testing.assert_allclose(expected, actual)
    torch.testing.assert_allclose(expected.sum(), source.sum())
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import shutil
import uuid
import pytest
import ctypes
import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F
from os.path import join
from itertools import product
import apex
def get_temp_dir():
    """Create and return a unique scratch directory under /tmp/autoswap."""
    path = f'/tmp/autoswap/{uuid.uuid4()}'
    os.makedirs(path, exist_ok=True)
    return path
def rm_path(path):
    """Recursively delete *path* and everything beneath it."""
    shutil.rmtree(path)
# Maps a test-config name to the optimizer implementations being compared.
# Entries with a leading None are 3-tuples (unused, reference optimizer,
# bnb optimizer) used only by the benchmark tests; the 2-tuple entries are
# (reference optimizer factory, bitsandbytes optimizer factory).
str2optimizers = {}
str2optimizers['adam_pytorch'] = (None, torch.optim.Adam, bnb.optim.Adam)
str2optimizers['adam_apex'] = (None, apex.optimizers.FusedAdam, bnb.optim.Adam)
str2optimizers['momentum_apex'] = (None, lambda pxx: apex.optimizers.FusedSGD(pxx, 0.01, 0.9), bnb.optim.Adam)
str2optimizers['momentum_pytorch'] = (None, lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9), bnb.optim.Adam)
str2optimizers['lamb_apex'] = (None, lambda pxx: apex.optimizers.FusedLAMB(pxx, weight_decay=0.00, use_nvlamb=True), bnb.optim.Adam)
str2optimizers['lars_apex'] = (None, lambda pxx: apex.parallel.LARC.LARC(apex.optimizers.FusedSGD(pxx, 0.01, 0.9)), bnb.optim.Adam)
str2optimizers['adam'] = (torch.optim.Adam, bnb.optim.Adam)
str2optimizers['adamw'] = (torch.optim.AdamW, bnb.optim.AdamW)
str2optimizers['fused_adam'] = (apex.optimizers.FusedAdam, bnb.optim.Adam)
str2optimizers['momentum'] = (lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9), lambda pxx: bnb.optim.SGD(pxx, 0.01, 0.9, block_wise=False))
str2optimizers['lars'] = (lambda pxx: bnb.optim.PytorchLARS(pxx, 0.01, 0.9), lambda pxx: bnb.optim.LARS(pxx, 0.01, 0.9))
str2optimizers['lamb'] = (lambda pxx: apex.optimizers.FusedLAMB(pxx, weight_decay=0.0, max_grad_norm=10000.0, eps=1e-8, use_nvlamb=True), bnb.optim.LAMB)
str2optimizers['rmsprop'] = (lambda pxx: torch.optim.RMSprop(pxx, 0.01, 0.9), lambda pxx: bnb.optim.RMSprop(pxx, 0.01, 0.9, block_wise=False))
str2optimizers['adagrad'] = (lambda pxx: torch.optim.Adagrad(pxx, 0.01), lambda pxx: bnb.optim.Adagrad(pxx, 0.01, block_wise=False))
# 8-bit variants: the reference side is always the full-precision torch/apex
# optimizer; the bnb side quantizes state to 8 bits (with or without
# block-wise quantization).
str2optimizers['adam8bit'] = (torch.optim.Adam, lambda pxx: bnb.optim.Adam8bit(pxx, block_wise=False))
str2optimizers['momentum8bit'] = (lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9), lambda pxx: bnb.optim.SGD8bit(pxx, 0.01, 0.9, block_wise=False))
str2optimizers['rmsprop8bit'] = (lambda pxx: torch.optim.RMSprop(pxx, 0.01, 0.9), lambda pxx: bnb.optim.RMSprop8bit(pxx, 0.01, 0.9, block_wise=False))
str2optimizers['lamb8bit'] = (lambda pxx: apex.optimizers.FusedLAMB(pxx, weight_decay=0.0, max_grad_norm=10000.0, eps=1e-8, use_nvlamb=True), bnb.optim.LAMB8bit)
str2optimizers['lars8bit'] = (lambda pxx: bnb.optim.PytorchLARS(pxx, 0.01, 0.9), lambda pxx: bnb.optim.LARS8bit(pxx, 0.01, 0.9))
str2optimizers['adam8bit_blockwise'] = (torch.optim.Adam, lambda pxx: bnb.optim.Adam8bit(pxx, block_wise=True))
str2optimizers['adamw8bit_blockwise'] = (torch.optim.Adam, lambda pxx: bnb.optim.AdamW8bit(pxx, block_wise=True))
str2optimizers['momentum8bit_blockwise'] = (lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9), lambda pxx: bnb.optim.SGD8bit(pxx, 0.01, 0.9, block_wise=True))
str2optimizers['rmsprop8bit_blockwise'] = (lambda pxx: torch.optim.RMSprop(pxx, 0.01, 0.9), lambda pxx: bnb.optim.RMSprop8bit(pxx, 0.01, 0.9, block_wise=True))
str2optimizers['adagrad8bit_blockwise'] = (lambda pxx: torch.optim.Adagrad(pxx, 0.01), lambda pxx: bnb.optim.Adagrad8bit(pxx, 0.01, block_wise=True))
# Maps a test-config name to the optimizer-state keys to compare:
# (torch state key, bnb state key) for 32-bit configs, plus the quantization
# map and absmax/max keys for the 8-bit configs ('absmax*' for block-wise
# quantization, 'max*' otherwise).
str2statenames = {}
str2statenames['adam'] = [('exp_avg', 'state1'), ('exp_avg_sq', 'state2')]
str2statenames['adamw'] = [('exp_avg', 'state1'), ('exp_avg_sq', 'state2')]
str2statenames['momentum'] = [('momentum_buffer', 'state1')]
str2statenames['lars'] = [('momentum_buffer', 'state1')]
str2statenames['lamb'] = [('exp_avg', 'state1'), ('exp_avg_sq', 'state2')]
str2statenames['rmsprop'] = [('square_avg', 'state1')]
str2statenames['adagrad'] = [('sum', 'state1')]
str2statenames['adam8bit'] = [('exp_avg', 'state1', 'qmap1', 'max1'), ('exp_avg_sq', 'state2', 'qmap2', 'max2')]
str2statenames['lamb8bit'] = [('exp_avg', 'state1', 'qmap1', 'max1'), ('exp_avg_sq', 'state2', 'qmap2', 'max2')]
str2statenames['adam8bit_blockwise'] = [('exp_avg', 'state1', 'qmap1', 'absmax1'), ('exp_avg_sq', 'state2', 'qmap2', 'absmax2')]
str2statenames['adamw8bit_blockwise'] = [('exp_avg', 'state1', 'qmap1', 'absmax1'), ('exp_avg_sq', 'state2', 'qmap2', 'absmax2')]
str2statenames['momentum8bit'] = [('momentum_buffer', 'state1', 'qmap1', 'max1')]
str2statenames['momentum8bit_blockwise'] = [('momentum_buffer', 'state1', 'qmap1', 'absmax1')]
str2statenames['lars8bit'] = [('momentum_buffer', 'state1', 'qmap1', 'max1')]
str2statenames['rmsprop8bit'] = [('square_avg', 'state1', 'qmap1', 'max1')]
str2statenames['rmsprop8bit_blockwise'] = [('square_avg', 'state1', 'qmap1', 'absmax1')]
str2statenames['adagrad8bit_blockwise'] = [('sum', 'state1', 'qmap1', 'absmax1')]
# Parameter grid for test_optimizer32bit below (cartesian product).
dim1 = [1024]
dim2 = [32, 1024, 4097, 1]
gtype = [torch.float32, torch.float16]
optimizer_names = ['adam', 'adamw', 'momentum', 'rmsprop', 'lars', 'lamb', 'adagrad']
values = list(product(dim1,dim2, gtype, optimizer_names))
names = ['dim1_{0}_dim2_{1}_gtype_{2}_optim_{3}'.format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer32bit(dim1, dim2, gtype, optim_name):
    """Compare a bnb 32-bit optimizer against its torch/apex reference.

    Runs 50 steps with identical gradients on both optimizers and asserts
    that parameters and optimizer state stay within tolerance; also checks
    state_dict serialization round-trips every 10 steps.
    """
    if dim1 == 1 and dim2 == 1: return
    p1 = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.1
    p2 = p1.clone()
    # Reference optimizer runs in float32; bnb optimizer keeps gtype.
    p1 = p1.float()
    torch_optimizer = str2optimizers[optim_name][0]([p1])
    bnb_optimizer = str2optimizers[optim_name][1]([p2])
    if gtype == torch.float32:
        atol, rtol = 2e-6, 1e-5
    else:
        atol, rtol = 1e-4, 1e-3
    for i in range(50):
        g = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.01
        p1.grad = g.clone().float()
        p2.grad = g.clone()
        bnb_optimizer.step()
        torch_optimizer.step()
        for name1, name2 in str2statenames[optim_name]:
            torch.testing.assert_allclose(torch_optimizer.state[p1][name1], bnb_optimizer.state[p2][name2], atol=atol, rtol=rtol)
        torch.testing.assert_allclose(p1, p2.float(), atol=atol, rtol=rtol)
        if i % 10 == 0 and i > 0:
            # Serialize, destroy, and reload the bnb optimizer; state and
            # parameters must be unchanged by the round-trip.
            path = get_temp_dir()
            torch.save(bnb_optimizer.state_dict(),join(path, 'opt.pt'))
            del bnb_optimizer
            bnb_optimizer = None
            bnb_optimizer = str2optimizers[optim_name][1]([p2])
            bnb_optimizer.load_state_dict(torch.load(join(path, 'opt.pt')))
            rm_path(path)
            torch.testing.assert_allclose(p1, p2.float(), atol=atol, rtol=rtol)
            for name1, name2 in str2statenames[optim_name]:
                torch.testing.assert_allclose(torch_optimizer.state[p1][name1], bnb_optimizer.state[p2][name2], atol=atol, rtol=rtol)
        if gtype == torch.float16:
            # the adam buffers should also be close because they are 32-bit
            # but the parameters can diverge because they are 16-bit;
            # the difference grows larger and larger with each update
            # --> copy the state to keep weights close
            p1.data = p1.data.half().float()
            p2.copy_(p1.data)
            torch.testing.assert_allclose(p1.half(), p2)
        if optim_name in ['lars', 'lamb']:
            # These optimizers maintain an update-norm vector; it must be
            # populated (non-zero) after stepping.
            assert bnb_optimizer.state[p2]['unorm_vec'] > 0.0
# Parameter grid for test_global_config below.
dim1 = [1024]
dim2 = [32, 1024, 4097]
gtype = [torch.float32, torch.float16]
values = list(product(dim1,dim2, gtype))
names = ['dim1_{0}_dim2_{1}_gtype_{2}'.format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, gtype", values, ids=names)
def test_global_config(dim1, dim2, gtype):
    """Verify per-parameter overrides via GlobalOptimManager.

    p2 gets skip_zeros=True (zero gradients leave its weights/state
    untouched), p3 gets optim_bits=8 (uint8 state buffers), while p1 uses
    the default config.
    """
    if dim1 == 1 and dim2 == 1: return
    p1 = torch.randn(dim1,dim2, device='cpu', dtype=gtype)*0.1
    p2 = torch.randn(dim1,dim2, device='cpu', dtype=gtype)*0.1
    p3 = torch.randn(dim1,dim2, device='cpu', dtype=gtype)*0.1
    # Random ~10% subset of positions whose gradients will be zeroed later.
    mask = torch.rand_like(p2) < 0.1
    beta1 = 0.9
    beta2 = 0.999
    lr = 0.001
    eps = 1e-8
    # Overrides must be registered before the optimizer is constructed.
    bnb.optim.GlobalOptimManager.get_instance().initialize()
    bnb.optim.GlobalOptimManager.get_instance().override_config(p2, 'skip_zeros', True)
    bnb.optim.GlobalOptimManager.get_instance().override_config(p3, 'optim_bits', 8)
    bnb.optim.GlobalOptimManager.get_instance().register_parameters([p1, p2, p3])
    p1 = p1.cuda()
    p2 = p2.cuda()
    p3 = p3.cuda()
    adam2 = bnb.optim.Adam([p1, p2, p3], lr, (beta1, beta2), eps)
    if gtype == torch.float32:
        atol, rtol = 1e-6, 1e-5
    else:
        atol, rtol = 1e-4, 1e-3
    original_p2 = p2[mask].clone()
    for i in range(50):
        g1 = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.1 + 0.001
        g2 = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.1 + 0.001
        g3 = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.1 + 0.001
        p1.grad = g1
        p2.grad = g2
        p3.grad = g3
        if i > 30 and i % 10 == 0:
            # Zero the masked gradient positions and snapshot weights/state
            # so we can check below what skip_zeros did (p2) vs did not (p1).
            g1.data[mask] = 0.0
            g2.data[mask] = 0.0
            p1.grad = g1
            p2.grad = g2
            original_p1 = p1[mask].clone()
            original_p2 = p2[mask].clone()
            og_s1 = adam2.state[p2]['state1'][mask].clone()
            og_s2 = adam2.state[p2]['state2'][mask].clone()
            og_s11 = adam2.state[p1]['state1'][mask].clone()
            og_s21 = adam2.state[p1]['state2'][mask].clone()
        adam2.step()
        # optim_bits=8 override: p3's state buffers must be quantized.
        assert adam2.state[p3]['state1'].dtype == torch.uint8
        assert adam2.state[p3]['state2'].dtype == torch.uint8
        if i > 30 and i % 10 == 0:
            # skip_zeros=True: zero gradients leave p2's weights and state
            # exactly unchanged...
            torch.testing.assert_allclose(original_p2, p2[mask])
            torch.testing.assert_allclose(adam2.state[p2]['state1'][mask], og_s1)
            torch.testing.assert_allclose(adam2.state[p2]['state2'][mask], og_s2)
            # ...while p1 (default config) still updates weights and state
            # at the zero-gradient positions.
            assert ((p1[mask]- original_p1)==0.0).sum() < p1.numel()
            assert ((adam2.state[p1]['state1'][mask]- og_s11)==0.0).sum() == 0.0
            assert ((adam2.state[p1]['state2'][mask]- og_s21)==0.0).sum() == 0.0
# Parameter grid for test_optimizer8bit below.
dim1 = [1024]
dim2 = [32, 1024, 4097]
gtype = [torch.float32, torch.float16]
optimizer_names = ['adam8bit', 'momentum8bit', 'rmsprop8bit', 'adam8bit_blockwise', 'adamw8bit_blockwise', 'lamb8bit', 'lars8bit', 'momentum8bit_blockwise', 'rmsprop8bit_blockwise', 'adagrad8bit_blockwise']
values = list(product(dim1,dim2, gtype, optimizer_names))
names = ['dim1_{0}_dim2_{1}_gtype_{2}_optim_{3}'.format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
    """Compare a bnb 8-bit optimizer against its full-precision reference.

    Each step dequantizes the 8-bit state buffers and checks them against
    the reference optimizer's state; every 10 steps the bnb optimizer is
    serialized and reloaded to verify the quantized state round-trips.
    """
    if dim1 == 1 and dim2 == 1: return
    p1 = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.1
    p2 = p1.clone()
    p1 = p1.float()
    # Block size used for block-wise dequantization of the 8-bit state.
    blocksize = 2048
    torch_optimizer = str2optimizers[optim_name][0]([p1])
    bnb_optimizer = str2optimizers[optim_name][1]([p2])
    if gtype == torch.float32:
        atol, rtol = 3e-3, 1e-3
        patol, prtol = 1e-5, 1e-3
    else:
        atol, rtol = 3e-3, 1e-3
        patol, prtol = 1e-5, 1e-3
    errors = []
    relerrors = []
    for i in range(50):
        g = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.01
        p1.grad = g.clone().float()
        p2.grad = g.clone()
        bnb_optimizer.step()
        torch_optimizer.step()
        torch.testing.assert_allclose(p1, p2.float(), atol=patol, rtol=prtol)
        dequant_states = []
        for name1, name2, qmap, max_val in str2statenames[optim_name]:
            #print(bnb_optimizer.state[p2][max_val], name1)
            # Dequantize the 8-bit state buffer so it is comparable with the
            # reference optimizer's float state.
            if 'blockwise' in optim_name:
                s1 = F.dequantize_blockwise(code=bnb_optimizer.state[p2][qmap], absmax=bnb_optimizer.state[p2][max_val], A=bnb_optimizer.state[p2][name2], blocksize=blocksize)
            else:
                s1 = F.dequantize(code=bnb_optimizer.state[p2][qmap], absmax=bnb_optimizer.state[p2][max_val], A=bnb_optimizer.state[p2][name2])
            # Allow a small number of outliers beyond tolerance rather than
            # requiring every element to be close.
            num_not_close = torch.isclose(torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol)==0
            assert num_not_close.sum().item() < 20
            dequant_states.append(s1.clone())
        err = torch.abs(p1-p2)
        relerr = err/torch.abs(p1)
        assert err.mean() < 0.0001
        assert relerr.mean() < 0.001
        errors.append(err.mean().item())
        relerrors.append(relerr.mean().item())
        if i % 10 == 0 and i > 0:
            # Serialization round-trip: raw quantized buffers, quantization
            # maps, and dequantized values must all be preserved exactly.
            for (name1, name2, qmap, max_val), s in zip(str2statenames[optim_name], dequant_states):
                s1cpy = s.clone()
                raws1cpy = bnb_optimizer.state[p2][name2].clone()
                qmap1 = bnb_optimizer.state[p2][qmap].clone()
                path = get_temp_dir()
                torch.save(bnb_optimizer.state_dict(),join(path, 'opt.pt'))
                del bnb_optimizer
                bnb_optimizer = None
                bnb_optimizer = str2optimizers[optim_name][1]([p2])
                bnb_optimizer.load_state_dict(torch.load(join(path, 'opt.pt')))
                rm_path(path)
                torch.testing.assert_allclose(raws1cpy, bnb_optimizer.state[p2][name2])
                torch.testing.assert_allclose(qmap1, bnb_optimizer.state[p2][qmap])
                if 'blockwise' in optim_name:
                    s1 = F.dequantize_blockwise(code=bnb_optimizer.state[p2][qmap], absmax=bnb_optimizer.state[p2][max_val], A=bnb_optimizer.state[p2][name2], blocksize=blocksize)
                else:
                    s1 = F.dequantize(code=bnb_optimizer.state[p2][qmap], absmax=bnb_optimizer.state[p2][max_val], A=bnb_optimizer.state[p2][name2])
                torch.testing.assert_allclose(s1cpy, s1)
                num_not_close = torch.isclose(torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol)==0
                assert num_not_close.sum().item() < 20
            torch.testing.assert_allclose(p1, p2.float(), atol=patol, rtol=prtol)
        # the parameters diverge quickly. Here we keep them close
        # together so we can test against the Adam error
        p1.data = p1.data.to(gtype).float()
        p2.copy_(p1.data)
        torch.testing.assert_allclose(p1.to(gtype), p2)
        # Sync the reference optimizer's state to the dequantized bnb state
        # so per-step errors do not accumulate across iterations.
        for (name1, name2, qmap, max_val), s in zip(str2statenames[optim_name], dequant_states):
            torch_optimizer.state[p1][name1].copy_(s.data)
    #print(sum(errors)/len(errors))
    #print(sum(relerrors)/len(relerrors))
# Parameter grid for test_adam_percentile_clipping below.
dim1 = [1024]
dim2 = [32, 1024, 4097]
gtype = [torch.float32]
optim_bits = [32, 8]
values = list(product(dim1,dim2, gtype, optim_bits))
names = ['dim1_{0}_dim2_{1}_gtype_{2}_optim_bits_{3}'.format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, gtype, optim_bits", values, ids=names)
def test_adam_percentile_clipping(dim1, dim2, gtype, optim_bits):
    """Check built-in percentile gradient clipping against manual clipping.

    adam2 uses percentile_clipping=5 internally; adam1 receives gradients
    that are clipped by hand with F.percentile_clipping — both must evolve
    the same way (within tolerance).
    """
    if dim1 == 1 and dim2 == 1: return
    p1 = torch.randn(dim1,dim2, device='cpu', dtype=gtype)*0.1
    beta1 = 0.9
    beta2 = 0.999
    lr = 0.001
    eps = 1e-8
    p1 = p1.cuda()
    p2 = p1.clone()
    adam1 = bnb.optim.Adam([p1], lr, (beta1, beta2), eps, optim_bits=optim_bits)
    adam2 = bnb.optim.Adam([p2], lr, (beta1, beta2), eps, optim_bits=optim_bits, percentile_clipping=5)
    # Rolling buffer of gradient norms used by F.percentile_clipping.
    gnorm_vec = torch.zeros(100).cuda()
    step = 0
    for i in range(50):
        step += 1
        # Gradients with a growing offset so the norm drifts upward and
        # clipping actually kicks in.
        g1 = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.1 + (0.01*i)
        g2 = g1.clone()
        p2.grad = g2
        # Manual clipping path for adam1.
        current_gnorm, clip_val, gnorm_scale = F.percentile_clipping(g1, gnorm_vec, step, 5)
        g1 = (g1.float()*gnorm_scale).to(gtype)
        p1.grad = g1
        adam1.step()
        adam2.step()
        # gnorm_scale is not deterministic (warp reductions), as such there can be slight differences in state
        if optim_bits == 32:
            torch.testing.assert_allclose(p1, p2)
            torch.testing.assert_allclose(adam1.state[p1]['state1'], adam2.state[p2]['state1'], atol=5e-5, rtol=1e-4)
            torch.testing.assert_allclose(adam1.state[p1]['state2'], adam2.state[p2]['state2'], atol=5e-5, rtol=1e-4)
        elif optim_bits == 8:
            torch.testing.assert_allclose(p1, p2, atol=1e-4, rtol=1e-3)
            torch.testing.assert_allclose(adam1.state[p1]['state1'], adam2.state[p2]['state1'], atol=2, rtol=1e-3)
            torch.testing.assert_allclose(adam1.state[p1]['state2'], adam2.state[p2]['state2'], atol=2, rtol=1e-3)
            # Re-sync 8-bit state so quantization error does not compound.
            adam1.state[p1]['state1'].copy_(adam2.state[p2]['state1'])
            adam1.state[p1]['state2'].copy_(adam2.state[p2]['state2'])
        if i % 10 == 0 and i > 0:
            # Serialization round-trip for the clipping optimizer.
            # NOTE(review): unlike the other tests, the temp dir is never
            # removed here (no rm_path call) — looks like an oversight.
            path = get_temp_dir()
            torch.save(adam2.state_dict(),join(path, 'opt.pt'))
            del adam2
            adam2 = None
            adam2 = bnb.optim.Adam([p2], lr, (beta1, beta2), eps, optim_bits=optim_bits, percentile_clipping=5)
            adam2.load_state_dict(torch.load(join(path, 'opt.pt')))
# Parameter grid for the optimizer benchmark below. Alternative optimizer
# sets are kept commented out for quick manual switching.
dim1 = [4096]
dim2 = [4096]
gtype = [torch.float32, torch.float16]
#optimizer_names = ['adam8bit_blockwise', 'adam8bit', 'lamb8bit']
#optimizer_names = ['adam8bit_blockwise', 'adam_apex', 'adam8bit', 'adam', 'adam_pytorch']
#optimizer_names = ['momentum_apex', 'momentum8bit', 'momentum_pytorch']
#optimizer_names = ['lamb_apex', 'lamb8bit']
#optimizer_names = ['lars_apex', 'lars8bit']
optimizer_names = ['adam8bit_blockwise']
values = list(product(dim1,dim2, gtype, optimizer_names))
names = ['dim1_{0}_dim2_{1}_gtype_{2}_optim_{3}'.format(*vals) for vals in values]
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_benchmark_blockwise(dim1, dim2, gtype, optim_name):
    """Benchmark: time 4500 optimizer steps and print seconds per parameter."""
    if dim1 == 1 and dim2 == 1: return
    p1 = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.1
    bnb_optimizer = str2optimizers[optim_name][1]([p1])
    g = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.01
    p1.grad = g
    for i in range(5000):
        if i == 500:
            # first 500 iterations are burn-in; start timing from here
            # (hence 4500 timed steps in `params` below)
            torch.cuda.synchronize()
            t0 = time.time()
        bnb_optimizer.step()
    torch.cuda.synchronize()
    s = time.time()-t0
    print('')
    # 4500 timed steps on a 4096x4096 parameter tensor.
    params = 4500*4096*4096
    print(optim_name, gtype, s/params)
    #assert s < 3.9
def test_str_betas():
    """bnb Adam should accept betas given as a string '(b1, b2)' as well as a tuple."""
    betas_tuple = (0.80, 0.95)
    betas_str = '(0.80, 0.95)'
    layer = torch.nn.Linear(10, 10)
    from_tuple = bnb.optim.Adam(layer.parameters(), betas=betas_tuple)
    from_str = bnb.optim.Adam(layer.parameters(), betas=betas_str)
    # Both spellings must parse to the same numeric defaults.
    for optimizer in (from_tuple, from_str):
        assert optimizer.defaults['betas'][0] == 0.8
        assert optimizer.defaults['betas'][1] == 0.95
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import bitsandbytes as bnb
from itertools import product
from bitsandbytes import functional as F
@pytest.mark.parametrize("embcls", [bnb.nn.Embedding, bnb.nn.StableEmbedding], ids=['Embedding', 'StableEmbedding'])
def test_embeddings(embcls):
    """Train a torch embedding and a bnb embedding side by side with Adam8bit.

    The final asserts show the intended split: the plain torch embedding's
    optimizer state is quantized to uint8, while the bnb embedding's state
    stays float32 — presumably because the bnb embedding classes register
    themselves for 32-bit optimization via GlobalOptimManager (TODO confirm
    against the bnb.nn module).
    """
    bnb.optim.GlobalOptimManager.get_instance().initialize()
    emb1 = torch.nn.Embedding(100, 512).cuda()
    emb2 = embcls(100, 512).cuda()
    adam1 = bnb.optim.Adam8bit(emb1.parameters())
    adam2 = bnb.optim.Adam8bit(emb2.parameters())
    # 100 pre-sampled batches of indices in [1, 100).
    batches = torch.randint(1, 100, size=(100, 4, 32)).cuda()
    for i in range(100):
        batch = batches[i]
        embedded1 = emb1(batch)
        embedded2 = emb2(batch)
        l1 = embedded1.mean()
        l2 = embedded2.mean()
        l1.backward()
        l2.backward()
        adam1.step()
        adam2.step()
        adam1.zero_grad()
        adam2.zero_grad()
    # torch embedding: 8-bit quantized state; bnb embedding: full precision.
    assert adam1.state[emb1.weight]['state1'].dtype == torch.uint8
    assert adam2.state[emb2.weight]['state1'].dtype == torch.float32
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .optim import adam
from .nn import modules
# pdoc configuration: exclude internal symbols from the generated API docs.
__pdoc__ = {'libBitsNBytes' : False,
            'optim.optimizer.Optimizer8bit': False,
            'optim.optimizer.MockArgs': False
           }
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import math
import ctypes as ct
import torch
from torch import Tensor
from typing import Tuple
# Load the compiled CUDA kernels that ship next to this module.
lib = ct.cdll.LoadLibrary(os.path.dirname(__file__) + '/libbitsandbytes.so')
# Cache of quantization maps, filled lazily by the quantization functions.
name2qmap = {}
''' C FUNCTIONS FOR OPTIMIZERS '''
# Each table maps an optimizer name to a pair of C entry points:
# (float32-gradient kernel, float16-gradient kernel). Note that 'lars'
# reuses the momentum kernels and 'lamb' reuses the adam kernels.
str2optimizer32bit = {}
str2optimizer32bit['adam'] = (lib.cadam32bit_g32, lib.cadam32bit_g16)
str2optimizer32bit['momentum'] = (lib.cmomentum32bit_g32, lib.cmomentum32bit_g16)
str2optimizer32bit['rmsprop'] = (lib.crmsprop32bit_g32, lib.crmsprop32bit_g16)
str2optimizer32bit['adagrad'] = (lib.cadagrad32bit_g32, lib.cadagrad32bit_g16)
str2optimizer32bit['lars'] = (lib.cmomentum32bit_g32, lib.cmomentum32bit_g16)
str2optimizer32bit['lamb'] = (lib.cadam32bit_g32, lib.cadam32bit_g16)
# 8-bit optimizers with static (non-blockwise) quantization.
str2optimizer8bit = {}
str2optimizer8bit['adam'] = (lib.cadam_static_8bit_g32, lib.cadam_static_8bit_g16)
str2optimizer8bit['momentum'] = (lib.cmomentum_static_8bit_g32, lib.cmomentum_static_8bit_g16)
str2optimizer8bit['rmsprop'] = (lib.crmsprop_static_8bit_g32, lib.crmsprop_static_8bit_g16)
str2optimizer8bit['lamb'] = (lib.cadam_static_8bit_g32, lib.cadam_static_8bit_g16)
str2optimizer8bit['lars'] = (lib.cmomentum_static_8bit_g32, lib.cmomentum_static_8bit_g16)
# 8-bit optimizers with block-wise quantization of the state.
str2optimizer8bit_blockwise = {}
str2optimizer8bit_blockwise['adam'] = (lib.cadam_8bit_blockwise_fp32, lib.cadam_8bit_blockwise_fp16)
str2optimizer8bit_blockwise['momentum'] = (lib.cmomentum_8bit_blockwise_fp32, lib.cmomentum_8bit_blockwise_fp16)
str2optimizer8bit_blockwise['rmsprop'] = (lib.crmsprop_8bit_blockwise_fp32, lib.crmsprop_8bit_blockwise_fp16)
str2optimizer8bit_blockwise['adagrad'] = (lib.cadagrad_8bit_blockwise_fp32, lib.cadagrad_8bit_blockwise_fp16)
optimal_normal = [-0.9939730167388916, -0.8727636337280273, -0.8097418546676636, -0.7660024166107178, -0.7318882346153259, -0.6793879270553589, -0.657649040222168, -0.6385974884033203, -0.6211113333702087, -0.5901028513908386, -0.5762918591499329, -0.5630806684494019, -0.5509274005889893, -0.5394591689109802, -0.5283197164535522, -0.517780065536499, -0.5074946284294128, -0.4980469048023224, -0.48867011070251465, -0.48003149032592773, -0.47125306725502014, -0.4629971981048584, -0.4547359049320221, -0.446626216173172, -0.43902668356895447, -0.43158355355262756, -0.4244747757911682, -0.4173796474933624, -0.41038978099823, -0.4055633544921875, -0.4035947024822235, -0.39701032638549805, -0.39057496190071106, -0.38439232110977173, -0.3782760500907898, -0.3721940815448761, -0.3661896586418152, -0.3604033589363098, -0.354605108499527, -0.34892538189888, -0.34320303797721863, -0.3376772701740265, -0.3323028087615967, -0.3269782066345215, -0.32166096568107605, -0.316457599401474, -0.3112771809101105, -0.3061025142669678, -0.30106794834136963, -0.2961243987083435, -0.2912728488445282, -0.28644347190856934, -0.28165507316589355, -0.2769731283187866, -0.2722635865211487, -0.26779335737228394, -0.26314786076545715, -0.2586647868156433, -0.2541804611682892, -0.2496625930070877, -0.24527113139629364, -0.24097171425819397, -0.23659978806972504, -0.23218469321727753, -0.22799566388130188, -0.22380566596984863, -0.21965542435646057, -0.2154538631439209, -0.2113603949546814, -0.20735277235507965, -0.20334717631340027, -0.19932441413402557, -0.19530178606510162, -0.19136647880077362, -0.18736697733402252, -0.18337111175060272, -0.17951400578022003, -0.1757056713104248, -0.17182783782482147, -0.1680615097284317, -0.16431649029254913, -0.16053077578544617, -0.15685945749282837, -0.15298527479171753, -0.1493264138698578, -0.14566898345947266, -0.14188314974308014, -0.13819937407970428, -0.1344561129808426, -0.1306886374950409, -0.1271020770072937, -0.12346585839986801, 
-0.11981867253780365, -0.11614970862865448, -0.11256207525730133, -0.10889036953449249, -0.10525048524141312, -0.1016591489315033, -0.09824034571647644, -0.09469068050384521, -0.0911419615149498, -0.08773849159479141, -0.08416644483804703, -0.08071305602788925, -0.07720902562141418, -0.07371306419372559, -0.07019119709730148, -0.06673648208379745, -0.06329209357500076, -0.059800852090120316, -0.0564190037548542, -0.05296570807695389, -0.049522045999765396, -0.04609023034572601, -0.04262964054942131, -0.039246633648872375, -0.03577171266078949, -0.03236335143446922, -0.028855687007308006, -0.02542758360505104, -0.022069433704018593, -0.018754752352833748, -0.015386369079351425, -0.01194947212934494, -0.008439815603196621, -0.004995611496269703, -0.0016682245768606663, 0.0, 0.0015510577941313386, 0.005062474869191647, 0.008417150937020779, 0.011741090565919876, 0.015184164978563786, 0.018582714721560478, 0.02204744517803192, 0.025471193715929985, 0.02889077737927437, 0.0323684960603714, 0.03579240292310715, 0.039281025528907776, 0.0427563451230526, 0.04619763046503067, 0.04968220740556717, 0.05326594039797783, 0.05679265409708023, 0.060245808213949203, 0.06372645497322083, 0.06721872836351395, 0.0706876739859581, 0.0742349922657013, 0.07774098962545395, 0.08123527467250824, 0.08468879014253616, 0.08810535818338394, 0.09155989438295364, 0.09498448669910431, 0.0985206812620163, 0.10206405073404312, 0.10563778132200241, 0.10921968519687653, 0.11284469068050385, 0.11653254181146622, 0.12008969485759735, 0.12368203699588776, 0.1272617131471634, 0.13089501857757568, 0.134552001953125, 0.1382799744606018, 0.14194637537002563, 0.14563234150409698, 0.14930322766304016, 0.15303383767604828, 0.1567956507205963, 0.16050070524215698, 0.16431072354316711, 0.16813558340072632, 0.17204202711582184, 0.1758781224489212, 0.17973239719867706, 0.1836014688014984, 0.18753431737422943, 0.19138391315937042, 0.19535475969314575, 0.19931404292583466, 0.20333819091320038, 0.20738255977630615, 
0.21152682602405548, 0.21568812429904938, 0.21978361904621124, 0.22393859922885895, 0.22814159095287323, 0.23241068422794342, 0.23675410449504852, 0.24123944342136383, 0.24569889903068542, 0.2500703036785126, 0.25904011726379395, 0.26349544525146484, 0.2682226300239563, 0.272907555103302, 0.2774306833744049, 0.28220856189727783, 0.2869136929512024, 0.2916390895843506, 0.29649388790130615, 0.30142995715141296, 0.3065022826194763, 0.3114383816719055, 0.31648796796798706, 0.3216581642627716, 0.32700115442276, 0.3322487473487854, 0.33778008818626404, 0.3431521952152252, 0.3487405776977539, 0.3543166518211365, 0.3601346015930176, 0.36605337262153625, 0.37217751145362854, 0.378179669380188, 0.3843980133533478, 0.3906566798686981, 0.39714935421943665, 0.40357843041419983, 0.4104187488555908, 0.4171563684940338, 0.42418959736824036, 0.43136918544769287, 0.4389212429523468, 0.44673123955726624, 0.45457619428634644, 0.4627031683921814, 0.47130417823791504, 0.4798591434955597, 0.48897242546081543, 0.4979848861694336, 0.5, 0.5076631307601929, 0.5177803635597229, 0.5282770991325378, 0.5392990112304688, 0.5506287813186646, 0.5632893443107605, 0.5764452815055847, 0.5903191566467285, 0.6051878333091736, 0.6209936141967773, 0.6382884979248047, 0.6573970913887024, 0.6795773506164551, 0.7037051916122437, 0.7327037453651428, 0.7677436470985413, 0.8111193776130676, 0.875165581703186, 1.0]
optimal_half_normal = [0.0025565922260284424, 0.005811259150505066, 0.00961565226316452, 0.010822802782058716, 0.013123787939548492, 0.014242202043533325, 0.0143156498670578, 0.016469404101371765, 0.017666727304458618, 0.01773911714553833, 0.0199756920337677, 0.0210941880941391, 0.021161124110221863, 0.02451971173286438, 0.024580076336860657, 0.02685210108757019, 0.028012827038764954, 0.030198264867067337, 0.0302925705909729, 0.03136435151100159, 0.03374280035495758, 0.03487399220466614, 0.035243816673755646, 0.037192340940237045, 0.03822284936904907, 0.04164902865886688, 0.04173608124256134, 0.04401407018303871, 0.04508155584335327, 0.047482021152973175, 0.04756556823849678, 0.050963032990694046, 0.05196474492549896, 0.055417388677597046, 0.05793146416544914, 0.05799369141459465, 0.05887940526008606, 0.05895659327507019, 0.062420234084129333, 0.06493274495005608, 0.06499008461833, 0.06935599446296692, 0.07197384163737297, 0.07201516255736351, 0.07276943325996399, 0.07283210754394531, 0.07550075277686119, 0.07975354790687561, 0.07980883121490479, 0.08257630094885826, 0.0867777168750763, 0.08682405948638916, 0.08967285975813866, 0.09323835000395775, 0.09386616945266724, 0.09735457599163055, 0.09739077091217041, 0.10092401504516602, 0.10444298386573792, 0.10447832942008972, 0.10770941898226738, 0.10803905129432678, 0.11161200702190399, 0.1151546835899353, 0.11520349979400635, 0.11875157058238983, 0.11879390478134155, 0.1222602017223835, 0.122351735830307, 0.12240418791770935, 0.12594850733876228, 0.12597402930259705, 0.12602100148797035, 0.12960633635520935, 0.1296597123146057, 0.12966342642903328, 0.13227657973766327, 0.13325360417366028, 0.1333133578300476, 0.13691483438014984, 0.1371927298605442, 0.14066261053085327, 0.14088113978505135, 0.1447291411459446, 0.14805573225021362, 0.148526418954134, 0.15170684456825256, 0.15178103744983673, 0.15225710347294807, 0.1554398238658905, 0.15609459951519966, 0.15618794038891792, 0.1592724472284317, 0.1629735231399536, 
0.16382690146565437, 0.16676269471645355, 0.16873238794505596, 0.17066434025764465, 0.17068277299404144, 0.1717144437134266, 0.17558929696679115, 0.17827065289020538, 0.17835864424705505, 0.18222273886203766, 0.18353315070271492, 0.18604370951652527, 0.18611834943294525, 0.1876586265861988, 0.18996606767177582, 0.19170701876282692, 0.19398853182792664, 0.19786442816257477, 0.19795633852481842, 0.20195159316062927, 0.2058800607919693, 0.2099103182554245, 0.2122517265379429, 0.21410366892814636, 0.21819619834423065, 0.22221362590789795, 0.22233009338378906, 0.22500130906701088, 0.2251257635653019, 0.22638091444969177, 0.23067741096019745, 0.23368822410702705, 0.2348879873752594, 0.2382080741226673, 0.2390350103378296, 0.2391497790813446, 0.24253453686833382, 0.24265171959996223, 0.2470107562839985, 0.24764248728752136, 0.24777774512767792, 0.2516774423420429, 0.256104726344347, 0.2564055472612381, 0.2607169933617115, 0.265461727976799, 0.26985861361026764, 0.2701106257736683, 0.2702729292213917, 0.274574413895607, 0.2750340588390827, 0.27919672429561615, 0.283704474568367, 0.28386808931827545, 0.28953738883137703, 0.2896753139793873, 0.29320384562015533, 0.29451676085591316, 0.295327290892601, 0.29802779853343964, 0.29818175733089447, 0.29972871020436287, 0.30290623009204865, 0.30305664241313934, 0.30486901476979256, 0.31299956142902374, 0.31518544629216194, 0.31790371239185333, 0.3205283172428608, 0.3230419009923935, 0.32595496252179146, 0.32612212374806404, 0.3282426446676254, 0.3283906430006027, 0.33146094158291817, 0.3316439874470234, 0.33365286886692047, 0.33723779395222664, 0.3390095978975296, 0.3427443392574787, 0.34853987768292427, 0.34869300201535225, 0.35457711294293404, 0.35537679493427277, 0.3604113645851612, 0.36124424636363983, 0.3665340431034565, 0.36667295172810555, 0.3727492541074753, 0.3729033060371876, 0.37888188660144806, 0.37907837703824043, 0.3792510814964771, 0.38557394221425056, 0.38573457673192024, 0.39108292758464813, 0.39911722019314766, 
0.40589402988553047, 0.40604450181126595, 0.410498782992363, 0.4106704741716385, 0.4129834659397602, 0.4131447561085224, 0.4172855168581009, 0.4202354736626148, 0.4204071946442127, 0.43538858368992805, 0.4355536885559559, 0.4432900734245777, 0.44603554904460907, 0.4461968094110489, 0.451409537345171, 0.4598204083740711, 0.46002377942204475, 0.46178819239139557, 0.46868549659848213, 0.46995367109775543, 0.4868385046720505, 0.48702501133084297, 0.4958047419786453, 0.4960057884454727, 0.5051481872797012, 0.506847757846117, 0.5148334950208664, 0.5150565356016159, 0.5174009390175343, 0.5249751061201096, 0.5283288545906544, 0.5355450958013535, 0.539984006434679, 0.5467876642942429, 0.5522958822548389, 0.5584012717008591, 0.5706631988286972, 0.5836620181798935, 0.5836880058050156, 0.5942088551819324, 0.5975865572690964, 0.6102624125778675, 0.6124880760908127, 0.6286389082670212, 0.646102175116539, 0.6471664495766163, 0.665437325835228, 0.6687244363129139, 0.687017485499382, 0.6932839937508106, 0.7115348428487778, 0.7218200154602528, 0.7219699807465076, 0.7747527211904526, 0.7749756425619125, 0.8192005604505539, 0.8194110840559006, 0.8830635994672775, 0.9217727445065975, 0.9245667457580566, 0.947742685675621, 0.9674464613199234, 0.9890814647078514, 0.9891453236341476, 0.9925699159502983]
def create_linear_map(signed=True):
    """Return a 256-entry linear quantization map.

    The map spans [-1, 1] when signed, [0, 1] otherwise.
    """
    lower = -1.0 if signed else 0.0
    return torch.linspace(lower, 1.0, 256)
def create_dynamic_map(signed=True, n=7):
    '''
    Creates the dynamic quantization map.

    The dynamic data type is made up of a dynamic exponent and
    fraction. As the exponent increases from 0 to -7 the number
    of bits available for the fraction shrinks.

    This is a generalization of the dynamic type where a certain
    number of the bits can be reserved for the linear quantization
    region (the fraction). n determines the maximum number of
    exponent bits.

    For more details see
    (8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561]
    '''
    entries = []
    # Extra entries for the case where all exponent bits are zero and no
    # indicator bit is present.
    extra_count = 2**(7-n) - 1
    if not signed:
        extra_count *= 2
    for exp in range(n):
        scale = 10**(-(n-1)+exp)
        fraction_count = 2**(exp+7-n) + 1 if signed else 2**(exp+7-n+1) + 1
        edges = torch.linspace(0.1, 1, fraction_count)
        centers = (edges[:-1] + edges[1:]) / 2.0
        entries += (scale*centers).tolist()
        if signed:
            entries += (-scale*centers).tolist()
        if extra_count > 0:
            edges = torch.linspace(0.1, 1, extra_count+1)
            centers = (edges[:-1] + edges[1:]) / 2.0
            entries += (scale*centers).tolist()
            if signed:
                entries += (-scale*centers).tolist()
    # Reserve exact codes for zero and one, then sort into a monotone map.
    entries.append(0)
    entries.append(1.0)
    entries.sort()
    return Tensor(entries)
def get_ptr(A: Tensor) -> ct.c_void_p:
    '''
    Get the ctypes pointer from a PyTorch Tensor.

    Parameters
    ----------
    A : torch.Tensor
        The PyTorch tensor, or None.

    Returns
    -------
    ctypes.c_void_p
        Pointer to the tensor's first element, or None if A is None.
    '''
    if A is None: return None
    # Bug fix: use Tensor.data_ptr() instead of storage().data_ptr().
    # The latter returns the base address of the underlying storage and
    # ignores the tensor's storage_offset, so for views/slices it pointed
    # at the wrong memory.
    else: return ct.c_void_p(A.data.data_ptr())
def estimate_quantiles(A: Tensor, out: Tensor=None, offset: float=1/512) -> Tensor:
    '''
    Estimates 256 equidistant quantiles on the input tensor eCDF.

    Uses the SRAM-Quantiles algorithm to quickly estimate 256 equidistant
    quantiles via the eCDF of the input tensor `A`. This is a fast but
    approximate algorithm and the extreme quantiles close to 0 and 1 have high
    variance / large estimation errors. These large errors can be avoided by
    using the offset variable which trims the distribution. The default offset
    value of 1/512 ensures minimum entropy encoding -- it trims
    1/512 = 0.2% from each side of the distribution. An offset value of 0.01
    to 0.02 usually has a much lower error but is not a minimum entropy
    encoding. Given an offset of 0.02, equidistant points in the range
    [0.02, 0.98] are used for the quantiles.

    Parameters
    ----------
    A : torch.Tensor
        The input tensor. Any shape. Must be float32 or float16.
    out : torch.Tensor
        Tensor with the 256 estimated quantiles.
    offset : float
        The offset for the first and last quantile from 0 and 1. Default: 1/512

    Returns
    -------
    torch.Tensor:
        The 256 quantiles in float32 datatype.

    Raises
    ------
    NotImplementedError
        If `A` is neither float32 nor float16.
    '''
    if out is None: out = torch.zeros((256,), dtype=torch.float32, device=A.device)
    if A.dtype == torch.float32:
        lib.cestimate_quantiles_fp32(get_ptr(A), get_ptr(out), ct.c_float(offset), ct.c_int(A.numel()))
    elif A.dtype == torch.float16:
        lib.cestimate_quantiles_fp16(get_ptr(A), get_ptr(out), ct.c_float(offset), ct.c_int(A.numel()))
    else:
        # Fix: `NotImplementError` is not a builtin -- raising it would itself
        # fail with NameError. The correct builtin is NotImplementedError.
        raise NotImplementedError(f'Not supported data type {A.dtype}')
    return out
def quantize_blockwise(A: Tensor, code: Tensor=None, absmax: Tensor=None, rand=None, out: Tensor=None) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
    '''
    Quantize tensor A in blocks of size 4096 values.

    Quantizes tensor A by dividing it into blocks of 4096 values.
    Then the absolute maximum value within these blocks is calculated
    for the non-linear quantization.

    Parameters
    ----------
    A : torch.Tensor
        The input tensor (float32 or float16 for the CUDA path).
    code : torch.Tensor
        The quantization map. Defaults to the lazily-created dynamic map.
    absmax : torch.Tensor
        The absmax values. Allocated (one per 4096-value block) if None.
    rand : torch.Tensor
        The tensor for stochastic rounding (CUDA only; needs >= 1024 values).
    out : torch.Tensor
        The output tensor (8-bit).

    Returns
    -------
    torch.Tensor:
        The 8-bit tensor.
    tuple(torch.Tensor, torch.Tensor):
        The quantization state (absmax, code) to undo the quantization.
    '''
    if code is None:
        # Lazily build and cache the default dynamic quantization map.
        if 'dynamic' not in name2qmap: name2qmap['dynamic'] = create_dynamic_map().to(A.device)
        code = name2qmap['dynamic']
        code = code.to(A.device)
    if absmax is None:
        # One absmax slot per 4096-value block, rounding the block count up.
        n = A.numel()
        num_blocks = 4096
        blocks = n//num_blocks
        blocks += 1 if n % num_blocks > 0 else 0
        absmax = torch.zeros((blocks,), device=A.device)
    if out is None: out = torch.zeros_like(A, dtype=torch.uint8)
    if A.device.type != 'cpu':
        if rand is not None:
            # Stochastic rounding path: random offset into the rand buffer.
            assert rand.numel() >= 1024
            rand_offset = random.randint(0, 1023)
            if A.dtype == torch.float32:
                lib.cquantize_blockwise_stochastic_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), get_ptr(rand), ct.c_int32(rand_offset), ct.c_int(A.numel()))
            elif A.dtype == torch.float16:
                lib.cquantize_blockwise_stochastic_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), get_ptr(rand), ct.c_int32(rand_offset), ct.c_int(A.numel()))
            else:
                raise ValueError(f'Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}')
        else:
            if A.dtype == torch.float32:
                lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(A.numel()))
            elif A.dtype == torch.float16:
                lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(A.numel()))
            else:
                raise ValueError(f'Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}')
    else:
        # cpu
        # CPU kernel only supports fp32 and deterministic rounding.
        assert rand is None
        lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(A.numel()))
    return out, (absmax, code)
def dequantize_blockwise(A: Tensor, quant_state: Tuple[Tensor, Tensor]=None,
                         absmax: Tensor=None, code: Tensor=None, out: Tensor=None,
                         blocksize: int=4096) -> Tensor:
    '''
    Dequantizes blockwise quantized values.

    Dequantizes the tensor A with maximum absolute values absmax in
    blocks of size 4096.

    Parameters
    ----------
    A : torch.Tensor
        The input 8-bit tensor.
    quant_state : tuple(torch.Tensor, torch.Tensor)
        Tuple of absmax and code (as returned by quantize_blockwise).
    absmax : torch.Tensor
        The absmax values (used if quant_state is None).
    code : torch.Tensor
        The quantization map (used if quant_state is None).
    out : torch.Tensor
        Dequantized output tensor (default: float32)
    blocksize : int
        Block size used for quantization. Must be 2048 or 4096. Default: 4096.

    Returns
    -------
    torch.Tensor:
        Dequantized tensor (default: float32)

    Raises
    ------
    ValueError
        If `blocksize` is unsupported or `out` has an unsupported dtype.
    '''
    assert quant_state is not None or absmax is not None
    if code is None and quant_state is None:
        # Lazily build and cache the default dynamic quantization map.
        if 'dynamic' not in name2qmap: name2qmap['dynamic'] = create_dynamic_map().to(A.device)
        code = name2qmap['dynamic']
        code = code.to(A.device)
    if out is None: out = torch.zeros_like(A, dtype=torch.float32)
    if quant_state is None: quant_state = (absmax, code)
    if blocksize not in [2048, 4096]:
        # Fix: message previously read "The blockwise of ..." and listed the
        # supported values without a separator.
        raise ValueError(f'The blocksize of {blocksize} is not supported. Supported values: [2048, 4096]')
    if A.device.type != 'cpu':
        if out.dtype == torch.float32:
            lib.cdequantize_blockwise_fp32(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
        elif out.dtype == torch.float16:
            lib.cdequantize_blockwise_fp16(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
        else:
            raise ValueError(f'Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}')
    else:
        lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_int(A.numel()))
    return out
def quantize(A: Tensor, code: Tensor=None, out: Tensor=None) -> Tensor:
    """Quantize `A` to 8-bit using a single global absmax scale.

    Returns the 8-bit tensor and the quantization state (absmax, code)
    needed by `dequantize` to invert it.
    """
    if code is None:
        if 'dynamic' not in name2qmap:
            name2qmap['dynamic'] = create_dynamic_map().to(A.device)
        code = name2qmap['dynamic'].to(A.device)
    scale = torch.abs(A).max()
    out = quantize_no_absmax(A / scale, code, out)
    return out, (scale, code)
def dequantize(A: Tensor, quant_state: Tuple[Tensor, Tensor]=None, absmax: Tensor=None, code: Tensor=None, out: Tensor=None) -> Tensor:
    """Invert `quantize`: map the 8-bit tensor back and rescale by absmax.

    Either `quant_state` or `absmax` must be given; `code` defaults to the
    cached dynamic map when neither `code` nor `quant_state` is supplied.
    """
    assert quant_state is not None or absmax is not None
    if code is None and quant_state is None:
        if 'dynamic' not in name2qmap:
            name2qmap['dynamic'] = create_dynamic_map().to(A.device)
        code = name2qmap['dynamic'].to(A.device)
    if quant_state is None:
        quant_state = (absmax, code)
    scale, cmap = quant_state
    return dequantize_no_absmax(A, cmap, out) * scale
def quantize_no_absmax(A: Tensor, code: Tensor, out: Tensor=None) -> Tensor:
    '''
    Quantizes the 32-bit input tensor `A` to 8-bit via the quantization
    map `code`, without any absmax rescaling.

    Parameters
    ----------
    A : torch.Tensor
        The input tensor.
    code : torch.Tensor
        The quantization map.
    out : torch.Tensor, optional
        The output tensor; must be of type uint8. Allocated if None.

    Returns
    -------
    torch.Tensor:
        Quantized 8-bit tensor.
    '''
    if out is None:
        out = torch.zeros_like(A, dtype=torch.uint8)
    lib.cquantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel()))
    return out
def dequantize_no_absmax(A: Tensor, code: Tensor, out: Tensor=None) -> Tensor:
    '''
    Dequantizes the 8-bit tensor `A` to a 32-bit tensor via the quantization
    map `code`, without any absmax rescaling.

    Parameters
    ----------
    A : torch.Tensor
        The 8-bit input tensor.
    code : torch.Tensor
        The quantization map.
    out : torch.Tensor, optional
        The 32-bit output tensor. Allocated if None.

    Returns
    -------
    torch.Tensor:
        32-bit output tensor.
    '''
    if out is None:
        out = torch.zeros_like(A, dtype=torch.float32)
    lib.cdequantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel()))
    return out
def optimizer_update_32bit(optimizer_name:str, g: Tensor, p: Tensor, state1: Tensor,
                beta1: float, eps: float, step: int, lr: float,
                state2: Tensor=None, beta2: float=0.0,
                weight_decay: float=0.0, gnorm_scale: float=1.0,
                unorm_vec: Tensor=None, max_unorm: float=0.0, skip_zeros=False) -> None:
    '''
    Performs an inplace optimizer update with one or two optimizer states.

    Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.

    Parameters
    ----------
    optimizer_name : str
        The name of the optimizer: {adam}.
    g : torch.Tensor
        Gradient tensor.
    p : torch.Tensor
        Parameter tensor.
    state1 : torch.Tensor
        Optimizer state 1.
    beta1 : float
        Optimizer beta1.
    eps : float
        Optimizer epsilon.
    step : int
        Current optimizer step.
    lr : float
        The learning rate.
    state2 : torch.Tensor
        Optimizer state 2.
    beta2 : float
        Optimizer beta2.
    weight_decay : float
        Weight decay.
    gnorm_scale : float
        The factor to rescale the gradient to the max clip value.
    unorm_vec : torch.Tensor
        The tensor for the update norm.
    max_unorm : float
        The maximum update norm relative to the weight norm.
    skip_zeros : bool
        Whether to skip zero-valued gradients or not (default: False).

    Raises
    ------
    NotImplementedError
        If `optimizer_name` has no registered 32-bit kernel.
    ValueError
        If the gradient/state dtype combination is unsupported.
    '''
    param_norm = 0.0
    if max_unorm > 0.0:
        # Parameter norm is only needed to bound the update norm.
        param_norm = torch.norm(p.data.float())
    if optimizer_name not in str2optimizer32bit:
        # Fix: `NotImplementError` is not a builtin -- raising it would itself
        # fail with NameError. The correct builtin is NotImplementedError.
        raise NotImplementedError(f'Optimizer not implemented: {optimizer_name}. Choices: {",".join(str2optimizer32bit.keys())}')
    if g.dtype == torch.float32 and state1.dtype == torch.float32:
        str2optimizer32bit[optimizer_name][0](get_ptr(g), get_ptr(p), get_ptr(state1), get_ptr(state2), get_ptr(unorm_vec), ct.c_float(max_unorm),
                    ct.c_float(param_norm), ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps), ct.c_float(weight_decay),
                    ct.c_int32(step), ct.c_float(lr), ct.c_float(gnorm_scale), ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
    elif g.dtype == torch.float16 and state1.dtype == torch.float32:
        str2optimizer32bit[optimizer_name][1](get_ptr(g), get_ptr(p), get_ptr(state1), get_ptr(state2), get_ptr(unorm_vec), ct.c_float(max_unorm),
                    ct.c_float(param_norm), ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps), ct.c_float(weight_decay),
                    ct.c_int32(step), ct.c_float(lr), ct.c_float(gnorm_scale), ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
    else:
        raise ValueError(f'Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}')
def optimizer_update_8bit(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, state2: Tensor,
                beta1: float, beta2: float, eps: float,
                step: int, lr: float, qmap1: Tensor, qmap2: Tensor,
                max1: Tensor, max2: Tensor, new_max1: Tensor, new_max2: Tensor,
                weight_decay: float=0.0, gnorm_scale: float=1.0,
                unorm_vec: Tensor=None, max_unorm: float=0.0) -> None:
    '''
    Performs an inplace Adam update.

    Universal Adam update for 32/8-bit state and 32/16-bit gradients/weights.
    Uses AdamW formulation if weight decay > 0.0.

    Parameters
    ----------
    optimizer_name : str
        The name of the optimizer. Choices {adam, momentum}
    g : torch.Tensor
        Gradient tensor.
    p : torch.Tensor
        Parameter tensor.
    state1 : torch.Tensor
        Adam state 1.
    state2 : torch.Tensor
        Adam state 2.
    beta1 : float
        Adam beta1.
    beta2 : float
        Adam beta2.
    eps : float
        Adam epsilon.
    step : int
        Current optimizer step.
    lr : float
        The learning rate.
    qmap1 : torch.Tensor
        Quantization map for first Adam state.
    qmap2 : torch.Tensor
        Quantization map for second Adam state.
    max1 : torch.Tensor
        Max value for first Adam state update.
    max2 : torch.Tensor
        Max value for second Adam state update.
    new_max1 : torch.Tensor
        Max value for the next Adam update of the first state.
    new_max2 : torch.Tensor
        Max value for the next Adam update of the second state.
    weight_decay : float
        Weight decay.
    gnorm_scale : float
        The factor to rescale the gradient to the max clip value.
    unorm_vec : torch.Tensor
        The tensor for the update norm.
    max_unorm : float
        The maximum update norm relative to the weight norm.

    Raises
    ------
    ValueError
        If the gradient/state dtype combination is unsupported
        (only fp32/fp16 gradients with uint8 states are handled).
    '''
    param_norm = 0.0
    if max_unorm > 0.0:
        # Parameter norm is only needed to bound the update norm.
        param_norm = torch.norm(p.data.float())
    # Dispatch to the C kernel matching the gradient dtype; index 0 is the
    # fp32-gradient kernel, index 1 the fp16-gradient kernel.
    if g.dtype == torch.float32 and state1.dtype == torch.uint8:
        str2optimizer8bit[optimizer_name][0](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
                    get_ptr(unorm_vec), ct.c_float(max_unorm), ct.c_float(param_norm),
                    ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
                    ct.c_int32(step), ct.c_float(lr),
                    get_ptr(qmap1), get_ptr(qmap2),
                    get_ptr(max1), get_ptr(max2), get_ptr(new_max1), get_ptr(new_max2),
                    ct.c_float(weight_decay),ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
    elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
        str2optimizer8bit[optimizer_name][1](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
                    get_ptr(unorm_vec), ct.c_float(max_unorm), ct.c_float(param_norm),
                    ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
                    ct.c_int32(step), ct.c_float(lr),
                    get_ptr(qmap1), get_ptr(qmap2),
                    get_ptr(max1), get_ptr(max2), get_ptr(new_max1), get_ptr(new_max2),
                    ct.c_float(weight_decay),ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
    else:
        raise ValueError(f'Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}')
def optimizer_update_8bit_blockwise(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, state2: Tensor,
                beta1: float, beta2: float, eps: float,
                step: int, lr: float, qmap1: Tensor, qmap2: Tensor,
                absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0,
                skip_zeros=False) -> None:
    '''
    Performs an inplace optimizer update with blockwise 8-bit states.

    Like `optimizer_update_8bit`, but the 8-bit states are quantized
    blockwise with per-block absmax values (`absmax1`, `absmax2`) instead
    of global max/new_max tensors, and no update-norm clipping is applied.

    Parameters
    ----------
    optimizer_name : str
        Key into str2optimizer8bit_blockwise selecting the kernel pair.
    g, p : torch.Tensor
        Gradient and parameter tensors (updated in place).
    state1, state2 : torch.Tensor
        8-bit (uint8) optimizer state tensors.
    beta1, beta2, eps, step, lr : optimizer hyperparameters.
    qmap1, qmap2 : torch.Tensor
        Quantization maps for the two states.
    absmax1, absmax2 : torch.Tensor
        Per-block absmax values for the two states.
    weight_decay : float
        Weight decay (default 0.0).
    gnorm_scale : float
        The factor to rescale the gradient to the max clip value.
    skip_zeros : bool
        Whether to skip zero-valued gradients (default: False).

    Raises
    ------
    ValueError
        If the gradient/state dtype combination is unsupported.
    '''
    # Index 0 is the fp32-gradient kernel, index 1 the fp16-gradient kernel.
    if g.dtype == torch.float32 and state1.dtype == torch.uint8:
        str2optimizer8bit_blockwise[optimizer_name][0](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
                    ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
                    ct.c_int32(step), ct.c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
                    get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale),
                    ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
    elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
        str2optimizer8bit_blockwise[optimizer_name][1](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
                    ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
                    ct.c_int32(step), ct.c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
                    get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale),
                    ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
    else:
        raise ValueError(f'Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}')
def percentile_clipping(grad: Tensor, gnorm_vec: Tensor, step: int, percentile: int=5):
    """Applies percentile clipping

    grad: torch.Tensor
        The gradient tensor.
    gnorm_vec: torch.Tensor
        Vector of gradient norms. 100 elements expected.
    step: int
        The current optimization step (number of past gradient norms).
    percentile: int
        Index into the sorted norm history used as the clip value (default 5).

    Returns
    -------
    tuple(current_gnorm, clip_value, gnorm_scale)
        gnorm_scale is 1.0, or clip_value/current_gnorm when the current
        norm exceeds the clip value.
    """
    if grad.dtype == torch.float32:
        lib.cpercentile_clipping_g32(get_ptr(grad), get_ptr(gnorm_vec), ct.c_int32(step), ct.c_int32(grad.numel()))
    elif grad.dtype == torch.float16:
        lib.cpercentile_clipping_g16(get_ptr(grad), get_ptr(gnorm_vec), ct.c_int32(step), ct.c_int32(grad.numel()))
    else:
        raise ValueError(f'Gradient type {grad.dtype} not supported!')
    # gnorm_vec is a 100-slot ring buffer indexed by step; sqrt is taken here,
    # so the kernel presumably stores SQUARED norms -- TODO confirm.
    current_gnorm = torch.sqrt(gnorm_vec[step % 100])
    vals, idx = torch.sort(gnorm_vec)
    clip_value = torch.sqrt(vals[percentile])
    gnorm_scale = 1.0
    if current_gnorm > clip_value:
        gnorm_scale = clip_value/current_gnorm
    return current_gnorm, clip_value, gnorm_scale
def histogram_scatter_add_2d(histogram: Tensor, index1: Tensor, index2: Tensor, source: Tensor):
    '''
    Scatter-adds `source` values into the 2D `histogram` in place.

    For each i, `source[i]` is added at position (index1[i], index2[i]) of
    `histogram` by a CUDA kernel. All tensors must be on CUDA; the histogram
    and source must be float32, the indices int32.
    '''
    assert len(histogram.shape) == 2
    assert histogram.dtype == torch.float32
    assert source.dtype == torch.float32
    assert index1.dtype == torch.int32
    assert index2.dtype == torch.int32
    assert histogram.device.type == 'cuda'
    assert index1.device.type == 'cuda'
    assert index2.device.type == 'cuda'
    assert source.device.type == 'cuda'
    # The kernel needs the leading dimension to linearize (index1, index2).
    maxdim1 = ct.c_int32(histogram.shape[0])
    n = ct.c_int32(index1.numel())
    lib.chistogram_scatter_add_2d(get_ptr(histogram), get_ptr(index1), get_ptr(index2), get_ptr(source), maxdim1, n)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .modules import StableEmbedding, Embedding
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from typing import Optional
from torch import Tensor
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from bitsandbytes.optim import GlobalOptimManager
class StableEmbedding(torch.nn.Embedding):
    """Embedding layer whose output is layer-normalized and whose weight is
    always optimized with 32-bit optimizer states (registered via
    GlobalOptimManager), for more stable training of embeddings.
    """
    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
                 max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
                 sparse: bool = False, _weight: Optional[Tensor] = None) -> None:
        super(StableEmbedding, self).__init__(num_embeddings, embedding_dim, padding_idx, max_norm,
                                              norm_type, scale_grad_by_freq, sparse, _weight)
        self.norm = torch.nn.LayerNorm(embedding_dim)
        # Force 32-bit optimizer state for this weight, whatever the global
        # optimizer bit width is.
        GlobalOptimManager.get_instance().register_module_override(self, 'weight', {'optim_bits': 32})

    def reset_parameters(self) -> None:
        torch.nn.init.xavier_uniform_(self.weight)
        self._fill_padding_idx_with_zero()

    # NOTE: redefinition of torch.nn.Embedding._fill_padding_idx_with_zero to
    # keep the layer compatible with PyTorch < 1.9. If upstream changes this
    # method, this copy needs to change too, which is cumbersome, but it
    # ensures compatibility with previous PyTorch releases.
    def _fill_padding_idx_with_zero(self) -> None:
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        emb = F.embedding(input, self.weight, self.padding_idx, self.max_norm,
                          self.norm_type, self.scale_grad_by_freq, self.sparse)
        return self.norm(emb)
class Embedding(torch.nn.Embedding):
    """Drop-in torch.nn.Embedding whose weight is always optimized with
    32-bit optimizer states (registered via GlobalOptimManager).
    """
    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
                 max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
                 sparse: bool = False, _weight: Optional[Tensor] = None) -> None:
        super(Embedding, self).__init__(num_embeddings, embedding_dim, padding_idx, max_norm,
                                        norm_type, scale_grad_by_freq, sparse, _weight)
        # Force 32-bit optimizer state for this weight, whatever the global
        # optimizer bit width is.
        GlobalOptimManager.get_instance().register_module_override(self, 'weight', {'optim_bits': 32})

    def reset_parameters(self) -> None:
        torch.nn.init.xavier_uniform_(self.weight)
        self._fill_padding_idx_with_zero()

    # NOTE: redefinition of torch.nn.Embedding._fill_padding_idx_with_zero to
    # keep the layer compatible with PyTorch < 1.9. If upstream changes this
    # method, this copy needs to change too, which is cumbersome, but it
    # ensures compatibility with previous PyTorch releases.
    def _fill_padding_idx_with_zero(self) -> None:
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        emb = F.embedding(input, self.weight, self.padding_idx, self.max_norm,
                          self.norm_type, self.scale_grad_by_freq, self.sparse)
        return emb
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from bitsandbytes.optim.optimizer import Optimizer1State
class RMSprop(Optimizer1State):
    """RMSprop with configurable optimizer-state bit width.

    alpha==0 and centered RMSprop are rejected as unsupported.
    """
    def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, optim_bits=32, args=None,
            min_8bit_size=4096, percentile_clipping=100, block_wise=True):
        # Fix: `NotImplementError` is not a builtin; raising it would fail
        # with NameError. The correct builtin is NotImplementedError.
        if alpha == 0:
            raise NotImplementedError('RMSprop with alpha==0.0 is not supported!')
        if centered:
            raise NotImplementedError('Centered RMSprop is not supported!')
        super(RMSprop, self).__init__('rmsprop', params, lr, (alpha, momentum), eps,
                weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise)
class RMSprop8bit(Optimizer1State):
    """RMSprop with 8-bit optimizer states.

    alpha==0 and centered RMSprop are rejected as unsupported.
    """
    def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, args=None,
            min_8bit_size=4096, percentile_clipping=100, block_wise=True):
        # Fix: `NotImplementError` is not a builtin; raising it would fail
        # with NameError. The correct builtin is NotImplementedError.
        if alpha == 0:
            raise NotImplementedError('RMSprop with alpha==0.0 is not supported!')
        if centered:
            raise NotImplementedError('Centered RMSprop is not supported!')
        super(RMSprop8bit, self).__init__('rmsprop', params, lr, (alpha, momentum), eps,
                weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise)
class RMSprop32bit(Optimizer1State):
    """RMSprop with 32-bit optimizer states.

    alpha==0 and centered RMSprop are rejected as unsupported.
    """
    def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, args=None,
            min_8bit_size=4096, percentile_clipping=100, block_wise=True):
        # Fix: `NotImplementError` is not a builtin; raising it would fail
        # with NameError. The correct builtin is NotImplementedError.
        if alpha == 0:
            raise NotImplementedError('RMSprop with alpha==0.0 is not supported!')
        if centered:
            raise NotImplementedError('Centered RMSprop is not supported!')
        super(RMSprop32bit, self).__init__('rmsprop', params, lr, (alpha, momentum), eps,
                weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from bitsandbytes.optim.optimizer import Optimizer2State
class LAMB(Optimizer2State):
    """LAMB optimizer with configurable optimizer-state bit width.

    NOTE(review): `bias_correction`, `amsgrad` and `adam_w_mode` are accepted
    but not forwarded to Optimizer2State -- presumably unimplemented; kept
    only for interface compatibility.
    """
    def __init__(self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8,
            weight_decay=0, amsgrad=False, adam_w_mode=True, optim_bits=32, args=None,
            min_8bit_size=4096, percentile_clipping=100, block_wise=False, max_unorm=1.0):
        # Fix: the caller-supplied max_unorm was previously ignored (a literal
        # 1.0 was always passed to the base class).
        super(LAMB, self).__init__('lamb', params, lr, betas, eps,
                weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, max_unorm=max_unorm)
class LAMB8bit(Optimizer2State):
    """LAMB optimizer with 8-bit optimizer states.

    NOTE(review): `bias_correction`, `amsgrad` and `adam_w_mode` are accepted
    but not forwarded to Optimizer2State -- presumably unimplemented; kept
    only for interface compatibility.
    """
    def __init__(self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8,
            weight_decay=0, amsgrad=False, adam_w_mode=True, args=None,
            min_8bit_size=4096, percentile_clipping=100, block_wise=False, max_unorm=1.0):
        # Fix: the caller-supplied max_unorm was previously ignored (a literal
        # 1.0 was always passed to the base class).
        super(LAMB8bit, self).__init__('lamb', params, lr, betas, eps,
                weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, max_unorm=max_unorm)
class LAMB32bit(Optimizer2State):
    """LAMB optimizer with 32-bit optimizer states.

    NOTE(review): `bias_correction`, `amsgrad` and `adam_w_mode` are accepted
    but not forwarded to Optimizer2State -- presumably unimplemented; kept
    only for interface compatibility.
    """
    def __init__(self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8,
            weight_decay=0, amsgrad=False, adam_w_mode=True, args=None,
            min_8bit_size=4096, percentile_clipping=100, block_wise=False, max_unorm=1.0):
        # Fix: the caller-supplied max_unorm was previously ignored (a literal
        # 1.0 was always passed to the base class).
        super(LAMB32bit, self).__init__('lamb', params, lr, betas, eps,
                weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, max_unorm=max_unorm)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.