"""Version utilities (mirrors `datasets.utils.version`)."""
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version `MAJOR.MINOR.PATCH`, comparable to other versions and to plain strings."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from a version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Convert a (major, minor, patch) tuple back to an 'x.y.z' string."""
    return ".".join(str(v) for v in version_tuple)
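
# --- Usage sketch (not part of the original module; illustrative only) ---
# Version compares against both Version objects and plain "x.y.z" strings:
if __name__ == "__main__":
    v = Version("1.0.0")
    assert v == "1.0.0"
    assert v < "1.2.0"
    assert Version("2.0.0") > v
    print(repr(v), v.tuple)  # 1.0.0 (1, 0, 0)
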
"""Helpers to generate dummy examples and benchmark writing them with `ArrowWriter`."""
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator: run `func` and return its wall-clock duration in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Generate `num_examples` random examples matching the given features."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write random examples to `dataset_path` with ArrowWriter and load them back as a Dataset."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
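
# --- Usage sketch (not part of the original module) ---
# The feature names, dtypes, and output path below are illustrative assumptions:
if __name__ == "__main__":
    feats = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
    ds = generate_example_dataset("/tmp/dummy.arrow", feats, num_examples=10)
    print(ds)  # Dataset with 10 rows and the two columns above
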
"""Formatter registry (mirrors `datasets.formatting.__init__`): maps format-type
names and their aliases to the Formatter classes used by `Dataset.set_format`."""
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """Register a Formatter class under a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when the format type of an uninstalled backend is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve an alias to its main format-type name; unknown names pass through unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered under `format_type` (or one of its aliases)."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
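
# Usage sketch (comment-only, since this module only works inside the `datasets`
# package; the alias resolution shown follows the registration calls above):
#
#   from datasets.formatting import get_formatter
#   formatter = get_formatter("np")      # "np" resolves to "numpy" via the alias table
#   formatter = get_formatter("torch")   # raises the registered error if torch is absent
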
"""Manim scene from the `accelerate` big-model-inference docs: animates loading a
checkpoint shard and moving its weights into CPU memory. Identifier names and the
direction constants (UP/RIGHT/DOWN) are reconstructed from the published animation."""
from manim import *


class Stage2(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            # Grow a blue "weight" square on the checkpoint, then move a copy into a CPU slot.
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
"""Fast tokenization class for CodeGen (mirrors
`transformers.models.codegen.tokenization_codegen_fast`)."""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np

from ...utils import is_tf_available, is_torch_available, logging


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        # Make sure the backend pre-tokenizer's add_prefix_space setting matches the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        # Cut the completion at a second top-level `print` or `def`, if present.
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
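
# Usage sketch (comment-only; the truncation patterns are illustrative -- they cut
# a decoded completion at the first match of any supplied regex, on top of the
# second-`print`/second-`def` rules implemented in `truncate` above):
#
#   tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tok("def hello_world():")["input_ids"]
#   tok.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])
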
"""Create a (usually smaller) student model for distillation by copying alternating
layers from a seq2seq teacher (from the transformers seq2seq-distillation example)."""
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to supervise.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Pick which teacher layers should supervise the student layers."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student with `e` encoder layers and `d` decoder layers, initialized from the teacher's weights."""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save.
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
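
# CLI sketch (fire exposes the function's arguments as flags; the script name,
# teacher model, and output directory below are illustrative). Create a student
# with 12 encoder and 3 decoder layers from a BART teacher:
#
#   python make_student.py facebook/bart-large-cnn student_dir --e 12 --d 3
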
"""Nightly integration tests for the ONNX Stable Diffusion inpainting pipeline."""
import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
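
# Note (not in the original file): these integration tests need onnxruntime-gpu and
# a CUDA device, and @nightly skips them unless the RUN_NIGHTLY env var is set.
# A typical invocation (the test path is illustrative):
#
#   RUN_NIGHTLY=1 python -m pytest tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py
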
"""
Project Euler problem 187: https://projecteuler.net/problem=187

Count the composite integers below 10**8 that have exactly two
(not necessarily distinct) prime factors.
"""
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all prime numbers below `max_number`, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count semiprimes below `max_number` with a two-pointer sweep over the primes:
    for each smallest factor prime_numbers[left], every prime up to
    prime_numbers[right] yields a product below max_number."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
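
# Sanity check (not in the original file): Project Euler 187 lists exactly ten such
# composites below 30, namely 4, 6, 9, 10, 14, 15, 21, 22, 25, 26, so:
#
#   >>> solution(30)
#   10
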
"""Table Transformer model configuration (mirrors
`transformers.models.table_transformer.configuration_table_transformer`)."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
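
# Usage sketch (comment-only; this module only works inside the transformers
# package). The attribute_map above redirects common names to DETR-style ones:
#
#   config = TableTransformerConfig()
#   config.hidden_size            # 256, an alias for config.d_model
#   config.num_attention_heads    # 8, an alias for config.encoder_attention_heads
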
"""Tokenization classes for XLM-RoBERTa (SentencePiece-based slow tokenizer)."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}


class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: `<s> A </s>` for one sequence, `<s> A </s></s> B </s>` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLM-RoBERTa does not use token type ids, so this returns a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
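
# Usage sketch (comment-only; requires the transformers package and downloads the
# sentencepiece model from the Hub):
#
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   enc = tok("Hello world!")
#   enc["input_ids"][0], enc["input_ids"][-1]   # (0, 2): the <s> and </s> ids
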
"""Distributed evaluation for seq2seq checkpoints: each rank generates on its shard of
the dataset and writes rank_<i>_output.json; rank 0 gathers, scores, and saves."""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    """Run generation on this rank's shard of the data and save the predictions to a json file."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas


def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
        # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)


def combine_partial_results(partial_results) -> List:
    """Concatenate the per-rank results into one list, sorted by example id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable


if __name__ == "__main__":
    # Usage for MT:
    # python run_distributed_eval.py --model_name sshleifer/opus-mt-en-ro --save_dir foo --data_dir wmt_en_ro --type_path test --fp16
    run_generate()
"""Processor class for InstructBLIP: wraps a BLIP image processor, a language-model
tokenizer, and a Q-Former tokenizer. Identifier names are restored from the obfuscated
original; the tail of `from_pretrained`, cut off in the source, is completed as an
assumption to mirror the `save_pretrained` counterpart above it."""
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            # Encode the text twice: once with the LM tokenizer, once with the Q-Former tokenizer.
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        # Assumed completion (the original was truncated here): load the remaining
        # attributes the standard ProcessorMixin way and append the Q-Former tokenizer.
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
UpperCAmelCase__ : Tuple = cls._get_arguments_from_pretrained(__UpperCamelCase , **__UpperCamelCase )
args.append(__UpperCamelCase )
return cls(*__UpperCamelCase )
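# Minimal usage sketch (checkpoint id and public class name are assumptions; this
# processor pairs a BlipImageProcessor and a main tokenizer with a Q-Former tokenizer):
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=image, text="Describe the image.", return_tensors="pt")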
| 700 |
"""simple docstring"""
from timeit import timeit
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Tuple = 0
while number:
number &= number - 1
result += 1
return result
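# `number &= number - 1` clears the lowest set bit, so the loop above runs once per
# set bit: 25 = 0b11001 -> 0b11000 -> 0b10000 -> 0 gives 3.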
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Union[str, Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
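# The modulo/shift variant above inspects every bit (O(total bits)), while Brian
# Kernighan's loop above it is O(set bits); both return 3 for input 25.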
def a__ ( ):
'''simple docstring'''
def do_benchmark(lowerCAmelCase : int ) -> None:
UpperCAmelCase__ : Dict = "import __main__ as z"
print(F"Benchmark when {number = }:" )
print(F"{get_set_bits_count_using_modulo_operator(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Tuple = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=lowerCAmelCase )
print(F"timeit() runs in {timing} seconds" )
print(F"{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Any = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=lowerCAmelCase , )
print(F"timeit() runs in {timing} seconds" )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 660 | 0 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = (DDIMParallelScheduler,)
_A = (('eta', 0.0), ('num_inference_steps', 50))
def lowerCAmelCase__ ( self , **__UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__UpperCamelCase )
return config
def lowerCAmelCase__ ( self , **__UpperCamelCase )-> int:
UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase__ : Union[str, Any] = self.get_scheduler_config(**__UpperCamelCase )
UpperCAmelCase__ : List[str] = scheduler_class(**__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = 10, 0.0
UpperCAmelCase__ : Optional[int] = self.dummy_model()
UpperCAmelCase__ : int = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for t in scheduler.timesteps:
UpperCAmelCase__ : List[Any] = model(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Any = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def lowerCAmelCase__ ( self )-> str:
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> int:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.scheduler_classes[0]
UpperCAmelCase__ : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def lowerCAmelCase__ ( self )-> List[Any]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[int]:
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Tuple:
self.check_over_configs(thresholding=__UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , )
def lowerCAmelCase__ ( self )-> Optional[Any]:
for t in [1, 10, 49]:
self.check_over_forward(time_step=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=__UpperCamelCase , num_inference_steps=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[Any]:
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__UpperCamelCase , eta=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase__ : Dict = self.get_scheduler_config()
UpperCAmelCase__ : List[str] = scheduler_class(**__UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_4771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_2460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1E-5
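        # The reference values follow the DDIM posterior variance:
        # sigma_t^2 = ((1 - alpha_bar_prev) / (1 - alpha_bar_t)) * (1 - alpha_bar_t / alpha_bar_prev)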
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase__ : List[str] = self.get_scheduler_config()
UpperCAmelCase__ : str = scheduler_class(**__UpperCamelCase )
UpperCAmelCase__ : Dict = 10, 0.0
scheduler.set_timesteps(__UpperCamelCase )
UpperCAmelCase__ : str = self.dummy_model()
UpperCAmelCase__ : Tuple = self.dummy_sample_deter
UpperCAmelCase__ : Dict = self.dummy_sample_deter + 0.1
UpperCAmelCase__ : Optional[Any] = self.dummy_sample_deter - 0.1
UpperCAmelCase__ : Dict = samplea.shape[0]
UpperCAmelCase__ : Optional[int] = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCAmelCase__ : Any = torch.arange(__UpperCamelCase )[0:3, None].repeat(1 , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCAmelCase__ : Tuple = scheduler.batch_step_no_noise(__UpperCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __UpperCamelCase )
UpperCAmelCase__ : Tuple = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.full_loop()
UpperCAmelCase__ : Tuple = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : Any = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.22_3967 ) < 1E-3
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : str = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase__ : List[str] = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : List[Any] = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def lowerCAmelCase__ ( self )-> List[Any]:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ : Tuple = self.full_loop(set_alpha_to_one=__UpperCamelCase , beta_start=0.01 )
UpperCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ : str = self.full_loop(set_alpha_to_one=__UpperCamelCase , beta_start=0.01 )
UpperCAmelCase__ : Optional[int] = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : int = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
| 701 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowercase ( unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[Any] = load_tool("text-classification" )
self.tool.setup()
UpperCAmelCase__ : List[str] = load_tool("text-classification" , remote=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Any = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : str = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
| 660 | 0 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _lowercase :
'''simple docstring'''
@staticmethod
def lowerCAmelCase__ ( *__UpperCamelCase , **__UpperCamelCase )-> str:
pass
def a__ ( lowerCAmelCase : Image ):
'''simple docstring'''
    UpperCAmelCase__ : Tuple = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def a__ ( lowerCAmelCase : Image ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = np.array(lowerCAmelCase )
UpperCAmelCase__ : List[str] = npimg.shape
return {"hash": hashimage(lowerCAmelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
_A = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
_A = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCAmelCase__ : Union[str, Any] = MaskGenerationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> int:
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@slow
@require_torch
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Union[str, Any] = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
UpperCAmelCase__ : Any = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=2_56 )
# Shortening by hashing
UpperCAmelCase__ : Dict = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(__UpperCamelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_80, 6_40)}, "scores": 0.9967},
{"mask": {"hash": "453c7844bd", "shape": (4_80, 6_40)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (4_80, 6_40)}, "scores": 0.9909},
{"mask": {"hash": "64033ddc3f", "shape": (4_80, 6_40)}, "scores": 0.9879},
{"mask": {"hash": "801064ff79", "shape": (4_80, 6_40)}, "scores": 0.9834},
{"mask": {"hash": "6172f276ef", "shape": (4_80, 6_40)}, "scores": 0.9716},
{"mask": {"hash": "b49e60e084", "shape": (4_80, 6_40)}, "scores": 0.9612},
{"mask": {"hash": "a811e775fd", "shape": (4_80, 6_40)}, "scores": 0.9599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_80, 6_40)}, "scores": 0.9552},
{"mask": {"hash": "9d8257e080", "shape": (4_80, 6_40)}, "scores": 0.9532},
{"mask": {"hash": "32de6454a8", "shape": (4_80, 6_40)}, "scores": 0.9516},
{"mask": {"hash": "af3d4af2c8", "shape": (4_80, 6_40)}, "scores": 0.9499},
{"mask": {"hash": "3c6db475fb", "shape": (4_80, 6_40)}, "scores": 0.9483},
{"mask": {"hash": "c290813fb9", "shape": (4_80, 6_40)}, "scores": 0.9464},
{"mask": {"hash": "b6f0b8f606", "shape": (4_80, 6_40)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (4_80, 6_40)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (4_80, 6_40)}, "scores": 0.9408},
{"mask": {"hash": "efb6cab859", "shape": (4_80, 6_40)}, "scores": 0.9335},
{"mask": {"hash": "1ff2eafb30", "shape": (4_80, 6_40)}, "scores": 0.9326},
{"mask": {"hash": "788b798e24", "shape": (4_80, 6_40)}, "scores": 0.9262},
{"mask": {"hash": "abea804f0e", "shape": (4_80, 6_40)}, "scores": 0.8999},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_80, 6_40)}, "scores": 0.8986},
{"mask": {"hash": "cd24047c8a", "shape": (4_80, 6_40)}, "scores": 0.8984},
{"mask": {"hash": "6943e6bcbd", "shape": (4_80, 6_40)}, "scores": 0.8873},
{"mask": {"hash": "b5f47c9191", "shape": (4_80, 6_40)}, "scores": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = "facebook/sam-vit-huge"
UpperCAmelCase__ : Any = pipeline("mask-generation" , model=__UpperCamelCase )
UpperCAmelCase__ : List[str] = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=2_56 )
# Shortening by hashing
UpperCAmelCase__ : Optional[Any] = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(__UpperCamelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.0210},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0053},
] , )
| 702 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) )
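# Example: ([1, 1], [2, 2]) -> |1 - 2| + |1 - 2| = 2.0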
def a__ ( lowerCAmelCase : list[float] ):
'''simple docstring'''
if point:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
for item in point:
if not isinstance(lowerCAmelCase , (int, float) ):
UpperCAmelCase__ : Tuple = (
"Expected a list of numbers as input, found "
F"{type(lowerCAmelCase ).__name__}"
)
raise TypeError(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
raise TypeError(lowerCAmelCase )
else:
raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 0 |
def a__ ( lowerCAmelCase : int = 200 ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = [1, 2, 5, 10, 20, 50, 100, 200]
UpperCAmelCase__ : List[str] = [0] * (pence + 1)
UpperCAmelCase__ : Optional[Any] = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(lowerCAmelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
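# Example: solution(5) == 4, via {5}, {2, 2, 1}, {2, 1, 1, 1} and {1, 1, 1, 1, 1}.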
if __name__ == "__main__":
assert solution(200) == 73_682
| 703 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
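# Example: is_prime(29) tests only i = 5 (and i + 2 = 7), since
# range(5, int(sqrt(29)) + 1, 6) yields a single step, and returns True.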
def a__ ( lowerCAmelCase : int = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase__ : List[str] = int(lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : str = 2
while len(lowerCAmelCase ) < nth:
if is_prime(lowerCAmelCase ):
primes.append(lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase ) - 1]
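# Example: solution(6) == 13; the first six primes are 2, 3, 5, 7, 11, 13.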
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
A__ : int = logging.getLogger(__name__)
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = '''token-classification'''
def __init__( self , __UpperCamelCase )-> Union[str, Any]:
if type(__UpperCamelCase ) == dict:
UpperCAmelCase__ : int = Namespace(**__UpperCamelCase )
UpperCAmelCase__ : Dict = import_module("tasks" )
try:
UpperCAmelCase__ : Optional[int] = getattr(__UpperCamelCase , hparams.task_type )
UpperCAmelCase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
UpperCAmelCase__ : List[Any] = self.token_classification_task.get_labels(hparams.labels )
UpperCAmelCase__ : Optional[int] = CrossEntropyLoss().ignore_index
super().__init__(__UpperCamelCase , len(self.labels ) , self.mode )
def lowerCAmelCase__ ( self , **__UpperCamelCase )-> Dict:
return self.model(**__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase__ : Any = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase__ : Union[str, Any] = self(**__UpperCamelCase )
UpperCAmelCase__ : List[str] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : str = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase__ : Any = self._feature_file(__UpperCamelCase )
if os.path.exists(__UpperCamelCase ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , __UpperCamelCase )
UpperCAmelCase__ : str = torch.load(__UpperCamelCase )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
UpperCAmelCase__ : List[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = self.token_classification_task.convert_examples_to_features(
__UpperCamelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__UpperCamelCase , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , __UpperCamelCase )
torch.save(__UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False )-> DataLoader:
UpperCAmelCase__ : Optional[Any] = self._feature_file(__UpperCamelCase )
logger.info("Loading features from cached file %s" , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.load(__UpperCamelCase )
UpperCAmelCase__ : Tuple = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase__ : Optional[int] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase__ : str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase__ : Optional[Any] = torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK: placeholder token_type_ids; this fallback will be removed soon
UpperCAmelCase__ : Optional[int] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , batch_size=__UpperCamelCase )
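    # Batches from this loader unpack as (input_ids, attention_mask, token_type_ids,
    # label_ids), matching the batch[0..3] indexing in the step methods above.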
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> str:
"""Compute validation""" ""
UpperCAmelCase__ : Tuple = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase__ : List[Any] = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase__ : Dict = self(**__UpperCamelCase )
UpperCAmelCase__ : str = outputs[:2]
UpperCAmelCase__ : str = logits.detach().cpu().numpy()
UpperCAmelCase__ : Optional[int] = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : List[Any] = torch.stack([x["val_loss"] for x in outputs] ).mean()
UpperCAmelCase__ : Optional[Any] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
UpperCAmelCase__ : int = np.argmax(__UpperCamelCase , axis=2 )
UpperCAmelCase__ : Any = np.concatenate([x["target"] for x in outputs] , axis=0 )
UpperCAmelCase__ : Dict = dict(enumerate(self.labels ) )
UpperCAmelCase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase__ : List[Any] = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(__UpperCamelCase , __UpperCamelCase ),
"precision": precision_score(__UpperCamelCase , __UpperCamelCase ),
"recall": recall_score(__UpperCamelCase , __UpperCamelCase ),
"f1": fa_score(__UpperCamelCase , __UpperCamelCase ),
}
UpperCAmelCase__ : Union[str, Any] = dict(results.items() )
UpperCAmelCase__ : Dict = results
return ret, preds_list, out_label_list
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Any:
# when stable
UpperCAmelCase__ : Optional[Any] = self._eval_end(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase__ : List[str] = self._eval_end(__UpperCamelCase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase__ : List[str] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCAmelCase__ ( __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
# Add NER specific options
BaseTransformer.add_model_specific_args(__UpperCamelCase , __UpperCamelCase )
parser.add_argument(
"--task_type" , default="NER" , type=__UpperCamelCase , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=__UpperCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=__UpperCamelCase , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=__UpperCamelCase , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
A__ : Optional[Any] = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
A__ : str = NERTransformer.add_model_specific_args(parser, os.getcwd())
A__ : Dict = parser.parse_args()
A__ : Union[str, Any] = NERTransformer(args)
A__ : str = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
A__ : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
A__ : str = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 704 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
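        # e.g. with the defaults image_size=30, patch_size=2, mask_ratio=0.6:
        # 225 patches -> seq_length = ceil(0.4 * 226) = 91 visible tokens (incl. [CLS])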
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def a__ ( lowerCAmelCase : str , lowerCAmelCase : float | Decimal , lowerCAmelCase : float = 10**-10 ):
'''simple docstring'''
UpperCAmelCase__ : str = a
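    # Iterate the Newton-Raphson update x_{n+1} = x_n - f(x_n) / f'(x_n) until |f(x)| < precision.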
while True:
UpperCAmelCase__ : Tuple = Decimal(lowerCAmelCase ) - (
Decimal(eval(lowerCAmelCase ) ) / Decimal(eval(str(diff(lowerCAmelCase ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(lowerCAmelCase ) ) < precision: # noqa: S307
return float(lowerCAmelCase )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
    # Find value of e (the root of log(x) - 1 = 0)
print(f"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 705 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
    def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.float32 , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
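        # e.g. num_train_timesteps=1000 with num_inference_steps=50 gives step_ratio=20
        # and timesteps [980, 960, ..., 20, 0]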
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
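        # i.e. mu_t = c_0 * x_0_hat + c_t * x_t with
        # c_0 = sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t) and
        # c_t = sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)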
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
| 660 | 0 |
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'dandelin/vilt-b32-finetuned-vqa'
_A = (
'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
'image containing the information, as well as a `question` which should be the question in English. It '
'returns a text that is the answer to the question.'
)
_A = 'image_qa'
_A = AutoProcessor
_A = AutoModelForVisualQuestionAnswering
_A = ['image', 'text']
_A = ['text']
def __init__( self , *__UpperCamelCase , **__UpperCamelCase )-> List[str]:
requires_backends(self , ["vision"] )
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Any:
return self.pre_processor(__UpperCamelCase , __UpperCamelCase , return_tensors="pt" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tuple:
with torch.no_grad():
return self.model(**__UpperCamelCase ).logits
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Optional[int] = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
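# Minimal usage sketch (variable names are illustrative; the tool takes a PIL image
# and an English question and returns the predicted answer string):
# tool = ImageQuestionAnsweringTool()
# answer = tool(image=image, text="What is in the picture?")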
| 706 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 0 |
from __future__ import annotations
from typing import Any
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> None:
UpperCAmelCase__ : Any = num_of_nodes
UpperCAmelCase__ : list[list[int]] = []
UpperCAmelCase__ : dict[int, int] = {}
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> None:
self.m_edges.append([u_node, v_node, weight] )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
UpperCAmelCase__ : List[Any] = self.find_component(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> None:
if component_size[u_node] <= component_size[v_node]:
UpperCAmelCase__ : str = v_node
component_size[v_node] += component_size[u_node]
self.set_component(__UpperCamelCase )
elif component_size[u_node] >= component_size[v_node]:
UpperCAmelCase__ : str = self.find_component(__UpperCamelCase )
component_size[u_node] += component_size[v_node]
self.set_component(__UpperCamelCase )
def lowerCAmelCase__ ( self )-> None:
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
UpperCAmelCase__ : Tuple = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
UpperCAmelCase__ : str = edge
UpperCAmelCase__ : List[Any] = self.m_component[u]
UpperCAmelCase__ : Tuple = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
UpperCAmelCase__ : Dict = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : str = edge
UpperCAmelCase__ : Any = self.m_component[u]
UpperCAmelCase__ : List[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
print(F"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
UpperCAmelCase__ : Union[str, Any] = [-1] * self.m_num_of_nodes
print(F"The total weight of the minimal spanning tree is: {mst_weight}" )
def a__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
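

# A minimal, self-contained sketch of the Boruvka flow the class above encodes.
# The class's method names are mangled (they collide), so this standalone
# function restates the algorithm with readable names; it assumes a connected
# graph and is illustrative rather than a drop-in replacement.
def boruvka_mst_weight(num_nodes: int, edges: list[list[int]]) -> int:
    component = list(range(num_nodes))  # parent pointer per node

    def find(u: int) -> int:
        while component[u] != u:
            u = component[u]
        return u

    mst_weight = 0
    num_components = num_nodes
    while num_components > 1:
        cheapest: list = [-1] * num_nodes  # cheapest outgoing edge per component root
        for u, v, w in edges:
            cu, cv = find(u), find(v)
            if cu != cv:
                for c in (cu, cv):
                    if cheapest[c] == -1 or cheapest[c][2] > w:
                        cheapest[c] = [u, v, w]
        for edge in cheapest:
            if edge != -1:
                u, v, w = edge
                cu, cv = find(u), find(v)
                if cu != cv:  # skip edges whose endpoints were merged earlier this round
                    mst_weight += w
                    component[cv] = cu
                    num_components -= 1
    return mst_weight


# e.g. boruvka_mst_weight(4, [[0, 1, 1], [1, 2, 2], [2, 3, 3]]) == 6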
| 707 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def make_batched(videos ):
    '''simple docstring'''
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos

    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]

    elif is_valid_image(videos ):
        return [[videos]]

    raise ValueError(F"Could not make batched video from {videos}" )
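

# Illustrative: the three layouts make_batched normalizes (img = any valid image):
#   make_batched(img)                -> [[img]]          a single frame
#   make_batched([img, img])         -> [[img, img]]     one video
#   make_batched([[img], [img]])     -> [[img], [img]]   already batched, returned as-is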
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
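

# Hedged usage sketch for the video processor above (the class name is mangled,
# so `processor` stands in for an instance). With the default 224x224 crop, an
# 8-frame video yields pixel_values of shape (1, 8, 3, 224, 224):
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   inputs = processor(video, return_tensors="np")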
| 660 | 0 |
"""simple docstring"""
def harmonic_series(n_term: str ):
    '''simple docstring'''
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term ) ):
        series.append(F"1/{temp + 1}" if series else "1" )
    return series


if __name__ == "__main__":
    nth_term = input("""Enter the last number (nth term) of the Harmonic Series""")
    print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
    print(harmonic_series(nth_term))
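    # Illustrative: harmonic_series("3") -> ['1', '1/2', '1/3']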
| 708 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
    if not isinstance(lowerCAmelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
    if lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
return bin(lowerCAmelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
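
    # Quick illustrative check: 25 == 0b11001 has three set bits.
    assert a__(25 ) == 3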
| 660 | 0 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
A__ : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class _lowercase ( datasets.BuilderConfig ):
'''simple docstring'''
_A = 1_0000
_A = None
_A = None
class _lowercase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
_A = ParquetConfig
def lowerCAmelCase__ ( self )-> List[str]:
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
UpperCAmelCase__ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__UpperCamelCase , (str, list, tuple) ):
UpperCAmelCase__ : List[Any] = data_files
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase__ : Any = [dl_manager.iter_files(__UpperCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase__ : Any = []
for split_name, files in data_files.items():
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase__ : Any = [dl_manager.iter_files(__UpperCamelCase ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__UpperCamelCase ):
with open(__UpperCamelCase , "rb" ) as f:
UpperCAmelCase__ : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCamelCase ) )
break
splits.append(datasets.SplitGenerator(name=__UpperCamelCase , gen_kwargs={"files": files} ) )
return splits
def lowerCAmelCase__ ( self , __UpperCamelCase )-> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase__ : Union[str, Any] = table_cast(__UpperCamelCase , self.info.features.arrow_schema )
return pa_table
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : Union[str, Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCamelCase ) ):
with open(__UpperCamelCase , "rb" ) as f:
UpperCAmelCase__ : Any = pq.ParquetFile(__UpperCamelCase )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
UpperCAmelCase__ : Union[str, Any] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(__UpperCamelCase )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}" )
raise
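

# A standalone sketch of the batched-read pattern _generate_tables relies on
# (the helper name and file path here are hypothetical; pyarrow is already
# imported above):
def _iter_parquet_batches(parquet_path , batch_size=10_000 ):
    parquet_file = pq.ParquetFile(parquet_path )
    for record_batch in parquet_file.iter_batches(batch_size=batch_size ):
        yield pa.Table.from_batches([record_batch] )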
| 709 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A__ : Optional[Any] = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool , use_xla: bool ):
    '''simple docstring'''

    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )

        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int , sequence_length: int , vocab_size: int ):
    '''simple docstring'''
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
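

# Illustrative (not part of the benchmark API): random_input_ids(2, 8, 100)
# is an int32 tensor of shape (2, 8) with token ids drawn uniformly from
# [0, 100); the benchmark feeds it to the model as dummy input_ids.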
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model a few extra times (5 below) to stabilize XLA/TPU compilation before timing
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
if memory is None:
UpperCAmelCase__ : Tuple = summary.total
else:
UpperCAmelCase__ : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 660 | 0 |
"""simple docstring"""
from maths.prime_check import is_prime
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
    if not isinstance(lowerCAmelCase , int ):
        msg = F"Input value of [number={lowerCAmelCase}] must be an integer"
        raise TypeError(msg )
    if is_prime(lowerCAmelCase ) and is_prime(lowerCAmelCase + 2 ):
        return lowerCAmelCase + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
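
    # Illustrative: a__(5) returns 7 (5 and 7 are twin primes); a__(8) returns -1.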
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
A__ : List[str] = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : int = 13
UpperCAmelCase__ : Union[str, Any] = 7
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Union[str, Any] = 99
UpperCAmelCase__ : Any = 3_84
UpperCAmelCase__ : Optional[int] = 2
UpperCAmelCase__ : Tuple = 4
UpperCAmelCase__ : List[str] = 37
UpperCAmelCase__ : Union[str, Any] = "gelu"
UpperCAmelCase__ : Dict = 0.1
UpperCAmelCase__ : Optional[int] = 0.1
UpperCAmelCase__ : Optional[int] = 5_12
UpperCAmelCase__ : List[str] = 16
UpperCAmelCase__ : Union[str, Any] = 2
UpperCAmelCase__ : List[str] = 0.02
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : Dict = 4
UpperCAmelCase__ : str = 1_28
UpperCAmelCase__ : List[str] = 2
UpperCAmelCase__ : List[str] = 9
UpperCAmelCase__ : Optional[Any] = 1
UpperCAmelCase__ : Any = None
def lowerCAmelCase__ ( self )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : str = None
if self.use_labels:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : List[str] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Dict = TFConvBertModel(config=__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase__ : str = [input_ids, input_mask]
UpperCAmelCase__ : str = model(__UpperCamelCase )
UpperCAmelCase__ : str = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : Dict = TFConvBertForMaskedLM(config=__UpperCamelCase )
UpperCAmelCase__ : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : Any = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : Tuple = TFConvBertForSequenceClassification(config=__UpperCamelCase )
UpperCAmelCase__ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.num_choices
UpperCAmelCase__ : str = TFConvBertForMultipleChoice(config=__UpperCamelCase )
UpperCAmelCase__ : Any = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : List[str] = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : List[Any] = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
UpperCAmelCase__ : Dict = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.num_labels
UpperCAmelCase__ : Dict = TFConvBertForTokenClassification(config=__UpperCamelCase )
UpperCAmelCase__ : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : int = TFConvBertForQuestionAnswering(config=__UpperCamelCase )
UpperCAmelCase__ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = TFConvBertModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self )-> int:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> str:
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> int:
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> int:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def lowerCAmelCase__ ( self )-> Dict:
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Optional[int] = True
if hasattr(__UpperCamelCase , "use_cache" ):
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : int = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : Any = getattr(self.model_tester , "key_length" , __UpperCamelCase )
for model_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class(__UpperCamelCase )
UpperCAmelCase__ : str = len(model(__UpperCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase , saved_model=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = os.path.join(__UpperCamelCase , "saved_model" , "1" )
UpperCAmelCase__ : int = tf.keras.models.load_model(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = model(__UpperCamelCase )
if self.is_encoder_decoder:
UpperCAmelCase__ : Optional[int] = outputs["encoder_hidden_states"]
UpperCAmelCase__ : Tuple = outputs["encoder_attentions"]
else:
UpperCAmelCase__ : List[Any] = outputs["hidden_states"]
UpperCAmelCase__ : Union[str, Any] = outputs["attentions"]
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
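
    # Note: the attention-shape assertions above use num_attention_heads / 2
    # because ConvBERT replaces half of the self-attention heads with span-based
    # dynamic convolution when head_ratio=2, halving the classic head count.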
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[Any] = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : int = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : Union[str, Any] = getattr(self.model_tester , "key_length" , __UpperCamelCase )
UpperCAmelCase__ : Any = getattr(self.model_tester , "key_length" , __UpperCamelCase )
def check_decoder_attentions_output(__UpperCamelCase ):
UpperCAmelCase__ : str = len(__UpperCamelCase )
self.assertEqual(out_len % 2 , 0 )
UpperCAmelCase__ : Union[str, Any] = outputs.decoder_attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Optional[int] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = len(__UpperCamelCase )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
if self.is_encoder_decoder:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_decoder_attentions_output(__UpperCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Optional[Any] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
# Check attention is always last and order is fine
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Dict = model_class(__UpperCamelCase )
UpperCAmelCase__ : Any = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__UpperCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
UpperCAmelCase__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ : int = model(__UpperCamelCase )[0]
UpperCAmelCase__ : Any = [1, 6, 7_68]
self.assertEqual(output.shape , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 )
| 711 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@register_to_config
    def __init__( self , __UpperCamelCase = 7_68 , )-> Union[str, Any]:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
        self.std = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )

    # method names below restored from the upstream class (the mangled names collided)
    def to( self , torch_device = None , torch_dtype = None , )-> Any:
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self

    def scale( self , embeds )-> Union[str, Any]:
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale( self , embeds )-> List[Any]:
        embeds = (embeds * self.std) + self.mean
        return embeds
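

# Illustrative round-trip: with the default zero-mean / unit-std parameters,
# scale() followed by unscale() reproduces the input (the class name is mangled):
if __name__ == "__main__":
    normalizer = _lowercase()
    sample = torch.randn(4 , 7_68 )
    assert torch.allclose(normalizer.unscale(normalizer.scale(sample ) ) , sample , atol=1E-5 )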
| 660 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['image_processor', 'tokenizer']
_A = 'BlipImageProcessor'
_A = 'AutoTokenizer'
    def __init__( self , image_processor , tokenizer )-> List[str]:
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
def __call__( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> BatchEncoding:
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
UpperCAmelCase__ : Optional[Any] = self.tokenizer
UpperCAmelCase__ : str = self.tokenizer(
text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
return text_encoding
# add pixel_values
UpperCAmelCase__ : List[str] = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase )
if text is not None:
UpperCAmelCase__ : Any = self.tokenizer(
text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
else:
UpperCAmelCase__ : Optional[Any] = None
if text_encoding is not None:
encoding_image_processor.update(__UpperCamelCase )
return encoding_image_processor
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> str:
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> Optional[Any]:
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self )-> int:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 712 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    '''simple docstring'''
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )

    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
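

# Illustrative CLI invocation (script and file paths are hypothetical; the
# flags match the parser defined below):
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path ./models/gpt2.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch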
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 660 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 713 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 660 | 0 |
"""simple docstring"""
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))  # dictb (index -> char) inverts dicta (char -> index)


def generate_key(message: str , key: str ):
    '''simple docstring'''
    x = len(message )
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key ) == len(message ):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str , key_new: str ):
    '''simple docstring'''
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            encrypted += dictb[x]
    return encrypted


def original_text(cipher_text: str , key_new: str ):
    '''simple docstring'''
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt


def main():
    '''simple docstring'''
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message , key )
    s = cipher_text(message , key_new )
    print(F"Encrypted Text = {s}" )
    print(F"Original Text = {original_text(s , key_new )}" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 714 |
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int ):
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]


def solution(max_number: int = 10**8 ):
    '''simple docstring'''
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 0 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id , path , revision ):
    '''simple docstring'''
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}"
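

# Illustrative expansion: for repo_id="org-name/dataset-name",
# path="filename with blanks.csv", revision="v2", the expected URL is
# "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv".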
| 715 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
list(__UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) + list(__UpperCamelCase ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(__UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 660 | 0 |
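For readability, here is a minimal deobfuscated sketch of the padding helper used by the collator above. The names (padding_tensor, sequences, padding_value, padding_side, sequence_length) are my reconstruction of the renamed identifiers, and the left-padding offset is the straightforward variant rather than a byte-exact copy of the library code.

import numpy as np

def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # A tuple padding value signals 2-D entries (e.g. entity spans); a scalar signals 1-D labels.
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        kept = tensor[:sequence_length]
        if padding_side == "right":
            out_tensor[i, : len(kept)] = kept
        else:
            out_tensor[i, sequence_length - len(kept) :] = kept
    return out_tensor.tolist()

print(padding_tensor([[1, 2], [3]], -100, "right", 4))
# [[1, 2, -100, -100], [3, -100, -100, -100]]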
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : Optional[Any] = job["started_at"]
UpperCAmelCase__ : Tuple = job["completed_at"]
UpperCAmelCase__ : str = date_parser.parse(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = date_parser.parse(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = round((end_datetime - start_datetime).total_seconds() / 60.0 )
UpperCAmelCase__ : Union[str, Any] = start
UpperCAmelCase__ : Tuple = end
UpperCAmelCase__ : Optional[int] = duration_in_min
return job_info
def a__ ( lowerCAmelCase : Tuple , lowerCAmelCase : Any=None ):
'''simple docstring'''
UpperCAmelCase__ : int = None
if token is not None:
UpperCAmelCase__ : Union[str, Any] = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
UpperCAmelCase__ : Dict = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
UpperCAmelCase__ : Optional[Any] = requests.get(lowerCAmelCase , headers=lowerCAmelCase ).json()
UpperCAmelCase__ : Optional[Any] = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(lowerCAmelCase ) for job in result["jobs"]} )
UpperCAmelCase__ : int = math.ceil((result["total_count"] - 100) / 100 )
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = requests.get(url + F"&page={i + 2}" , headers=lowerCAmelCase ).json()
job_time.update({job["name"]: extract_time_from_single_job(lowerCAmelCase ) for job in result["jobs"]} )
return job_time
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
if __name__ == "__main__":
A__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
A__ : Dict = parser.parse_args()
A__ : Optional[Any] = get_job_time(args.workflow_run_id)
A__ : str = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v["duration"]}""")
| 716 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
| 660 | 0 |
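A tiny standalone check of the duration arithmetic in the job-time extractor above, assuming the renamed fields are the started_at/completed_at ISO-8601 timestamps from the GitHub API:

import dateutil.parser as date_parser

def duration_in_min(started_at, completed_at):
    # Parse the two timestamps and round the elapsed time to whole minutes.
    start = date_parser.parse(started_at)
    end = date_parser.parse(completed_at)
    return round((end - start).total_seconds() / 60.0)

print(duration_in_min("2023-01-01T10:00:00Z", "2023-01-01T10:31:00Z"))  # 31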
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase ) )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
        UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
            UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
            UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
        UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 717 |
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 660 | 0 |
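To make the layer-selection logic in the make_student script above easier to follow, here is a readable sketch with reconstructed names (pick_layers_to_copy, n_student, n_teacher); the table is a subset of the LAYERS_TO_COPY mapping from the snippet.

import warnings

LAYERS_TO_COPY = {12: {1: [0], 3: [0, 6, 11], 6: [0, 2, 4, 7, 9, 11], 12: list(range(12))}}

def pick_layers_to_copy(n_student, n_teacher):
    try:
        return LAYERS_TO_COPY[n_teacher][n_student]
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, "
                f"defaulting to first {n_student}"
            )
        return list(range(n_student))

print(pick_layers_to_copy(3, 12))  # [0, 6, 11] -- keeps the first and last teacher layers
print(pick_layers_to_copy(5, 12))  # warns, returns [0, 1, 2, 3, 4]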
"""simple docstring"""
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> None:
UpperCAmelCase__ : Any = len(__UpperCamelCase )
UpperCAmelCase__ : str = [0] * len_array
if len_array > 0:
UpperCAmelCase__ : List[Any] = array[0]
for i in range(1 , __UpperCamelCase ):
UpperCAmelCase__ : Any = self.prefix_sum[i - 1] + array[i]
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> bool:
UpperCAmelCase__ : str = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__UpperCamelCase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase ) )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 660 | 0 |
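A deobfuscated usage sketch of the prefix-sum class above; the method names get_sum and contains_sum are my reconstruction of the renamed ones.

class PrefixSum:
    def __init__(self, array):
        # prefix_sum[i] holds the sum of array[0..i].
        self.prefix_sum = [0] * len(array)
        if array:
            self.prefix_sum[0] = array[0]
            for i in range(1, len(array)):
                self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end):
        # Inclusive range sum in O(1).
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum):
        # True iff some contiguous subarray sums to target_sum.
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False

ps = PrefixSum([1, 2, 3, 4])
print(ps.get_sum(1, 3))    # 9
print(ps.contains_sum(5))  # True (2 + 3)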
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
A__ : Optional[Any] = datasets.utils.logging.get_logger(__name__)
class _lowercase ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
_A = None
_A = None
class _lowercase ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
_A = datasets.Audio()
_A = 'audio'
_A = AudioFolderConfig
_A = 42 # definition at the bottom of the script
_A = AudioClassification(audio_column='audio' , label_column='label' )
A__ : Union[str, Any] = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
A__ : str = AUDIO_EXTENSIONS
| 719 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 660 | 0 |
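A sketch of how the extension list in the AudioFolder snippet above might be used to filter candidate files; iter_audio_files is illustrative only and not the library's actual API.

from pathlib import Path

AUDIO_EXTENSIONS = [".wav", ".mp3", ".flac", ".ogg", ".opus"]  # subset of the list above

def iter_audio_files(root):
    # Yield every file under `root` whose suffix matches a known audio extension.
    for path in Path(root).rglob("*"):
        if path.suffix.lower() in AUDIO_EXTENSIONS:
            yield path

# e.g. sorted(iter_audio_files("data/")) -> [PosixPath('data/a.flac'), PosixPath('data/b.wav'), ...]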
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _lowercase ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 32 , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = [0.4814_5466, 0.457_8275, 0.4082_1073] , __UpperCamelCase = [0.2686_2954, 0.2613_0258, 0.2757_7711] , __UpperCamelCase = True , __UpperCamelCase=7 , __UpperCamelCase=30 , __UpperCamelCase=4_00 , __UpperCamelCase=3 , )-> List[str]:
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : Any = do_resize
UpperCAmelCase__ : Tuple = size if size is not None else {"shortest_edge": 2_88}
UpperCAmelCase__ : int = size_divisor
UpperCAmelCase__ : Optional[int] = do_rescale
UpperCAmelCase__ : int = rescale_factor
UpperCAmelCase__ : Optional[int] = do_normalize
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : Optional[int] = image_mean
UpperCAmelCase__ : Any = image_std
UpperCAmelCase__ : Union[str, Any] = do_pad
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Union[str, Any] = num_channels
UpperCAmelCase__ : str = min_resolution
UpperCAmelCase__ : Optional[Any] = max_resolution
def lowerCAmelCase__ ( self )-> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False )-> Dict:
if not batched:
UpperCAmelCase__ : str = self.size["shortest_edge"]
UpperCAmelCase__ : List[Any] = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
UpperCAmelCase__ : str = image.size
else:
                UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = image.shape[1], image.shape[2]
UpperCAmelCase__ : List[str] = size / min(__UpperCamelCase , __UpperCamelCase )
if h < w:
                UpperCAmelCase__ , UpperCAmelCase__ : Dict = size, scale * w
else:
                UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = scale * h, size
UpperCAmelCase__ : List[Any] = int((13_33 / 8_00) * size )
if max(__UpperCamelCase , __UpperCamelCase ) > max_size:
UpperCAmelCase__ : int = max_size / max(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Any = newh * scale
UpperCAmelCase__ : Union[str, Any] = neww * scale
            UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 ), int(neww + 0.5 )
UpperCAmelCase__ : int = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase__ : Optional[Any] = []
for image in image_inputs:
UpperCAmelCase__ : Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase__ : List[str] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
UpperCAmelCase__ : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
_A = BridgeTowerImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : str = BridgeTowerImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self )-> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__UpperCamelCase , "size" ) )
self.assertTrue(hasattr(__UpperCamelCase , "size_divisor" ) )
def lowerCAmelCase__ ( self )-> Optional[int]:
pass
def lowerCAmelCase__ ( self )-> Any:
# Initialize image processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
UpperCAmelCase__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : Union[str, Any] = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
UpperCAmelCase__ : Optional[Any] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self )-> List[Any]:
# Initialize image processor
UpperCAmelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : Optional[int] = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self )-> Any:
# Initialize image processor
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase__ : List[Any] = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : Optional[int] = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
UpperCAmelCase__ : List[Any] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 720 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
| 660 | 0 |
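The expected-size helper in the image-processor test above is easier to check in isolation. This standalone sketch mirrors its arithmetic (shortest-edge resize, a cap at 1333/800 of the target size, then flooring to a multiple of size_divisor); the function and parameter names are mine.

def expected_size(h, w, size=288, size_divisor=32):
    # Scale so the shorter side becomes `size`.
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    # Cap the longer side at (1333 / 800) * size, as in the test helper.
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        scale = max_size / max(newh, neww)
        newh, neww = newh * scale, neww * scale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    # Floor both sides to a multiple of size_divisor.
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_size(480, 640))  # (288, 384)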
"""simple docstring"""
from __future__ import annotations
import math
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> None:
UpperCAmelCase__ : Union[str, Any] = size
# approximate the overall size of segment tree with given value
UpperCAmelCase__ : Optional[int] = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
UpperCAmelCase__ : Optional[int] = [0 for i in range(0 , 4 * size )]
UpperCAmelCase__ : Union[str, Any] = [0 for i in range(0 , 4 * size )] # flag for lazy update
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
return idx * 2
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
return idx * 2 + 1
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> None:
if left_element == right_element:
UpperCAmelCase__ : Optional[int] = a[left_element - 1]
else:
UpperCAmelCase__ : List[Any] = (left_element + right_element) // 2
self.build(self.left(__UpperCamelCase ) , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.build(self.right(__UpperCamelCase ) , mid + 1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Tuple = max(
self.segment_tree[self.left(__UpperCamelCase )] , self.segment_tree[self.right(__UpperCamelCase )] )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> bool:
if self.flag[idx] is True:
UpperCAmelCase__ : List[Any] = self.lazy[idx]
UpperCAmelCase__ : str = False
if left_element != right_element:
UpperCAmelCase__ : Optional[Any] = self.lazy[idx]
UpperCAmelCase__ : Dict = self.lazy[idx]
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[str] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
UpperCAmelCase__ : Any = val
if left_element != right_element:
UpperCAmelCase__ : Dict = val
UpperCAmelCase__ : Tuple = val
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Any = True
return True
UpperCAmelCase__ : Optional[Any] = (left_element + right_element) // 2
self.update(self.left(__UpperCamelCase ) , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.update(self.right(__UpperCamelCase ) , mid + 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = max(
self.segment_tree[self.left(__UpperCamelCase )] , self.segment_tree[self.right(__UpperCamelCase )] )
return True
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int | float:
if self.flag[idx] is True:
UpperCAmelCase__ : Union[str, Any] = self.lazy[idx]
UpperCAmelCase__ : List[str] = False
if left_element != right_element:
UpperCAmelCase__ : Any = self.lazy[idx]
UpperCAmelCase__ : List[Any] = self.lazy[idx]
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Dict = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
UpperCAmelCase__ : Optional[Any] = (left_element + right_element) // 2
UpperCAmelCase__ : str = self.query(self.left(__UpperCamelCase ) , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = self.query(self.right(__UpperCamelCase ) , mid + 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return max(__UpperCamelCase , __UpperCamelCase )
def __str__( self )-> str:
return str([self.query(1 , 1 , self.size , __UpperCamelCase , __UpperCamelCase ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
A__ : Optional[Any] = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
A__ : str = 15
A__ : Union[str, Any] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 721 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
A__ : int = getLogger(__name__)
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 1024 , lowerCAmelCase : List[Any]="val" , lowerCAmelCase : str=None , lowerCAmelCase : int=False , lowerCAmelCase : Dict="summarization" , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=1 , lowerCAmelCase : Dict = None , lowerCAmelCase : List[str]="" , **lowerCAmelCase : int , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = str(lowerCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=lowerCAmelCase )
UpperCAmelCase__ : List[str] = Path(lowerCAmelCase )
UpperCAmelCase__ : str = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).cuda()
if fpaa:
UpperCAmelCase__ : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowerCAmelCase , lowerCAmelCase ) # update config with task specific params
UpperCAmelCase__ : List[Any] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase__ : Any = num_return_sequences
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase__ : int = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase__ : Union[str, Any] = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase__ : str = SeqaSeqDataset(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , max_target_length=1024 , type_path=lowerCAmelCase , n_obs=lowerCAmelCase , prefix=lowerCAmelCase , **lowerCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase__ : Union[str, Any] = ds.make_sortish_sampler(lowerCAmelCase , distributed=lowerCAmelCase , add_extra_examples=lowerCAmelCase , shuffle=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = DataLoader(lowerCAmelCase , sampler=lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=ds.collate_fn )
UpperCAmelCase__ : str = []
for batch in tqdm(lowerCAmelCase ):
UpperCAmelCase__ : Dict = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=lowerCAmelCase , num_beams=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase__ : int = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
UpperCAmelCase__ : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase__ : str = chunks(lowerCAmelCase , lowerCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowerCAmelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(lowerCAmelCase , lowerCAmelCase )
return results, sampler.num_replicas
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument(
"--type_path" , type=lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowerCAmelCase , default=8 , required=lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=lowerCAmelCase , default=600 , required=lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument(
"--prefix" , type=lowerCAmelCase , required=lowerCAmelCase , default=lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase__ : Optional[int] = time.time()
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = parser.parse_known_args()
UpperCAmelCase__ : int = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase__ : Dict = Path(args.save_dir + "_tmp" )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) # this handles locking.
UpperCAmelCase__ : List[str] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase__ : List[str] = {}
if args.src_lang is not None:
UpperCAmelCase__ : str = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase__ : List[str] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = eval_data_dir(
args.data_dir , lowerCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase , **lowerCAmelCase , )
if args.local_rank <= 0:
UpperCAmelCase__ : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ : Tuple = gather_results_from_each_node(lowerCAmelCase , lowerCAmelCase , args.sync_timeout )
UpperCAmelCase__ : Union[str, Any] = combine_partial_results(lowerCAmelCase )
if args.num_return_sequences > 1:
UpperCAmelCase__ : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(lowerCAmelCase , lowerCAmelCase )
return
UpperCAmelCase__ : Optional[Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(lowerCAmelCase ) as f:
UpperCAmelCase__ : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase__ : List[Any] = "translation" in args.task
UpperCAmelCase__ : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase__ : Optional[Any] = "bleu" if calc_bleu else "rouge"
UpperCAmelCase__ : Dict = score_fn(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = time.time() - start_time
UpperCAmelCase__ : Optional[int] = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase__ : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase__ : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(lowerCAmelCase , lowerCAmelCase , indent=lowerCAmelCase )
print(lowerCAmelCase )
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(lowerCAmelCase )
def a__ ( lowerCAmelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = []
for partial_result in partial_results:
records.extend(lowerCAmelCase )
UpperCAmelCase__ : Dict = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x["id"] )
UpperCAmelCase__ : List[str] = [x["pred"] for x in records]
return preds
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
# WAIT FOR lots of .json files
UpperCAmelCase__ : int = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase__ : Dict = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase__ : str = list(save_dir.glob("rank_*.json" ) )
if len(lowerCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase__ : Union[str, Any] = lmap(lowerCAmelCase , lowerCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 660 | 0 |
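A minimal deobfuscated counterpart of the result-gathering step in the distributed-eval script above: merge the per-rank record lists and restore the original example order by id. The field names follow the snippet; everything else is illustrative.

def combine_partial_results(partial_results):
    # Flatten the per-rank lists, then sort by the original example id.
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    return [x["pred"] for x in records]

rank0 = [{"id": 1, "pred": "b"}, {"id": 3, "pred": "d"}]
rank1 = [{"id": 0, "pred": "a"}, {"id": 2, "pred": "c"}]
print(combine_partial_results([rank0, rank1]))  # ['a', 'b', 'c', 'd']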
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def A__ ( lowercase: Namespace ) -> Optional[Any]:
return ConvertCommand(
args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name )
_lowercase : Optional[int] ='''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Dict:
A : Any =parser.add_parser(
'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , )
train_parser.add_argument('--model_type' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='Model\'s type.' )
train_parser.add_argument(
'--tf_checkpoint' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='TensorFlow checkpoint path or folder.' )
train_parser.add_argument(
'--pytorch_dump_output' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='Path to the PyTorch saved model output.' )
train_parser.add_argument('--config' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Configuration file path or folder.' )
train_parser.add_argument(
'--finetuning_task_name' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help='Optional fine-tuning task name if the TF model was a finetuned model.' , )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
    def __init__( self , model_type : str , tf_checkpoint : str , pytorch_dump_output : str , config : str , finetuning_task_name : str , *args , ) -> Tuple:
        self._logger = logging.get_logger('transformers-cli/converting' )
        self._logger.info(f'Loading model {model_type}' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ) -> Tuple:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
A : Optional[int] =self._tf_checkpoint
A : Optional[Any] =''
else:
A : str =self._tf_checkpoint
A : Dict =''
convert_transfo_xl_checkpoint_to_pytorch(
SCREAMING_SNAKE_CASE__ , self._config , self._pytorch_dump_output , SCREAMING_SNAKE_CASE__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                '--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]' )
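# Example invocation of the subcommand registered above (illustrative; paths are placeholders):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model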
| 661 |
__version__ ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
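# Minimal usage sketch of the public API re-exported above (assumes a standard
# PyTorch model/optimizer/dataloader already exist):
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       loss = model(**batch).loss
#       accelerator.backward(loss)
#       optimizer.step()
#       optimizer.zero_grad()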
| 661 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP ={'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' )
        outputs = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(outputs ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] )
        outputs = text_classifier('This is great !' , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}] )
        outputs = text_classifier(['This is great !', 'This is bad'] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [
                [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}],
                [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}],
            ] , )
        outputs = text_classifier('This is great !' , top_k=1 )
        self.assertEqual(nested_simplify(outputs ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] )
        # Legacy behavior
        outputs = text_classifier('This is great !' , return_all_scores=False )
        self.assertEqual(nested_simplify(outputs ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] )
        outputs = text_classifier('This is great !' , return_all_scores=True )
        self.assertEqual(
            nested_simplify(outputs ) , [[{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}]] )
        outputs = text_classifier(['This is great !', 'Something else'] , return_all_scores=True )
        self.assertEqual(
            nested_simplify(outputs ) , [
                [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}],
                [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}],
            ] , )
        outputs = text_classifier(['This is great !', 'Something else'] , return_all_scores=False )
        self.assertEqual(
            nested_simplify(outputs ) , [
                {'label': 'LABEL_0', 'score': 0.5_0_4},
                {'label': 'LABEL_0', 'score': 0.5_0_4},
            ] , )
    @require_torch
    def test_accepts_torch_device(self):
        import torch
        text_classifier = pipeline(
            task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , )
        outputs = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(outputs ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] )
    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' )
        outputs = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(outputs ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] )
    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline('text-classification' )
        outputs = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(outputs ) , [{'label': 'POSITIVE', 'score': 1.0}] )
        outputs = text_classifier('This is bad !' )
        self.assertEqual(nested_simplify(outputs ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
        outputs = text_classifier('Birds are a type of animal' )
        self.assertEqual(nested_simplify(outputs ) , [{'label': 'POSITIVE', 'score': 0.9_8_8}] )
    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline('text-classification' , framework='tf' )
        outputs = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(outputs ) , [{'label': 'POSITIVE', 'score': 1.0}] )
        outputs = text_classifier('This is bad !' )
        self.assertEqual(nested_simplify(outputs ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
        outputs = text_classifier('Birds are a type of animal' )
        self.assertEqual(nested_simplify(outputs ) , [{'label': 'POSITIVE', 'score': 0.9_8_8}] )
    def get_test_pipeline(self , model , tokenizer , processor ):
        text_classifier = TextClassificationPipeline(model=model , tokenizer=tokenizer )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test(self , text_classifier , _ ):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = 'HuggingFace is in'
        outputs = text_classifier(valid_inputs )
        self.assertEqual(nested_simplify(outputs ) , [{'label': ANY(str ), 'score': ANY(float )}] )
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values() )
        valid_inputs = ['HuggingFace is in ', 'Paris is in France']
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , [{'label': ANY(str ), 'score': ANY(float )}, {'label': ANY(str ), 'score': ANY(float )}] , )
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values() )
        self.assertTrue(outputs[1]['label'] in model.config.id2label.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs , top_k=None )
        N = len(model.config.id2label.values() )
        self.assertEqual(
            nested_simplify(outputs ) , [[{'label': ANY(str ), 'score': ANY(float )}] * N, [{'label': ANY(str ), 'score': ANY(float )}] * N] , )
        valid_inputs = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , {'label': ANY(str ), 'score': ANY(float )} , )
        self.assertTrue(outputs['label'] in model.config.id2label.values() )
        # This might be used as a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [['HuggingFace is in ', 'Paris is in France']]
        with self.assertRaises(ValueError ):
            text_classifier(invalid_input )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] )
        self.assertEqual(
            nested_simplify(outputs ) , [{'label': ANY(str ), 'score': ANY(float )}] , )
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values() )
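# Minimal standalone usage of the pipeline exercised by the tests above (illustrative):
#   from transformers import pipeline
#   clf = pipeline('text-classification', model='hf-internal-testing/tiny-random-distilbert')
#   clf('This is great !')   # -> [{'label': 'LABEL_0', 'score': ...}]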
| 661 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, 'handle_key', [] )
        handle += [key]
        setattr(func, 'handle_key', handle )
        return func
    return decorator
def mark_multiple(*keys: str):
    def decorator(func):
        handle = getattr(func, 'handle_key', [] )
        handle += keys
        setattr(func, 'handle_key', handle )
        return func
    return decorator
class KeyHandler ( type ):
    '''simple docstring'''
    def __new__( cls , name , bases , attrs ) -> Any:
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , 'key_handler' ):
            setattr(new_cls , 'key_handler' , {} )
        setattr(new_cls , 'handle_input' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , 'handle_key' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ) -> Any:
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
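# Illustrative usage sketch (hypothetical menu class; `handle_input` is attached by
# the metaclass and dispatches on the `key_handler` table built in `__new__`):
#   class Menu(metaclass=KeyHandler):
#       @mark(KEYMAP["up"])
#       def move_up(cls):
#           ...
#   Menu.handle_input()  # reads one key and calls the matching handler, if any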
| 661 | 1 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key) -> str:
    if "model" in orig_key:
        orig_key = orig_key.replace('model.', '' )
    if "norm1" in orig_key:
        orig_key = orig_key.replace('norm1', 'attention.output.LayerNorm' )
    if "norm2" in orig_key:
        orig_key = orig_key.replace('norm2', 'output.LayerNorm' )
    if "norm" in orig_key:
        orig_key = orig_key.replace('norm', 'LayerNorm' )
    if "transformer" in orig_key:
        layer_num = orig_key.split('.' )[0].split('_' )[-1]
        orig_key = orig_key.replace(F'transformer_{layer_num}', F'encoder.layer.{layer_num}' )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('mha.attn', 'attention.self' )
    if "mha" in orig_key:
        orig_key = orig_key.replace('mha', 'attention' )
    if "W_q" in orig_key:
        orig_key = orig_key.replace('W_q', 'self.query' )
    if "W_k" in orig_key:
        orig_key = orig_key.replace('W_k', 'self.key' )
    if "W_v" in orig_key:
        orig_key = orig_key.replace('W_v', 'self.value' )
    if "ff1" in orig_key:
        orig_key = orig_key.replace('ff1', 'intermediate.dense' )
    if "ff2" in orig_key:
        orig_key = orig_key.replace('ff2', 'output.dense' )
    if "ff" in orig_key:
        orig_key = orig_key.replace('ff', 'output.dense' )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('mlm.mlm_class', 'cls.predictions.decoder' )
    if "mlm" in orig_key:
        orig_key = orig_key.replace('mlm', 'cls.predictions.transform' )
    if "cls" not in orig_key:
        orig_key = 'yoso.' + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict) -> str:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val
    orig_state_dict['cls.predictions.bias'] = orig_state_dict['cls.predictions.decoder.bias']
    orig_state_dict['yoso.embeddings.position_ids'] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path) -> Union[str, Any]:
    orig_state_dict = torch.load(checkpoint_path, map_location='cpu' )['model_state_dict']
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
_lowercase : Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase : List[str] =parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 661 |
import math
def prime_sieve(n: int ) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1 ), 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(limit: int = 999_966_663_333 ) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
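# Quick sanity check of the sieve helper with a tiny input (illustrative; the
# default `limit` above is the Project Euler input and takes much longer):
#   prime_sieve(30)  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]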
| 661 | 1 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()
device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 6_4, 6_4)
timestep = torch.rand(1) * 9_9_9
encoder_hidden_status = torch.randn(2, 7_7, 7_6_8)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 6_6_6
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 661 |
import heapq
def greedy_min_vertex_cover(graph: dict ) -> set[int]:
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 661 | 1 |
def hamming_distance(string1: str, string2: str ) -> int:
    if len(string1 ) != len(string2 ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char1, char2 in zip(string1, string2 ):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
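# Example (classic illustration):
#   hamming_distance("karolin", "kathrin")  # -> 3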
| 661 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor ( FeatureExtractionMixin ):
'''simple docstring'''
    def __init__( self , feature_size : int , sampling_rate : int , padding_value : float , **kwargs ) -> None:
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop('padding_side' , 'right' )
        self.return_attention_mask = kwargs.pop('return_attention_mask' , True )
        super().__init__(**kwargs )
    def pad( self , processed_features : Union[
        BatchFeature,
        List[BatchFeature],
        Dict[str, BatchFeature],
        Dict[str, List[BatchFeature]],
        List[Dict[str, BatchFeature]],
    ] , padding : Union[bool, str, PaddingStrategy] = True , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_tensors : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
                f' to this method that includes {self.model_input_names[0]}, but you provided'
                f' {list(processed_features.keys() )}' )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features['attention_mask'] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = 'tf'
            elif is_torch_tensor(first_element ):
                return_tensors = 'pt'
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = 'np'
            else:
                raise ValueError(
                    f'type of {first_element} unknown: {type(first_element )}. '
                    'Should be one of a python, numpy, pytorch or tensorflow object.' )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError('Some items in the output dictionary have a different batch size than others.' )
        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
        return BatchFeature(batch_outputs , tensor_type=return_tensors )
    def _pad( self , processed_features : Union[Dict[str, np.ndarray], BatchFeature] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ) -> dict:
        required_input = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features['attention_mask'] = np.ones(len(required_input ) , dtype=np.int32 )
        if needs_to_be_padded:
            difference = max_length - len(required_input )
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(
                        processed_features['attention_mask'] , (0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , 'constant' , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(
                        processed_features['attention_mask'] , (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , 'constant' , constant_values=self.padding_value )
            else:
                raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return processed_features
    def _truncate( self , processed_features : Union[Dict[str, np.ndarray], BatchFeature] , max_length : Optional[int] = None , pad_to_multiple_of : Optional[int] = None , truncation : Optional[bool] = None , ) -> Optional[Any]:
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
        required_input = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_truncated = len(required_input ) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features['attention_mask'] = processed_features['attention_mask'][:max_length]
        return processed_features
    def _get_padding_strategies( self , padding=False , max_length=None ) -> Union[str, Any]:
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
                ' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
        return padding_strategy
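# Illustrative subclass sketch (hypothetical toy extractor; real subclasses such as
# Wav2Vec2FeatureExtractor also set `model_input_names` and more):
#   class ToyExtractor(SequenceFeatureExtractor):
#       model_input_names = ["input_values"]
#   extractor = ToyExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = extractor.pad({"input_values": [[0.1, 0.2], [0.3]]}, padding="longest", return_tensors="np")
#   # batch["input_values"].shape -> (2, 2); the shorter sequence is padded with 0.0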
| 661 | 1 |
def cramers_rule_2x2(equation1: list[int], equation2: list[int] ) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
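# Example (illustrative): x + 2y = 3 and 2x + y = 3 intersect at (1.0, 1.0):
#   cramers_rule_2x2([1, 2, 3], [2, 1, 3])  # -> (1.0, 1.0)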
| 661 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaVaConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "deberta-v2"
    def __init__( self , vocab_size=12_81_00 , hidden_size=15_36 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=61_44 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=0 , initializer_range=0.0_2 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|' )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size' , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaVaOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
    def default_onnx_opset( self ) -> int:
        return 12
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
A : str =super().generate_dummy_inputs(preprocessor=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
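# Illustrative usage of the config class above (defaults mirror microsoft/deberta-v2-xlarge):
#   config = DebertaVaConfig()
#   config.hidden_size, config.num_hidden_layers  # -> (1536, 24)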
| 661 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_wavaveca'''] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_wavaveca'''] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_wavaveca'''] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    '''simple docstring'''
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
    @register_to_config
    def __init__( self , prefix_length : int , prefix_inner_dim : int , prefix_hidden_dim : Optional[int] = None , vocab_size : int = 5_02_57 , n_positions : int = 10_24 , n_embd : int = 7_68 , n_layer : int = 12 , n_head : int = 12 , n_inner : Optional[int] = None , activation_function : str = "gelu_new" , resid_pdrop : float = 0.1 , embd_pdrop : float = 0.1 , attn_pdrop : float = 0.1 , layer_norm_epsilon : float = 1e-5 , initializer_range : float = 0.0_2 , scale_attn_weights : bool = True , use_cache : bool = True , scale_attn_by_inverse_layer_idx : bool = False , reorder_and_upcast_attn : bool = False , ) -> None:
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'
                f' `n_embd`: {n_embd} are not equal.' )
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPTaConfig(
            vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
        self.transformer = GPTaLMHeadModel(gpt_config )
    def forward( self , input_ids : torch.Tensor , prefix_embeds : torch.Tensor , attention_mask : Optional[torch.Tensor] = None , labels : Optional[torch.Tensor] = None , ) -> Optional[Any]:
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            labels = torch.cat((dummy_token, input_ids) , dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token( self , batch_size : int , device : torch.device ) -> torch.Tensor:
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )
    def encode( self , prefix ) -> List[str]:
        return self.encode_prefix(prefix )
    @torch.no_grad()
    def generate_captions( self , features , eos_token_id , device ) -> Dict:
        features = torch.split(features , 1 , dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) )  # back to the clip feature
            # Only support beam search for now
            output_tokens , seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam( self , input_ids=None , input_embeds=None , device=None , beam_size : int = 5 , entry_length : int = 67 , temperature : float = 1.0 , eos_token_id : Optional[int] = None , ) -> Dict:
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )
        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()
            if scores is None:
                scores , next_tokens = logits.topk(beam_size , -1 )
                generated = generated.expand(beam_size , *generated.shape[1:] )
                next_tokens , scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average , next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated = torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
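# Illustrative usage sketch (hypothetical dimensions; `prefix` would come from a
# CLIP-style encoder upstream, and 50256 is GPT-2's end-of-text token id):
#   decoder = UniDiffuserTextDecoder(prefix_length=10, prefix_inner_dim=512, prefix_hidden_dim=512)
#   prefix = torch.randn(1, 10, 512)
#   tokens, lengths = decoder.generate_captions(decoder.encode(prefix), eos_token_id=50256, device='cpu')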
| 661 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 661 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 10_02 )
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
            pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
A : Any ='Hello World!'
A : Optional[Any] =[0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str:
A : Any =(
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
A : int =[
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
# fmt: off
A : List[Any] ={'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
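
# Sketch (added; kept as comments since it downloads a checkpoint): the parity
# checks above boil down to this pattern -- a slow (SentencePiece) and a fast
# (tokenizers) implementation of the same checkpoint must agree token-for-token:
#
#     from transformers import XLMRobertaTokenizer, XLMRobertaTokenizerFast
#
#     slow = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#     fast = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")
#     text = "I was born in 92000, and this is falsé."
#     assert slow.tokenize(text) == fast.tokenize(text)
#     assert slow.encode(text) == fast.encode(text)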
| 661 | 1 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
_lowercase : List[Any] =TypeVar('''T''')
class SCREAMING_SNAKE_CASE_ ( Generic[T] ):
'''simple docstring'''
lowercase : deque[T] # Cache store of keys
lowercase : set[T] # References of the keys in cache
lowercase : int = 10 # Maximum capacity of cache
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> None:
A : Optional[int] =deque()
A : List[str] =set()
if not n:
A : Optional[Any] =sys.maxsize
elif n < 0:
            raise ValueError('n should be a non-negative integer; 0 means the cache is unbounded.' )
else:
A : List[Any] =n
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : T ) -> None:
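        # Comments added for clarity: this is the classic LRU "refer" step. On a
        # miss with the cache full, the least-recently-used key is evicted from
        # the right end of the deque; on a hit, the key is unlinked from its
        # current slot. Either way it is pushed to the left end, which always
        # holds the most recently used key.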
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
A : Any =self.dq_store.pop()
self.key_reference.remove(SCREAMING_SNAKE_CASE__ )
else:
self.dq_store.remove(SCREAMING_SNAKE_CASE__ )
self.dq_store.appendleft(SCREAMING_SNAKE_CASE__ )
self.key_reference.add(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> None:
for k in self.dq_store:
print(SCREAMING_SNAKE_CASE__ )
def __repr__( self : List[Any] ) -> str:
return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : LRUCache[str | int] =LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 661 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Dict ={
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Optional[int] = "xglm"
lowercase : Any = ["past_key_values"]
lowercase : Dict = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=25_60_08 , SCREAMING_SNAKE_CASE__ : Dict=20_48 , SCREAMING_SNAKE_CASE__ : List[Any]=10_24 , SCREAMING_SNAKE_CASE__ : str=40_96 , SCREAMING_SNAKE_CASE__ : Optional[int]=24 , SCREAMING_SNAKE_CASE__ : Optional[Any]=16 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0_2 , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> int:
A : str =vocab_size
A : Union[str, Any] =max_position_embeddings
A : Optional[Any] =d_model
A : Optional[int] =ffn_dim
A : int =num_layers
A : Any =attention_heads
A : Dict =activation_function
A : List[Any] =dropout
A : str =attention_dropout
A : List[Any] =activation_dropout
A : List[Any] =layerdrop
A : List[Any] =init_std
A : Union[str, Any] =scale_embedding # scale factor will be sqrt(d_model) if True
A : List[str] =use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
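

# Illustrative sketch (added; not part of the original file): the
# ``attribute_map`` above lets callers read ``config.hidden_size`` or
# ``config.num_hidden_layers`` even though the config stores ``d_model`` and
# ``num_layers``. A minimal standalone re-implementation of that aliasing,
# not the real ``PretrainedConfig`` machinery:
class _AttributeMapDemo:
    attribute_map = {"hidden_size": "d_model", "num_hidden_layers": "num_layers"}

    def __init__(self, d_model: int, num_layers: int) -> None:
        self.d_model = d_model
        self.num_layers = num_layers

    def __getattr__(self, name: str):
        # __getattr__ only fires when normal lookup fails, so aliases land here.
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


_demo = _AttributeMapDemo(d_model=1_024, num_layers=24)
assert _demo.hidden_size == 1_024 and _demo.num_hidden_layers == 24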
| 661 | 1 |
from __future__ import annotations
def A__ ( lowercase: int | str ) -> bool:
A : List[str] =str(lowercase )
return n == n[::-1]
def A__ ( lowercase: int = 1_000_000 ) -> Dict:
A : Dict =0
for i in range(1, lowercase ):
if is_palindrome(lowercase ) and is_palindrome(bin(lowercase ).split('b' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
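
# Worked example (added; the two helpers above share a name and shadow each
# other, so the checks are inlined here): 585 reads the same forwards and
# backwards in base 10 and in base 2 (1001001001), so it counts as a
# double-base palindrome.
_n = 585
assert str(_n) == str(_n)[::-1]          # base-10 palindrome
assert bin(_n)[2:] == bin(_n)[2:][::-1]  # base-2 palindrome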
| 661 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowercase : List[str] ='''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def A__ ( ) -> List[Any]:
A : Any =_ask_options(
'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
A : Tuple =get_sagemaker_input()
else:
A : str =get_cluster_input()
return config
def A__ ( lowercase: int=None ) -> str:
if subparsers is not None:
A : List[str] =subparsers.add_parser('config', description=lowercase )
else:
A : Union[str, Any] =argparse.ArgumentParser('Accelerate config command', description=lowercase )
parser.add_argument(
'--config_file', default=lowercase, help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
), )
if subparsers is not None:
parser.set_defaults(func=lowercase )
return parser
def A__ ( lowercase: Tuple ) -> List[Any]:
A : Union[str, Any] =get_user_input()
if args.config_file is not None:
A : Optional[Any] =args.config_file
else:
if not os.path.isdir(lowercase ):
os.makedirs(lowercase )
A : Union[str, Any] =default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowercase )
else:
config.to_yaml_file(lowercase )
print(F'accelerate configuration saved at {config_file}' )
def A__ ( ) -> Optional[int]:
A : Any =config_command_parser()
A : int =parser.parse_args()
config_command(lowercase )
if __name__ == "__main__":
main()
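
# Usage sketch (added; names are illustrative): the parser above follows the
# argparse sub-command pattern, where each sub-command registers its handler
# with ``set_defaults(func=...)`` and the entry point dispatches via
# ``args.func(args)``. A minimal standalone version of that wiring:
import argparse as _argparse

_root = _argparse.ArgumentParser(prog="demo")
_subparsers = _root.add_subparsers()
_config_parser = _subparsers.add_parser("config")
_config_parser.set_defaults(func=lambda args: "would write default_config.yaml")
_args = _root.parse_args(["config"])
assert _args.func(_args) == "would write default_config.yaml"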
| 661 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Tuple ={
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] =[
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
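
# Illustration (added; not part of the module): the lazy-module pattern above
# defers heavy imports until an attribute is first accessed. The standard
# library's documented recipe for the same idea uses ``importlib.util.LazyLoader``:
import importlib.util
import sys as _sys


def _lazy_import(name: str):
    spec = importlib.util.find_spec(name)
    loader = importlib.util.LazyLoader(spec.loader)
    spec.loader = loader
    module = importlib.util.module_from_spec(spec)
    _sys.modules[name] = module
    loader.exec_module(module)  # execution is postponed until first attribute access
    return module


_json = _lazy_import("json")                 # nothing is really imported yet
assert _json.dumps({"a": 1}) == '{"a": 1}'   # first access triggers the import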
| 661 |
import collections
import importlib.util
import os
import re
from pathlib import Path
_lowercase : List[str] ='''src/transformers'''
# Matches is_xxx_available()
_lowercase : Dict =re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_lowercase : List[Any] =re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_lowercase : Tuple =re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_lowercase : Dict =re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_lowercase : List[Any] =re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_lowercase : str =re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_lowercase : Optional[int] =re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_lowercase : Any =re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_lowercase : List[Any] =re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_lowercase : Optional[Any] =re.compile(R'''^\s*try:''')
# Catches a line with else:
_lowercase : List[Any] =re.compile(R'''^\s*else:''')
def A__ ( lowercase: Dict ) -> int:
if _re_test_backend.search(lowercase ) is None:
return None
A : Any =[b[0] for b in _re_backend.findall(lowercase )]
backends.sort()
return "_and_".join(lowercase )
def A__ ( lowercase: Any ) -> List[Any]:
with open(lowercase, 'r', encoding='utf-8', newline='\n' ) as f:
A : Optional[Any] =f.readlines()
A : Dict =0
while line_index < len(lowercase ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowercase ):
return None
# First grab the objects without a specific backend in _import_structure
A : Optional[int] =[]
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
A : int =lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowercase ):
A : int =_re_one_line_import_struct.search(lowercase ).groups()[0]
            A : int =re.findall(r'\[([^\]]+)\]', lowercase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
A : Optional[int] =_re_import_struct_key_value.search(lowercase )
if single_line_import_search is not None:
A : Dict =[obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(lowercase ) > 0]
objects.extend(lowercase )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
A : str ={'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
A : Optional[int] =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A : str =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A : List[str] =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
A : Optional[Any] =lines[line_index]
if _re_import_struct_add_one.search(lowercase ) is not None:
objects.append(_re_import_struct_add_one.search(lowercase ).groups()[0] )
elif _re_import_struct_add_many.search(lowercase ) is not None:
A : Optional[Any] =_re_import_struct_add_many.search(lowercase ).groups()[0].split(', ' )
A : int =[obj[1:-1] for obj in imports if len(lowercase ) > 0]
objects.extend(lowercase )
elif _re_between_brackets.search(lowercase ) is not None:
A : Optional[int] =_re_between_brackets.search(lowercase ).groups()[0].split(', ' )
A : Optional[int] =[obj[1:-1] for obj in imports if len(lowercase ) > 0]
objects.extend(lowercase )
elif _re_quote_object.search(lowercase ) is not None:
objects.append(_re_quote_object.search(lowercase ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
A : Optional[Any] =objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A : Optional[Any] =[]
while (
line_index < len(lowercase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
A : Any =lines[line_index]
A : Optional[int] =_re_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
A : Optional[Any] ={'none': objects}
# Let's continue with backend-specific objects
while line_index < len(lowercase ):
# If the line is an if is_backend_available, we grab all objects associated.
A : str =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A : Optional[Any] =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A : List[str] =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
A : Any =lines[line_index]
A : Any =_re_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
A : Dict =objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def A__ ( lowercase: Any, lowercase: int ) -> Dict:
def find_duplicates(lowercase: List[str] ):
return [k for k, v in collections.Counter(lowercase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A : List[Any] =[]
for key in import_dict_objects.keys():
A : List[Any] =find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
A : Tuple =find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A : Tuple ='base imports' if key == 'none' else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
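
# Worked example (added): the duplicate check above relies on counting objects
# per key and flagging anything that appears more than once on one side of the init.
import collections as _collections

assert [k for k, v in _collections.Counter(["Model", "Config", "Model"]).items() if v > 1] == ["Model"]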
def A__ ( ) -> List[str]:
A : Dict =[]
for root, _, files in os.walk(lowercase ):
if "__init__.py" in files:
A : Any =os.path.join(lowercase, '__init__.py' )
A : Union[str, Any] =parse_init(lowercase )
if objects is not None:
A : str =analyze_results(*lowercase )
if len(lowercase ) > 0:
A : Any =F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('\n'.join(lowercase ) )
if len(lowercase ) > 0:
raise ValueError('\n\n'.join(lowercase ) )
def A__ ( ) -> int:
A : List[str] =[]
for path, directories, files in os.walk(lowercase ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(lowercase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowercase ) / folder).glob('*.py' ) ) ) == 0:
continue
A : Any =str((Path(lowercase ) / folder).relative_to(lowercase ) )
A : List[str] =short_path.replace(os.path.sep, '.' )
submodules.append(lowercase )
for fname in files:
if fname == "__init__.py":
continue
A : Optional[Any] =str((Path(lowercase ) / fname).relative_to(lowercase ) )
A : Dict =short_path.replace('.py', '' ).replace(os.path.sep, '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(lowercase )
return submodules
_lowercase : Tuple =[
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def A__ ( ) -> Tuple:
# This is to make sure the transformers module imported is the one in the repo.
A : str =importlib.util.spec_from_file_location(
'transformers', os.path.join(lowercase, '__init__.py' ), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
A : Any =spec.loader.load_module()
A : Any =[
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(lowercase ) > 0:
A : Dict ='\n'.join(F'- {module}' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F'{list_of_modules}\n'
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 661 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase : int ={
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] =['''ConvNextFeatureExtractor''']
_lowercase : Optional[int] =['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int =[
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] =[
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_lowercase : Any =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 661 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowercase : Any =logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Optional[float] = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
lowercase : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} )
lowercase : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} )
lowercase : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
lowercase : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
lowercase : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} )
lowercase : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
lowercase : Optional[str] = field(
default="linear" , metadata={"help": f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
| 661 | 1 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_lowercase : List[Any] =logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase_ )
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Dict , **SCREAMING_SNAKE_CASE__ : Any ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE__ )
requires_backends(self , 'vision' )
requires_backends(self , 'torch' )
if self.framework != "pt":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
A : Dict ={}
A : Dict ={}
A : int ={}
# preprocess args
if "points_per_batch" in kwargs:
A : int =kwargs['points_per_batch']
if "points_per_crop" in kwargs:
A : Any =kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
A : Any =kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
A : Optional[Any] =kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
A : Tuple =kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
A : Optional[Any] =kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
A : Union[str, Any] =kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
A : List[Any] =kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
A : Any =kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
A : Union[str, Any] =kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
A : Dict =kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
A : Optional[int] =kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , *SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , **SCREAMING_SNAKE_CASE__ : Dict ) -> int:
return super().__call__(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , num_workers=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str]=64 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : float = 5_12 / 15_00 , SCREAMING_SNAKE_CASE__ : Optional[int] = 32 , SCREAMING_SNAKE_CASE__ : Optional[int] = 1 , ) -> Optional[int]:
A : Tuple =load_image(SCREAMING_SNAKE_CASE__ )
A : List[Any] =self.image_processor.size['longest_edge']
A , A , A , A : str =self.image_processor.generate_crop_boxes(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Dict =self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
A : Optional[Any] =self.get_inference_context()
with inference_context():
A : Any =self._ensure_tensor_on_device(SCREAMING_SNAKE_CASE__ , device=self.device )
A : List[str] =self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
A : int =image_embeddings
A : str =grid_points.shape[1]
A : Tuple =points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
'To return all points at once, set points_per_batch to None' )
for i in range(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : Dict =grid_points[:, i : i + points_per_batch, :, :]
A : Optional[int] =input_labels[:, i : i + points_per_batch]
A : List[Any] =i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.8_8 , SCREAMING_SNAKE_CASE__ : Any=0.9_5 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Dict=1 , ) -> int:
A : Union[str, Any] =model_inputs.pop('input_boxes' )
A : List[str] =model_inputs.pop('is_last' )
A : Dict =model_inputs.pop('original_sizes' ).tolist()
A : Optional[Any] =model_inputs.pop('reshaped_input_sizes' ).tolist()
A : Tuple =self.model(**SCREAMING_SNAKE_CASE__ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A : Any =model_outputs['pred_masks']
A : Optional[int] =self.image_processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , binarize=SCREAMING_SNAKE_CASE__ )
A : str =model_outputs['iou_scores']
A , A , A : List[Any] =self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=0.7 , ) -> str:
A : Union[str, Any] =[]
A : Optional[Any] =[]
A : Any =[]
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
A : Optional[Any] =torch.cat(SCREAMING_SNAKE_CASE__ )
A : str =torch.cat(SCREAMING_SNAKE_CASE__ )
A , A , A , A : List[Any] =self.image_processor.post_process_for_mask_generation(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : int =defaultdict(SCREAMING_SNAKE_CASE__ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(SCREAMING_SNAKE_CASE__ )
A : Dict ={}
if output_rle_mask:
A : Optional[Any] =rle_mask
if output_bboxes_mask:
A : List[str] =bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 661 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowercase : int =2
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : List[Any] , *, # begin keyword-only arguments
SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : int=None , ) -> List[Any]:
A , A , A , A : Optional[Any] =bos, unk, pad, eos
A : Dict =[]
A : Union[str, Any] =[]
A : Any ={}
A : int =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : Any =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[Any] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =len(self.symbols )
def __eq__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
return self.indices == other.indices
def __getitem__( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[Any] ) -> Union[str, Any]:
return len(self.symbols )
def __contains__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
return sym in self.indices
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
A : Union[str, Any] =cls()
d.add_from_file(SCREAMING_SNAKE_CASE__ )
return d
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Any:
if word in self.indices and not overwrite:
A : int =self.indices[word]
A : Union[str, Any] =self.count[idx] + n
return idx
else:
A : Tuple =len(self.symbols )
A : str =idx
self.symbols.append(SCREAMING_SNAKE_CASE__ )
self.count.append(SCREAMING_SNAKE_CASE__ )
return idx
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
return 0
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
try:
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE__ ) )
return
A : str =f.readlines()
A : int =self._load_meta(SCREAMING_SNAKE_CASE__ )
for line in lines[indices_start_line:]:
try:
A , A : Optional[int] =line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
A : int =True
A , A : Optional[Any] =line.rsplit(' ' , 1 )
else:
A : Any =False
A : Tuple =int(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(SCREAMING_SNAKE_CASE__ ) )
self.add_symbol(SCREAMING_SNAKE_CASE__ , n=SCREAMING_SNAKE_CASE__ , overwrite=SCREAMING_SNAKE_CASE__ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def A__ ( lowercase: Union[str, Any] ) -> str:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
A : int =dict((re.sub(r'@@$', '', lowercase ), v) if k.endswith('@@' ) else (re.sub(r'$', '</w>', lowercase ), v) for k, v in d.items() )
A : int ='<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
A : List[Any] =d[k] # restore
return da
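
# Worked example (added; re-states the transform inline since the helper above
# was renamed): BPE continuation markers '@@' are stripped, and unbroken words
# gain a '</w>' end-of-word marker -- mirroring the comment's own example.
_d = {"le@@": 5, "tt@@": 6, "er": 7}
_da = {(re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v for k, v in _d.items()}
assert _da == {"le": 5, "tt": 6, "er</w>": 7}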
def A__ ( lowercase: Optional[int], lowercase: Optional[Any] ) -> str:
# prep
if not os.path.exists(lowercase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowercase, exist_ok=lowercase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
A : List[str] =os.path.join(lowercase, 'checkpoint.pt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
A : Optional[Any] =torch.load(lowercase, map_location='cpu' )
A : Any =chkpt['cfg']['model']
# dicts
A : Any =os.path.join(lowercase, 'dict.txt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
A : Dict =Dictionary.load(lowercase )
A : Optional[Any] =rewrite_dict_keys(src_dict.indices )
A : Tuple =len(lowercase )
A : Any =os.path.join(lowercase, VOCAB_FILES_NAMES['vocab_file'] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# merges_file (bpecodes)
A : List[str] =os.path.join(lowercase, 'bpecodes' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
A : List[str] =os.path.join(lowercase, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowercase, lowercase )
# model config
A : Tuple =os.path.join(lowercase, 'config.json' )
A : Tuple ={
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# tokenizer config
A : int =os.path.join(lowercase, lowercase )
A : List[str] ={
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# model
A : List[Any] =chkpt['model']
# remove unneeded keys
A : List[Any] =[
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowercase, lowercase )
A : str =list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
A : Union[str, Any] =model_state_dict.pop(lowercase )
else:
A : List[str] =model_state_dict.pop(lowercase )
A : Any =BioGptConfig.from_pretrained(lowercase )
A : str =BioGptForCausalLM(lowercase )
# check that it loads ok
model_new.load_state_dict(lowercase )
# save
A : Tuple =os.path.join(lowercase, lowercase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase, lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
_lowercase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase : List[Any] =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
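
# Example invocation (added; the script name and paths are placeholders for a
# real fairseq BioGPT dump):
#
#     python <this_script>.py \
#         --biogpt_checkpoint_path /path/to/fairseq/biogpt \
#         --pytorch_dump_folder_path /path/to/output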
| 661 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Optional[int] ={
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] =[
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
_lowercase : str =False
_lowercase : Optional[Any] =False
def A__ ( lowercase: Namespace ) -> Optional[int]:
return TrainCommand(lowercase )
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Dict:
A : Optional[Any] =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=SCREAMING_SNAKE_CASE__ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=SCREAMING_SNAKE_CASE__ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE__ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=SCREAMING_SNAKE_CASE__ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE__ , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=SCREAMING_SNAKE_CASE__ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=SCREAMING_SNAKE_CASE__ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE__ , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Namespace ) -> List[Any]:
A : Optional[int] =logging.get_logger('transformers-cli/training' )
A : Dict ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =args.output
A : List[str] =args.column_label
A : int =args.column_text
A : Union[str, Any] =args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
A : Optional[Any] =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
A : Tuple =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A : Dict =None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
A : List[Any] =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A : Optional[Any] =args.validation_split
A : str =args.train_batch_size
A : Any =args.valid_batch_size
A : Dict =args.learning_rate
A : List[str] =args.adam_epsilon
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[str]:
raise NotImplementedError
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
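
# Example invocation (added; the CSV path and column indices are placeholders):
#
#     transformers-cli train \
#         --train_data ./data/train.csv \
#         --column_label 0 --column_text 1 --column_id 2 \
#         --task text_classification \
#         --model bert-base-uncased \
#         --output ./trained_pipeline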
| 661 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Dict=3 , SCREAMING_SNAKE_CASE__ : Tuple=30 , SCREAMING_SNAKE_CASE__ : int=4_00 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : str=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Any=1 / 2_55 , SCREAMING_SNAKE_CASE__ : int=True , ) -> Optional[int]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
A : Optional[Any] =size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
A : Union[str, Any] =parent
A : Union[str, Any] =batch_size
A : Union[str, Any] =num_channels
A : int =min_resolution
A : List[Any] =max_resolution
A : Dict =do_resize
A : Tuple =size
A : List[str] =do_normalize
A : List[Any] =image_mean
A : Dict =image_std
A : Any =do_rescale
A : List[str] =rescale_factor
A : Optional[Any] =do_pad
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict=False ) -> Dict:
if not batched:
A : Any =image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE__ , Image.Image ):
A , A : Union[str, Any] =image.size
else:
A , A : Tuple =image.shape[1], image.shape[2]
if w < h:
A : Any =int(self.size['shortest_edge'] * h / w )
A : Any =self.size['shortest_edge']
elif w > h:
A : Dict =self.size['shortest_edge']
A : Dict =int(self.size['shortest_edge'] * w / h )
else:
A : List[str] =self.size['shortest_edge']
A : Dict =self.size['shortest_edge']
else:
A : List[Any] =[]
for image in image_inputs:
A , A : int =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A : str =max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[0] )[0]
A : Tuple =max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[1] )[1]
return expected_height, expected_width
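# Worked example (comment added): with size {"shortest_edge": 18}, an input of
# width 30 and height 400 hits the w < h branch above, so the width is pinned
# to 18 and the height becomes int(18 * 400 / 30) = 240 -- the shorter side is
# scaled to 18 while the aspect ratio is preserved.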
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[Any] = ConditionalDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Tuple:
A : str =ConditionalDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
A : Tuple =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_mean' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_std' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'size' ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : int =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
A : str =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE__ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
A : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
A : List[Any] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : List[str] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A : Union[str, Any] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
A : Tuple =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Any =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : Optional[int] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
A : Optional[int] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Tuple =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : int =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Union[str, Any]:
# prepare image and target
A : Union[str, Any] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A : List[Any] =json.loads(f.read() )
A : Any ={'image_id': 3_97_69, 'annotations': target}
# encode them
A : str =ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
A : Any =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Optional[Any] =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : List[str] =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Dict =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : str =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : Dict =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : List[str] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : Union[str, Any] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify orig_size
A : List[Any] =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : int =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
# prepare image, target and masks_path
A : List[str] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A : Optional[int] =json.loads(f.read() )
A : int ={'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
A : Optional[Any] =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A : List[Any] =ConditionalDetrImageProcessor(format='coco_panoptic' )
A : Union[str, Any] =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , masks_path=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Dict =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : Dict =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Optional[int] =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : List[Any] =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Any =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : List[Any] =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : Any =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : str =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify masks
A : int =82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , SCREAMING_SNAKE_CASE__ )
# verify orig_size
A : Any =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : str =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
| 661 | 1 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
    # If the user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster args
    # If the user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster args
    # Throw an error if the user passes both BYO and on-demand cluster args
# Otherwise, use default values
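    # Example invocations (illustrative only; the script name is an assumption):
    #   python run_on_remote.py --host 1.2.3.4 --user ubuntu --key_path ~/.ssh/id_rsa pytorch/text-generation/run_generation.py --prompt "hi"
    #   python run_on_remote.py --instance V100:1 --provider cheapest pytorch/text-generation/run_generation.py --prompt "hi"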
_lowercase : Tuple =argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
_lowercase , _lowercase : Tuple =parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
_lowercase : Any =rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
_lowercase : Any =rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_lowercase : str =args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 661 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : List[Any] =1_6
_lowercase : Union[str, Any] =3_2
def A__ ( lowercase: Accelerator, lowercase: int = 16, lowercase: str = "bert-base-cased" ) -> Optional[int]:
A : List[Any] =AutoTokenizer.from_pretrained(lowercase )
A : Any =load_dataset('glue', 'mrpc' )
def tokenize_function(lowercase: Any ):
# max_length=None => use the model max length (it's actually the default)
A : List[str] =tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowercase, max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A : Any =datasets.map(
lowercase, batched=lowercase, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A : Dict =tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowercase: Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
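        # (Illustrative rationale: XLA compiles a fresh graph for every new
        # tensor shape, so dynamic "longest" padding would trigger constant
        # recompilation on TPU; fixed max_length padding keeps shapes static.)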
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase, padding='max_length', max_length=128, return_tensors='pt' )
return tokenizer.pad(lowercase, padding='longest', return_tensors='pt' )
# Instantiate dataloaders.
A : Union[str, Any] =DataLoader(
tokenized_datasets['train'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
A : str =DataLoader(
tokenized_datasets['validation'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
return train_dataloader, eval_dataloader
def A__ ( lowercase: Dict, lowercase: Optional[int], lowercase: Any, lowercase: str ) -> Tuple:
model.eval()
A : Tuple =0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A : Tuple =model(**lowercase )
A : Tuple =outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A , A : Union[str, Any] =accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
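        # (Illustrative: with 10 eval samples sharded over 4 processes, the
        # sampler pads the dataset to 12, so the final gathered batch carries 2
        # repeated samples that must be sliced off before computing the metric.)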
if accelerator.use_distributed:
if step == len(lowercase ) - 1:
A : List[Any] =predictions[: len(eval_dataloader.dataset ) - samples_seen]
A : Optional[int] =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase, references=lowercase, )
A : Union[str, Any] =metric.compute()
return eval_metric["accuracy"]
def A__ ( lowercase: Union[str, Any], lowercase: Dict ) -> List[str]:
# Initialize accelerator
A : Optional[int] =Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A : int =config['lr']
A : Optional[Any] =int(config['num_epochs'] )
A : Union[str, Any] =int(config['seed'] )
A : List[str] =int(config['batch_size'] )
A : Optional[Any] =args.model_name_or_path
set_seed(lowercase )
A , A : str =get_dataloaders(lowercase, lowercase, lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A : List[str] =AutoModelForSequenceClassification.from_pretrained(lowercase, return_dict=lowercase )
# Instantiate optimizer
A : Any =(
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A : List[str] =optimizer_cls(params=model.parameters(), lr=lowercase )
if accelerator.state.deepspeed_plugin is not None:
A : Optional[int] =accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
A : Dict =1
A : Union[str, Any] =(len(lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A : List[Any] =get_linear_schedule_with_warmup(
optimizer=lowercase, num_warmup_steps=0, num_training_steps=lowercase, )
else:
A : List[str] =DummyScheduler(lowercase, total_num_steps=lowercase, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A : Optional[int] =accelerator.prepare(
lowercase, lowercase, lowercase, lowercase, lowercase )
# We need to keep track of how many total steps we have iterated over
A : Tuple =0
# We also need to keep track of the stating epoch so files are named properly
A : List[str] =0
A : Tuple =evaluate.load('glue', 'mrpc' )
A : Optional[int] =num_epochs
if args.partial_train_epoch is not None:
A : Dict =args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
A : List[Any] =args.resume_from_checkpoint.split('epoch_' )[1]
A : List[Any] =''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A : Union[str, Any] =int(lowercase ) + 1
A : List[str] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
accelerator.print('resumed checkpoint performance:', lowercase )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:', lr_scheduler.get_lr()[0] )
        accelerator.print('resumed optimizer\'s lr:', optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir, F'state_{starting_epoch-1}.json' ), 'r' ) as f:
A : Union[str, Any] =json.load(lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A : str ={}
for epoch in range(lowercase, lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
A : Tuple =model(**lowercase )
A : List[Any] =outputs.loss
A : Any =loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A : Union[str, Any] =F'epoch_{epoch}'
A : Optional[Any] =os.path.join(args.output_dir, lowercase )
accelerator.save_state(lowercase )
A : Optional[Any] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
A : Dict =accuracy
A : Optional[Any] =lr_scheduler.get_lr()[0]
A : Any =optimizer.param_groups[0]['lr']
A : str =epoch
A : Dict =overall_step
accelerator.print(F'epoch {epoch}:', lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F'state_{epoch}.json' ), 'w' ) as f:
json.dump(lowercase, lowercase )
def A__ ( ) -> Optional[int]:
    A : Optional[int] =argparse.ArgumentParser(description='Simple example of a training script with checkpointing and resumption of training state.' )
parser.add_argument(
'--model_name_or_path', type=lowercase, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=lowercase, )
parser.add_argument(
'--output_dir', type=lowercase, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
parser.add_argument(
'--resume_from_checkpoint', type=lowercase, default=lowercase, help='If the training should continue from a checkpoint folder.', )
parser.add_argument(
'--partial_train_epoch', type=lowercase, default=lowercase, help='If passed, the training will stop after this number of epochs.', )
parser.add_argument(
'--num_epochs', type=lowercase, default=2, help='Number of train epochs.', )
A : str =parser.parse_args()
A : Optional[int] ={'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(lowercase, lowercase )
if __name__ == "__main__":
main()
| 661 | 1 |
from __future__ import annotations
from math import ceil, floor, sqrt
def A__ ( lowercase: int = 2_000_000 ) -> int:
A : list[int] =[0]
A : int
for idx in range(1, ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
A : int =0
# the area corresponding to the grid that gives the product closest to target
A : int =0
# an estimate of b, using the quadratic formula
A : float
# the largest integer less than b_estimate
A : int
    # the smallest integer greater than b_estimate
A : int
# the triangle number corresponding to b_floor
A : int
# the triangle number corresponding to b_ceil
A : int
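    # Sketch of the estimate used below: with T(n) = n * (n + 1) / 2 we want
    # T(a) * T(b) ~= target, i.e. b * (b + 1) / 2 ~= target / T(a). Solving the
    # quadratic b**2 + b - 2 * target / T(a) = 0 for the positive root gives
    # b ~= (-1 + sqrt(1 + 8 * target / T(a))) / 2, the b_estimate computed below.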
for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1 ):
A : Any =(-1 + sqrt(1 + 8 * target / triangle_a )) / 2
A : Dict =floor(lowercase )
A : List[Any] =ceil(lowercase )
A : Optional[Any] =triangle_numbers[b_floor]
A : int =triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
A : Optional[Any] =triangle_b_first_guess * triangle_a
A : Union[str, Any] =idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
A : Tuple =triangle_b_second_guess * triangle_a
A : Dict =idx_a * b_ceil
return area
if __name__ == "__main__":
print(f'''{solution() = }''')
| 661 |
def A__ ( lowercase: int ) -> int:
if not isinstance(lowercase, lowercase ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
A : Any =0
while number:
        # Kernighan's trick: `number &= number - 1` clears the lowest set bit,
        # so the loop runs exactly once per set bit rather than once per bit
        # position (popcount(number) iterations instead of e.g. 32).
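        # Illustrative trace for number = 13 (0b1101):
        #   0b1101 & 0b1100 -> 0b1100
        #   0b1100 & 0b1011 -> 0b1000
        #   0b1000 & 0b0111 -> 0b0000   => count == 3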
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_lowercase : Tuple =logging.get_logger(__name__)
_lowercase : Optional[int] ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowercase : Any ={
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
_lowercase : Dict ={
'''distilbert-base-uncased''': 5_1_2,
'''distilbert-base-uncased-distilled-squad''': 5_1_2,
'''distilbert-base-cased''': 5_1_2,
'''distilbert-base-cased-distilled-squad''': 5_1_2,
'''distilbert-base-german-cased''': 5_1_2,
'''distilbert-base-multilingual-cased''': 5_1_2,
}
_lowercase : Dict ={
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Any = VOCAB_FILES_NAMES
lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : int = PRETRAINED_INIT_CONFIGURATION
lowercase : Optional[int] = ["input_ids", "attention_mask"]
lowercase : List[str] = DistilBertTokenizer
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[int]="[UNK]" , SCREAMING_SNAKE_CASE__ : str="[SEP]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[PAD]" , SCREAMING_SNAKE_CASE__ : str="[CLS]" , SCREAMING_SNAKE_CASE__ : Tuple="[MASK]" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> List[Any]:
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
A : List[Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
A : Dict =getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
A : Dict =do_lower_case
A : Dict =strip_accents
A : Optional[Any] =tokenize_chinese_chars
A : Any =normalizer_class(**SCREAMING_SNAKE_CASE__ )
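            # (The branch above re-syncs the serialized normalizer with the
            # init-time flags: if any of the lowercase / strip_accents /
            # Chinese-character settings differs from what tokenizer.json
            # stored, the backend normalizer is rebuilt with the new values.)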
A : int =do_lower_case
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ) -> Union[str, Any]:
A : List[str] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
A : List[Any] =[self.sep_token_id]
A : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
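        # Illustrative layout for a sequence pair (A, B):
        #   [CLS] A ... [SEP] B ... [SEP]
        # token_type_ids are 0 over "[CLS] A [SEP]" and 1 over "B [SEP]".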
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
A : Union[str, Any] =self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
| 661 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def A__ ( *lowercase: Tuple, lowercase: Optional[Union[Dict, Any]] = None, lowercase: Dict=True, lowercase: Any=2 ) -> List[Any]:
from .. import __version__
A : Optional[Any] =take_from
A : Union[str, Any] =()
if not isinstance(args[0], lowercase ):
A : List[str] =(args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowercase ).base_version ) >= version.parse(lowercase ):
raise ValueError(
F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
F' version {__version__} is >= {version_name}' )
A : Tuple =None
if isinstance(lowercase, lowercase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowercase ),)
A : Union[str, Any] =F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
elif hasattr(lowercase, lowercase ):
values += (getattr(lowercase, lowercase ),)
A : Optional[Any] =F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
elif deprecated_kwargs is None:
A : List[Any] =F'`{attribute}` is deprecated and will be removed in version {version_name}.'
if warning is not None:
A : List[Any] =warning + ' ' if standard_warn else ''
warnings.warn(warning + message, lowercase, stacklevel=lowercase )
if isinstance(lowercase, lowercase ) and len(lowercase ) > 0:
A : Any =inspect.getouterframes(inspect.currentframe() )[1]
A : int =call_frame.filename
A : int =call_frame.lineno
A : Optional[int] =call_frame.function
A , A : int =next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
if len(lowercase ) == 0:
return
elif len(lowercase ) == 1:
return values[0]
return values
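# Minimal usage sketch (hypothetical call site; names invented for illustration):
#   value = deprecate(
#       ("old_kwarg", "99.0.0", "Please pass `new_kwarg` instead."),
#       take_from=kwargs,
#   )
# This pops `old_kwarg` from `kwargs`, emits a deprecation warning, and returns
# the popped value; once the library version reaches 99.0.0 the call raises
# instead, signalling that the deprecation shim should be deleted.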
| 661 | 1 |
from statistics import mean
import numpy as np
def A__ ( lowercase: list, lowercase: list, lowercase: list, lowercase: int ) -> list:
A : int =0
# Number of processes finished
A : Optional[int] =0
    # Completion status of each process:
    # 0 means the process has not finished yet, 1 means it has completed.
A : int =[0] * no_of_process
# List to include calculation results
A : List[Any] =[0] * no_of_process
# Sort by arrival time.
A : Optional[int] =[burst_time[i] for i in np.argsort(lowercase )]
A : Optional[int] =[process_name[i] for i in np.argsort(lowercase )]
arrival_time.sort()
while no_of_process > finished_process_count:
A : Dict =0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
A : Dict =arrival_time[i]
A : int =0
        # Index of the process selected to run next
A : Tuple =0
# Saves the current response ratio.
A : Optional[Any] =0
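        # HRRN selection rule: response ratio R = (W + S) / S, where
        # W = current_time - arrival_time (time spent waiting) and
        # S = burst_time (service time); the ready process with the highest R
        # is dispatched next.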
for i in range(0, lowercase ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
A : str =(burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
A : Any =temp
A : Any =i
# Calculate the turn around time
A : Any =current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
        # Mark this process as finished.
A : Optional[Any] =1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def A__ ( lowercase: list, lowercase: list, lowercase: list, lowercase: int ) -> list:
A : Optional[Any] =[0] * no_of_process
for i in range(0, lowercase ):
A : Optional[int] =turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
_lowercase : Optional[int] =5
_lowercase : Union[str, Any] =['''A''', '''B''', '''C''', '''D''', '''E''']
_lowercase : Union[str, Any] =[1, 2, 3, 4, 5]
_lowercase : List[str] =[1, 2, 3, 4, 5]
_lowercase : int =calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
_lowercase : List[Any] =calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
| 661 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A__ ( lowercase: int, lowercase: str ) -> Dict:
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Dict, lowercase: Tuple, lowercase: str ) -> str:
A : Any =tmp_path / 'cache'
A : Dict ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : Dict =JsonDatasetReader(lowercase, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Any, lowercase: Union[str, Any] ) -> Tuple:
A : Tuple =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : Optional[Any] =features.copy() if features else default_expected_features
A : Union[str, Any] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : str =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
], )
def A__ ( lowercase: Optional[int], lowercase: str, lowercase: Dict ) -> Optional[int]:
A : int =tmp_path / 'cache'
A : Tuple ={'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
A : int =features.copy() if features else default_expected_features
A : str =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A__ ( lowercase: Optional[Any], lowercase: str ) -> Tuple:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A : str ={'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
A : Dict =features.copy()
A : List[str] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : int =tmp_path / 'cache'
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Union[str, Any], lowercase: Any, lowercase: str ) -> Optional[Any]:
A : Optional[int] =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =JsonDatasetReader(lowercase, cache_dir=lowercase, split=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def A__ ( lowercase: Optional[Any], lowercase: int, lowercase: Union[str, Any] ) -> List[Any]:
if issubclass(lowercase, lowercase ):
A : int =jsonl_path
elif issubclass(lowercase, lowercase ):
A : Any =[jsonl_path]
A : Optional[Any] =tmp_path / 'cache'
A : Tuple ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[str] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
def A__ ( lowercase: List[str], lowercase: Tuple, lowercase: Optional[Any]=("train",) ) -> Tuple:
assert isinstance(lowercase, lowercase )
for split in splits:
A : List[str] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Tuple, lowercase: Optional[int], lowercase: Any ) -> str:
A : List[str] =tmp_path / 'cache'
A : Union[str, Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : str =JsonDatasetReader({'train': jsonl_path}, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Optional[int], lowercase: Optional[int] ) -> Tuple:
A : Any =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =features.copy() if features else default_expected_features
A : Dict =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[Any] =JsonDatasetReader({'train': jsonl_path}, features=lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Any, lowercase: List[Any], lowercase: List[Any] ) -> Tuple:
if split:
A : Optional[int] ={split: jsonl_path}
else:
A : Dict ='train'
A : Optional[Any] ={'train': jsonl_path, 'test': jsonl_path}
A : Tuple =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[Any] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A__ ( lowercase: List[Any] ) -> Tuple:
return json.load(lowercase )
def A__ ( lowercase: List[Any] ) -> Tuple:
    return [json.loads(line ) for line in lowercase]
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : Any =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : List[Any] =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> str:
A : Union[str, Any] =tmp_path_factory.mktemp('data' ) / f'test.json.{extension}'
A : Union[str, Any] =str(shared_datadir / f'test_file.json.{extension}' )
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compression=SCREAMING_SNAKE_CASE__ ).write()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : str =f.read()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : List[str] =f.read()
assert exported_content == original_content
| 661 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : str ={
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict =['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any =[
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
_lowercase : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
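    # (The `_LazyModule` indirection defers the heavy torch / tokenizers
    # imports until an attribute of the module is first accessed, keeping a
    # bare `import transformers` cheap.)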
| 661 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[int] = DDIMPipeline
lowercase : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase : Optional[Any] = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
lowercase : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowercase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
torch.manual_seed(0 )
A : str =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
A : Optional[int] =DDIMScheduler()
A : Optional[Any] ={'unet': unet, 'scheduler': scheduler}
return components
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> Any:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
A : List[Any] =torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
A : Union[str, Any] =torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
A : Optional[int] ={
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
A : Union[str, Any] ='cpu'
A : Tuple =self.get_dummy_components()
A : Union[str, Any] =self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : str =self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
A : str =pipe(**SCREAMING_SNAKE_CASE__ ).images
A : Optional[Any] =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
A : Optional[Any] =np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
A : str =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Any ='google/ddpm-cifar10-32'
A : Optional[int] =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMScheduler()
A : int =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddim.to(SCREAMING_SNAKE_CASE__ )
ddim.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Dict =torch.manual_seed(0 )
A : Optional[Any] =ddim(generator=SCREAMING_SNAKE_CASE__ , eta=0.0 , output_type='numpy' ).images
A : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : Tuple =np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : Optional[int] ='google/ddpm-ema-bedroom-256'
A : str =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : str =DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddpm.to(SCREAMING_SNAKE_CASE__ )
ddpm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Any =torch.manual_seed(0 )
A : Optional[int] =ddpm(generator=SCREAMING_SNAKE_CASE__ , output_type='numpy' ).images
A : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
A : Optional[int] =np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 661 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[str]:
A : List[str] =tf.convert_to_tensor(
[
[
8.2_2_2_0_9_9_1, # 3rd highest value; idx. 0
-0.5_6_2_0_0_4_4,
5.2_3_2_2_9_7_5_2,
4.0_3_8_6_3_9_3,
-6.8_7_9_8_3_7_8,
-0.5_4_7_8_5_8_0_2,
-3.2_0_1_2_1_5_3,
2.9_2_7_7_7_1_7_6,
1.8_8_1_7_1_9_5_3,
7.3_5_3_4_1_2_7_6, # 5th highest value; idx. 9
8.4_3_2_0_7_8_3_3, # 2nd highest value; idx. 10
-9.8_5_7_1_1_8_3_6,
-5.9_6_2_0_9_2_3_6,
-1.1_3_0_3_9_1_6_1,
-7.1_1_1_5_2_9_4,
-0.8_3_6_9_6_3_3,
-5.3_1_8_6_4_0_8,
7.0_6_4_2_7_4_0_7,
0.8_1_3_6_9_3_4_4,
-0.8_2_0_2_3_8_1_7,
-5.9_1_7_9_7_9_6,
0.5_8_8_1_3_4_4_3,
-6.9_9_7_7_8_4_3_8,
4.7_1_5_5_1_1_8_9,
-0.1_8_7_7_1_6_3_7,
7.4_4_0_2_0_7_5_9, # 4th highest value; idx. 25
9.3_8_4_5_0_9_8_7, # 1st highest value; idx. 26
2.1_2_6_6_2_9_4_1,
-9.3_2_5_6_2_0_3_8,
2.3_5_6_5_2_5_2_2,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.5_8_4_2_5_5_1_8,
4.5_3_1_3_9_2_3_8,
-5.5_7_5_1_0_4_6_4,
-6.2_8_0_3_0_6_9_9,
-7.1_9_5_2_9_5_0_3,
-4.0_2_1_2_2_5_5_1,
1.3_9_3_3_7_0_3_7,
-6.0_6_7_0_7_0_5_7,
1.5_9_4_8_0_5_1_7,
-9.6_4_3_1_1_9,
0.0_3_9_0_7_7_9_9,
0.6_7_2_3_1_7_6_2,
-8.8_8_2_0_6_7_2_6,
6.2_7_1_1_5_9_2_2, # 4th highest value; idx. 13
2.2_8_5_2_0_7_2_3,
4.8_2_7_6_7_5_0_6,
4.3_0_4_2_1_3_6_8,
8.8_2_7_5_3_1_3, # 2nd highest value; idx. 17
5.4_4_0_2_9_9_5_8, # 5th highest value; idx. 18
-4.4_7_3_5_7_9_4,
7.3_8_5_7_9_5_3_6, # 3rd highest value; idx. 20
-2.9_1_0_5_1_6_6_3,
2.6_1_9_4_6_0_7_7,
-2.5_6_7_4_7_6_2,
-9.4_8_9_5_9_3_0_2,
-4.0_2_9_2_2_6_4_5,
-1.3_5_4_1_6_9_1_8,
9.6_7_7_0_2_3_2_3, # 1st highest value; idx. 27
-5.8_9_4_7_8_5_5_3,
1.8_5_3_7_0_4_6_7,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
A : List[str] =tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
A : str =tf.convert_to_tensor(
[8.2_2_2_0_9_9, 7.3_5_3_4_1_2_6, 8.4_3_2_0_7_8, 7.4_4_0_2_0_7_5, 9.3_8_4_5_1, 6.2_7_1_1_5_9, 8.8_2_7_5_3_1, 5.4_4_0_2_9_9_5, 7.3_8_5_7_9_5_6, 9.6_7_7_0_2_3] , dtype=tf.floataa , ) # expected non filtered values as noted above
A : List[Any] =tf_top_k_top_p_filtering(SCREAMING_SNAKE_CASE__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
A : Dict =output[output != -float('inf' )]
A : Dict =tf.cast(
tf.where(tf.not_equal(SCREAMING_SNAKE_CASE__ , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1e-12 )
tf.debugging.assert_equal(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
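        # (Reading of the expectations above: with top_k=10, top_p=0.6 and
        # min_tokens_to_keep=4, only the five largest logits in each row
        # survive; every other position is masked to -inf.)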
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
if is_tf_available():
lowercase : Any = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
# TF-only test: tf.saved_model export
A : List[Any] =TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
A : Optional[Any] =2
A : Union[str, Any] =2
class SCREAMING_SNAKE_CASE_ ( tf.Module ):
'''simple docstring'''
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
super(SCREAMING_SNAKE_CASE__ , self ).__init__()
A : str =model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ),
) , jit_compile=SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
A : str =self.model.generate(
input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , max_new_tokens=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , )
return {"sequences": outputs["sequences"]}
A : List[Any] =[[2, 0], [1_02, 1_03]]
A : Dict =[[1, 0], [1, 1]]
A : Optional[Any] =DummyModel(model=SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , signatures={'serving_default': dummy_model.serving} )
A : Union[str, Any] =tf.saved_model.load(SCREAMING_SNAKE_CASE__ ).signatures['serving_default']
for batch_size in range(1 , len(SCREAMING_SNAKE_CASE__ ) + 1 ):
A : List[str] ={
'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
}
A : List[Any] =serving_func(**SCREAMING_SNAKE_CASE__ )['sequences']
A : Dict =test_model.generate(**SCREAMING_SNAKE_CASE__ , max_new_tokens=SCREAMING_SNAKE_CASE__ )
tf.debugging.assert_equal(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[Any]:
# TF-only test: tf.saved_model export
A : List[str] =TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
A : Dict =1
A : str =2
class SCREAMING_SNAKE_CASE_ ( tf.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
super(SCREAMING_SNAKE_CASE__ , self ).__init__()
A : Tuple =model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ),
) , jit_compile=SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
A : str =self.model.generate(
input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , max_new_tokens=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , )
return {"sequences": outputs["sequences"]}
A : int =[[2], [1_02, 1_03]]
A : Optional[int] =[[1], [1, 1]]
A : List[str] =DummyModel(model=SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , signatures={'serving_default': dummy_model.serving} )
A : List[Any] =tf.saved_model.load(SCREAMING_SNAKE_CASE__ ).signatures['serving_default']
for input_row in range(len(SCREAMING_SNAKE_CASE__ ) ):
A : str ={
'input_ids': tf.constant([dummy_input_ids[input_row]] ),
'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
}
A : List[str] =serving_func(**SCREAMING_SNAKE_CASE__ )['sequences']
A : Tuple =test_model.generate(**SCREAMING_SNAKE_CASE__ , max_new_tokens=SCREAMING_SNAKE_CASE__ )
tf.debugging.assert_equal(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
@require_tensorflow_text
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Dict:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=SCREAMING_SNAKE_CASE__ )
class CompleteSentenceTransformer(tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] ) -> int:
super().__init__()
A : Union[str, Any] =text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(SCREAMING_SNAKE_CASE__ , 'spiece.model' ) , 'rb' ).read() )
A : str =TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : str , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]:
A : str =self.tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
A , A : int =text.pad_model_inputs(
SCREAMING_SNAKE_CASE__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
A : Any =self.model.generate(input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
return self.tokenizer.detokenize(SCREAMING_SNAKE_CASE__ )
A : List[str] =CompleteSentenceTransformer()
A : Tuple =tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
A : Optional[Any] =complete_model(SCREAMING_SNAKE_CASE__ )
A : List[str] =tf.keras.Model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
keras_model.save(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Dict:
# Has PT equivalent: this test relies on random sampling
A : Optional[int] ={
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
A : Tuple =14
A : List[str] =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
A : List[Any] ='Hello, my dog is cute and'
A : str =tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
A : str =TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
A : List[Any] =6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
A : Dict =model.generate(**SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
A : Union[str, Any] =[6_38, 1_98]
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
A : Dict =model.generate(**SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> str:
# Has PT equivalent: ample use of framework-specific code
A : Optional[int] =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
A : Dict ='Hugging Face is a technology company based in New York and Paris.'
A : Any =bart_tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='tf' ).input_ids
A : Any =TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
A : Dict =bart_model.generate(SCREAMING_SNAKE_CASE__ ).numpy()
class FakeBart(TFBartForConditionalGeneration ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
return super().call(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
A : List[Any] =bart_model.generate(SCREAMING_SNAKE_CASE__ , foo='bar' ).numpy()
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
class FakeEncoder(bart_model.model.encoder.__class__ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[int]:
return super().call(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : Any =FakeEncoder(bart_model.config , bart_model.model.shared )
A : Tuple =fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
A : Tuple =bart_model.generate(SCREAMING_SNAKE_CASE__ ).numpy()
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(SCREAMING_SNAKE_CASE__ , foo='bar' )
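# Note on the two subclasses above: generate() filters model kwargs against the
# call() signature. FakeBart declares an explicit extra parameter, so the unknown
# `foo` kwarg can be filtered out and generation succeeds; FakeEncoder only
# exposes **kwargs, so nothing can be filtered and the unexpected `foo` raises.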
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
A : Dict =tempfile.mkdtemp()
A : int =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
A : str =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Optional[int] =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
A : Optional[int] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : str =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Union[str, Any] =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
A : Optional[Any] =self.get_image_processor()
A : Optional[Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Dict =self.prepare_image_inputs()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : Optional[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any:
A : str =self.get_image_processor()
A : Union[str, Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : str =[torch.ones((1, 3, 5, 5) )]
A : Optional[Any] =[[17_64, 26_46]]
A : List[Any] =[[6_83, 10_24]]
A : Union[str, Any] =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : str =[np.ones((1, 3, 5, 5) )]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
A : Any =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
A : Tuple =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : str ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
A : Optional[Any] =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Any =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]:
A : Optional[Any] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Optional[Any] =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Dict =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
A : Any =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : Tuple =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : List[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
A : int =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =[tf.ones((1, 3, 5, 5) )]
A : Tuple =[[17_64, 26_46]]
A : Union[str, Any] =[[6_83, 10_24]]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : List[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : Any =[np.ones((1, 3, 5, 5) )]
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A : List[str] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
A : Optional[int] =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Dict =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
A : Any =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Tuple =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[str]:
A : Optional[Any] =self.get_image_processor()
A : Dict =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
A : Optional[int] =[tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )]
A : Union[str, Any] =[torch.tensor(SCREAMING_SNAKE_CASE__ )]
A : int =[[17_64, 26_46]]
A : int =[[6_83, 10_24]]
A : Dict =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
A : Union[str, Any] =self.get_image_processor()
A : int =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : List[Any] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Tuple =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
A : Dict =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
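# End-to-end usage sketch (illustrative; assumes the public SAM checkpoint
# "facebook/sam-vit-base" and follows the same API exercised in the tests above):
#
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   inputs = processor(images=image, return_tensors="pt")
#   # ... run SamModel to obtain low-resolution masks, then upscale them:
#   masks = processor.post_process_masks(
#       low_res_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
#   )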
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger =logging.get_logger(__name__)
VOCAB_FILES_NAMES ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP ={
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES ={
'''allenai/longformer-base-4096''': 4_0_9_6,
'''allenai/longformer-large-4096''': 4_0_9_6,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4_0_9_6,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4_0_9_6,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> Dict:
    bs =(
        list(range(ord('!' ), ord('~' ) + 1 ) ) + list(range(ord('¡' ), ord('¬' ) + 1 ) ) + list(range(ord('®' ), ord('ÿ' ) + 1 ) )
    )
    cs =bs[:]
    n =0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs =[chr(n ) for n in cs]
    return dict(zip(bs, cs ) )
def get_pairs(word ):
    pairs =set()
    prev_char =word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char =char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token =AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token =AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token =AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token =AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token =AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token =AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token =AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder =json.load(vocab_handle )
        self.decoder ={v: k for k, v in self.encoder.items()}
        self.errors =errors  # how to handle errors in decoding
        self.byte_encoder =bytes_to_unicode()
        self.byte_decoder ={v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges =merges_handle.read().split('\n' )[1:-1]
        bpe_merges =[tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks =dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache ={}
        self.add_prefix_space =add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat =re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    def vocab_size(self ):
        return len(self.encoder )
    def get_vocab(self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self , token ):
        if token in self.cache:
            return self.cache[token]
        word =tuple(token )
        pairs =get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram =min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second =bigram
            new_word =[]
            i =0
            while i < len(word ):
                try:
                    j =word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i =j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word =tuple(new_word )
            word =new_word
            if len(word ) == 1:
                break
            else:
                pairs =get_pairs(word )
        word =' '.join(word )
        self.cache[token] =word
        return word
    def _tokenize(self , text ):
        bpe_tokens =[]
        for token in re.findall(self.pat , text ):
            token =''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens
    def _convert_token_to_id(self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token(self , index ):
        return self.decoder.get(index )
    def convert_tokens_to_string(self , tokens ):
        text =''.join(tokens )
        text =bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file =os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file =os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index =0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!' )
                    index =token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls =[self.cls_token_id]
        sep =[self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep =[self.sep_token_id]
        cls =[self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization(self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space =kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text =' ' + text
        return (text, kwargs)
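# Illustrative sketch of the merge loop in bpe() above (standalone toy code, not
# part of the tokenizer; the rank table below is an assumption chosen purely for
# demonstration): repeatedly fuse the adjacent symbol pair with the lowest rank.
def _toy_bpe(word: str, ranks: dict) -> str:
    symbols = list(word)
    while len(symbols) > 1:
        candidates = {(a, b) for a, b in zip(symbols, symbols[1:])}
        best = min(candidates, key=lambda pair: ranks.get(pair, float('inf')))
        if best not in ranks:
            break
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return ' '.join(symbols)
# _toy_bpe('hugging', {('h', 'u'): 0, ('hu', 'g'): 1}) -> 'hug g i n g'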
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client =WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def handle_test_results(test_results ):
    expressions =test_results.split(' ' )
    failed =0
    success =0
    # When the output is short enough, it is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent =expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
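# Sketch of the parsing above (assumes the usual pytest summary format): for a
# stats string like "== 1 failed, 11 passed in 103.54s ==" the function returns
# failed == 1, success == 11 and time_spent == "103.54s".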
def extract_first_line_failure(failures_short_lines ):
    failures ={}
    file =None
    in_error =False
    for line in failures_short_lines.split('\n' ):
        if re.search(r'_ \[doctest\]', line ):
            in_error =True
            file =line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] =line
            in_error =False
    return failures
class Message:
'''simple docstring'''
    def __init__(self , title: str , doc_test_results: Dict ):
        self.title =title
        self._time_spent =doc_test_results['time_spent'].split(',' )[0]
        self.n_success =doc_test_results['success']
        self.n_failures =doc_test_results['failures']
        self.n_tests =self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results =doc_test_results
@property
    def time(self ) -> str:
        time_spent =[self._time_spent]
        total_secs =0
        for time in time_spent:
            time_parts =time.split(':' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts =[0, 0, time_parts[0]]
            hours, minutes, seconds =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 36_00 + minutes * 60 + seconds
        hours, minutes, seconds =total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
        return f'{int(hours )}h{int(minutes )}m{int(seconds )}s'
@property
    def header(self ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self ) -> Dict:
        line_length =40
        category_failures ={k: v['failed'] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report =''
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload(self ) -> str:
        blocks =[self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(blocks )
@staticmethod
    def error_out():
        payload =[
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(payload )} ) )
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=payload , )
    def post(self ):
        print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(self.payload )} ) )
        text =f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
        self.thread_ts =client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=text , )
    def get_reply_blocks(self , job_name , job_link , failures , text ):
        failures_text =''
        for key, value in failures.items():
            value =value[:2_00] + ' [Truncated]' if len(value ) > 2_50 else value
            failures_text += f'*{key}*\n_{value}_\n\n'
        title =job_name
        content ={'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
            content['accessory'] ={
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self ):
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.' )
        job_link =self.doc_test_results.pop('job_link' )
        self.doc_test_results.pop('failures' )
        self.doc_test_results.pop('success' )
        self.doc_test_results.pop('time_spent' )
        sorted_dict =sorted(self.doc_test_results.items() , key=lambda t: t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['failures'] ):
                text =f'*Num failures* :{len(job_result["failed"] )} \n'
                failures =job_result['failures']
                blocks =self.get_reply_blocks(job , job_link , failures , text=text )
                print('Sending the following reply' )
                print(json.dumps({'blocks': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'Results for {job}' , blocks=blocks , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def get_job_links():
    run_id =os.environ['GITHUB_RUN_ID']
    url =f'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result =requests.get(url ).json()
    jobs ={}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        pages_to_iterate_over =math.ceil((result['total_count'] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result =requests.get(url + f'&page={i + 2}' ).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.' , e )
        return {}
def retrieve_artifact(name: str ):
    _artifact ={}
    if os.path.exists(name ):
        files =os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding='utf-8' ) as f:
                    _artifact[file.split('.' )[0]] =f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f'Could not open {os.path.join(name , file )}.' ) from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self , name: str ):
            self.name =name
            self.paths =[]
        def __str__(self ):
            return self.name
        def add_path(self , path: str ):
            self.paths.append({'name': self.name, 'path': path} )
    _available_artifacts: Dict[str, Artifact] ={}
    directories =filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name =directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] =Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links =get_job_links()
    available_artifacts =retrieve_available_artifacts()
    docs =collections.OrderedDict(
        [
            ('*.py', 'API Examples'),
            ('*.md', 'MD Examples'),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results ={
        v: {
            'failed': [],
            'failures': {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results['job_link'] =github_actions_job_links.get('run_doctests')
    artifact_path =available_artifacts['doc_tests_gpu_test_reports'].paths[0]
    artifact =retrieve_artifact(artifact_path['name'])
    if 'stats' in artifact:
        failed, success, time_spent =handle_test_results(artifact['stats'])
        doc_test_results['failures'] =failed
        doc_test_results['success'] =success
        doc_test_results['time_spent'] =time_spent[1:-1] + ', '
        all_failures =extract_first_line_failure(artifact['failures_short'])
        for line in artifact['summary_short'].split('\n'):
            if re.search('FAILED', line):
                line =line.replace('FAILED ', '')
                line =line.split()[0].replace('\n', '')
                if '::' in line:
                    file_path, test =line.split('::')
                else:
                    file_path, test =line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category =docs[file_regex]
                        doc_test_results[category]['failed'].append(test)
                        failure =all_failures[test] if test in all_failures else 'N/A'
                        doc_test_results[category]['failures'][test] =failure
                        break
    message =Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=13 , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : List[Any]=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Dict=37 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Dict=5_12 , SCREAMING_SNAKE_CASE__ : Tuple=16 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=4 , ) -> Union[str, Any]:
A : Optional[Any] =parent
A : Optional[Any] =batch_size
A : List[Any] =seq_length
A : int =is_training
A : Optional[int] =use_attention_mask
A : Optional[int] =use_token_type_ids
A : str =use_labels
A : List[Any] =vocab_size
A : Optional[Any] =hidden_size
A : Tuple =num_hidden_layers
A : Optional[Any] =num_attention_heads
A : int =intermediate_size
A : int =hidden_act
A : int =hidden_dropout_prob
A : int =attention_probs_dropout_prob
A : int =max_position_embeddings
A : Optional[int] =type_vocab_size
A : Any =type_sequence_label_size
A : Tuple =initializer_range
A : Union[str, Any] =num_choices
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Any:
A : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Union[str, Any] =None
if self.use_attention_mask:
A : List[Any] =random_attention_mask([self.batch_size, self.seq_length] )
A : Union[str, Any] =DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=SCREAMING_SNAKE_CASE__ , )
return config, input_ids, attention_mask
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> int:
A : int =self.prepare_config_and_inputs()
A , A , A : Optional[Any] =config_and_inputs
A : List[Any] ={'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self ):
        self.model_tester =FlaxDistilBertModelTester(self )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
A : int =model_class_name.from_pretrained('distilbert-base-uncased' )
A : Optional[int] =model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[str]:
A : Any =FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
A : List[str] =np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
A : Dict =np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
A : Union[str, Any] =model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0]
A : Optional[int] =(1, 11, 7_68)
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
A : str =np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
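# The fixed 3x3 activation slice above acts as a regression pin: any drift in
# the pretrained checkpoint's weights or numerics shows up as an allclose failure.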
__version__ ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
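# Minimal end-to-end sketch of the Accelerator API re-exported above (an
# illustrative standalone example, not part of this __init__ module):
#
#   import torch
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   model = torch.nn.Linear(4, 2)
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#   model, optimizer = accelerator.prepare(model, optimizer)
#   batch = torch.randn(8, 4, device=accelerator.device)
#   loss = model(batch).sum()
#   accelerator.backward(loss)
#   optimizer.step()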
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int ) -> int:
    if index == number_of_items:
        return 0
    # best value when item `index` is skipped
    ans_skip =knapsack(weights, values, number_of_items, max_weight, index + 1 )
    # best value when item `index` is taken (only possible if it still fits)
    ans_take =0
    if weights[index] <= max_weight:
        ans_take =values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1 )
    return max(ans_skip, ans_take )
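# A memoized variant of the same recursion (an illustrative sketch, not part of
# the original module): caching on (index, remaining) turns the exponential
# search into O(number_of_items * max_weight) table lookups.
from functools import lru_cache


def knapsack_memoized(weights: list, values: list, number_of_items: int, max_weight: int) -> int:
    @lru_cache(maxsize=None)
    def best(index: int, remaining: int) -> int:
        if index == number_of_items:
            return 0
        skip = best(index + 1, remaining)
        take = 0
        if weights[index] <= remaining:
            take = values[index] + best(index + 1, remaining - weights[index])
        return max(skip, take)

    return best(0, max_weight)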
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str ):
    def decorator(func ):
        handle =getattr(func , 'handle_key' , [] )
        handle += [key]
        setattr(func , 'handle_key' , handle )
        return func

    return decorator


def mark_multiple(*keys: List[str] ):
    def decorator(func ):
        handle =getattr(func , 'handle_key' , [] )
        handle += keys
        setattr(func , 'handle_key' , handle )
        return func

    return decorator


class KeyHandler(type ):
    def __new__(cls , name , bases , attrs ):
        new_cls =super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , 'key_handler' ):
            setattr(new_cls , 'key_handler' , {} )
        setattr(new_cls , 'handle_input' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys =getattr(value , 'handle_key' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] =value
        return new_cls

    @staticmethod
    def handle_input(cls ):
        char =get_character()
        if char != KEYMAP['undefined']:
            char =ord(char )
        handler =cls.key_handler.get(char )
        if handler:
            cls.current_selection =char
            return handler(cls )
        else:
            return None


def register(cls ):
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
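# Usage sketch (illustrative): a class rebuilt through register() dispatches a
# pressed key to whichever method was marked with that key.
#
#   @register
#   class Menu:
#       @mark('a')
#       def handle_a(self):
#           ...
#
#   Menu().handle_input()  # reads one key and runs handle_a when 'a' is pressed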
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES ={
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool ):
'''simple docstring'''
lowercase : Dict = "facebook/nllb-200-distilled-600M"
    description = (
"This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
"be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
"which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
"plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
)
lowercase : List[str] = "translator"
lowercase : List[str] = AutoTokenizer
lowercase : int = AutoModelForSeqaSeqLM
lowercase : Dict = LANGUAGE_CODES
lowercase : Dict = ["text", "text", "text"]
lowercase : List[Any] = ["text"]
    def encode(self , text , src_lang , tgt_lang ):
        if src_lang not in self.lang_to_code:
            raise ValueError(f'{src_lang} is not a supported language.' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'{tgt_lang} is not a supported language.' )
        src_lang =self.lang_to_code[src_lang]
        tgt_lang =self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors='pt' , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward(self , inputs ):
        return self.model.generate(**inputs )
    def decode(self , outputs ):
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
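# Usage sketch (illustrative; in transformers' tools API this tool is typically
# obtained via load_tool("translation")):
#
#   tool = TranslationTool()
#   tool('Hello', src_lang='English', tgt_lang='French')  # NLLB-translated text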
import math
def prime_sieve(n: int ) -> list:
    is_prime =[True] * n
    is_prime[0] =False
    is_prime[1] =False
    is_prime[2] =True
    for i in range(3, int(n**0.5 + 1 ), 2 ):
        index =i * 2
        while index < n:
            is_prime[index] =False
            index =index + i
    primes =[2]
    for i in range(3, n, 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(limit: int = 999_966_663_333 ) -> int:
    primes_upper_bound =math.floor(math.sqrt(limit ) ) + 100
    primes =prime_sieve(primes_upper_bound )
    matches_sum =0
    prime_index =0
    last_prime =primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime =primes[prime_index + 1]
        lower_bound =last_prime**2
        upper_bound =next_prime**2
        # Get numbers divisible by lps(current)
        current =lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current =upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current =0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime =next_prime
        prime_index += 1
    return matches_sum
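# Sketch: prime_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]. solution()
# mirrors the "semidivisible numbers" computation (Project Euler 234): for each
# pair of consecutive primes p < q with p*p <= limit, it sums the n <= limit
# with p*p < n < q*q that are divisible by exactly one of p and q.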
if __name__ == "__main__":
print(solution())
def is_even(number: int ) -> bool:
    return number & 1 == 0
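# Parity from the lowest bit: e.g. 4 & 1 == 0 -> True, while 7 & 1 == 1 -> False.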
if __name__ == "__main__":
import doctest
doctest.testmod()
import heapq
def greedy_min_vertex_cover(graph: dict ) -> set[int]:
    queue: list[list] =[]
    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices =set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax =heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update its rank
            if argmax in elem[1][1]:
                index =elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
return chosen_vertices
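# Worked sketch: for {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# the queue is seeded with [-degree, (node, adjacency)] entries, so heappop
# always yields a highest-degree remaining vertex (degree 3 here: node 2, then 3).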
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph ={0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float ) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner(poly: Sequence[float], x: float ) -> float:
    result =0.0
    for coeff in reversed(poly ):
        result =result * x + coeff
    return result
if __name__ == "__main__":
_lowercase : List[str] =(0.0, 0.0, 5.0, 9.3, 7.0)
_lowercase : Optional[Any] =1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 661 |
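A worked trace of Horner's rule on the same polynomial helps make the nesting ((c4*x + c3)*x + c2)*x + ... concrete; the variable names below are mine.

coeffs = (0.0, 0.0, 5.0, 9.3, 7.0)  # 7x^4 + 9.3x^3 + 5x^2, index i holds the x**i coefficient
x = 10.0
acc = 0.0
for c in reversed(coeffs):
    acc = acc * x + c
    print(f"acc = {acc}")  # 7.0, 79.3, 798.0, 7980.0, 79800.0
print(acc)  # 79800.0, matching evaluate_poly and horner above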
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase : List[Any] =logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
A : Tuple =feature_size
A : int =sampling_rate
A : List[str] =padding_value
A : Tuple =kwargs.pop('padding_side' , 'right' )
A : str =kwargs.pop('return_attention_mask' , SCREAMING_SNAKE_CASE__ )
super().__init__(**SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = True , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
A : Tuple ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
A : Dict =processed_features[self.model_input_names[0]]
A : int =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(SCREAMING_SNAKE_CASE__ ) == 0:
if return_attention_mask:
A : List[Any] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
A : List[str] =required_input[0]
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
A : Any =0
while len(required_input[index] ) == 0:
index += 1
if index < len(SCREAMING_SNAKE_CASE__ ):
A : Dict =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
A : List[Any] ='tf'
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
A : Optional[int] ='pt'
elif isinstance(SCREAMING_SNAKE_CASE__ , (int, float, list, tuple, np.ndarray) ):
A : Union[str, Any] ='np'
else:
raise ValueError(
f'type of {first_element} unknown: {type(SCREAMING_SNAKE_CASE__ )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
A : int =to_numpy(SCREAMING_SNAKE_CASE__ )
else:
A : List[Any] =[to_numpy(SCREAMING_SNAKE_CASE__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
A : List[Any] =self._get_padding_strategies(padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =processed_features[self.model_input_names[0]]
A : List[str] =len(SCREAMING_SNAKE_CASE__ )
if not all(len(SCREAMING_SNAKE_CASE__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
A : Tuple =[]
for i in range(SCREAMING_SNAKE_CASE__ ):
A : int ={k: v[i] for k, v in processed_features.items()}
# truncation
A : List[Any] =self._truncate(
SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , )
truncated_inputs.append(SCREAMING_SNAKE_CASE__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
A : Any =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
A : Optional[Any] =PaddingStrategy.MAX_LENGTH
A : List[Any] ={}
for i in range(SCREAMING_SNAKE_CASE__ ):
# padding
A : Optional[Any] =self._pad(
truncated_inputs[i] , max_length=SCREAMING_SNAKE_CASE__ , padding_strategy=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
for key, value in outputs.items():
if key not in batch_outputs:
A : Dict =[]
if value.dtype is np.dtype(np.floataa ):
A : Tuple =value.astype(np.floataa )
batch_outputs[key].append(SCREAMING_SNAKE_CASE__ )
return BatchFeature(SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[Dict[str, np.ndarray], BatchFeature] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> dict:
A : Optional[int] =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
A : List[str] =len(SCREAMING_SNAKE_CASE__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Tuple =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : int =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(SCREAMING_SNAKE_CASE__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
A : str =np.ones(len(SCREAMING_SNAKE_CASE__ ) , dtype=np.intaa )
if needs_to_be_padded:
A : Union[str, Any] =max_length - len(SCREAMING_SNAKE_CASE__ )
if self.padding_side == "right":
if return_attention_mask:
A : Dict =np.pad(
processed_features['attention_mask'] , (0, difference) )
A : str =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
A : Tuple =np.pad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
A : List[Any] =np.pad(
processed_features['attention_mask'] , (difference, 0) )
A : Union[str, Any] =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
A : Tuple =np.pad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'constant' , constant_values=self.padding_value )
else:
            raise ValueError('Invalid padding strategy: ' + str(self.padding_side ) )
return processed_features
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[Dict[str, np.ndarray], BatchFeature] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> Optional[Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
A : Tuple =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Any =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : List[str] =len(SCREAMING_SNAKE_CASE__ ) > max_length
if needs_to_be_truncated:
A : Union[str, Any] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
A : Dict =processed_features['attention_mask'][:max_length]
return processed_features
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Dict=None ) -> Union[str, Any]:
# Get padding strategy
if padding is not False:
if padding is True:
A : List[Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : Tuple =PaddingStrategy(SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : Optional[int] =padding
else:
A : List[str] =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
| 661 | 1 |
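The padding logic above reduces, for the common right-padded 1-D case, to the small NumPy sketch below: pad every sequence to the batch maximum and emit a matching attention mask. This is an illustration of the idea, not the transformers implementation; pad_batch is a made-up name.

import numpy as np

def pad_batch(sequences, padding_value=0.0):
    # Right-pad each sequence to the batch maximum and build the matching mask.
    max_len = max(len(seq) for seq in sequences)
    padded, masks = [], []
    for seq in sequences:
        diff = max_len - len(seq)
        padded.append(np.pad(np.asarray(seq, dtype=np.float32), (0, diff),
                             constant_values=padding_value))
        masks.append(np.pad(np.ones(len(seq), dtype=np.int32), (0, diff)))
    return np.stack(padded), np.stack(masks)

values, mask = pad_batch([[0.1, 0.2, 0.3], [0.4]])
print(values)  # second row padded with zeros
print(mask)    # [[1 1 1], [1 0 0]]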
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
_lowercase : List[Any] =(
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def A__ ( lowercase: str, lowercase: Optional[Any] ) -> Any:
warnings.warn(lowercase, lowercase )
requires_backends(lowercase, 'sklearn' )
return (preds == labels).mean()
def A__ ( lowercase: List[Any], lowercase: Union[str, Any] ) -> Optional[Any]:
warnings.warn(lowercase, lowercase )
requires_backends(lowercase, 'sklearn' )
A : List[str] =simple_accuracy(lowercase, lowercase )
A : Union[str, Any] =fa_score(y_true=lowercase, y_pred=lowercase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def A__ ( lowercase: Optional[int], lowercase: int ) -> Union[str, Any]:
warnings.warn(lowercase, lowercase )
requires_backends(lowercase, 'sklearn' )
A : str =pearsonr(lowercase, lowercase )[0]
A : Dict =spearmanr(lowercase, lowercase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def A__ ( lowercase: Tuple, lowercase: List[Any], lowercase: Dict ) -> List[str]:
warnings.warn(lowercase, lowercase )
requires_backends(lowercase, 'sklearn' )
assert len(lowercase ) == len(lowercase ), F'Predictions and labels have mismatched lengths {len(lowercase )} and {len(lowercase )}'
if task_name == "cola":
return {"mcc": matthews_corrcoef(lowercase, lowercase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(lowercase, lowercase )}
elif task_name == "mrpc":
return acc_and_fa(lowercase, lowercase )
elif task_name == "sts-b":
return pearson_and_spearman(lowercase, lowercase )
elif task_name == "qqp":
return acc_and_fa(lowercase, lowercase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(lowercase, lowercase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(lowercase, lowercase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(lowercase, lowercase )}
elif task_name == "rte":
return {"acc": simple_accuracy(lowercase, lowercase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(lowercase, lowercase )}
elif task_name == "hans":
return {"acc": simple_accuracy(lowercase, lowercase )}
else:
raise KeyError(lowercase )
def A__ ( lowercase: int, lowercase: List[str], lowercase: str ) -> List[str]:
warnings.warn(lowercase, lowercase )
requires_backends(lowercase, 'sklearn' )
if len(lowercase ) != len(lowercase ):
raise ValueError(F'Predictions and labels have mismatched lengths {len(lowercase )} and {len(lowercase )}' )
if task_name == "xnli":
return {"acc": simple_accuracy(lowercase, lowercase )}
else:
raise KeyError(lowercase )
| 661 |
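A toy run of the accuracy-plus-F1 combination computed above (requires scikit-learn; the prediction and label arrays are made up for illustration):

import numpy as np
from sklearn.metrics import f1_score

preds = np.array([1, 0, 1, 1, 0])
labels = np.array([1, 0, 0, 1, 0])
acc = (preds == labels).mean()      # 0.8
f1 = f1_score(y_true=labels, y_pred=preds)  # 0.8 (precision 2/3, recall 1)
print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})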
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_lowercase : Optional[int] =logging.get_logger(__name__)
_lowercase : List[str] ={
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : int = "deberta-v2"
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : str=12_81_00 , SCREAMING_SNAKE_CASE__ : List[Any]=15_36 , SCREAMING_SNAKE_CASE__ : Dict=24 , SCREAMING_SNAKE_CASE__ : List[str]=24 , SCREAMING_SNAKE_CASE__ : List[str]=61_44 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0_2 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-7 , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Tuple=-1 , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[str]=0 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Dict:
super().__init__(**SCREAMING_SNAKE_CASE__ )
A : Dict =hidden_size
A : Optional[Any] =num_hidden_layers
A : Optional[int] =num_attention_heads
A : Optional[int] =intermediate_size
A : Any =hidden_act
A : Any =hidden_dropout_prob
A : Union[str, Any] =attention_probs_dropout_prob
A : Optional[Any] =max_position_embeddings
A : Tuple =type_vocab_size
A : Tuple =initializer_range
A : int =relative_attention
A : int =max_relative_positions
A : Optional[Any] =pad_token_id
A : Union[str, Any] =position_biased_input
# Backwards compatibility
if type(SCREAMING_SNAKE_CASE__ ) == str:
A : Any =[x.strip() for x in pos_att_type.lower().split('|' )]
A : Any =pos_att_type
A : Tuple =vocab_size
A : Any =layer_norm_eps
A : Optional[Any] =kwargs.get('pooler_hidden_size' , SCREAMING_SNAKE_CASE__ )
A : str =pooler_dropout
A : Any =pooler_hidden_act
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A : List[Any] ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A : int ={0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def SCREAMING_SNAKE_CASE_ ( self : int ) -> int:
return 12
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
A : str =super().generate_dummy_inputs(preprocessor=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 661 | 1 |
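A quick usage sketch for the configuration class above, assuming an installed transformers release that exports DebertaV2Config; the printed defaults match the signature shown.

from transformers import DebertaV2Config

config = DebertaV2Config()
print(config.model_type)    # "deberta-v2"
print(config.hidden_size)   # 1536 by default, per the signature above
print(config.pos_att_type)  # None unless set, e.g. ["p2c", "c2p"]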
from __future__ import annotations
from collections import namedtuple
def A__ ( lowercase: float, lowercase: float, lowercase: float ) -> tuple:
A : List[Any] =namedtuple('result', 'name value' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('Only one argument must be 0' )
elif power < 0:
raise ValueError(
'Power cannot be negative in any electrical/electronics system' )
elif voltage == 0:
return result('voltage', power / current )
elif current == 0:
return result('current', power / voltage )
elif power == 0:
return result('power', float(round(abs(voltage * current ), 2 ) ) )
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 |
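Worked numbers for the P = V * I relation the function above encodes (values are illustrative): given any two of voltage, current, and power, the third follows directly.

voltage, current = 12.0, 2.5
power = voltage * current
print(power)            # 30.0 W
print(power / current)  # recovers voltage: 12.0 V
print(power / voltage)  # recovers current: 2.5 A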
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Tuple = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : int = 5_02_57 , SCREAMING_SNAKE_CASE__ : int = 10_24 , SCREAMING_SNAKE_CASE__ : int = 7_68 , SCREAMING_SNAKE_CASE__ : int = 12 , SCREAMING_SNAKE_CASE__ : int = 12 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "gelu_new" , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 1e-5 , SCREAMING_SNAKE_CASE__ : float = 0.0_2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> List[str]:
super().__init__()
A : str =prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
                f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
A : List[Any] =prefix_inner_dim
A : Dict =prefix_hidden_dim
A : List[str] =(
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A : Optional[int] =(
nn.Linear(self.prefix_hidden_dim , SCREAMING_SNAKE_CASE__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A : Dict =GPTaConfig(
vocab_size=SCREAMING_SNAKE_CASE__ , n_positions=SCREAMING_SNAKE_CASE__ , n_embd=SCREAMING_SNAKE_CASE__ , n_layer=SCREAMING_SNAKE_CASE__ , n_head=SCREAMING_SNAKE_CASE__ , n_inner=SCREAMING_SNAKE_CASE__ , activation_function=SCREAMING_SNAKE_CASE__ , resid_pdrop=SCREAMING_SNAKE_CASE__ , embd_pdrop=SCREAMING_SNAKE_CASE__ , attn_pdrop=SCREAMING_SNAKE_CASE__ , layer_norm_epsilon=SCREAMING_SNAKE_CASE__ , initializer_range=SCREAMING_SNAKE_CASE__ , scale_attn_weights=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ , scale_attn_by_inverse_layer_idx=SCREAMING_SNAKE_CASE__ , reorder_and_upcast_attn=SCREAMING_SNAKE_CASE__ , )
A : Dict =GPTaLMHeadModel(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , ) -> Optional[Any]:
A : str =self.transformer.transformer.wte(SCREAMING_SNAKE_CASE__ )
A : Any =self.encode_prefix(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.decode_prefix(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A : int =self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A : Optional[int] =torch.cat((dummy_token, input_ids) , dim=1 )
A : Dict =self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : torch.device ) -> torch.Tensor:
return torch.zeros(SCREAMING_SNAKE_CASE__ , self.prefix_length , dtype=torch.intaa , device=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
return self.encode_prefix(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
A : Dict =torch.split(SCREAMING_SNAKE_CASE__ , 1 , dim=0 )
A : int =[]
A : Optional[int] =[]
for feature in features:
A : int =self.decode_prefix(feature.to(SCREAMING_SNAKE_CASE__ ) ) # back to the clip feature
# Only support beam search for now
A , A : Dict =self.generate_beam(
input_embeds=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A : str =torch.stack(SCREAMING_SNAKE_CASE__ )
A : int =torch.stack(SCREAMING_SNAKE_CASE__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : int = 67 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , ) -> Dict:
A : Dict =eos_token_id
A : str =None
A : List[Any] =None
A : List[Any] =torch.ones(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=torch.int )
A : str =torch.zeros(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=torch.bool )
if input_embeds is not None:
A : Any =input_embeds
else:
A : List[Any] =self.transformer.transformer.wte(SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ ):
A : Any =self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE__ )
A : str =outputs.logits
A : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A : List[str] =logits.softmax(-1 ).log()
if scores is None:
A , A : Any =logits.topk(SCREAMING_SNAKE_CASE__ , -1 )
A : Any =generated.expand(SCREAMING_SNAKE_CASE__ , *generated.shape[1:] )
A , A : Tuple =next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A : Union[str, Any] =next_tokens
else:
A : str =tokens.expand(SCREAMING_SNAKE_CASE__ , *tokens.shape[1:] )
A : Optional[int] =torch.cat((tokens, next_tokens) , dim=1 )
else:
A : Optional[Any] =-float(np.inf )
A : Tuple =0
A : Optional[Any] =scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A : int =scores_sum / seq_lengths[:, None]
A , A : Optional[int] =scores_sum_average.view(-1 ).topk(SCREAMING_SNAKE_CASE__ , -1 )
A : Dict =next_tokens // scores_sum.shape[1]
A : Optional[Any] =seq_lengths[next_tokens_source]
A : Tuple =next_tokens % scores_sum.shape[1]
A : Optional[Any] =next_tokens.unsqueeze(1 )
A : Optional[Any] =tokens[next_tokens_source]
A : Any =torch.cat((tokens, next_tokens) , dim=1 )
A : List[str] =generated[next_tokens_source]
A : List[Any] =scores_sum_average * seq_lengths
A : Optional[Any] =is_stopped[next_tokens_source]
A : Optional[int] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A : Any =torch.cat((generated, next_token_embed) , dim=1 )
A : Optional[int] =is_stopped + next_tokens.eq(SCREAMING_SNAKE_CASE__ ).squeeze()
if is_stopped.all():
break
A : Optional[Any] =scores / seq_lengths
A : str =scores.argsort(descending=SCREAMING_SNAKE_CASE__ )
# tokens tensors are already padded to max_seq_length
A : Optional[Any] =[tokens[i] for i in order]
A : Any =torch.stack(SCREAMING_SNAKE_CASE__ , dim=0 )
A : str =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 661 | 1 |
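The beam-search bookkeeping in generate_beam above is easier to follow on a toy example. Below, a made-up table of per-step log-probabilities stands in for the language model, and beam_search (an illustrative name) keeps the top-k partial sequences by cumulative log-probability.

import math

LOG_PROBS = [
    [math.log(0.6), math.log(0.3), math.log(0.1)],
    [math.log(0.2), math.log(0.5), math.log(0.3)],
]

def beam_search(steps=2, beam_size=2):
    beams = [((), 0.0)]  # (token sequence, cumulative log-prob)
    for step in range(steps):
        # Expand every beam by every candidate token, then keep the best k.
        candidates = [
            (seq + (tok,), score + lp)
            for seq, score in beams
            for tok, lp in enumerate(LOG_PROBS[step])
        ]
        beams = sorted(candidates, key=lambda b: b[1], reverse=True)[:beam_size]
    return beams

print(beam_search())  # best sequences first: (0, 1) then (0, 2)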
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def A__ ( ) -> Optional[int]:
raise RuntimeError('CUDA out of memory.' )
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ) -> Any:
super().__init__()
A : Tuple =nn.Linear(3 , 4 )
A : Union[str, Any] =nn.BatchNormad(4 )
A : Optional[int] =nn.Linear(4 , 5 )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE__ ) ) )
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
A : Optional[int] =[]
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(SCREAMING_SNAKE_CASE__ : Optional[Any] ):
nonlocal batch_sizes
batch_sizes.append(SCREAMING_SNAKE_CASE__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(SCREAMING_SNAKE_CASE__ , [1_28, 64, 32, 16, 8] )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
A : List[str] =[]
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
nonlocal batch_sizes
batch_sizes.append(SCREAMING_SNAKE_CASE__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
A , A : Tuple =mock_training_loop_function('hello' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , [1_28, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(SCREAMING_SNAKE_CASE__ : Optional[int] ):
pass
with self.assertRaises(SCREAMING_SNAKE_CASE__ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(SCREAMING_SNAKE_CASE__ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(SCREAMING_SNAKE_CASE__ ) as cm:
mock_training_loop_function(1_28 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Dict:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(SCREAMING_SNAKE_CASE__ : Optional[int] ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Union[str, Any]:
A : Tuple =torch.cuda.memory_allocated()
A : Tuple =ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , SCREAMING_SNAKE_CASE__ )
A : str =release_memory(SCREAMING_SNAKE_CASE__ )
self.assertEqual(torch.cuda.memory_allocated() , SCREAMING_SNAKE_CASE__ )
| 661 |
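These tests exercise a decorator that retries a training function with a halved batch size whenever it raises an out-of-memory error. Below is a minimal re-statement of that pattern as a sketch of the idea, not accelerate's actual implementation; find_workable_batch_size and train are illustrative names.

import functools

def find_workable_batch_size(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as err:
                    if "out of memory" not in str(err):
                        raise  # unrelated errors propagate unchanged
                    batch_size //= 2
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return decorator

@find_workable_batch_size(starting_batch_size=64)
def train(batch_size):
    if batch_size > 16:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # 16, after 64 and 32 "fail"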
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Optional[int] =get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[str] = XLMRobertaTokenizer
lowercase : Dict = XLMRobertaTokenizerFast
lowercase : str = True
lowercase : Tuple = True
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
A : List[str] =XLMRobertaTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
A : List[str] ='<pad>'
A : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
A : List[str] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
A : Union[str, Any] =XLMRobertaTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
A : str =tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A : Any =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A : Tuple =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
A : Union[str, Any] =tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[int]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A : Any =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A : List[Any] =self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : Dict =self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : str =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
A : List[str] =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Dict =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=True
A : Optional[int] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=False
A : List[Any] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : str =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A : List[Any] =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[int]:
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SCREAMING_SNAKE_CASE__ , f.name )
A : Optional[Any] =XLMRobertaTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE__ )
A : int =pickle.dumps(SCREAMING_SNAKE_CASE__ )
pickle.loads(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
A : Union[str, Any] =self.get_tokenizer()
A : int =self.get_rust_tokenizer()
A : List[str] ='I was born in 92000, and this is falsé.'
A : Union[str, Any] =tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Any =tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
A : Tuple =rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.get_rust_tokenizer()
A : int =tokenizer.encode(SCREAMING_SNAKE_CASE__ )
A : Dict =rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
A : Any ='Hello World!'
A : Optional[Any] =[0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str:
A : Any =(
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
A : int =[
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
# fmt: off
A : List[Any] ={'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
| 661 | 1 |
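For reference, the slow integration test above corresponds to this straightforward usage (it downloads the xlm-roberta-base vocabulary on first run, so it needs network access):

from transformers import XLMRobertaTokenizer

tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
ids = tokenizer.encode("Hello World!")
print(ids)  # [0, 35378, 6661, 38, 2], matching the slow test above
print(tokenizer.decode(ids))  # round-trips to "<s> Hello World!</s>" (modulo spacing)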
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Optional[int] = (DPMSolverSDEScheduler,)
lowercase : Dict = 10
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
A : Dict ={
'num_train_timesteps': 11_00,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Tuple:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Tuple =self.scheduler_classes[0]
A : Tuple =self.get_scheduler_config()
A : Tuple =scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(self.num_inference_steps )
A : Dict =self.dummy_model()
A : Any =self.dummy_sample_deter * scheduler.init_noise_sigma
A : Union[str, Any] =sample.to(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
A : Optional[int] =scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Tuple =model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : List[str] =output.prev_sample
A : Optional[Any] =torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
A : List[str] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1e-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[Any]:
A : Any =self.scheduler_classes[0]
A : Dict =self.get_scheduler_config(prediction_type='v_prediction' )
A : List[Any] =scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(self.num_inference_steps )
A : Tuple =self.dummy_model()
A : Dict =self.dummy_sample_deter * scheduler.init_noise_sigma
A : List[str] =sample.to(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
A : Dict =scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Tuple =model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Any =scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Tuple =output.prev_sample
A : Union[str, Any] =torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
A : List[Any] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1e-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[str]:
A : Dict =self.scheduler_classes[0]
A : str =self.get_scheduler_config()
A : Optional[int] =scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.dummy_model()
A : List[str] =self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
A : Union[str, Any] =scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : List[Any] =model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =output.prev_sample
A : Union[str, Any] =torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
A : Optional[int] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1e-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
A : List[str] =self.scheduler_classes[0]
A : List[str] =self.get_scheduler_config()
A : Optional[Any] =scheduler_class(**SCREAMING_SNAKE_CASE__ , use_karras_sigmas=SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =self.dummy_model()
A : Tuple =self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE__ ) * scheduler.init_noise_sigma
A : str =sample.to(SCREAMING_SNAKE_CASE__ )
for t in scheduler.timesteps:
A : Any =scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Tuple =scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =output.prev_sample
A : Tuple =torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
A : str =torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
| 661 |
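The loop these tests repeat, written out once in runnable form. DDPMScheduler is used here only to avoid the torchsde dependency of DPMSolverSDEScheduler; it exposes the same set_timesteps / scale_model_input / step interface, and the zero-noise "model" is a stand-in for a real UNet.

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a real noise-prediction network
    sample = scheduler.step(noise_pred, t, sample).prev_sample
print(sample.abs().mean())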
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Dict ={
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Optional[int] = "xglm"
lowercase : Any = ["past_key_values"]
lowercase : Dict = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=25_60_08 , SCREAMING_SNAKE_CASE__ : Dict=20_48 , SCREAMING_SNAKE_CASE__ : List[Any]=10_24 , SCREAMING_SNAKE_CASE__ : str=40_96 , SCREAMING_SNAKE_CASE__ : Optional[int]=24 , SCREAMING_SNAKE_CASE__ : Optional[Any]=16 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0_2 , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> int:
A : str =vocab_size
A : Union[str, Any] =max_position_embeddings
A : Optional[Any] =d_model
A : Optional[int] =ffn_dim
A : int =num_layers
A : Any =attention_heads
A : Dict =activation_function
A : List[Any] =dropout
A : str =attention_dropout
A : List[Any] =activation_dropout
A : List[Any] =layerdrop
A : List[Any] =init_std
A : Union[str, Any] =scale_embedding # scale factor will be sqrt(d_model) if True
A : List[str] =use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
| 661 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : str ={
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Dict = "wavlm"
def __init__( self : str , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_68 , SCREAMING_SNAKE_CASE__ : Tuple=12 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : List[Any]=30_72 , SCREAMING_SNAKE_CASE__ : str="gelu" , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.0_2 , SCREAMING_SNAKE_CASE__ : int=1e-5 , SCREAMING_SNAKE_CASE__ : Dict="group" , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : str=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , SCREAMING_SNAKE_CASE__ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , SCREAMING_SNAKE_CASE__ : str=(10, 3, 3, 3, 3, 2, 2) , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=1_28 , SCREAMING_SNAKE_CASE__ : List[Any]=16 , SCREAMING_SNAKE_CASE__ : Tuple=3_20 , SCREAMING_SNAKE_CASE__ : List[Any]=8_00 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : List[str]=0.0_5 , SCREAMING_SNAKE_CASE__ : List[Any]=10 , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : str=10 , SCREAMING_SNAKE_CASE__ : int=3_20 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Dict=1_00 , SCREAMING_SNAKE_CASE__ : Tuple=2_56 , SCREAMING_SNAKE_CASE__ : List[Any]=2_56 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]="mean" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Any=2_56 , SCREAMING_SNAKE_CASE__ : Optional[int]=(5_12, 5_12, 5_12, 5_12, 15_00) , SCREAMING_SNAKE_CASE__ : Union[str, Any]=(5, 3, 3, 1, 1) , SCREAMING_SNAKE_CASE__ : Dict=(1, 2, 3, 1, 1) , SCREAMING_SNAKE_CASE__ : Tuple=5_12 , SCREAMING_SNAKE_CASE__ : Any=80 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Tuple=3 , SCREAMING_SNAKE_CASE__ : Any=None , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
A : Tuple =hidden_size
A : Union[str, Any] =feat_extract_norm
A : Tuple =feat_extract_activation
A : Any =list(SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =list(SCREAMING_SNAKE_CASE__ )
A : str =list(SCREAMING_SNAKE_CASE__ )
A : List[str] =conv_bias
A : List[Any] =num_buckets
A : str =max_bucket_distance
A : int =num_conv_pos_embeddings
A : Tuple =num_conv_pos_embedding_groups
A : Any =len(self.conv_dim )
A : Any =num_hidden_layers
A : Optional[Any] =intermediate_size
A : Union[str, Any] =hidden_act
A : Optional[int] =num_attention_heads
A : Dict =hidden_dropout
A : Optional[Any] =attention_dropout
A : Optional[Any] =activation_dropout
A : Optional[Any] =feat_proj_dropout
A : Union[str, Any] =final_dropout
A : Union[str, Any] =layerdrop
A : Optional[Any] =layer_norm_eps
A : Any =initializer_range
A : List[str] =num_ctc_classes
A : Dict =vocab_size
A : List[str] =do_stable_layer_norm
A : Dict =use_weighted_layer_sum
A : Optional[Any] =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A : str =apply_spec_augment
A : Any =mask_time_prob
A : Any =mask_time_length
A : int =mask_time_min_masks
A : str =mask_feature_prob
A : Optional[int] =mask_feature_length
# parameters for pretraining with codevector quantized representations
A : Union[str, Any] =num_codevectors_per_group
A : Optional[Any] =num_codevector_groups
A : List[Any] =contrastive_logits_temperature
A : str =num_negatives
A : Any =codevector_dim
A : Dict =proj_codevector_dim
A : List[Any] =diversity_loss_weight
# ctc loss
A : int =ctc_loss_reduction
A : int =ctc_zero_infinity
# adapter
A : int =add_adapter
A : Tuple =adapter_kernel_size
A : Optional[Any] =adapter_stride
A : Union[str, Any] =num_adapter_layers
A : int =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A : List[str] =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A : Optional[int] =list(SCREAMING_SNAKE_CASE__ )
A : Any =list(SCREAMING_SNAKE_CASE__ )
A : str =list(SCREAMING_SNAKE_CASE__ )
A : List[str] =xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 661 |
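A worked example of the stride product the final property computes: with conv_stride = (5, 2, 2, 2, 2, 2, 2), the feature extractor downsamples raw audio by a factor of 5 * 2**6 = 320 samples per frame, so one second of 16 kHz audio yields about 50 frames.

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)            # 320
print(16_000 // ratio)  # 50 frames per second of audio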
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowercase : List[str] ='''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def get_user_input( ) -> List[Any]:
    compute_environment =_ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config =get_sagemaker_input()
    else:
        config =get_cluster_input()
return config
def config_command_parser( lowercase: int=None ) -> str:
    if lowercase is not None:
        parser =lowercase.add_parser('config', description=_lowercase )
    else:
        parser =argparse.ArgumentParser('Accelerate config command', description=_lowercase )
parser.add_argument(
'--config_file', default=lowercase, help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
), )
    if lowercase is not None:
        parser.set_defaults(func=config_command )
return parser
def config_command( lowercase: Tuple ) -> List[Any]:
    config =get_user_input()
    if lowercase.config_file is not None:
        config_file =lowercase.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file =default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
print(F'accelerate configuration saved at {config_file}' )
def main( ) -> Optional[int]:
    parser =config_command_parser()
    args =parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
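# Hedged usage sketch (names as restored here; the run line is commented out because it
# prompts interactively and writes a config file):
_sketch_parser =config_command_parser()
_sketch_args =_sketch_parser.parse_args([] )
# config_command(_sketch_args) # runs the prompts, then saves default_config.yaml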
| 661 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( lowercase: int ) -> int:
    model_parameters =filter(lambda p : p.requires_grad, lowercase.parameters() )
    params =sum([np.prod(p.size() ) for p in model_parameters] )
return params
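# Hedged check of the helper above: a Linear(8, 4) layer has 8*4 weights plus 4 biases.
import torch.nn as nn

assert count_trainable_parameters(nn.Linear(8 , 4 ) ) == 36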
logger : Tuple =logging.getLogger(__name__)
def get_checkpoint_callback( lowercase: List[str], metric: Tuple ) -> int:
    if metric == "rouge2":
        exp ='{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp ='{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp ='{val_avg_em:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            ' function.' )
    checkpoint_callback =ModelCheckpoint(
        dirpath=lowercase, filename=exp, monitor=F'val_{metric}', mode='max', save_top_k=3, every_n_epochs=1, )
return checkpoint_callback
def get_early_stopping_callback( metric: List[str], lowercase: Dict ) -> Union[str, Any]:
return EarlyStopping(
monitor=F'val_{metric}', mode='min' if 'loss' in metric else 'max', patience=lowercase, verbose=lowercase, )
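# Hedged usage check of the factories above (names as restored here): 'rouge2' monitors
# val_rouge2 in 'max' mode, and early stopping flips to 'min' only for loss-like metrics.
_demo_ckpt =get_checkpoint_callback('checkpoints/' , 'rouge2' )
_demo_es =get_early_stopping_callback('rouge2' , 3 )
assert _demo_ckpt.monitor == 'val_rouge2' and _demo_es.mode == 'max'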
class SCREAMING_SNAKE_CASE_ ( pl.Callback ):
'''simple docstring'''
    def on_batch_end( self : Tuple , trainer : List[str] , pl_module : List[str] ) -> Optional[Any]:
        lrs ={f'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
    def _write_logs( self : List[Any] , trainer : pl.Trainer , pl_module : pl.LightningModule , type_path : str , save_generations : bool=True ) -> None:
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics =trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        od =Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file =od / 'test_results.txt'
            generations_file =od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file =od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file =od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , 'a+' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val =metrics[key]
                if isinstance(val , torch.Tensor ):
                    val =val.item()
                msg =f'{key}: {val:.6f}\n'
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content ='\n'.join(metrics['preds'] )
            generations_file.open('w+' ).write(content )
@rank_zero_only
    def on_train_start( self : Union[str, Any] , trainer : List[Any] , pl_module : Any ) -> int:
        try:
            npars =pl_module.model.model.num_parameters()
        except AttributeError:
            npars =pl_module.model.num_parameters()
        n_trainable_pars =count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
@rank_zero_only
    def on_test_end( self : str , trainer : pl.Trainer , pl_module : pl.LightningModule ) -> str:
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , 'test' )
@rank_zero_only
    def on_validation_end( self : List[str] , trainer : pl.Trainer , pl_module : List[str] ) -> Tuple:
        save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
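# Hedged wiring sketch: the class above is upstream's `Seq2SeqLoggingCallback` (it keeps its
# obfuscated name here); attaching it follows the standard pytorch_lightning API, with the
# trainer/model objects below as placeholders:
#
# trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), _demo_ckpt, _demo_es])
# trainer.fit(model)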
| 661 |
import collections
import importlib.util
import os
import re
from pathlib import Path
_lowercase : List[str] ='''src/transformers'''
# Matches is_xxx_available()
_lowercase : Dict =re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_lowercase : List[Any] =re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_lowercase : Tuple =re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_lowercase : Dict =re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_lowercase : List[Any] =re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_lowercase : str =re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_lowercase : Optional[int] =re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_lowercase : Any =re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_lowercase : List[Any] =re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_lowercase : Optional[Any] =re.compile(R'''^\s*try:''')
# Catches a line with else:
_lowercase : List[Any] =re.compile(R'''^\s*else:''')
def find_backend( lowercase: Dict ) -> int:
if _re_test_backend.search(lowercase ) is None:
return None
A : Any =[b[0] for b in _re_backend.findall(lowercase )]
backends.sort()
return "_and_".join(lowercase )
def parse_init( lowercase: Any ) -> List[Any]:
with open(lowercase, 'r', encoding='utf-8', newline='\n' ) as f:
A : Optional[Any] =f.readlines()
A : Dict =0
while line_index < len(lowercase ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowercase ):
return None
# First grab the objects without a specific backend in _import_structure
A : Optional[int] =[]
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
A : int =lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowercase ):
A : int =_re_one_line_import_struct.search(lowercase ).groups()[0]
            A : int =re.findall(R'\[([^\]]+)\]', lowercase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
A : Optional[int] =_re_import_struct_key_value.search(lowercase )
if single_line_import_search is not None:
A : Dict =[obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(lowercase ) > 0]
objects.extend(lowercase )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
A : str ={'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
A : Optional[int] =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A : str =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A : List[str] =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
A : Optional[Any] =lines[line_index]
if _re_import_struct_add_one.search(lowercase ) is not None:
objects.append(_re_import_struct_add_one.search(lowercase ).groups()[0] )
elif _re_import_struct_add_many.search(lowercase ) is not None:
A : Optional[Any] =_re_import_struct_add_many.search(lowercase ).groups()[0].split(', ' )
A : int =[obj[1:-1] for obj in imports if len(lowercase ) > 0]
objects.extend(lowercase )
elif _re_between_brackets.search(lowercase ) is not None:
A : Optional[int] =_re_between_brackets.search(lowercase ).groups()[0].split(', ' )
A : Optional[int] =[obj[1:-1] for obj in imports if len(lowercase ) > 0]
objects.extend(lowercase )
elif _re_quote_object.search(lowercase ) is not None:
objects.append(_re_quote_object.search(lowercase ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
A : Optional[Any] =objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A : Optional[Any] =[]
while (
line_index < len(lowercase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
A : Any =lines[line_index]
A : Optional[int] =_re_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
A : Optional[Any] ={'none': objects}
# Let's continue with backend-specific objects
while line_index < len(lowercase ):
# If the line is an if is_backend_available, we grab all objects associated.
A : str =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A : Optional[Any] =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A : List[str] =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
A : Any =lines[line_index]
A : Any =_re_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
A : Dict =objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects: Any, type_hint_objects: int ) -> Dict:
    def find_duplicates(lowercase: List[str] ):
        return [k for k, v in collections.Counter(lowercase ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors =[]
    for key in import_dict_objects.keys():
        duplicate_imports =find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
        duplicate_type_hints =find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name ='base imports' if key == 'none' else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
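# Hedged usage check of the checker above: two toy halves that disagree on one object.
assert analyze_results({'none': ['FooConfig', 'FooModel']} , {'none': ['FooConfig']} ) == [
    'Differences for base imports:', ' FooModel in _import_structure but not in TYPE_HINT.']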
def check_all_inits( ) -> List[str]:
A : Dict =[]
for root, _, files in os.walk(lowercase ):
if "__init__.py" in files:
A : Any =os.path.join(lowercase, '__init__.py' )
A : Union[str, Any] =parse_init(lowercase )
if objects is not None:
A : str =analyze_results(*lowercase )
if len(lowercase ) > 0:
A : Any =F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('\n'.join(lowercase ) )
if len(lowercase ) > 0:
raise ValueError('\n\n'.join(lowercase ) )
def get_transformers_submodules( ) -> int:
A : List[str] =[]
for path, directories, files in os.walk(lowercase ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(lowercase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowercase ) / folder).glob('*.py' ) ) ) == 0:
continue
A : Any =str((Path(lowercase ) / folder).relative_to(lowercase ) )
A : List[str] =short_path.replace(os.path.sep, '.' )
submodules.append(lowercase )
for fname in files:
if fname == "__init__.py":
continue
A : Optional[Any] =str((Path(lowercase ) / fname).relative_to(lowercase ) )
A : Dict =short_path.replace('.py', '' ).replace(os.path.sep, '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(lowercase )
return submodules
_lowercase : Tuple =[
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def check_submodules( ) -> Tuple:
# This is to make sure the transformers module imported is the one in the repo.
A : str =importlib.util.spec_from_file_location(
'transformers', os.path.join(lowercase, '__init__.py' ), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
A : Any =spec.loader.load_module()
A : Any =[
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(lowercase ) > 0:
A : Dict ='\n'.join(F'- {module}' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F'{list_of_modules}\n'
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 661 | 1 |
from __future__ import annotations
import numpy as np
def relu( lowercase: list[float] ) -> Tuple:
return np.maximum(0, lowercase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 661 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowercase : Any =logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ ( TrainingArguments ):
'''simple docstring'''
lowercase : Optional[float] = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
lowercase : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} )
lowercase : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} )
lowercase : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
lowercase : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
lowercase : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} )
lowercase : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
lowercase : Optional[str] = field(
default="linear" , metadata={"help": f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
| 661 | 1 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT =tuple[int, int]
class Graph :
    '''simple docstring'''
    def __init__( self : Tuple , vertices : set[int] , edges : Mapping[EdgeT, int] ) -> None:
        self.vertices : set[int] =vertices
        self.edges : dict[EdgeT, int] ={
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self : Tuple , edge : EdgeT , weight : int ) -> None:
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] =weight
    def prims_algorithm( self : Dict ) -> Graph:
        subgraph : Graph =Graph({min(self.vertices )} , {} )
        min_edge : EdgeT
        min_weight : int
        edge : EdgeT
        weight : int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight =max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge =edge
                        min_weight =weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def solution( lowercase: str = "p107_network.txt" ) -> int:
    script_dir : str =os.path.abspath(os.path.dirname(__file__ ) )
    filepath : str =os.path.join(script_dir, lowercase )
    edges : dict[EdgeT, int] ={}
    data : list[str]
    edgea : int
    edgeb : int
    with open(filepath ) as f:
        data =f.read().strip().split('\n' )
    adjaceny_matrix =[line.split(',' ) for line in data]
    for edgea in range(1, len(adjaceny_matrix ) ):
        for edgeb in range(edgea ):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgea, edgeb)] =int(adjaceny_matrix[edgea][edgeb] )
    graph : Graph =Graph(set(range(len(adjaceny_matrix ) ) ), edges )
    subgraph : Graph =graph.prims_algorithm()
    initial_total : int =sum(graph.edges.values() )
    optimal_total : int =sum(subgraph.edges.values() )
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
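# Hedged usage check of the class above (names as restored here) on a 3-vertex triangle:
_demo =Graph({0, 1, 2} , {(0, 1): 3, (1, 2): 1, (0, 2): 2} )
assert sum(_demo.prims_algorithm().edges.values() ) == 3 # MST keeps (0,2) and (1,2); saving vs. total 6 is 3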
| 661 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowercase : int =2
class Dictionary :
    '''simple docstring'''
    def __init__( self : List[Any] , *, # begin keyword-only arguments
    bos : List[Any]="<s>" , pad : Optional[int]="<pad>" , eos : List[str]="</s>" , unk : Optional[Any]="<unk>" , extra_special_symbols : int=None , ) -> List[Any]:
        self.bos_word , self.unk_word , self.pad_word , self.eos_word =bos, unk, pad, eos
        self.symbols =[]
        self.count =[]
        self.indices ={}
        self.bos_index =self.add_symbol(bos )
        self.pad_index =self.add_symbol(pad )
        self.eos_index =self.add_symbol(eos )
        self.unk_index =self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial =len(self.symbols )
    def __eq__( self : Optional[Any] , other : List[str] ) -> str:
        return self.indices == other.indices
    def __getitem__( self : int , idx : List[Any] ) -> List[Any]:
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word
    def __len__( self : List[Any] ) -> Union[str, Any]:
        return len(self.symbols )
    def __contains__( self : Dict , sym : List[Any] ) -> Tuple:
        return sym in self.indices
    @classmethod
    def load( cls : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
        d =cls()
        d.add_from_file(SCREAMING_SNAKE_CASE__ )
        return d
    def add_symbol( self : Tuple , word : str , n : Any=1 , overwrite : Optional[Any]=False ) -> Any:
        if word in self.indices and not overwrite:
            idx =self.indices[word]
            self.count[idx] =self.count[idx] + n
            return idx
        else:
            idx =len(self.symbols )
            self.indices[word] =idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
        return 0
    def add_from_file( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
        if isinstance(SCREAMING_SNAKE_CASE__ , str ):
            try:
                with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE__ ) )
            return
        lines =SCREAMING_SNAKE_CASE__.readlines()
        indices_start_line =self._load_meta(lines )
        for line in lines[indices_start_line:]:
            try:
                line , field =line.rstrip().rsplit(' ' , 1 )
                if field == "#fairseq:overwrite":
                    overwrite =True
                    line , field =line.rsplit(' ' , 1 )
                else:
                    overwrite =False
                count =int(field )
                word =line
                if word in self and not overwrite:
                    raise RuntimeError(
                        'Duplicate word found when loading Dictionary: \'{}\'. '
                        'Duplicate words can overwrite earlier ones by adding the '
                        '#fairseq:overwrite flag at the end of the corresponding row '
                        'in the dictionary file. If using the Camembert model, please '
                        'download an updated copy of the model file.'.format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def rewrite_dict_keys( lowercase: Union[str, Any] ) -> str:
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da =dict((re.sub(r'@@$', '', k ), v) if k.endswith('@@' ) else (re.sub(r'$', '</w>', k ), v) for k, v in lowercase.items() )
    keep_keys ='<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[F'{k}</w>']
        da[k] =lowercase[k] # restore
    return da
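# Hedged worked check of the rewrite above, matching the docstring comment: BPE continuations
# lose '@@', finished words gain '</w>', and the special tokens survive untouched.
assert rewrite_dict_keys({'le@@': 5, 'er': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} ) == {
    'le': 5, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}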
def A__ ( lowercase: Optional[int], lowercase: Optional[Any] ) -> str:
# prep
if not os.path.exists(lowercase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowercase, exist_ok=lowercase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
A : List[str] =os.path.join(lowercase, 'checkpoint.pt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
A : Optional[Any] =torch.load(lowercase, map_location='cpu' )
A : Any =chkpt['cfg']['model']
# dicts
A : Any =os.path.join(lowercase, 'dict.txt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
A : Dict =Dictionary.load(lowercase )
A : Optional[Any] =rewrite_dict_keys(src_dict.indices )
A : Tuple =len(lowercase )
A : Any =os.path.join(lowercase, VOCAB_FILES_NAMES['vocab_file'] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# merges_file (bpecodes)
A : List[str] =os.path.join(lowercase, 'bpecodes' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
A : List[str] =os.path.join(lowercase, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowercase, lowercase )
# model config
A : Tuple =os.path.join(lowercase, 'config.json' )
A : Tuple ={
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# tokenizer config
A : int =os.path.join(lowercase, lowercase )
A : List[str] ={
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# model
A : List[Any] =chkpt['model']
# remove unneeded keys
A : List[Any] =[
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowercase, lowercase )
A : str =list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
A : Union[str, Any] =model_state_dict.pop(lowercase )
else:
A : List[str] =model_state_dict.pop(lowercase )
A : Any =BioGptConfig.from_pretrained(lowercase )
A : str =BioGptForCausalLM(lowercase )
# check that it loads ok
model_new.load_state_dict(lowercase )
# save
A : Tuple =os.path.join(lowercase, lowercase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase, lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
_lowercase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase : List[Any] =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
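# Hedged invocation sketch (script name and paths are assumptions, following the upstream
# transformers conversion-script convention):
#
# python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#     --biogpt_checkpoint_path /path/to/fairseq_dump \
#     --pytorch_dump_folder_path /path/to/hf_output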
| 661 | 1 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_lowercase : Dict =True
except ImportError:
_lowercase : Union[str, Any] =False
_lowercase : Any =logging.get_logger(__name__) # pylint: disable=invalid-name
def A__ ( lowercase: Namespace ) -> Optional[Any]:
    return AddNewModelCommand(lowercase.testing, lowercase.testing_file, path=lowercase.path )
class AddNewModelCommand( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Optional[int]:
        add_new_model_parser =SCREAMING_SNAKE_CASE__.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' , type=SCREAMING_SNAKE_CASE__ , help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' , type=SCREAMING_SNAKE_CASE__ , help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
    def __init__( self : List[str] , testing : bool , testing_file : str , path : List[str]=None , *args : Dict ) -> Union[str, Any]:
        self._testing =testing
        self._testing_file =testing_file
        self._path =path
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A : List[str] =[directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(SCREAMING_SNAKE_CASE__ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
A : Optional[int] =(
Path(SCREAMING_SNAKE_CASE__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A : int =path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(SCREAMING_SNAKE_CASE__ ) )
else:
with open(self._testing_file , 'r' ) as configuration_file:
A : List[str] =json.load(SCREAMING_SNAKE_CASE__ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=SCREAMING_SNAKE_CASE__ , extra_context=SCREAMING_SNAKE_CASE__ , )
A : Optional[Any] =[directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' , 'r' ) as configuration_file:
A : Union[str, Any] =json.load(SCREAMING_SNAKE_CASE__ )
A : List[str] =configuration['lowercase_modelname']
A : Tuple =configuration['generate_tensorflow_pytorch_and_flax']
os.remove(f'{directory}/configuration.json' )
A : Union[str, Any] ='PyTorch' in generate_tensorflow_pytorch_and_flax
A : List[Any] ='TensorFlow' in generate_tensorflow_pytorch_and_flax
A : str ='Flax' in generate_tensorflow_pytorch_and_flax
A : Tuple =f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=SCREAMING_SNAKE_CASE__ )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , 'w' ):
pass
shutil.move(
f'{directory}/__init__.py' , f'{model_dir}/__init__.py' , )
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' , f'{model_dir}/configuration_{lowercase_model_name}.py' , )
def remove_copy_lines(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
A : Union[str, Any] =f.readlines()
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(SCREAMING_SNAKE_CASE__ )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' , f'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' , f'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' , f'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' , f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ):
# Create temp file
A , A : Optional[int] =mkstemp()
A : Union[str, Any] =False
with fdopen(SCREAMING_SNAKE_CASE__ , 'w' ) as new_file:
with open(SCREAMING_SNAKE_CASE__ ) as old_file:
for line in old_file:
new_file.write(SCREAMING_SNAKE_CASE__ )
if line_to_copy_below in line:
A : List[Any] =True
for line_to_copy in lines_to_copy:
new_file.write(SCREAMING_SNAKE_CASE__ )
if not line_found:
raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Remove original file
remove(SCREAMING_SNAKE_CASE__ )
# Move new file
move(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def skip_units(SCREAMING_SNAKE_CASE__ : Any ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(SCREAMING_SNAKE_CASE__ : Tuple ):
with open(SCREAMING_SNAKE_CASE__ ) as datafile:
A : Any =[]
A : Tuple =False
A : int =False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
A : List[str] =line.split('"' )[1]
A : Union[str, Any] =skip_units(SCREAMING_SNAKE_CASE__ )
elif "# Below: " in line and "##" not in line:
A : Dict =line.split('"' )[1]
A : str =skip_units(SCREAMING_SNAKE_CASE__ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Any =[]
elif "# Replace with" in line and "##" not in line:
A : Optional[int] =[]
elif "##" not in line:
lines_to_copy.append(SCREAMING_SNAKE_CASE__ )
remove(SCREAMING_SNAKE_CASE__ )
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(SCREAMING_SNAKE_CASE__ )
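# Hedged invocation sketch (standard transformers CLI entry point; flags taken from
# register_subcommand above):
#
# transformers-cli add-new-model # interactive cookiecutter flow
# transformers-cli add-new-model --testing --testing_file config.json # scripted test mode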
| 661 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
_lowercase : str =False
_lowercase : Optional[Any] =False
def A__ ( lowercase: Namespace ) -> Optional[int]:
return TrainCommand(lowercase )
class TrainCommand( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Dict:
        train_parser =SCREAMING_SNAKE_CASE__.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=SCREAMING_SNAKE_CASE__ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=SCREAMING_SNAKE_CASE__ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE__ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=SCREAMING_SNAKE_CASE__ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE__ , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=SCREAMING_SNAKE_CASE__ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=SCREAMING_SNAKE_CASE__ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE__ , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Namespace ) -> List[Any]:
        args =SCREAMING_SNAKE_CASE__
        self.logger =logging.get_logger('transformers-cli/training' )
        self.framework ='tf' if is_tf_available() else 'torch'
        os.makedirs(args.output , exist_ok=True )
        self.output =args.output
        self.column_label =args.column_label
        self.column_text =args.column_text
        self.column_id =args.column_id
        self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
        if args.task == "text_classification":
            self.pipeline =TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f'Loading dataset from {args.train_data}' )
        self.train_dataset =Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset =None
        if args.validation_data:
            self.logger.info(f'Loading validation dataset from {args.validation_data}' )
            self.valid_dataset =Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split =args.validation_split
        self.train_batch_size =args.train_batch_size
        self.valid_batch_size =args.valid_batch_size
        self.learning_rate =args.learning_rate
        self.adam_epsilon =args.adam_epsilon
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
    def run_torch( self : int ) -> List[str]:
raise NotImplementedError
    def run_tf( self : Dict ) -> str:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
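# Hedged invocation sketch for the command above (flag names taken from register_subcommand):
#
# transformers-cli train \
#     --train_data train.csv --column_label 0 --column_text 1 --column_id 2 \
#     --task text_classification --model bert-base-uncased --output ./out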
| 661 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Any ={
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] =[
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
_lowercase : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
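# Hedged sketch of what the _LazyModule wiring buys: `import transformers` stays cheap because
# the torch-backed submodules above are only imported on first attribute access, e.g.:
#
# from transformers import Blip2Config # resolved lazily via _import_structure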
| 661 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self : Union[str, Any] , parent : Any , batch_size : Tuple=7 , num_channels : Dict=3 , min_resolution : Tuple=30 , max_resolution : int=4_00 , do_resize : Dict=True , size : Dict=None , do_normalize : List[Any]=True , image_mean : Dict=[0.5, 0.5, 0.5] , image_std : str=[0.5, 0.5, 0.5] , do_rescale : Optional[Any]=True , rescale_factor : Any=1 / 2_55 , do_pad : int=True , ) -> Optional[int]:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size =size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
        self.parent =parent
        self.batch_size =batch_size
        self.num_channels =num_channels
        self.min_resolution =min_resolution
        self.max_resolution =max_resolution
        self.do_resize =do_resize
        self.size =size
        self.do_normalize =do_normalize
        self.image_mean =image_mean
        self.image_std =image_std
        self.do_rescale =do_rescale
        self.rescale_factor =rescale_factor
        self.do_pad =do_pad
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self : Optional[Any] , image_inputs : Any , batched : Dict=False ) -> Dict:
        if not batched:
            image =image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h =image.size
            else:
                h , w =image.shape[1], image.shape[2]
            if w < h:
                expected_height =int(self.size['shortest_edge'] * h / w )
                expected_width =self.size['shortest_edge']
            elif w > h:
                expected_height =self.size['shortest_edge']
                expected_width =int(self.size['shortest_edge'] * w / h )
            else:
                expected_height =self.size['shortest_edge']
                expected_width =self.size['shortest_edge']
        else:
            expected_values =[]
            for image in image_inputs:
                expected_height , expected_width =self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height =max(expected_values , key=lambda item : item[0] )[0]
            expected_width =max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
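# Hedged arithmetic check of the shortest-edge rule above: a PIL input with (w, h) = (30, 400)
# and size {'shortest_edge': 18} maps the width to 18 and scales the height proportionally.
assert (int(18 * 400 / 30 ), 18 ) == (240, 18 ) # (expected_height, expected_width) when w < h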
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
lowercase : List[Any] = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self : Tuple ) -> Tuple:
        self.image_processor_tester =ConditionalDetrImageProcessingTester(self )
@property
    def image_processor_dict( self : Union[str, Any] ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
A : Tuple =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_mean' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_std' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'size' ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : int =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
A : str =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE__ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
A : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
A : List[Any] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : List[str] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A : Union[str, Any] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
A : Tuple =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Any =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : Optional[int] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
A : Optional[int] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Tuple =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : int =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Union[str, Any]:
# prepare image and target
A : Union[str, Any] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A : List[Any] =json.loads(f.read() )
A : Any ={'image_id': 3_97_69, 'annotations': target}
# encode them
A : str =ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
A : Any =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Optional[Any] =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : List[str] =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Dict =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : str =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : Dict =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : List[str] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : Union[str, Any] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify orig_size
A : List[Any] =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : int =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
# prepare image, target and masks_path
A : List[str] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A : Optional[int] =json.loads(f.read() )
A : int ={'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
A : Optional[Any] =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A : List[Any] =ConditionalDetrImageProcessor(format='coco_panoptic' )
A : Union[str, Any] =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , masks_path=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Dict =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : Dict =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Optional[int] =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : List[Any] =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Any =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : List[Any] =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : Any =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : str =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify masks
A : int =82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , SCREAMING_SNAKE_CASE__ )
# verify orig_size
A : Any =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : str =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
| 661 | 1 |
import collections
import importlib.util
import os
import re
from pathlib import Path
_lowercase : List[str] ='''src/transformers'''
# Matches is_xxx_available()
_lowercase : Dict =re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_lowercase : List[Any] =re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_lowercase : Tuple =re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_lowercase : Dict =re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_lowercase : List[Any] =re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_lowercase : str =re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_lowercase : Optional[int] =re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_lowercase : Any =re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_lowercase : List[Any] =re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_lowercase : Optional[Any] =re.compile(R'''^\s*try:''')
# Catches a line with else:
_lowercase : List[Any] =re.compile(R'''^\s*else:''')
def find_backend( lowercase: Dict ) -> int:
if _re_test_backend.search(lowercase ) is None:
return None
A : Any =[b[0] for b in _re_backend.findall(lowercase )]
backends.sort()
return "_and_".join(lowercase )
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the objects defined in the `_import_structure` dict
    and the objects imported under `TYPE_CHECKING`.
    """
    with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('_import_structure = {'):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r'\[([^\]]+)\]', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(' ' * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING'):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(' ' * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(' ' * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('else')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', '))
        elif line.startswith(' ' * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and list all the differences found."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}')

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = 'base imports' if key == 'none' else f'{key} backend'
            errors.append(f'Differences for {name}:')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f'  {a} in TYPE_HINT but not in _import_structure.')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f'  {a} in _import_structure but not in TYPE_HINT.')
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one is badly defined."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '__init__.py')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append('\n'.join(errors))
    if len(failures) > 0:
        raise ValueError('\n\n'.join(failures))
def get_transformers_submodules():
    """Return the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_'):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('*.py'))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '.')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('.py', '').replace(os.path.sep, '.')
            if len(submodule.split('.')) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        'transformers',
        os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '\n'.join(f'- {module}' for module in module_not_registered)
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            f'{list_of_modules}\n'
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.'
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 661 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
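# Checkpointing test for Accelerate with (optional) DeepSpeed: trains BERT on
# GLUE/MRPC, saves the full training state every epoch, and asserts that metrics
# and learning rates match after resuming from a checkpoint.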
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch['labels'])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('glue', 'mrpc')
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split('epoch_')[1]
        state_epoch_num = ''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print('resumed checkpoint performance:', accuracy)
        accelerator.print('resumed checkpoint\'s scheduler\'s lr:', lr_scheduler.get_lr()[0])
        accelerator.print('resumed optimizers\'s lr:', optimizer.param_groups[0]['lr'])
        with open(os.path.join(args.output_dir, f'state_{starting_epoch-1}.json'), 'r') as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f'epoch_{epoch}'
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state['accuracy'] = accuracy
        state['lr'] = lr_scheduler.get_lr()[0]
        state['optimizer_lr'] = optimizer.param_groups[0]['lr']
        state['epoch'] = epoch
        state['overall_step'] = overall_step
        accelerator.print(f'epoch {epoch}:', state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f'state_{epoch}.json'), 'w') as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        default='bert-base-cased',
        help='Path to pretrained model or model identifier from huggingface.co/models.',
        required=False,
    )
    parser.add_argument(
        '--output_dir',
        type=str,
        default='.',
        help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.',
    )
    parser.add_argument(
        '--resume_from_checkpoint',
        type=str,
        default=None,
        help='If the training should continue from a checkpoint folder.',
    )
    parser.add_argument(
        '--partial_train_epoch',
        type=int,
        default=None,
        help='If passed, the training will stop after this number of epochs.',
    )
    parser.add_argument(
        '--num_epochs',
        type=int,
        default=2,
        help='Number of train epochs.',
    )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 661 | 1 |
from __future__ import annotations
import math
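# Minimax over a perfect binary game tree: maximizing and minimizing players
# alternate by depth, and leaf payoffs are read from `scores`.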
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player to move at `node_index`."""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 661 |
def count_set_bits(number: int) -> int:
    """
    Count the set bits (1s) in the binary representation of a non-negative integer
    using Brian Kernighan's trick of clearing the lowest set bit each iteration.

    >>> count_set_bits(25)
    3
    >>> count_set_bits(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError('Input must be a non-negative integer')
    count = 0
    while number:
        # Clearing the lowest set bit jumps straight to the next 1, so the loop
        # runs once per set bit instead of once per bit position (up to 32 times).
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 | 1 |
import torch
from torch import nn
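# A projected adaptive log-softmax (in the style of Transformer-XL): the vocabulary
# is split into a frequent-word "head" shortlist plus tail clusters, each with its own
# (optionally down-projected) output layer, keeping the softmax affordable for large
# vocabularies.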
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.')
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
    def log_prob(self, hidden):
        """Compute log probabilities over the full vocabulary for every hidden state."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
| 661 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
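# `deprecate` warns about deprecated arguments or attributes and raises once the
# installed library version reaches the version in which they are slated for removal.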
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
                f' version {__version__} is >= {version_name}'
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
        elif deprecated_kwargs is None:
            warning = f'`{attribute}` is deprecated and will be removed in version {version_name}.'

        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`')

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 661 | 1 |
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation.

    >>> decimal_to_octal(216)
    '0o330'
    """
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f'0o{int(octal)}'


def main() -> None:
    print('\n2 in octal is:')
    print(decimal_to_octal(2))  # = 2
    print('\n8 in octal is:')
    print(decimal_to_octal(8))  # = 10
    print('\n65 in octal is:')
    print(decimal_to_octal(65))  # = 101
    print('\n216 in octal is:')
    print(decimal_to_octal(216))  # = 330
    print('\n512 in octal is:')
    print(decimal_to_octal(512))  # = 1000
    print('\n')


if __name__ == "__main__":
    main()
| 661 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
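# Round-trip tests for JSON / JSON Lines support: JsonDatasetReader with feature
# casting, splits and path types, and JsonDatasetWriter with orient, multiprocessing
# and compression options.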
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('keep_in_memory', [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    'features',
    [
        None,
        {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
        {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
        {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
        {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    'features',
    [
        None,
        {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
    expected_features = features.copy()
    features = Features({feature: Value(dtype) for feature, dtype in features.items()})
    cache_dir = tmp_path / 'cache'
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize('path_type', [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('keep_in_memory', [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({'train': jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    'features',
    [
        None,
        {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
        {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
        {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
        {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({'train': jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = 'train'
        path = {'train': jsonl_path, 'test': jsonl_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize('lines, load_json_function', [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        'orient, container, keys, len_at',
        [
            ('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
            ('split', dict, {'columns', 'data'}, 'data'),
            ('index', dict, set('0123456789'), None),
            ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
            ('values', list, None, None),
            ('table', dict, {'schema', 'data'}, 'data'),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, 'keys') and not hasattr(exported_content[0], 'keys')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize('lines, load_json_function', [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        'orient, container, keys, len_at',
        [
            ('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
            ('split', dict, {'columns', 'data'}, 'data'),
            ('index', dict, set('0123456789'), None),
            ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
            ('values', list, None, None),
            ('table', dict, {'schema', 'data'}, 'data'),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, 'keys') and not hasattr(exported_content[0], 'keys')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize('compression, extension', [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp('data') / f'test.json.{extension}'
        original_path = str(shared_datadir / f'test_file.json.{extension}')
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, 'rb', compression='infer') as f:
            exported_content = f.read()
        with fsspec.open(original_path, 'rb', compression='infer') as f:
            original_content = f.read()
        assert exported_content == original_content
| 661 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ernie'] = [
        'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ErnieForCausalLM',
        'ErnieForMaskedLM',
        'ErnieForMultipleChoice',
        'ErnieForNextSentencePrediction',
        'ErnieForPreTraining',
        'ErnieForQuestionAnswering',
        'ErnieForSequenceClassification',
        'ErnieForTokenClassification',
        'ErnieModel',
        'ErniePreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 661 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
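# Fast tests run a tiny, randomly initialized UNet on CPU; the slow tests below load
# pretrained checkpoints and require a GPU.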
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        scheduler = DDIMScheduler()
        components = {'unet': unet, 'scheduler': scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = 'google/ddpm-cifar10-32'
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = 'google/ddpm-ema-bedroom-256'
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 661 | 1 |
import math
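# Trial-division primality test plus `next_prime`, which searches upwards (or
# downwards when `desc=True` is passed) from `factor * value` for the nearest prime.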
def is_prime(number: int) -> bool:
    """Check whether `number` is prime by trial division over odd candidates."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 661 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
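# Processor tests for SAM: save/load round-trips, parity with the bare image
# processor, and `post_process_masks` across PyTorch, TensorFlow and NumPy inputs.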
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        input_feat_extract.pop('original_sizes')  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes')  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        input_feat_extract.pop('original_sizes')  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes')  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors='tf')
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors='tf',
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors='tf'
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors='tf'
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors='tf'
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors='pt'
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors='pt')['pixel_values'].numpy()
        pt_input_processor = processor(images=image_input, return_tensors='pt')['pixel_values'].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors='tf')['pixel_values'].numpy()
        tf_input_processor = processor(images=image_input, return_tensors='tf')['pixel_values'].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
| 661 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Any ={
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
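# The Lilt model classes are only added to the import structure when torch is available.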
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] =[
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
_lowercase : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client =WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def handle_test_results( test_results: Optional[int] ) -> Optional[int]:
expressions =test_results.split(' ' )
failed =0
success =0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
time_spent =expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(expressions ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def extract_first_line_failure( failures_short_lines: List[Any] ) -> str:
failures ={}
file =None
in_error =False
for line in failures_short_lines.split('\n' ):
if re.search(r'_ \[doctest\]', line ):
in_error =True
file =line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
failures[file] =line
in_error =False
return failures
class Message:
'''simple docstring'''
def __init__( self : str , title : str , doc_test_results : Dict ) -> List[str]:
self.title =title
self._time_spent =doc_test_results['time_spent'].split(',' )[0]
self.n_success =doc_test_results['success']
self.n_failures =doc_test_results['failures']
self.n_tests =self.n_success + self.n_failures
# Failures and success of the modeling tests
self.doc_test_results =doc_test_results
@property
def time( self : Tuple ) -> str:
time_spent =[self._time_spent]
total_secs =0
for time in time_spent:
time_parts =time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(time_parts ) == 1:
time_parts =[0, 0, time_parts[0]]
hours , minutes , seconds =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
hours , minutes , seconds =total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'{int(hours )}h{int(minutes )}m{int(seconds )}s'
@property
def header( self : Tuple ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def no_failures( self : List[str] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def failures( self : List[Any] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def category_failures( self : Optional[int] ) -> Dict:
line_length =40
category_failures ={k: v['failed'] for k, v in doc_test_results.items() if isinstance(v , dict )}
report =''
for category, failures in category_failures.items():
if len(failures ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(failures )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def payload( self : Optional[Any] ) -> str:
blocks =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(blocks )
@staticmethod
def error_out( ) -> Optional[Any]:
payload =[
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': payload} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=payload , )
def post( self : Dict ) -> Optional[int]:
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
text =f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
self.thread_ts =client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=text , )
def get_reply_blocks( self : Dict , job_name : List[str] , job_link : Optional[Any] , failures : int , text : Optional[Any] ) -> Optional[int]:
failures_text =''
for key, value in failures.items():
value =value[:2_00] + ' [Truncated]' if len(value ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
title =job_name
content ={'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
content['accessory'] ={
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def post_reply( self : Tuple ) -> List[Any]:
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
job_link =self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
sorted_dict =sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
text =f'*Num failures* :{len(job_result["failed"] )} \n'
failures =job_result['failures']
blocks =self.get_reply_blocks(job , job_link , failures , text=text )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'Results for {job}' , blocks=blocks , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def get_job_links( ) -> Union[str, Any]:
run_id =os.environ['GITHUB_RUN_ID']
url =F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
result =requests.get(url ).json()
jobs ={}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
pages_to_iterate_over =math.ceil((result['total_count'] - 100) / 100 )
for i in range(pages_to_iterate_over ):
result =requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.', e )
return {}
def retrieve_artifact( name: str ) -> Optional[Any]:
_artifact ={}
if os.path.exists(name ):
files =os.listdir(name )
for file in files:
try:
with open(os.path.join(name, file ), encoding='utf-8' ) as f:
_artifact[file.split('.' )[0]] =f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(name, file )}.' ) from e
return _artifact
def retrieve_available_artifacts( ) -> int:
class Artifact:
'''simple docstring'''
def __init__( self : Optional[int] , name : str ) -> List[str]:
self.name =name
self.paths =[]
def __str__( self : Optional[Any] ) -> List[str]:
return self.name
def add_path( self : int , path : str ) -> List[Any]:
self.paths.append({'name': self.name, 'path': path} )
_available_artifacts: Dict[str, Artifact] ={}
directories =filter(os.path.isdir, os.listdir() )
for directory in directories:
artifact_name =directory
if artifact_name not in _available_artifacts:
_available_artifacts[artifact_name] =Artifact(artifact_name )
_available_artifacts[artifact_name].add_path(directory )
return _available_artifacts
if __name__ == "__main__":
github_actions_job_links =get_job_links()
available_artifacts =retrieve_available_artifacts()
docs =collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
doc_test_results ={
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
doc_test_results['''job_link'''] =github_actions_job_links.get('''run_doctests''')
artifact_path =available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
artifact =retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
failed, success, time_spent =handle_test_results(artifact['''stats'''])
doc_test_results['''failures'''] =failed
doc_test_results['''success'''] =success
doc_test_results['''time_spent'''] =time_spent[1:-1] + ''', '''
all_failures =extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
line =line.replace('''FAILED ''', '''''')
line =line.split()[0].replace('''\n''', '''''')
if "::" in line:
file_path, test =line.split('''::''')
else:
file_path, test =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
category =docs[file_regex]
doc_test_results[category]["failed"].append(test)
failure =all_failures[test] if test in all_failures else '''N/A'''
doc_test_results[category]["failures"][test] =failure
break
message =Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 661 | 1 |
def speed_of_sound_in_a_fluid( density: float, bulk_modulus: float ) -> float:
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 |
__version__ ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 661 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
'''simple docstring'''
def __init__( self : Tuple , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=3 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=10_00 , ) -> Optional[Any]:
self.parent =parent
self.batch_size =batch_size
self.num_channels =num_channels
self.image_size =image_size
self.patch_size =patch_size
self.text_seq_length =text_seq_length
self.is_training =is_training
self.use_input_mask =use_input_mask
self.use_token_type_ids =use_token_type_ids
self.use_labels =use_labels
self.vocab_size =vocab_size
self.hidden_size =hidden_size
self.num_hidden_layers =num_hidden_layers
self.num_attention_heads =num_attention_heads
self.intermediate_size =intermediate_size
self.hidden_act =hidden_act
self.hidden_dropout_prob =hidden_dropout_prob
self.attention_probs_dropout_prob =attention_probs_dropout_prob
self.max_position_embeddings =max_position_embeddings
self.type_vocab_size =type_vocab_size
self.type_sequence_label_size =type_sequence_label_size
self.initializer_range =initializer_range
self.coordinate_size =coordinate_size
self.shape_size =shape_size
self.num_labels =num_labels
self.num_choices =num_choices
self.scope =scope
self.range_bbox =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
self.text_seq_length =text_seq_length
self.image_seq_length =(image_size // patch_size) ** 2 + 1
self.seq_length =self.text_seq_length + self.image_seq_length
def prepare_config_and_inputs( self : Dict ) -> Optional[Any]:
input_ids =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
bbox =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
t =bbox[i, j, 3]
bbox[i, j, 3] =bbox[i, j, 1]
bbox[i, j, 1] =t
if bbox[i, j, 2] < bbox[i, j, 0]:
t =bbox[i, j, 2]
bbox[i, j, 2] =bbox[i, j, 0]
bbox[i, j, 0] =t
pixel_values =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
input_mask =None
if self.use_input_mask:
input_mask =random_attention_mask([self.batch_size, self.text_seq_length] )
token_type_ids =None
if self.use_token_type_ids:
token_type_ids =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
sequence_labels =None
token_labels =None
if self.use_labels:
sequence_labels =ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
config =LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def create_and_check_model( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> int:
model =LayoutLMvaModel(config=config )
model.to(torch_device )
model.eval()
# text + image
result =model(input_ids , pixel_values=pixel_values )
result =model(
input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids )
result =model(input_ids , bbox=bbox , pixel_values=pixel_values , token_type_ids=token_type_ids )
result =model(input_ids , bbox=bbox , pixel_values=pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
result =model(input_ids )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
result =model(pixel_values=pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def create_and_check_for_sequence_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> Tuple:
config.num_labels =self.num_labels
model =LayoutLMvaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result =model(
input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> Optional[Any]:
config.num_labels =self.num_labels
model =LayoutLMvaForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result =model(
input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ) -> int:
model =LayoutLMvaForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result =model(
input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common( self : Optional[int] ) -> Tuple:
config_and_inputs =self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
pixel_values,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) =config_and_inputs
inputs_dict ={
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
test_pruning = False
test_torchscript = False
test_mismatched_shapes = False
all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def is_pipeline_test_to_skip( self : Optional[Any] , pipeline_test_casse_name : List[str] , config_class : Optional[Any] , model_architecture : int , tokenizer_name : str , processor_name : Optional[int] ) -> Optional[int]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def setUp( self : Any ) -> List[Any]:
self.model_tester =LayoutLMvaModelTester(self )
self.config_tester =ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
def _prepare_for_class( self : Optional[Any] , inputs_dict : Union[str, Any] , model_class : Any , return_labels : List[Any]=False ) -> str:
inputs_dict =copy.deepcopy(inputs_dict )
if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
inputs_dict ={
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(v , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
inputs_dict['labels'] =torch.ones(self.model_tester.batch_size , dtype=torch.long , device=torch_device )
elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
inputs_dict['start_positions'] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
inputs_dict['end_positions'] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
elif model_class in [
*get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
]:
inputs_dict['labels'] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
elif model_class in [
*get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ),
]:
inputs_dict['labels'] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=torch_device , )
return inputs_dict
def test_config( self : List[str] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def test_model( self : int ) -> Union[str, Any]:
config_and_inputs =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_model_various_embeddings( self : str ) -> List[Any]:
config_and_inputs =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type =type
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_sequence_classification( self : Optional[int] ) -> str:
config_and_inputs =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def test_for_token_classification( self : Any ) -> str:
config_and_inputs =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def test_for_question_answering( self : Dict ) -> List[str]:
config_and_inputs =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def test_model_from_pretrained( self : List[str] ) -> Union[str, Any]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model =LayoutLMvaModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img( ) -> List[str]:
image =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class LayoutLMvaModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
def default_image_processor( self : List[Any] ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def test_inference_no_head( self : Optional[Any] ) -> List[Any]:
model =LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(torch_device )
image_processor =self.default_image_processor
image =prepare_img()
pixel_values =image_processor(images=image , return_tensors='pt' ).pixel_values.to(torch_device )
input_ids =torch.tensor([[1, 2]] )
bbox =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
outputs =model(
input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
# verify the logits
expected_shape =torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
expected_slice =torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 661 |
from typing import List
from .keymap import KEYMAP, get_character
def mark( key: str ) -> List[str]:
def decorator(func: int ):
handle =getattr(func, 'handle_key', [] )
handle += [key]
setattr(func, 'handle_key', handle )
return func
return decorator
def mark_multiple( *keys: List[str] ) -> Dict:
def decorator(func: Union[str, Any] ):
handle =getattr(func, 'handle_key', [] )
handle += keys
setattr(func, 'handle_key', handle )
return func
return decorator
class KeyHandler( type ):
'''simple docstring'''
def __new__( cls : List[str] , name : Optional[Any] , bases : List[Any] , attrs : List[str] ) -> Any:
new_cls =super().__new__(cls , name , bases , attrs )
if not hasattr(new_cls , 'key_handler' ):
setattr(new_cls , 'key_handler' , {} )
setattr(new_cls , 'handle_input' , KeyHandler.handle_input )
for value in attrs.values():
handled_keys =getattr(value , 'handle_key' , [] )
for key in handled_keys:
new_cls.key_handler[key] =value
return new_cls
@staticmethod
def handle_input( cls : str ) -> Any:
char =get_character()
if char != KEYMAP["undefined"]:
char =ord(char )
handler =cls.key_handler.get(char )
if handler:
cls.current_selection =char
return handler(cls )
else:
return None
def register( cls: Optional[int] ) -> str:
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
| 661 | 1 |
_lowercase : List[Any] ={
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 661 |
import math
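# Sieve of Eratosthenes helper: enumerates all primes below n for use by solution() below.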
def prime_sieve( n: int ) -> list:
is_prime =[True] * n
is_prime[0] =False
is_prime[1] =False
is_prime[2] =True
for i in range(3, int(n**0.5 + 1 ), 2 ):
index =i * 2
while index < n:
is_prime[index] =False
index =index + i
primes =[2]
for i in range(3, n, 2 ):
if is_prime[i]:
primes.append(i )
return primes
def solution( limit: int = 999_966_663_333 ) -> int:
primes_upper_bound =math.floor(math.sqrt(limit ) ) + 100
primes =prime_sieve(primes_upper_bound )
matches_sum =0
prime_index =0
last_prime =primes[prime_index]
while (last_prime**2) <= limit:
next_prime =primes[prime_index + 1]
lower_bound =last_prime**2
upper_bound =next_prime**2
# Get numbers divisible by lps(current)
current =lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
current =upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
current =0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
last_prime =next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 661 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Dict ={
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig( PretrainedConfig ):
'''simple docstring'''
model_type = "xglm"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , vocab_size=25_60_08 , max_position_embeddings=20_48 , d_model=10_24 , ffn_dim=40_96 , num_layers=24 , attention_heads=16 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.0_2 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> int:
self.vocab_size =vocab_size
self.max_position_embeddings =max_position_embeddings
self.d_model =d_model
self.ffn_dim =ffn_dim
self.num_layers =num_layers
self.attention_heads =attention_heads
self.activation_function =activation_function
self.dropout =dropout
self.attention_dropout =attention_dropout
self.activation_dropout =activation_dropout
self.layerdrop =layerdrop
self.init_std =init_std
self.scale_embedding =scale_embedding # scale factor will be sqrt(d_model) if True
self.use_cache =use_cache
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 661 |
import heapq
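# Greedy heuristic for minimum vertex cover: repeatedly take the vertex covering the most remaining edges.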
def greedy_min_vertex_cover( graph: dict ) -> set[int]:
queue: list[list] =[]
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(queue, [-1 * len(value ), (key, value)] )
# chosen_vertices = set of chosen vertices
chosen_vertices =set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
argmax =heapq.heappop(queue )[1][0]
chosen_vertices.add(argmax )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
index =elem[1][1].index(argmax )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(queue )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
graph ={0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 661 | 1 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
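# Trains an XGBoost classifier on the Iris dataset and plots its normalized confusion matrix.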
def data_handling( data: dict ) -> tuple:
return (data["data"], data["target"])
def xgboost( features: np.ndarray, target: np.ndarray ) -> XGBClassifier:
classifier =XGBClassifier()
classifier.fit(features, target )
return classifier
def main( ) -> None:
iris =load_iris()
features , targets =data_handling(iris )
x_train , x_test , y_train , y_test =train_test_split(
features, targets, test_size=0.25 )
names =iris['target_names']
# Create an XGBoost Classifier from the training data
xgboost_classifier =xgboost(x_train, y_train )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
xgboost_classifier, x_test, y_test, display_labels=names, cmap='Blues', normalize='true', )
plt.title('Normalized Confusion Matrix - IRIS Dataset' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 661 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger =logging.get_logger(__name__)
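# Base class for speech feature extractors: adds batch padding/truncation on top of FeatureExtractionMixin.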
class SequenceFeatureExtractor( FeatureExtractionMixin ):
'''simple docstring'''
def __init__( self : Optional[int] , feature_size : int , sampling_rate : int , padding_value : float , **kwargs : List[Any] ) -> int:
self.feature_size =feature_size
self.sampling_rate =sampling_rate
self.padding_value =padding_value
self.padding_side =kwargs.pop('padding_side' , 'right' )
self.return_attention_mask =kwargs.pop('return_attention_mask' , True )
super().__init__(**kwargs )
def pad( self : str , processed_features : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , padding : Union[bool, str, PaddingStrategy] = True , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_tensors : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
processed_features ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
required_input =processed_features[self.model_input_names[0]]
return_attention_mask =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(required_input ) == 0:
if return_attention_mask:
processed_features['attention_mask'] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
first_element =required_input[0]
if isinstance(first_element , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
index =0
while len(required_input[index] ) == 0:
index += 1
if index < len(required_input ):
first_element =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(first_element ):
return_tensors ='tf'
elif is_torch_tensor(first_element ):
return_tensors ='pt'
elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
return_tensors ='np'
else:
raise ValueError(
f'type of {first_element} unknown: {type(first_element )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
processed_features[key] =to_numpy(value )
else:
processed_features[key] =[to_numpy(v ) for v in value]
# Convert padding_strategy in PaddingStrategy
padding_strategy =self._get_padding_strategies(padding=padding , max_length=max_length )
required_input =processed_features[self.model_input_names[0]]
batch_size =len(required_input )
if not all(len(v ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
truncated_inputs =[]
for i in range(batch_size ):
inputs ={k: v[i] for k, v in processed_features.items()}
# truncation
inputs_slice =self._truncate(
inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
truncated_inputs.append(inputs_slice )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
max_length =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
padding_strategy =PaddingStrategy.MAX_LENGTH
batch_outputs ={}
for i in range(batch_size ):
# padding
outputs =self._pad(
truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] =[]
if value.dtype is np.dtype(np.float64 ):
value =value.astype(np.float32 )
batch_outputs[key].append(value )
return BatchFeature(batch_outputs , tensor_type=return_tensors )
def _pad( self : Any , processed_features : Union[Dict[str, np.ndarray], BatchFeature] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ) -> dict:
required_input =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length =len(required_input )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
processed_features['attention_mask'] =np.ones(len(required_input ) , dtype=np.int32 )
if needs_to_be_padded:
difference =max_length - len(required_input )
if self.padding_side == "right":
if return_attention_mask:
processed_features['attention_mask'] =np.pad(
processed_features['attention_mask'] , (0, difference) )
padding_shape =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
processed_features[self.model_input_names[0]] =np.pad(
required_input , padding_shape , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
processed_features['attention_mask'] =np.pad(
processed_features['attention_mask'] , (difference, 0) )
padding_shape =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
processed_features[self.model_input_names[0]] =np.pad(
required_input , padding_shape , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def _truncate( self : Any , processed_features : Union[Dict[str, np.ndarray], BatchFeature] , max_length : Optional[int] = None , pad_to_multiple_of : Optional[int] = None , truncation : Optional[bool] = None , ) -> Optional[Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
required_input =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_truncated =len(required_input ) > max_length
if needs_to_be_truncated:
processed_features[self.model_input_names[0]] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
processed_features['attention_mask'] =processed_features['attention_mask'][:max_length]
return processed_features
def _get_padding_strategies( self : Optional[Any] , padding : Any=False , max_length : Dict=None ) -> Union[str, Any]:
# Get padding strategy
if padding is not False:
if padding is True:
padding_strategy =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(padding , PaddingStrategy ):
padding_strategy =PaddingStrategy(padding )
elif isinstance(padding , PaddingStrategy ):
padding_strategy =padding
else:
padding_strategy =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
| 661 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
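# These tests launch real SageMaker training jobs and assert runtime/accuracy thresholds per framework.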
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def setUp( self : Tuple ) -> Optional[int]:
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=True , )
assert hasattr(self , 'env' )
def create_estimator( self : Tuple , instance_count : Optional[Any] ) -> Optional[int]:
job_name =f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
distribution ={'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='py36' , )
def save_results_as_csv( self : int , job_name : List[Any] ) -> str:
TrainingJobAnalytics(job_name ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def test_script( self : List[Any] , instance_count : List[Any] ) -> Dict:
# create estimator
estimator =self.create_estimator(instance_count )
# run training
estimator.fit()
# result dataframe
result_metrics_df =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
eval_accuracy =list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
eval_loss =list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
train_runtime =(
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , outfile )
| 661 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_lowercase : Optional[int] =logging.get_logger(__name__)
_lowercase : List[str] ={
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaVaConfig( PretrainedConfig ):
'''simple docstring'''
model_type = "deberta-v2"
def __init__( self : List[str] , vocab_size=12_81_00 , hidden_size=15_36 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=61_44 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=0 , initializer_range=0.0_2 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ) -> Dict:
super().__init__(**kwargs )
self.hidden_size =hidden_size
self.num_hidden_layers =num_hidden_layers
self.num_attention_heads =num_attention_heads
self.intermediate_size =intermediate_size
self.hidden_act =hidden_act
self.hidden_dropout_prob =hidden_dropout_prob
self.attention_probs_dropout_prob =attention_probs_dropout_prob
self.max_position_embeddings =max_position_embeddings
self.type_vocab_size =type_vocab_size
self.initializer_range =initializer_range
self.relative_attention =relative_attention
self.max_relative_positions =max_relative_positions
self.pad_token_id =pad_token_id
self.position_biased_input =position_biased_input
# Backwards compatibility
if type(pos_att_type ) == str:
pos_att_type =[x.strip() for x in pos_att_type.lower().split('|' )]
self.pos_att_type =pos_att_type
self.vocab_size =vocab_size
self.layer_norm_eps =layer_norm_eps
self.pooler_hidden_size =kwargs.get('pooler_hidden_size' , hidden_size )
self.pooler_dropout =pooler_dropout
self.pooler_hidden_act =pooler_hidden_act
class DebertaVaOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
def inputs( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A : List[Any] ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A : int ={0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def default_onnx_opset( self : int ) -> int:
return 12
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
A : str =super().generate_dummy_inputs(preprocessor=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 661 | 1 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
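# The Spark joblib backend needs dill>0.3.2 and joblibspark, and is not supported on Windows.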
def add_one( i: Tuple ) -> Any: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input( ) -> List[str]:
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
lst =[1, 2, 3]
with pytest.raises(ValueError ):
with parallel_backend('unsupported backend' ):
map_nested(add_one, lst, num_proc=2 )
with pytest.raises(ValueError ):
with parallel_backend('unsupported backend' ):
map_nested(add_one, lst, num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc', [2, -1] )
def test_parallel_backend_map_nested( num_proc: Optional[Any] ) -> Dict:
s1 =[1, 2]
s2 ={'a': 1, 'b': 2}
s3 ={'a': [1, 2], 'b': [3, 4]}
s4 ={'a': {'1': 1}, 'b': 2}
s5 ={'a': 1, 'b': 2, 'c': 3, 'd': 4}
expected_map_nested_s1 =[2, 3]
expected_map_nested_s2 ={'a': 2, 'b': 3}
expected_map_nested_s3 ={'a': [2, 3], 'b': [4, 5]}
expected_map_nested_s4 ={'a': {'1': 2}, 'b': 3}
expected_map_nested_s5 ={'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(add_one, s1, num_proc=num_proc ) == expected_map_nested_s1
assert map_nested(add_one, s2, num_proc=num_proc ) == expected_map_nested_s2
assert map_nested(add_one, s3, num_proc=num_proc ) == expected_map_nested_s3
assert map_nested(add_one, s4, num_proc=num_proc ) == expected_map_nested_s4
assert map_nested(add_one, s5, num_proc=num_proc ) == expected_map_nested_s5
| 661 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
'''simple docstring'''
_keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
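# GPT-2 re-creates these attention bias buffers at init time, so copies found in a checkpoint can be safely skipped.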
@register_to_config
def __init__( self : Any , prefix_length: int , prefix_inner_dim: int , prefix_hidden_dim: Optional[int] = None , vocab_size: int = 5_02_57 , n_positions: int = 10_24 , n_embd: int = 7_68 , n_layer: int = 12 , n_head: int = 12 , n_inner: Optional[int] = None , activation_function: str = "gelu_new" , resid_pdrop: float = 0.1 , embd_pdrop: float = 0.1 , attn_pdrop: float = 0.1 , layer_norm_epsilon: float = 1e-5 , initializer_range: float = 0.0_2 , scale_attn_weights: bool = True , use_cache: bool = True , scale_attn_by_inverse_layer_idx: bool = False , reorder_and_upcast_attn: bool = False , ) -> List[str]:
super().__init__()
self.prefix_length =prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
self.prefix_inner_dim =prefix_inner_dim
self.prefix_hidden_dim =prefix_hidden_dim
self.encode_prefix =(
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
self.decode_prefix =(
nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
)
gpt_config =GPTaConfig(
vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
self.transformer =GPTaLMHeadModel(gpt_config )
    def forward( self , input_ids: torch.Tensor , prefix_embeds: torch.Tensor , attention_mask: Optional[torch.Tensor] = None , labels: Optional[torch.Tensor] = None , ) -> Optional[Any]:
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            labels = torch.cat((dummy_token, input_ids) , dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token( self , batch_size: int , device: torch.device ) -> torch.Tensor:
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )
    def encode( self , prefix: Optional[int] ) -> List[str]:
        return self.encode_prefix(prefix )
    @torch.no_grad()
    def generate_captions( self , features: List[str] , eos_token_id: Union[str, Any] , device: Any ) -> Dict:
        features = torch.split(features , 1 , dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) ) # back to the clip feature
            # Only support beam search for now
            output_tokens , seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam( self , input_ids=None , input_embeds=None , device=None , beam_size: int = 5 , entry_length: int = 67 , temperature: float = 1.0 , eos_token_id: Optional[int] = None , ) -> Dict:
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )
        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()
            if scores is None:
                scores , next_tokens = logits.topk(beam_size , -1 )
                generated = generated.expand(beam_size , *generated.shape[1:] )
                next_tokens , scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average , next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated = torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
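# Hedged usage sketch (assumption, not part of the original file): decoding a batch of
# CLIP-style prefix features with the decoder above; dimensions are illustrative only.
#
#   decoder = SCREAMING_SNAKE_CASE_(prefix_length=77, prefix_inner_dim=768)
#   features = torch.randn(2, 768)                # two prefix embeddings
#   prefixes = decoder.encode(features)           # project into the latent prefix space
#   tokens, lengths = decoder.generate_captions(prefixes, eos_token_id=50256, device="cpu")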
| 661 | 1 |
import logging
import os
from .state import PartialState
class SCREAMING_SNAKE_CASE_ ( logging.LoggerAdapter ):
'''simple docstring'''
@staticmethod
    def _should_log( main_process_only: List[str] ) -> Optional[int]:
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log( self , level: Tuple , msg: int , *args: Tuple , **kwargs: Union[str, Any] ) -> Union[str, Any]:
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        main_process_only = kwargs.pop('main_process_only' , True )
        in_order = kwargs.pop('in_order' , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def A__ ( lowercase: str, log_level: str = None ) -> Dict:
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL', None )
    logger = logging.getLogger(lowercase )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger, {} )
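# Hedged usage sketch (assumption): the adapter above is obtained through the factory
# function and then used like a stdlib logger, plus the accelerate-only kwargs.
#
#   logger = A__(__name__, log_level="INFO")
#   logger.info("printed once, on the main process only")
#   logger.info("printed on every process", main_process_only=False)
#   logger.info("printed by each rank in turn", main_process_only=False, in_order=True)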
| 661 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Optional[int] =get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> Optional[Any]:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(_lowercase , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
        tokenizer = XLMRobertaTokenizer(_lowercase , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[int]:
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@cached_property
    def big_tokenizer( self ) -> Optional[int]:
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(_lowercase , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
            pickle.loads(pickled_tokenizer )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
        text = 'Hello World!'
        expected_encoding = [0, 3_53_78, 66_61, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(expected_encoding , self.big_tokenizer.encode(text ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str:
        text = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
        expected_encoding = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(expected_encoding , self.big_tokenizer.encode(text ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
# fmt: off
A : List[Any] ={'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
| 661 | 1 |
from typing import Any
def A__ ( lowercase: list ) -> list[Any]:
    if not lowercase:
        return []
    counts = [lowercase.count(value ) for value in lowercase]
    y = max(counts ) # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({lowercase[i] for i, value in enumerate(counts ) if value == y} )
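# Illustrative calls (added for clarity; `doctest.testmod()` below finds no examples
# because the function carries no docstring):
#   A__([2, 4, 4, 7])     -> [4]
#   A__([1, 1, 2, 2, 3])  -> [1, 2]   (all modes are returned, sorted)
#   A__([])               -> []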
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Dict ={
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class SCREAMING_SNAKE_CASE_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , vocab_size=25_60_08 , max_position_embeddings=20_48 , d_model=10_24 , ffn_dim=40_96 , num_layers=24 , attention_heads=16 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.0_2 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> int:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
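# Hedged usage sketch (assumption): instantiating the config above with a couple of
# overridden hyper-parameters, the usual PretrainedConfig-subclass pattern.
#
#   config = SCREAMING_SNAKE_CASE_(num_layers=12, attention_heads=8)
#   assert config.hidden_size == config.d_model   # resolved through attribute_map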
| 661 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput ( BaseOutput ):
    '''simple docstring'''
    latents : torch.FloatTensor
class SCREAMING_SNAKE_CASE_ ( ModelMixin , ConfigMixin ):
    '''simple docstring'''
@register_to_config
    def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (64,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 3 , sample_size: int = 32 , num_vq_embeddings: int = 2_56 , norm_num_groups: int = 32 , vq_embed_dim: Optional[int] = None , scaling_factor: float = 0.1_8_2_1_5 , norm_type: str = "group" , ) -> Tuple:
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.2_5 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
@apply_forward_hook
    def encode( self , x: torch.FloatTensor , return_dict: bool = True ) -> VQEncoderOutput:
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
@apply_forward_hook
    def decode( self , h: torch.FloatTensor , force_not_quantize: bool = False , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == 'spatial' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
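# Hedged usage sketch (assumption): a round trip through the VQ autoencoder above with
# a random image-shaped tensor; shapes are illustrative only.
#
#   model = SCREAMING_SNAKE_CASE_()
#   sample = torch.randn(1, 3, 32, 32)
#   latents = model.encode(sample).latents
#   recon = model.decode(latents).sample   # quantizes, then decodes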
| 661 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowercase : List[str] ='''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def get_user_input() -> List[Any]:
    compute_environment = _ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser( subparsers=None ) -> str:
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=_lowercase )
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=_lowercase )
    parser.add_argument(
        '--config_file', default=None, help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
), )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
return parser
def config_command( args ) -> List[Any]:
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F'accelerate configuration saved at {config_file}' )
def main() -> Optional[int]:
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
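# Usage note (added for clarity): this entry point is normally reached through the CLI
# rather than by running the module directly, e.g.:
#   accelerate config --config_file ~/.cache/huggingface/accelerate/default_config.yaml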
| 661 | 1 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Union[str, Any]:
        model = FlaxMT5ForConditionalGeneration.from_pretrained('google/mt5-small' )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='np' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='np' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -8_4.9_1_2_7
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 661 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS ='''src/transformers'''
# Matches is_xxx_available()
_re_backend =re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct =re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value =re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend =re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one =re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many =re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object =re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets =re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import =re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try =re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else =re.compile(R'''^\s*else:''')
def find_backend( lowercase: Dict ) -> int:
    if _re_test_backend.search(lowercase ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(lowercase )]
    backends.sort()
    return "_and_".join(backends )
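# Illustrative behaviour (added for clarity): given the line
#   "if not is_torch_available() and not is_tf_available():"
# the function returns "tf_and_torch" (backend names sorted, joined with "_and_"),
# and it returns None for lines that declare no backend check.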
def parse_init( lowercase: Any ) -> List[Any]:
    with open(lowercase, 'r', encoding='utf-8', newline='\n' ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith('_import_structure = {' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(R'\[([^\]]+)\]', content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(' ' * 8 + '"' ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(' ' * 8 + '"' ):
                    objects.append(line[9:-3] )
                elif line.startswith(' ' * 12 + '"' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('else' )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', ' ) )
        elif line.startswith(' ' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects, type_hint_objects ) -> Dict:
    def find_duplicates(lowercase: List[str] ):
        return [k for k, v in collections.Counter(lowercase ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = 'base imports' if key == 'none' else F'{key} backend'
            errors.append(F'Differences for {name}:' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'  {a} in TYPE_HINT but not in _import_structure.' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'  {a} in _import_structure but not in TYPE_HINT.' )
    return errors
def check_all_inits() -> List[str]:
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root, '__init__.py' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append('\n'.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('\n\n'.join(failures ) )
def get_transformers_submodules() -> int:
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('*.py' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep, '.' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('.py', '' ).replace(os.path.sep, '.' )
            if len(submodule.split('.' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES =[
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def check_submodules() -> Tuple:
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        'transformers', os.path.join(PATH_TO_TRANSFORMERS, '__init__.py' ), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '\n'.join(F'- {module}' for module in module_not_registered )
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F'{list_of_modules}\n'
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
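# Usage note (added for clarity): upstream this script is run from the repository root,
# e.g. `python utils/check_inits.py`, and raises a ValueError listing any init whose
# `_import_structure` and TYPE_CHECKING halves disagree.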
| 661 | 1 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=50 , initializer_range=0.0_2 , use_labels=True , scope=None , ) -> List[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs( self ) -> str:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ) -> List[Any]:
        return BertGenerationConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ) -> Any:
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self , config , input_ids , input_mask , token_labels , **kwargs ) -> List[str]:
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs ) -> List[str]:
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs ) -> Optional[int]:
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config ).to(torch_device ).eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3 ) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels , *args ) -> int:
        model = BertGenerationDecoder(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ) -> Tuple:
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp( self ) -> List[Any]:
        self.model_tester = BertGenerationEncoderTester(self )
        self.config_tester = ConfigTester(self , config_class=BertGenerationConfig , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = 'bert'
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
@slow
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> int:
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        self.assertIsNotNone(model )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[Any]:
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        input_ids = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 10_24] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
        model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        input_ids = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 5_03_58] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 661 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowercase : Any =logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ ( TrainingArguments ):
    '''simple docstring'''
    label_smoothing : Optional[float] = field(
        default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
    sortish_sampler : bool = field(default=False , metadata={"help": "Whether to SortishSamler or not."} )
    predict_with_generate : bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    adafactor : bool = field(default=False , metadata={"help": "whether to use adafactor"} )
    encoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
    decoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
    dropout : Optional[float] = field(default=None , metadata={"help": "Dropout probability. Goes into model.config."} )
    attention_dropout : Optional[float] = field(
        default=None , metadata={"help": "Attention dropout probability. Goes into model.config."} )
    lr_scheduler : Optional[str] = field(
        default="linear" , metadata={"help": f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
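# Hedged usage sketch (assumption): these arguments are parsed with HfArgumentParser in
# the accompanying finetuning script; the wiring below follows that pattern.
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser((SCREAMING_SNAKE_CASE_,))
#   (training_args,) = parser.parse_args_into_dataclasses()
#   print(training_args.label_smoothing, training_args.lr_scheduler)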
| 661 | 1 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule ( FlaxBigBirdForQuestionAnsweringModule ):
    '''simple docstring'''
    config : BigBirdConfig
    dtype : jnp.dtype = jnp.float32
    add_pooling_layer : bool = True
    def setup( self ) -> Union[str, Any]:
        super().setup()
        self.cls = nn.Dense(5 , dtype=self.dtype )
    def __call__( self , *args , **kwargs ) -> Dict:
        outputs = super().__call__(*args , **kwargs )
        cls_out = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions ( FlaxBigBirdForQuestionAnswering ):
    '''simple docstring'''
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq( start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels ) -> int:
    def cross_entropy(logits, labels, reduction=None ):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size )[None]).astype('f4' )
        logits = jax.nn.log_softmax(logits, axis=-1 )
        loss = -jnp.sum(labels * logits, axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss
    cross_entropy = partial(cross_entropy, reduction=jnp.mean )
    start_loss = cross_entropy(start_logits, start_labels )
    end_loss = cross_entropy(end_logits, end_labels )
    pooled_loss = cross_entropy(pooled_logits, pooler_labels )
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args :
    '''simple docstring'''
    model_id : str = "google/bigbird-roberta-base"
    logging_steps : int = 3000
    save_steps : int = 10500
    block_size : int = 128
    num_random_blocks : int = 3
    batch_size_per_device : int = 1
    max_epochs : int = 5
    # tx_args
    lr : float = 3e-5
    init_lr : float = 0.0
    warmup_steps : int = 20000
    weight_decay : float = 0.0_0_9_5
    save_dir : str = "bigbird-roberta-natural-questions"
    base_dir : str = "training-expt"
    tr_data_path : str = "data/nq-training.jsonl"
    val_data_path : str = "data/nq-validation.jsonl"
    def __post_init__( self ) -> Dict:
        os.makedirs(self.base_dir , exist_ok=True )
        self.save_dir = os.path.join(self.base_dir , self.save_dir )
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator :
    '''simple docstring'''
    pad_id : int
    max_length : int = 4096 # no dynamic padding on TPUs
    def __call__( self , batch ) -> Tuple:
        batch = self.collate_fn(batch )
        batch = jax.tree_util.tree_map(shard , batch )
        return batch
    def collate_fn( self , features ) -> Dict:
        input_ids , attention_mask = self.fetch_inputs(features['input_ids'] )
        batch = {
            'input_ids': jnp.array(input_ids , dtype=jnp.int32 ),
            'attention_mask': jnp.array(attention_mask , dtype=jnp.int32 ),
            'start_labels': jnp.array(features['start_token'] , dtype=jnp.int32 ),
            'end_labels': jnp.array(features['end_token'] , dtype=jnp.int32 ),
            'pooled_labels': jnp.array(features['category'] , dtype=jnp.int32 ),
        }
        return batch
    def fetch_inputs( self , input_ids: list ) -> Union[str, Any]:
        inputs = [self._fetch_inputs(ids ) for ids in input_ids]
        return zip(*inputs )
    def _fetch_inputs( self , input_ids: list ) -> Any:
        attention_mask = [1 for _ in range(len(input_ids ) )]
        while len(input_ids ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def A__ ( lowercase: Optional[Any], lowercase: List[Any], lowercase: Optional[Any]=None ) -> Union[str, Any]:
if seed is not None:
A : Dict =dataset.shuffle(seed=lowercase )
for i in range(len(lowercase ) // batch_size ):
A : Optional[int] =dataset[i * batch_size : (i + 1) * batch_size]
yield dict(lowercase )
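# pmap-ed train step: each device computes loss and gradients on its shard, then
# jax.lax.pmean averages both across the 'batch' axis so all replicas apply an
# identical optimizer update; a fresh dropout rng is split off and returned.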
@partial(jax.pmap, axis_name='batch' )
def A__ ( lowercase: str, lowercase: str, **lowercase: Any ) -> Optional[int]:
def loss_fn(lowercase: Dict ):
A : Union[str, Any] =model_inputs.pop('start_labels' )
A : Optional[Any] =model_inputs.pop('end_labels' )
A : Any =model_inputs.pop('pooled_labels' )
A : List[Any] =state.apply_fn(**lowercase, params=lowercase, dropout_rng=lowercase, train=lowercase )
A , A , A : List[str] =outputs
return state.loss_fn(
lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, )
A , A : Union[str, Any] =jax.random.split(lowercase )
A : Dict =jax.value_and_grad(lowercase )
A , A : str =grad_fn(state.params )
A : Any =jax.lax.pmean({'loss': loss}, axis_name='batch' )
A : Dict =jax.lax.pmean(lowercase, 'batch' )
A : List[str] =state.apply_gradients(grads=lowercase )
return state, metrics, new_drp_rng
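# pmap-ed eval step: same forward/loss computation without gradients or parameter
# updates; the loss is pmean-averaged across devices.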
@partial(jax.pmap, axis_name='batch' )
def A__ ( lowercase: str, **lowercase: Tuple ) -> List[str]:
A : List[str] =model_inputs.pop('start_labels' )
A : Union[str, Any] =model_inputs.pop('end_labels' )
A : Union[str, Any] =model_inputs.pop('pooled_labels' )
A : List[Any] =state.apply_fn(**lowercase, params=state.params, train=lowercase )
A , A , A : List[Any] =outputs
A : Tuple =state.loss_fn(lowercase, lowercase, lowercase, lowercase, lowercase, lowercase )
A : int =jax.lax.pmean({'loss': loss}, axis_name='batch' )
return metrics
class SCREAMING_SNAKE_CASE_ ( train_state.TrainState ):
'''simple docstring'''
lowercase : Callable = struct.field(pytree_node=lowerCAmelCase_ )
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
lowercase : Args
lowercase : Callable
lowercase : Callable
lowercase : Callable
lowercase : Callable
lowercase : wandb
lowercase : Callable = None
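# Builds the TrainState and replicates it across local devices; when `ckpt_dir` is
# given, the params, optimizer state and step counter are restored from disk before
# the optimizer transformation is rebuilt.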
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int=None ) -> List[Any]:
A : List[Any] =model.params
A : int =TrainState.create(
apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE__ , tx=SCREAMING_SNAKE_CASE__ , loss_fn=SCREAMING_SNAKE_CASE__ , )
if ckpt_dir is not None:
A , A , A , A , A : Dict =restore_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : str ={
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
A , A : Optional[int] =build_tx(**SCREAMING_SNAKE_CASE__ )
A : List[Any] =train_state.TrainState(
step=SCREAMING_SNAKE_CASE__ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE__ , tx=SCREAMING_SNAKE_CASE__ , opt_state=SCREAMING_SNAKE_CASE__ , )
A : int =args
A : List[str] =data_collator
A : List[str] =lr
A : Any =params
A : Any =jax_utils.replicate(SCREAMING_SNAKE_CASE__ )
return state
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
A : Union[str, Any] =self.args
A : Optional[int] =len(SCREAMING_SNAKE_CASE__ ) // args.batch_size
A : Optional[int] =jax.random.PRNGKey(0 )
A : str =jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() )
for epoch in range(args.max_epochs ):
A : Optional[Any] =jnp.array(0 , dtype=jnp.floataa )
A : Union[str, Any] =get_batched_dataset(SCREAMING_SNAKE_CASE__ , args.batch_size , seed=SCREAMING_SNAKE_CASE__ )
A : Any =0
for batch in tqdm(SCREAMING_SNAKE_CASE__ , total=SCREAMING_SNAKE_CASE__ , desc=f'Running EPOCH-{epoch}' ):
A : Dict =self.data_collator(SCREAMING_SNAKE_CASE__ )
A , A , A : Union[str, Any] =self.train_step_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
if i % args.logging_steps == 0:
A : Union[str, Any] =jax_utils.unreplicate(state.step )
A : int =running_loss.item() / i
A : List[Any] =self.scheduler_fn(state_step - 1 )
A : str =self.evaluate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Tuple ={
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(SCREAMING_SNAKE_CASE__ ) )
self.logger.log(SCREAMING_SNAKE_CASE__ , commit=SCREAMING_SNAKE_CASE__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]:
A : List[Any] =get_batched_dataset(SCREAMING_SNAKE_CASE__ , self.args.batch_size )
A : str =len(SCREAMING_SNAKE_CASE__ ) // self.args.batch_size
A : List[str] =jnp.array(0 , dtype=jnp.floataa )
A : Optional[int] =0
for batch in tqdm(SCREAMING_SNAKE_CASE__ , total=SCREAMING_SNAKE_CASE__ , desc='Evaluating ... ' ):
A : Dict =self.data_collator(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.val_step_fn(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
return running_loss / i
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> str:
A : List[Any] =jax_utils.unreplicate(SCREAMING_SNAKE_CASE__ )
print(f'SAVING CHECKPOINT IN {save_dir}' , end=' ... ' )
self.model_save_fn(SCREAMING_SNAKE_CASE__ , params=state.params )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'opt_state.msgpack' ) , 'wb' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE__ , 'args.joblib' ) )
joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE__ , 'data_collator.joblib' ) )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , 'training_state.json' ) , 'w' ) as f:
json.dump({'step': state.step.item()} , SCREAMING_SNAKE_CASE__ )
print('DONE' )
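# Counterpart to save_checkpoint: deserializes flax_model.msgpack / opt_state.msgpack
# with from_bytes, reloads the pickled args and data collator, and reads the step
# counter back from training_state.json.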
def A__ ( lowercase: Tuple, lowercase: Dict ) -> Optional[Any]:
print(F'RESTORING CHECKPOINT FROM {save_dir}', end=' ... ' )
with open(os.path.join(lowercase, 'flax_model.msgpack' ), 'rb' ) as f:
A : Tuple =from_bytes(state.params, f.read() )
with open(os.path.join(lowercase, 'opt_state.msgpack' ), 'rb' ) as f:
A : List[str] =from_bytes(state.opt_state, f.read() )
A : Any =joblib.load(os.path.join(lowercase, 'args.joblib' ) )
A : Any =joblib.load(os.path.join(lowercase, 'data_collator.joblib' ) )
with open(os.path.join(lowercase, 'training_state.json' ), 'r' ) as f:
A : List[str] =json.load(lowercase )
A : int =training_state['step']
print('DONE' )
return params, opt_state, step, args, data_collator
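# Learning-rate schedule: linear warmup from init_lr to lr over warmup_steps, then
# linear decay towards 1e-7 for the remaining steps; optax.join_schedules switches
# at the warmup boundary.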
def A__ ( lowercase: Dict, lowercase: List[Any], lowercase: List[str], lowercase: List[str] ) -> int:
A : str =num_train_steps - warmup_steps
A : Any =optax.linear_schedule(init_value=lowercase, end_value=lowercase, transition_steps=lowercase )
A : str =optax.linear_schedule(init_value=lowercase, end_value=1e-7, transition_steps=lowercase )
A : int =optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps] )
return lr
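# AdamW with a weight-decay mask: biases and LayerNorm scales are excluded from
# decay, the usual convention when fine-tuning transformers.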
def A__ ( lowercase: Union[str, Any], lowercase: Union[str, Any], lowercase: Tuple, lowercase: Union[str, Any], lowercase: List[str] ) -> Union[str, Any]:
def weight_decay_mask(lowercase: List[Any] ):
A : Union[str, Any] =traverse_util.flatten_dict(lowercase )
A : List[str] ={k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
return traverse_util.unflatten_dict(lowercase )
A : Dict =scheduler_fn(lowercase, lowercase, lowercase, lowercase )
A : List[str] =optax.adamw(learning_rate=lowercase, weight_decay=lowercase, mask=lowercase )
return tx, lr
| 661 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowercase : int =2
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : List[Any] , *, # begin keyword-only arguments
SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : int=None , ) -> List[Any]:
A , A , A , A : Optional[Any] =bos, unk, pad, eos
A : Dict =[]
A : Union[str, Any] =[]
A : Any ={}
A : int =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : Any =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[Any] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =len(self.symbols )
def __eq__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
return self.indices == other.indices
def __getitem__( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[Any] ) -> Union[str, Any]:
return len(self.symbols )
def __contains__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
return sym in self.indices
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
A : Union[str, Any] =cls()
d.add_from_file(SCREAMING_SNAKE_CASE__ )
return d
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Any:
if word in self.indices and not overwrite:
A : int =self.indices[word]
A : Union[str, Any] =self.count[idx] + n
return idx
else:
A : Tuple =len(self.symbols )
A : str =idx
self.symbols.append(SCREAMING_SNAKE_CASE__ )
self.count.append(SCREAMING_SNAKE_CASE__ )
return idx
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
return 0
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
try:
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE__ ) )
return
A : str =f.readlines()
A : int =self._load_meta(SCREAMING_SNAKE_CASE__ )
for line in lines[indices_start_line:]:
try:
A , A : Optional[int] =line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
A : int =True
A , A : Optional[Any] =line.rsplit(' ' , 1 )
else:
A : Any =False
A : Tuple =int(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(SCREAMING_SNAKE_CASE__ ) )
self.add_symbol(SCREAMING_SNAKE_CASE__ , n=SCREAMING_SNAKE_CASE__ , overwrite=SCREAMING_SNAKE_CASE__ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def A__ ( lowercase: Union[str, Any] ) -> str:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
A : int =dict((re.sub(r'@@$', '', lowercase ), v) if k.endswith('@@' ) else (re.sub(r'$', '</w>', lowercase ), v) for k, v in d.items() )
A : int ='<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
A : List[Any] =d[k] # restore
return da
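# End-to-end fairseq -> Hugging Face conversion: load checkpoint.pt, rewrite the
# fairseq dictionary into an HF vocab file, copy the BPE merges, emit config.json
# and the tokenizer config, remap the decoder state dict, and verify it loads into
# BioGptForCausalLM before saving the weights.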
def A__ ( lowercase: Optional[int], lowercase: Optional[Any] ) -> str:
# prep
if not os.path.exists(lowercase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowercase, exist_ok=lowercase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
A : List[str] =os.path.join(lowercase, 'checkpoint.pt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
A : Optional[Any] =torch.load(lowercase, map_location='cpu' )
A : Any =chkpt['cfg']['model']
# dicts
A : Any =os.path.join(lowercase, 'dict.txt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
A : Dict =Dictionary.load(lowercase )
A : Optional[Any] =rewrite_dict_keys(src_dict.indices )
A : Tuple =len(lowercase )
A : Any =os.path.join(lowercase, VOCAB_FILES_NAMES['vocab_file'] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# merges_file (bpecodes)
A : List[str] =os.path.join(lowercase, 'bpecodes' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
A : List[str] =os.path.join(lowercase, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowercase, lowercase )
# model config
A : Tuple =os.path.join(lowercase, 'config.json' )
A : Tuple ={
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# tokenizer config
A : int =os.path.join(lowercase, lowercase )
A : List[str] ={
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# model
A : List[Any] =chkpt['model']
# remove unneeded keys
A : List[Any] =[
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowercase, lowercase )
A : str =list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
A : Union[str, Any] =model_state_dict.pop(lowercase )
else:
A : List[str] =model_state_dict.pop(lowercase )
A : Any =BioGptConfig.from_pretrained(lowercase )
A : str =BioGptForCausalLM(lowercase )
# check that it loads ok
model_new.load_state_dict(lowercase )
# save
A : Tuple =os.path.join(lowercase, lowercase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase, lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
_lowercase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase : List[Any] =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 661 | 1 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
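# Helper that stores the image-processor hyper-parameters and produces the kwargs
# dict consumed by the DPTImageProcessor tests below.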
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : Optional[int]=3 , SCREAMING_SNAKE_CASE__ : List[str]=18 , SCREAMING_SNAKE_CASE__ : str=30 , SCREAMING_SNAKE_CASE__ : str=4_00 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
A : str =size if size is not None else {'height': 18, 'width': 18}
A : Union[str, Any] =parent
A : Union[str, Any] =batch_size
A : Tuple =num_channels
A : Dict =image_size
A : Any =min_resolution
A : List[str] =max_resolution
A : Optional[Any] =do_resize
A : int =size
A : Tuple =do_normalize
A : Optional[Any] =image_mean
A : Optional[int] =image_std
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[str] = DPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> int:
A : str =DPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
A : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_mean' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_std' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'size' ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
A : str =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
A : int =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
A : Dict =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : List[str] =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
A : Any =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[int]:
# Initialize image_processing
A : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
A : Optional[Any] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A : List[Any] =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[str]:
# Initialize image_processing
A : int =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
A : Dict =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A : Any =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 661 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
_lowercase : str =False
_lowercase : Optional[Any] =False
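# argparse entry point: the sub-parser's `func` default invokes this factory with
# the parsed Namespace to build the command object.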
def A__ ( lowercase: Namespace ) -> Optional[int]:
return TrainCommand(lowercase )
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Dict:
A : Optional[Any] =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=SCREAMING_SNAKE_CASE__ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=SCREAMING_SNAKE_CASE__ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE__ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=SCREAMING_SNAKE_CASE__ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE__ , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=SCREAMING_SNAKE_CASE__ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=SCREAMING_SNAKE_CASE__ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE__ , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Namespace ) -> List[Any]:
A : Optional[int] =logging.get_logger('transformers-cli/training' )
A : Dict ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =args.output
A : List[str] =args.column_label
A : int =args.column_text
A : Union[str, Any] =args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
A : Optional[Any] =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
A : Tuple =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A : Dict =None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
A : List[Any] =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A : Optional[Any] =args.validation_split
A : str =args.train_batch_size
A : Any =args.valid_batch_size
A : Dict =args.learning_rate
A : List[str] =args.adam_epsilon
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[str]:
raise NotImplementedError
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 661 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : Dict = LongformerTokenizer
lowercase : Optional[int] = True
lowercase : Union[str, Any] = LongformerTokenizerFast
lowercase : Any = True
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A : Tuple =[
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
A : Any =dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
A : List[Any] =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A : Dict ={'unk_token': '<unk>'}
A : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A : str =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , **SCREAMING_SNAKE_CASE__ : int ) -> Dict:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> int:
A : Union[str, Any] ='lower newer'
A : Optional[Any] ='lower newer'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[Any]:
A : Tuple =self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
A : Union[str, Any] ='lower newer'
A : Optional[int] =['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
A : Optional[int] =tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) # , add_prefix_space=True)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =tokens + [tokenizer.unk_token]
A : Optional[int] =[0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[str]:
A : Dict =self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
A : Optional[Any] =self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
A : List[str] =tokenizer.encode('sequence builders' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
A : Any =tokenizer.encode('multi-sequence build' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
A : int =tokenizer.encode(
'sequence builders' , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
A : int =tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
A : List[str] =tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Any:
A : Any =self.get_tokenizer()
A : int ='Encode this sequence.'
A : str =tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
A : Optional[Any] =tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : int =tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
A : Union[str, Any] =tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing spaces after special tokens
A : Dict ='<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )} ) # mask token has a left space
A : List[str] =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
A : Any ='Encode <mask> sequence'
A : int ='Encode <mask>sequence'
A : Tuple =tokenizer.encode(SCREAMING_SNAKE_CASE__ )
A : Dict =encoded.index(SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =tokenizer.encode(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =encoded.index(SCREAMING_SNAKE_CASE__ )
A : Dict =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict:
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A : List[str] =self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : Dict =self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : int ='A, <mask> AllenNLP sentence.'
A : Optional[Any] =tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so its sum divided by the length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
A : Any =tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
A : str =tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
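# `add_prefix_space` makes the first word tokenize like a mid-sentence (Ġ-prefixed)
# word; `trim_offsets` strips that leading space from the returned character
# offsets. The next two tests cover serialization of these flags and their effect
# on offset mappings.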
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
A : str =self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
A : Any =json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(post_processor_state['add_prefix_space'] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(post_processor_state['trim_offsets'] , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
# Verify that the character offsets returned by the tokenizer are consistent with
# the `add_prefix_space` and `trim_offsets` arguments
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A : Optional[int] ='hello' # `hello` is a token in the vocabulary of `pretrained_name`
A : List[str] =f'{text_of_1_token} {text_of_1_token}'
A : List[Any] =self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
A : Tuple =tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ) + 1, len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
A : Tuple =self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
A : List[str] =tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ) + 1, len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
A : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
A : int =tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ), len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
A : Dict =self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ), len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
A : Any =f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
A : int =self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ) + 1, 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
A : Any =self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ), 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
A : Any =self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ )
A : Dict =tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ), 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
| 661 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Dict=3 , SCREAMING_SNAKE_CASE__ : Tuple=30 , SCREAMING_SNAKE_CASE__ : int=4_00 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : str=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Any=1 / 2_55 , SCREAMING_SNAKE_CASE__ : int=True , ) -> Optional[int]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
A : Optional[Any] =size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
A : Union[str, Any] =parent
A : Union[str, Any] =batch_size
A : Union[str, Any] =num_channels
A : int =min_resolution
A : List[Any] =max_resolution
A : Dict =do_resize
A : Tuple =size
A : List[str] =do_normalize
A : List[Any] =image_mean
A : Dict =image_std
A : Any =do_rescale
A : List[str] =rescale_factor
A : Optional[Any] =do_pad
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
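# Re-implements the processor's shortest-edge resize rule so the tests can predict
# the output height/width; for batched inputs the per-dimension maxima give the
# padded batch shape.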
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict=False ) -> Dict:
if not batched:
A : Any =image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE__ , Image.Image ):
A , A : Union[str, Any] =image.size
else:
A , A : Tuple =image.shape[1], image.shape[2]
if w < h:
A : Any =int(self.size['shortest_edge'] * h / w )
A : Any =self.size['shortest_edge']
elif w > h:
A : Dict =self.size['shortest_edge']
A : Dict =int(self.size['shortest_edge'] * w / h )
else:
A : List[str] =self.size['shortest_edge']
A : Dict =self.size['shortest_edge']
else:
A : List[Any] =[]
for image in image_inputs:
A , A : int =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A : str =max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[0] )[0]
A : Tuple =max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[Any] = ConditionalDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Tuple:
A : str =ConditionalDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
A : Tuple =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_mean' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_std' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'size' ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : int =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
A : str =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE__ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
A : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
A : List[Any] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : List[str] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A : Union[str, Any] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
A : Tuple =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Any =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : Optional[int] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
A : Optional[int] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Tuple =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : int =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Union[str, Any]:
# prepare image and target
A : Union[str, Any] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A : List[Any] =json.loads(f.read() )
A : Any ={'image_id': 3_97_69, 'annotations': target}
# encode them
A : str =ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
A : Any =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Optional[Any] =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : List[str] =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Dict =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : str =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : Dict =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : List[str] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : Union[str, Any] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify orig_size
A : List[Any] =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : int =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
# prepare image, target and masks_path
A : List[str] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A : Optional[int] =json.loads(f.read() )
A : int ={'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
A : Optional[Any] =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A : List[Any] =ConditionalDetrImageProcessor(format='coco_panoptic' )
A : Union[str, Any] =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , masks_path=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Dict =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : Dict =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Optional[int] =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : List[Any] =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Any =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : List[Any] =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : Any =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : str =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify masks
A : int =82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , SCREAMING_SNAKE_CASE__ )
# verify orig_size
A : Any =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : str =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
| 661 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
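# Random (batch, max_length, num_mel_bins) spectrograms stand in for real audio;
# the expected sequence length is derived from the frequency x time patch grid plus
# the two prepended special tokens (see the computation in __init__).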
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str]=13 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Dict=24 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=32 , SCREAMING_SNAKE_CASE__ : Dict=5 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=37 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=10 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Tuple=2 , ) -> int:
A : List[Any] =parent
A : Any =batch_size
A : int =patch_size
A : Union[str, Any] =max_length
A : int =num_mel_bins
A : Optional[Any] =is_training
A : Optional[int] =use_labels
A : Optional[int] =hidden_size
A : Any =num_hidden_layers
A : Any =num_attention_heads
A : Optional[int] =intermediate_size
A : str =hidden_act
A : Optional[int] =hidden_dropout_prob
A : Dict =attention_probs_dropout_prob
A : Optional[int] =type_sequence_label_size
A : Any =initializer_range
A : List[str] =scope
A : str =frequency_stride
A : List[Any] =time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A : int =(self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
A : Tuple =(self.max_length - self.patch_size) // self.time_stride + 1
A : str =frequency_out_dimension * time_out_dimension
A : int =num_patches + 2
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
A : Any =floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
A : List[str] =None
if self.use_labels:
A : Dict =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Optional[int] =self.get_config()
return config, input_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Optional[int]:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
A : List[str] =ASTModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
A : Dict =model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]:
A : str =self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) ,
) : Any =config_and_inputs
A : Any ={'input_values': input_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowercase : Dict = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
lowercase : Optional[int] = False
lowercase : int = False
lowercase : Dict = False
lowercase : List[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any:
A : Optional[int] =ASTModelTester(self )
A : Union[str, Any] =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
A , A : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : List[str] =model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A : Tuple =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[Any]:
A , A : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Optional[Any] =model_class(SCREAMING_SNAKE_CASE__ )
A : Tuple =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Union[str, Any] =[*signature.parameters.keys()]
A : Union[str, Any] =['input_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
A : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : List[str] =ASTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def A__ ( ) -> Optional[int]:
A : Tuple =hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset' )
A , A : str =torchaudio.load(lowercase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Dict:
A : Dict =self.default_feature_extractor
A : int =ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.default_feature_extractor
A , A : Union[str, Any] =prepare_audio()
A : Optional[int] =audio.squeeze().numpy()
A : str =feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
A : Optional[Any] =model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
A : Dict =torch.Size((1, 5_27) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
A : Any =torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
| 661 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : List[Any] =1_6
_lowercase : Union[str, Any] =3_2
def A__ ( lowercase: Accelerator, lowercase: int = 16, lowercase: str = "bert-base-cased" ) -> Optional[int]:
A : List[Any] =AutoTokenizer.from_pretrained(lowercase )
A : Any =load_dataset('glue', 'mrpc' )
def tokenize_function(lowercase: Any ):
# max_length=None => use the model max length (it's actually the default)
A : List[str] =tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowercase, max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A : Any =datasets.map(
lowercase, batched=lowercase, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A : Dict =tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowercase: Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase, padding='max_length', max_length=128, return_tensors='pt' )
return tokenizer.pad(lowercase, padding='longest', return_tensors='pt' )
# Instantiate dataloaders.
A : Union[str, Any] =DataLoader(
tokenized_datasets['train'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
A : str =DataLoader(
tokenized_datasets['validation'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
return train_dataloader, eval_dataloader
def A__ ( lowercase: Dict, lowercase: Optional[int], lowercase: Any, lowercase: str ) -> Tuple:
model.eval()
A : Tuple =0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A : Tuple =model(**lowercase )
A : Tuple =outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A , A : Union[str, Any] =accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase ) - 1:
A : List[Any] =predictions[: len(eval_dataloader.dataset ) - samples_seen]
A : Optional[int] =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase, references=lowercase, )
A : Union[str, Any] =metric.compute()
return eval_metric["accuracy"]
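# Note on the slicing above: distributed samplers pad the last batch so every
# process sees the same number of batches, which means gather() returns a few
# duplicated samples on the final step; trimming to
# len(eval_dataloader.dataset) - samples_seen keeps the metric exact.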
def A__ ( lowercase: Union[str, Any], lowercase: Dict ) -> List[str]:
# Initialize accelerator
A : Optional[int] =Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A : int =config['lr']
A : Optional[Any] =int(config['num_epochs'] )
A : Union[str, Any] =int(config['seed'] )
A : List[str] =int(config['batch_size'] )
A : Optional[Any] =args.model_name_or_path
set_seed(lowercase )
A , A : str =get_dataloaders(lowercase, lowercase, lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A : List[str] =AutoModelForSequenceClassification.from_pretrained(lowercase, return_dict=lowercase )
# Instantiate optimizer
A : Any =(
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A : List[str] =optimizer_cls(params=model.parameters(), lr=lowercase )
if accelerator.state.deepspeed_plugin is not None:
A : Optional[int] =accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
A : Dict =1
A : Union[str, Any] =(len(lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A : List[Any] =get_linear_schedule_with_warmup(
optimizer=lowercase, num_warmup_steps=0, num_training_steps=lowercase, )
else:
A : List[str] =DummyScheduler(lowercase, total_num_steps=lowercase, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A : Optional[int] =accelerator.prepare(
lowercase, lowercase, lowercase, lowercase, lowercase )
# We need to keep track of how many total steps we have iterated over
A : Tuple =0
# We also need to keep track of the stating epoch so files are named properly
A : List[str] =0
A : Tuple =evaluate.load('glue', 'mrpc' )
A : Optional[int] =num_epochs
if args.partial_train_epoch is not None:
A : Dict =args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
A : List[Any] =args.resume_from_checkpoint.split('epoch_' )[1]
A : List[Any] =''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A : Union[str, Any] =int(lowercase ) + 1
A : List[str] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
accelerator.print('resumed checkpoint performance:', lowercase )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:', lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:', optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir, F'state_{starting_epoch-1}.json' ), 'r' ) as f:
A : Union[str, Any] =json.load(lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A : str ={}
for epoch in range(lowercase, lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
A : Tuple =model(**lowercase )
A : List[Any] =outputs.loss
A : Any =loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A : Union[str, Any] =F'epoch_{epoch}'
A : Optional[Any] =os.path.join(args.output_dir, lowercase )
accelerator.save_state(lowercase )
A : Optional[Any] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
A : Dict =accuracy
A : Optional[Any] =lr_scheduler.get_lr()[0]
A : Any =optimizer.param_groups[0]['lr']
A : str =epoch
A : Dict =overall_step
accelerator.print(F'epoch {epoch}:', lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F'state_{epoch}.json' ), 'w' ) as f:
json.dump(lowercase, lowercase )
def A__ ( ) -> Optional[int]:
    A : Optional[int] =argparse.ArgumentParser(description='Simple example of a training script with checkpointing.' )
parser.add_argument(
'--model_name_or_path', type=lowercase, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=lowercase, )
parser.add_argument(
'--output_dir', type=lowercase, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
parser.add_argument(
'--resume_from_checkpoint', type=lowercase, default=lowercase, help='If the training should continue from a checkpoint folder.', )
parser.add_argument(
'--partial_train_epoch', type=lowercase, default=lowercase, help='If passed, the training will stop after this number of epochs.', )
parser.add_argument(
'--num_epochs', type=lowercase, default=2, help='Number of train epochs.', )
A : str =parser.parse_args()
A : Optional[int] ={'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(lowercase, lowercase )
if __name__ == "__main__":
main()
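# Hedged CLI sketch (script name and paths are illustrative, not from this file):
#   accelerate launch checkpointing_example.py --num_epochs 2 --output_dir ./ckpts
#   accelerate launch checkpointing_example.py --resume_from_checkpoint ./ckpts/epoch_0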
| 661 | 1 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_lowercase : Dict =logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> None:
warnings.warn(
'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use GLPNImageProcessor instead.' , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 661 |
def A__ ( lowercase: int ) -> int:
if not isinstance(lowercase, lowercase ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
A : Any =0
while number:
        # `number &= number - 1` clears the lowest set bit, so each iteration
        # jumps straight to the next 1-bit instead of checking every position:
        # the loop runs once per set bit rather than 32 times
number &= number - 1
count += 1
return count
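# Worked example: for number = 0b1011, successive `number &= number - 1` steps give
# 0b1010, then 0b1000, then 0b0000, so the function returns 3 -- one iteration per set bit.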
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : List[str] =logging.get_logger(__name__)
_lowercase : int ={
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : str = "mvp"
lowercase : Optional[Any] = ["past_key_values"]
lowercase : Any = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple=5_02_67 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=10_24 , SCREAMING_SNAKE_CASE__ : Optional[Any]=12 , SCREAMING_SNAKE_CASE__ : Tuple=40_96 , SCREAMING_SNAKE_CASE__ : List[Any]=16 , SCREAMING_SNAKE_CASE__ : List[Any]=12 , SCREAMING_SNAKE_CASE__ : int=40_96 , SCREAMING_SNAKE_CASE__ : Tuple=16 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : Dict="gelu" , SCREAMING_SNAKE_CASE__ : List[Any]=10_24 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.0_2 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[Any]=1_00 , SCREAMING_SNAKE_CASE__ : Any=8_00 , **SCREAMING_SNAKE_CASE__ : Any , ) -> Optional[Any]:
A : Tuple =vocab_size
A : Dict =max_position_embeddings
A : Union[str, Any] =d_model
A : str =encoder_ffn_dim
A : Any =encoder_layers
A : str =encoder_attention_heads
A : Tuple =decoder_ffn_dim
A : List[Any] =decoder_layers
A : Tuple =decoder_attention_heads
A : str =dropout
A : Union[str, Any] =attention_dropout
A : Tuple =activation_dropout
A : Tuple =activation_function
A : Optional[int] =init_std
A : str =encoder_layerdrop
A : int =decoder_layerdrop
A : Optional[Any] =classifier_dropout
A : Dict =use_cache
A : Tuple =encoder_layers
A : List[str] =scale_embedding # scale factor will be sqrt(d_model) if True
A : Dict =use_prompt
A : List[Any] =prompt_length
A : str =prompt_mid_dim
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , forced_eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , SCREAMING_SNAKE_CASE__ ):
A : int =self.bos_token_id
warnings.warn(
f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
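# Hedged usage sketch: the defaults above mirror the RUCAIBox/mvp checkpoint.
#   config = SCREAMING_SNAKE_CASE_(encoder_layers=6, decoder_layers=6)
#   config.num_attention_heads  # -> 16, resolved to encoder_attention_heads
#                               #    via the attribute_map defined above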
| 661 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def A__ ( *lowercase: Tuple, lowercase: Optional[Union[Dict, Any]] = None, lowercase: Dict=True, lowercase: Any=2 ) -> List[Any]:
from .. import __version__
A : Optional[Any] =take_from
A : Union[str, Any] =()
if not isinstance(args[0], lowercase ):
A : List[str] =(args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowercase ).base_version ) >= version.parse(lowercase ):
raise ValueError(
F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
F' version {__version__} is >= {version_name}' )
A : Tuple =None
if isinstance(lowercase, lowercase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowercase ),)
A : Union[str, Any] =F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
elif hasattr(lowercase, lowercase ):
values += (getattr(lowercase, lowercase ),)
A : Optional[Any] =F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
elif deprecated_kwargs is None:
A : List[Any] =F'`{attribute}` is deprecated and will be removed in version {version_name}.'
if warning is not None:
A : List[Any] =warning + ' ' if standard_warn else ''
warnings.warn(warning + message, lowercase, stacklevel=lowercase )
if isinstance(lowercase, lowercase ) and len(lowercase ) > 0:
A : Any =inspect.getouterframes(inspect.currentframe() )[1]
A : int =call_frame.filename
A : int =call_frame.lineno
A : Optional[int] =call_frame.function
A , A : int =next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
if len(lowercase ) == 0:
return
elif len(lowercase ) == 1:
return values[0]
return values
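# Hedged usage sketch (this mirrors diffusers' `deprecate` helper; the kwarg
# name is illustrative): inside a function that still accepts a deprecated
# `scale` keyword argument, a caller might write
#   scale = deprecate('scale', '1.0.0', 'use `factor` instead', take_from=kwargs)
# which warns, pops 'scale' from kwargs if present, and returns its old value.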
| 661 | 1 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowercase: Any, lowercase: Dict, lowercase: List[Any], lowercase: List[Any] ) -> Optional[int]:
# Initialise PyTorch model
A : Optional[int] =BigBirdConfig.from_json_file(lowercase )
print(F'Building PyTorch model from configuration: {config}' )
if is_trivia_qa:
A : List[str] =BigBirdForQuestionAnswering(lowercase )
else:
A : List[Any] =BigBirdForPreTraining(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(lowercase, lowercase, is_trivia_qa=lowercase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowercase )
if __name__ == "__main__":
_lowercase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
_lowercase : Optional[int] =parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
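# Hedged CLI sketch (script name and paths are illustrative):
#   python convert_bigbird_tf_to_pt.py --tf_checkpoint_path ./model.ckpt \
#       --big_bird_config_file ./config.json --pytorch_dump_path ./pt_model --is_trivia_qa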
| 661 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A__ ( lowercase: int, lowercase: str ) -> Dict:
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Dict, lowercase: Tuple, lowercase: str ) -> str:
A : Any =tmp_path / 'cache'
A : Dict ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : Dict =JsonDatasetReader(lowercase, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Any, lowercase: Union[str, Any] ) -> Tuple:
A : Tuple =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : Optional[Any] =features.copy() if features else default_expected_features
A : Union[str, Any] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : str =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
], )
def A__ ( lowercase: Optional[int], lowercase: str, lowercase: Dict ) -> Optional[int]:
A : int =tmp_path / 'cache'
A : Tuple ={'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
A : int =features.copy() if features else default_expected_features
A : str =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A__ ( lowercase: Optional[Any], lowercase: str ) -> Tuple:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A : str ={'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
A : Dict =features.copy()
A : List[str] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : int =tmp_path / 'cache'
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Union[str, Any], lowercase: Any, lowercase: str ) -> Optional[Any]:
A : Optional[int] =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =JsonDatasetReader(lowercase, cache_dir=lowercase, split=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('path_type', [str, list] )
def A__ ( lowercase: Optional[Any], lowercase: int, lowercase: Union[str, Any] ) -> List[Any]:
if issubclass(lowercase, lowercase ):
A : int =jsonl_path
elif issubclass(lowercase, lowercase ):
A : Any =[jsonl_path]
A : Optional[Any] =tmp_path / 'cache'
A : Tuple ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[str] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
def A__ ( lowercase: List[str], lowercase: Tuple, lowercase: Optional[Any]=("train",) ) -> Tuple:
assert isinstance(lowercase, lowercase )
for split in splits:
A : List[str] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Tuple, lowercase: Optional[int], lowercase: Any ) -> str:
A : List[str] =tmp_path / 'cache'
A : Union[str, Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : str =JsonDatasetReader({'train': jsonl_path}, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Optional[int], lowercase: Optional[int] ) -> Tuple:
A : Any =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =features.copy() if features else default_expected_features
A : Dict =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[Any] =JsonDatasetReader({'train': jsonl_path}, features=lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Any, lowercase: List[Any], lowercase: List[Any] ) -> Tuple:
if split:
A : Optional[int] ={split: jsonl_path}
else:
A : Dict ='train'
A : Optional[Any] ={'train': jsonl_path, 'test': jsonl_path}
A : Tuple =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[Any] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A__ ( lowercase: List[Any] ) -> Tuple:
return json.load(lowercase )
def A__ ( lowercase: List[Any] ) -> Tuple:
    return [json.loads(line ) for line in buffer]
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : Any =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : List[Any] =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> str:
A : Union[str, Any] =tmp_path_factory.mktemp('data' ) / f'test.json.{extension}'
A : Union[str, Any] =str(shared_datadir / f'test_file.json.{extension}' )
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compression=SCREAMING_SNAKE_CASE__ ).write()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : str =f.read()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : List[str] =f.read()
assert exported_content == original_content
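# Hedged round-trip sketch mirroring the writer tests above (`dataset` is any
# datasets.Dataset fixture):
#   with io.BytesIO() as buffer:
#       JsonDatasetWriter(dataset, buffer, lines=True).write()
#       buffer.seek(0)
#       rows = [json.loads(line) for line in buffer]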
| 661 | 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
_lowercase : List[str] =logging.get_logger(__name__)
def A__ ( lowercase: nn.ModuleList, lowercase: nn.ModuleList, lowercase: List[int] ) -> None:
A : Dict =nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowercase ) == len(lowercase ), F'{len(lowercase )} != {len(lowercase )}'
dest_layers.load_state_dict(layers_to_copy.state_dict() )
_lowercase : Union[str, Any] ={
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
_lowercase : Optional[int] ={
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def A__ ( lowercase: Any, lowercase: Union[str, Any] ) -> int:
try:
A : Any =LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
F' {n_student}' )
return list(range(lowercase ) )
def A__ ( lowercase: Optional[int], lowercase: str ) -> List[int]:
if n_student > n_teacher:
raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
elif n_teacher == n_student:
return list(range(lowercase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
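# Worked example from the tables above: distilling a 12-layer teacher into a
# 3-layer student copies teacher layers [0, 6, 11] (LAYERS_TO_COPY) and
# supervises with teacher layers [3, 7, 11] (LAYERS_TO_SUPERVISE).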
def A__ ( lowercase: Union[str, PreTrainedModel], lowercase: Union[str, Path] = "student", lowercase: Union[int, None] = None, lowercase: Union[int, None] = None, lowercase: Optional[Any]=False, lowercase: Optional[Any]=None, lowercase: List[str]=None, **lowercase: List[str], ) -> Tuple[PreTrainedModel, List[int], List[int]]:
    A : Optional[int] ='encoder_layers and decoder_layers cannot both be None -- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(lowercase, lowercase ):
AutoTokenizer.from_pretrained(lowercase ).save_pretrained(lowercase ) # purely for convenience
A : int =AutoModelForSeqaSeqLM.from_pretrained(lowercase ).eval()
else:
assert isinstance(lowercase, lowercase ), F'teacher must be a model or string got type {type(lowercase )}'
A : Any =teacher.config.to_diff_dict()
try:
A , A : Optional[int] =teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
A : Union[str, Any] =teacher_e
if d is None:
A : Any =teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config, 'num_encoder_layers' ):
A , A : str =teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
A , A : Optional[Any] =teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
A : Optional[int] =teacher_e
if d is None:
A : List[Any] =teacher_d
if hasattr(teacher.config, 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowercase )
# Copy weights
A : int =teacher.config_class(**lowercase )
A : List[str] =AutoModelForSeqaSeqLM.from_config(lowercase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
A : Optional[int] =student.load_state_dict(teacher.state_dict(), strict=lowercase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
A , A : str =list(range(lowercase ) ), list(range(lowercase ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(lowercase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
A : List[int] =pick_layers_to_copy(lowercase, lowercase )
if d_layers_to_copy is None:
A : List[int] =pick_layers_to_copy(lowercase, lowercase )
try:
if hasattr(
lowercase, 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, lowercase )
copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, lowercase )
else:
copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, lowercase )
copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, lowercase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block, student.encoder.block, lowercase )
copy_layers(teacher.decoder.block, student.decoder.block, lowercase )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
A : Tuple ={
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(lowercase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
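# Hedged CLI sketch (model id and flags are illustrative; fire maps flags onto
# the function's parameters, as in the upstream make_student script):
#   python make_student.py --teacher facebook/bart-large-cnn --save_path student_dir --e 6 --d 3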
| 661 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[int] = DDIMPipeline
lowercase : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase : Optional[Any] = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
lowercase : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowercase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
torch.manual_seed(0 )
A : str =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
A : Optional[int] =DDIMScheduler()
A : Optional[Any] ={'unet': unet, 'scheduler': scheduler}
return components
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> Any:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
A : List[Any] =torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
A : Union[str, Any] =torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
A : Optional[int] ={
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
A : Union[str, Any] ='cpu'
A : Tuple =self.get_dummy_components()
A : Union[str, Any] =self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : str =self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
A : str =pipe(**SCREAMING_SNAKE_CASE__ ).images
A : Optional[Any] =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
A : Optional[Any] =np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
A : str =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Any ='google/ddpm-cifar10-32'
A : Optional[int] =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMScheduler()
A : int =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddim.to(SCREAMING_SNAKE_CASE__ )
ddim.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Dict =torch.manual_seed(0 )
A : Optional[Any] =ddim(generator=SCREAMING_SNAKE_CASE__ , eta=0.0 , output_type='numpy' ).images
A : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : Tuple =np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : Optional[int] ='google/ddpm-ema-bedroom-256'
A : str =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : str =DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddpm.to(SCREAMING_SNAKE_CASE__ )
ddpm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Any =torch.manual_seed(0 )
A : Optional[int] =ddpm(generator=SCREAMING_SNAKE_CASE__ , output_type='numpy' ).images
A : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
A : Optional[int] =np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
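# Hedged sketch mirroring the first slow test above:
#   unet = UNetaDModel.from_pretrained('google/ddpm-cifar10-32')
#   ddim = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#   image = ddim(generator=torch.manual_seed(0), eta=0.0, output_type='numpy').images[0]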
| 661 | 1 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[Any] = IFImgaImgSuperResolutionPipeline
lowercase : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
lowercase : Optional[Any] = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> Optional[Any]:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
A : Optional[int] =torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
A : int =torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
A : Tuple =floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
A : Tuple =floats_tensor((1, 3, 16, 16) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
A : int ={
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Union[str, Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[int]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> int:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 661 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
A : Dict =tempfile.mkdtemp()
A : int =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
A : str =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Optional[int] =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
A : Optional[int] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : str =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Union[str, Any] =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
A : Optional[Any] =self.get_image_processor()
A : Optional[Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Dict =self.prepare_image_inputs()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : Optional[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any:
A : str =self.get_image_processor()
A : Union[str, Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : str =[torch.ones((1, 3, 5, 5) )]
A : Optional[Any] =[[17_64, 26_46]]
A : List[Any] =[[6_83, 10_24]]
A : Union[str, Any] =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : str =[np.ones((1, 3, 5, 5) )]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
A : Any =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
A : Tuple =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : str ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
A : Optional[Any] =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Any =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]:
A : Optional[Any] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Optional[Any] =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Dict =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
A : Any =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : Tuple =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : List[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
A : int =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =[tf.ones((1, 3, 5, 5) )]
A : Tuple =[[17_64, 26_46]]
A : Union[str, Any] =[[6_83, 10_24]]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : List[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : Any =[np.ones((1, 3, 5, 5) )]
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A : List[str] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
A : Optional[int] =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Dict =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
A : Any =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Tuple =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[str]:
A : Optional[Any] =self.get_image_processor()
A : Dict =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
A : Optional[int] =[tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )]
A : Union[str, Any] =[torch.tensor(SCREAMING_SNAKE_CASE__ )]
A : int =[[17_64, 26_46]]
A : int =[[6_83, 10_24]]
A : Dict =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
A : Union[str, Any] =self.get_image_processor()
A : int =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : List[Any] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Tuple =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
A : Dict =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
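# Note on the shapes exercised above (a reading aid, not part of the original
# sample): SAM's post_process_masks upsamples the low-resolution mask logits
# back to the original image sizes -- here (1764, 2646) is the original image
# size and (683, 1024) the size after the model's internal resize -- which is
# why every assertion checks for masks of shape (1, 3, 1764, 2646).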
| 661 | 1 |
import functools
from typing import Any
def A__ ( lowercase: str, lowercase: list[str] ) -> bool:
# Validation
if not isinstance(lowercase, lowercase ) or len(lowercase ) == 0:
raise ValueError('the string should be a non-empty string' )
if not isinstance(lowercase, lowercase ) or not all(
isinstance(lowercase, lowercase ) and len(lowercase ) > 0 for item in words ):
raise ValueError('the words should be a list of non-empty strings' )
# Build trie
A : dict[str, Any] ={}
A : Dict ='WORD_KEEPER'
for word in words:
A : List[str] =trie
for c in word:
if c not in trie_node:
A : Optional[int] ={}
A : Optional[int] =trie_node[c]
A : List[str] =True
A : Optional[Any] =len(lowercase )
# Dynamic programming method
@functools.cache
def is_breakable(lowercase: int ) -> bool:
if index == len_string:
return True
A : int =trie
for i in range(lowercase, lowercase ):
A : int =trie_node.get(string[i], lowercase )
if trie_node is None:
return False
if trie_node.get(lowercase, lowercase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
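# ---------------------------------------------------------------------------
# Readable usage sketch (added for clarity; the names below are illustrative
# and not part of the sample above, whose identifiers are obfuscated): the
# function implements word-break with a character trie plus a memoized DFS.
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    trie: dict[str, Any] = {}
    for word in words:  # build the character trie
        node = trie
        for ch in word:
            node = node.setdefault(ch, {})
        node["WORD_KEEPER"] = True  # end-of-word marker, as in the sample
    @functools.cache
    def is_breakable(index: int) -> bool:  # can string[index:] be segmented?
        if index == len(string):
            return True
        node = trie
        for i in range(index, len(string)):
            node = node.get(string[i])
            if node is None:
                return False
            if node.get("WORD_KEEPER") and is_breakable(i + 1):
                return True
        return False
    return is_breakable(0)
assert word_break("applepenapple", ["apple", "pen"])
assert not word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])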
| 661 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_lowercase : Optional[Any] =WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def A__ ( lowercase: Optional[int] ) -> Optional[int]:
A : str =test_results.split(' ' )
A : List[str] =0
A : Tuple =0
# When the output is short enough, it is surrounded by "=" signs: "== OUTPUT ==".
# When it is too long, those signs are absent.
A : List[str] =expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
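# Worked example for the parser above (a hypothetical pytest summary line; in
# this file the function is bound to the obfuscated name `A__`):
#   "== 2 failed, 10 passed in 63.21s ==" ->
#       failed = 2, success = 10, time_spent = "63.21s"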
def A__ ( lowercase: List[Any] ) -> str:
A : Union[str, Any] ={}
A : Optional[Any] =None
A : Union[str, Any] =False
for line in failures_short_lines.split('\n' ):
if re.search(r'_ \[doctest\]', lowercase ):
A : List[Any] =True
A : Any =line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
A : Dict =line
A : List[str] =False
return failures
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
A : Tuple =title
A : Dict =doc_test_results['time_spent'].split(',' )[0]
A : Union[str, Any] =doc_test_results['success']
A : Any =doc_test_results['failures']
A : Optional[Any] =self.n_success + self.n_failures
# Failures and success of the modeling tests
A : Union[str, Any] =doc_test_results
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> str:
A : Any =[self._time_spent]
A : List[str] =0
for time in time_spent:
A : List[Any] =time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE__ ) == 1:
A : List[str] =[0, 0, time_parts[0]]
A , A , A : Tuple =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
A , A , A : str =total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s'
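# e.g. a raw value of "1:02:03" parses to 3723 total seconds and renders as
# "1h2m3s"; a bare value like "45.5" has no ":" and is treated as seconds only.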
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Tuple =40
A : Optional[Any] ={k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
A : Any =''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
A : Optional[int] =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
A : Tuple =[
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Optional[int]:
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
A : Any =f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
A : Dict =client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
A : List[str] =''
for key, value in failures.items():
A : Any =value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
A : Union[str, Any] =job_name
A : Any ={'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
A : int ={
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
A : Union[str, Any] =self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
A : Union[str, Any] =sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
A : Any =f'*Num failures* :{len(job_result["failed"] )} \n'
A : List[Any] =job_result['failures']
A : Any =self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def A__ ( ) -> Union[str, Any]:
A : Any =os.environ['GITHUB_RUN_ID']
A : List[Any] =F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
A : Union[str, Any] =requests.get(lowercase ).json()
A : List[Any] ={}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
A : List[str] =math.ceil((result['total_count'] - 100) / 100 )
for i in range(lowercase ):
A : List[str] =requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.', lowercase )
return {}
def A__ ( lowercase: str ) -> Optional[Any]:
A : Any ={}
if os.path.exists(lowercase ):
A : List[Any] =os.listdir(lowercase )
for file in files:
try:
with open(os.path.join(lowercase, lowercase ), encoding='utf-8' ) as f:
A : Optional[int] =f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(lowercase, lowercase )}.' ) from e
return _artifact
def A__ ( ) -> int:
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
A : Dict =name
A : Dict =[]
def __str__( self : Optional[Any] ) -> List[str]:
return self.name
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
self.paths.append({'name': self.name, 'path': path} )
A : Dict[str, Artifact] ={}
A : str =filter(os.path.isdir, os.listdir() )
for directory in directories:
A : Tuple =directory
if artifact_name not in _available_artifacts:
A : int =Artifact(lowercase )
_available_artifacts[artifact_name].add_path(lowercase )
return _available_artifacts
if __name__ == "__main__":
_lowercase : Optional[int] =get_job_links()
_lowercase : str =retrieve_available_artifacts()
_lowercase : List[Any] =collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_lowercase : Optional[Any] ={
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_lowercase : List[Any] =github_actions_job_links.get('''run_doctests''')
_lowercase : int =available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_lowercase : Dict =retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_lowercase , _lowercase , _lowercase : List[Any] =handle_test_results(artifact['''stats'''])
_lowercase : Any =failed
_lowercase : Union[str, Any] =success
_lowercase : str =time_spent[1:-1] + ''', '''
_lowercase : Any =extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_lowercase : Tuple =line.replace('''FAILED ''', '''''')
_lowercase : int =line.split()[0].replace('''\n''', '''''')
if "::" in line:
_lowercase , _lowercase : str =line.split('''::''')
else:
_lowercase , _lowercase : Union[str, Any] =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_lowercase : Any =docs[file_regex]
doc_test_results[category]["failed"].append(test)
_lowercase : Any =all_failures[test] if test in all_failures else '''N/A'''
_lowercase : Tuple =failure
break
_lowercase : Optional[int] =Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 661 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowercase : str ={}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] =['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
_lowercase : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 |
_lowercase : Dict ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 661 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowercase : Any ='''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_lowercase : Any ='''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_lowercase : int ='''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from the sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : int ) -> str:
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> Any:
A : Any =len(references[0] )
if any(len(SCREAMING_SNAKE_CASE__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
A : Union[str, Any] =[[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE__ )]
A : List[Any] =TER(
normalized=SCREAMING_SNAKE_CASE__ , no_punct=SCREAMING_SNAKE_CASE__ , asian_support=SCREAMING_SNAKE_CASE__ , case_sensitive=SCREAMING_SNAKE_CASE__ , )
A : str =sb_ter.corpus_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 661 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( lowercase: str ) -> List[str]:
def decorator(lowercase: int ):
A : Tuple =getattr(lowercase, 'handle_key', [] )
handle += [key]
setattr(lowercase, 'handle_key', lowercase )
return func
return decorator
def A__ ( *lowercase: List[str] ) -> Dict:
def decorator(lowercase: Union[str, Any] ):
A : Optional[int] =getattr(lowercase, 'handle_key', [] )
handle += keys
setattr(lowercase, 'handle_key', lowercase )
return func
return decorator
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
def __new__( cls : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
A : Dict =super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not hasattr(SCREAMING_SNAKE_CASE__ , 'key_handler' ):
setattr(SCREAMING_SNAKE_CASE__ , 'key_handler' , {} )
setattr(SCREAMING_SNAKE_CASE__ , 'handle_input' , KeyHandler.handle_input )
for value in attrs.values():
A : Optional[Any] =getattr(SCREAMING_SNAKE_CASE__ , 'handle_key' , [] )
for key in handled_keys:
A : str =value
return new_cls
@staticmethod
def SCREAMING_SNAKE_CASE_ ( cls : str ) -> Any:
A : str =get_character()
if char != KEYMAP["undefined"]:
A : List[str] =ord(SCREAMING_SNAKE_CASE__ )
A : List[str] =cls.key_handler.get(SCREAMING_SNAKE_CASE__ )
if handler:
A : List[str] =char
return handler(cls )
else:
return None
def A__ ( cls: Optional[int] ) -> str:
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
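# ---------------------------------------------------------------------------
# Usage sketch of the key-handler pattern above (illustrative; in accelerate
# these pieces appear as `mark`/`mark_multiple`, the `KeyHandler` metaclass,
# and `register`). A method decorated with key metadata is collected by the
# metaclass into `cls.key_handler`, and `handle_input` dispatches on the next
# keypress:
#
#   class Menu(metaclass=KeyHandler):
#       @mark("up")                    # attaches handle_key = ["up"]
#       def move_up(cls):
#           ...
#
#   Menu.handle_input()                # reads one char via get_character(),
#                                      # looks it up in key_handler, dispatches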
| 661 | 1 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ ( *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : int ) -> int:
pass
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
lowercase : Any = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict:
A : str =pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
A : Tuple =[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
A : Any =object_detector(examples[0] , threshold=0.0 )
A : Dict =len(SCREAMING_SNAKE_CASE__ )
self.assertGreater(SCREAMING_SNAKE_CASE__ , 0 )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
{
'score': ANY(SCREAMING_SNAKE_CASE__ ),
'label': ANY(SCREAMING_SNAKE_CASE__ ),
'box': {'xmin': ANY(SCREAMING_SNAKE_CASE__ ), 'ymin': ANY(SCREAMING_SNAKE_CASE__ ), 'xmax': ANY(SCREAMING_SNAKE_CASE__ ), 'ymax': ANY(SCREAMING_SNAKE_CASE__ )},
}
for i in range(SCREAMING_SNAKE_CASE__ )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
pass
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
A : Any =pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
A : List[str] =object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
A : int =object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [
[
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Dict:
A : str =pipeline('zero-shot-object-detection' )
A : int =object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
A : Optional[Any] =object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> int:
pass
@require_torch
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
A : Dict =0.2
A : int =pipeline('zero-shot-object-detection' )
A : str =object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=SCREAMING_SNAKE_CASE__ , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
@require_torch
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ) -> str:
A : Any =2
A : str =pipeline('zero-shot-object-detection' )
A : Dict =object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=SCREAMING_SNAKE_CASE__ , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , )
| 661 |
import math
def A__ ( lowercase: int ) -> list:
A : Optional[Any] =[True] * n
A : Tuple =False
A : List[Any] =False
A : Dict =True
for i in range(3, int(n**0.5 + 1 ), 2 ):
A : Dict =i * 2
while index < n:
A : Dict =False
A : Dict =index + i
A : Tuple =[2]
for i in range(3, lowercase, 2 ):
if is_prime[i]:
primes.append(lowercase )
return primes
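# Sanity check: for n = 20 the sieve above yields [2, 3, 5, 7, 11, 13, 17, 19].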
def A__ ( lowercase: int = 999_966_663_333 ) -> int:
A : Optional[int] =math.floor(math.sqrt(lowercase ) ) + 100
A : Optional[int] =prime_sieve(lowercase )
A : Optional[Any] =0
A : List[Any] =0
A : Union[str, Any] =primes[prime_index]
while (last_prime**2) <= limit:
A : Tuple =primes[prime_index + 1]
A : Optional[int] =last_prime**2
A : Tuple =next_prime**2
# Get numbers divisible by lps(current)
A : int =lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
A : List[Any] =upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
A : Any =0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
A : List[str] =next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
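# Context (a reading aid): this is Project Euler 234 -- summing the
# "semidivisible" numbers n <= limit, i.e. those divisible by exactly one of
# lps(n) (largest prime <= sqrt(n)) and ups(n) (smallest prime >= sqrt(n)).
# The loops above add the multiples of each prime inside the window between
# consecutive prime squares, then subtract the doubly-counted multiples of
# lps * ups twice.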
| 661 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Dict ={
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any =[
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
_lowercase : Optional[Any] =['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
_lowercase : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 |
import heapq
def A__ ( lowercase: dict ) -> set[int]:
A : list[list] =[]
# for each node, push it, its adjacency list, and its rank onto the queue;
# with the heapq module the queue behaves like a priority queue.
# heapq implements a min-priority queue, so -1 * len(v) is used to pop the max rank first
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowercase, [-1 * len(lowercase ), (key, value)] )
# chosen_vertices = set of chosen vertices
A : Dict =set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
A : List[str] =heapq.heappop(lowercase )[1][0]
chosen_vertices.add(lowercase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
A : str =elem[1][1].index(lowercase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowercase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : List[Any] ={0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
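# For the sample graph above, the greedy pop order is 2, 0, 1, 4, so the
# printed cover should be {0, 1, 2, 4}.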
| 661 | 1 |
def A__ ( ) -> list[list[int]]:
return [list(range(1_000 - i, -1_000 - i, -1 ) ) for i in range(1_000 )]
_lowercase : str =generate_large_matrix()
_lowercase : Optional[int] =(
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def A__ ( lowercase: list[list[int]] ) -> None:
assert all(row == sorted(lowercase, reverse=lowercase ) for row in grid )
assert all(list(lowercase ) == sorted(lowercase, reverse=lowercase ) for col in zip(*lowercase ) )
def A__ ( lowercase: list[int] ) -> int:
A : Optional[Any] =0
A : List[Any] =len(lowercase ) - 1
# Edge cases: the array is empty, or every value is negative (no non-negative entries).
if not array or array[0] < 0:
return 0
while right + 1 > left:
A : List[Any] =(left + right) // 2
A : Optional[int] =array[mid]
# Boundary found: num is negative and the element just before it is non-negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
A : List[str] =mid + 1
else:
A : int =mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase )
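# e.g. for the descending row [4, 3, 2, -1] the search above returns 3: the
# index of the first negative value, i.e. the count of non-negative entries.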
def A__ ( lowercase: list[list[int]] ) -> int:
A : Optional[int] =0
A : Dict =len(grid[0] )
for i in range(len(lowercase ) ):
A : str =find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase ) * len(grid[0] )) - total
def A__ ( lowercase: list[list[int]] ) -> int:
return len([number for row in grid for number in row if number < 0] )
def A__ ( lowercase: list[list[int]] ) -> int:
A : str =0
for row in grid:
for i, number in enumerate(lowercase ):
if number < 0:
total += len(lowercase ) - i
break
return total
def A__ ( ) -> None:
from timeit import timeit
print('Running benchmarks' )
A : Any =(
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
A : Optional[int] =timeit(F'{func}(grid=grid)', setup=lowercase, number=500 )
print(F'{func}() took {time:0.4f} seconds' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 661 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase : List[Any] =logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
A : Tuple =feature_size
A : int =sampling_rate
A : List[str] =padding_value
A : Tuple =kwargs.pop('padding_side' , 'right' )
A : str =kwargs.pop('return_attention_mask' , SCREAMING_SNAKE_CASE__ )
super().__init__(**SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = True , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
A : Tuple ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
A : Dict =processed_features[self.model_input_names[0]]
A : int =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(SCREAMING_SNAKE_CASE__ ) == 0:
if return_attention_mask:
A : List[Any] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
A : List[str] =required_input[0]
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
A : Any =0
while len(required_input[index] ) == 0:
index += 1
if index < len(SCREAMING_SNAKE_CASE__ ):
A : Dict =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
A : List[Any] ='tf'
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
A : Optional[int] ='pt'
elif isinstance(SCREAMING_SNAKE_CASE__ , (int, float, list, tuple, np.ndarray) ):
A : Union[str, Any] ='np'
else:
raise ValueError(
f'type of {first_element} unknown: {type(SCREAMING_SNAKE_CASE__ )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
A : int =to_numpy(SCREAMING_SNAKE_CASE__ )
else:
A : List[Any] =[to_numpy(SCREAMING_SNAKE_CASE__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
A : List[Any] =self._get_padding_strategies(padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =processed_features[self.model_input_names[0]]
A : List[str] =len(SCREAMING_SNAKE_CASE__ )
if not all(len(SCREAMING_SNAKE_CASE__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
A : Tuple =[]
for i in range(SCREAMING_SNAKE_CASE__ ):
A : int ={k: v[i] for k, v in processed_features.items()}
# truncation
A : List[Any] =self._truncate(
SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , )
truncated_inputs.append(SCREAMING_SNAKE_CASE__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
A : Any =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
A : Optional[Any] =PaddingStrategy.MAX_LENGTH
A : List[Any] ={}
for i in range(SCREAMING_SNAKE_CASE__ ):
# padding
A : Optional[Any] =self._pad(
truncated_inputs[i] , max_length=SCREAMING_SNAKE_CASE__ , padding_strategy=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
for key, value in outputs.items():
if key not in batch_outputs:
A : Dict =[]
if value.dtype is np.dtype(np.floataa ):
A : Tuple =value.astype(np.floataa )
batch_outputs[key].append(SCREAMING_SNAKE_CASE__ )
return BatchFeature(SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[Dict[str, np.ndarray], BatchFeature] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> dict:
A : Optional[int] =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
A : List[str] =len(SCREAMING_SNAKE_CASE__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Tuple =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : int =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(SCREAMING_SNAKE_CASE__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
A : str =np.ones(len(SCREAMING_SNAKE_CASE__ ) , dtype=np.intaa )
if needs_to_be_padded:
A : Union[str, Any] =max_length - len(SCREAMING_SNAKE_CASE__ )
if self.padding_side == "right":
if return_attention_mask:
A : Dict =np.pad(
processed_features['attention_mask'] , (0, difference) )
A : str =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
A : Tuple =np.pad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
A : List[Any] =np.pad(
processed_features['attention_mask'] , (difference, 0) )
A : Union[str, Any] =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
A : Tuple =np.pad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[Dict[str, np.ndarray], BatchFeature] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> Optional[Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
A : Tuple =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Any =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : List[str] =len(SCREAMING_SNAKE_CASE__ ) > max_length
if needs_to_be_truncated:
A : Union[str, Any] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
A : Dict =processed_features['attention_mask'][:max_length]
return processed_features
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Dict=None ) -> Union[str, Any]:
# Get padding strategy
if padding is not False:
if padding is True:
A : List[Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : Tuple =PaddingStrategy(SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : Optional[int] =padding
else:
A : List[str] =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
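# ---------------------------------------------------------------------------
# Usage sketch for the padding logic above (hypothetical call; any concrete
# subclass such as Wav2Vec2FeatureExtractor behaves this way):
#
#   fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
#   batch = fe.pad(
#       {"input_values": [[0.1, 0.2, 0.3], [0.4]]},
#       padding="longest",             # resolved to PaddingStrategy.LONGEST
#       return_attention_mask=True,
#       return_tensors="np",
#   )
#   # batch["input_values"] -> shape (2, 3), second row right-padded with 0.0
#   # batch["attention_mask"] -> [[1, 1, 1], [1, 0, 0]]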
| 661 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : List[Any] =1_6
_lowercase : Union[str, Any] =3_2
def A__ ( lowercase: Accelerator, lowercase: int = 16, lowercase: str = "bert-base-cased" ) -> Optional[int]:
A : List[Any] =AutoTokenizer.from_pretrained(lowercase )
A : Any =load_dataset('glue', 'mrpc' )
def tokenize_function(lowercase: Any ):
# max_length=None => use the model max length (it's actually the default)
A : List[str] =tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowercase, max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A : Any =datasets.map(
lowercase, batched=lowercase, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A : Dict =tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowercase: Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase, padding='max_length', max_length=128, return_tensors='pt' )
return tokenizer.pad(lowercase, padding='longest', return_tensors='pt' )
# Instantiate dataloaders.
A : Union[str, Any] =DataLoader(
tokenized_datasets['train'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
A : str =DataLoader(
tokenized_datasets['validation'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
return train_dataloader, eval_dataloader
def A__ ( lowercase: Dict, lowercase: Optional[int], lowercase: Any, lowercase: str ) -> Tuple:
model.eval()
A : Tuple =0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A : Tuple =model(**lowercase )
A : Tuple =outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A , A : Union[str, Any] =accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase ) - 1:
A : List[Any] =predictions[: len(eval_dataloader.dataset ) - samples_seen]
A : Optional[int] =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase, references=lowercase, )
A : Union[str, Any] =metric.compute()
return eval_metric["accuracy"]
def A__ ( lowercase: Union[str, Any], lowercase: Dict ) -> List[str]:
# Initialize accelerator
A : Optional[int] =Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A : int =config['lr']
A : Optional[Any] =int(config['num_epochs'] )
A : Union[str, Any] =int(config['seed'] )
A : List[str] =int(config['batch_size'] )
A : Optional[Any] =args.model_name_or_path
set_seed(lowercase )
A , A : str =get_dataloaders(lowercase, lowercase, lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A : List[str] =AutoModelForSequenceClassification.from_pretrained(lowercase, return_dict=lowercase )
# Instantiate optimizer
A : Any =(
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A : List[str] =optimizer_cls(params=model.parameters(), lr=lowercase )
if accelerator.state.deepspeed_plugin is not None:
A : Optional[int] =accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
A : Dict =1
A : Union[str, Any] =(len(lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A : List[Any] =get_linear_schedule_with_warmup(
optimizer=lowercase, num_warmup_steps=0, num_training_steps=lowercase, )
else:
A : List[str] =DummyScheduler(lowercase, total_num_steps=lowercase, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A : Optional[int] =accelerator.prepare(
lowercase, lowercase, lowercase, lowercase, lowercase )
# We need to keep track of how many total steps we have iterated over
A : Tuple =0
# We also need to keep track of the stating epoch so files are named properly
A : List[str] =0
A : Tuple =evaluate.load('glue', 'mrpc' )
A : Optional[int] =num_epochs
if args.partial_train_epoch is not None:
A : Dict =args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
A : List[Any] =args.resume_from_checkpoint.split('epoch_' )[1]
A : List[Any] =''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A : Union[str, Any] =int(lowercase ) + 1
A : List[str] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
accelerator.print('resumed checkpoint performance:', lowercase )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:', lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizer\'s lr:', optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir, F'state_{starting_epoch-1}.json' ), 'r' ) as f:
A : Union[str, Any] =json.load(lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A : str ={}
for epoch in range(lowercase, lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
A : Tuple =model(**lowercase )
A : List[Any] =outputs.loss
A : Any =loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A : Union[str, Any] =F'epoch_{epoch}'
A : Optional[Any] =os.path.join(args.output_dir, lowercase )
accelerator.save_state(lowercase )
A : Optional[Any] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
A : Dict =accuracy
A : Optional[Any] =lr_scheduler.get_lr()[0]
A : Any =optimizer.param_groups[0]['lr']
A : str =epoch
A : Dict =overall_step
accelerator.print(F'epoch {epoch}:', lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F'state_{epoch}.json' ), 'w' ) as f:
json.dump(lowercase, lowercase )
def A__ ( ) -> Optional[int]:
A : Optional[int] =argparse.ArgumentParser(description='Simple example of a training script with periodic checkpointing.' )
parser.add_argument(
'--model_name_or_path', type=lowercase, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=lowercase, )
parser.add_argument(
'--output_dir', type=lowercase, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
parser.add_argument(
'--resume_from_checkpoint', type=lowercase, default=lowercase, help='If the training should continue from a checkpoint folder.', )
parser.add_argument(
'--partial_train_epoch', type=lowercase, default=lowercase, help='If passed, the training will stop after this number of epochs.', )
parser.add_argument(
'--num_epochs', type=lowercase, default=2, help='Number of train epochs.', )
A : str =parser.parse_args()
A : Optional[int] ={'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(lowercase, lowercase )
if __name__ == "__main__":
main()
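# Example invocation (paths illustrative):
#   accelerate launch this_script.py --model_name_or_path bert-base-cased --output_dir ./ckpts
#   # resume from the first saved checkpoint:
#   accelerate launch this_script.py --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0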
| 661 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_lowercase : Optional[int] =logging.get_logger(__name__)
_lowercase : List[str] ={
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : int = "deberta-v2"
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : str=12_81_00 , SCREAMING_SNAKE_CASE__ : List[Any]=15_36 , SCREAMING_SNAKE_CASE__ : Dict=24 , SCREAMING_SNAKE_CASE__ : List[str]=24 , SCREAMING_SNAKE_CASE__ : List[str]=61_44 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0_2 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-7 , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Tuple=-1 , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[str]=0 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Dict:
super().__init__(**SCREAMING_SNAKE_CASE__ )
A : Dict =hidden_size
A : Optional[Any] =num_hidden_layers
A : Optional[int] =num_attention_heads
A : Optional[int] =intermediate_size
A : Any =hidden_act
A : Any =hidden_dropout_prob
A : Union[str, Any] =attention_probs_dropout_prob
A : Optional[Any] =max_position_embeddings
A : Tuple =type_vocab_size
A : Tuple =initializer_range
A : int =relative_attention
A : int =max_relative_positions
A : Optional[Any] =pad_token_id
A : Union[str, Any] =position_biased_input
# Backwards compatibility
if type(SCREAMING_SNAKE_CASE__ ) == str:
A : Any =[x.strip() for x in pos_att_type.lower().split('|' )]
A : Any =pos_att_type
A : Tuple =vocab_size
A : Any =layer_norm_eps
A : Optional[Any] =kwargs.get('pooler_hidden_size' , SCREAMING_SNAKE_CASE__ )
A : str =pooler_dropout
A : Any =pooler_hidden_act
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A : List[Any] ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A : int ={0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def SCREAMING_SNAKE_CASE_ ( self : int ) -> int:
return 12
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
A : str =super().generate_dummy_inputs(preprocessor=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
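# ---------------------------------------------------------------------------
# Usage sketch (illustrative; in transformers these classes are
# DebertaV2Config and DebertaV2OnnxConfig):
#
#   config = DebertaV2Config()                      # defaults defined above
#   onnx_config = DebertaV2OnnxConfig(config, task="default")
#   # with the default type_vocab_size == 0, the generated dummy inputs drop
#   # token_type_ids, matching generate_dummy_inputs() above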
| 661 | 1 |